From 4ba029fe15a5d4314c2e233b015705c3d9be6ad2 Mon Sep 17 00:00:00 2001 From: dimazhornyk Date: Fri, 5 Apr 2024 16:49:02 +0200 Subject: [PATCH 01/69] feat: validium with da --- .../migrations/20240403214948_add_da_inclusion_proof.down.sql | 2 ++ .../migrations/20240403214948_add_da_inclusion_proof.up.sql | 2 ++ core/lib/dal/src/models/storage_block.rs | 2 ++ core/lib/zksync_core/src/da_sender/mod.rs | 2 ++ core/lib/zksync_core/src/da_sender/tests.rs | 1 + .../src/eth_sender/l1_batch_commit_data_generator.rs | 4 +++- core/lib/zksync_core/src/lib.rs | 1 + 7 files changed, 13 insertions(+), 1 deletion(-) create mode 100644 core/lib/dal/migrations/20240403214948_add_da_inclusion_proof.down.sql create mode 100644 core/lib/dal/migrations/20240403214948_add_da_inclusion_proof.up.sql create mode 100644 core/lib/zksync_core/src/da_sender/mod.rs create mode 100644 core/lib/zksync_core/src/da_sender/tests.rs diff --git a/core/lib/dal/migrations/20240403214948_add_da_inclusion_proof.down.sql b/core/lib/dal/migrations/20240403214948_add_da_inclusion_proof.down.sql new file mode 100644 index 000000000000..a630f8ae3e2c --- /dev/null +++ b/core/lib/dal/migrations/20240403214948_add_da_inclusion_proof.down.sql @@ -0,0 +1,2 @@ +ALTER TABLE l1_batches + DROP COLUMN da_inclusion_proof; diff --git a/core/lib/dal/migrations/20240403214948_add_da_inclusion_proof.up.sql b/core/lib/dal/migrations/20240403214948_add_da_inclusion_proof.up.sql new file mode 100644 index 000000000000..70aeba3e3a2d --- /dev/null +++ b/core/lib/dal/migrations/20240403214948_add_da_inclusion_proof.up.sql @@ -0,0 +1,2 @@ +ALTER TABLE l1_batches + ADD COLUMN IF NOT EXISTS da_inclusion_proof BYTEA[] NOT NULL DEFAULT '{}'; diff --git a/core/lib/dal/src/models/storage_block.rs b/core/lib/dal/src/models/storage_block.rs index c7957162e8c0..7a4e4550ac37 100644 --- a/core/lib/dal/src/models/storage_block.rs +++ b/core/lib/dal/src/models/storage_block.rs @@ -146,6 +146,8 @@ pub struct StorageL1Batch { pub events_queue_commitment: Option>, pub bootloader_initial_content_commitment: Option>, pub pubdata_input: Option>, + + pub da_inclusion_proof: Option>>, } impl From for L1BatchHeader { diff --git a/core/lib/zksync_core/src/da_sender/mod.rs b/core/lib/zksync_core/src/da_sender/mod.rs new file mode 100644 index 000000000000..87c2771955a9 --- /dev/null +++ b/core/lib/zksync_core/src/da_sender/mod.rs @@ -0,0 +1,2 @@ +#[cfg(test)] +mod tests; diff --git a/core/lib/zksync_core/src/da_sender/tests.rs b/core/lib/zksync_core/src/da_sender/tests.rs new file mode 100644 index 000000000000..8b137891791f --- /dev/null +++ b/core/lib/zksync_core/src/da_sender/tests.rs @@ -0,0 +1 @@ + diff --git a/core/lib/zksync_core/src/eth_sender/l1_batch_commit_data_generator.rs b/core/lib/zksync_core/src/eth_sender/l1_batch_commit_data_generator.rs index 32ae80b1e176..d63dce188af1 100644 --- a/core/lib/zksync_core/src/eth_sender/l1_batch_commit_data_generator.rs +++ b/core/lib/zksync_core/src/eth_sender/l1_batch_commit_data_generator.rs @@ -38,7 +38,9 @@ pub struct RollupModeL1BatchCommitDataGenerator; /// contracts operating in validium mode. It differs from [`RollupModeL1BatchCommitDataGenerator`] /// in that it does not include the pubdata in the produced message. 
#[derive(Debug, Clone)] -pub struct ValidiumModeL1BatchCommitDataGenerator; +pub struct ValidiumModeL1BatchCommitDataGenerator { + pub validium_da_mode: ValidiumDAMode, +} impl L1BatchCommitDataGenerator for RollupModeL1BatchCommitDataGenerator { fn l1_commit_batches( diff --git a/core/lib/zksync_core/src/lib.rs b/core/lib/zksync_core/src/lib.rs index 8484987eb12e..def51a234b87 100644 --- a/core/lib/zksync_core/src/lib.rs +++ b/core/lib/zksync_core/src/lib.rs @@ -96,6 +96,7 @@ pub mod block_reverter; pub mod commitment_generator; pub mod consensus; pub mod consistency_checker; +pub mod da_sender; pub mod eth_sender; pub mod eth_watch; pub mod fee_model; From 638d0a762d65a1227e31cf8c56eb0b16de96133c Mon Sep 17 00:00:00 2001 From: dimazhornyk Date: Tue, 30 Apr 2024 14:21:05 +0200 Subject: [PATCH 02/69] chore: tmp --- core/lib/config/src/configs/da_dispatcher.rs | 17 ++++++++++ core/lib/config/src/configs/general.rs | 2 ++ core/lib/config/src/configs/mod.rs | 1 + core/lib/dal/src/blocks_dal.rs | 31 +++++++++++++------ core/lib/dal/src/models/storage_block.rs | 2 +- core/lib/types/src/block.rs | 5 +++ .../l1_batch_commit_data_generator.rs | 4 +-- core/lib/zksync_core/src/lib.rs | 5 ++- 8 files changed, 53 insertions(+), 14 deletions(-) create mode 100644 core/lib/config/src/configs/da_dispatcher.rs diff --git a/core/lib/config/src/configs/da_dispatcher.rs b/core/lib/config/src/configs/da_dispatcher.rs new file mode 100644 index 000000000000..c5d49531c962 --- /dev/null +++ b/core/lib/config/src/configs/da_dispatcher.rs @@ -0,0 +1,17 @@ +use serde::Deserialize; + +#[derive(Debug, Deserialize, Clone, Copy, PartialEq, Default)] +pub enum ValidiumDAMode { + #[default] + NoDA, + Celestia, + EigenDA, + Avail, + GCS, +} + +#[derive(Debug, Clone, PartialEq)] +pub struct DADispatcherConfig { + pub mode: ValidiumDAMode, + pub da_api_url: String, +} diff --git a/core/lib/config/src/configs/general.rs b/core/lib/config/src/configs/general.rs index 0cd55ed9222c..07d622e0787e 100644 --- a/core/lib/config/src/configs/general.rs +++ b/core/lib/config/src/configs/general.rs @@ -1,6 +1,7 @@ use crate::{ configs::{ chain::{CircuitBreakerConfig, MempoolConfig, OperationsManagerConfig, StateKeeperConfig}, + da_dispatcher::DADispatcherConfig, fri_prover_group::FriProverGroupConfig, house_keeper::HouseKeeperConfig, FriProofCompressorConfig, FriProverConfig, FriProverGatewayConfig, @@ -32,4 +33,5 @@ pub struct GeneralConfig { pub eth: Option, pub snapshot_creator: Option, pub observability: Option, + pub da_dispatcher_config: Option, } diff --git a/core/lib/config/src/configs/mod.rs b/core/lib/config/src/configs/mod.rs index 4289e6625c22..adf0cd28d6c5 100644 --- a/core/lib/config/src/configs/mod.rs +++ b/core/lib/config/src/configs/mod.rs @@ -25,6 +25,7 @@ pub mod chain; pub mod consensus; pub mod contract_verifier; pub mod contracts; +pub mod da_dispatcher; pub mod database; pub mod eth_sender; pub mod eth_watch; diff --git a/core/lib/dal/src/blocks_dal.rs b/core/lib/dal/src/blocks_dal.rs index 4e39a7bcea31..753e3f993edc 100644 --- a/core/lib/dal/src/blocks_dal.rs +++ b/core/lib/dal/src/blocks_dal.rs @@ -14,7 +14,10 @@ use zksync_db_connection::{ }; use zksync_types::{ aggregated_operations::AggregatedActionType, - block::{BlockGasCount, L1BatchHeader, L1BatchTreeData, L2BlockHeader, StorageOracleInfo}, + block::{ + BlockGasCount, L1BatchDAData, L1BatchHeader, L1BatchTreeData, L2BlockHeader, + StorageOracleInfo, + }, circuit::CircuitStatistic, commitment::{L1BatchCommitmentArtifacts, L1BatchWithMetadata}, Address, 
L1BatchNumber, L2BlockNumber, ProtocolVersionId, H256, U256, @@ -289,7 +292,8 @@ impl BlocksDal<'_, '_> { compressed_state_diffs, events_queue_commitment, bootloader_initial_content_commitment, - pubdata_input + pubdata_input, + da_inclusion_data FROM l1_batches LEFT JOIN commitments ON commitments.l1_batch_number = l1_batches.number @@ -836,6 +840,8 @@ impl BlocksDal<'_, '_> { Ok(()) } + pub async fn save_l1_batch_da_data(&mut self, da_data: &L1BatchDAData) -> anyhow::Result<()> {} + pub async fn save_l1_batch_commitment_artifacts( &mut self, number: L1BatchNumber, @@ -978,7 +984,8 @@ impl BlocksDal<'_, '_> { system_logs, events_queue_commitment, bootloader_initial_content_commitment, - pubdata_input + pubdata_input, + da_inclusion_data FROM l1_batches LEFT JOIN commitments ON commitments.l1_batch_number = l1_batches.number @@ -1162,7 +1169,8 @@ impl BlocksDal<'_, '_> { system_logs, events_queue_commitment, bootloader_initial_content_commitment, - pubdata_input + pubdata_input, + da_inclusion_data FROM l1_batches LEFT JOIN commitments ON commitments.l1_batch_number = l1_batches.number @@ -1246,7 +1254,8 @@ impl BlocksDal<'_, '_> { protocol_version, events_queue_commitment, bootloader_initial_content_commitment, - pubdata_input + pubdata_input, + da_inclusion_data FROM ( SELECT @@ -1323,7 +1332,8 @@ impl BlocksDal<'_, '_> { system_logs, events_queue_commitment, bootloader_initial_content_commitment, - pubdata_input + pubdata_input, + da_inclusion_data FROM l1_batches LEFT JOIN commitments ON commitments.l1_batch_number = l1_batches.number @@ -1452,7 +1462,8 @@ impl BlocksDal<'_, '_> { system_logs, events_queue_commitment, bootloader_initial_content_commitment, - pubdata_input + pubdata_input, + da_inclusion_data FROM l1_batches LEFT JOIN commitments ON commitments.l1_batch_number = l1_batches.number @@ -1520,7 +1531,8 @@ impl BlocksDal<'_, '_> { system_logs, events_queue_commitment, bootloader_initial_content_commitment, - pubdata_input + pubdata_input, + da_inclusion_data FROM l1_batches LEFT JOIN commitments ON commitments.l1_batch_number = l1_batches.number @@ -1598,7 +1610,8 @@ impl BlocksDal<'_, '_> { system_logs, events_queue_commitment, bootloader_initial_content_commitment, - pubdata_input + pubdata_input, + da_inclusion_data FROM l1_batches LEFT JOIN commitments ON commitments.l1_batch_number = l1_batches.number diff --git a/core/lib/dal/src/models/storage_block.rs b/core/lib/dal/src/models/storage_block.rs index 7239979a3e52..4ab2a010c221 100644 --- a/core/lib/dal/src/models/storage_block.rs +++ b/core/lib/dal/src/models/storage_block.rs @@ -149,7 +149,7 @@ pub struct StorageL1Batch { pub bootloader_initial_content_commitment: Option>, pub pubdata_input: Option>, - pub da_inclusion_proof: Option>>, + pub da_inclusion_data: Option>, } impl From for L1BatchHeader { diff --git a/core/lib/types/src/block.rs b/core/lib/types/src/block.rs index 3cf09f9645f1..bf35e0698baf 100644 --- a/core/lib/types/src/block.rs +++ b/core/lib/types/src/block.rs @@ -270,6 +270,11 @@ pub struct L1BatchTreeData { pub rollup_last_leaf_index: u64, } +pub struct L1BatchDAData { + pub commitment: H256, + pub inclusion_proof: Vec, +} + #[cfg(test)] mod tests { use super::*; diff --git a/core/lib/zksync_core/src/eth_sender/l1_batch_commit_data_generator.rs b/core/lib/zksync_core/src/eth_sender/l1_batch_commit_data_generator.rs index d63dce188af1..545cbf52ddfd 100644 --- a/core/lib/zksync_core/src/eth_sender/l1_batch_commit_data_generator.rs +++ 
b/core/lib/zksync_core/src/eth_sender/l1_batch_commit_data_generator.rs @@ -38,9 +38,7 @@ pub struct RollupModeL1BatchCommitDataGenerator; /// contracts operating in validium mode. It differs from [`RollupModeL1BatchCommitDataGenerator`] /// in that it does not include the pubdata in the produced message. #[derive(Debug, Clone)] -pub struct ValidiumModeL1BatchCommitDataGenerator { - pub validium_da_mode: ValidiumDAMode, -} +pub struct ValidiumModeL1BatchCommitDataGenerator {} impl L1BatchCommitDataGenerator for RollupModeL1BatchCommitDataGenerator { fn l1_commit_batches( diff --git a/core/lib/zksync_core/src/lib.rs b/core/lib/zksync_core/src/lib.rs index dc6e3dc016ba..bbb4796dc9c9 100644 --- a/core/lib/zksync_core/src/lib.rs +++ b/core/lib/zksync_core/src/lib.rs @@ -99,6 +99,7 @@ pub mod api_server; pub mod basic_witness_input_producer; pub mod consensus; pub mod consistency_checker; +pub mod da_sender; pub mod db_pruner; pub mod eth_sender; pub mod fee_model; @@ -658,7 +659,9 @@ pub async fn initialize_components( Arc::new(RollupModeL1BatchCommitDataGenerator {}) } L1BatchCommitDataGeneratorMode::Validium => { - Arc::new(ValidiumModeL1BatchCommitDataGenerator {}) + Arc::new(ValidiumModeL1BatchCommitDataGenerator { + validium_da_mode: configs.da_dispatcher_config?.mode, + }) } }; From a25d0ee716b3bd3eb05a82d08e1a4bed4436243b Mon Sep 17 00:00:00 2001 From: dimazhornyk Date: Wed, 1 May 2024 16:38:00 +0200 Subject: [PATCH 03/69] chore: tmp --- ...40403214948_add_da_inclusion_data.down.sql | 2 + ...0240403214948_add_da_inclusion_data.up.sql | 2 + ...0403214948_add_da_inclusion_proof.down.sql | 2 - ...240403214948_add_da_inclusion_proof.up.sql | 2 - core/lib/dal/src/blocks_dal.rs | 57 ++++++++++++++++++- core/lib/types/src/block.rs | 5 -- core/node/da_dispatcher/Cargo.toml | 14 +++++ core/node/da_dispatcher/README.md | 4 ++ core/node/da_dispatcher/src/lib.rs | 14 +++++ 9 files changed, 92 insertions(+), 10 deletions(-) create mode 100644 core/lib/dal/migrations/20240403214948_add_da_inclusion_data.down.sql create mode 100644 core/lib/dal/migrations/20240403214948_add_da_inclusion_data.up.sql delete mode 100644 core/lib/dal/migrations/20240403214948_add_da_inclusion_proof.down.sql delete mode 100644 core/lib/dal/migrations/20240403214948_add_da_inclusion_proof.up.sql create mode 100644 core/node/da_dispatcher/Cargo.toml create mode 100644 core/node/da_dispatcher/README.md create mode 100644 core/node/da_dispatcher/src/lib.rs diff --git a/core/lib/dal/migrations/20240403214948_add_da_inclusion_data.down.sql b/core/lib/dal/migrations/20240403214948_add_da_inclusion_data.down.sql new file mode 100644 index 000000000000..0c7753c74044 --- /dev/null +++ b/core/lib/dal/migrations/20240403214948_add_da_inclusion_data.down.sql @@ -0,0 +1,2 @@ +ALTER TABLE l1_batches + DROP COLUMN da_inclusion_data; diff --git a/core/lib/dal/migrations/20240403214948_add_da_inclusion_data.up.sql b/core/lib/dal/migrations/20240403214948_add_da_inclusion_data.up.sql new file mode 100644 index 000000000000..f5067acf1e1d --- /dev/null +++ b/core/lib/dal/migrations/20240403214948_add_da_inclusion_data.up.sql @@ -0,0 +1,2 @@ +ALTER TABLE l1_batches + ADD COLUMN IF NOT EXISTS da_inclusion_data BYTEA[] NOT NULL DEFAULT '{}'; diff --git a/core/lib/dal/migrations/20240403214948_add_da_inclusion_proof.down.sql b/core/lib/dal/migrations/20240403214948_add_da_inclusion_proof.down.sql deleted file mode 100644 index a630f8ae3e2c..000000000000 --- a/core/lib/dal/migrations/20240403214948_add_da_inclusion_proof.down.sql +++ /dev/null @@ 
-1,2 +0,0 @@ -ALTER TABLE l1_batches - DROP COLUMN da_inclusion_proof; diff --git a/core/lib/dal/migrations/20240403214948_add_da_inclusion_proof.up.sql b/core/lib/dal/migrations/20240403214948_add_da_inclusion_proof.up.sql deleted file mode 100644 index 70aeba3e3a2d..000000000000 --- a/core/lib/dal/migrations/20240403214948_add_da_inclusion_proof.up.sql +++ /dev/null @@ -1,2 +0,0 @@ -ALTER TABLE l1_batches - ADD COLUMN IF NOT EXISTS da_inclusion_proof BYTEA[] NOT NULL DEFAULT '{}'; diff --git a/core/lib/dal/src/blocks_dal.rs b/core/lib/dal/src/blocks_dal.rs index 753e3f993edc..4f23127506a5 100644 --- a/core/lib/dal/src/blocks_dal.rs +++ b/core/lib/dal/src/blocks_dal.rs @@ -840,7 +840,61 @@ impl BlocksDal<'_, '_> { Ok(()) } - pub async fn save_l1_batch_da_data(&mut self, da_data: &L1BatchDAData) -> anyhow::Result<()> {} + pub async fn save_l1_batch_da_data( + &mut self, + number: L1BatchNumber, + da_inclusion_data: Vec, + ) -> anyhow::Result<()> { + let update_result = sqlx::query!( + r#" + UPDATE l1_batches + SET + da_inclusion_data = $1, + updated_at = NOW() + WHERE + number = $2 + AND da_inclusion_data IS NULL + "#, + da_inclusion_data.as_slice(), + i64::from(number.0), + ) + .instrument("save_l1_batch_da_data") + .with_arg("number", &number) + .report_latency() + .execute(self.storage) + .await?; + + if update_result.rows_affected() == 0 { + tracing::debug!("L1 batch #{number}: DA data wasn't updated as it's already present"); + + // Batch was already processed. Verify that existing DA data matches + let matched: i64 = sqlx::query!( + r#" + SELECT + COUNT(*) AS "count!" + FROM + l1_batches + WHERE + number = $1 + AND da_inclusion_data = $2 + "#, + i64::from(number.0), + da_inclusion_data.as_slice(), + ) + .instrument("get_matching_batch_da_data") + .with_arg("number", &number) + .report_latency() + .fetch_one(self.storage) + .await? + .count; + + anyhow::ensure!( + matched == 1, + "DA data verification failed. 
DA data for L1 batch #{number} does not match the expected value" + ); + } + Ok(()) + } pub async fn save_l1_batch_commitment_artifacts( &mut self, @@ -1628,6 +1682,7 @@ impl BlocksDal<'_, '_> { ) AND events_queue_commitment IS NOT NULL AND bootloader_initial_content_commitment IS NOT NULL + AND da_inclusion_data IS NOT NULL ORDER BY number LIMIT diff --git a/core/lib/types/src/block.rs b/core/lib/types/src/block.rs index bf35e0698baf..3cf09f9645f1 100644 --- a/core/lib/types/src/block.rs +++ b/core/lib/types/src/block.rs @@ -270,11 +270,6 @@ pub struct L1BatchTreeData { pub rollup_last_leaf_index: u64, } -pub struct L1BatchDAData { - pub commitment: H256, - pub inclusion_proof: Vec, -} - #[cfg(test)] mod tests { use super::*; diff --git a/core/node/da_dispatcher/Cargo.toml b/core/node/da_dispatcher/Cargo.toml new file mode 100644 index 000000000000..ac66107f9121 --- /dev/null +++ b/core/node/da_dispatcher/Cargo.toml @@ -0,0 +1,14 @@ +[package] +name = "zksync_da_dispatcher" +version.workspace = true +edition.workspace = true +authors.workspace = true +homepage.workspace = true +repository.workspace = true +license.workspace = true +keywords.workspace = true +categories.workspace = true + +# See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html + +[dependencies] diff --git a/core/node/da_dispatcher/README.md b/core/node/da_dispatcher/README.md new file mode 100644 index 000000000000..37ca4f1e3101 --- /dev/null +++ b/core/node/da_dispatcher/README.md @@ -0,0 +1,4 @@ +# zkSync Era data availability dispatcher + +This crate contains an implementation of the zkSync Era DA dispatcher component, that connects to the different DA +layers and . diff --git a/core/node/da_dispatcher/src/lib.rs b/core/node/da_dispatcher/src/lib.rs new file mode 100644 index 000000000000..7d12d9af8195 --- /dev/null +++ b/core/node/da_dispatcher/src/lib.rs @@ -0,0 +1,14 @@ +pub fn add(left: usize, right: usize) -> usize { + left + right +} + +#[cfg(test)] +mod tests { + use super::*; + + #[test] + fn it_works() { + let result = add(2, 2); + assert_eq!(result, 4); + } +} From ea7d8819d33fdf68d16fd6b06a43c893d236e90e Mon Sep 17 00:00:00 2001 From: dimazhornyk Date: Wed, 8 May 2024 16:24:11 +0200 Subject: [PATCH 04/69] chore: define basic types --- Cargo.lock | 9 ++++++++ Cargo.toml | 1 + contracts | 2 +- core/lib/config/src/configs/mod.rs | 1 + core/lib/config/src/lib.rs | 5 +++-- core/lib/dal/src/blocks_dal.rs | 5 +---- core/lib/types/src/data_availability/mod.rs | 10 +++++++++ core/lib/types/src/lib.rs | 1 + core/lib/zksync_core/src/da_sender/mod.rs | 2 -- core/lib/zksync_core/src/da_sender/tests.rs | 1 - core/lib/zksync_core/src/lib.rs | 1 - core/node/da_dispatcher/Cargo.toml | 3 +++ core/node/da_dispatcher/src/lib.rs | 24 ++++++++++++--------- 13 files changed, 44 insertions(+), 21 deletions(-) create mode 100644 core/lib/types/src/data_availability/mod.rs delete mode 100644 core/lib/zksync_core/src/da_sender/mod.rs delete mode 100644 core/lib/zksync_core/src/da_sender/tests.rs diff --git a/Cargo.lock b/Cargo.lock index 590e715a90e4..371550c636c8 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -8514,6 +8514,15 @@ dependencies = [ "zksync_utils", ] +[[package]] +name = "zksync_da_dispatcher" +version = "0.1.0" +dependencies = [ + "zksync_config", + "zksync_health_check", + "zksync_types", +] + [[package]] name = "zksync_dal" version = "0.1.0" diff --git a/Cargo.toml b/Cargo.toml index d2e935b3c6f6..d892d24bfd27 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -15,6 +15,7 @@ members = [ 
"core/node/block_reverter", "core/node/commitment_generator", "core/node/house_keeper", + "core/node/da_dispatcher", # Libraries "core/lib/db_connection", "core/lib/zksync_core", diff --git a/contracts b/contracts index 5042588322f2..d89e406cd20c 160000 --- a/contracts +++ b/contracts @@ -1 +1 @@ -Subproject commit 5042588322f2257d1ea580ac800cabba429742a6 +Subproject commit d89e406cd20c6d6e9052ba2321334b71ef53c54e diff --git a/core/lib/config/src/configs/mod.rs b/core/lib/config/src/configs/mod.rs index adf0cd28d6c5..6601a86be1b1 100644 --- a/core/lib/config/src/configs/mod.rs +++ b/core/lib/config/src/configs/mod.rs @@ -3,6 +3,7 @@ pub use self::{ api::ApiConfig, contract_verifier::ContractVerifierConfig, contracts::ContractsConfig, + da_dispatcher::DADispatcherConfig, database::{DBConfig, PostgresConfig}, eth_sender::{EthConfig, GasAdjusterConfig}, eth_watch::EthWatchConfig, diff --git a/core/lib/config/src/lib.rs b/core/lib/config/src/lib.rs index 66656e60b702..1d74e51b6728 100644 --- a/core/lib/config/src/lib.rs +++ b/core/lib/config/src/lib.rs @@ -1,8 +1,9 @@ #![allow(clippy::upper_case_acronyms, clippy::derive_partial_eq_without_eq)] pub use crate::configs::{ - ApiConfig, ContractVerifierConfig, ContractsConfig, DBConfig, EthConfig, EthWatchConfig, - GasAdjusterConfig, GenesisConfig, ObjectStoreConfig, PostgresConfig, SnapshotsCreatorConfig, + ApiConfig, ContractVerifierConfig, ContractsConfig, DADispatcherConfig, DBConfig, EthConfig, + EthWatchConfig, GasAdjusterConfig, GenesisConfig, ObjectStoreConfig, PostgresConfig, + SnapshotsCreatorConfig, }; pub mod configs; diff --git a/core/lib/dal/src/blocks_dal.rs b/core/lib/dal/src/blocks_dal.rs index 4f23127506a5..e68035dd130e 100644 --- a/core/lib/dal/src/blocks_dal.rs +++ b/core/lib/dal/src/blocks_dal.rs @@ -14,10 +14,7 @@ use zksync_db_connection::{ }; use zksync_types::{ aggregated_operations::AggregatedActionType, - block::{ - BlockGasCount, L1BatchDAData, L1BatchHeader, L1BatchTreeData, L2BlockHeader, - StorageOracleInfo, - }, + block::{BlockGasCount, L1BatchHeader, L1BatchTreeData, L2BlockHeader, StorageOracleInfo}, circuit::CircuitStatistic, commitment::{L1BatchCommitmentArtifacts, L1BatchWithMetadata}, Address, L1BatchNumber, L2BlockNumber, ProtocolVersionId, H256, U256, diff --git a/core/lib/types/src/data_availability/mod.rs b/core/lib/types/src/data_availability/mod.rs new file mode 100644 index 000000000000..7eac9284884c --- /dev/null +++ b/core/lib/types/src/data_availability/mod.rs @@ -0,0 +1,10 @@ +type Error = String; + +pub struct DADispatchResponse { + tx_hash: Vec, + inclusion_data: Vec, +} + +pub trait DataAvailabilityClient { + fn dispatch(&self, data: Vec) -> Result; +} diff --git a/core/lib/types/src/lib.rs b/core/lib/types/src/lib.rs index 5f3e5cbe5555..956d683c59bc 100644 --- a/core/lib/types/src/lib.rs +++ b/core/lib/types/src/lib.rs @@ -49,6 +49,7 @@ pub mod vm_trace; pub mod zk_evm_types; pub mod api; +pub mod data_availability; pub mod eth_sender; pub mod helpers; pub mod proto; diff --git a/core/lib/zksync_core/src/da_sender/mod.rs b/core/lib/zksync_core/src/da_sender/mod.rs deleted file mode 100644 index 87c2771955a9..000000000000 --- a/core/lib/zksync_core/src/da_sender/mod.rs +++ /dev/null @@ -1,2 +0,0 @@ -#[cfg(test)] -mod tests; diff --git a/core/lib/zksync_core/src/da_sender/tests.rs b/core/lib/zksync_core/src/da_sender/tests.rs deleted file mode 100644 index 8b137891791f..000000000000 --- a/core/lib/zksync_core/src/da_sender/tests.rs +++ /dev/null @@ -1 +0,0 @@ - diff --git 
a/core/lib/zksync_core/src/lib.rs b/core/lib/zksync_core/src/lib.rs index bbb4796dc9c9..4bbeb2552899 100644 --- a/core/lib/zksync_core/src/lib.rs +++ b/core/lib/zksync_core/src/lib.rs @@ -99,7 +99,6 @@ pub mod api_server; pub mod basic_witness_input_producer; pub mod consensus; pub mod consistency_checker; -pub mod da_sender; pub mod db_pruner; pub mod eth_sender; pub mod fee_model; diff --git a/core/node/da_dispatcher/Cargo.toml b/core/node/da_dispatcher/Cargo.toml index ac66107f9121..ca25d8bd7f0b 100644 --- a/core/node/da_dispatcher/Cargo.toml +++ b/core/node/da_dispatcher/Cargo.toml @@ -12,3 +12,6 @@ categories.workspace = true # See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html [dependencies] +zksync_types.workspace = true +zksync_health_check.workspace = true +zksync_config.workspace = true \ No newline at end of file diff --git a/core/node/da_dispatcher/src/lib.rs b/core/node/da_dispatcher/src/lib.rs index 7d12d9af8195..1468db27833e 100644 --- a/core/node/da_dispatcher/src/lib.rs +++ b/core/node/da_dispatcher/src/lib.rs @@ -1,14 +1,18 @@ -pub fn add(left: usize, right: usize) -> usize { - left + right -} +use zksync_config::da_dispatcher::ValidiumDAMode; +use zksync_health_check::{Health, HealthStatus, HealthUpdater, ReactiveHealthCheck}; +use zksync_types::data_availability::DataAvailabilityClient; -#[cfg(test)] -mod tests { - use super::*; +#[derive(Debug)] +pub struct DADispatcher { + client: dyn DataAvailabilityClient, + health_updater: HealthUpdater, +} - #[test] - fn it_works() { - let result = add(2, 2); - assert_eq!(result, 4); +impl DADispatcher { + pub fn new(mode: ValidiumDAMode) -> Self { + Self { + connector, + health_updater: ReactiveHealthCheck::new("da_dispatcher").1, + } } } From 8466a74dda166bee6bd68372404d39723e8cf768 Mon Sep 17 00:00:00 2001 From: dimazhornyk Date: Fri, 10 May 2024 16:25:18 +0200 Subject: [PATCH 05/69] chore: proper configs --- Cargo.lock | 7 +- Cargo.toml | 3 +- core/lib/config/src/configs/da_dispatcher.rs | 36 ++- core/lib/config/src/configs/eth_sender.rs | 7 +- core/lib/config/src/configs/mod.rs | 1 - core/lib/config/src/lib.rs | 5 +- .../da_client}/Cargo.toml | 10 +- core/lib/da_client/src/clients/gcs/mod.rs | 44 +++ core/lib/da_client/src/clients/mock.rs | 1 + core/lib/da_client/src/clients/mod.rs | 2 + core/lib/da_client/src/lib.rs | 11 + core/lib/da_client/src/types.rs | 9 + ...40403214948_add_da_inclusion_data.down.sql | 2 - ...0240403214948_add_da_inclusion_data.up.sql | 2 - ...54_create_data_availability_table.down.sql | 1 + ...5354_create_data_availability_table.up.sql | 12 + core/lib/dal/src/blocks_dal.rs | 28 +- core/lib/dal/src/models/storage_block.rs | 2 - core/lib/object_store/src/raw.rs | 2 +- core/lib/protobuf_config/src/da_dispatcher.rs | 48 +++ core/lib/protobuf_config/src/eth.rs | 10 + core/lib/protobuf_config/src/lib.rs | 1 + .../src/proto/config/da_dispatcher.proto | 17 + .../src/proto/config/eth_sender.proto | 5 + core/lib/types/src/data_availability/mod.rs | 10 - core/lib/types/src/lib.rs | 1 - core/lib/types/src/pubdata_da.rs | 10 + core/lib/zksync_core/src/lib.rs | 4 +- core/node/da_dispatcher/README.md | 4 - core/node/da_dispatcher/src/lib.rs | 18 -- .../src/l1_batch_commit_data_generator.rs | 2 +- core/node/node_framework/Cargo.toml | 1 + .../src/implementations/layers/da_client.rs | 1 + .../implementations/layers/da_dispatcher.rs | 169 ++++++++++ .../src/implementations/layers/mod.rs | 2 + .../implementations/resources/da_interface.rs | 30 ++ 
.../src/implementations/resources/mod.rs | 1 + etc/env/dev.env | 305 ++++++++++++++++++ 38 files changed, 738 insertions(+), 86 deletions(-) rename core/{node/da_dispatcher => lib/da_client}/Cargo.toml (67%) create mode 100644 core/lib/da_client/src/clients/gcs/mod.rs create mode 100644 core/lib/da_client/src/clients/mock.rs create mode 100644 core/lib/da_client/src/clients/mod.rs create mode 100644 core/lib/da_client/src/lib.rs create mode 100644 core/lib/da_client/src/types.rs delete mode 100644 core/lib/dal/migrations/20240403214948_add_da_inclusion_data.down.sql delete mode 100644 core/lib/dal/migrations/20240403214948_add_da_inclusion_data.up.sql create mode 100644 core/lib/dal/migrations/20240508145354_create_data_availability_table.down.sql create mode 100644 core/lib/dal/migrations/20240508145354_create_data_availability_table.up.sql create mode 100644 core/lib/protobuf_config/src/da_dispatcher.rs create mode 100644 core/lib/protobuf_config/src/proto/config/da_dispatcher.proto delete mode 100644 core/lib/types/src/data_availability/mod.rs delete mode 100644 core/node/da_dispatcher/README.md delete mode 100644 core/node/da_dispatcher/src/lib.rs create mode 100644 core/node/node_framework/src/implementations/layers/da_client.rs create mode 100644 core/node/node_framework/src/implementations/layers/da_dispatcher.rs create mode 100644 core/node/node_framework/src/implementations/resources/da_interface.rs create mode 100644 etc/env/dev.env diff --git a/Cargo.lock b/Cargo.lock index c3f0427f7cad..eb0ac87f4f50 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -8418,12 +8418,12 @@ dependencies = [ ] [[package]] -name = "zksync_da_dispatcher" +name = "zksync_da_client" version = "0.1.0" dependencies = [ + "tokio", "zksync_config", - "zksync_health_check", - "zksync_types", + "zksync_object_store", ] [[package]] @@ -8780,6 +8780,7 @@ dependencies = [ "zksync_config", "zksync_contracts", "zksync_core", + "zksync_da_client", "zksync_dal", "zksync_db_connection", "zksync_env_config", diff --git a/Cargo.toml b/Cargo.toml index d842acfc7e30..f83bbd95295e 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -16,7 +16,6 @@ members = [ "core/node/block_reverter", "core/node/commitment_generator", "core/node/house_keeper", - "core/node/da_dispatcher", "core/node/genesis", "core/node/shared_metrics", "core/node/db_pruner", @@ -36,6 +35,7 @@ members = [ "core/lib/dal", "core/lib/env_config", "core/lib/eth_client", + "core/lib/da_client", "core/lib/eth_signer", "core/lib/l1_contract_interface", "core/lib/mempool", @@ -208,6 +208,7 @@ zksync_dal = { path = "core/lib/dal" } zksync_db_connection = { path = "core/lib/db_connection" } zksync_env_config = { path = "core/lib/env_config" } zksync_eth_client = { path = "core/lib/eth_client" } +zksync_da_client = { path = "core/lib/da_client" } zksync_eth_signer = { path = "core/lib/eth_signer" } zksync_health_check = { path = "core/lib/health_check" } zksync_l1_contract_interface = { path = "core/lib/l1_contract_interface" } diff --git a/core/lib/config/src/configs/da_dispatcher.rs b/core/lib/config/src/configs/da_dispatcher.rs index c5d49531c962..9e838eb6aeb5 100644 --- a/core/lib/config/src/configs/da_dispatcher.rs +++ b/core/lib/config/src/configs/da_dispatcher.rs @@ -1,17 +1,29 @@ -use serde::Deserialize; +use crate::ObjectStoreConfig; -#[derive(Debug, Deserialize, Clone, Copy, PartialEq, Default)] -pub enum ValidiumDAMode { - #[default] - NoDA, - Celestia, - EigenDA, - Avail, - GCS, +#[derive(Clone, Debug)] +pub struct DALayerInfo { + pub url: String, + pub private_key: 
Vec, } -#[derive(Debug, Clone, PartialEq)] +#[derive(Clone, Debug)] +pub enum DACredentials { + DALayer(DALayerInfo), + GCS(ObjectStoreConfig), +} + +#[derive(Debug, Clone)] pub struct DADispatcherConfig { - pub mode: ValidiumDAMode, - pub da_api_url: String, + pub credentials: DACredentials, +} + +impl DADispatcherConfig { + pub fn for_tests() -> Self { + Self { + credentials: DACredentials::DALayer(DALayerInfo { + url: "http://localhost:1234".to_string(), + private_key: vec![1, 2, 3], + }), + } + } } diff --git a/core/lib/config/src/configs/eth_sender.rs b/core/lib/config/src/configs/eth_sender.rs index a7dce1959810..72de33778688 100644 --- a/core/lib/config/src/configs/eth_sender.rs +++ b/core/lib/config/src/configs/eth_sender.rs @@ -84,6 +84,11 @@ pub enum PubdataSendingMode { #[default] Calldata, Blobs, + NoDA, + GCS, + Celestia, + EigenDA, + Avail, } #[derive(Debug, Deserialize, Clone, PartialEq)] @@ -120,7 +125,7 @@ pub struct SenderConfig { /// The mode in which proofs are loaded, either from DB/GCS for FRI/Old proof. pub proof_loading_mode: ProofLoadingMode, - /// The mode in which we send pubdata, either Calldata or Blobs + /// The mode in which we send pubdata pub pubdata_sending_mode: PubdataSendingMode, } diff --git a/core/lib/config/src/configs/mod.rs b/core/lib/config/src/configs/mod.rs index b0e851f7506c..3a5633a7a17c 100644 --- a/core/lib/config/src/configs/mod.rs +++ b/core/lib/config/src/configs/mod.rs @@ -3,7 +3,6 @@ pub use self::{ api::ApiConfig, contract_verifier::ContractVerifierConfig, contracts::{ContractsConfig, EcosystemContracts}, - da_dispatcher::DADispatcherConfig, database::{DBConfig, PostgresConfig}, eth_sender::{EthConfig, GasAdjusterConfig}, eth_watch::EthWatchConfig, diff --git a/core/lib/config/src/lib.rs b/core/lib/config/src/lib.rs index 1d74e51b6728..66656e60b702 100644 --- a/core/lib/config/src/lib.rs +++ b/core/lib/config/src/lib.rs @@ -1,9 +1,8 @@ #![allow(clippy::upper_case_acronyms, clippy::derive_partial_eq_without_eq)] pub use crate::configs::{ - ApiConfig, ContractVerifierConfig, ContractsConfig, DADispatcherConfig, DBConfig, EthConfig, - EthWatchConfig, GasAdjusterConfig, GenesisConfig, ObjectStoreConfig, PostgresConfig, - SnapshotsCreatorConfig, + ApiConfig, ContractVerifierConfig, ContractsConfig, DBConfig, EthConfig, EthWatchConfig, + GasAdjusterConfig, GenesisConfig, ObjectStoreConfig, PostgresConfig, SnapshotsCreatorConfig, }; pub mod configs; diff --git a/core/node/da_dispatcher/Cargo.toml b/core/lib/da_client/Cargo.toml similarity index 67% rename from core/node/da_dispatcher/Cargo.toml rename to core/lib/da_client/Cargo.toml index ca25d8bd7f0b..f2d317ddbeeb 100644 --- a/core/node/da_dispatcher/Cargo.toml +++ b/core/lib/da_client/Cargo.toml @@ -1,5 +1,5 @@ [package] -name = "zksync_da_dispatcher" +name = "zksync_da_client" version.workspace = true edition.workspace = true authors.workspace = true @@ -12,6 +12,8 @@ categories.workspace = true # See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html [dependencies] -zksync_types.workspace = true -zksync_health_check.workspace = true -zksync_config.workspace = true \ No newline at end of file +zksync_object_store.workspace = true +zksync_config.workspace = true + +[dev-dependencies] +tokio = { workspace = true, features = ["full"] } \ No newline at end of file diff --git a/core/lib/da_client/src/clients/gcs/mod.rs b/core/lib/da_client/src/clients/gcs/mod.rs new file mode 100644 index 000000000000..e2656ed351a8 --- /dev/null +++ 
b/core/lib/da_client/src/clients/gcs/mod.rs @@ -0,0 +1,44 @@ +use std::{ + fmt, + fmt::{Debug, Formatter}, + sync::Arc, +}; + +use zksync_config::ObjectStoreConfig; +use zksync_object_store::{ObjectStore, ObjectStoreError, ObjectStoreFactory}; + +use crate::{ + types::{DispatchResponse, InclusionData}, + DataAvailabilityClient, +}; + +struct GCSDAClient { + object_store: Arc, +} + +impl GCSDAClient { + pub fn new(object_store_conf: ObjectStoreConfig) -> Self { + GCSDAClient { + object_store: ObjectStoreFactory::create_from_config(&object_store_conf), + } + } +} + +impl DataAvailabilityClient for GCSDAClient { + fn dispatch_blob(&self, data: Vec) -> Result { + Ok(DispatchResponse::default()) + } + + fn get_inclusion_data(&self, _: Vec) -> Result { + return Ok(InclusionData::default()); + } +} + +impl Debug for GCSDAClient { + fn fmt(&self, formatter: &mut Formatter<'_>) -> fmt::Result { + formatter + .debug_struct("GCSDAClient") + .field("object_store", &self.object_store) + .finish() + } +} diff --git a/core/lib/da_client/src/clients/mock.rs b/core/lib/da_client/src/clients/mock.rs new file mode 100644 index 000000000000..8b137891791f --- /dev/null +++ b/core/lib/da_client/src/clients/mock.rs @@ -0,0 +1 @@ + diff --git a/core/lib/da_client/src/clients/mod.rs b/core/lib/da_client/src/clients/mod.rs new file mode 100644 index 000000000000..b1586f939c89 --- /dev/null +++ b/core/lib/da_client/src/clients/mod.rs @@ -0,0 +1,2 @@ +mod gcs; +mod mock; diff --git a/core/lib/da_client/src/lib.rs b/core/lib/da_client/src/lib.rs new file mode 100644 index 000000000000..389f3c9c8ea3 --- /dev/null +++ b/core/lib/da_client/src/lib.rs @@ -0,0 +1,11 @@ +use std::fmt; + +use crate::types::{DispatchResponse, InclusionData}; + +pub mod clients; +mod types; + +pub trait DataAvailabilityClient: Sync + Send + fmt::Debug { + fn dispatch_blob(&self, data: Vec) -> Result; + fn get_inclusion_data(&self, blob_id: Vec) -> Result; +} diff --git a/core/lib/da_client/src/types.rs b/core/lib/da_client/src/types.rs new file mode 100644 index 000000000000..b2618efe4dbd --- /dev/null +++ b/core/lib/da_client/src/types.rs @@ -0,0 +1,9 @@ +#[derive(Default)] +pub struct DispatchResponse { + blob_id: Vec, +} + +#[derive(Default)] +pub struct InclusionData { + data: Vec, +} diff --git a/core/lib/dal/migrations/20240403214948_add_da_inclusion_data.down.sql b/core/lib/dal/migrations/20240403214948_add_da_inclusion_data.down.sql deleted file mode 100644 index 0c7753c74044..000000000000 --- a/core/lib/dal/migrations/20240403214948_add_da_inclusion_data.down.sql +++ /dev/null @@ -1,2 +0,0 @@ -ALTER TABLE l1_batches - DROP COLUMN da_inclusion_data; diff --git a/core/lib/dal/migrations/20240403214948_add_da_inclusion_data.up.sql b/core/lib/dal/migrations/20240403214948_add_da_inclusion_data.up.sql deleted file mode 100644 index f5067acf1e1d..000000000000 --- a/core/lib/dal/migrations/20240403214948_add_da_inclusion_data.up.sql +++ /dev/null @@ -1,2 +0,0 @@ -ALTER TABLE l1_batches - ADD COLUMN IF NOT EXISTS da_inclusion_data BYTEA[] NOT NULL DEFAULT '{}'; diff --git a/core/lib/dal/migrations/20240508145354_create_data_availability_table.down.sql b/core/lib/dal/migrations/20240508145354_create_data_availability_table.down.sql new file mode 100644 index 000000000000..fd4c3d62f315 --- /dev/null +++ b/core/lib/dal/migrations/20240508145354_create_data_availability_table.down.sql @@ -0,0 +1 @@ +DROP TABLE data_availability; diff --git a/core/lib/dal/migrations/20240508145354_create_data_availability_table.up.sql 
b/core/lib/dal/migrations/20240508145354_create_data_availability_table.up.sql new file mode 100644 index 000000000000..05ee7f90ee3a --- /dev/null +++ b/core/lib/dal/migrations/20240508145354_create_data_availability_table.up.sql @@ -0,0 +1,12 @@ +CREATE TABLE data_availability +( + l1_batch_number BIGINT PRIMARY KEY REFERENCES l1_batches (number) ON DELETE CASCADE, + + -- the BYTEA used for this 2 columns because it is the most generic type + -- the actual format if blob identifier and inclusion data is defined by the DA client implementation + blob_id BYTEA, -- blob here is an abstract term, unrelated to any DA implementation + inclusion_data BYTEA, + + created_at TIMESTAMP NOT NULL, + updated_at TIMESTAMP NOT NULL +); diff --git a/core/lib/dal/src/blocks_dal.rs b/core/lib/dal/src/blocks_dal.rs index e68035dd130e..04ab8ec8f42a 100644 --- a/core/lib/dal/src/blocks_dal.rs +++ b/core/lib/dal/src/blocks_dal.rs @@ -289,8 +289,7 @@ impl BlocksDal<'_, '_> { compressed_state_diffs, events_queue_commitment, bootloader_initial_content_commitment, - pubdata_input, - da_inclusion_data + pubdata_input FROM l1_batches LEFT JOIN commitments ON commitments.l1_batch_number = l1_batches.number @@ -1035,8 +1034,7 @@ impl BlocksDal<'_, '_> { system_logs, events_queue_commitment, bootloader_initial_content_commitment, - pubdata_input, - da_inclusion_data + pubdata_input FROM l1_batches LEFT JOIN commitments ON commitments.l1_batch_number = l1_batches.number @@ -1220,8 +1218,7 @@ impl BlocksDal<'_, '_> { system_logs, events_queue_commitment, bootloader_initial_content_commitment, - pubdata_input, - da_inclusion_data + pubdata_input FROM l1_batches LEFT JOIN commitments ON commitments.l1_batch_number = l1_batches.number @@ -1305,8 +1302,7 @@ impl BlocksDal<'_, '_> { protocol_version, events_queue_commitment, bootloader_initial_content_commitment, - pubdata_input, - da_inclusion_data + pubdata_input FROM ( SELECT @@ -1383,8 +1379,7 @@ impl BlocksDal<'_, '_> { system_logs, events_queue_commitment, bootloader_initial_content_commitment, - pubdata_input, - da_inclusion_data + pubdata_input FROM l1_batches LEFT JOIN commitments ON commitments.l1_batch_number = l1_batches.number @@ -1513,8 +1508,7 @@ impl BlocksDal<'_, '_> { system_logs, events_queue_commitment, bootloader_initial_content_commitment, - pubdata_input, - da_inclusion_data + pubdata_input FROM l1_batches LEFT JOIN commitments ON commitments.l1_batch_number = l1_batches.number @@ -1582,8 +1576,7 @@ impl BlocksDal<'_, '_> { system_logs, events_queue_commitment, bootloader_initial_content_commitment, - pubdata_input, - da_inclusion_data + pubdata_input FROM l1_batches LEFT JOIN commitments ON commitments.l1_batch_number = l1_batches.number @@ -1661,11 +1654,11 @@ impl BlocksDal<'_, '_> { system_logs, events_queue_commitment, bootloader_initial_content_commitment, - pubdata_input, - da_inclusion_data + pubdata_input FROM l1_batches LEFT JOIN commitments ON commitments.l1_batch_number = l1_batches.number + LEFT JOIN data_availability ON data_availability.l1_batch_number = l1_batches.number JOIN protocol_versions ON protocol_versions.id = l1_batches.protocol_version WHERE eth_commit_tx_id IS NULL @@ -1679,7 +1672,8 @@ impl BlocksDal<'_, '_> { ) AND events_queue_commitment IS NOT NULL AND bootloader_initial_content_commitment IS NOT NULL - AND da_inclusion_data IS NOT NULL + AND data_availability.blob_id IS NOT NULL + AND data_availability.inclusion_data IS NOT NULL ORDER BY number LIMIT diff --git a/core/lib/dal/src/models/storage_block.rs 
b/core/lib/dal/src/models/storage_block.rs index 4ab2a010c221..a336888f312a 100644 --- a/core/lib/dal/src/models/storage_block.rs +++ b/core/lib/dal/src/models/storage_block.rs @@ -148,8 +148,6 @@ pub struct StorageL1Batch { pub events_queue_commitment: Option>, pub bootloader_initial_content_commitment: Option>, pub pubdata_input: Option>, - - pub da_inclusion_data: Option>, } impl From for L1BatchHeader { diff --git a/core/lib/object_store/src/raw.rs b/core/lib/object_store/src/raw.rs index 1776612577bd..d7036e5189dd 100644 --- a/core/lib/object_store/src/raw.rs +++ b/core/lib/object_store/src/raw.rs @@ -189,7 +189,7 @@ impl ObjectStoreFactory { } } - async fn create_from_config(config: &ObjectStoreConfig) -> Arc { + pub async fn create_from_config(config: &ObjectStoreConfig) -> Arc { match &config.mode { ObjectStoreMode::GCS { bucket_base_url } => { tracing::trace!( diff --git a/core/lib/protobuf_config/src/da_dispatcher.rs b/core/lib/protobuf_config/src/da_dispatcher.rs new file mode 100644 index 000000000000..d42daec0fd18 --- /dev/null +++ b/core/lib/protobuf_config/src/da_dispatcher.rs @@ -0,0 +1,48 @@ +use anyhow::Context; +use zksync_config::configs; +use zksync_protobuf::{required, ProtoRepr}; + +use crate::proto::{da_dispatcher as proto, object_store::ObjectStore}; + +impl ProtoRepr for proto::DataAvailabilityDispatcher { + type Type = configs::da_dispatcher::DADispatcherConfig; + + fn read(&self) -> anyhow::Result { + configs::da_dispatcher::DADispatcherConfig { + credentials: match &self.credentials { + Some(proto::data_availability_dispatcher::Credentials::DaLayer(config)) => { + configs::da_dispatcher::DACredentials::DALayer( + configs::da_dispatcher::DALayerInfo { + url: *required(&config.url).context("url"), + private_key: required(&config.private_key) + .context("private_key") + .into_bytes(), + }, + ) + } + Some(proto::data_availability_dispatcher::Credentials::ObjectStore(config)) => { + configs::da_dispatcher::DACredentials::GCS(config.read()?) 
+ } + None => None, + }, + } + } + + fn build(this: &Self::Type) -> Self { + let credentials = match this.credentials.clone() { + configs::da_dispatcher::DACredentials::DALayer(info) => Some( + proto::data_availability_dispatcher::Credentials::DaLayer(proto::DaLayer { + url: Some(info.url.clone()), + private_key: info.private_key.clone().into(), + }), + ), + configs::da_dispatcher::DACredentials::GCS(config) => Some( + proto::data_availability_dispatcher::Credentials::ObjectStore(ObjectStore::build( + &config, + )), + ), + }; + + Self { credentials } + } +} diff --git a/core/lib/protobuf_config/src/eth.rs b/core/lib/protobuf_config/src/eth.rs index 0e3aacf1696a..759e05f8d5c0 100644 --- a/core/lib/protobuf_config/src/eth.rs +++ b/core/lib/protobuf_config/src/eth.rs @@ -48,6 +48,11 @@ impl proto::PubdataSendingMode { match x { From::Calldata => Self::Calldata, From::Blobs => Self::Blobs, + From::NoDA => Self::NoDa, + From::GCS => Self::Gcs, + From::Celestia => Self::Celestia, + From::EigenDA => Self::EigenDa, + From::Avail => Self::Avail, } } @@ -56,6 +61,11 @@ impl proto::PubdataSendingMode { match self { Self::Calldata => To::Calldata, Self::Blobs => To::Blobs, + Self::NoDa => To::NoDA, + Self::Gcs => To::GCS, + Self::Celestia => To::Celestia, + Self::EigenDa => To::EigenDA, + Self::Avail => To::Avail, } } } diff --git a/core/lib/protobuf_config/src/lib.rs b/core/lib/protobuf_config/src/lib.rs index f3d1614c98f8..ec57358f19c3 100644 --- a/core/lib/protobuf_config/src/lib.rs +++ b/core/lib/protobuf_config/src/lib.rs @@ -10,6 +10,7 @@ mod circuit_breaker; mod consensus; mod contract_verifier; mod contracts; +mod da_dispatcher; mod database; mod eth; mod experimental; diff --git a/core/lib/protobuf_config/src/proto/config/da_dispatcher.proto b/core/lib/protobuf_config/src/proto/config/da_dispatcher.proto new file mode 100644 index 000000000000..0aeeef9c6799 --- /dev/null +++ b/core/lib/protobuf_config/src/proto/config/da_dispatcher.proto @@ -0,0 +1,17 @@ +syntax = "proto3"; + +package zksync.config.da_dispatcher; + +import "zksync/config/object_store.proto"; + +message DALayer { + optional string url = 1; // required + optional string private_key = 2; // required +} + +message DataAvailabilityDispatcher { + oneof credentials { + config.object_store.ObjectStore object_store = 1; + DALayer da_layer = 2; + } +} diff --git a/core/lib/protobuf_config/src/proto/config/eth_sender.proto b/core/lib/protobuf_config/src/proto/config/eth_sender.proto index d4db30682030..10857d5c0df8 100644 --- a/core/lib/protobuf_config/src/proto/config/eth_sender.proto +++ b/core/lib/protobuf_config/src/proto/config/eth_sender.proto @@ -23,6 +23,11 @@ enum ProofLoadingMode { enum PubdataSendingMode { CALLDATA = 0; BLOBS = 1; + NO_DA = 2; + GCS = 3; + CELESTIA = 4; + EIGEN_DA = 5; + AVAIL = 6; } message Sender { diff --git a/core/lib/types/src/data_availability/mod.rs b/core/lib/types/src/data_availability/mod.rs deleted file mode 100644 index 7eac9284884c..000000000000 --- a/core/lib/types/src/data_availability/mod.rs +++ /dev/null @@ -1,10 +0,0 @@ -type Error = String; - -pub struct DADispatchResponse { - tx_hash: Vec, - inclusion_data: Vec, -} - -pub trait DataAvailabilityClient { - fn dispatch(&self, data: Vec) -> Result; -} diff --git a/core/lib/types/src/lib.rs b/core/lib/types/src/lib.rs index 4ec605d84eca..25f4173831b7 100644 --- a/core/lib/types/src/lib.rs +++ b/core/lib/types/src/lib.rs @@ -49,7 +49,6 @@ pub mod vm_trace; pub mod zk_evm_types; pub mod api; -pub mod data_availability; pub mod eth_sender; pub mod 
helpers; pub mod proto; diff --git a/core/lib/types/src/pubdata_da.rs b/core/lib/types/src/pubdata_da.rs index 8f7d3a96f55e..22b6184df510 100644 --- a/core/lib/types/src/pubdata_da.rs +++ b/core/lib/types/src/pubdata_da.rs @@ -9,6 +9,11 @@ use zksync_config::configs::eth_sender::PubdataSendingMode; pub enum PubdataDA { Calldata = 0, Blobs, + NoDA, + GCS, + Celestia, + EigenDA, + Avail, } impl From for PubdataDA { @@ -16,6 +21,11 @@ impl From for PubdataDA { match value { PubdataSendingMode::Calldata => PubdataDA::Calldata, PubdataSendingMode::Blobs => PubdataDA::Blobs, + PubdataSendingMode::NoDA => PubdataDA::NoDA, + PubdataSendingMode::GCS => PubdataDA::GCS, + PubdataSendingMode::Celestia => PubdataDA::Celestia, + PubdataSendingMode::EigenDA => PubdataDA::EigenDA, + PubdataSendingMode::Avail => PubdataDA::Avail, } } } diff --git a/core/lib/zksync_core/src/lib.rs b/core/lib/zksync_core/src/lib.rs index 4aa96e513f48..b8513c6963fb 100644 --- a/core/lib/zksync_core/src/lib.rs +++ b/core/lib/zksync_core/src/lib.rs @@ -655,9 +655,7 @@ pub async fn initialize_components( Arc::new(RollupModeL1BatchCommitDataGenerator {}) } L1BatchCommitDataGeneratorMode::Validium => { - Arc::new(ValidiumModeL1BatchCommitDataGenerator { - validium_da_mode: configs.da_dispatcher_config?.mode, - }) + Arc::new(ValidiumModeL1BatchCommitDataGenerator {}) } }; diff --git a/core/node/da_dispatcher/README.md b/core/node/da_dispatcher/README.md deleted file mode 100644 index 37ca4f1e3101..000000000000 --- a/core/node/da_dispatcher/README.md +++ /dev/null @@ -1,4 +0,0 @@ -# zkSync Era data availability dispatcher - -This crate contains an implementation of the zkSync Era DA dispatcher component, that connects to the different DA -layers and . diff --git a/core/node/da_dispatcher/src/lib.rs b/core/node/da_dispatcher/src/lib.rs deleted file mode 100644 index 1468db27833e..000000000000 --- a/core/node/da_dispatcher/src/lib.rs +++ /dev/null @@ -1,18 +0,0 @@ -use zksync_config::da_dispatcher::ValidiumDAMode; -use zksync_health_check::{Health, HealthStatus, HealthUpdater, ReactiveHealthCheck}; -use zksync_types::data_availability::DataAvailabilityClient; - -#[derive(Debug)] -pub struct DADispatcher { - client: dyn DataAvailabilityClient, - health_updater: HealthUpdater, -} - -impl DADispatcher { - pub fn new(mode: ValidiumDAMode) -> Self { - Self { - connector, - health_updater: ReactiveHealthCheck::new("da_dispatcher").1, - } - } -} diff --git a/core/node/eth_sender/src/l1_batch_commit_data_generator.rs b/core/node/eth_sender/src/l1_batch_commit_data_generator.rs index 545cbf52ddfd..32ae80b1e176 100644 --- a/core/node/eth_sender/src/l1_batch_commit_data_generator.rs +++ b/core/node/eth_sender/src/l1_batch_commit_data_generator.rs @@ -38,7 +38,7 @@ pub struct RollupModeL1BatchCommitDataGenerator; /// contracts operating in validium mode. It differs from [`RollupModeL1BatchCommitDataGenerator`] /// in that it does not include the pubdata in the produced message. 
#[derive(Debug, Clone)] -pub struct ValidiumModeL1BatchCommitDataGenerator {} +pub struct ValidiumModeL1BatchCommitDataGenerator; impl L1BatchCommitDataGenerator for RollupModeL1BatchCommitDataGenerator { fn l1_commit_batches( diff --git a/core/node/node_framework/Cargo.toml b/core/node/node_framework/Cargo.toml index e02266c16546..4d86e47b742e 100644 --- a/core/node/node_framework/Cargo.toml +++ b/core/node/node_framework/Cargo.toml @@ -23,6 +23,7 @@ zksync_object_store.workspace = true zksync_core.workspace = true zksync_storage.workspace = true zksync_eth_client.workspace = true +zksync_da_client.workspace = true zksync_contracts.workspace = true zksync_web3_decl.workspace = true zksync_utils.workspace = true diff --git a/core/node/node_framework/src/implementations/layers/da_client.rs b/core/node/node_framework/src/implementations/layers/da_client.rs new file mode 100644 index 000000000000..8b137891791f --- /dev/null +++ b/core/node/node_framework/src/implementations/layers/da_client.rs @@ -0,0 +1 @@ + diff --git a/core/node/node_framework/src/implementations/layers/da_dispatcher.rs b/core/node/node_framework/src/implementations/layers/da_dispatcher.rs new file mode 100644 index 000000000000..cbed9ec05afe --- /dev/null +++ b/core/node/node_framework/src/implementations/layers/da_dispatcher.rs @@ -0,0 +1,169 @@ +use std::sync::Arc; + +use anyhow::Context; +use zksync_circuit_breaker::l1_txs::FailedL1TransactionChecker; +use zksync_config::{ + configs::{ + chain::{L1BatchCommitDataGeneratorMode, NetworkConfig}, + eth_sender::EthConfig, + ContractsConfig, + }, + GenesisConfig, +}; +use zksync_eth_client::BoundEthInterface; +use zksync_eth_sender::{ + l1_batch_commit_data_generator::{ + L1BatchCommitDataGenerator, RollupModeL1BatchCommitDataGenerator, + ValidiumModeL1BatchCommitDataGenerator, + }, + Aggregator, EthTxAggregator, EthTxManager, +}; + +use crate::{ + implementations::resources::{ + circuit_breakers::CircuitBreakersResource, + eth_interface::{BoundEthInterfaceForBlobsResource, BoundEthInterfaceResource}, + l1_tx_params::L1TxParamsResource, + object_store::ObjectStoreResource, + pools::{MasterPool, PoolResource, ReplicaPool}, + }, + service::{ServiceContext, StopReceiver}, + task::Task, + wiring_layer::{WiringError, WiringLayer}, +}; + +#[derive(Debug)] +pub struct DataAvailabilityDispatcherLayer { + eth_sender_config: EthConfig, + l1_batch_commit_data_generator_mode: L1BatchCommitDataGeneratorMode, +} + +impl DataAvailabilityDispatcherLayer { + pub fn new( + eth_sender_config: EthConfig, + l1_batch_commit_data_generator_mode: L1BatchCommitDataGeneratorMode, + ) -> Self { + Self { + eth_sender_config, + l1_batch_commit_data_generator_mode, + } + } +} + +#[async_trait::async_trait] +impl WiringLayer for DataAvailabilityDispatcherLayer { + fn layer_name(&self) -> &'static str { + "da_dispatcher_layer" + } + + async fn wire(self: Box, mut context: ServiceContext<'_>) -> Result<(), WiringError> { + let master_pool_resource = context.get_resource::>().await?; + let master_pool = master_pool_resource.get().await.unwrap(); + + let da_client = context.get_resource::().await?.0; + let eth_client_blobs = match context + .get_resource::() + .await + { + Ok(BoundEthInterfaceForBlobsResource(client)) => Some(client), + Err(WiringError::ResourceLacking { .. }) => None, + Err(err) => return Err(err), + }; + let object_store = context.get_resource::().await?.0; + + // Create and add tasks. 
+ let eth_client_blobs_addr = eth_client_blobs + .as_deref() + .map(BoundEthInterface::sender_account); + + let l1_batch_commit_data_generator: Arc = + match self.l1_batch_commit_data_generator_mode { + L1BatchCommitDataGeneratorMode::Rollup => { + Arc::new(RollupModeL1BatchCommitDataGenerator {}) + } + L1BatchCommitDataGeneratorMode::Validium => { + Arc::new(ValidiumModeL1BatchCommitDataGenerator {}) + } + }; + + let config = self.eth_sender_config.sender.context("sender")?; + let aggregator = Aggregator::new( + config.clone(), + object_store, + eth_client_blobs_addr.is_some(), + l1_batch_commit_data_generator.clone(), + ); + + let eth_tx_aggregator_actor = EthTxAggregator::new( + master_pool.clone(), + config.clone(), + aggregator, + eth_client.clone(), + self.contracts_config.validator_timelock_addr, + self.contracts_config.l1_multicall3_addr, + self.contracts_config.diamond_proxy_addr, + self.network_config.zksync_network_id, + eth_client_blobs_addr, + l1_batch_commit_data_generator, + ) + .await; + + context.add_task(Box::new(EthTxAggregatorTask { + eth_tx_aggregator_actor, + })); + + let gas_adjuster = context.get_resource::().await?.0; + + let eth_tx_manager_actor = EthTxManager::new( + master_pool, + config, + gas_adjuster, + eth_client, + eth_client_blobs, + ); + + context.add_task(Box::new(EthTxManagerTask { + eth_tx_manager_actor, + })); + + // Insert circuit breaker. + let CircuitBreakersResource { breakers } = context.get_resource_or_default().await; + breakers + .insert(Box::new(FailedL1TransactionChecker { pool: replica_pool })) + .await; + + Ok(()) + } +} + +#[derive(Debug)] +struct EthTxAggregatorTask { + eth_tx_aggregator_actor: EthTxAggregator, +} + +#[async_trait::async_trait] +impl Task for EthTxAggregatorTask { + fn name(&self) -> &'static str { + "eth_tx_aggregator" + } + + async fn run(self: Box, stop_receiver: StopReceiver) -> anyhow::Result<()> { + self.eth_tx_aggregator_actor.run(stop_receiver.0).await + } +} + +#[derive(Debug)] +struct EthTxManagerTask { + eth_tx_manager_actor: EthTxManager, +} + +#[async_trait::async_trait] +impl Task for EthTxManagerTask { + fn name(&self) -> &'static str { + "eth_tx_manager" + } + + async fn run(self: Box, stop_receiver: StopReceiver) -> anyhow::Result<()> { + self.eth_tx_manager_actor.run(stop_receiver.0).await + } +} diff --git a/core/node/node_framework/src/implementations/layers/mod.rs b/core/node/node_framework/src/implementations/layers/mod.rs index 9a55cf18d24d..37d2d300e6ab 100644 --- a/core/node/node_framework/src/implementations/layers/mod.rs +++ b/core/node/node_framework/src/implementations/layers/mod.rs @@ -3,6 +3,8 @@ pub mod commitment_generator; pub mod consensus; pub mod consistency_checker; pub mod contract_verification_api; +mod da_client; +mod da_dispatcher; pub mod eth_sender; pub mod eth_watch; pub mod healtcheck_server; diff --git a/core/node/node_framework/src/implementations/resources/da_interface.rs b/core/node/node_framework/src/implementations/resources/da_interface.rs new file mode 100644 index 000000000000..ca569b2e1f7a --- /dev/null +++ b/core/node/node_framework/src/implementations/resources/da_interface.rs @@ -0,0 +1,30 @@ +use zksync_da_client::DataAvailabilityClient; + +use crate::resource::Resource; + +#[derive(Debug, Clone)] +pub struct EthInterfaceResource(pub Box>); + +impl Resource for EthInterfaceResource { + fn name() -> String { + "common/eth_interface".into() + } +} + +#[derive(Debug, Clone)] +pub struct BoundEthInterfaceResource(pub Box); + +impl Resource for 
BoundEthInterfaceResource { + fn name() -> String { + "common/bound_eth_interface".into() + } +} + +#[derive(Debug, Clone)] +pub struct BoundEthInterfaceForBlobsResource(pub Box); + +impl Resource for BoundEthInterfaceForBlobsResource { + fn name() -> String { + "common/bound_eth_interface_for_blobs".into() + } +} diff --git a/core/node/node_framework/src/implementations/resources/mod.rs b/core/node/node_framework/src/implementations/resources/mod.rs index 2225fcd2f4c9..240af30f1765 100644 --- a/core/node/node_framework/src/implementations/resources/mod.rs +++ b/core/node/node_framework/src/implementations/resources/mod.rs @@ -1,5 +1,6 @@ pub mod action_queue; pub mod circuit_breakers; +mod da_interface; pub mod eth_interface; pub mod fee_input; pub mod healthcheck; diff --git a/etc/env/dev.env b/etc/env/dev.env new file mode 100644 index 000000000000..405845091b74 --- /dev/null +++ b/etc/env/dev.env @@ -0,0 +1,305 @@ +ALERTS_SPORADIC_CRYPTO_ERRORS_SUBSTRS="EventDestroyErr,Can't free memory of DeviceBuf,value: PoisonError" +API_WEB3_JSON_RPC_HTTP_PORT=3050 +API_WEB3_JSON_RPC_HTTP_URL=http://127.0.0.1:3050 +API_WEB3_JSON_RPC_WS_PORT=3051 +API_WEB3_JSON_RPC_WS_URL=ws://127.0.0.1:3051 +API_WEB3_JSON_RPC_REQ_ENTITIES_LIMIT=10000 +API_WEB3_JSON_RPC_FILTERS_DISABLED=false +API_WEB3_JSON_RPC_FILTERS_LIMIT=10000 +API_WEB3_JSON_RPC_SUBSCRIPTIONS_LIMIT=10000 +API_WEB3_JSON_RPC_PUBSUB_POLLING_INTERVAL=200 +API_WEB3_JSON_RPC_THREADS_PER_SERVER=128 +API_WEB3_JSON_RPC_MAX_NONCE_AHEAD=50 +API_WEB3_JSON_RPC_GAS_PRICE_SCALE_FACTOR=1.2 +API_WEB3_JSON_RPC_L1_TO_L2_TRANSACTIONS_COMPATIBILITY_MODE=true +API_WEB3_JSON_RPC_REQUEST_TIMEOUT=10 +API_WEB3_JSON_RPC_ACCOUNT_PKS=0xac0974bec39a17e36ba4a6b4d238ff944bacb478cbed5efcae784d7bf4f2ff80,0x59c6995e998f97a5a0044966f0945389dc9e86dae88c7a8412f4603b6b78690d,0x5de4111afa1a4b94908f83103eb1f1706367c2e68ca870fc3fb9a804cdab365a,0x7c852118294e51e653712a81e05800f419141751be58f605c371e15141b007a6,0x47e179ec197488593b187f80a00eb0da91f1b9d0b13f8733639f19c30a34926a,0x8b3a350cf5c34c9194ca85829a2df0ec3153be0318b5e2d3348e872092edffba,0x92db14e403b83dfe3df233f83dfa3a0d7096f21ca9b0d6d6b8d88b2b4ec1564e,0x4bbbf85ce3377467afe5d46f804f221813b2bb87f24d81f60f1fcdbf7cbf4356,0xdbda1821b80551c9d65939329250298aa3472ba22feea921c0cf5d620ea67b97,0x2a871d0798f97d79848a013d4936a73bf4cc922c825d33c1cf7073dff6d409c6,0xf214f2b2cd398c806f84e317254e0f0b801d0643303237d97a22a48e01628897,0x701b615bbdfb9de65240bc28bd21bbc0d996645a3dd57e7b12bc2bdf6f192c82,0xa267530f49f8280200edf313ee7af6b827f2a8bce2897751d06a843f644967b1,0x47c99abed3324a2707c28affff1267e45918ec8c3f20b8aa892e8b065d2942dd,0xc526ee95bf44d8fc405a158bb884d9d1238d99f0612e9f33d006bb0789009aaa,0x8166f546bab6da521a8369cab06c5d2b9e46670292d85c875ee9ec20e84ffb61,0xea6c44ac03bff858b476bba40716402b03e41b8e97e276d1baec7c37d42484a0,0x689af8efa8c651a91ad287602527f3af2fe9f6501a7ac4b061667b5a93e037fd,0xde9be858da4a475276426320d5e9262ecfc3ba460bfac56360bfa6c4c28b4ee0,0xdf57089febbacf7ba0bc227dafbffa9fc08a93fdc68e1e42411a14efcf23656e +API_WEB3_JSON_RPC_ESTIMATE_GAS_SCALE_FACTOR=1.2 +API_WEB3_JSON_RPC_ESTIMATE_GAS_ACCEPTABLE_OVERESTIMATION=1000 +API_WEB3_JSON_RPC_MAX_TX_SIZE=1000000 +API_CONTRACT_VERIFICATION_PORT=3070 +API_CONTRACT_VERIFICATION_URL=http://127.0.0.1:3070 +API_CONTRACT_VERIFICATION_THREADS_PER_SERVER=128 +API_PROMETHEUS_LISTENER_PORT=3312 +API_PROMETHEUS_PUSHGATEWAY_URL=http://127.0.0.1:9091 +API_PROMETHEUS_PUSH_INTERVAL_MS=100 +API_HEALTHCHECK_PORT=3071 +API_MERKLE_TREE_PORT=3072 +CHAIN_ETH_NETWORK=localhost +CHAIN_ETH_ZKSYNC_NETWORK=zkcany8 
+CHAIN_ETH_ZKSYNC_NETWORK_ID=308 +CHAIN_STATE_KEEPER_FEE_ACCOUNT_ADDR=0xA13c10C0D5bd6f79041B9835c63f91de35A15883 +CHAIN_STATE_KEEPER_TRANSACTION_SLOTS=250 +CHAIN_STATE_KEEPER_MAX_ALLOWED_L2_TX_GAS_LIMIT=4000000000 +CHAIN_STATE_KEEPER_BLOCK_COMMIT_DEADLINE_MS=2500 +CHAIN_STATE_KEEPER_MINIBLOCK_COMMIT_DEADLINE_MS=1000 +CHAIN_STATE_KEEPER_MINIBLOCK_SEAL_QUEUE_CAPACITY=10 +CHAIN_STATE_KEEPER_MAX_SINGLE_TX_GAS=6000000 +CHAIN_STATE_KEEPER_CLOSE_BLOCK_AT_GEOMETRY_PERCENTAGE=0.95 +CHAIN_STATE_KEEPER_CLOSE_BLOCK_AT_ETH_PARAMS_PERCENTAGE=0.95 +CHAIN_STATE_KEEPER_CLOSE_BLOCK_AT_GAS_PERCENTAGE=0.95 +CHAIN_STATE_KEEPER_REJECT_TX_AT_GEOMETRY_PERCENTAGE=0.95 +CHAIN_STATE_KEEPER_REJECT_TX_AT_ETH_PARAMS_PERCENTAGE=0.95 +CHAIN_STATE_KEEPER_REJECT_TX_AT_GAS_PERCENTAGE=0.95 +CHAIN_STATE_KEEPER_MINIMAL_L2_GAS_PRICE=100000000 +CHAIN_STATE_KEEPER_COMPUTE_OVERHEAD_PART=0 +CHAIN_STATE_KEEPER_PUBDATA_OVERHEAD_PART=1 +CHAIN_STATE_KEEPER_BATCH_OVERHEAD_L1_GAS=800000 +CHAIN_STATE_KEEPER_MAX_GAS_PER_BATCH=200000000 +CHAIN_STATE_KEEPER_MAX_PUBDATA_PER_BATCH=100000 +CHAIN_STATE_KEEPER_FEE_MODEL_VERSION=V1 +CHAIN_STATE_KEEPER_VALIDATION_COMPUTATIONAL_GAS_LIMIT=300000 +CHAIN_STATE_KEEPER_SAVE_CALL_TRACES=true +CHAIN_STATE_KEEPER_VIRTUAL_BLOCKS_INTERVAL=1 +CHAIN_STATE_KEEPER_VIRTUAL_BLOCKS_PER_MINIBLOCK=1 +CHAIN_STATE_KEEPER_BOOTLOADER_HASH=0x010007ede999d096c84553fb514d3d6ca76fbf39789dda76bfeda9f3ae06236e +CHAIN_STATE_KEEPER_DEFAULT_AA_HASH=0x0100055b041eb28aff6e3a6e0f37c31fd053fc9ef142683b05e5f0aee6934066 +CHAIN_OPERATIONS_MANAGER_DELAY_INTERVAL=100 +CHAIN_MEMPOOL_DELAY_INTERVAL=100 +CHAIN_MEMPOOL_SYNC_INTERVAL_MS=10 +CHAIN_MEMPOOL_SYNC_BATCH_SIZE=1000 +CHAIN_MEMPOOL_CAPACITY=10000000 +CHAIN_MEMPOOL_STUCK_TX_TIMEOUT=86400 +CHAIN_MEMPOOL_REMOVE_STUCK_TXS=true +CHAIN_CIRCUIT_BREAKER_SYNC_INTERVAL_MS=30000 +CHAIN_CIRCUIT_BREAKER_HTTP_REQ_MAX_RETRY_NUMBER=5 +CHAIN_CIRCUIT_BREAKER_HTTP_REQ_RETRY_INTERVAL_SEC=2 +CONTRACT_VERIFIER_COMPILATION_TIMEOUT=30 +CONTRACT_VERIFIER_POLLING_INTERVAL=1000 +CONTRACT_VERIFIER_PROMETHEUS_PORT=3314 +CONTRACTS_ADMIN_FACET_ADDR=0x0c2D3f8f56bE6170c876f5D4D2140944D004304f +CONTRACTS_DIAMOND_INIT_ADDR=0x9E1946D8496815d901f1274E4D18a3D45430F069 +CONTRACTS_DIAMOND_UPGRADE_INIT_ADDR=0x592ed7C3D2642dd34cef46989763979d48193d49 +CONTRACTS_DEFAULT_UPGRADE_ADDR=0xdc21cD787277b6A9c82f58777a4010DFCC4B7634 +CONTRACTS_MAILBOX_FACET_ADDR=0xc76637ddcF17044ea1b9ed96D5434e61133A2FF6 +CONTRACTS_EXECUTOR_FACET_ADDR=0xd736304E2C83D10861b80c6253880A7451f3080c +CONTRACTS_GOVERNANCE_ADDR=0xc23e02646203D8Cc0A7EdeeC938dD1514f411b6D +CONTRACTS_GETTERS_FACET_ADDR=0x37dA982B5a1Df37BB07E38F0C5b1824B62A08Cfd +CONTRACTS_VERIFIER_ADDR=0x3D9bA3C048E77E7c2C597E7Aa1DF81C004FF4Ca5 +CONTRACTS_DIAMOND_PROXY_ADDR=0x101467c948C359432DfC8078C4eB45a64cd10b0F +CONTRACTS_L1_MULTICALL3_ADDR=0xe6Cf83F3A38B6b55C792953d2576f1165Ff8395b +CONTRACTS_L1_ERC20_BRIDGE_PROXY_ADDR=0x56f81F235C3C78Fb675A268B1c8ded199c91E7BB +CONTRACTS_L1_ERC20_BRIDGE_IMPL_ADDR=0x8d52961335EBAD57C10150EadeDee98EB79A64C9 +CONTRACTS_L2_ERC20_BRIDGE_ADDR=0x29576C296156ba223487679Fbe21b1321d159823 +CONTRACTS_L2_TESTNET_PAYMASTER_ADDR=0xFC073319977e314F251EAE6ae6bE76B0B3BAeeCF +CONTRACTS_L1_ALLOW_LIST_ADDR=0xFC073319977e314F251EAE6ae6bE76B0B3BAeeCF +CONTRACTS_CREATE2_FACTORY_ADDR=0xcd7e4A048BEf47c58D1dF2bcfceC8b30cd5DB906 +CONTRACTS_VALIDATOR_TIMELOCK_ADDR=0x1C7EC23dEFFf159aB22d2BeA4c11F6d9691D2333 +CONTRACTS_VALIDATOR_TIMELOCK_EXECUTION_DELAY=0 +CONTRACTS_RECURSION_SCHEDULER_LEVEL_VK_HASH=0x18518ce15be02847459f304b1567cb914ae357eca82af07c09582e78592b987b 
+CONTRACTS_RECURSION_NODE_LEVEL_VK_HASH=0x1186ec268d49f1905f8d9c1e9d39fc33e98c74f91d91a21b8f7ef78bd09a8db8 +CONTRACTS_RECURSION_LEAF_LEVEL_VK_HASH=0x101e08b00193e529145ee09823378ef51a3bc8966504064f1f6ba3f1ba863210 +CONTRACTS_RECURSION_CIRCUITS_SET_VKS_HASH=0x18c1639094f58177409186e8c48d9f577c9410901d2f1d486b3e7d6cf553ae4c +CONTRACTS_GENESIS_TX_HASH=0xb99ebfea46cbe05a21cd80fe5597d97b204befc52a16303f579c607dc1ac2e2e +CONTRACTS_GENESIS_ROOT=0xe25bb13818ce30e19210a13aee061a9bf3be7f72050c6cd99e03465c21101475 +CONTRACTS_PRIORITY_TX_MAX_GAS_LIMIT=72000000 +CONTRACTS_DEPLOY_L2_BRIDGE_COUNTERPART_GAS_LIMIT=10000000 +CONTRACTS_GENESIS_BATCH_COMMITMENT=0x901804a1842c321c9a5789308aa5d05d58679629b7ceeb374747a9d165c02794 +CONTRACTS_GENESIS_ROLLUP_LEAF_INDEX=26 +CONTRACTS_GENESIS_PROTOCOL_VERSION=22 +CONTRACTS_L1_WETH_BRIDGE_IMPL_ADDR=0x66e963BD9cdeBb91BdbDA0B7f9578D59887f53eE +CONTRACTS_L1_WETH_BRIDGE_PROXY_ADDR=0x75fBc31d31fBdeeE68d9255dDB5d011bC28B658F +CONTRACTS_L1_WETH_TOKEN_ADDR=0x723527c48d14D5eC7aF0Bc19EA11Eb683E030804 +CONTRACTS_L2_WETH_BRIDGE_ADDR=0x0e059E80Acd29e065323e9315F660468df59fFCc +CONTRACTS_L2_WETH_TOKEN_IMPL_ADDR=0x5E6D086F5eC079ADFF4FB3774CDf3e8D6a34F7E9 +CONTRACTS_L2_WETH_TOKEN_PROXY_ADDR=0x5E6D086F5eC079ADFF4FB3774CDf3e8D6a34F7E9 +CONTRACTS_FRI_RECURSION_LEAF_LEVEL_VK_HASH=0x400a4b532c6f072c00d1806ef299300d4c104f4ac55bd8698ade78894fcadc0a +CONTRACTS_FRI_RECURSION_NODE_LEVEL_VK_HASH=0x5a3ef282b21e12fe1f4438e5bb158fc5060b160559c5158c6389d62d9fe3d080 +CONTRACTS_FRI_RECURSION_SCHEDULER_LEVEL_VK_HASH=0x1405880dc3317d635bddb0ab62bf5d013e5d1f462161c1f7ac3289c7fef956da +CONTRACTS_SNARK_WRAPPER_VK_HASH=0x063c6fb5c70404c2867f413a8e35563ad3d040b1ad8c11786231bfdba7b472c7 +CONTRACTS_BLOB_VERSIONED_HASH_RETRIEVER_ADDR=0x4Ab1e9A16638E35C13CcA6067433463843989001 +CONTRACTS_INITIAL_PROTOCOL_VERSION=22 +CONTRACTS_BRIDGEHUB_PROXY_ADDR=0x0000000000000000000000000000000000000000 +CONTRACTS_BRIDGEHUB_IMPL_ADDR=0x0000000000000000000000000000000000000000 +CONTRACTS_STATE_TRANSITION_PROXY_ADDR=0x0000000000000000000000000000000000000000 +CONTRACTS_STATE_TRANSITION_IMPL_ADDR=0x0000000000000000000000000000000000000000 +CONTRACTS_TRANSPARENT_PROXY_ADMIN_ADDR=0x0000000000000000000000000000000000000000 +CONTRACTS_TEST_DUMMY_VERIFIER=true +CONTRACTS_TEST_EASY_PRIORITY_MODE=false +DATABASE_STATE_KEEPER_DB_PATH=./db/main/state_keeper +DATABASE_BACKUP_COUNT=5 +DATABASE_BACKUP_INTERVAL_MS=60000 +DATABASE_POOL_SIZE=50 +DATABASE_STATEMENT_TIMEOUT_SEC=300 +DATABASE_MERKLE_TREE_PATH=./db/main/tree +DATABASE_MERKLE_TREE_BACKUP_PATH=./db/main/backups +ETH_CLIENT_CHAIN_ID=9 +ETH_CLIENT_WEB3_URL=http://127.0.0.1:8545 +ETH_SENDER_SENDER_WAIT_CONFIRMATIONS=1 +ETH_SENDER_SENDER_EXPECTED_WAIT_TIME_BLOCK=30 +ETH_SENDER_SENDER_TX_POLL_PERIOD=1 +ETH_SENDER_SENDER_AGGREGATE_TX_POLL_PERIOD=1 +ETH_SENDER_SENDER_MAX_TXS_IN_FLIGHT=30 +ETH_SENDER_SENDER_PROOF_SENDING_MODE=SkipEveryProof +ETH_SENDER_SENDER_MAX_AGGREGATED_BLOCKS_TO_COMMIT=1 +ETH_SENDER_SENDER_MAX_AGGREGATED_BLOCKS_TO_EXECUTE=10 +ETH_SENDER_SENDER_AGGREGATED_BLOCK_COMMIT_DEADLINE=1 +ETH_SENDER_SENDER_AGGREGATED_BLOCK_PROVE_DEADLINE=10 +ETH_SENDER_SENDER_AGGREGATED_BLOCK_EXECUTE_DEADLINE=10 +ETH_SENDER_SENDER_TIMESTAMP_CRITERIA_MAX_ALLOWED_LAG=30 +ETH_SENDER_SENDER_MAX_ETH_TX_DATA_SIZE=120000 +ETH_SENDER_SENDER_AGGREGATED_PROOF_SIZES=1,4 +ETH_SENDER_SENDER_MAX_AGGREGATED_TX_GAS=4000000 +ETH_SENDER_SENDER_MAX_SINGLE_TX_GAS=6000000 +ETH_SENDER_SENDER_MAX_ACCEPTABLE_PRIORITY_FEE_IN_GWEI=100000000000 +ETH_SENDER_SENDER_PROOF_LOADING_MODE=OldProofFromDb 
+ETH_SENDER_SENDER_PUBDATA_SENDING_MODE=Blobs +ETH_SENDER_SENDER_OPERATOR_PRIVATE_KEY=0x7726827caac94a7f9e1b160f7ea819f172f7b6f9d2a97f992c38edeab82d4110 +ETH_SENDER_SENDER_OPERATOR_COMMIT_ETH_ADDR=0x36615Cf349d7F6344891B1e7CA7C72883F5dc049 +ETH_SENDER_SENDER_OPERATOR_BLOBS_PRIVATE_KEY=0xac1e735be8536c6534bb4f17f06f6afc73b2b5ba84ac2cfb12f7461b20c0bbe3 +ETH_SENDER_SENDER_OPERATOR_BLOBS_ETH_ADDR=0xa61464658AfeAf65CccaaFD3a512b69A83B77618 +ETH_SENDER_GAS_ADJUSTER_DEFAULT_PRIORITY_FEE_PER_GAS=1000000000 +ETH_SENDER_GAS_ADJUSTER_MAX_BASE_FEE_SAMPLES=10000 +ETH_SENDER_GAS_ADJUSTER_PRICING_FORMULA_PARAMETER_A=1.5 +ETH_SENDER_GAS_ADJUSTER_PRICING_FORMULA_PARAMETER_B=1.0005 +ETH_SENDER_GAS_ADJUSTER_INTERNAL_L1_PRICING_MULTIPLIER=0.8 +ETH_SENDER_GAS_ADJUSTER_POLL_PERIOD=5 +ETH_WATCH_CONFIRMATIONS_FOR_ETH_EVENT=0 +ETH_WATCH_ETH_NODE_POLL_INTERVAL=300 +ZKSYNC_ACTION=dont_ask +MISC_LOG_FORMAT=plain +MISC_SENTRY_URL=unset +MISC_SENTRY_PANIC_INTERVAL=1800 +MISC_SENTRY_ERROR_INTERVAL=10800 +MISC_OTLP_URL=unset +MISC_FEE_ACCOUNT_PRIVATE_KEY=0xd293c684d884d56f8d6abd64fc76757d3664904e309a0645baf8522ab6366d9e +OBJECT_STORE_MODE=FileBacked +OBJECT_STORE_FILE_BACKED_BASE_PATH=artifacts +PUBLIC_OBJECT_STORE_MODE=FileBacked +PUBLIC_OBJECT_STORE_FILE_BACKED_BASE_PATH=artifacts +PROVER_OBJECT_STORE_MODE=FileBacked +PROVER_OBJECT_STORE_FILE_BACKED_BASE_PATH=artifacts +SNAPSHOTS_OBJECT_STORE_MODE=FileBacked +SNAPSHOTS_OBJECT_STORE_FILE_BACKED_BASE_PATH=artifacts +NFS_SETUP_KEY_MOUNT_PATH=/home/setup_keys/ +RUST_LOG=zksync_node_framework=info,zksync_consensus_bft=info,zksync_consensus_network=info,zksync_consensus_storage=info,zksync_core=debug,zksync_server=debug,zksync_contract_verifier=debug,zksync_dal=info,zksync_eth_client=info,zksync_storage=info,zksync_db_manager=info,zksync_merkle_tree=info,zksync_state=debug,zksync_utils=debug,zksync_queued_job_processor=info,zksync_types=info,zksync_mempool=debug,loadnext=info,vm=info,zksync_object_store=info,zksync_external_node=info,zksync_witness_generator=info,zksync_prover_fri=info,zksync_witness_vector_generator=info,zksync_health_check=debug,zksync_proof_fri_compressor=info,vise_exporter=debug,snapshots_creator=debug, +RUST_BACKTRACE=full +RUST_LIB_BACKTRACE=1 +DATABASE_URL=postgres://postgres:notsecurepassword@127.0.0.1:5432/zksync_local +DATABASE_PROVER_URL=postgres://postgres:notsecurepassword@127.0.0.1:5432/prover_local +TEST_DATABASE_URL=postgres://postgres:notsecurepassword@localhost:5433/zksync_local_test +TEST_DATABASE_PROVER_URL=postgres://postgres:notsecurepassword@localhost:5433/prover_local_test +CONSENSUS_CONFIG_PATH=etc/env/consensus_config.yaml +CONSENSUS_SECRETS_PATH=etc/env/consensus_secrets.yaml +WITNESS_GENERATION_TIMEOUT_IN_SECS=900 +WITNESS_INITIAL_SETUP_KEY_PATH=./keys/setup/setup_2^22.key +WITNESS_KEY_DOWNLOAD_URL=https://storage.googleapis.com/matterlabs-setup-keys-us/setup-keys/setup_2^22.key +WITNESS_MAX_ATTEMPTS=1 +WITNESS_DUMP_ARGUMENTS_FOR_BLOCKS=2,3 +WITNESS_DATA_SOURCE=FromPostgres +HOUSE_KEEPER_L1_BATCH_METRICS_REPORTING_INTERVAL_MS=10000 +HOUSE_KEEPER_GPU_PROVER_QUEUE_REPORTING_INTERVAL_MS=10000 +HOUSE_KEEPER_PROVER_JOB_RETRYING_INTERVAL_MS=300000 +HOUSE_KEEPER_PROVER_STATS_REPORTING_INTERVAL_MS=5000 +HOUSE_KEEPER_WITNESS_JOB_MOVING_INTERVAL_MS=30000 +HOUSE_KEEPER_WITNESS_GENERATOR_STATS_REPORTING_INTERVAL_MS=10000 +HOUSE_KEEPER_FRI_WITNESS_JOB_MOVING_INTERVAL_MS=40000 +HOUSE_KEEPER_FRI_PROVER_JOB_RETRYING_INTERVAL_MS=30000 +HOUSE_KEEPER_FRI_WITNESS_GENERATOR_JOB_RETRYING_INTERVAL_MS=30000 +HOUSE_KEEPER_PROVER_DB_POOL_SIZE=2 
+HOUSE_KEEPER_FRI_PROVER_STATS_REPORTING_INTERVAL_MS=30000 +HOUSE_KEEPER_FRI_PROOF_COMPRESSOR_JOB_RETRYING_INTERVAL_MS=30000 +HOUSE_KEEPER_FRI_PROOF_COMPRESSOR_STATS_REPORTING_INTERVAL_MS=10000 +FRI_PROVER_SETUP_DATA_PATH=/usr/src/setup-data +FRI_PROVER_PROMETHEUS_PORT=3315 +FRI_PROVER_MAX_ATTEMPTS=10 +FRI_PROVER_GENERATION_TIMEOUT_IN_SECS=600 +FRI_PROVER_BASE_LAYER_CIRCUIT_IDS_TO_BE_VERIFIED=1 +FRI_PROVER_RECURSIVE_LAYER_CIRCUIT_IDS_TO_BE_VERIFIED=1 +FRI_PROVER_SETUP_LOAD_MODE=FromDisk +FRI_PROVER_SPECIALIZED_GROUP_ID=100 +FRI_PROVER_WITNESS_VECTOR_GENERATOR_THREAD_COUNT=5 +FRI_PROVER_QUEUE_CAPACITY=10 +FRI_PROVER_WITNESS_VECTOR_RECEIVER_PORT=3316 +FRI_PROVER_ZONE_READ_URL=http://metadata.google.internal/computeMetadata/v1/instance/zone +FRI_PROVER_SHALL_SAVE_TO_PUBLIC_BUCKET=true +FRI_WITNESS_GENERATION_TIMEOUT_IN_SECS=900 +FRI_WITNESS_BASIC_GENERATION_TIMEOUT_IN_SECS=900 +FRI_WITNESS_LEAF_GENERATION_TIMEOUT_IN_SECS=900 +FRI_WITNESS_NODE_GENERATION_TIMEOUT_IN_SECS=900 +FRI_WITNESS_SCHEDULER_GENERATION_TIMEOUT_IN_SECS=900 +FRI_WITNESS_MAX_ATTEMPTS=10 +FRI_WITNESS_DUMP_ARGUMENTS_FOR_BLOCKS=1 +FRI_WITNESS_FORCE_PROCESS_BLOCK=1 +FRI_WITNESS_SHALL_SAVE_TO_PUBLIC_BUCKET=true +FRI_PROVER_GROUP_GROUP_0_0_CIRCUIT_ID=1 +FRI_PROVER_GROUP_GROUP_0_0_AGGREGATION_ROUND=3 +FRI_PROVER_GROUP_GROUP_0_1_CIRCUIT_ID=2 +FRI_PROVER_GROUP_GROUP_0_1_AGGREGATION_ROUND=2 +FRI_PROVER_GROUP_GROUP_1_0_CIRCUIT_ID=1 +FRI_PROVER_GROUP_GROUP_1_0_AGGREGATION_ROUND=0 +FRI_PROVER_GROUP_GROUP_2_0_CIRCUIT_ID=2 +FRI_PROVER_GROUP_GROUP_2_0_AGGREGATION_ROUND=0 +FRI_PROVER_GROUP_GROUP_2_1_CIRCUIT_ID=4 +FRI_PROVER_GROUP_GROUP_2_1_AGGREGATION_ROUND=0 +FRI_PROVER_GROUP_GROUP_2_2_CIRCUIT_ID=6 +FRI_PROVER_GROUP_GROUP_2_2_AGGREGATION_ROUND=0 +FRI_PROVER_GROUP_GROUP_2_3_CIRCUIT_ID=9 +FRI_PROVER_GROUP_GROUP_2_3_AGGREGATION_ROUND=0 +FRI_PROVER_GROUP_GROUP_3_0_CIRCUIT_ID=3 +FRI_PROVER_GROUP_GROUP_3_0_AGGREGATION_ROUND=0 +FRI_PROVER_GROUP_GROUP_4_0_CIRCUIT_ID=11 +FRI_PROVER_GROUP_GROUP_4_0_AGGREGATION_ROUND=0 +FRI_PROVER_GROUP_GROUP_4_1_CIRCUIT_ID=12 +FRI_PROVER_GROUP_GROUP_4_1_AGGREGATION_ROUND=0 +FRI_PROVER_GROUP_GROUP_4_2_CIRCUIT_ID=13 +FRI_PROVER_GROUP_GROUP_4_2_AGGREGATION_ROUND=0 +FRI_PROVER_GROUP_GROUP_4_3_CIRCUIT_ID=255 +FRI_PROVER_GROUP_GROUP_4_3_AGGREGATION_ROUND=0 +FRI_PROVER_GROUP_GROUP_5_0_CIRCUIT_ID=5 +FRI_PROVER_GROUP_GROUP_5_0_AGGREGATION_ROUND=0 +FRI_PROVER_GROUP_GROUP_6_0_CIRCUIT_ID=3 +FRI_PROVER_GROUP_GROUP_6_0_AGGREGATION_ROUND=1 +FRI_PROVER_GROUP_GROUP_7_0_CIRCUIT_ID=7 +FRI_PROVER_GROUP_GROUP_7_0_AGGREGATION_ROUND=0 +FRI_PROVER_GROUP_GROUP_8_0_CIRCUIT_ID=8 +FRI_PROVER_GROUP_GROUP_8_0_AGGREGATION_ROUND=0 +FRI_PROVER_GROUP_GROUP_9_0_CIRCUIT_ID=12 +FRI_PROVER_GROUP_GROUP_9_0_AGGREGATION_ROUND=1 +FRI_PROVER_GROUP_GROUP_9_1_CIRCUIT_ID=13 +FRI_PROVER_GROUP_GROUP_9_1_AGGREGATION_ROUND=1 +FRI_PROVER_GROUP_GROUP_9_2_CIRCUIT_ID=14 +FRI_PROVER_GROUP_GROUP_9_2_AGGREGATION_ROUND=1 +FRI_PROVER_GROUP_GROUP_9_3_CIRCUIT_ID=15 +FRI_PROVER_GROUP_GROUP_9_3_AGGREGATION_ROUND=1 +FRI_PROVER_GROUP_GROUP_10_0_CIRCUIT_ID=10 +FRI_PROVER_GROUP_GROUP_10_0_AGGREGATION_ROUND=0 +FRI_PROVER_GROUP_GROUP_11_0_CIRCUIT_ID=7 +FRI_PROVER_GROUP_GROUP_11_0_AGGREGATION_ROUND=1 +FRI_PROVER_GROUP_GROUP_11_1_CIRCUIT_ID=8 +FRI_PROVER_GROUP_GROUP_11_1_AGGREGATION_ROUND=1 +FRI_PROVER_GROUP_GROUP_11_2_CIRCUIT_ID=10 +FRI_PROVER_GROUP_GROUP_11_2_AGGREGATION_ROUND=1 +FRI_PROVER_GROUP_GROUP_11_3_CIRCUIT_ID=11 +FRI_PROVER_GROUP_GROUP_11_3_AGGREGATION_ROUND=1 +FRI_PROVER_GROUP_GROUP_12_0_CIRCUIT_ID=4 +FRI_PROVER_GROUP_GROUP_12_0_AGGREGATION_ROUND=1 
+FRI_PROVER_GROUP_GROUP_12_1_CIRCUIT_ID=5 +FRI_PROVER_GROUP_GROUP_12_1_AGGREGATION_ROUND=1 +FRI_PROVER_GROUP_GROUP_12_2_CIRCUIT_ID=6 +FRI_PROVER_GROUP_GROUP_12_2_AGGREGATION_ROUND=1 +FRI_PROVER_GROUP_GROUP_12_3_CIRCUIT_ID=9 +FRI_PROVER_GROUP_GROUP_12_3_AGGREGATION_ROUND=1 +PROOF_DATA_HANDLER_HTTP_PORT=3320 +PROOF_DATA_HANDLER_PROOF_GENERATION_TIMEOUT_IN_SECS=18000 +FRI_WITNESS_VECTOR_GENERATOR_PROVER_INSTANCE_WAIT_TIMEOUT_IN_SECS=200 +FRI_WITNESS_VECTOR_GENERATOR_PROVER_INSTANCE_POLL_TIME_IN_MILLI_SECS=250 +FRI_WITNESS_VECTOR_GENERATOR_PROMETHEUS_LISTENER_PORT=3314 +FRI_WITNESS_VECTOR_GENERATOR_PROMETHEUS_PUSHGATEWAY_URL=http://127.0.0.1:9091 +FRI_WITNESS_VECTOR_GENERATOR_PROMETHEUS_PUSH_INTERVAL_MS=100 +FRI_WITNESS_VECTOR_GENERATOR_SPECIALIZED_GROUP_ID=100 +FRI_WITNESS_VECTOR_GENERATOR_MAX_PROVER_RESERVATION_DURATION_IN_SECS=1000 +FRI_PROVER_GATEWAY_API_URL=http://127.0.0.1:3320 +FRI_PROVER_GATEWAY_API_POLL_DURATION_SECS=1000 +FRI_PROVER_GATEWAY_PROMETHEUS_LISTENER_PORT=3314 +FRI_PROVER_GATEWAY_PROMETHEUS_PUSHGATEWAY_URL=http://127.0.0.1:9091 +FRI_PROVER_GATEWAY_PROMETHEUS_PUSH_INTERVAL_MS=100 +FRI_PROOF_COMPRESSOR_COMPRESSION_MODE=1 +FRI_PROOF_COMPRESSOR_PROMETHEUS_LISTENER_PORT=3321 +FRI_PROOF_COMPRESSOR_PROMETHEUS_PUSHGATEWAY_URL=http://127.0.0.1:9091 +FRI_PROOF_COMPRESSOR_PROMETHEUS_PUSH_INTERVAL_MS=100 +FRI_PROOF_COMPRESSOR_GENERATION_TIMEOUT_IN_SECS=3600 +FRI_PROOF_COMPRESSOR_MAX_ATTEMPTS=5 +FRI_PROOF_COMPRESSOR_UNIVERSAL_SETUP_PATH=keys/setup/setup_2^26.key +FRI_PROOF_COMPRESSOR_UNIVERSAL_SETUP_DOWNLOAD_URL=https://storage.googleapis.com/matterlabs-setup-keys-us/setup-keys/setup_2^26.key +FRI_PROOF_COMPRESSOR_VERIFY_WRAPPER_PROOF=true From 47e0a16a99cad735be06aec163a4e39f65a91d9f Mon Sep 17 00:00:00 2001 From: dimazhornyk Date: Tue, 14 May 2024 18:44:10 +0200 Subject: [PATCH 06/69] feat: add example of GCS da client --- Cargo.lock | 2 + contracts | 2 +- core/lib/config/src/configs/da_dispatcher.rs | 19 +-- core/lib/config/src/configs/eth_sender.rs | 6 +- core/lib/config/src/configs/mod.rs | 1 + core/lib/config/src/lib.rs | 5 +- core/lib/da_client/Cargo.toml | 2 + core/lib/da_client/src/clients/gcs/mod.rs | 21 +++- core/lib/da_client/src/clients/mock.rs | 1 - core/lib/da_client/src/clients/mod.rs | 1 - core/lib/da_client/src/lib.rs | 12 +- core/lib/da_client/src/types.rs | 4 +- core/lib/object_store/src/objects.rs | 28 +++++ core/lib/object_store/src/raw.rs | 2 + core/lib/protobuf_config/src/da_dispatcher.rs | 19 +-- core/lib/protobuf_config/src/eth.rs | 12 +- .../src/proto/config/da_dispatcher.proto | 2 +- .../src/proto/config/eth_sender.proto | 6 +- core/lib/types/src/pubdata_da.rs | 16 +-- .../node/node_framework/examples/main_node.rs | 12 ++ .../src/implementations/layers/da_client.rs | 68 +++++++++++ .../implementations/layers/da_dispatcher.rs | 114 +----------------- .../src/implementations/layers/mod.rs | 2 +- .../implementations/resources/da_interface.rs | 26 +--- .../src/implementations/resources/mod.rs | 2 +- 25 files changed, 189 insertions(+), 196 deletions(-) delete mode 100644 core/lib/da_client/src/clients/mock.rs diff --git a/Cargo.lock b/Cargo.lock index eb0ac87f4f50..fc6de17f386e 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -8422,8 +8422,10 @@ name = "zksync_da_client" version = "0.1.0" dependencies = [ "tokio", + "uuid", "zksync_config", "zksync_object_store", + "zksync_types", ] [[package]] diff --git a/contracts b/contracts index d89e406cd20c..9e8c28f90342 160000 --- a/contracts +++ b/contracts @@ -1 +1 @@ -Subproject commit 
d89e406cd20c6d6e9052ba2321334b71ef53c54e +Subproject commit 9e8c28f9034272f05c6f2fb781fb84ff37ec7116 diff --git a/core/lib/config/src/configs/da_dispatcher.rs b/core/lib/config/src/configs/da_dispatcher.rs index 9e838eb6aeb5..1dddf94da823 100644 --- a/core/lib/config/src/configs/da_dispatcher.rs +++ b/core/lib/config/src/configs/da_dispatcher.rs @@ -1,27 +1,30 @@ +use serde::Deserialize; + use crate::ObjectStoreConfig; -#[derive(Clone, Debug)] +#[derive(Clone, Debug, PartialEq)] pub struct DALayerInfo { - pub url: String, + pub name: String, pub private_key: Vec, } -#[derive(Clone, Debug)] -pub enum DACredentials { +#[derive(Clone, Debug, PartialEq)] +pub enum DataAvailabilityMode { DALayer(DALayerInfo), GCS(ObjectStoreConfig), + NoDA, } -#[derive(Debug, Clone)] +#[derive(Debug, Clone, Deserialize, PartialEq)] pub struct DADispatcherConfig { - pub credentials: DACredentials, + pub mode: DataAvailabilityMode, } impl DADispatcherConfig { pub fn for_tests() -> Self { Self { - credentials: DACredentials::DALayer(DALayerInfo { - url: "http://localhost:1234".to_string(), + mode: DataAvailabilityMode::DALayer(DALayerInfo { + name: "zkDA".into(), private_key: vec![1, 2, 3], }), } diff --git a/core/lib/config/src/configs/eth_sender.rs b/core/lib/config/src/configs/eth_sender.rs index 72de33778688..36117ef400c9 100644 --- a/core/lib/config/src/configs/eth_sender.rs +++ b/core/lib/config/src/configs/eth_sender.rs @@ -84,11 +84,7 @@ pub enum PubdataSendingMode { #[default] Calldata, Blobs, - NoDA, - GCS, - Celestia, - EigenDA, - Avail, + Custom, } #[derive(Debug, Deserialize, Clone, PartialEq)] diff --git a/core/lib/config/src/configs/mod.rs b/core/lib/config/src/configs/mod.rs index 3a5633a7a17c..b0e851f7506c 100644 --- a/core/lib/config/src/configs/mod.rs +++ b/core/lib/config/src/configs/mod.rs @@ -3,6 +3,7 @@ pub use self::{ api::ApiConfig, contract_verifier::ContractVerifierConfig, contracts::{ContractsConfig, EcosystemContracts}, + da_dispatcher::DADispatcherConfig, database::{DBConfig, PostgresConfig}, eth_sender::{EthConfig, GasAdjusterConfig}, eth_watch::EthWatchConfig, diff --git a/core/lib/config/src/lib.rs b/core/lib/config/src/lib.rs index 66656e60b702..1d74e51b6728 100644 --- a/core/lib/config/src/lib.rs +++ b/core/lib/config/src/lib.rs @@ -1,8 +1,9 @@ #![allow(clippy::upper_case_acronyms, clippy::derive_partial_eq_without_eq)] pub use crate::configs::{ - ApiConfig, ContractVerifierConfig, ContractsConfig, DBConfig, EthConfig, EthWatchConfig, - GasAdjusterConfig, GenesisConfig, ObjectStoreConfig, PostgresConfig, SnapshotsCreatorConfig, + ApiConfig, ContractVerifierConfig, ContractsConfig, DADispatcherConfig, DBConfig, EthConfig, + EthWatchConfig, GasAdjusterConfig, GenesisConfig, ObjectStoreConfig, PostgresConfig, + SnapshotsCreatorConfig, }; pub mod configs; diff --git a/core/lib/da_client/Cargo.toml b/core/lib/da_client/Cargo.toml index f2d317ddbeeb..223c2315c289 100644 --- a/core/lib/da_client/Cargo.toml +++ b/core/lib/da_client/Cargo.toml @@ -14,6 +14,8 @@ categories.workspace = true [dependencies] zksync_object_store.workspace = true zksync_config.workspace = true +zksync_types.workspace = true +uuid = { version = "1.5.0", features = ["v4"] } [dev-dependencies] tokio = { workspace = true, features = ["full"] } \ No newline at end of file diff --git a/core/lib/da_client/src/clients/gcs/mod.rs b/core/lib/da_client/src/clients/gcs/mod.rs index e2656ed351a8..41cf0e05f886 100644 --- a/core/lib/da_client/src/clients/gcs/mod.rs +++ b/core/lib/da_client/src/clients/gcs/mod.rs @@ -6,10 +6,11 
@@ use std::{ use zksync_config::ObjectStoreConfig; use zksync_object_store::{ObjectStore, ObjectStoreError, ObjectStoreFactory}; +use zksync_types::{pubdata_da::StorablePubdata, L1BatchNumber}; use crate::{ types::{DispatchResponse, InclusionData}, - DataAvailabilityClient, + DataAvailabilityInterface, }; struct GCSDAClient { @@ -24,9 +25,21 @@ impl GCSDAClient { } } -impl DataAvailabilityClient for GCSDAClient { - fn dispatch_blob(&self, data: Vec) -> Result { - Ok(DispatchResponse::default()) +impl DataAvailabilityInterface for GCSDAClient { + async fn dispatch_blob( + &self, + batch_number: L1BatchNumber, + data: Vec, + ) -> Result { + let key = self + .object_store + .put(batch_number, &StorablePubdata { data }) + .await + .unwrap(); + + Ok(DispatchResponse { + blob_id: key.into_bytes(), + }) } fn get_inclusion_data(&self, _: Vec) -> Result { diff --git a/core/lib/da_client/src/clients/mock.rs b/core/lib/da_client/src/clients/mock.rs deleted file mode 100644 index 8b137891791f..000000000000 --- a/core/lib/da_client/src/clients/mock.rs +++ /dev/null @@ -1 +0,0 @@ - diff --git a/core/lib/da_client/src/clients/mod.rs b/core/lib/da_client/src/clients/mod.rs index b1586f939c89..13a1984c05cd 100644 --- a/core/lib/da_client/src/clients/mod.rs +++ b/core/lib/da_client/src/clients/mod.rs @@ -1,2 +1 @@ mod gcs; -mod mock; diff --git a/core/lib/da_client/src/lib.rs b/core/lib/da_client/src/lib.rs index 389f3c9c8ea3..93766185c619 100644 --- a/core/lib/da_client/src/lib.rs +++ b/core/lib/da_client/src/lib.rs @@ -1,11 +1,17 @@ use std::fmt; +use zksync_types::L1BatchNumber; + use crate::types::{DispatchResponse, InclusionData}; pub mod clients; mod types; -pub trait DataAvailabilityClient: Sync + Send + fmt::Debug { - fn dispatch_blob(&self, data: Vec) -> Result; - fn get_inclusion_data(&self, blob_id: Vec) -> Result; +pub trait DataAvailabilityInterface: Sync + Send + fmt::Debug { + fn dispatch_blob( + &self, + batch_number: L1BatchNumber, + data: Vec, + ) -> Result; + fn get_inclusion_data(&self, blob_id: Vec) -> Result; } diff --git a/core/lib/da_client/src/types.rs b/core/lib/da_client/src/types.rs index b2618efe4dbd..3f35972d84b0 100644 --- a/core/lib/da_client/src/types.rs +++ b/core/lib/da_client/src/types.rs @@ -1,6 +1,8 @@ +pub(crate) type Error = String; + #[derive(Default)] pub struct DispatchResponse { - blob_id: Vec, + pub(crate) blob_id: Vec, } #[derive(Default)] diff --git a/core/lib/object_store/src/objects.rs b/core/lib/object_store/src/objects.rs index 90b19fc52d30..107e118ebff3 100644 --- a/core/lib/object_store/src/objects.rs +++ b/core/lib/object_store/src/objects.rs @@ -7,6 +7,7 @@ use flate2::{read::GzDecoder, write::GzEncoder, Compression}; use prost::Message; use zksync_protobuf::{decode, ProtoFmt}; use zksync_types::{ + pubdata_da::StorablePubdata, snapshots::{ SnapshotFactoryDependencies, SnapshotStorageLogsChunk, SnapshotStorageLogsStorageKey, }, @@ -87,6 +88,33 @@ impl StoredObject for SnapshotFactoryDependencies { } } +impl StoredObject for StorablePubdata { + const BUCKET: Bucket = Bucket::DataAvailability; + type Key<'a> = L1BatchNumber; + + fn encode_key(key: Self::Key<'_>) -> String { + format!("l1_batch_{key}_pubdata.gzip") + } + + fn serialize(&self) -> Result, BoxedError> { + let mut encoder = GzEncoder::new(Vec::new(), Compression::default()); + encoder.write_all(&self.data[..])?; + encoder.finish().map_err(From::from) + } + + fn deserialize(bytes: Vec) -> Result { + let mut decoder = GzDecoder::new(&bytes[..]); + let mut decompressed_bytes = Vec::new(); + 
decoder + .read_to_end(&mut decompressed_bytes) + .map_err(BoxedError::from)?; + + Ok(Self { + data: decompressed_bytes, + }) + } +} + impl StoredObject for SnapshotStorageLogsChunk { const BUCKET: Bucket = Bucket::StorageSnapshot; type Key<'a> = SnapshotStorageLogsStorageKey; diff --git a/core/lib/object_store/src/raw.rs b/core/lib/object_store/src/raw.rs index d7036e5189dd..3125431d9d5b 100644 --- a/core/lib/object_store/src/raw.rs +++ b/core/lib/object_store/src/raw.rs @@ -24,6 +24,7 @@ pub enum Bucket { SchedulerWitnessJobsFri, ProofsFri, StorageSnapshot, + DataAvailability, } impl Bucket { @@ -40,6 +41,7 @@ impl Bucket { Self::SchedulerWitnessJobsFri => "scheduler_witness_jobs_fri", Self::ProofsFri => "proofs_fri", Self::StorageSnapshot => "storage_logs_snapshots", + Self::DataAvailability => "data_availability", } } } diff --git a/core/lib/protobuf_config/src/da_dispatcher.rs b/core/lib/protobuf_config/src/da_dispatcher.rs index d42daec0fd18..dfe7ad62abf3 100644 --- a/core/lib/protobuf_config/src/da_dispatcher.rs +++ b/core/lib/protobuf_config/src/da_dispatcher.rs @@ -9,11 +9,11 @@ impl ProtoRepr for proto::DataAvailabilityDispatcher { fn read(&self) -> anyhow::Result { configs::da_dispatcher::DADispatcherConfig { - credentials: match &self.credentials { + mode: match &self.credentials { Some(proto::data_availability_dispatcher::Credentials::DaLayer(config)) => { - configs::da_dispatcher::DACredentials::DALayer( + configs::da_dispatcher::DataAvailabilityMode::DALayer( configs::da_dispatcher::DALayerInfo { - url: *required(&config.url).context("url"), + name: *required(&config.name).context("name"), private_key: required(&config.private_key) .context("private_key") .into_bytes(), @@ -21,26 +21,27 @@ impl ProtoRepr for proto::DataAvailabilityDispatcher { ) } Some(proto::data_availability_dispatcher::Credentials::ObjectStore(config)) => { - configs::da_dispatcher::DACredentials::GCS(config.read()?) + configs::da_dispatcher::DataAvailabilityMode::GCS(config.read()?) 
} - None => None, + None => configs::da_dispatcher::DataAvailabilityMode::NoDA, }, } } fn build(this: &Self::Type) -> Self { - let credentials = match this.credentials.clone() { - configs::da_dispatcher::DACredentials::DALayer(info) => Some( + let credentials = match this.mode.clone() { + configs::da_dispatcher::DataAvailabilityMode::DALayer(info) => Some( proto::data_availability_dispatcher::Credentials::DaLayer(proto::DaLayer { - url: Some(info.url.clone()), + name: Some(info.name.clone()), private_key: info.private_key.clone().into(), }), ), - configs::da_dispatcher::DACredentials::GCS(config) => Some( + configs::da_dispatcher::DataAvailabilityMode::GCS(config) => Some( proto::data_availability_dispatcher::Credentials::ObjectStore(ObjectStore::build( &config, )), ), + configs::da_dispatcher::DataAvailabilityMode::NoDA => None, }; Self { credentials } diff --git a/core/lib/protobuf_config/src/eth.rs b/core/lib/protobuf_config/src/eth.rs index 759e05f8d5c0..8ea59f9e3477 100644 --- a/core/lib/protobuf_config/src/eth.rs +++ b/core/lib/protobuf_config/src/eth.rs @@ -48,11 +48,7 @@ impl proto::PubdataSendingMode { match x { From::Calldata => Self::Calldata, From::Blobs => Self::Blobs, - From::NoDA => Self::NoDa, - From::GCS => Self::Gcs, - From::Celestia => Self::Celestia, - From::EigenDA => Self::EigenDa, - From::Avail => Self::Avail, + From::Custom => Self::Custom, } } @@ -61,11 +57,7 @@ impl proto::PubdataSendingMode { match self { Self::Calldata => To::Calldata, Self::Blobs => To::Blobs, - Self::NoDa => To::NoDA, - Self::Gcs => To::GCS, - Self::Celestia => To::Celestia, - Self::EigenDa => To::EigenDA, - Self::Avail => To::Avail, + Self::Custom => To::Custom, } } } diff --git a/core/lib/protobuf_config/src/proto/config/da_dispatcher.proto b/core/lib/protobuf_config/src/proto/config/da_dispatcher.proto index 0aeeef9c6799..e5ad0f6d359a 100644 --- a/core/lib/protobuf_config/src/proto/config/da_dispatcher.proto +++ b/core/lib/protobuf_config/src/proto/config/da_dispatcher.proto @@ -5,7 +5,7 @@ package zksync.config.da_dispatcher; import "zksync/config/object_store.proto"; message DALayer { - optional string url = 1; // required + optional string name = 1; // required optional string private_key = 2; // required } diff --git a/core/lib/protobuf_config/src/proto/config/eth_sender.proto b/core/lib/protobuf_config/src/proto/config/eth_sender.proto index 10857d5c0df8..8b6ec2d4a0d8 100644 --- a/core/lib/protobuf_config/src/proto/config/eth_sender.proto +++ b/core/lib/protobuf_config/src/proto/config/eth_sender.proto @@ -23,11 +23,7 @@ enum ProofLoadingMode { enum PubdataSendingMode { CALLDATA = 0; BLOBS = 1; - NO_DA = 2; - GCS = 3; - CELESTIA = 4; - EIGEN_DA = 5; - AVAIL = 6; + CUSTOM = 2; } message Sender { diff --git a/core/lib/types/src/pubdata_da.rs b/core/lib/types/src/pubdata_da.rs index 22b6184df510..ab4058c50d01 100644 --- a/core/lib/types/src/pubdata_da.rs +++ b/core/lib/types/src/pubdata_da.rs @@ -9,11 +9,7 @@ use zksync_config::configs::eth_sender::PubdataSendingMode; pub enum PubdataDA { Calldata = 0, Blobs, - NoDA, - GCS, - Celestia, - EigenDA, - Avail, + Custom, } impl From for PubdataDA { @@ -21,11 +17,11 @@ impl From for PubdataDA { match value { PubdataSendingMode::Calldata => PubdataDA::Calldata, PubdataSendingMode::Blobs => PubdataDA::Blobs, - PubdataSendingMode::NoDA => PubdataDA::NoDA, - PubdataSendingMode::GCS => PubdataDA::GCS, - PubdataSendingMode::Celestia => PubdataDA::Celestia, - PubdataSendingMode::EigenDA => PubdataDA::EigenDA, - PubdataSendingMode::Avail => 
PubdataDA::Avail, + PubdataSendingMode::Custom => PubdataDA::Custom, } } } + +pub struct StorablePubdata { + pub data: Vec, +} diff --git a/core/node/node_framework/examples/main_node.rs b/core/node/node_framework/examples/main_node.rs index b524664c89a1..ba0315a52bda 100644 --- a/core/node/node_framework/examples/main_node.rs +++ b/core/node/node_framework/examples/main_node.rs @@ -34,6 +34,7 @@ use zksync_node_framework::{ commitment_generator::CommitmentGeneratorLayer, consensus::{ConsensusLayer, Mode as ConsensusMode}, contract_verification_api::ContractVerificationApiLayer, + da_dispatcher::DataAvailabilityDispatcherLayer, eth_sender::EthSenderLayer, eth_watch::EthWatchLayer, healtcheck_server::HealthCheckLayer, @@ -312,6 +313,17 @@ impl MainNodeBuilder { Ok(self) } + fn add_da_dispatcher_layer(mut self) -> anyhow::Result { + let eth_sender_config = EthConfig::from_env()?; + let l1_batch_commit_data_generator_mode = + GenesisConfig::from_env()?.l1_batch_commit_data_generator_mode; + self.node.add_layer(DataAvailabilityDispatcherLayer::new( + eth_sender_config, + l1_batch_commit_data_generator_mode, + )); + Ok(self) + } + fn add_house_keeper_layer(mut self) -> anyhow::Result { let house_keeper_config = HouseKeeperConfig::from_env()?; let fri_prover_config = FriProverConfig::from_env()?; diff --git a/core/node/node_framework/src/implementations/layers/da_client.rs b/core/node/node_framework/src/implementations/layers/da_client.rs index 8b137891791f..602002973276 100644 --- a/core/node/node_framework/src/implementations/layers/da_client.rs +++ b/core/node/node_framework/src/implementations/layers/da_client.rs @@ -1 +1,69 @@ +use anyhow::Context as _; +use zksync_config::{configs::da_dispatcher::DADispatcherConfig, EthConfig}; +use zksync_eth_client::clients::PKSigningClient; +use crate::{ + implementations::resources::{ + da_interface::DAInterfaceResource, + eth_interface::{ + BoundEthInterfaceForBlobsResource, BoundEthInterfaceResource, EthInterfaceResource, + }, + }, + service::ServiceContext, + wiring_layer::{WiringError, WiringLayer}, +}; + +#[derive(Debug)] +pub struct DataAvailabilityClientLayer { + eth_sender_config: EthConfig, + da_dispatcher_config: DADispatcherConfig, +} + +impl DataAvailabilityClientLayer { + pub fn new(eth_sender_config: EthConfig, da_dispatcher_config: DADispatcherConfig) -> Self { + Self { + eth_sender_config, + da_dispatcher_config, + } + } +} + +#[async_trait::async_trait] +impl WiringLayer for DataAvailabilityClientLayer { + fn layer_name(&self) -> &'static str { + "data_availability_client_layer" + } + + async fn wire(self: Box, mut context: ServiceContext<'_>) -> Result<(), WiringError> { + let private_key = self.wallets.operator.private_key(); + let gas_adjuster_config = self + .eth_sender_config + .gas_adjuster + .as_ref() + .context("gas_adjuster config is missing")?; + let EthInterfaceResource(query_client) = context.get_resource().await?; + + let signing_client = PKSigningClient::new_raw( + private_key.clone(), + self.contracts_config.diamond_proxy_addr, + gas_adjuster_config.default_priority_fee_per_gas, + self.l1_chain_id, + query_client.clone(), + ); + context.insert_resource(BoundEthInterfaceResource(Box::new(signing_client)))?; + + if let Some(blob_operator) = &self.wallets.blob_operator { + let private_key = blob_operator.private_key(); + let signing_client_for_blobs = PKSigningClient::new_raw( + private_key.clone(), + self.contracts_config.diamond_proxy_addr, + gas_adjuster_config.default_priority_fee_per_gas, + self.l1_chain_id, + 
query_client, + ); + context.insert_resource(DAInterfaceResource(Box::new(signing_client_for_blobs)))?; + } + + Ok(()) + } +} diff --git a/core/node/node_framework/src/implementations/layers/da_dispatcher.rs b/core/node/node_framework/src/implementations/layers/da_dispatcher.rs index cbed9ec05afe..9cf43cad117d 100644 --- a/core/node/node_framework/src/implementations/layers/da_dispatcher.rs +++ b/core/node/node_framework/src/implementations/layers/da_dispatcher.rs @@ -2,14 +2,7 @@ use std::sync::Arc; use anyhow::Context; use zksync_circuit_breaker::l1_txs::FailedL1TransactionChecker; -use zksync_config::{ - configs::{ - chain::{L1BatchCommitDataGeneratorMode, NetworkConfig}, - eth_sender::EthConfig, - ContractsConfig, - }, - GenesisConfig, -}; +use zksync_config::configs::{chain::L1BatchCommitDataGeneratorMode, eth_sender::EthConfig}; use zksync_eth_client::BoundEthInterface; use zksync_eth_sender::{ l1_batch_commit_data_generator::{ @@ -22,6 +15,7 @@ use zksync_eth_sender::{ use crate::{ implementations::resources::{ circuit_breakers::CircuitBreakersResource, + da_interface::DAInterfaceResource, eth_interface::{BoundEthInterfaceForBlobsResource, BoundEthInterfaceResource}, l1_tx_params::L1TxParamsResource, object_store::ObjectStoreResource, @@ -60,110 +54,8 @@ impl WiringLayer for DataAvailabilityDispatcherLayer { let master_pool_resource = context.get_resource::>().await?; let master_pool = master_pool_resource.get().await.unwrap(); - let da_client = context.get_resource::().await?.0; - let eth_client_blobs = match context - .get_resource::() - .await - { - Ok(BoundEthInterfaceForBlobsResource(client)) => Some(client), - Err(WiringError::ResourceLacking { .. }) => None, - Err(err) => return Err(err), - }; - let object_store = context.get_resource::().await?.0; - - // Create and add tasks. - let eth_client_blobs_addr = eth_client_blobs - .as_deref() - .map(BoundEthInterface::sender_account); - - let l1_batch_commit_data_generator: Arc = - match self.l1_batch_commit_data_generator_mode { - L1BatchCommitDataGeneratorMode::Rollup => { - Arc::new(RollupModeL1BatchCommitDataGenerator {}) - } - L1BatchCommitDataGeneratorMode::Validium => { - Arc::new(ValidiumModeL1BatchCommitDataGenerator {}) - } - }; - - let config = self.eth_sender_config.sender.context("sender")?; - let aggregator = Aggregator::new( - config.clone(), - object_store, - eth_client_blobs_addr.is_some(), - l1_batch_commit_data_generator.clone(), - ); - - let eth_tx_aggregator_actor = EthTxAggregator::new( - master_pool.clone(), - config.clone(), - aggregator, - eth_client.clone(), - self.contracts_config.validator_timelock_addr, - self.contracts_config.l1_multicall3_addr, - self.contracts_config.diamond_proxy_addr, - self.network_config.zksync_network_id, - eth_client_blobs_addr, - l1_batch_commit_data_generator, - ) - .await; - - context.add_task(Box::new(EthTxAggregatorTask { - eth_tx_aggregator_actor, - })); - - let gas_adjuster = context.get_resource::().await?.0; - - let eth_tx_manager_actor = EthTxManager::new( - master_pool, - config, - gas_adjuster, - eth_client, - eth_client_blobs, - ); - - context.add_task(Box::new(EthTxManagerTask { - eth_tx_manager_actor, - })); - - // Insert circuit breaker. 
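For reference alongside the rewired layer above, a minimal sketch of how a task could use the client behind the `DAInterfaceResource` imported in this hunk. It assumes the `DataAvailabilityInterface` trait added earlier in this patch and that `DispatchResponse::blob_id` is readable by callers (it is still `pub(crate)` at this point), so it is an illustration rather than code from the series.

use zksync_da_client::DataAvailabilityInterface;
use zksync_types::L1BatchNumber;

// Push one batch's pubdata through the DA client and return the blob id that a
// later step would exchange for an inclusion proof via `get_inclusion_data`.
fn dispatch_one(
    da_client: &dyn DataAvailabilityInterface,
    batch_number: L1BatchNumber,
    pubdata: Vec<u8>,
) -> anyhow::Result<Vec<u8>> {
    let response = da_client
        .dispatch_blob(batch_number, pubdata)
        .map_err(|err| anyhow::anyhow!("DA dispatch failed: {err}"))?;
    Ok(response.blob_id)
}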
- let CircuitBreakersResource { breakers } = context.get_resource_or_default().await; - breakers - .insert(Box::new(FailedL1TransactionChecker { pool: replica_pool })) - .await; + let da_client = context.get_resource::().await?.0; Ok(()) } } - -#[derive(Debug)] -struct EthTxAggregatorTask { - eth_tx_aggregator_actor: EthTxAggregator, -} - -#[async_trait::async_trait] -impl Task for EthTxAggregatorTask { - fn name(&self) -> &'static str { - "eth_tx_aggregator" - } - - async fn run(self: Box, stop_receiver: StopReceiver) -> anyhow::Result<()> { - self.eth_tx_aggregator_actor.run(stop_receiver.0).await - } -} - -#[derive(Debug)] -struct EthTxManagerTask { - eth_tx_manager_actor: EthTxManager, -} - -#[async_trait::async_trait] -impl Task for EthTxManagerTask { - fn name(&self) -> &'static str { - "eth_tx_manager" - } - - async fn run(self: Box, stop_receiver: StopReceiver) -> anyhow::Result<()> { - self.eth_tx_manager_actor.run(stop_receiver.0).await - } -} diff --git a/core/node/node_framework/src/implementations/layers/mod.rs b/core/node/node_framework/src/implementations/layers/mod.rs index 37d2d300e6ab..f409266714e6 100644 --- a/core/node/node_framework/src/implementations/layers/mod.rs +++ b/core/node/node_framework/src/implementations/layers/mod.rs @@ -4,7 +4,7 @@ pub mod consensus; pub mod consistency_checker; pub mod contract_verification_api; mod da_client; -mod da_dispatcher; +pub mod da_dispatcher; pub mod eth_sender; pub mod eth_watch; pub mod healtcheck_server; diff --git a/core/node/node_framework/src/implementations/resources/da_interface.rs b/core/node/node_framework/src/implementations/resources/da_interface.rs index ca569b2e1f7a..e38f28dc13a6 100644 --- a/core/node/node_framework/src/implementations/resources/da_interface.rs +++ b/core/node/node_framework/src/implementations/resources/da_interface.rs @@ -1,30 +1,12 @@ -use zksync_da_client::DataAvailabilityClient; +use zksync_da_client::DataAvailabilityInterface; use crate::resource::Resource; #[derive(Debug, Clone)] -pub struct EthInterfaceResource(pub Box>); +pub struct DAInterfaceResource(pub Box); -impl Resource for EthInterfaceResource { +impl Resource for DAInterfaceResource { fn name() -> String { - "common/eth_interface".into() - } -} - -#[derive(Debug, Clone)] -pub struct BoundEthInterfaceResource(pub Box); - -impl Resource for BoundEthInterfaceResource { - fn name() -> String { - "common/bound_eth_interface".into() - } -} - -#[derive(Debug, Clone)] -pub struct BoundEthInterfaceForBlobsResource(pub Box); - -impl Resource for BoundEthInterfaceForBlobsResource { - fn name() -> String { - "common/bound_eth_interface_for_blobs".into() + "common/da_interface".into() } } diff --git a/core/node/node_framework/src/implementations/resources/mod.rs b/core/node/node_framework/src/implementations/resources/mod.rs index 240af30f1765..b8f5942db58c 100644 --- a/core/node/node_framework/src/implementations/resources/mod.rs +++ b/core/node/node_framework/src/implementations/resources/mod.rs @@ -1,6 +1,6 @@ pub mod action_queue; pub mod circuit_breakers; -mod da_interface; +pub(crate) mod da_interface; pub mod eth_interface; pub mod fee_input; pub mod healthcheck; From 710855a5eefdb4dc7d716b555741de7e49f37dae Mon Sep 17 00:00:00 2001 From: dimazhornyk Date: Thu, 16 May 2024 13:54:54 +0200 Subject: [PATCH 07/69] sync to upstream --- Cargo.lock | 520 ++++++++++++------ Cargo.toml | 3 +- core/lib/config/src/configs/da_dispatcher.rs | 7 +- core/lib/da_client/Cargo.toml | 21 - core/lib/da_client/src/clients/gcs/mod.rs | 57 -- 
core/lib/da_client/src/clients/mod.rs | 1 - core/lib/da_client/src/lib.rs | 17 - core/lib/da_client/src/types.rs | 11 - core/node/node_framework/Cargo.toml | 2 +- .../implementations/layers/da_dispatcher.rs | 34 +- .../implementations/resources/da_interface.rs | 2 +- 11 files changed, 368 insertions(+), 307 deletions(-) delete mode 100644 core/lib/da_client/Cargo.toml delete mode 100644 core/lib/da_client/src/clients/gcs/mod.rs delete mode 100644 core/lib/da_client/src/clients/mod.rs delete mode 100644 core/lib/da_client/src/lib.rs delete mode 100644 core/lib/da_client/src/types.rs diff --git a/Cargo.lock b/Cargo.lock index fc6de17f386e..c9bb77513a71 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -668,13 +668,13 @@ dependencies = [ "clap 4.4.6", "serde_json", "tokio", - "vlog", + "vlog 0.1.0", "zksync_block_reverter", - "zksync_config", + "zksync_config 0.1.0", "zksync_dal", "zksync_env_config", - "zksync_object_store", - "zksync_types", + "zksync_object_store 0.1.0", + "zksync_types 0.1.0", ] [[package]] @@ -2357,16 +2357,16 @@ dependencies = [ "serde_yaml", "tokio", "tracing", - "zksync_config", - "zksync_contracts", + "zksync_config 0.1.0", + "zksync_contracts 0.1.0", "zksync_core", "zksync_dal", "zksync_env_config", "zksync_node_genesis", "zksync_protobuf", "zksync_protobuf_config", - "zksync_types", - "zksync_utils", + "zksync_types 0.1.0", + "zksync_utils 0.1.0", ] [[package]] @@ -2834,6 +2834,16 @@ dependencies = [ "tokio-native-tls", ] +[[package]] +name = "hyperchain_da" +version = "0.1.0" +source = "git+https://github.com/matter-labs/hyperchain-da.git?rev=641bda157f1d0cdc3455f3a825ea19914549bb9c#641bda157f1d0cdc3455f3a825ea19914549bb9c" +dependencies = [ + "zksync_config 0.1.0 (git+https://github.com/matter-labs/zksync-era.git?rev=47e0a16a99cad735be06aec163a4e39f65a91d9f)", + "zksync_object_store 0.1.0 (git+https://github.com/matter-labs/zksync-era.git?rev=47e0a16a99cad735be06aec163a4e39f65a91d9f)", + "zksync_types 0.1.0 (git+https://github.com/matter-labs/zksync-era.git?rev=47e0a16a99cad735be06aec163a4e39f65a91d9f)", +] + [[package]] name = "iai" version = "0.1.1" @@ -3416,14 +3426,14 @@ dependencies = [ "thiserror", "tokio", "tracing", - "vlog", - "zksync_config", - "zksync_contracts", + "vlog 0.1.0", + "zksync_config 0.1.0", + "zksync_contracts 0.1.0", "zksync_eth_client", "zksync_eth_signer", - "zksync_system_constants", - "zksync_types", - "zksync_utils", + "zksync_system_constants 0.1.0", + "zksync_types 0.1.0", + "zksync_utils 0.1.0", "zksync_web3_decl", ] @@ -3577,12 +3587,12 @@ dependencies = [ "anyhow", "clap 4.4.6", "tracing", - "vlog", - "zksync_config", + "vlog 0.1.0", + "zksync_config 0.1.0", "zksync_env_config", "zksync_merkle_tree", "zksync_storage", - "zksync_types", + "zksync_types 0.1.0", ] [[package]] @@ -3749,13 +3759,13 @@ dependencies = [ "zk_evm 1.4.0", "zk_evm 1.4.1", "zk_evm 1.5.0", - "zksync_contracts", + "zksync_contracts 0.1.0", "zksync_eth_signer", "zksync_state", - "zksync_system_constants", + "zksync_system_constants 0.1.0", "zksync_test_account", - "zksync_types", - "zksync_utils", + "zksync_types 0.1.0", + "zksync_utils 0.1.0", ] [[package]] @@ -4842,7 +4852,7 @@ version = "0.1.0" dependencies = [ "sqlx", "strum", - "zksync_basic_types", + "zksync_basic_types 0.1.0", "zksync_db_connection", ] @@ -6024,12 +6034,12 @@ dependencies = [ "tokio", "tracing", "vise", - "vlog", - "zksync_config", + "vlog 0.1.0", + "zksync_config 0.1.0", "zksync_dal", "zksync_env_config", - "zksync_object_store", - "zksync_types", + "zksync_object_store 0.1.0", + 
"zksync_types 0.1.0", ] [[package]] @@ -6515,10 +6525,10 @@ dependencies = [ "once_cell", "serde", "serde_json", - "zksync_contracts", + "zksync_contracts 0.1.0", "zksync_state", - "zksync_types", - "zksync_utils", + "zksync_types 0.1.0", + "zksync_utils 0.1.0", ] [[package]] @@ -7256,10 +7266,10 @@ dependencies = [ "anyhow", "serde_json", "tokio", - "zksync_config", + "zksync_config 0.1.0", "zksync_dal", "zksync_env_config", - "zksync_types", + "zksync_types 0.1.0", ] [[package]] @@ -7319,6 +7329,22 @@ dependencies = [ "tracing-subscriber", ] +[[package]] +name = "vlog" +version = "0.1.0" +source = "git+https://github.com/matter-labs/zksync-era.git?rev=47e0a16a99cad735be06aec163a4e39f65a91d9f#47e0a16a99cad735be06aec163a4e39f65a91d9f" +dependencies = [ + "chrono", + "opentelemetry", + "opentelemetry-otlp", + "opentelemetry-semantic-conventions", + "sentry", + "serde_json", + "tracing", + "tracing-opentelemetry", + "tracing-subscriber", +] + [[package]] name = "vm-benchmark" version = "0.1.0" @@ -7338,11 +7364,11 @@ dependencies = [ "multivm", "once_cell", "zk_evm 1.3.3 (git+https://github.com/matter-labs/era-zk_evm.git?tag=v1.3.3-rc2)", - "zksync_contracts", + "zksync_contracts 0.1.0", "zksync_state", - "zksync_system_constants", - "zksync_types", - "zksync_utils", + "zksync_system_constants 0.1.0", + "zksync_types 0.1.0", + "zksync_utils 0.1.0", ] [[package]] @@ -7353,11 +7379,11 @@ dependencies = [ "multivm", "tokio", "tracing", - "zksync_contracts", + "zksync_contracts 0.1.0", "zksync_dal", "zksync_state", - "zksync_types", - "zksync_utils", + "zksync_types 0.1.0", + "zksync_utils 0.1.0", ] [[package]] @@ -8033,6 +8059,24 @@ dependencies = [ "url", ] +[[package]] +name = "zksync_basic_types" +version = "0.1.0" +source = "git+https://github.com/matter-labs/zksync-era.git?rev=47e0a16a99cad735be06aec163a4e39f65a91d9f#47e0a16a99cad735be06aec163a4e39f65a91d9f" +dependencies = [ + "anyhow", + "chrono", + "ethabi", + "hex", + "num_enum 0.7.2", + "serde", + "serde_json", + "strum", + "thiserror", + "tiny-keccak 2.0.2", + "url", +] + [[package]] name = "zksync_block_reverter" version = "0.1.0" @@ -8044,15 +8088,15 @@ dependencies = [ "test-casing", "tokio", "tracing", - "zksync_config", - "zksync_contracts", + "zksync_config 0.1.0", + "zksync_contracts 0.1.0", "zksync_dal", "zksync_eth_client", "zksync_merkle_tree", - "zksync_object_store", + "zksync_object_store 0.1.0", "zksync_state", "zksync_storage", - "zksync_types", + "zksync_types 0.1.0", ] [[package]] @@ -8066,7 +8110,7 @@ dependencies = [ "tokio", "tracing", "vise", - "zksync_config", + "zksync_config 0.1.0", "zksync_dal", ] @@ -8090,8 +8134,8 @@ dependencies = [ "zksync_dal", "zksync_health_check", "zksync_l1_contract_interface", - "zksync_types", - "zksync_utils", + "zksync_types 0.1.0", + "zksync_utils 0.1.0", ] [[package]] @@ -8120,9 +8164,23 @@ dependencies = [ "rand 0.8.5", "serde", "url", - "zksync_basic_types", + "zksync_basic_types 0.1.0", "zksync_consensus_utils", - "zksync_crypto_primitives", + "zksync_crypto_primitives 0.1.0", +] + +[[package]] +name = "zksync_config" +version = "0.1.0" +source = "git+https://github.com/matter-labs/zksync-era.git?rev=47e0a16a99cad735be06aec163a4e39f65a91d9f#47e0a16a99cad735be06aec163a4e39f65a91d9f" +dependencies = [ + "anyhow", + "rand 0.8.5", + "serde", + "url", + "zksync_basic_types 0.1.0 (git+https://github.com/matter-labs/zksync-era.git?rev=47e0a16a99cad735be06aec163a4e39f65a91d9f)", + "zksync_consensus_utils", + "zksync_crypto_primitives 0.1.0 
(git+https://github.com/matter-labs/zksync-era.git?rev=47e0a16a99cad735be06aec163a4e39f65a91d9f)", ] [[package]] @@ -8281,19 +8339,33 @@ dependencies = [ "thiserror", "tokio", "tracing", - "vlog", - "zksync_config", - "zksync_contracts", + "vlog 0.1.0", + "zksync_config 0.1.0", + "zksync_contracts 0.1.0", "zksync_dal", "zksync_env_config", "zksync_queued_job_processor", - "zksync_types", - "zksync_utils", + "zksync_types 0.1.0", + "zksync_utils 0.1.0", +] + +[[package]] +name = "zksync_contracts" +version = "0.1.0" +dependencies = [ + "envy", + "ethabi", + "hex", + "once_cell", + "serde", + "serde_json", + "zksync_utils 0.1.0", ] [[package]] name = "zksync_contracts" version = "0.1.0" +source = "git+https://github.com/matter-labs/zksync-era.git?rev=47e0a16a99cad735be06aec163a4e39f65a91d9f#47e0a16a99cad735be06aec163a4e39f65a91d9f" dependencies = [ "envy", "ethabi", @@ -8301,7 +8373,7 @@ dependencies = [ "once_cell", "serde", "serde_json", - "zksync_utils", + "zksync_utils 0.1.0 (git+https://github.com/matter-labs/zksync-era.git?rev=47e0a16a99cad735be06aec163a4e39f65a91d9f)", ] [[package]] @@ -8343,12 +8415,12 @@ dependencies = [ "tower-http", "tracing", "vise", - "vlog", + "vlog 0.1.0", "vm_utils", "zksync_circuit_breaker", "zksync_commitment_generator", "zksync_concurrency", - "zksync_config", + "zksync_config 0.1.0", "zksync_consensus_bft", "zksync_consensus_crypto", "zksync_consensus_executor", @@ -8356,7 +8428,7 @@ dependencies = [ "zksync_consensus_roles", "zksync_consensus_storage", "zksync_consensus_utils", - "zksync_contracts", + "zksync_contracts 0.1.0", "zksync_dal", "zksync_db_connection", "zksync_eth_client", @@ -8368,11 +8440,11 @@ dependencies = [ "zksync_l1_contract_interface", "zksync_mempool", "zksync_merkle_tree", - "zksync_mini_merkle_tree", + "zksync_mini_merkle_tree 0.1.0", "zksync_node_fee_model", "zksync_node_genesis", "zksync_node_test_utils", - "zksync_object_store", + "zksync_object_store 0.1.0", "zksync_proof_data_handler", "zksync_protobuf", "zksync_protobuf_build", @@ -8381,10 +8453,10 @@ dependencies = [ "zksync_shared_metrics", "zksync_state", "zksync_storage", - "zksync_system_constants", + "zksync_system_constants 0.1.0", "zksync_test_account", - "zksync_types", - "zksync_utils", + "zksync_types 0.1.0", + "zksync_utils 0.1.0", "zksync_web3_decl", ] @@ -8399,7 +8471,21 @@ dependencies = [ "serde_json", "sha2 0.10.8", "thiserror", - "zksync_basic_types", + "zksync_basic_types 0.1.0", +] + +[[package]] +name = "zksync_crypto" +version = "0.1.0" +source = "git+https://github.com/matter-labs/zksync-era.git?rev=47e0a16a99cad735be06aec163a4e39f65a91d9f#47e0a16a99cad735be06aec163a4e39f65a91d9f" +dependencies = [ + "blake2 0.10.6 (registry+https://github.com/rust-lang/crates.io-index)", + "hex", + "once_cell", + "serde", + "sha2 0.10.8", + "thiserror", + "zksync_basic_types 0.1.0 (git+https://github.com/matter-labs/zksync-era.git?rev=47e0a16a99cad735be06aec163a4e39f65a91d9f)", ] [[package]] @@ -8413,19 +8499,24 @@ dependencies = [ "serde", "serde_json", "thiserror", - "zksync_basic_types", - "zksync_utils", + "zksync_basic_types 0.1.0", + "zksync_utils 0.1.0", ] [[package]] -name = "zksync_da_client" +name = "zksync_crypto_primitives" version = "0.1.0" +source = "git+https://github.com/matter-labs/zksync-era.git?rev=47e0a16a99cad735be06aec163a4e39f65a91d9f#47e0a16a99cad735be06aec163a4e39f65a91d9f" dependencies = [ - "tokio", - "uuid", - "zksync_config", - "zksync_object_store", - "zksync_types", + "anyhow", + "hex", + "rand 0.8.5", + "secp256k1", + "serde", + 
"serde_json", + "thiserror", + "zksync_basic_types 0.1.0 (git+https://github.com/matter-labs/zksync-era.git?rev=47e0a16a99cad735be06aec163a4e39f65a91d9f)", + "zksync_utils 0.1.0 (git+https://github.com/matter-labs/zksync-era.git?rev=47e0a16a99cad735be06aec163a4e39f65a91d9f)", ] [[package]] @@ -8450,13 +8541,13 @@ dependencies = [ "vise", "zksync_consensus_roles", "zksync_consensus_storage", - "zksync_contracts", + "zksync_contracts 0.1.0", "zksync_db_connection", "zksync_protobuf", "zksync_protobuf_build", - "zksync_system_constants", - "zksync_types", - "zksync_utils", + "zksync_system_constants 0.1.0", + "zksync_types 0.1.0", + "zksync_utils 0.1.0", ] [[package]] @@ -8473,7 +8564,7 @@ dependencies = [ "tokio", "tracing", "vise", - "zksync_basic_types", + "zksync_basic_types 0.1.0", "zksync_health_check", ] @@ -8484,9 +8575,9 @@ dependencies = [ "anyhow", "envy", "serde", - "zksync_basic_types", - "zksync_config", - "zksync_system_constants", + "zksync_basic_types 0.1.0", + "zksync_config 0.1.0", + "zksync_system_constants 0.1.0", ] [[package]] @@ -8504,10 +8595,10 @@ dependencies = [ "tokio", "tracing", "vise", - "zksync_config", - "zksync_contracts", + "zksync_config 0.1.0", + "zksync_contracts 0.1.0", "zksync_eth_signer", - "zksync_types", + "zksync_types 0.1.0", ] [[package]] @@ -8524,18 +8615,18 @@ dependencies = [ "tokio", "tracing", "vise", - "zksync_config", - "zksync_contracts", + "zksync_config 0.1.0", + "zksync_contracts 0.1.0", "zksync_dal", "zksync_eth_client", "zksync_l1_contract_interface", "zksync_node_fee_model", "zksync_node_test_utils", - "zksync_object_store", + "zksync_object_store 0.1.0", "zksync_prover_interface", "zksync_shared_metrics", - "zksync_types", - "zksync_utils", + "zksync_types 0.1.0", + "zksync_utils 0.1.0", ] [[package]] @@ -8553,7 +8644,7 @@ dependencies = [ "serde_json", "thiserror", "tokio", - "zksync_types", + "zksync_types 0.1.0", ] [[package]] @@ -8566,12 +8657,12 @@ dependencies = [ "tokio", "tracing", "vise", - "zksync_contracts", + "zksync_contracts 0.1.0", "zksync_dal", "zksync_eth_client", "zksync_shared_metrics", - "zksync_system_constants", - "zksync_types", + "zksync_system_constants 0.1.0", + "zksync_types 0.1.0", ] [[package]] @@ -8594,14 +8685,14 @@ dependencies = [ "tracing", "url", "vise", - "vlog", - "zksync_basic_types", + "vlog 0.1.0", + "zksync_basic_types 0.1.0", "zksync_block_reverter", "zksync_commitment_generator", "zksync_concurrency", - "zksync_config", + "zksync_config 0.1.0", "zksync_consensus_roles", - "zksync_contracts", + "zksync_contracts 0.1.0", "zksync_core", "zksync_dal", "zksync_db_connection", @@ -8612,14 +8703,14 @@ dependencies = [ "zksync_node_db_pruner", "zksync_node_fee_model", "zksync_node_genesis", - "zksync_object_store", + "zksync_object_store 0.1.0", "zksync_protobuf_config", "zksync_shared_metrics", "zksync_snapshots_applier", "zksync_state", "zksync_storage", - "zksync_types", - "zksync_utils", + "zksync_types 0.1.0", + "zksync_utils 0.1.0", "zksync_web3_decl", ] @@ -8649,10 +8740,10 @@ dependencies = [ "tokio", "tracing", "vise", - "zksync_config", + "zksync_config 0.1.0", "zksync_dal", "zksync_shared_metrics", - "zksync_types", + "zksync_types 0.1.0", ] [[package]] @@ -8669,7 +8760,7 @@ dependencies = [ "sha2 0.10.8", "sha3 0.10.8", "zksync_prover_interface", - "zksync_types", + "zksync_types 0.1.0", ] [[package]] @@ -8677,7 +8768,7 @@ name = "zksync_mempool" version = "0.1.0" dependencies = [ "tracing", - "zksync_types", + "zksync_types 0.1.0", ] [[package]] @@ -8702,12 +8793,12 @@ dependencies = [ 
"tracing", "tracing-subscriber", "vise", - "zksync_crypto", + "zksync_crypto 0.1.0", "zksync_prover_interface", "zksync_storage", - "zksync_system_constants", - "zksync_types", - "zksync_utils", + "zksync_system_constants 0.1.0", + "zksync_types 0.1.0", + "zksync_utils 0.1.0", ] [[package]] @@ -8716,8 +8807,18 @@ version = "0.1.0" dependencies = [ "criterion", "once_cell", - "zksync_basic_types", - "zksync_crypto", + "zksync_basic_types 0.1.0", + "zksync_crypto 0.1.0", +] + +[[package]] +name = "zksync_mini_merkle_tree" +version = "0.1.0" +source = "git+https://github.com/matter-labs/zksync-era.git?rev=47e0a16a99cad735be06aec163a4e39f65a91d9f#47e0a16a99cad735be06aec163a4e39f65a91d9f" +dependencies = [ + "once_cell", + "zksync_basic_types 0.1.0 (git+https://github.com/matter-labs/zksync-era.git?rev=47e0a16a99cad735be06aec163a4e39f65a91d9f)", + "zksync_crypto 0.1.0 (git+https://github.com/matter-labs/zksync-era.git?rev=47e0a16a99cad735be06aec163a4e39f65a91d9f)", ] [[package]] @@ -8739,7 +8840,7 @@ dependencies = [ "zksync_db_connection", "zksync_health_check", "zksync_node_test_utils", - "zksync_types", + "zksync_types 0.1.0", ] [[package]] @@ -8752,12 +8853,12 @@ dependencies = [ "tokio", "tracing", "vise", - "zksync_config", + "zksync_config 0.1.0", "zksync_dal", "zksync_eth_client", "zksync_node_test_utils", - "zksync_types", - "zksync_utils", + "zksync_types 0.1.0", + "zksync_utils 0.1.0", "zksync_web3_decl", ] @@ -8770,19 +8871,19 @@ dependencies = [ "async-trait", "ctrlc", "futures 0.3.28", + "hyperchain_da", "prometheus_exporter", "prover_dal", "thiserror", "tokio", "tracing", - "vlog", + "vlog 0.1.0", "zksync_circuit_breaker", "zksync_commitment_generator", "zksync_concurrency", - "zksync_config", - "zksync_contracts", + "zksync_config 0.1.0", + "zksync_contracts 0.1.0", "zksync_core", - "zksync_da_client", "zksync_dal", "zksync_db_connection", "zksync_env_config", @@ -8792,13 +8893,13 @@ dependencies = [ "zksync_health_check", "zksync_house_keeper", "zksync_node_fee_model", - "zksync_object_store", + "zksync_object_store 0.1.0", "zksync_proof_data_handler", "zksync_protobuf_config", "zksync_state", "zksync_storage", - "zksync_types", - "zksync_utils", + "zksync_types 0.1.0", + "zksync_utils 0.1.0", "zksync_web3_decl", ] @@ -8813,14 +8914,14 @@ dependencies = [ "tokio", "tracing", "vise", - "zksync_config", - "zksync_contracts", + "zksync_config 0.1.0", + "zksync_contracts 0.1.0", "zksync_dal", "zksync_eth_client", "zksync_merkle_tree", - "zksync_system_constants", - "zksync_types", - "zksync_utils", + "zksync_system_constants 0.1.0", + "zksync_types 0.1.0", + "zksync_utils 0.1.0", ] [[package]] @@ -8828,13 +8929,13 @@ name = "zksync_node_test_utils" version = "0.1.0" dependencies = [ "multivm", - "zksync_contracts", + "zksync_contracts 0.1.0", "zksync_dal", "zksync_merkle_tree", "zksync_node_genesis", - "zksync_system_constants", - "zksync_types", - "zksync_utils", + "zksync_system_constants 0.1.0", + "zksync_types 0.1.0", + "zksync_utils 0.1.0", ] [[package]] @@ -8854,9 +8955,31 @@ dependencies = [ "tokio", "tracing", "vise", - "zksync_config", + "zksync_config 0.1.0", "zksync_protobuf", - "zksync_types", + "zksync_types 0.1.0", +] + +[[package]] +name = "zksync_object_store" +version = "0.1.0" +source = "git+https://github.com/matter-labs/zksync-era.git?rev=47e0a16a99cad735be06aec163a4e39f65a91d9f#47e0a16a99cad735be06aec163a4e39f65a91d9f" +dependencies = [ + "anyhow", + "async-trait", + "bincode", + "flate2", + "google-cloud-auth", + "google-cloud-storage", + "http", + "prost 
0.12.1", + "serde_json", + "tokio", + "tracing", + "vise", + "zksync_config 0.1.0 (git+https://github.com/matter-labs/zksync-era.git?rev=47e0a16a99cad735be06aec163a4e39f65a91d9f)", + "zksync_protobuf", + "zksync_types 0.1.0 (git+https://github.com/matter-labs/zksync-era.git?rev=47e0a16a99cad735be06aec163a4e39f65a91d9f)", ] [[package]] @@ -8867,11 +8990,11 @@ dependencies = [ "axum", "tokio", "tracing", - "zksync_config", + "zksync_config 0.1.0", "zksync_dal", - "zksync_object_store", + "zksync_object_store 0.1.0", "zksync_prover_interface", - "zksync_types", + "zksync_types 0.1.0", ] [[package]] @@ -8920,11 +9043,11 @@ dependencies = [ "rand 0.8.5", "serde_json", "serde_yaml", - "zksync_basic_types", - "zksync_config", + "zksync_basic_types 0.1.0", + "zksync_config 0.1.0", "zksync_protobuf", "zksync_protobuf_build", - "zksync_types", + "zksync_types 0.1.0", ] [[package]] @@ -8938,8 +9061,8 @@ dependencies = [ "serde_with", "strum", "tokio", - "zksync_object_store", - "zksync_types", + "zksync_object_store 0.1.0", + "zksync_types 0.1.0", ] [[package]] @@ -8951,7 +9074,7 @@ dependencies = [ "tokio", "tracing", "vise", - "zksync_utils", + "zksync_utils 0.1.0", ] [[package]] @@ -8965,9 +9088,9 @@ dependencies = [ "tikv-jemallocator", "tokio", "tracing", - "vlog", + "vlog 0.1.0", "zksync_concurrency", - "zksync_config", + "zksync_config 0.1.0", "zksync_consensus_crypto", "zksync_consensus_executor", "zksync_consensus_roles", @@ -8977,8 +9100,8 @@ dependencies = [ "zksync_node_genesis", "zksync_protobuf_config", "zksync_storage", - "zksync_types", - "zksync_utils", + "zksync_types 0.1.0", + "zksync_utils 0.1.0", ] [[package]] @@ -8987,7 +9110,7 @@ version = "0.1.0" dependencies = [ "vise", "zksync_dal", - "zksync_types", + "zksync_types 0.1.0", ] [[package]] @@ -9007,9 +9130,9 @@ dependencies = [ "zksync_dal", "zksync_db_connection", "zksync_health_check", - "zksync_object_store", - "zksync_types", - "zksync_utils", + "zksync_object_store 0.1.0", + "zksync_types 0.1.0", + "zksync_utils 0.1.0", "zksync_web3_decl", ] @@ -9033,8 +9156,8 @@ dependencies = [ "zksync_dal", "zksync_shared_metrics", "zksync_storage", - "zksync_types", - "zksync_utils", + "zksync_types 0.1.0", + "zksync_utils 0.1.0", ] [[package]] @@ -9055,8 +9178,18 @@ name = "zksync_system_constants" version = "0.1.0" dependencies = [ "once_cell", - "zksync_basic_types", - "zksync_utils", + "zksync_basic_types 0.1.0", + "zksync_utils 0.1.0", +] + +[[package]] +name = "zksync_system_constants" +version = "0.1.0" +source = "git+https://github.com/matter-labs/zksync-era.git?rev=47e0a16a99cad735be06aec163a4e39f65a91d9f#47e0a16a99cad735be06aec163a4e39f65a91d9f" +dependencies = [ + "once_cell", + "zksync_basic_types 0.1.0 (git+https://github.com/matter-labs/zksync-era.git?rev=47e0a16a99cad735be06aec163a4e39f65a91d9f)", + "zksync_utils 0.1.0 (git+https://github.com/matter-labs/zksync-era.git?rev=47e0a16a99cad735be06aec163a4e39f65a91d9f)", ] [[package]] @@ -9065,11 +9198,11 @@ version = "0.1.0" dependencies = [ "ethabi", "hex", - "zksync_contracts", + "zksync_contracts 0.1.0", "zksync_eth_signer", - "zksync_system_constants", - "zksync_types", - "zksync_utils", + "zksync_system_constants 0.1.0", + "zksync_types 0.1.0", + "zksync_utils 0.1.0", ] [[package]] @@ -9094,15 +9227,47 @@ dependencies = [ "strum", "thiserror", "tokio", - "zksync_basic_types", - "zksync_config", - "zksync_contracts", - "zksync_crypto_primitives", - "zksync_mini_merkle_tree", + "zksync_basic_types 0.1.0", + "zksync_config 0.1.0", + "zksync_contracts 0.1.0", + 
"zksync_crypto_primitives 0.1.0", + "zksync_mini_merkle_tree 0.1.0", + "zksync_protobuf", + "zksync_protobuf_build", + "zksync_system_constants 0.1.0", + "zksync_utils 0.1.0", +] + +[[package]] +name = "zksync_types" +version = "0.1.0" +source = "git+https://github.com/matter-labs/zksync-era.git?rev=47e0a16a99cad735be06aec163a4e39f65a91d9f#47e0a16a99cad735be06aec163a4e39f65a91d9f" +dependencies = [ + "anyhow", + "blake2 0.10.6 (registry+https://github.com/rust-lang/crates.io-index)", + "chrono", + "derive_more", + "hex", + "itertools 0.10.5", + "num", + "num_enum 0.7.2", + "once_cell", + "prost 0.12.1", + "rlp", + "secp256k1", + "serde", + "serde_json", + "strum", + "thiserror", + "zksync_basic_types 0.1.0 (git+https://github.com/matter-labs/zksync-era.git?rev=47e0a16a99cad735be06aec163a4e39f65a91d9f)", + "zksync_config 0.1.0 (git+https://github.com/matter-labs/zksync-era.git?rev=47e0a16a99cad735be06aec163a4e39f65a91d9f)", + "zksync_contracts 0.1.0 (git+https://github.com/matter-labs/zksync-era.git?rev=47e0a16a99cad735be06aec163a4e39f65a91d9f)", + "zksync_crypto_primitives 0.1.0 (git+https://github.com/matter-labs/zksync-era.git?rev=47e0a16a99cad735be06aec163a4e39f65a91d9f)", + "zksync_mini_merkle_tree 0.1.0 (git+https://github.com/matter-labs/zksync-era.git?rev=47e0a16a99cad735be06aec163a4e39f65a91d9f)", "zksync_protobuf", "zksync_protobuf_build", - "zksync_system_constants", - "zksync_utils", + "zksync_system_constants 0.1.0 (git+https://github.com/matter-labs/zksync-era.git?rev=47e0a16a99cad735be06aec163a4e39f65a91d9f)", + "zksync_utils 0.1.0 (git+https://github.com/matter-labs/zksync-era.git?rev=47e0a16a99cad735be06aec163a4e39f65a91d9f)", ] [[package]] @@ -9122,9 +9287,30 @@ dependencies = [ "thiserror", "tokio", "tracing", - "vlog", + "vlog 0.1.0", + "zk_evm 1.3.3 (git+https://github.com/matter-labs/era-zk_evm.git?tag=v1.3.3-rc2)", + "zksync_basic_types 0.1.0", +] + +[[package]] +name = "zksync_utils" +version = "0.1.0" +source = "git+https://github.com/matter-labs/zksync-era.git?rev=47e0a16a99cad735be06aec163a4e39f65a91d9f#47e0a16a99cad735be06aec163a4e39f65a91d9f" +dependencies = [ + "anyhow", + "bigdecimal", + "futures 0.3.28", + "hex", + "itertools 0.10.5", + "num", + "reqwest", + "serde", + "thiserror", + "tokio", + "tracing", + "vlog 0.1.0 (git+https://github.com/matter-labs/zksync-era.git?rev=47e0a16a99cad735be06aec163a4e39f65a91d9f)", "zk_evm 1.3.3 (git+https://github.com/matter-labs/era-zk_evm.git?tag=v1.3.3-rc2)", - "zksync_basic_types", + "zksync_basic_types 0.1.0 (git+https://github.com/matter-labs/zksync-era.git?rev=47e0a16a99cad735be06aec163a4e39f65a91d9f)", ] [[package]] @@ -9141,13 +9327,13 @@ dependencies = [ "tokio", "tracing", "vm_utils", - "zksync_contracts", + "zksync_contracts 0.1.0", "zksync_dal", "zksync_node_genesis", "zksync_node_test_utils", "zksync_state", "zksync_storage", - "zksync_types", + "zksync_types 0.1.0", ] [[package]] @@ -9169,8 +9355,8 @@ dependencies = [ "tokio", "tracing", "vise", - "zksync_config", - "zksync_types", + "zksync_config 0.1.0", + "zksync_types 0.1.0", ] [[package]] diff --git a/Cargo.toml b/Cargo.toml index f83bbd95295e..fd7684de6c12 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -35,7 +35,6 @@ members = [ "core/lib/dal", "core/lib/env_config", "core/lib/eth_client", - "core/lib/da_client", "core/lib/eth_signer", "core/lib/l1_contract_interface", "core/lib/mempool", @@ -189,6 +188,7 @@ zksync_consensus_storage = { version = "0.1.0", git = "https://github.com/matter zksync_consensus_utils = { version = "0.1.0", git = 
"https://github.com/matter-labs/era-consensus.git", rev = "92ecb2d5d65e3bc4a883dacd18d0640e86576c8c" } zksync_protobuf = { version = "0.1.0", git = "https://github.com/matter-labs/era-consensus.git", rev = "92ecb2d5d65e3bc4a883dacd18d0640e86576c8c" } zksync_protobuf_build = { version = "0.1.0", git = "https://github.com/matter-labs/era-consensus.git", rev = "92ecb2d5d65e3bc4a883dacd18d0640e86576c8c" } +zksync_da = { package = "hyperchain_da", git = "https://github.com/matter-labs/hyperchain-da.git", rev = "641bda157f1d0cdc3455f3a825ea19914549bb9c" } # "Local" dependencies multivm = { path = "core/lib/multivm" } @@ -208,7 +208,6 @@ zksync_dal = { path = "core/lib/dal" } zksync_db_connection = { path = "core/lib/db_connection" } zksync_env_config = { path = "core/lib/env_config" } zksync_eth_client = { path = "core/lib/eth_client" } -zksync_da_client = { path = "core/lib/da_client" } zksync_eth_signer = { path = "core/lib/eth_signer" } zksync_health_check = { path = "core/lib/health_check" } zksync_l1_contract_interface = { path = "core/lib/l1_contract_interface" } diff --git a/core/lib/config/src/configs/da_dispatcher.rs b/core/lib/config/src/configs/da_dispatcher.rs index 1dddf94da823..99bbf2d79b95 100644 --- a/core/lib/config/src/configs/da_dispatcher.rs +++ b/core/lib/config/src/configs/da_dispatcher.rs @@ -2,20 +2,21 @@ use serde::Deserialize; use crate::ObjectStoreConfig; -#[derive(Clone, Debug, PartialEq)] +#[derive(Clone, Debug, PartialEq, Deserialize)] pub struct DALayerInfo { pub name: String, + #[serde(default)] pub private_key: Vec, } -#[derive(Clone, Debug, PartialEq)] +#[derive(Clone, Debug, PartialEq, Deserialize)] pub enum DataAvailabilityMode { DALayer(DALayerInfo), GCS(ObjectStoreConfig), NoDA, } -#[derive(Debug, Clone, Deserialize, PartialEq)] +#[derive(Debug, Clone, PartialEq, Deserialize)] pub struct DADispatcherConfig { pub mode: DataAvailabilityMode, } diff --git a/core/lib/da_client/Cargo.toml b/core/lib/da_client/Cargo.toml deleted file mode 100644 index 223c2315c289..000000000000 --- a/core/lib/da_client/Cargo.toml +++ /dev/null @@ -1,21 +0,0 @@ -[package] -name = "zksync_da_client" -version.workspace = true -edition.workspace = true -authors.workspace = true -homepage.workspace = true -repository.workspace = true -license.workspace = true -keywords.workspace = true -categories.workspace = true - -# See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html - -[dependencies] -zksync_object_store.workspace = true -zksync_config.workspace = true -zksync_types.workspace = true -uuid = { version = "1.5.0", features = ["v4"] } - -[dev-dependencies] -tokio = { workspace = true, features = ["full"] } \ No newline at end of file diff --git a/core/lib/da_client/src/clients/gcs/mod.rs b/core/lib/da_client/src/clients/gcs/mod.rs deleted file mode 100644 index 41cf0e05f886..000000000000 --- a/core/lib/da_client/src/clients/gcs/mod.rs +++ /dev/null @@ -1,57 +0,0 @@ -use std::{ - fmt, - fmt::{Debug, Formatter}, - sync::Arc, -}; - -use zksync_config::ObjectStoreConfig; -use zksync_object_store::{ObjectStore, ObjectStoreError, ObjectStoreFactory}; -use zksync_types::{pubdata_da::StorablePubdata, L1BatchNumber}; - -use crate::{ - types::{DispatchResponse, InclusionData}, - DataAvailabilityInterface, -}; - -struct GCSDAClient { - object_store: Arc, -} - -impl GCSDAClient { - pub fn new(object_store_conf: ObjectStoreConfig) -> Self { - GCSDAClient { - object_store: ObjectStoreFactory::create_from_config(&object_store_conf), - } - } -} - -impl 
DataAvailabilityInterface for GCSDAClient { - async fn dispatch_blob( - &self, - batch_number: L1BatchNumber, - data: Vec, - ) -> Result { - let key = self - .object_store - .put(batch_number, &StorablePubdata { data }) - .await - .unwrap(); - - Ok(DispatchResponse { - blob_id: key.into_bytes(), - }) - } - - fn get_inclusion_data(&self, _: Vec) -> Result { - return Ok(InclusionData::default()); - } -} - -impl Debug for GCSDAClient { - fn fmt(&self, formatter: &mut Formatter<'_>) -> fmt::Result { - formatter - .debug_struct("GCSDAClient") - .field("object_store", &self.object_store) - .finish() - } -} diff --git a/core/lib/da_client/src/clients/mod.rs b/core/lib/da_client/src/clients/mod.rs deleted file mode 100644 index 13a1984c05cd..000000000000 --- a/core/lib/da_client/src/clients/mod.rs +++ /dev/null @@ -1 +0,0 @@ -mod gcs; diff --git a/core/lib/da_client/src/lib.rs b/core/lib/da_client/src/lib.rs deleted file mode 100644 index 93766185c619..000000000000 --- a/core/lib/da_client/src/lib.rs +++ /dev/null @@ -1,17 +0,0 @@ -use std::fmt; - -use zksync_types::L1BatchNumber; - -use crate::types::{DispatchResponse, InclusionData}; - -pub mod clients; -mod types; - -pub trait DataAvailabilityInterface: Sync + Send + fmt::Debug { - fn dispatch_blob( - &self, - batch_number: L1BatchNumber, - data: Vec, - ) -> Result; - fn get_inclusion_data(&self, blob_id: Vec) -> Result; -} diff --git a/core/lib/da_client/src/types.rs b/core/lib/da_client/src/types.rs deleted file mode 100644 index 3f35972d84b0..000000000000 --- a/core/lib/da_client/src/types.rs +++ /dev/null @@ -1,11 +0,0 @@ -pub(crate) type Error = String; - -#[derive(Default)] -pub struct DispatchResponse { - pub(crate) blob_id: Vec, -} - -#[derive(Default)] -pub struct InclusionData { - data: Vec, -} diff --git a/core/node/node_framework/Cargo.toml b/core/node/node_framework/Cargo.toml index 4d86e47b742e..ca3f8491d686 100644 --- a/core/node/node_framework/Cargo.toml +++ b/core/node/node_framework/Cargo.toml @@ -23,7 +23,6 @@ zksync_object_store.workspace = true zksync_core.workspace = true zksync_storage.workspace = true zksync_eth_client.workspace = true -zksync_da_client.workspace = true zksync_contracts.workspace = true zksync_web3_decl.workspace = true zksync_utils.workspace = true @@ -35,6 +34,7 @@ zksync_commitment_generator.workspace = true zksync_house_keeper.workspace = true zksync_node_fee_model.workspace = true zksync_eth_sender.workspace = true +zksync_da.workspace = true tracing.workspace = true thiserror.workspace = true diff --git a/core/node/node_framework/src/implementations/layers/da_dispatcher.rs b/core/node/node_framework/src/implementations/layers/da_dispatcher.rs index 9cf43cad117d..72a7a7034f81 100644 --- a/core/node/node_framework/src/implementations/layers/da_dispatcher.rs +++ b/core/node/node_framework/src/implementations/layers/da_dispatcher.rs @@ -1,44 +1,26 @@ -use std::sync::Arc; - -use anyhow::Context; -use zksync_circuit_breaker::l1_txs::FailedL1TransactionChecker; -use zksync_config::configs::{chain::L1BatchCommitDataGeneratorMode, eth_sender::EthConfig}; -use zksync_eth_client::BoundEthInterface; -use zksync_eth_sender::{ - l1_batch_commit_data_generator::{ - L1BatchCommitDataGenerator, RollupModeL1BatchCommitDataGenerator, - ValidiumModeL1BatchCommitDataGenerator, - }, - Aggregator, EthTxAggregator, EthTxManager, +use zksync_config::configs::{ + chain::L1BatchCommitDataGeneratorMode, da_dispatcher::DADispatcherConfig, }; use crate::{ - implementations::resources::{ - 
circuit_breakers::CircuitBreakersResource, - da_interface::DAInterfaceResource, - eth_interface::{BoundEthInterfaceForBlobsResource, BoundEthInterfaceResource}, - l1_tx_params::L1TxParamsResource, - object_store::ObjectStoreResource, - pools::{MasterPool, PoolResource, ReplicaPool}, - }, - service::{ServiceContext, StopReceiver}, - task::Task, + implementations::resources::pools::{MasterPool, PoolResource}, + service::ServiceContext, wiring_layer::{WiringError, WiringLayer}, }; #[derive(Debug)] pub struct DataAvailabilityDispatcherLayer { - eth_sender_config: EthConfig, + da_config: DADispatcherConfig, l1_batch_commit_data_generator_mode: L1BatchCommitDataGeneratorMode, } impl DataAvailabilityDispatcherLayer { pub fn new( - eth_sender_config: EthConfig, + da_config: DADispatcherConfig, l1_batch_commit_data_generator_mode: L1BatchCommitDataGeneratorMode, ) -> Self { Self { - eth_sender_config, + da_config, l1_batch_commit_data_generator_mode, } } @@ -54,7 +36,7 @@ impl WiringLayer for DataAvailabilityDispatcherLayer { let master_pool_resource = context.get_resource::>().await?; let master_pool = master_pool_resource.get().await.unwrap(); - let da_client = context.get_resource::().await?.0; + let da_client = zksync_da::new_da_client(self.da_config.clone()); Ok(()) } diff --git a/core/node/node_framework/src/implementations/resources/da_interface.rs b/core/node/node_framework/src/implementations/resources/da_interface.rs index e38f28dc13a6..77db176dc412 100644 --- a/core/node/node_framework/src/implementations/resources/da_interface.rs +++ b/core/node/node_framework/src/implementations/resources/da_interface.rs @@ -1,4 +1,4 @@ -use zksync_da_client::DataAvailabilityInterface; +use zksync_da::DataAvailabilityInterface; use crate::resource::Resource; From 1fedf44e8f4dab52222774af4c0afa05040d79ab Mon Sep 17 00:00:00 2001 From: dimazhornyk Date: Wed, 22 May 2024 10:16:40 +0200 Subject: [PATCH 08/69] feat: basic implementation --- Cargo.lock | 530 ++++++------------ Cargo.toml | 6 +- core/bin/zksync_server/src/main.rs | 5 +- core/lib/config/src/configs/da_dispatcher.rs | 33 +- core/lib/da_client/Cargo.toml | 20 + core/lib/da_client/src/clients/gcs/mod.rs | 58 ++ core/lib/da_client/src/clients/mod.rs | 2 + core/lib/da_client/src/clients/no_da/mod.rs | 39 ++ core/lib/da_client/src/lib.rs | 15 + ...c4018b155242a3ef3da602616a9bce668860b.json | 28 + ...e5c7fd0d06d506d4b6bdaa3b149b44a7b85ac.json | 23 + ...b21029b37b5cf978c25265d93f2280961e6d5.json | 15 + ...6de37201091bfccd3caf922e766896c5a542b.json | 15 + ...e5ed7e700b7c05eb11a85d0ca63bb58d3ddc.json} | 4 +- ...b1e2580f9f0ded642dd3085b9bf8d101bdb15.json | 23 + ...43f1177c1f31a19bc9039f4145367655e5e31.json | 44 ++ ...4_create_data_availability_table.down.sql} | 0 ...114_create_data_availability_table.up.sql} | 4 +- core/lib/dal/src/blocks_dal.rs | 145 ++++- core/lib/dal/src/models/mod.rs | 1 + .../src/models/storage_data_availability.rs | 10 + core/lib/env_config/src/da_dispatcher.rs | 122 ++++ core/lib/env_config/src/lib.rs | 1 + .../structures/commit_batch_info.rs | 2 + core/lib/protobuf_config/src/da_dispatcher.rs | 64 ++- core/lib/protobuf_config/src/general.rs | 3 + .../src/proto/config/da_dispatcher.proto | 3 + .../src/proto/config/general.proto | 2 + core/lib/types/src/commitment/mod.rs | 6 + .../zksync_core/src/temp_config_store/mod.rs | 6 +- core/node/da_dispatcher/Cargo.toml | 25 + core/node/da_dispatcher/src/da_dispatcher.rs | 146 +++++ core/node/da_dispatcher/src/lib.rs | 4 + core/node/da_dispatcher/src/metrics.rs | 26 + 
.../src/l1_gas_price/gas_adjuster/mod.rs | 1 + core/node/node_framework/Cargo.toml | 4 +- .../src/implementations/layers/da_client.rs | 69 --- .../implementations/layers/da_dispatcher.rs | 53 +- .../src/implementations/layers/mod.rs | 1 - .../implementations/resources/da_interface.rs | 12 - .../src/implementations/resources/mod.rs | 1 - 41 files changed, 1089 insertions(+), 482 deletions(-) create mode 100644 core/lib/da_client/Cargo.toml create mode 100644 core/lib/da_client/src/clients/gcs/mod.rs create mode 100644 core/lib/da_client/src/clients/mod.rs create mode 100644 core/lib/da_client/src/clients/no_da/mod.rs create mode 100644 core/lib/da_client/src/lib.rs create mode 100644 core/lib/dal/.sqlx/query-0763b6872f2d611d0247b3b6d8cc4018b155242a3ef3da602616a9bce668860b.json create mode 100644 core/lib/dal/.sqlx/query-16fdd84e9819be6f4e46b1afc5fe5c7fd0d06d506d4b6bdaa3b149b44a7b85ac.json create mode 100644 core/lib/dal/.sqlx/query-4643da2f3085c122a8f62b1f6b9b21029b37b5cf978c25265d93f2280961e6d5.json create mode 100644 core/lib/dal/.sqlx/query-5c99342c4fbf36ccc8e9c9dafc76de37201091bfccd3caf922e766896c5a542b.json rename core/lib/dal/.sqlx/{query-4bdda8d8956ac5dedc3bd389e3721fbe5f9e838b0f1fd6ff906c0130a15f9509.json => query-ca6debc6ab140f26ced294a2ea91e5ed7e700b7c05eb11a85d0ca63bb58d3ddc.json} (82%) create mode 100644 core/lib/dal/.sqlx/query-e05fb44a407c9b0289aaf6f2addb1e2580f9f0ded642dd3085b9bf8d101bdb15.json create mode 100644 core/lib/dal/.sqlx/query-efe8f8cf2594d00266282cfa15343f1177c1f31a19bc9039f4145367655e5e31.json rename core/lib/dal/migrations/{20240508145354_create_data_availability_table.down.sql => 20240522081114_create_data_availability_table.down.sql} (100%) rename core/lib/dal/migrations/{20240508145354_create_data_availability_table.up.sql => 20240522081114_create_data_availability_table.up.sql} (67%) create mode 100644 core/lib/dal/src/models/storage_data_availability.rs create mode 100644 core/lib/env_config/src/da_dispatcher.rs create mode 100644 core/node/da_dispatcher/Cargo.toml create mode 100644 core/node/da_dispatcher/src/da_dispatcher.rs create mode 100644 core/node/da_dispatcher/src/lib.rs create mode 100644 core/node/da_dispatcher/src/metrics.rs delete mode 100644 core/node/node_framework/src/implementations/layers/da_client.rs delete mode 100644 core/node/node_framework/src/implementations/resources/da_interface.rs diff --git a/Cargo.lock b/Cargo.lock index c9bb77513a71..41e721bbcca0 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -668,13 +668,13 @@ dependencies = [ "clap 4.4.6", "serde_json", "tokio", - "vlog 0.1.0", + "vlog", "zksync_block_reverter", - "zksync_config 0.1.0", + "zksync_config", "zksync_dal", "zksync_env_config", - "zksync_object_store 0.1.0", - "zksync_types 0.1.0", + "zksync_object_store", + "zksync_types", ] [[package]] @@ -2357,16 +2357,16 @@ dependencies = [ "serde_yaml", "tokio", "tracing", - "zksync_config 0.1.0", - "zksync_contracts 0.1.0", + "zksync_config", + "zksync_contracts", "zksync_core", "zksync_dal", "zksync_env_config", "zksync_node_genesis", "zksync_protobuf", "zksync_protobuf_config", - "zksync_types 0.1.0", - "zksync_utils 0.1.0", + "zksync_types", + "zksync_utils", ] [[package]] @@ -2837,11 +2837,9 @@ dependencies = [ [[package]] name = "hyperchain_da" version = "0.1.0" -source = "git+https://github.com/matter-labs/hyperchain-da.git?rev=641bda157f1d0cdc3455f3a825ea19914549bb9c#641bda157f1d0cdc3455f3a825ea19914549bb9c" +source = 
"git+https://github.com/matter-labs/hyperchain-da.git?rev=69dc63c9c91553a31c4dd48f47c30ca44433dea3#69dc63c9c91553a31c4dd48f47c30ca44433dea3" dependencies = [ - "zksync_config 0.1.0 (git+https://github.com/matter-labs/zksync-era.git?rev=47e0a16a99cad735be06aec163a4e39f65a91d9f)", - "zksync_object_store 0.1.0 (git+https://github.com/matter-labs/zksync-era.git?rev=47e0a16a99cad735be06aec163a4e39f65a91d9f)", - "zksync_types 0.1.0 (git+https://github.com/matter-labs/zksync-era.git?rev=47e0a16a99cad735be06aec163a4e39f65a91d9f)", + "async-trait", ] [[package]] @@ -3426,14 +3424,14 @@ dependencies = [ "thiserror", "tokio", "tracing", - "vlog 0.1.0", - "zksync_config 0.1.0", - "zksync_contracts 0.1.0", + "vlog", + "zksync_config", + "zksync_contracts", "zksync_eth_client", "zksync_eth_signer", - "zksync_system_constants 0.1.0", - "zksync_types 0.1.0", - "zksync_utils 0.1.0", + "zksync_system_constants", + "zksync_types", + "zksync_utils", "zksync_web3_decl", ] @@ -3587,12 +3585,12 @@ dependencies = [ "anyhow", "clap 4.4.6", "tracing", - "vlog 0.1.0", - "zksync_config 0.1.0", + "vlog", + "zksync_config", "zksync_env_config", "zksync_merkle_tree", "zksync_storage", - "zksync_types 0.1.0", + "zksync_types", ] [[package]] @@ -3759,13 +3757,13 @@ dependencies = [ "zk_evm 1.4.0", "zk_evm 1.4.1", "zk_evm 1.5.0", - "zksync_contracts 0.1.0", + "zksync_contracts", "zksync_eth_signer", "zksync_state", - "zksync_system_constants 0.1.0", + "zksync_system_constants", "zksync_test_account", - "zksync_types 0.1.0", - "zksync_utils 0.1.0", + "zksync_types", + "zksync_utils", ] [[package]] @@ -4852,7 +4850,7 @@ version = "0.1.0" dependencies = [ "sqlx", "strum", - "zksync_basic_types 0.1.0", + "zksync_basic_types", "zksync_db_connection", ] @@ -6034,12 +6032,12 @@ dependencies = [ "tokio", "tracing", "vise", - "vlog 0.1.0", - "zksync_config 0.1.0", + "vlog", + "zksync_config", "zksync_dal", "zksync_env_config", - "zksync_object_store 0.1.0", - "zksync_types 0.1.0", + "zksync_object_store", + "zksync_types", ] [[package]] @@ -6525,10 +6523,10 @@ dependencies = [ "once_cell", "serde", "serde_json", - "zksync_contracts 0.1.0", + "zksync_contracts", "zksync_state", - "zksync_types 0.1.0", - "zksync_utils 0.1.0", + "zksync_types", + "zksync_utils", ] [[package]] @@ -7266,10 +7264,10 @@ dependencies = [ "anyhow", "serde_json", "tokio", - "zksync_config 0.1.0", + "zksync_config", "zksync_dal", "zksync_env_config", - "zksync_types 0.1.0", + "zksync_types", ] [[package]] @@ -7329,22 +7327,6 @@ dependencies = [ "tracing-subscriber", ] -[[package]] -name = "vlog" -version = "0.1.0" -source = "git+https://github.com/matter-labs/zksync-era.git?rev=47e0a16a99cad735be06aec163a4e39f65a91d9f#47e0a16a99cad735be06aec163a4e39f65a91d9f" -dependencies = [ - "chrono", - "opentelemetry", - "opentelemetry-otlp", - "opentelemetry-semantic-conventions", - "sentry", - "serde_json", - "tracing", - "tracing-opentelemetry", - "tracing-subscriber", -] - [[package]] name = "vm-benchmark" version = "0.1.0" @@ -7364,11 +7346,11 @@ dependencies = [ "multivm", "once_cell", "zk_evm 1.3.3 (git+https://github.com/matter-labs/era-zk_evm.git?tag=v1.3.3-rc2)", - "zksync_contracts 0.1.0", + "zksync_contracts", "zksync_state", - "zksync_system_constants 0.1.0", - "zksync_types 0.1.0", - "zksync_utils 0.1.0", + "zksync_system_constants", + "zksync_types", + "zksync_utils", ] [[package]] @@ -7379,11 +7361,11 @@ dependencies = [ "multivm", "tokio", "tracing", - "zksync_contracts 0.1.0", + "zksync_contracts", "zksync_dal", "zksync_state", - "zksync_types 0.1.0", - 
"zksync_utils 0.1.0", + "zksync_types", + "zksync_utils", ] [[package]] @@ -8059,24 +8041,6 @@ dependencies = [ "url", ] -[[package]] -name = "zksync_basic_types" -version = "0.1.0" -source = "git+https://github.com/matter-labs/zksync-era.git?rev=47e0a16a99cad735be06aec163a4e39f65a91d9f#47e0a16a99cad735be06aec163a4e39f65a91d9f" -dependencies = [ - "anyhow", - "chrono", - "ethabi", - "hex", - "num_enum 0.7.2", - "serde", - "serde_json", - "strum", - "thiserror", - "tiny-keccak 2.0.2", - "url", -] - [[package]] name = "zksync_block_reverter" version = "0.1.0" @@ -8088,15 +8052,15 @@ dependencies = [ "test-casing", "tokio", "tracing", - "zksync_config 0.1.0", - "zksync_contracts 0.1.0", + "zksync_config", + "zksync_contracts", "zksync_dal", "zksync_eth_client", "zksync_merkle_tree", - "zksync_object_store 0.1.0", + "zksync_object_store", "zksync_state", "zksync_storage", - "zksync_types 0.1.0", + "zksync_types", ] [[package]] @@ -8110,7 +8074,7 @@ dependencies = [ "tokio", "tracing", "vise", - "zksync_config 0.1.0", + "zksync_config", "zksync_dal", ] @@ -8134,8 +8098,8 @@ dependencies = [ "zksync_dal", "zksync_health_check", "zksync_l1_contract_interface", - "zksync_types 0.1.0", - "zksync_utils 0.1.0", + "zksync_types", + "zksync_utils", ] [[package]] @@ -8164,23 +8128,9 @@ dependencies = [ "rand 0.8.5", "serde", "url", - "zksync_basic_types 0.1.0", - "zksync_consensus_utils", - "zksync_crypto_primitives 0.1.0", -] - -[[package]] -name = "zksync_config" -version = "0.1.0" -source = "git+https://github.com/matter-labs/zksync-era.git?rev=47e0a16a99cad735be06aec163a4e39f65a91d9f#47e0a16a99cad735be06aec163a4e39f65a91d9f" -dependencies = [ - "anyhow", - "rand 0.8.5", - "serde", - "url", - "zksync_basic_types 0.1.0 (git+https://github.com/matter-labs/zksync-era.git?rev=47e0a16a99cad735be06aec163a4e39f65a91d9f)", + "zksync_basic_types", "zksync_consensus_utils", - "zksync_crypto_primitives 0.1.0 (git+https://github.com/matter-labs/zksync-era.git?rev=47e0a16a99cad735be06aec163a4e39f65a91d9f)", + "zksync_crypto_primitives", ] [[package]] @@ -8339,14 +8289,14 @@ dependencies = [ "thiserror", "tokio", "tracing", - "vlog 0.1.0", - "zksync_config 0.1.0", - "zksync_contracts 0.1.0", + "vlog", + "zksync_config", + "zksync_contracts", "zksync_dal", "zksync_env_config", "zksync_queued_job_processor", - "zksync_types 0.1.0", - "zksync_utils 0.1.0", + "zksync_types", + "zksync_utils", ] [[package]] @@ -8359,21 +8309,7 @@ dependencies = [ "once_cell", "serde", "serde_json", - "zksync_utils 0.1.0", -] - -[[package]] -name = "zksync_contracts" -version = "0.1.0" -source = "git+https://github.com/matter-labs/zksync-era.git?rev=47e0a16a99cad735be06aec163a4e39f65a91d9f#47e0a16a99cad735be06aec163a4e39f65a91d9f" -dependencies = [ - "envy", - "ethabi", - "hex", - "once_cell", - "serde", - "serde_json", - "zksync_utils 0.1.0 (git+https://github.com/matter-labs/zksync-era.git?rev=47e0a16a99cad735be06aec163a4e39f65a91d9f)", + "zksync_utils", ] [[package]] @@ -8415,12 +8351,12 @@ dependencies = [ "tower-http", "tracing", "vise", - "vlog 0.1.0", + "vlog", "vm_utils", "zksync_circuit_breaker", "zksync_commitment_generator", "zksync_concurrency", - "zksync_config 0.1.0", + "zksync_config", "zksync_consensus_bft", "zksync_consensus_crypto", "zksync_consensus_executor", @@ -8428,7 +8364,7 @@ dependencies = [ "zksync_consensus_roles", "zksync_consensus_storage", "zksync_consensus_utils", - "zksync_contracts 0.1.0", + "zksync_contracts", "zksync_dal", "zksync_db_connection", "zksync_eth_client", @@ -8440,11 +8376,11 @@ 
dependencies = [ "zksync_l1_contract_interface", "zksync_mempool", "zksync_merkle_tree", - "zksync_mini_merkle_tree 0.1.0", + "zksync_mini_merkle_tree", "zksync_node_fee_model", "zksync_node_genesis", "zksync_node_test_utils", - "zksync_object_store 0.1.0", + "zksync_object_store", "zksync_proof_data_handler", "zksync_protobuf", "zksync_protobuf_build", @@ -8453,10 +8389,10 @@ dependencies = [ "zksync_shared_metrics", "zksync_state", "zksync_storage", - "zksync_system_constants 0.1.0", + "zksync_system_constants", "zksync_test_account", - "zksync_types 0.1.0", - "zksync_utils 0.1.0", + "zksync_types", + "zksync_utils", "zksync_web3_decl", ] @@ -8471,21 +8407,7 @@ dependencies = [ "serde_json", "sha2 0.10.8", "thiserror", - "zksync_basic_types 0.1.0", -] - -[[package]] -name = "zksync_crypto" -version = "0.1.0" -source = "git+https://github.com/matter-labs/zksync-era.git?rev=47e0a16a99cad735be06aec163a4e39f65a91d9f#47e0a16a99cad735be06aec163a4e39f65a91d9f" -dependencies = [ - "blake2 0.10.6 (registry+https://github.com/rust-lang/crates.io-index)", - "hex", - "once_cell", - "serde", - "sha2 0.10.8", - "thiserror", - "zksync_basic_types 0.1.0 (git+https://github.com/matter-labs/zksync-era.git?rev=47e0a16a99cad735be06aec163a4e39f65a91d9f)", + "zksync_basic_types", ] [[package]] @@ -8499,24 +8421,35 @@ dependencies = [ "serde", "serde_json", "thiserror", - "zksync_basic_types 0.1.0", - "zksync_utils 0.1.0", + "zksync_basic_types", + "zksync_utils", ] [[package]] -name = "zksync_crypto_primitives" +name = "zksync_da_client" +version = "0.1.0" +dependencies = [ + "async-trait", + "hyperchain_da", + "zksync_config", + "zksync_object_store", + "zksync_types", +] + +[[package]] +name = "zksync_da_dispatcher" version = "0.1.0" -source = "git+https://github.com/matter-labs/zksync-era.git?rev=47e0a16a99cad735be06aec163a4e39f65a91d9f#47e0a16a99cad735be06aec163a4e39f65a91d9f" dependencies = [ "anyhow", - "hex", - "rand 0.8.5", - "secp256k1", - "serde", - "serde_json", - "thiserror", - "zksync_basic_types 0.1.0 (git+https://github.com/matter-labs/zksync-era.git?rev=47e0a16a99cad735be06aec163a4e39f65a91d9f)", - "zksync_utils 0.1.0 (git+https://github.com/matter-labs/zksync-era.git?rev=47e0a16a99cad735be06aec163a4e39f65a91d9f)", + "chrono", + "hyperchain_da", + "tokio", + "tracing", + "vise", + "zksync_config", + "zksync_dal", + "zksync_types", + "zksync_utils", ] [[package]] @@ -8541,13 +8474,13 @@ dependencies = [ "vise", "zksync_consensus_roles", "zksync_consensus_storage", - "zksync_contracts 0.1.0", + "zksync_contracts", "zksync_db_connection", "zksync_protobuf", "zksync_protobuf_build", - "zksync_system_constants 0.1.0", - "zksync_types 0.1.0", - "zksync_utils 0.1.0", + "zksync_system_constants", + "zksync_types", + "zksync_utils", ] [[package]] @@ -8564,7 +8497,7 @@ dependencies = [ "tokio", "tracing", "vise", - "zksync_basic_types 0.1.0", + "zksync_basic_types", "zksync_health_check", ] @@ -8575,9 +8508,9 @@ dependencies = [ "anyhow", "envy", "serde", - "zksync_basic_types 0.1.0", - "zksync_config 0.1.0", - "zksync_system_constants 0.1.0", + "zksync_basic_types", + "zksync_config", + "zksync_system_constants", ] [[package]] @@ -8595,10 +8528,10 @@ dependencies = [ "tokio", "tracing", "vise", - "zksync_config 0.1.0", - "zksync_contracts 0.1.0", + "zksync_config", + "zksync_contracts", "zksync_eth_signer", - "zksync_types 0.1.0", + "zksync_types", ] [[package]] @@ -8615,18 +8548,18 @@ dependencies = [ "tokio", "tracing", "vise", - "zksync_config 0.1.0", - "zksync_contracts 0.1.0", + "zksync_config", + 
"zksync_contracts", "zksync_dal", "zksync_eth_client", "zksync_l1_contract_interface", "zksync_node_fee_model", "zksync_node_test_utils", - "zksync_object_store 0.1.0", + "zksync_object_store", "zksync_prover_interface", "zksync_shared_metrics", - "zksync_types 0.1.0", - "zksync_utils 0.1.0", + "zksync_types", + "zksync_utils", ] [[package]] @@ -8644,7 +8577,7 @@ dependencies = [ "serde_json", "thiserror", "tokio", - "zksync_types 0.1.0", + "zksync_types", ] [[package]] @@ -8657,12 +8590,12 @@ dependencies = [ "tokio", "tracing", "vise", - "zksync_contracts 0.1.0", + "zksync_contracts", "zksync_dal", "zksync_eth_client", "zksync_shared_metrics", - "zksync_system_constants 0.1.0", - "zksync_types 0.1.0", + "zksync_system_constants", + "zksync_types", ] [[package]] @@ -8685,14 +8618,14 @@ dependencies = [ "tracing", "url", "vise", - "vlog 0.1.0", - "zksync_basic_types 0.1.0", + "vlog", + "zksync_basic_types", "zksync_block_reverter", "zksync_commitment_generator", "zksync_concurrency", - "zksync_config 0.1.0", + "zksync_config", "zksync_consensus_roles", - "zksync_contracts 0.1.0", + "zksync_contracts", "zksync_core", "zksync_dal", "zksync_db_connection", @@ -8703,14 +8636,14 @@ dependencies = [ "zksync_node_db_pruner", "zksync_node_fee_model", "zksync_node_genesis", - "zksync_object_store 0.1.0", + "zksync_object_store", "zksync_protobuf_config", "zksync_shared_metrics", "zksync_snapshots_applier", "zksync_state", "zksync_storage", - "zksync_types 0.1.0", - "zksync_utils 0.1.0", + "zksync_types", + "zksync_utils", "zksync_web3_decl", ] @@ -8740,10 +8673,10 @@ dependencies = [ "tokio", "tracing", "vise", - "zksync_config 0.1.0", + "zksync_config", "zksync_dal", "zksync_shared_metrics", - "zksync_types 0.1.0", + "zksync_types", ] [[package]] @@ -8760,7 +8693,7 @@ dependencies = [ "sha2 0.10.8", "sha3 0.10.8", "zksync_prover_interface", - "zksync_types 0.1.0", + "zksync_types", ] [[package]] @@ -8768,7 +8701,7 @@ name = "zksync_mempool" version = "0.1.0" dependencies = [ "tracing", - "zksync_types 0.1.0", + "zksync_types", ] [[package]] @@ -8793,12 +8726,12 @@ dependencies = [ "tracing", "tracing-subscriber", "vise", - "zksync_crypto 0.1.0", + "zksync_crypto", "zksync_prover_interface", "zksync_storage", - "zksync_system_constants 0.1.0", - "zksync_types 0.1.0", - "zksync_utils 0.1.0", + "zksync_system_constants", + "zksync_types", + "zksync_utils", ] [[package]] @@ -8807,18 +8740,8 @@ version = "0.1.0" dependencies = [ "criterion", "once_cell", - "zksync_basic_types 0.1.0", - "zksync_crypto 0.1.0", -] - -[[package]] -name = "zksync_mini_merkle_tree" -version = "0.1.0" -source = "git+https://github.com/matter-labs/zksync-era.git?rev=47e0a16a99cad735be06aec163a4e39f65a91d9f#47e0a16a99cad735be06aec163a4e39f65a91d9f" -dependencies = [ - "once_cell", - "zksync_basic_types 0.1.0 (git+https://github.com/matter-labs/zksync-era.git?rev=47e0a16a99cad735be06aec163a4e39f65a91d9f)", - "zksync_crypto 0.1.0 (git+https://github.com/matter-labs/zksync-era.git?rev=47e0a16a99cad735be06aec163a4e39f65a91d9f)", + "zksync_basic_types", + "zksync_crypto", ] [[package]] @@ -8840,7 +8763,7 @@ dependencies = [ "zksync_db_connection", "zksync_health_check", "zksync_node_test_utils", - "zksync_types 0.1.0", + "zksync_types", ] [[package]] @@ -8853,12 +8776,12 @@ dependencies = [ "tokio", "tracing", "vise", - "zksync_config 0.1.0", + "zksync_config", "zksync_dal", "zksync_eth_client", "zksync_node_test_utils", - "zksync_types 0.1.0", - "zksync_utils 0.1.0", + "zksync_types", + "zksync_utils", "zksync_web3_decl", ] @@ 
-8877,13 +8800,15 @@ dependencies = [ "thiserror", "tokio", "tracing", - "vlog 0.1.0", + "vlog", "zksync_circuit_breaker", "zksync_commitment_generator", "zksync_concurrency", - "zksync_config 0.1.0", - "zksync_contracts 0.1.0", + "zksync_config", + "zksync_contracts", "zksync_core", + "zksync_da_client", + "zksync_da_dispatcher", "zksync_dal", "zksync_db_connection", "zksync_env_config", @@ -8893,13 +8818,13 @@ dependencies = [ "zksync_health_check", "zksync_house_keeper", "zksync_node_fee_model", - "zksync_object_store 0.1.0", + "zksync_object_store", "zksync_proof_data_handler", "zksync_protobuf_config", "zksync_state", "zksync_storage", - "zksync_types 0.1.0", - "zksync_utils 0.1.0", + "zksync_types", + "zksync_utils", "zksync_web3_decl", ] @@ -8914,14 +8839,14 @@ dependencies = [ "tokio", "tracing", "vise", - "zksync_config 0.1.0", - "zksync_contracts 0.1.0", + "zksync_config", + "zksync_contracts", "zksync_dal", "zksync_eth_client", "zksync_merkle_tree", - "zksync_system_constants 0.1.0", - "zksync_types 0.1.0", - "zksync_utils 0.1.0", + "zksync_system_constants", + "zksync_types", + "zksync_utils", ] [[package]] @@ -8929,13 +8854,13 @@ name = "zksync_node_test_utils" version = "0.1.0" dependencies = [ "multivm", - "zksync_contracts 0.1.0", + "zksync_contracts", "zksync_dal", "zksync_merkle_tree", "zksync_node_genesis", - "zksync_system_constants 0.1.0", - "zksync_types 0.1.0", - "zksync_utils 0.1.0", + "zksync_system_constants", + "zksync_types", + "zksync_utils", ] [[package]] @@ -8955,31 +8880,9 @@ dependencies = [ "tokio", "tracing", "vise", - "zksync_config 0.1.0", + "zksync_config", "zksync_protobuf", - "zksync_types 0.1.0", -] - -[[package]] -name = "zksync_object_store" -version = "0.1.0" -source = "git+https://github.com/matter-labs/zksync-era.git?rev=47e0a16a99cad735be06aec163a4e39f65a91d9f#47e0a16a99cad735be06aec163a4e39f65a91d9f" -dependencies = [ - "anyhow", - "async-trait", - "bincode", - "flate2", - "google-cloud-auth", - "google-cloud-storage", - "http", - "prost 0.12.1", - "serde_json", - "tokio", - "tracing", - "vise", - "zksync_config 0.1.0 (git+https://github.com/matter-labs/zksync-era.git?rev=47e0a16a99cad735be06aec163a4e39f65a91d9f)", - "zksync_protobuf", - "zksync_types 0.1.0 (git+https://github.com/matter-labs/zksync-era.git?rev=47e0a16a99cad735be06aec163a4e39f65a91d9f)", + "zksync_types", ] [[package]] @@ -8990,11 +8893,11 @@ dependencies = [ "axum", "tokio", "tracing", - "zksync_config 0.1.0", + "zksync_config", "zksync_dal", - "zksync_object_store 0.1.0", + "zksync_object_store", "zksync_prover_interface", - "zksync_types 0.1.0", + "zksync_types", ] [[package]] @@ -9043,11 +8946,11 @@ dependencies = [ "rand 0.8.5", "serde_json", "serde_yaml", - "zksync_basic_types 0.1.0", - "zksync_config 0.1.0", + "zksync_basic_types", + "zksync_config", "zksync_protobuf", "zksync_protobuf_build", - "zksync_types 0.1.0", + "zksync_types", ] [[package]] @@ -9061,8 +8964,8 @@ dependencies = [ "serde_with", "strum", "tokio", - "zksync_object_store 0.1.0", - "zksync_types 0.1.0", + "zksync_object_store", + "zksync_types", ] [[package]] @@ -9074,7 +8977,7 @@ dependencies = [ "tokio", "tracing", "vise", - "zksync_utils 0.1.0", + "zksync_utils", ] [[package]] @@ -9088,9 +8991,9 @@ dependencies = [ "tikv-jemallocator", "tokio", "tracing", - "vlog 0.1.0", + "vlog", "zksync_concurrency", - "zksync_config 0.1.0", + "zksync_config", "zksync_consensus_crypto", "zksync_consensus_executor", "zksync_consensus_roles", @@ -9100,8 +9003,8 @@ dependencies = [ "zksync_node_genesis", 
"zksync_protobuf_config", "zksync_storage", - "zksync_types 0.1.0", - "zksync_utils 0.1.0", + "zksync_types", + "zksync_utils", ] [[package]] @@ -9110,7 +9013,7 @@ version = "0.1.0" dependencies = [ "vise", "zksync_dal", - "zksync_types 0.1.0", + "zksync_types", ] [[package]] @@ -9130,9 +9033,9 @@ dependencies = [ "zksync_dal", "zksync_db_connection", "zksync_health_check", - "zksync_object_store 0.1.0", - "zksync_types 0.1.0", - "zksync_utils 0.1.0", + "zksync_object_store", + "zksync_types", + "zksync_utils", "zksync_web3_decl", ] @@ -9156,8 +9059,8 @@ dependencies = [ "zksync_dal", "zksync_shared_metrics", "zksync_storage", - "zksync_types 0.1.0", - "zksync_utils 0.1.0", + "zksync_types", + "zksync_utils", ] [[package]] @@ -9178,18 +9081,8 @@ name = "zksync_system_constants" version = "0.1.0" dependencies = [ "once_cell", - "zksync_basic_types 0.1.0", - "zksync_utils 0.1.0", -] - -[[package]] -name = "zksync_system_constants" -version = "0.1.0" -source = "git+https://github.com/matter-labs/zksync-era.git?rev=47e0a16a99cad735be06aec163a4e39f65a91d9f#47e0a16a99cad735be06aec163a4e39f65a91d9f" -dependencies = [ - "once_cell", - "zksync_basic_types 0.1.0 (git+https://github.com/matter-labs/zksync-era.git?rev=47e0a16a99cad735be06aec163a4e39f65a91d9f)", - "zksync_utils 0.1.0 (git+https://github.com/matter-labs/zksync-era.git?rev=47e0a16a99cad735be06aec163a4e39f65a91d9f)", + "zksync_basic_types", + "zksync_utils", ] [[package]] @@ -9198,11 +9091,11 @@ version = "0.1.0" dependencies = [ "ethabi", "hex", - "zksync_contracts 0.1.0", + "zksync_contracts", "zksync_eth_signer", - "zksync_system_constants 0.1.0", - "zksync_types 0.1.0", - "zksync_utils 0.1.0", + "zksync_system_constants", + "zksync_types", + "zksync_utils", ] [[package]] @@ -9227,47 +9120,15 @@ dependencies = [ "strum", "thiserror", "tokio", - "zksync_basic_types 0.1.0", - "zksync_config 0.1.0", - "zksync_contracts 0.1.0", - "zksync_crypto_primitives 0.1.0", - "zksync_mini_merkle_tree 0.1.0", + "zksync_basic_types", + "zksync_config", + "zksync_contracts", + "zksync_crypto_primitives", + "zksync_mini_merkle_tree", "zksync_protobuf", "zksync_protobuf_build", - "zksync_system_constants 0.1.0", - "zksync_utils 0.1.0", -] - -[[package]] -name = "zksync_types" -version = "0.1.0" -source = "git+https://github.com/matter-labs/zksync-era.git?rev=47e0a16a99cad735be06aec163a4e39f65a91d9f#47e0a16a99cad735be06aec163a4e39f65a91d9f" -dependencies = [ - "anyhow", - "blake2 0.10.6 (registry+https://github.com/rust-lang/crates.io-index)", - "chrono", - "derive_more", - "hex", - "itertools 0.10.5", - "num", - "num_enum 0.7.2", - "once_cell", - "prost 0.12.1", - "rlp", - "secp256k1", - "serde", - "serde_json", - "strum", - "thiserror", - "zksync_basic_types 0.1.0 (git+https://github.com/matter-labs/zksync-era.git?rev=47e0a16a99cad735be06aec163a4e39f65a91d9f)", - "zksync_config 0.1.0 (git+https://github.com/matter-labs/zksync-era.git?rev=47e0a16a99cad735be06aec163a4e39f65a91d9f)", - "zksync_contracts 0.1.0 (git+https://github.com/matter-labs/zksync-era.git?rev=47e0a16a99cad735be06aec163a4e39f65a91d9f)", - "zksync_crypto_primitives 0.1.0 (git+https://github.com/matter-labs/zksync-era.git?rev=47e0a16a99cad735be06aec163a4e39f65a91d9f)", - "zksync_mini_merkle_tree 0.1.0 (git+https://github.com/matter-labs/zksync-era.git?rev=47e0a16a99cad735be06aec163a4e39f65a91d9f)", - "zksync_protobuf", - "zksync_protobuf_build", - "zksync_system_constants 0.1.0 (git+https://github.com/matter-labs/zksync-era.git?rev=47e0a16a99cad735be06aec163a4e39f65a91d9f)", - 
"zksync_utils 0.1.0 (git+https://github.com/matter-labs/zksync-era.git?rev=47e0a16a99cad735be06aec163a4e39f65a91d9f)", + "zksync_system_constants", + "zksync_utils", ] [[package]] @@ -9287,30 +9148,9 @@ dependencies = [ "thiserror", "tokio", "tracing", - "vlog 0.1.0", - "zk_evm 1.3.3 (git+https://github.com/matter-labs/era-zk_evm.git?tag=v1.3.3-rc2)", - "zksync_basic_types 0.1.0", -] - -[[package]] -name = "zksync_utils" -version = "0.1.0" -source = "git+https://github.com/matter-labs/zksync-era.git?rev=47e0a16a99cad735be06aec163a4e39f65a91d9f#47e0a16a99cad735be06aec163a4e39f65a91d9f" -dependencies = [ - "anyhow", - "bigdecimal", - "futures 0.3.28", - "hex", - "itertools 0.10.5", - "num", - "reqwest", - "serde", - "thiserror", - "tokio", - "tracing", - "vlog 0.1.0 (git+https://github.com/matter-labs/zksync-era.git?rev=47e0a16a99cad735be06aec163a4e39f65a91d9f)", + "vlog", "zk_evm 1.3.3 (git+https://github.com/matter-labs/era-zk_evm.git?tag=v1.3.3-rc2)", - "zksync_basic_types 0.1.0 (git+https://github.com/matter-labs/zksync-era.git?rev=47e0a16a99cad735be06aec163a4e39f65a91d9f)", + "zksync_basic_types", ] [[package]] @@ -9327,13 +9167,13 @@ dependencies = [ "tokio", "tracing", "vm_utils", - "zksync_contracts 0.1.0", + "zksync_contracts", "zksync_dal", "zksync_node_genesis", "zksync_node_test_utils", "zksync_state", "zksync_storage", - "zksync_types 0.1.0", + "zksync_types", ] [[package]] @@ -9355,8 +9195,8 @@ dependencies = [ "tokio", "tracing", "vise", - "zksync_config 0.1.0", - "zksync_types 0.1.0", + "zksync_config", + "zksync_types", ] [[package]] diff --git a/Cargo.toml b/Cargo.toml index fd7684de6c12..93ce5fb71acf 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -20,6 +20,7 @@ members = [ "core/node/shared_metrics", "core/node/db_pruner", "core/node/fee_model", + "core/node/da_dispatcher", "core/node/eth_sender", "core/node/vm_runner", "core/node/test_utils", @@ -34,6 +35,7 @@ members = [ "core/lib/circuit_breaker", "core/lib/dal", "core/lib/env_config", + "core/lib/da_client", "core/lib/eth_client", "core/lib/eth_signer", "core/lib/l1_contract_interface", @@ -188,7 +190,7 @@ zksync_consensus_storage = { version = "0.1.0", git = "https://github.com/matter zksync_consensus_utils = { version = "0.1.0", git = "https://github.com/matter-labs/era-consensus.git", rev = "92ecb2d5d65e3bc4a883dacd18d0640e86576c8c" } zksync_protobuf = { version = "0.1.0", git = "https://github.com/matter-labs/era-consensus.git", rev = "92ecb2d5d65e3bc4a883dacd18d0640e86576c8c" } zksync_protobuf_build = { version = "0.1.0", git = "https://github.com/matter-labs/era-consensus.git", rev = "92ecb2d5d65e3bc4a883dacd18d0640e86576c8c" } -zksync_da = { package = "hyperchain_da", git = "https://github.com/matter-labs/hyperchain-da.git", rev = "641bda157f1d0cdc3455f3a825ea19914549bb9c" } +zksync_da_layers = { package = "hyperchain_da", git = "https://github.com/matter-labs/hyperchain-da.git", rev = "69dc63c9c91553a31c4dd48f47c30ca44433dea3" } # "Local" dependencies multivm = { path = "core/lib/multivm" } @@ -208,6 +210,7 @@ zksync_dal = { path = "core/lib/dal" } zksync_db_connection = { path = "core/lib/db_connection" } zksync_env_config = { path = "core/lib/env_config" } zksync_eth_client = { path = "core/lib/eth_client" } +zksync_da_client = { path = "core/lib/da_client" } zksync_eth_signer = { path = "core/lib/eth_signer" } zksync_health_check = { path = "core/lib/health_check" } zksync_l1_contract_interface = { path = "core/lib/l1_contract_interface" } @@ -237,6 +240,7 @@ zksync_block_reverter = { path = 
"core/node/block_reverter" } zksync_commitment_generator = { path = "core/node/commitment_generator" } zksync_house_keeper = { path = "core/node/house_keeper" } zksync_node_genesis = { path = "core/node/genesis" } +zksync_da_dispatcher = { path = "core/node/da_dispatcher" } zksync_eth_sender = { path = "core/node/eth_sender" } zksync_node_db_pruner = { path = "core/node/db_pruner" } zksync_node_fee_model = { path = "core/node/fee_model" } diff --git a/core/bin/zksync_server/src/main.rs b/core/bin/zksync_server/src/main.rs index d9a3246a0786..cefdb37f9e58 100644 --- a/core/bin/zksync_server/src/main.rs +++ b/core/bin/zksync_server/src/main.rs @@ -15,8 +15,8 @@ use zksync_config::{ FriWitnessGeneratorConfig, FriWitnessVectorGeneratorConfig, ObservabilityConfig, PrometheusConfig, ProofDataHandlerConfig, }, - ApiConfig, ContractVerifierConfig, DBConfig, EthConfig, EthWatchConfig, GasAdjusterConfig, - GenesisConfig, ObjectStoreConfig, PostgresConfig, SnapshotsCreatorConfig, + ApiConfig, ContractVerifierConfig, DADispatcherConfig, DBConfig, EthConfig, EthWatchConfig, + GasAdjusterConfig, GenesisConfig, ObjectStoreConfig, PostgresConfig, SnapshotsCreatorConfig, }; use zksync_core::{ genesis_init, initialize_components, is_genesis_needed, setup_sigint_handler, @@ -275,5 +275,6 @@ fn load_env_config() -> anyhow::Result { object_store_config: ObjectStoreConfig::from_env().ok(), observability: ObservabilityConfig::from_env().ok(), snapshot_creator: SnapshotsCreatorConfig::from_env().ok(), + da_dispatcher_config: DADispatcherConfig::from_env().ok(), }) } diff --git a/core/lib/config/src/configs/da_dispatcher.rs b/core/lib/config/src/configs/da_dispatcher.rs index 99bbf2d79b95..0efde393f1cf 100644 --- a/core/lib/config/src/configs/da_dispatcher.rs +++ b/core/lib/config/src/configs/da_dispatcher.rs @@ -1,3 +1,5 @@ +use std::time::Duration; + use serde::Deserialize; use crate::ObjectStoreConfig; @@ -6,10 +8,11 @@ use crate::ObjectStoreConfig; pub struct DALayerInfo { pub name: String, #[serde(default)] - pub private_key: Vec, + pub private_key: String, } #[derive(Clone, Debug, PartialEq, Deserialize)] +#[serde(tag = "da_mode")] pub enum DataAvailabilityMode { DALayer(DALayerInfo), GCS(ObjectStoreConfig), @@ -18,16 +21,38 @@ pub enum DataAvailabilityMode { #[derive(Debug, Clone, PartialEq, Deserialize)] pub struct DADispatcherConfig { - pub mode: DataAvailabilityMode, + #[serde(flatten)] + pub da_mode: DataAvailabilityMode, + pub polling_interval: Option, + pub query_rows_limit: Option, + pub max_retries: Option, } impl DADispatcherConfig { pub fn for_tests() -> Self { Self { - mode: DataAvailabilityMode::DALayer(DALayerInfo { + da_mode: DataAvailabilityMode::DALayer(DALayerInfo { name: "zkDA".into(), - private_key: vec![1, 2, 3], + private_key: "0x0".into(), }), + polling_interval: Some(5), + query_rows_limit: Some(100), + max_retries: Some(5), + } + } + + pub fn polling_interval(&self) -> Duration { + match self.polling_interval { + Some(interval) => Duration::from_secs(interval as u64), + None => Duration::from_secs(5), } } + + pub fn query_rows_limit(&self) -> u32 { + self.query_rows_limit.unwrap_or(100) + } + + pub fn max_retries(&self) -> u16 { + self.max_retries.unwrap_or(5) + } } diff --git a/core/lib/da_client/Cargo.toml b/core/lib/da_client/Cargo.toml new file mode 100644 index 000000000000..7320451b4fee --- /dev/null +++ b/core/lib/da_client/Cargo.toml @@ -0,0 +1,20 @@ +[package] +name = "zksync_da_client" +version.workspace = true +edition.workspace = true +authors.workspace = true 
+homepage.workspace = true +repository.workspace = true +license.workspace = true +keywords.workspace = true +categories.workspace = true + +# See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html + +[dependencies] +async-trait = "0.1.74" + +zksync_config.workspace = true +zksync_types.workspace = true +zksync_da_layers.workspace = true +zksync_object_store.workspace = true \ No newline at end of file diff --git a/core/lib/da_client/src/clients/gcs/mod.rs b/core/lib/da_client/src/clients/gcs/mod.rs new file mode 100644 index 000000000000..d41300710c31 --- /dev/null +++ b/core/lib/da_client/src/clients/gcs/mod.rs @@ -0,0 +1,58 @@ +use std::{ + fmt, + fmt::{Debug, Formatter}, + sync::Arc, +}; + +use async_trait::async_trait; +use zksync_config::ObjectStoreConfig; +use zksync_da_layers::{ + types::{DataAvailabilityError, DispatchResponse, InclusionData}, + DataAvailabilityInterface, +}; +use zksync_object_store::{ObjectStore, ObjectStoreFactory}; +use zksync_types::{pubdata_da::StorablePubdata, L1BatchNumber}; + +pub(crate) struct GCSDAClient { + object_store: Arc, +} + +impl GCSDAClient { + pub async fn new(object_store_conf: ObjectStoreConfig) -> Self { + GCSDAClient { + object_store: ObjectStoreFactory::create_from_config(&object_store_conf).await, + } + } +} + +#[async_trait] +impl DataAvailabilityInterface for GCSDAClient { + async fn dispatch_blob( + &self, + batch_number: u32, + data: Vec, + ) -> Result { + let key = self + .object_store + .put(L1BatchNumber(batch_number), &StorablePubdata { data }) + .await + .unwrap(); + + Ok(DispatchResponse { + blob_id: key.into_bytes(), + }) + } + + async fn get_inclusion_data(&self, _: Vec) -> Result { + return Ok(InclusionData::default()); + } +} + +impl Debug for GCSDAClient { + fn fmt(&self, formatter: &mut Formatter<'_>) -> fmt::Result { + formatter + .debug_struct("GCSDAClient") + .field("object_store", &self.object_store) + .finish() + } +} diff --git a/core/lib/da_client/src/clients/mod.rs b/core/lib/da_client/src/clients/mod.rs new file mode 100644 index 000000000000..6a4994931375 --- /dev/null +++ b/core/lib/da_client/src/clients/mod.rs @@ -0,0 +1,2 @@ +pub mod gcs; +pub mod no_da; diff --git a/core/lib/da_client/src/clients/no_da/mod.rs b/core/lib/da_client/src/clients/no_da/mod.rs new file mode 100644 index 000000000000..80e56f95eed1 --- /dev/null +++ b/core/lib/da_client/src/clients/no_da/mod.rs @@ -0,0 +1,39 @@ +use std::{ + fmt, + fmt::{Debug, Formatter}, +}; + +use async_trait::async_trait; +use zksync_da_layers::{ + types::{DataAvailabilityError, DispatchResponse, InclusionData}, + DataAvailabilityInterface, +}; + +pub(crate) struct NoDAClient {} + +impl NoDAClient { + pub fn new() -> Self { + NoDAClient {} + } +} + +#[async_trait] +impl DataAvailabilityInterface for NoDAClient { + async fn dispatch_blob( + &self, + _: u32, + _: Vec, + ) -> Result { + Ok(DispatchResponse::default()) + } + + async fn get_inclusion_data(&self, _: Vec) -> Result { + return Ok(InclusionData::default()); + } +} + +impl Debug for NoDAClient { + fn fmt(&self, formatter: &mut Formatter<'_>) -> fmt::Result { + formatter.debug_struct("NoDAClient").finish() + } +} diff --git a/core/lib/da_client/src/lib.rs b/core/lib/da_client/src/lib.rs new file mode 100644 index 000000000000..6e2a1c59eb18 --- /dev/null +++ b/core/lib/da_client/src/lib.rs @@ -0,0 +1,15 @@ +use zksync_config::configs::da_dispatcher::{DADispatcherConfig, DataAvailabilityMode}; +use zksync_da_layers::DataAvailabilityInterface; + +mod clients; + +pub 
async fn new_da_client(config: DADispatcherConfig) -> Box { + match config.da_mode { + DataAvailabilityMode::GCS(config) => Box::new(clients::gcs::GCSDAClient::new(config).await), + DataAvailabilityMode::NoDA => Box::new(clients::no_da::NoDAClient::new()), + DataAvailabilityMode::DALayer(config) => { + zksync_da_layers::new_da_layer_client(config.name, config.private_key.into_bytes()) + .await + } + } +} diff --git a/core/lib/dal/.sqlx/query-0763b6872f2d611d0247b3b6d8cc4018b155242a3ef3da602616a9bce668860b.json b/core/lib/dal/.sqlx/query-0763b6872f2d611d0247b3b6d8cc4018b155242a3ef3da602616a9bce668860b.json new file mode 100644 index 000000000000..b2914b9d0d78 --- /dev/null +++ b/core/lib/dal/.sqlx/query-0763b6872f2d611d0247b3b6d8cc4018b155242a3ef3da602616a9bce668860b.json @@ -0,0 +1,28 @@ +{ + "db_name": "PostgreSQL", + "query": "\n SELECT\n number,\n pubdata_input\n FROM\n l1_batches\n LEFT JOIN commitments ON commitments.l1_batch_number = l1_batches.number\n LEFT JOIN data_availability ON data_availability.l1_batch_number = l1_batches.number\n WHERE\n eth_commit_tx_id IS NULL\n AND number != 0\n AND commitment IS NOT NULL\n AND events_queue_commitment IS NOT NULL\n AND bootloader_initial_content_commitment IS NOT NULL\n AND data_availability.blob_id IS NOT NULL\n AND data_availability.inclusion_data IS NOT NULL\n ORDER BY\n number\n LIMIT\n $1\n ", + "describe": { + "columns": [ + { + "ordinal": 0, + "name": "number", + "type_info": "Int8" + }, + { + "ordinal": 1, + "name": "pubdata_input", + "type_info": "Bytea" + } + ], + "parameters": { + "Left": [ + "Int8" + ] + }, + "nullable": [ + false, + true + ] + }, + "hash": "0763b6872f2d611d0247b3b6d8cc4018b155242a3ef3da602616a9bce668860b" +} diff --git a/core/lib/dal/.sqlx/query-16fdd84e9819be6f4e46b1afc5fe5c7fd0d06d506d4b6bdaa3b149b44a7b85ac.json b/core/lib/dal/.sqlx/query-16fdd84e9819be6f4e46b1afc5fe5c7fd0d06d506d4b6bdaa3b149b44a7b85ac.json new file mode 100644 index 000000000000..df0440b64254 --- /dev/null +++ b/core/lib/dal/.sqlx/query-16fdd84e9819be6f4e46b1afc5fe5c7fd0d06d506d4b6bdaa3b149b44a7b85ac.json @@ -0,0 +1,23 @@ +{ + "db_name": "PostgreSQL", + "query": "\n SELECT\n COUNT(*) AS \"count!\"\n FROM\n data_availability\n WHERE\n l1_batch_number = $1\n AND blob_id = $2\n ", + "describe": { + "columns": [ + { + "ordinal": 0, + "name": "count!", + "type_info": "Int8" + } + ], + "parameters": { + "Left": [ + "Int8", + "Bytea" + ] + }, + "nullable": [ + null + ] + }, + "hash": "16fdd84e9819be6f4e46b1afc5fe5c7fd0d06d506d4b6bdaa3b149b44a7b85ac" +} diff --git a/core/lib/dal/.sqlx/query-4643da2f3085c122a8f62b1f6b9b21029b37b5cf978c25265d93f2280961e6d5.json b/core/lib/dal/.sqlx/query-4643da2f3085c122a8f62b1f6b9b21029b37b5cf978c25265d93f2280961e6d5.json new file mode 100644 index 000000000000..0e6b9db2ad7e --- /dev/null +++ b/core/lib/dal/.sqlx/query-4643da2f3085c122a8f62b1f6b9b21029b37b5cf978c25265d93f2280961e6d5.json @@ -0,0 +1,15 @@ +{ + "db_name": "PostgreSQL", + "query": "\n INSERT INTO data_availability\n (l1_batch_number, blob_id, created_at, updated_at)\n VALUES ($1, $2, NOW(), NOW())\n ", + "describe": { + "columns": [], + "parameters": { + "Left": [ + "Int8", + "Bytea" + ] + }, + "nullable": [] + }, + "hash": "4643da2f3085c122a8f62b1f6b9b21029b37b5cf978c25265d93f2280961e6d5" +} diff --git a/core/lib/dal/.sqlx/query-5c99342c4fbf36ccc8e9c9dafc76de37201091bfccd3caf922e766896c5a542b.json b/core/lib/dal/.sqlx/query-5c99342c4fbf36ccc8e9c9dafc76de37201091bfccd3caf922e766896c5a542b.json new file mode 100644 index 000000000000..5d09a9c37f7a --- 
/dev/null +++ b/core/lib/dal/.sqlx/query-5c99342c4fbf36ccc8e9c9dafc76de37201091bfccd3caf922e766896c5a542b.json @@ -0,0 +1,15 @@ +{ + "db_name": "PostgreSQL", + "query": "\n UPDATE data_availability\n SET\n inclusion_data = $1,\n updated_at = NOW()\n WHERE\n l1_batch_number = $2\n AND inclusion_data IS NULL\n ", + "describe": { + "columns": [], + "parameters": { + "Left": [ + "Bytea", + "Int8" + ] + }, + "nullable": [] + }, + "hash": "5c99342c4fbf36ccc8e9c9dafc76de37201091bfccd3caf922e766896c5a542b" +} diff --git a/core/lib/dal/.sqlx/query-4bdda8d8956ac5dedc3bd389e3721fbe5f9e838b0f1fd6ff906c0130a15f9509.json b/core/lib/dal/.sqlx/query-ca6debc6ab140f26ced294a2ea91e5ed7e700b7c05eb11a85d0ca63bb58d3ddc.json similarity index 82% rename from core/lib/dal/.sqlx/query-4bdda8d8956ac5dedc3bd389e3721fbe5f9e838b0f1fd6ff906c0130a15f9509.json rename to core/lib/dal/.sqlx/query-ca6debc6ab140f26ced294a2ea91e5ed7e700b7c05eb11a85d0ca63bb58d3ddc.json index cdf143094c23..0f4724111af7 100644 --- a/core/lib/dal/.sqlx/query-4bdda8d8956ac5dedc3bd389e3721fbe5f9e838b0f1fd6ff906c0130a15f9509.json +++ b/core/lib/dal/.sqlx/query-ca6debc6ab140f26ced294a2ea91e5ed7e700b7c05eb11a85d0ca63bb58d3ddc.json @@ -1,6 +1,6 @@ { "db_name": "PostgreSQL", - "query": "\n SELECT\n number,\n l1_batches.timestamp,\n l1_tx_count,\n l2_tx_count,\n bloom,\n priority_ops_onchain_data,\n hash,\n commitment,\n eth_prove_tx_id,\n eth_commit_tx_id,\n eth_execute_tx_id,\n l2_to_l1_logs,\n l2_to_l1_messages,\n used_contract_hashes,\n compressed_initial_writes,\n compressed_repeated_writes,\n l2_l1_merkle_root,\n rollup_last_leaf_index,\n zkporter_is_available,\n l1_batches.bootloader_code_hash,\n l1_batches.default_aa_code_hash,\n aux_data_hash,\n pass_through_data_hash,\n meta_parameters_hash,\n protocol_version,\n compressed_state_diffs,\n system_logs,\n events_queue_commitment,\n bootloader_initial_content_commitment,\n pubdata_input\n FROM\n l1_batches\n LEFT JOIN commitments ON commitments.l1_batch_number = l1_batches.number\n JOIN protocol_versions ON protocol_versions.id = l1_batches.protocol_version\n WHERE\n eth_commit_tx_id IS NULL\n AND number != 0\n AND protocol_versions.bootloader_code_hash = $1\n AND protocol_versions.default_account_code_hash = $2\n AND commitment IS NOT NULL\n AND (\n protocol_versions.id = $3\n OR protocol_versions.upgrade_tx_hash IS NULL\n )\n AND events_queue_commitment IS NOT NULL\n AND bootloader_initial_content_commitment IS NOT NULL\n ORDER BY\n number\n LIMIT\n $4\n ", + "query": "\n SELECT\n number,\n l1_batches.timestamp,\n l1_tx_count,\n l2_tx_count,\n bloom,\n priority_ops_onchain_data,\n hash,\n commitment,\n eth_prove_tx_id,\n eth_commit_tx_id,\n eth_execute_tx_id,\n l2_to_l1_logs,\n l2_to_l1_messages,\n used_contract_hashes,\n compressed_initial_writes,\n compressed_repeated_writes,\n l2_l1_merkle_root,\n rollup_last_leaf_index,\n zkporter_is_available,\n l1_batches.bootloader_code_hash,\n l1_batches.default_aa_code_hash,\n aux_data_hash,\n pass_through_data_hash,\n meta_parameters_hash,\n protocol_version,\n compressed_state_diffs,\n system_logs,\n events_queue_commitment,\n bootloader_initial_content_commitment,\n pubdata_input\n FROM\n l1_batches\n LEFT JOIN commitments ON commitments.l1_batch_number = l1_batches.number\n LEFT JOIN data_availability ON data_availability.l1_batch_number = l1_batches.number\n JOIN protocol_versions ON protocol_versions.id = l1_batches.protocol_version\n WHERE\n eth_commit_tx_id IS NULL\n AND number != 0\n AND protocol_versions.bootloader_code_hash = $1\n AND 
protocol_versions.default_account_code_hash = $2\n AND commitment IS NOT NULL\n AND (\n protocol_versions.id = $3\n OR protocol_versions.upgrade_tx_hash IS NULL\n )\n AND events_queue_commitment IS NOT NULL\n AND bootloader_initial_content_commitment IS NOT NULL\n AND data_availability.blob_id IS NOT NULL\n AND data_availability.inclusion_data IS NOT NULL\n ORDER BY\n number\n LIMIT\n $4\n ", "describe": { "columns": [ { @@ -195,5 +195,5 @@ true ] }, - "hash": "4bdda8d8956ac5dedc3bd389e3721fbe5f9e838b0f1fd6ff906c0130a15f9509" + "hash": "ca6debc6ab140f26ced294a2ea91e5ed7e700b7c05eb11a85d0ca63bb58d3ddc" } diff --git a/core/lib/dal/.sqlx/query-e05fb44a407c9b0289aaf6f2addb1e2580f9f0ded642dd3085b9bf8d101bdb15.json b/core/lib/dal/.sqlx/query-e05fb44a407c9b0289aaf6f2addb1e2580f9f0ded642dd3085b9bf8d101bdb15.json new file mode 100644 index 000000000000..6bd046eb9d19 --- /dev/null +++ b/core/lib/dal/.sqlx/query-e05fb44a407c9b0289aaf6f2addb1e2580f9f0ded642dd3085b9bf8d101bdb15.json @@ -0,0 +1,23 @@ +{ + "db_name": "PostgreSQL", + "query": "\n SELECT\n COUNT(*) AS \"count!\"\n FROM\n data_availability\n WHERE\n l1_batch_number = $1\n AND inclusion_data = $2\n ", + "describe": { + "columns": [ + { + "ordinal": 0, + "name": "count!", + "type_info": "Int8" + } + ], + "parameters": { + "Left": [ + "Int8", + "Bytea" + ] + }, + "nullable": [ + null + ] + }, + "hash": "e05fb44a407c9b0289aaf6f2addb1e2580f9f0ded642dd3085b9bf8d101bdb15" +} diff --git a/core/lib/dal/.sqlx/query-efe8f8cf2594d00266282cfa15343f1177c1f31a19bc9039f4145367655e5e31.json b/core/lib/dal/.sqlx/query-efe8f8cf2594d00266282cfa15343f1177c1f31a19bc9039f4145367655e5e31.json new file mode 100644 index 000000000000..ce5043160471 --- /dev/null +++ b/core/lib/dal/.sqlx/query-efe8f8cf2594d00266282cfa15343f1177c1f31a19bc9039f4145367655e5e31.json @@ -0,0 +1,44 @@ +{ + "db_name": "PostgreSQL", + "query": "\n SELECT\n l1_batch_number,\n blob_id,\n inclusion_data,\n created_at,\n updated_at\n FROM\n data_availability\n WHERE\n inclusion_data IS NULL\n AND blob_id IS NOT NULL\n ORDER BY\n l1_batch_number\n LIMIT 1\n ", + "describe": { + "columns": [ + { + "ordinal": 0, + "name": "l1_batch_number", + "type_info": "Int8" + }, + { + "ordinal": 1, + "name": "blob_id", + "type_info": "Bytea" + }, + { + "ordinal": 2, + "name": "inclusion_data", + "type_info": "Bytea" + }, + { + "ordinal": 3, + "name": "created_at", + "type_info": "Timestamp" + }, + { + "ordinal": 4, + "name": "updated_at", + "type_info": "Timestamp" + } + ], + "parameters": { + "Left": [] + }, + "nullable": [ + false, + true, + true, + false, + false + ] + }, + "hash": "efe8f8cf2594d00266282cfa15343f1177c1f31a19bc9039f4145367655e5e31" +} diff --git a/core/lib/dal/migrations/20240508145354_create_data_availability_table.down.sql b/core/lib/dal/migrations/20240522081114_create_data_availability_table.down.sql similarity index 100% rename from core/lib/dal/migrations/20240508145354_create_data_availability_table.down.sql rename to core/lib/dal/migrations/20240522081114_create_data_availability_table.down.sql diff --git a/core/lib/dal/migrations/20240508145354_create_data_availability_table.up.sql b/core/lib/dal/migrations/20240522081114_create_data_availability_table.up.sql similarity index 67% rename from core/lib/dal/migrations/20240508145354_create_data_availability_table.up.sql rename to core/lib/dal/migrations/20240522081114_create_data_availability_table.up.sql index 05ee7f90ee3a..a1b0d82b45b4 100644 --- a/core/lib/dal/migrations/20240508145354_create_data_availability_table.up.sql +++ 
b/core/lib/dal/migrations/20240522081114_create_data_availability_table.up.sql @@ -3,8 +3,8 @@ CREATE TABLE data_availability l1_batch_number BIGINT PRIMARY KEY REFERENCES l1_batches (number) ON DELETE CASCADE, -- the BYTEA used for this 2 columns because it is the most generic type - -- the actual format if blob identifier and inclusion data is defined by the DA client implementation - blob_id BYTEA, -- blob here is an abstract term, unrelated to any DA implementation + -- the actual format of blob identifier and inclusion data is defined by the DA client implementation + blob_id BYTEA NOT NULL, -- blob here is an abstract term, unrelated to any DA implementation inclusion_data BYTEA, created_at TIMESTAMP NOT NULL, diff --git a/core/lib/dal/src/blocks_dal.rs b/core/lib/dal/src/blocks_dal.rs index 04ab8ec8f42a..30485e6e3b29 100644 --- a/core/lib/dal/src/blocks_dal.rs +++ b/core/lib/dal/src/blocks_dal.rs @@ -16,7 +16,7 @@ use zksync_types::{ aggregated_operations::AggregatedActionType, block::{BlockGasCount, L1BatchHeader, L1BatchTreeData, L2BlockHeader, StorageOracleInfo}, circuit::CircuitStatistic, - commitment::{L1BatchCommitmentArtifacts, L1BatchWithMetadata}, + commitment::{L1BatchCommitmentArtifacts, L1BatchDA, L1BatchWithMetadata}, Address, L1BatchNumber, L2BlockNumber, ProtocolVersionId, H256, U256, }; @@ -24,6 +24,7 @@ use crate::{ models::{ parse_protocol_version, storage_block::{StorageL1Batch, StorageL1BatchHeader, StorageL2BlockHeader}, + storage_data_availability::StorageDataAvailability, storage_oracle_info::DbStorageOracleInfo, }, Core, CoreDal, @@ -836,20 +837,20 @@ impl BlocksDal<'_, '_> { Ok(()) } - pub async fn save_l1_batch_da_data( + pub async fn save_l1_batch_inclusion_data( &mut self, number: L1BatchNumber, da_inclusion_data: Vec, ) -> anyhow::Result<()> { let update_result = sqlx::query!( r#" - UPDATE l1_batches + UPDATE data_availability SET - da_inclusion_data = $1, + inclusion_data = $1, updated_at = NOW() WHERE - number = $2 - AND da_inclusion_data IS NULL + l1_batch_number = $2 + AND inclusion_data IS NULL "#, da_inclusion_data.as_slice(), i64::from(number.0), @@ -869,10 +870,10 @@ impl BlocksDal<'_, '_> { SELECT COUNT(*) AS "count!" FROM - l1_batches + data_availability WHERE - number = $1 - AND da_inclusion_data = $2 + l1_batch_number = $1 + AND inclusion_data = $2 "#, i64::from(number.0), da_inclusion_data.as_slice(), @@ -892,6 +893,62 @@ impl BlocksDal<'_, '_> { Ok(()) } + pub async fn insert_l1_batch_da( + &mut self, + number: L1BatchNumber, + blob_id: Vec, + ) -> anyhow::Result<()> { + let update_result = sqlx::query!( + r#" + INSERT INTO + data_availability (l1_batch_number, blob_id, created_at, updated_at) + VALUES + ($1, $2, NOW(), NOW()) + "#, + i64::from(number.0), + blob_id.as_slice(), + ) + .instrument("insert_l1_batch_da") + .with_arg("number", &number) + .with_arg("blob_id", &blob_id) + .report_latency() + .execute(self.storage) + .await?; + + if update_result.rows_affected() == 0 { + tracing::debug!( + "L1 batch #{number}: DA blob_id wasn't updated as it's already present" + ); + + // Batch was already processed. Verify that existing DA blob_id matches + let matched: i64 = sqlx::query!( + r#" + SELECT + COUNT(*) AS "count!" + FROM + data_availability + WHERE + l1_batch_number = $1 + AND blob_id = $2 + "#, + i64::from(number.0), + blob_id.as_slice(), + ) + .instrument("get_matching_batch_da_blob_id") + .with_arg("number", &number) + .report_latency() + .fetch_one(self.storage) + .await? 
+ .count; + + anyhow::ensure!( + matched == 1, + "DA blob_id verification failed. DA blob_id for L1 batch #{number} does not match the expected value" + ); + } + Ok(()) + } + pub async fn save_l1_batch_commitment_artifacts( &mut self, number: L1BatchNumber, @@ -1697,6 +1754,76 @@ impl BlocksDal<'_, '_> { .context("map_l1_batches()") } + pub async fn get_da_blob_awaiting_inclusion( + &mut self, + ) -> anyhow::Result> { + Ok(sqlx::query_as!( + StorageDataAvailability, + r#" + SELECT + l1_batch_number, + blob_id, + inclusion_data, + created_at, + updated_at + FROM + data_availability + WHERE + inclusion_data IS NULL + AND blob_id IS NOT NULL + ORDER BY + l1_batch_number + LIMIT + 1 + "#, + ) + .instrument("get_da_blob_awaiting_inclusion") + .fetch_optional(self.storage) + .await?) + } + + pub async fn get_ready_for_da_dispatch_l1_batches( + &mut self, + limit: usize, + ) -> anyhow::Result> { + let rows = sqlx::query!( + r#" + SELECT + number, + pubdata_input + FROM + l1_batches + LEFT JOIN commitments ON commitments.l1_batch_number = l1_batches.number + LEFT JOIN data_availability ON data_availability.l1_batch_number = l1_batches.number + WHERE + eth_commit_tx_id IS NULL + AND number != 0 + AND commitment IS NOT NULL + AND events_queue_commitment IS NOT NULL + AND bootloader_initial_content_commitment IS NOT NULL + AND data_availability.blob_id IS NOT NULL + AND data_availability.inclusion_data IS NOT NULL + ORDER BY + number + LIMIT + $1 + "#, + limit as i64, + ) + .instrument("get_ready_for_da_dispatch_l1_batches") + .with_arg("limit", &limit) + .fetch_all(self.storage) + .await?; + + Ok(rows + .into_iter() + .map(|row| L1BatchDA { + pubdata: row.pubdata_input.unwrap(), + l1_batch_number: L1BatchNumber(row.number as u32), + }) + .collect()) + } + pub async fn get_l1_batch_state_root( &mut self, number: L1BatchNumber, diff --git a/core/lib/dal/src/models/mod.rs b/core/lib/dal/src/models/mod.rs index 66ab73040d68..1ad17a85cb09 100644 --- a/core/lib/dal/src/models/mod.rs +++ b/core/lib/dal/src/models/mod.rs @@ -3,6 +3,7 @@ use anyhow::Context as _; use zksync_db_connection::error::SqlxContext; use zksync_types::{ProtocolVersionId, H160, H256}; +pub(crate) mod storage_data_availability; pub mod storage_eth_tx; pub mod storage_event; pub mod storage_fee_monitor; diff --git a/core/lib/dal/src/models/storage_data_availability.rs b/core/lib/dal/src/models/storage_data_availability.rs new file mode 100644 index 000000000000..220460673959 --- /dev/null +++ b/core/lib/dal/src/models/storage_data_availability.rs @@ -0,0 +1,10 @@ +use chrono::NaiveDateTime; + +#[derive(Debug, Clone)] +pub struct StorageDataAvailability { + pub l1_batch_number: i64, + pub blob_id: Option>, + pub inclusion_data: Option>, + pub created_at: NaiveDateTime, + pub updated_at: NaiveDateTime, +} diff --git a/core/lib/env_config/src/da_dispatcher.rs b/core/lib/env_config/src/da_dispatcher.rs new file mode 100644 index 000000000000..6e7c652ce103 --- /dev/null +++ b/core/lib/env_config/src/da_dispatcher.rs @@ -0,0 +1,122 @@ +use zksync_config::DADispatcherConfig; + +use crate::{envy_load, FromEnv}; + +impl FromEnv for DADispatcherConfig { + fn from_env() -> anyhow::Result { + envy_load("da_dispatcher", "DA_DISPATCHER_") + } +} + +#[cfg(test)] +mod tests { + use zksync_config::configs::{ + da_dispatcher::{DADispatcherConfig, DALayerInfo, DataAvailabilityMode}, + object_store::{ObjectStoreConfig, ObjectStoreMode}, + }; + + use super::*; + use crate::test_utils::EnvMutex; + + static MUTEX: EnvMutex = EnvMutex::new(); + + fn 
expected_gcs_config( + bucket_base_url: &str, + interval: u32, + rows_limit: u32, + max_retries: u16, + ) -> DADispatcherConfig { + DADispatcherConfig { + da_mode: DataAvailabilityMode::GCS(ObjectStoreConfig { + mode: ObjectStoreMode::GCSWithCredentialFile { + bucket_base_url: bucket_base_url.to_owned(), + gcs_credential_file_path: "/path/to/credentials.json".to_owned(), + }, + max_retries: 5, + }), + polling_interval: Some(interval), + query_rows_limit: Some(rows_limit), + max_retries: Some(max_retries), + } + } + + fn expected_da_layer_config( + name: &str, + pk: &str, + interval: u32, + rows_limit: u32, + max_retries: u16, + ) -> DADispatcherConfig { + DADispatcherConfig { + da_mode: DataAvailabilityMode::DALayer(DALayerInfo { + name: name.to_owned(), + private_key: pk.to_owned(), + }), + polling_interval: Some(interval), + query_rows_limit: Some(rows_limit), + max_retries: Some(max_retries), + } + } + + fn expected_no_da_config() -> DADispatcherConfig { + DADispatcherConfig { + da_mode: DataAvailabilityMode::NoDA, + polling_interval: None, + query_rows_limit: None, + max_retries: None, + } + } + + #[test] + fn from_env_da_layer() { + let mut lock = MUTEX.lock(); + let config = r#" + DA_DISPATCHER_POLLING_INTERVAL=5 + DA_DISPATCHER_QUERY_ROWS_LIMIT=60 + DA_DISPATCHER_MAX_RETRIES=7 + DA_DISPATCHER_DA_MODE="DALayer" + DA_DISPATCHER_NAME="testDALayer" + DA_DISPATCHER_PRIVATE_KEY="0xf55baf7c0e4e33b1d78fbf52f069c426bc36cff1aceb9bc8f45d14c07f034d73" + "#; + lock.set_env(config); + let actual = DADispatcherConfig::from_env().unwrap(); + assert_eq!( + actual, + expected_da_layer_config( + "testDALayer", + "0xf55baf7c0e4e33b1d78fbf52f069c426bc36cff1aceb9bc8f45d14c07f034d73", + 5, + 60, + 7 + ) + ); + } + + #[test] + fn from_env_no_da() { + let mut lock = MUTEX.lock(); + let config = r#" + DA_DISPATCHER_DA_MODE="NoDA" + "#; + lock.set_env(config); + let actual = DADispatcherConfig::from_env().unwrap(); + assert_eq!(actual, expected_no_da_config()); + } + + #[test] + fn from_env_gcs() { + let mut lock = MUTEX.lock(); + let config = r#" + DA_DISPATCHER_POLLING_INTERVAL=10 + DA_DISPATCHER_QUERY_ROWS_LIMIT=100 + DA_DISPATCHER_MAX_RETRIES=6 + DA_DISPATCHER_DA_MODE="GCS" + DA_DISPATCHER_MODE="GCSWithCredentialFile" + DA_DISPATCHER_GCS_CREDENTIAL_FILE_PATH="/path/to/credentials.json" + DA_DISPATCHER_BUCKET_BASE_URL="/base/url" + "#; + lock.set_env(config); + let actual = DADispatcherConfig::from_env().unwrap(); + assert_eq!(actual, expected_gcs_config("/base/url", 10, 100, 6)); + } +} diff --git a/core/lib/env_config/src/lib.rs b/core/lib/env_config/src/lib.rs index f6290020f38d..bfe0923dbc69 100644 --- a/core/lib/env_config/src/lib.rs +++ b/core/lib/env_config/src/lib.rs @@ -21,6 +21,7 @@ mod proof_data_handler; mod snapshots_creator; mod utils; +mod da_dispatcher; mod genesis; #[cfg(test)] mod test_utils; diff --git a/core/lib/l1_contract_interface/src/i_executor/structures/commit_batch_info.rs b/core/lib/l1_contract_interface/src/i_executor/structures/commit_batch_info.rs index e40d4e61c66f..3b447bb2a8d3 100644 --- a/core/lib/l1_contract_interface/src/i_executor/structures/commit_batch_info.rs +++ b/core/lib/l1_contract_interface/src/i_executor/structures/commit_batch_info.rs @@ -215,6 +215,7 @@ impl<'a> Tokenizable for CommitBatchInfoRollup<'a> { tokens.push(Token::Bytes(result)); } + PubdataDA::Custom => todo!(), } } @@ -404,6 +405,7 @@ impl<'a> Tokenizable for CommitBatchInfoValidium<'a> { tokens.push(Token::Bytes(result)); } + PubdataDA::Custom => todo!(), } } diff --git 
a/core/lib/protobuf_config/src/da_dispatcher.rs b/core/lib/protobuf_config/src/da_dispatcher.rs index dfe7ad62abf3..1721de4f3376 100644 --- a/core/lib/protobuf_config/src/da_dispatcher.rs +++ b/core/lib/protobuf_config/src/da_dispatcher.rs @@ -8,32 +8,57 @@ impl ProtoRepr for proto::DataAvailabilityDispatcher { type Type = configs::da_dispatcher::DADispatcherConfig; fn read(&self) -> anyhow::Result { - configs::da_dispatcher::DADispatcherConfig { - mode: match &self.credentials { - Some(proto::data_availability_dispatcher::Credentials::DaLayer(config)) => { - configs::da_dispatcher::DataAvailabilityMode::DALayer( + match &self.credentials { + Some(proto::data_availability_dispatcher::Credentials::DaLayer(config)) => { + Ok(configs::da_dispatcher::DADispatcherConfig { + da_mode: configs::da_dispatcher::DataAvailabilityMode::DALayer( configs::da_dispatcher::DALayerInfo { - name: *required(&config.name).context("name"), + name: required(&config.name).context("name")?.clone(), private_key: required(&config.private_key) - .context("private_key") - .into_bytes(), + .context("private_key")? + .clone(), }, - ) - } - Some(proto::data_availability_dispatcher::Credentials::ObjectStore(config)) => { - configs::da_dispatcher::DataAvailabilityMode::GCS(config.read()?) - } - None => configs::da_dispatcher::DataAvailabilityMode::NoDA, - }, + ), + polling_interval: Some( + *required(&self.polling_interval).context("polling_interval")?, + ), + query_rows_limit: Some( + *required(&self.query_rows_limit).context("query_rows_limit")?, + ), + max_retries: Some( + *required(&self.max_retries).context("query_rows_limit")? as u16 + ), + }) + } + Some(proto::data_availability_dispatcher::Credentials::ObjectStore(config)) => { + Ok(configs::da_dispatcher::DADispatcherConfig { + da_mode: configs::da_dispatcher::DataAvailabilityMode::GCS(config.read()?), + polling_interval: Some( + *required(&self.polling_interval).context("polling_interval")?, + ), + query_rows_limit: Some( + *required(&self.query_rows_limit).context("query_rows_limit")?, + ), + max_retries: Some( + *required(&self.max_retries).context("query_rows_limit")? 
as u16 + ), + }) + } + None => Ok(configs::da_dispatcher::DADispatcherConfig { + da_mode: configs::da_dispatcher::DataAvailabilityMode::NoDA, + polling_interval: None, + query_rows_limit: None, + max_retries: None, + }), } } fn build(this: &Self::Type) -> Self { - let credentials = match this.mode.clone() { + let credentials = match this.da_mode.clone() { configs::da_dispatcher::DataAvailabilityMode::DALayer(info) => Some( proto::data_availability_dispatcher::Credentials::DaLayer(proto::DaLayer { name: Some(info.name.clone()), - private_key: info.private_key.clone().into(), + private_key: Some(info.private_key.clone()), }), ), configs::da_dispatcher::DataAvailabilityMode::GCS(config) => Some( @@ -44,6 +69,11 @@ impl ProtoRepr for proto::DataAvailabilityDispatcher { configs::da_dispatcher::DataAvailabilityMode::NoDA => None, }; - Self { credentials } + Self { + credentials, + polling_interval: this.polling_interval, + query_rows_limit: this.query_rows_limit, + max_retries: this.max_retries.map(|x| x as u32), + } } } diff --git a/core/lib/protobuf_config/src/general.rs b/core/lib/protobuf_config/src/general.rs index ccd55a71c2ec..947b7a3a30d8 100644 --- a/core/lib/protobuf_config/src/general.rs +++ b/core/lib/protobuf_config/src/general.rs @@ -37,6 +37,8 @@ impl ProtoRepr for proto::GeneralConfig { snapshot_creator: read_optional_repr(&self.snapshot_creator) .context("snapshot_creator")?, observability: read_optional_repr(&self.observability).context("observability")?, + da_dispatcher_config: read_optional_repr(&self.da_dispatcher) + .context("da_dispatcher")?, }) } @@ -68,6 +70,7 @@ impl ProtoRepr for proto::GeneralConfig { eth: this.eth.as_ref().map(ProtoRepr::build), snapshot_creator: this.snapshot_creator.as_ref().map(ProtoRepr::build), observability: this.observability.as_ref().map(ProtoRepr::build), + da_dispatcher: this.da_dispatcher_config.as_ref().map(ProtoRepr::build), } } } diff --git a/core/lib/protobuf_config/src/proto/config/da_dispatcher.proto b/core/lib/protobuf_config/src/proto/config/da_dispatcher.proto index e5ad0f6d359a..e1c944d5d2e8 100644 --- a/core/lib/protobuf_config/src/proto/config/da_dispatcher.proto +++ b/core/lib/protobuf_config/src/proto/config/da_dispatcher.proto @@ -14,4 +14,7 @@ message DataAvailabilityDispatcher { config.object_store.ObjectStore object_store = 1; DALayer da_layer = 2; } + optional uint32 polling_interval = 3; + optional uint32 query_rows_limit = 4; + optional uint32 max_retries = 5; } diff --git a/core/lib/protobuf_config/src/proto/config/general.proto b/core/lib/protobuf_config/src/proto/config/general.proto index fdc60c57cfdd..b035be327031 100644 --- a/core/lib/protobuf_config/src/proto/config/general.proto +++ b/core/lib/protobuf_config/src/proto/config/general.proto @@ -13,6 +13,7 @@ import "zksync/config/house_keeper.proto"; import "zksync/config/observability.proto"; import "zksync/config/snapshots_creator.proto"; import "zksync/config/utils.proto"; +import "zksync/config/da_dispatcher.proto"; message GeneralConfig { optional config.database.Postgres postgres = 1; @@ -35,4 +36,5 @@ message GeneralConfig { optional config.prover.ProverGateway prover_gateway = 30; optional config.snapshot_creator.SnapshotsCreator snapshot_creator = 31; optional config.observability.Observability observability = 32; + optional config.da_dispatcher.DataAvailabilityDispatcher da_dispatcher = 33; } diff --git a/core/lib/types/src/commitment/mod.rs b/core/lib/types/src/commitment/mod.rs index 31baa78ee1fe..ff3d99613ce5 100644 --- 
a/core/lib/types/src/commitment/mod.rs +++ b/core/lib/types/src/commitment/mod.rs @@ -9,6 +9,7 @@ use std::{collections::HashMap, convert::TryFrom}; use serde::{Deserialize, Serialize}; +use zksync_basic_types::L1BatchNumber; use zksync_contracts::BaseSystemContractsHashes; use zksync_mini_merkle_tree::MiniMerkleTree; use zksync_system_constants::{ @@ -727,3 +728,8 @@ pub struct L1BatchCommitmentArtifacts { pub zkporter_is_available: bool, pub aux_commitments: Option<AuxCommitments>, } + +pub struct L1BatchDA { + pub pubdata: Vec<u8>, + pub l1_batch_number: L1BatchNumber, +}
diff --git a/core/lib/zksync_core/src/temp_config_store/mod.rs b/core/lib/zksync_core/src/temp_config_store/mod.rs index 958cd4b0c68f..31f1db74ec0e 100644 --- a/core/lib/zksync_core/src/temp_config_store/mod.rs +++ b/core/lib/zksync_core/src/temp_config_store/mod.rs @@ -14,8 +14,8 @@ use zksync_config::{ FriWitnessGeneratorConfig, FriWitnessVectorGeneratorConfig, GeneralConfig, ObservabilityConfig, PrometheusConfig, ProofDataHandlerConfig, }, - ApiConfig, ContractVerifierConfig, DBConfig, EthConfig, EthWatchConfig, GasAdjusterConfig, - ObjectStoreConfig, PostgresConfig, SnapshotsCreatorConfig, + ApiConfig, ContractVerifierConfig, DADispatcherConfig, DBConfig, EthConfig, EthWatchConfig, + GasAdjusterConfig, ObjectStoreConfig, PostgresConfig, SnapshotsCreatorConfig, }; use zksync_protobuf::{repr::ProtoRepr, ProtoFmt}; use zksync_protobuf_config::read_optional_repr; @@ -66,6 +66,7 @@ pub struct TempConfigStore { pub object_store_config: Option<ObjectStoreConfig>, pub observability: Option<ObservabilityConfig>, pub snapshot_creator: Option<SnapshotsCreatorConfig>, + pub da_dispatcher_config: Option<DADispatcherConfig>, } #[derive(Debug)] @@ -111,6 +112,7 @@ impl TempConfigStore { eth: self.eth_sender_config.clone(), snapshot_creator: self.snapshot_creator.clone(), observability: self.observability.clone(), + da_dispatcher_config: self.da_dispatcher_config.clone(), } }
diff --git a/core/node/da_dispatcher/Cargo.toml b/core/node/da_dispatcher/Cargo.toml new file mode 100644 index 000000000000..3f5656eeaa9b --- /dev/null +++ b/core/node/da_dispatcher/Cargo.toml @@ -0,0 +1,25 @@ +[package] +name = "zksync_da_dispatcher" +version.workspace = true +edition.workspace = true +authors.workspace = true +homepage.workspace = true +repository.workspace = true +license.workspace = true +keywords.workspace = true +categories.workspace = true + +# See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html + +[dependencies] +vise.workspace = true +zksync_dal.workspace = true +zksync_utils.workspace = true +zksync_config.workspace = true +zksync_types.workspace = true +zksync_da_layers.workspace = true + +tokio = { workspace = true, features = ["time"] } +anyhow.workspace = true +tracing.workspace = true +chrono = "0.4.31"
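The crate set up above is normally driven by a node-framework layer added later in this series, but nothing in it depends on the framework. A minimal standalone wiring sketch, assuming the crate names introduced by this patch (zksync_da_dispatcher, zksync_da_client, zksync_dal); the helper name is made up for illustration:

use tokio::sync::watch;
use zksync_config::configs::da_dispatcher::DADispatcherConfig;
use zksync_da_dispatcher::DataAvailabilityDispatcher;
use zksync_dal::{ConnectionPool, Core};

// Hypothetical helper mirroring what DataAvailabilityDispatcherTask::run does later in the series.
async fn run_standalone_da_dispatcher(
    pool: ConnectionPool<Core>,
    config: DADispatcherConfig,
) -> anyhow::Result<()> {
    // Build the concrete client (GCS, NoDA or an external DA layer) from config.
    let client = zksync_da_client::new_da_client(config.clone()).await;
    // Sending `true` on the watch channel asks the dispatch loop to shut down.
    let (_stop_sender, stop_receiver) = watch::channel(false);
    DataAvailabilityDispatcher::new(pool, config, client)
        .run(stop_receiver)
        .await
}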
diff --git a/core/node/da_dispatcher/src/da_dispatcher.rs b/core/node/da_dispatcher/src/da_dispatcher.rs new file mode 100644 index 000000000000..05fcc5ec5312 --- /dev/null +++ b/core/node/da_dispatcher/src/da_dispatcher.rs @@ -0,0 +1,146 @@ +use std::{future::Future, time::Duration}; + +use anyhow::Error; +use chrono::Utc; +use tokio::sync::watch; +use zksync_config::DADispatcherConfig; +use zksync_da_layers::DataAvailabilityInterface; +use zksync_dal::{Connection, ConnectionPool, Core, CoreDal}; +use zksync_types::L1BatchNumber; + +use crate::metrics::METRICS; + +#[derive(Debug)] +pub struct DataAvailabilityDispatcher { + client: Box<dyn DataAvailabilityInterface>, + pool: ConnectionPool<Core>, + config: DADispatcherConfig, +} + +impl DataAvailabilityDispatcher { + pub fn new( + pool: ConnectionPool<Core>, + config: DADispatcherConfig, + client: Box<dyn DataAvailabilityInterface>, + ) -> Self { + Self { + pool, + config, + client, + } + } + + pub async fn run(self, stop_receiver: watch::Receiver<bool>) -> anyhow::Result<()> { + let pool = self.pool.clone(); + loop { + let mut storage = pool.connection_tagged("da_dispatcher").await.unwrap(); + + if *stop_receiver.borrow() { + tracing::info!("Stop signal received, da_dispatcher is shutting down"); + break; + } + + if let Err(err) = self.loop_iteration(&mut storage).await { + tracing::warn!("da_dispatcher error {err:?}"); + } + + tokio::time::sleep(self.config.polling_interval()).await; + } + Ok(()) + } + + async fn dispatch(&self, storage: &mut Connection<'_, Core>) -> anyhow::Result<()> { + let batches = storage + .blocks_dal() + .get_ready_for_da_dispatch_l1_batches(self.config.query_rows_limit() as usize) + .await?; + + for batch in batches { + let dispatch_latency = METRICS.blob_dispatch_latency.start(); + let dispatch_response = retry(self.config.max_retries(), || { + self.client + .dispatch_blob(batch.l1_batch_number.0, batch.pubdata.clone()) + }) + .await + .map_err(Error::msg)?; + dispatch_latency.observe(); + + storage + .blocks_dal() + .insert_l1_batch_da(batch.l1_batch_number, dispatch_response.blob_id) + .await?; + + METRICS + .last_known_l1_batch + .set(batch.l1_batch_number.0 as usize); + METRICS.blob_size.observe(batch.pubdata.len()); + } + + Ok(()) + } + + async fn poll_for_inclusion(&self, storage: &mut Connection<'_, Core>) -> anyhow::Result<()> { + let storage_da = storage + .blocks_dal() + .get_da_blob_awaiting_inclusion() + .await?; + + if let Some(storage_da) = storage_da { + let inclusion_data = retry(self.config.max_retries(), || { + self.client + .get_inclusion_data(storage_da.blob_id.clone().unwrap()) + }) + .await + .map_err(Error::msg)?; + + storage + .blocks_dal() + .save_l1_batch_inclusion_data( + L1BatchNumber(storage_da.l1_batch_number as u32), + inclusion_data.data, + ) + .await?; + + METRICS.inclusion_latency.observe(Duration::from_secs( + (Utc::now().timestamp() - storage_da.created_at.timestamp()) as u64, + )); + } + + Ok(()) + } + + #[tracing::instrument(skip(self, storage))] + async fn loop_iteration(&self, storage: &mut Connection<'_, Core>) -> anyhow::Result<()> { + self.dispatch(storage).await?; + self.poll_for_inclusion(storage).await?; + + Ok(()) + } +} + +async fn retry<T, E, Fut, F>(max_retries: u16, mut f: F) -> Result<T, E> +where + E: std::fmt::Display, + Fut: Future<Output = Result<T, E>>, + F: FnMut() -> Fut, +{ + let mut retries = 1; + let mut backoff = 1; + loop { + match f().await { + Ok(result) => { + METRICS.dispatch_call_retries.observe(retries as usize); + return Ok(result); + } + Err(err) => { + tracing::warn!(%err, "Failed DA dispatch request {retries}/{max_retries}, retrying."); + if retries > max_retries { + return Err(err); + } + retries += 1; + tokio::time::sleep(Duration::from_secs(backoff)).await; + backoff *= 2; + } + } + } +}
diff --git a/core/node/da_dispatcher/src/lib.rs b/core/node/da_dispatcher/src/lib.rs new file mode 100644 index 000000000000..7d3507238591 --- /dev/null +++ b/core/node/da_dispatcher/src/lib.rs @@ -0,0 +1,4 @@ +pub use self::da_dispatcher::DataAvailabilityDispatcher; + +pub mod da_dispatcher; +mod metrics;
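da_dispatcher.rs above calls config.polling_interval(), config.query_rows_limit() and config.max_retries(), none of which appear in this diff; they are presumably thin accessors on DADispatcherConfig that substitute defaults when the optional fields are unset. A sketch of that assumed shape, written as a local extension trait so it stays clearly separate from the real zksync_config implementation (the default values and the seconds unit are guesses):

use std::time::Duration;
use zksync_config::configs::da_dispatcher::DADispatcherConfig;

// Assumed accessor shape; the actual defaults live in zksync_config.
trait DaDispatcherConfigExt {
    fn polling_interval(&self) -> Duration;
    fn query_rows_limit(&self) -> u32;
    fn max_retries(&self) -> u16;
}

impl DaDispatcherConfigExt for DADispatcherConfig {
    fn polling_interval(&self) -> Duration {
        // `polling_interval` is an Option<u32>; interpreted as seconds here.
        Duration::from_secs(self.polling_interval.unwrap_or(5).into())
    }

    fn query_rows_limit(&self) -> u32 {
        self.query_rows_limit.unwrap_or(100)
    }

    fn max_retries(&self) -> u16 {
        self.max_retries.unwrap_or(5)
    }
}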
diff --git a/core/node/da_dispatcher/src/metrics.rs b/core/node/da_dispatcher/src/metrics.rs new file mode 100644 index 000000000000..49adbbcf984f --- /dev/null +++ b/core/node/da_dispatcher/src/metrics.rs @@ -0,0 +1,26 @@ +use std::time::Duration; + +use vise::{Buckets, Gauge, Histogram, Metrics}; + +#[derive(Debug, Metrics)] +#[metrics(prefix = "server_da_dispatcher")] +pub(super) struct DataAvailabilityDispatcherMetrics { + /// Latency of the dispatch of the blob. + #[metrics(buckets = Buckets::LATENCIES)] + pub blob_dispatch_latency: Histogram<Duration>, + /// The duration between the moment when the blob is dispatched and the moment when it is included. + #[metrics(buckets = Buckets::LATENCIES)] + pub inclusion_latency: Histogram<Duration>, + /// Size of the dispatched blob. + #[metrics(buckets = Buckets::exponential(1_024.0..=16.0 * 1_024.0 * 1_024.0, 2.0))] + pub blob_size: Histogram<usize>, + + /// Number of retries performed before a DA dispatch call succeeded. + #[metrics(buckets = Buckets::linear(0.0..=10.0, 1.0))] + pub dispatch_call_retries: Histogram<usize>, + /// Last L1 batch number observed by the DA dispatcher. + pub last_known_l1_batch: Gauge<usize>, +} + +#[vise::register] +pub(super) static METRICS: vise::Global<DataAvailabilityDispatcherMetrics> = vise::Global::new();
diff --git a/core/node/fee_model/src/l1_gas_price/gas_adjuster/mod.rs b/core/node/fee_model/src/l1_gas_price/gas_adjuster/mod.rs index a6c8eb0520e8..ffd611c2d396 100644 --- a/core/node/fee_model/src/l1_gas_price/gas_adjuster/mod.rs +++ b/core/node/fee_model/src/l1_gas_price/gas_adjuster/mod.rs @@ -200,6 +200,7 @@ impl GasAdjuster { PubdataSendingMode::Calldata => { self.estimate_effective_gas_price() * self.pubdata_pricing.pubdata_byte_gas() } + PubdataSendingMode::Custom => todo!(), } }
diff --git a/core/node/node_framework/Cargo.toml b/core/node/node_framework/Cargo.toml index ca3f8491d686..91841fb15737 100644 --- a/core/node/node_framework/Cargo.toml +++ b/core/node/node_framework/Cargo.toml @@ -34,7 +34,9 @@ zksync_commitment_generator.workspace = true zksync_house_keeper.workspace = true zksync_node_fee_model.workspace = true zksync_eth_sender.workspace = true -zksync_da.workspace = true +zksync_da_client.workspace = true +zksync_da_dispatcher.workspace = true +zksync_da_layers.workspace = true tracing.workspace = true thiserror.workspace = true
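The dispatch_call_retries histogram above is fed by the retry helper at the bottom of da_dispatcher.rs, which doubles the sleep between attempts (1s, 2s, 4s, ...). A self-contained mirror of that backoff pattern, runnable with only tokio (features = ["full"]); it is an illustration, not the crate-private helper itself:

use std::{future::Future, time::Duration};

// Standalone mirror of the retry-with-exponential-backoff pattern used by the dispatcher.
async fn retry_with_backoff<T, E, Fut, F>(max_retries: u16, mut f: F) -> Result<T, E>
where
    E: std::fmt::Display,
    Fut: Future<Output = Result<T, E>>,
    F: FnMut() -> Fut,
{
    let mut attempt: u16 = 1;
    let mut backoff_secs: u64 = 1;
    loop {
        match f().await {
            Ok(value) => return Ok(value),
            Err(err) if attempt > max_retries => return Err(err),
            Err(err) => {
                eprintln!("attempt {attempt}/{max_retries} failed: {err}, retrying");
                tokio::time::sleep(Duration::from_secs(backoff_secs)).await;
                attempt += 1;
                backoff_secs *= 2; // 1s, 2s, 4s, ...
            }
        }
    }
}

#[tokio::main]
async fn main() {
    // Fail twice, then succeed; the call resolves after roughly 1s + 2s of backoff.
    let mut failures_left = 2u32;
    let result = retry_with_backoff(5, || {
        let fail = failures_left > 0;
        if fail {
            failures_left -= 1;
        }
        async move {
            if fail {
                Err("DA layer temporarily unavailable")
            } else {
                Ok("blob accepted")
            }
        }
    })
    .await;
    assert_eq!(result, Ok("blob accepted"));
}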
diff --git a/core/node/node_framework/src/implementations/layers/da_client.rs b/core/node/node_framework/src/implementations/layers/da_client.rs deleted file mode 100644 index 602002973276..000000000000 --- a/core/node/node_framework/src/implementations/layers/da_client.rs +++ /dev/null @@ -1,69 +0,0 @@ -use anyhow::Context as _; -use zksync_config::{configs::da_dispatcher::DADispatcherConfig, EthConfig}; -use zksync_eth_client::clients::PKSigningClient; - -use crate::{ - implementations::resources::{ - da_interface::DAInterfaceResource, - eth_interface::{ - BoundEthInterfaceForBlobsResource, BoundEthInterfaceResource, EthInterfaceResource, - }, - }, - service::ServiceContext, - wiring_layer::{WiringError, WiringLayer}, -}; - -#[derive(Debug)] -pub struct DataAvailabilityClientLayer { - eth_sender_config: EthConfig, - da_dispatcher_config: DADispatcherConfig, -} - -impl DataAvailabilityClientLayer { - pub fn new(eth_sender_config: EthConfig, da_dispatcher_config: DADispatcherConfig) -> Self { - Self { - eth_sender_config, - da_dispatcher_config, - } - } -} - -#[async_trait::async_trait] -impl WiringLayer for DataAvailabilityClientLayer { - fn layer_name(&self) -> &'static str { - "data_availability_client_layer" - } - - async fn wire(self: Box<Self>, mut context: ServiceContext<'_>) -> Result<(), WiringError> { - let private_key = self.wallets.operator.private_key(); - let gas_adjuster_config = self - .eth_sender_config - .gas_adjuster - .as_ref() - .context("gas_adjuster config is missing")?; - let EthInterfaceResource(query_client) = context.get_resource().await?; - - let signing_client = PKSigningClient::new_raw( - private_key.clone(), - self.contracts_config.diamond_proxy_addr, - gas_adjuster_config.default_priority_fee_per_gas, - self.l1_chain_id, - query_client.clone(), - ); - context.insert_resource(BoundEthInterfaceResource(Box::new(signing_client)))?; - - if let Some(blob_operator) = &self.wallets.blob_operator { - let private_key = blob_operator.private_key(); - let signing_client_for_blobs = PKSigningClient::new_raw( - private_key.clone(), - self.contracts_config.diamond_proxy_addr, - gas_adjuster_config.default_priority_fee_per_gas, - self.l1_chain_id, - query_client, - ); - context.insert_resource(DAInterfaceResource(Box::new(signing_client_for_blobs)))?; - } - - Ok(()) - } -}
diff --git a/core/node/node_framework/src/implementations/layers/da_dispatcher.rs b/core/node/node_framework/src/implementations/layers/da_dispatcher.rs index 72a7a7034f81..0c3f9b939b33 100644 --- a/core/node/node_framework/src/implementations/layers/da_dispatcher.rs +++ b/core/node/node_framework/src/implementations/layers/da_dispatcher.rs @@ -1,28 +1,23 @@ -use zksync_config::configs::{ - chain::L1BatchCommitDataGeneratorMode, da_dispatcher::DADispatcherConfig, -}; +use zksync_config::configs::da_dispatcher::DADispatcherConfig; +use zksync_da_layers::DataAvailabilityInterface; +use zksync_dal::Core; +use zksync_db_connection::connection_pool::ConnectionPool; use crate::{ implementations::resources::pools::{MasterPool, PoolResource}, - service::ServiceContext, + service::{ServiceContext, StopReceiver}, + task::Task, wiring_layer::{WiringError, WiringLayer}, }; #[derive(Debug)] pub struct DataAvailabilityDispatcherLayer { da_config: DADispatcherConfig, - l1_batch_commit_data_generator_mode: L1BatchCommitDataGeneratorMode, } impl DataAvailabilityDispatcherLayer { - pub fn new( - da_config: DADispatcherConfig, - l1_batch_commit_data_generator_mode: L1BatchCommitDataGeneratorMode, - ) -> Self { - Self { - da_config, - l1_batch_commit_data_generator_mode, - } + pub fn new(da_config: DADispatcherConfig) -> Self { + Self { da_config } } } @@ -36,8 +31,38 @@ impl WiringLayer for DataAvailabilityDispatcherLayer { let master_pool_resource = context.get_resource::<PoolResource<MasterPool>>().await?; let master_pool = master_pool_resource.get().await.unwrap(); - let da_client = zksync_da::new_da_client(self.da_config.clone()); + let da_client = zksync_da_client::new_da_client(self.da_config.clone()).await; + + context.add_task(Box::new(DataAvailabilityDispatcherTask { + main_pool: master_pool, + da_config: self.da_config, + client: da_client, + })); Ok(()) } } + +#[derive(Debug)] +struct DataAvailabilityDispatcherTask { + main_pool: ConnectionPool<Core>, + da_config: DADispatcherConfig, + client: Box<dyn DataAvailabilityInterface>, +} + +#[async_trait::async_trait] +impl Task for DataAvailabilityDispatcherTask { + fn name(&self) -> &'static str { + "da_dispatcher" + } + + async fn run(self: Box<Self>, stop_receiver: StopReceiver) -> anyhow::Result<()> { + let da_dispatcher = zksync_da_dispatcher::DataAvailabilityDispatcher::new( + self.main_pool, + self.da_config, + self.client, + ); + + da_dispatcher.run(stop_receiver.0).await + } +}
diff --git a/core/node/node_framework/src/implementations/layers/mod.rs b/core/node/node_framework/src/implementations/layers/mod.rs index f409266714e6..0accb28cbc95 100644 --- a/core/node/node_framework/src/implementations/layers/mod.rs +++ b/core/node/node_framework/src/implementations/layers/mod.rs @@ -3,7 +3,6 @@ pub mod commitment_generator; pub mod consensus; pub mod consistency_checker; pub mod
contract_verification_api; -mod da_client; pub mod da_dispatcher; pub mod eth_sender; pub mod eth_watch; diff --git a/core/node/node_framework/src/implementations/resources/da_interface.rs b/core/node/node_framework/src/implementations/resources/da_interface.rs deleted file mode 100644 index 77db176dc412..000000000000 --- a/core/node/node_framework/src/implementations/resources/da_interface.rs +++ /dev/null @@ -1,12 +0,0 @@ -use zksync_da::DataAvailabilityInterface; - -use crate::resource::Resource; - -#[derive(Debug, Clone)] -pub struct DAInterfaceResource(pub Box); - -impl Resource for DAInterfaceResource { - fn name() -> String { - "common/da_interface".into() - } -} diff --git a/core/node/node_framework/src/implementations/resources/mod.rs b/core/node/node_framework/src/implementations/resources/mod.rs index b8f5942db58c..2225fcd2f4c9 100644 --- a/core/node/node_framework/src/implementations/resources/mod.rs +++ b/core/node/node_framework/src/implementations/resources/mod.rs @@ -1,6 +1,5 @@ pub mod action_queue; pub mod circuit_breakers; -pub(crate) mod da_interface; pub mod eth_interface; pub mod fee_input; pub mod healthcheck; From 29cd8ef2d8b934e38b69353b42e865e93e4fc574 Mon Sep 17 00:00:00 2001 From: dimazhornyk Date: Wed, 22 May 2024 10:19:31 +0200 Subject: [PATCH 09/69] chore: remove env file --- etc/env/dev.env | 305 ------------------------------------------------ 1 file changed, 305 deletions(-) delete mode 100644 etc/env/dev.env diff --git a/etc/env/dev.env b/etc/env/dev.env deleted file mode 100644 index 405845091b74..000000000000 --- a/etc/env/dev.env +++ /dev/null @@ -1,305 +0,0 @@ -ALERTS_SPORADIC_CRYPTO_ERRORS_SUBSTRS="EventDestroyErr,Can't free memory of DeviceBuf,value: PoisonError" -API_WEB3_JSON_RPC_HTTP_PORT=3050 -API_WEB3_JSON_RPC_HTTP_URL=http://127.0.0.1:3050 -API_WEB3_JSON_RPC_WS_PORT=3051 -API_WEB3_JSON_RPC_WS_URL=ws://127.0.0.1:3051 -API_WEB3_JSON_RPC_REQ_ENTITIES_LIMIT=10000 -API_WEB3_JSON_RPC_FILTERS_DISABLED=false -API_WEB3_JSON_RPC_FILTERS_LIMIT=10000 -API_WEB3_JSON_RPC_SUBSCRIPTIONS_LIMIT=10000 -API_WEB3_JSON_RPC_PUBSUB_POLLING_INTERVAL=200 -API_WEB3_JSON_RPC_THREADS_PER_SERVER=128 -API_WEB3_JSON_RPC_MAX_NONCE_AHEAD=50 -API_WEB3_JSON_RPC_GAS_PRICE_SCALE_FACTOR=1.2 -API_WEB3_JSON_RPC_L1_TO_L2_TRANSACTIONS_COMPATIBILITY_MODE=true -API_WEB3_JSON_RPC_REQUEST_TIMEOUT=10 
-API_WEB3_JSON_RPC_ACCOUNT_PKS=0xac0974bec39a17e36ba4a6b4d238ff944bacb478cbed5efcae784d7bf4f2ff80,0x59c6995e998f97a5a0044966f0945389dc9e86dae88c7a8412f4603b6b78690d,0x5de4111afa1a4b94908f83103eb1f1706367c2e68ca870fc3fb9a804cdab365a,0x7c852118294e51e653712a81e05800f419141751be58f605c371e15141b007a6,0x47e179ec197488593b187f80a00eb0da91f1b9d0b13f8733639f19c30a34926a,0x8b3a350cf5c34c9194ca85829a2df0ec3153be0318b5e2d3348e872092edffba,0x92db14e403b83dfe3df233f83dfa3a0d7096f21ca9b0d6d6b8d88b2b4ec1564e,0x4bbbf85ce3377467afe5d46f804f221813b2bb87f24d81f60f1fcdbf7cbf4356,0xdbda1821b80551c9d65939329250298aa3472ba22feea921c0cf5d620ea67b97,0x2a871d0798f97d79848a013d4936a73bf4cc922c825d33c1cf7073dff6d409c6,0xf214f2b2cd398c806f84e317254e0f0b801d0643303237d97a22a48e01628897,0x701b615bbdfb9de65240bc28bd21bbc0d996645a3dd57e7b12bc2bdf6f192c82,0xa267530f49f8280200edf313ee7af6b827f2a8bce2897751d06a843f644967b1,0x47c99abed3324a2707c28affff1267e45918ec8c3f20b8aa892e8b065d2942dd,0xc526ee95bf44d8fc405a158bb884d9d1238d99f0612e9f33d006bb0789009aaa,0x8166f546bab6da521a8369cab06c5d2b9e46670292d85c875ee9ec20e84ffb61,0xea6c44ac03bff858b476bba40716402b03e41b8e97e276d1baec7c37d42484a0,0x689af8efa8c651a91ad287602527f3af2fe9f6501a7ac4b061667b5a93e037fd,0xde9be858da4a475276426320d5e9262ecfc3ba460bfac56360bfa6c4c28b4ee0,0xdf57089febbacf7ba0bc227dafbffa9fc08a93fdc68e1e42411a14efcf23656e -API_WEB3_JSON_RPC_ESTIMATE_GAS_SCALE_FACTOR=1.2 -API_WEB3_JSON_RPC_ESTIMATE_GAS_ACCEPTABLE_OVERESTIMATION=1000 -API_WEB3_JSON_RPC_MAX_TX_SIZE=1000000 -API_CONTRACT_VERIFICATION_PORT=3070 -API_CONTRACT_VERIFICATION_URL=http://127.0.0.1:3070 -API_CONTRACT_VERIFICATION_THREADS_PER_SERVER=128 -API_PROMETHEUS_LISTENER_PORT=3312 -API_PROMETHEUS_PUSHGATEWAY_URL=http://127.0.0.1:9091 -API_PROMETHEUS_PUSH_INTERVAL_MS=100 -API_HEALTHCHECK_PORT=3071 -API_MERKLE_TREE_PORT=3072 -CHAIN_ETH_NETWORK=localhost -CHAIN_ETH_ZKSYNC_NETWORK=zkcany8 -CHAIN_ETH_ZKSYNC_NETWORK_ID=308 -CHAIN_STATE_KEEPER_FEE_ACCOUNT_ADDR=0xA13c10C0D5bd6f79041B9835c63f91de35A15883 -CHAIN_STATE_KEEPER_TRANSACTION_SLOTS=250 -CHAIN_STATE_KEEPER_MAX_ALLOWED_L2_TX_GAS_LIMIT=4000000000 -CHAIN_STATE_KEEPER_BLOCK_COMMIT_DEADLINE_MS=2500 -CHAIN_STATE_KEEPER_MINIBLOCK_COMMIT_DEADLINE_MS=1000 -CHAIN_STATE_KEEPER_MINIBLOCK_SEAL_QUEUE_CAPACITY=10 -CHAIN_STATE_KEEPER_MAX_SINGLE_TX_GAS=6000000 -CHAIN_STATE_KEEPER_CLOSE_BLOCK_AT_GEOMETRY_PERCENTAGE=0.95 -CHAIN_STATE_KEEPER_CLOSE_BLOCK_AT_ETH_PARAMS_PERCENTAGE=0.95 -CHAIN_STATE_KEEPER_CLOSE_BLOCK_AT_GAS_PERCENTAGE=0.95 -CHAIN_STATE_KEEPER_REJECT_TX_AT_GEOMETRY_PERCENTAGE=0.95 -CHAIN_STATE_KEEPER_REJECT_TX_AT_ETH_PARAMS_PERCENTAGE=0.95 -CHAIN_STATE_KEEPER_REJECT_TX_AT_GAS_PERCENTAGE=0.95 -CHAIN_STATE_KEEPER_MINIMAL_L2_GAS_PRICE=100000000 -CHAIN_STATE_KEEPER_COMPUTE_OVERHEAD_PART=0 -CHAIN_STATE_KEEPER_PUBDATA_OVERHEAD_PART=1 -CHAIN_STATE_KEEPER_BATCH_OVERHEAD_L1_GAS=800000 -CHAIN_STATE_KEEPER_MAX_GAS_PER_BATCH=200000000 -CHAIN_STATE_KEEPER_MAX_PUBDATA_PER_BATCH=100000 -CHAIN_STATE_KEEPER_FEE_MODEL_VERSION=V1 -CHAIN_STATE_KEEPER_VALIDATION_COMPUTATIONAL_GAS_LIMIT=300000 -CHAIN_STATE_KEEPER_SAVE_CALL_TRACES=true -CHAIN_STATE_KEEPER_VIRTUAL_BLOCKS_INTERVAL=1 -CHAIN_STATE_KEEPER_VIRTUAL_BLOCKS_PER_MINIBLOCK=1 -CHAIN_STATE_KEEPER_BOOTLOADER_HASH=0x010007ede999d096c84553fb514d3d6ca76fbf39789dda76bfeda9f3ae06236e -CHAIN_STATE_KEEPER_DEFAULT_AA_HASH=0x0100055b041eb28aff6e3a6e0f37c31fd053fc9ef142683b05e5f0aee6934066 -CHAIN_OPERATIONS_MANAGER_DELAY_INTERVAL=100 -CHAIN_MEMPOOL_DELAY_INTERVAL=100 -CHAIN_MEMPOOL_SYNC_INTERVAL_MS=10 -CHAIN_MEMPOOL_SYNC_BATCH_SIZE=1000 
-CHAIN_MEMPOOL_CAPACITY=10000000 -CHAIN_MEMPOOL_STUCK_TX_TIMEOUT=86400 -CHAIN_MEMPOOL_REMOVE_STUCK_TXS=true -CHAIN_CIRCUIT_BREAKER_SYNC_INTERVAL_MS=30000 -CHAIN_CIRCUIT_BREAKER_HTTP_REQ_MAX_RETRY_NUMBER=5 -CHAIN_CIRCUIT_BREAKER_HTTP_REQ_RETRY_INTERVAL_SEC=2 -CONTRACT_VERIFIER_COMPILATION_TIMEOUT=30 -CONTRACT_VERIFIER_POLLING_INTERVAL=1000 -CONTRACT_VERIFIER_PROMETHEUS_PORT=3314 -CONTRACTS_ADMIN_FACET_ADDR=0x0c2D3f8f56bE6170c876f5D4D2140944D004304f -CONTRACTS_DIAMOND_INIT_ADDR=0x9E1946D8496815d901f1274E4D18a3D45430F069 -CONTRACTS_DIAMOND_UPGRADE_INIT_ADDR=0x592ed7C3D2642dd34cef46989763979d48193d49 -CONTRACTS_DEFAULT_UPGRADE_ADDR=0xdc21cD787277b6A9c82f58777a4010DFCC4B7634 -CONTRACTS_MAILBOX_FACET_ADDR=0xc76637ddcF17044ea1b9ed96D5434e61133A2FF6 -CONTRACTS_EXECUTOR_FACET_ADDR=0xd736304E2C83D10861b80c6253880A7451f3080c -CONTRACTS_GOVERNANCE_ADDR=0xc23e02646203D8Cc0A7EdeeC938dD1514f411b6D -CONTRACTS_GETTERS_FACET_ADDR=0x37dA982B5a1Df37BB07E38F0C5b1824B62A08Cfd -CONTRACTS_VERIFIER_ADDR=0x3D9bA3C048E77E7c2C597E7Aa1DF81C004FF4Ca5 -CONTRACTS_DIAMOND_PROXY_ADDR=0x101467c948C359432DfC8078C4eB45a64cd10b0F -CONTRACTS_L1_MULTICALL3_ADDR=0xe6Cf83F3A38B6b55C792953d2576f1165Ff8395b -CONTRACTS_L1_ERC20_BRIDGE_PROXY_ADDR=0x56f81F235C3C78Fb675A268B1c8ded199c91E7BB -CONTRACTS_L1_ERC20_BRIDGE_IMPL_ADDR=0x8d52961335EBAD57C10150EadeDee98EB79A64C9 -CONTRACTS_L2_ERC20_BRIDGE_ADDR=0x29576C296156ba223487679Fbe21b1321d159823 -CONTRACTS_L2_TESTNET_PAYMASTER_ADDR=0xFC073319977e314F251EAE6ae6bE76B0B3BAeeCF -CONTRACTS_L1_ALLOW_LIST_ADDR=0xFC073319977e314F251EAE6ae6bE76B0B3BAeeCF -CONTRACTS_CREATE2_FACTORY_ADDR=0xcd7e4A048BEf47c58D1dF2bcfceC8b30cd5DB906 -CONTRACTS_VALIDATOR_TIMELOCK_ADDR=0x1C7EC23dEFFf159aB22d2BeA4c11F6d9691D2333 -CONTRACTS_VALIDATOR_TIMELOCK_EXECUTION_DELAY=0 -CONTRACTS_RECURSION_SCHEDULER_LEVEL_VK_HASH=0x18518ce15be02847459f304b1567cb914ae357eca82af07c09582e78592b987b -CONTRACTS_RECURSION_NODE_LEVEL_VK_HASH=0x1186ec268d49f1905f8d9c1e9d39fc33e98c74f91d91a21b8f7ef78bd09a8db8 -CONTRACTS_RECURSION_LEAF_LEVEL_VK_HASH=0x101e08b00193e529145ee09823378ef51a3bc8966504064f1f6ba3f1ba863210 -CONTRACTS_RECURSION_CIRCUITS_SET_VKS_HASH=0x18c1639094f58177409186e8c48d9f577c9410901d2f1d486b3e7d6cf553ae4c -CONTRACTS_GENESIS_TX_HASH=0xb99ebfea46cbe05a21cd80fe5597d97b204befc52a16303f579c607dc1ac2e2e -CONTRACTS_GENESIS_ROOT=0xe25bb13818ce30e19210a13aee061a9bf3be7f72050c6cd99e03465c21101475 -CONTRACTS_PRIORITY_TX_MAX_GAS_LIMIT=72000000 -CONTRACTS_DEPLOY_L2_BRIDGE_COUNTERPART_GAS_LIMIT=10000000 -CONTRACTS_GENESIS_BATCH_COMMITMENT=0x901804a1842c321c9a5789308aa5d05d58679629b7ceeb374747a9d165c02794 -CONTRACTS_GENESIS_ROLLUP_LEAF_INDEX=26 -CONTRACTS_GENESIS_PROTOCOL_VERSION=22 -CONTRACTS_L1_WETH_BRIDGE_IMPL_ADDR=0x66e963BD9cdeBb91BdbDA0B7f9578D59887f53eE -CONTRACTS_L1_WETH_BRIDGE_PROXY_ADDR=0x75fBc31d31fBdeeE68d9255dDB5d011bC28B658F -CONTRACTS_L1_WETH_TOKEN_ADDR=0x723527c48d14D5eC7aF0Bc19EA11Eb683E030804 -CONTRACTS_L2_WETH_BRIDGE_ADDR=0x0e059E80Acd29e065323e9315F660468df59fFCc -CONTRACTS_L2_WETH_TOKEN_IMPL_ADDR=0x5E6D086F5eC079ADFF4FB3774CDf3e8D6a34F7E9 -CONTRACTS_L2_WETH_TOKEN_PROXY_ADDR=0x5E6D086F5eC079ADFF4FB3774CDf3e8D6a34F7E9 -CONTRACTS_FRI_RECURSION_LEAF_LEVEL_VK_HASH=0x400a4b532c6f072c00d1806ef299300d4c104f4ac55bd8698ade78894fcadc0a -CONTRACTS_FRI_RECURSION_NODE_LEVEL_VK_HASH=0x5a3ef282b21e12fe1f4438e5bb158fc5060b160559c5158c6389d62d9fe3d080 -CONTRACTS_FRI_RECURSION_SCHEDULER_LEVEL_VK_HASH=0x1405880dc3317d635bddb0ab62bf5d013e5d1f462161c1f7ac3289c7fef956da 
-CONTRACTS_SNARK_WRAPPER_VK_HASH=0x063c6fb5c70404c2867f413a8e35563ad3d040b1ad8c11786231bfdba7b472c7 -CONTRACTS_BLOB_VERSIONED_HASH_RETRIEVER_ADDR=0x4Ab1e9A16638E35C13CcA6067433463843989001 -CONTRACTS_INITIAL_PROTOCOL_VERSION=22 -CONTRACTS_BRIDGEHUB_PROXY_ADDR=0x0000000000000000000000000000000000000000 -CONTRACTS_BRIDGEHUB_IMPL_ADDR=0x0000000000000000000000000000000000000000 -CONTRACTS_STATE_TRANSITION_PROXY_ADDR=0x0000000000000000000000000000000000000000 -CONTRACTS_STATE_TRANSITION_IMPL_ADDR=0x0000000000000000000000000000000000000000 -CONTRACTS_TRANSPARENT_PROXY_ADMIN_ADDR=0x0000000000000000000000000000000000000000 -CONTRACTS_TEST_DUMMY_VERIFIER=true -CONTRACTS_TEST_EASY_PRIORITY_MODE=false -DATABASE_STATE_KEEPER_DB_PATH=./db/main/state_keeper -DATABASE_BACKUP_COUNT=5 -DATABASE_BACKUP_INTERVAL_MS=60000 -DATABASE_POOL_SIZE=50 -DATABASE_STATEMENT_TIMEOUT_SEC=300 -DATABASE_MERKLE_TREE_PATH=./db/main/tree -DATABASE_MERKLE_TREE_BACKUP_PATH=./db/main/backups -ETH_CLIENT_CHAIN_ID=9 -ETH_CLIENT_WEB3_URL=http://127.0.0.1:8545 -ETH_SENDER_SENDER_WAIT_CONFIRMATIONS=1 -ETH_SENDER_SENDER_EXPECTED_WAIT_TIME_BLOCK=30 -ETH_SENDER_SENDER_TX_POLL_PERIOD=1 -ETH_SENDER_SENDER_AGGREGATE_TX_POLL_PERIOD=1 -ETH_SENDER_SENDER_MAX_TXS_IN_FLIGHT=30 -ETH_SENDER_SENDER_PROOF_SENDING_MODE=SkipEveryProof -ETH_SENDER_SENDER_MAX_AGGREGATED_BLOCKS_TO_COMMIT=1 -ETH_SENDER_SENDER_MAX_AGGREGATED_BLOCKS_TO_EXECUTE=10 -ETH_SENDER_SENDER_AGGREGATED_BLOCK_COMMIT_DEADLINE=1 -ETH_SENDER_SENDER_AGGREGATED_BLOCK_PROVE_DEADLINE=10 -ETH_SENDER_SENDER_AGGREGATED_BLOCK_EXECUTE_DEADLINE=10 -ETH_SENDER_SENDER_TIMESTAMP_CRITERIA_MAX_ALLOWED_LAG=30 -ETH_SENDER_SENDER_MAX_ETH_TX_DATA_SIZE=120000 -ETH_SENDER_SENDER_AGGREGATED_PROOF_SIZES=1,4 -ETH_SENDER_SENDER_MAX_AGGREGATED_TX_GAS=4000000 -ETH_SENDER_SENDER_MAX_SINGLE_TX_GAS=6000000 -ETH_SENDER_SENDER_MAX_ACCEPTABLE_PRIORITY_FEE_IN_GWEI=100000000000 -ETH_SENDER_SENDER_PROOF_LOADING_MODE=OldProofFromDb -ETH_SENDER_SENDER_PUBDATA_SENDING_MODE=Blobs -ETH_SENDER_SENDER_OPERATOR_PRIVATE_KEY=0x7726827caac94a7f9e1b160f7ea819f172f7b6f9d2a97f992c38edeab82d4110 -ETH_SENDER_SENDER_OPERATOR_COMMIT_ETH_ADDR=0x36615Cf349d7F6344891B1e7CA7C72883F5dc049 -ETH_SENDER_SENDER_OPERATOR_BLOBS_PRIVATE_KEY=0xac1e735be8536c6534bb4f17f06f6afc73b2b5ba84ac2cfb12f7461b20c0bbe3 -ETH_SENDER_SENDER_OPERATOR_BLOBS_ETH_ADDR=0xa61464658AfeAf65CccaaFD3a512b69A83B77618 -ETH_SENDER_GAS_ADJUSTER_DEFAULT_PRIORITY_FEE_PER_GAS=1000000000 -ETH_SENDER_GAS_ADJUSTER_MAX_BASE_FEE_SAMPLES=10000 -ETH_SENDER_GAS_ADJUSTER_PRICING_FORMULA_PARAMETER_A=1.5 -ETH_SENDER_GAS_ADJUSTER_PRICING_FORMULA_PARAMETER_B=1.0005 -ETH_SENDER_GAS_ADJUSTER_INTERNAL_L1_PRICING_MULTIPLIER=0.8 -ETH_SENDER_GAS_ADJUSTER_POLL_PERIOD=5 -ETH_WATCH_CONFIRMATIONS_FOR_ETH_EVENT=0 -ETH_WATCH_ETH_NODE_POLL_INTERVAL=300 -ZKSYNC_ACTION=dont_ask -MISC_LOG_FORMAT=plain -MISC_SENTRY_URL=unset -MISC_SENTRY_PANIC_INTERVAL=1800 -MISC_SENTRY_ERROR_INTERVAL=10800 -MISC_OTLP_URL=unset -MISC_FEE_ACCOUNT_PRIVATE_KEY=0xd293c684d884d56f8d6abd64fc76757d3664904e309a0645baf8522ab6366d9e -OBJECT_STORE_MODE=FileBacked -OBJECT_STORE_FILE_BACKED_BASE_PATH=artifacts -PUBLIC_OBJECT_STORE_MODE=FileBacked -PUBLIC_OBJECT_STORE_FILE_BACKED_BASE_PATH=artifacts -PROVER_OBJECT_STORE_MODE=FileBacked -PROVER_OBJECT_STORE_FILE_BACKED_BASE_PATH=artifacts -SNAPSHOTS_OBJECT_STORE_MODE=FileBacked -SNAPSHOTS_OBJECT_STORE_FILE_BACKED_BASE_PATH=artifacts -NFS_SETUP_KEY_MOUNT_PATH=/home/setup_keys/ 
-RUST_LOG=zksync_node_framework=info,zksync_consensus_bft=info,zksync_consensus_network=info,zksync_consensus_storage=info,zksync_core=debug,zksync_server=debug,zksync_contract_verifier=debug,zksync_dal=info,zksync_eth_client=info,zksync_storage=info,zksync_db_manager=info,zksync_merkle_tree=info,zksync_state=debug,zksync_utils=debug,zksync_queued_job_processor=info,zksync_types=info,zksync_mempool=debug,loadnext=info,vm=info,zksync_object_store=info,zksync_external_node=info,zksync_witness_generator=info,zksync_prover_fri=info,zksync_witness_vector_generator=info,zksync_health_check=debug,zksync_proof_fri_compressor=info,vise_exporter=debug,snapshots_creator=debug, -RUST_BACKTRACE=full -RUST_LIB_BACKTRACE=1 -DATABASE_URL=postgres://postgres:notsecurepassword@127.0.0.1:5432/zksync_local -DATABASE_PROVER_URL=postgres://postgres:notsecurepassword@127.0.0.1:5432/prover_local -TEST_DATABASE_URL=postgres://postgres:notsecurepassword@localhost:5433/zksync_local_test -TEST_DATABASE_PROVER_URL=postgres://postgres:notsecurepassword@localhost:5433/prover_local_test -CONSENSUS_CONFIG_PATH=etc/env/consensus_config.yaml -CONSENSUS_SECRETS_PATH=etc/env/consensus_secrets.yaml -WITNESS_GENERATION_TIMEOUT_IN_SECS=900 -WITNESS_INITIAL_SETUP_KEY_PATH=./keys/setup/setup_2^22.key -WITNESS_KEY_DOWNLOAD_URL=https://storage.googleapis.com/matterlabs-setup-keys-us/setup-keys/setup_2^22.key -WITNESS_MAX_ATTEMPTS=1 -WITNESS_DUMP_ARGUMENTS_FOR_BLOCKS=2,3 -WITNESS_DATA_SOURCE=FromPostgres -HOUSE_KEEPER_L1_BATCH_METRICS_REPORTING_INTERVAL_MS=10000 -HOUSE_KEEPER_GPU_PROVER_QUEUE_REPORTING_INTERVAL_MS=10000 -HOUSE_KEEPER_PROVER_JOB_RETRYING_INTERVAL_MS=300000 -HOUSE_KEEPER_PROVER_STATS_REPORTING_INTERVAL_MS=5000 -HOUSE_KEEPER_WITNESS_JOB_MOVING_INTERVAL_MS=30000 -HOUSE_KEEPER_WITNESS_GENERATOR_STATS_REPORTING_INTERVAL_MS=10000 -HOUSE_KEEPER_FRI_WITNESS_JOB_MOVING_INTERVAL_MS=40000 -HOUSE_KEEPER_FRI_PROVER_JOB_RETRYING_INTERVAL_MS=30000 -HOUSE_KEEPER_FRI_WITNESS_GENERATOR_JOB_RETRYING_INTERVAL_MS=30000 -HOUSE_KEEPER_PROVER_DB_POOL_SIZE=2 -HOUSE_KEEPER_FRI_PROVER_STATS_REPORTING_INTERVAL_MS=30000 -HOUSE_KEEPER_FRI_PROOF_COMPRESSOR_JOB_RETRYING_INTERVAL_MS=30000 -HOUSE_KEEPER_FRI_PROOF_COMPRESSOR_STATS_REPORTING_INTERVAL_MS=10000 -FRI_PROVER_SETUP_DATA_PATH=/usr/src/setup-data -FRI_PROVER_PROMETHEUS_PORT=3315 -FRI_PROVER_MAX_ATTEMPTS=10 -FRI_PROVER_GENERATION_TIMEOUT_IN_SECS=600 -FRI_PROVER_BASE_LAYER_CIRCUIT_IDS_TO_BE_VERIFIED=1 -FRI_PROVER_RECURSIVE_LAYER_CIRCUIT_IDS_TO_BE_VERIFIED=1 -FRI_PROVER_SETUP_LOAD_MODE=FromDisk -FRI_PROVER_SPECIALIZED_GROUP_ID=100 -FRI_PROVER_WITNESS_VECTOR_GENERATOR_THREAD_COUNT=5 -FRI_PROVER_QUEUE_CAPACITY=10 -FRI_PROVER_WITNESS_VECTOR_RECEIVER_PORT=3316 -FRI_PROVER_ZONE_READ_URL=http://metadata.google.internal/computeMetadata/v1/instance/zone -FRI_PROVER_SHALL_SAVE_TO_PUBLIC_BUCKET=true -FRI_WITNESS_GENERATION_TIMEOUT_IN_SECS=900 -FRI_WITNESS_BASIC_GENERATION_TIMEOUT_IN_SECS=900 -FRI_WITNESS_LEAF_GENERATION_TIMEOUT_IN_SECS=900 -FRI_WITNESS_NODE_GENERATION_TIMEOUT_IN_SECS=900 -FRI_WITNESS_SCHEDULER_GENERATION_TIMEOUT_IN_SECS=900 -FRI_WITNESS_MAX_ATTEMPTS=10 -FRI_WITNESS_DUMP_ARGUMENTS_FOR_BLOCKS=1 -FRI_WITNESS_FORCE_PROCESS_BLOCK=1 -FRI_WITNESS_SHALL_SAVE_TO_PUBLIC_BUCKET=true -FRI_PROVER_GROUP_GROUP_0_0_CIRCUIT_ID=1 -FRI_PROVER_GROUP_GROUP_0_0_AGGREGATION_ROUND=3 -FRI_PROVER_GROUP_GROUP_0_1_CIRCUIT_ID=2 -FRI_PROVER_GROUP_GROUP_0_1_AGGREGATION_ROUND=2 -FRI_PROVER_GROUP_GROUP_1_0_CIRCUIT_ID=1 -FRI_PROVER_GROUP_GROUP_1_0_AGGREGATION_ROUND=0 -FRI_PROVER_GROUP_GROUP_2_0_CIRCUIT_ID=2 
-FRI_PROVER_GROUP_GROUP_2_0_AGGREGATION_ROUND=0 -FRI_PROVER_GROUP_GROUP_2_1_CIRCUIT_ID=4 -FRI_PROVER_GROUP_GROUP_2_1_AGGREGATION_ROUND=0 -FRI_PROVER_GROUP_GROUP_2_2_CIRCUIT_ID=6 -FRI_PROVER_GROUP_GROUP_2_2_AGGREGATION_ROUND=0 -FRI_PROVER_GROUP_GROUP_2_3_CIRCUIT_ID=9 -FRI_PROVER_GROUP_GROUP_2_3_AGGREGATION_ROUND=0 -FRI_PROVER_GROUP_GROUP_3_0_CIRCUIT_ID=3 -FRI_PROVER_GROUP_GROUP_3_0_AGGREGATION_ROUND=0 -FRI_PROVER_GROUP_GROUP_4_0_CIRCUIT_ID=11 -FRI_PROVER_GROUP_GROUP_4_0_AGGREGATION_ROUND=0 -FRI_PROVER_GROUP_GROUP_4_1_CIRCUIT_ID=12 -FRI_PROVER_GROUP_GROUP_4_1_AGGREGATION_ROUND=0 -FRI_PROVER_GROUP_GROUP_4_2_CIRCUIT_ID=13 -FRI_PROVER_GROUP_GROUP_4_2_AGGREGATION_ROUND=0 -FRI_PROVER_GROUP_GROUP_4_3_CIRCUIT_ID=255 -FRI_PROVER_GROUP_GROUP_4_3_AGGREGATION_ROUND=0 -FRI_PROVER_GROUP_GROUP_5_0_CIRCUIT_ID=5 -FRI_PROVER_GROUP_GROUP_5_0_AGGREGATION_ROUND=0 -FRI_PROVER_GROUP_GROUP_6_0_CIRCUIT_ID=3 -FRI_PROVER_GROUP_GROUP_6_0_AGGREGATION_ROUND=1 -FRI_PROVER_GROUP_GROUP_7_0_CIRCUIT_ID=7 -FRI_PROVER_GROUP_GROUP_7_0_AGGREGATION_ROUND=0 -FRI_PROVER_GROUP_GROUP_8_0_CIRCUIT_ID=8 -FRI_PROVER_GROUP_GROUP_8_0_AGGREGATION_ROUND=0 -FRI_PROVER_GROUP_GROUP_9_0_CIRCUIT_ID=12 -FRI_PROVER_GROUP_GROUP_9_0_AGGREGATION_ROUND=1 -FRI_PROVER_GROUP_GROUP_9_1_CIRCUIT_ID=13 -FRI_PROVER_GROUP_GROUP_9_1_AGGREGATION_ROUND=1 -FRI_PROVER_GROUP_GROUP_9_2_CIRCUIT_ID=14 -FRI_PROVER_GROUP_GROUP_9_2_AGGREGATION_ROUND=1 -FRI_PROVER_GROUP_GROUP_9_3_CIRCUIT_ID=15 -FRI_PROVER_GROUP_GROUP_9_3_AGGREGATION_ROUND=1 -FRI_PROVER_GROUP_GROUP_10_0_CIRCUIT_ID=10 -FRI_PROVER_GROUP_GROUP_10_0_AGGREGATION_ROUND=0 -FRI_PROVER_GROUP_GROUP_11_0_CIRCUIT_ID=7 -FRI_PROVER_GROUP_GROUP_11_0_AGGREGATION_ROUND=1 -FRI_PROVER_GROUP_GROUP_11_1_CIRCUIT_ID=8 -FRI_PROVER_GROUP_GROUP_11_1_AGGREGATION_ROUND=1 -FRI_PROVER_GROUP_GROUP_11_2_CIRCUIT_ID=10 -FRI_PROVER_GROUP_GROUP_11_2_AGGREGATION_ROUND=1 -FRI_PROVER_GROUP_GROUP_11_3_CIRCUIT_ID=11 -FRI_PROVER_GROUP_GROUP_11_3_AGGREGATION_ROUND=1 -FRI_PROVER_GROUP_GROUP_12_0_CIRCUIT_ID=4 -FRI_PROVER_GROUP_GROUP_12_0_AGGREGATION_ROUND=1 -FRI_PROVER_GROUP_GROUP_12_1_CIRCUIT_ID=5 -FRI_PROVER_GROUP_GROUP_12_1_AGGREGATION_ROUND=1 -FRI_PROVER_GROUP_GROUP_12_2_CIRCUIT_ID=6 -FRI_PROVER_GROUP_GROUP_12_2_AGGREGATION_ROUND=1 -FRI_PROVER_GROUP_GROUP_12_3_CIRCUIT_ID=9 -FRI_PROVER_GROUP_GROUP_12_3_AGGREGATION_ROUND=1 -PROOF_DATA_HANDLER_HTTP_PORT=3320 -PROOF_DATA_HANDLER_PROOF_GENERATION_TIMEOUT_IN_SECS=18000 -FRI_WITNESS_VECTOR_GENERATOR_PROVER_INSTANCE_WAIT_TIMEOUT_IN_SECS=200 -FRI_WITNESS_VECTOR_GENERATOR_PROVER_INSTANCE_POLL_TIME_IN_MILLI_SECS=250 -FRI_WITNESS_VECTOR_GENERATOR_PROMETHEUS_LISTENER_PORT=3314 -FRI_WITNESS_VECTOR_GENERATOR_PROMETHEUS_PUSHGATEWAY_URL=http://127.0.0.1:9091 -FRI_WITNESS_VECTOR_GENERATOR_PROMETHEUS_PUSH_INTERVAL_MS=100 -FRI_WITNESS_VECTOR_GENERATOR_SPECIALIZED_GROUP_ID=100 -FRI_WITNESS_VECTOR_GENERATOR_MAX_PROVER_RESERVATION_DURATION_IN_SECS=1000 -FRI_PROVER_GATEWAY_API_URL=http://127.0.0.1:3320 -FRI_PROVER_GATEWAY_API_POLL_DURATION_SECS=1000 -FRI_PROVER_GATEWAY_PROMETHEUS_LISTENER_PORT=3314 -FRI_PROVER_GATEWAY_PROMETHEUS_PUSHGATEWAY_URL=http://127.0.0.1:9091 -FRI_PROVER_GATEWAY_PROMETHEUS_PUSH_INTERVAL_MS=100 -FRI_PROOF_COMPRESSOR_COMPRESSION_MODE=1 -FRI_PROOF_COMPRESSOR_PROMETHEUS_LISTENER_PORT=3321 -FRI_PROOF_COMPRESSOR_PROMETHEUS_PUSHGATEWAY_URL=http://127.0.0.1:9091 -FRI_PROOF_COMPRESSOR_PROMETHEUS_PUSH_INTERVAL_MS=100 -FRI_PROOF_COMPRESSOR_GENERATION_TIMEOUT_IN_SECS=3600 -FRI_PROOF_COMPRESSOR_MAX_ATTEMPTS=5 -FRI_PROOF_COMPRESSOR_UNIVERSAL_SETUP_PATH=keys/setup/setup_2^26.key 
-FRI_PROOF_COMPRESSOR_UNIVERSAL_SETUP_DOWNLOAD_URL=https://storage.googleapis.com/matterlabs-setup-keys-us/setup-keys/setup_2^26.key -FRI_PROOF_COMPRESSOR_VERIFY_WRAPPER_PROOF=true From 7be4273f87c404ce1d5e591648e14b78d3ceba8e Mon Sep 17 00:00:00 2001 From: dimazhornyk Date: Wed, 22 May 2024 12:54:00 +0200 Subject: [PATCH 10/69] chore: update contracts --- contracts | 2 +- .../implementations/layers/da_dispatcher.rs | 27 ++++++++++++------- 2 files changed, 19 insertions(+), 10 deletions(-) diff --git a/contracts b/contracts index 9e8c28f90342..41fb9d918198 160000 --- a/contracts +++ b/contracts @@ -1 +1 @@ -Subproject commit 9e8c28f9034272f05c6f2fb781fb84ff37ec7116 +Subproject commit 41fb9d91819890dc756cb548000dd9ba98e7805c diff --git a/core/node/node_framework/src/implementations/layers/da_dispatcher.rs b/core/node/node_framework/src/implementations/layers/da_dispatcher.rs index 0c3f9b939b33..a4531e303214 100644 --- a/core/node/node_framework/src/implementations/layers/da_dispatcher.rs +++ b/core/node/node_framework/src/implementations/layers/da_dispatcher.rs @@ -1,4 +1,7 @@ -use zksync_config::configs::da_dispatcher::DADispatcherConfig; +use zksync_config::{ + configs::{da_dispatcher::DADispatcherConfig, eth_sender::PubdataSendingMode}, + EthConfig, +}; use zksync_da_layers::DataAvailabilityInterface; use zksync_dal::Core; use zksync_db_connection::connection_pool::ConnectionPool; @@ -13,11 +16,15 @@ use crate::{ #[derive(Debug)] pub struct DataAvailabilityDispatcherLayer { da_config: DADispatcherConfig, + eth_config: EthConfig, } impl DataAvailabilityDispatcherLayer { - pub fn new(da_config: DADispatcherConfig) -> Self { - Self { da_config } + pub fn new(da_config: DADispatcherConfig, eth_config: EthConfig) -> Self { + Self { + da_config, + eth_config, + } } } @@ -31,13 +38,15 @@ impl WiringLayer for DataAvailabilityDispatcherLayer { let master_pool_resource = context.get_resource::>().await?; let master_pool = master_pool_resource.get().await.unwrap(); - let da_client = zksync_da_client::new_da_client(self.da_config.clone()).await; + if self.eth_config.sender.unwrap().pubdata_sending_mode == PubdataSendingMode::Custom { + let da_client = zksync_da_client::new_da_client(self.da_config.clone()).await; - context.add_task(Box::new(DataAvailabilityDispatcherTask { - main_pool: master_pool, - da_config: self.da_config, - client: da_client, - })); + context.add_task(Box::new(DataAvailabilityDispatcherTask { + main_pool: master_pool, + da_config: self.da_config, + client: da_client, + })); + } Ok(()) } From 0f47dbe09b1d6f4fed0e1b07c702cbb336c36f77 Mon Sep 17 00:00:00 2001 From: dimazhornyk Date: Wed, 22 May 2024 18:14:21 +0200 Subject: [PATCH 11/69] chore: minor fixes --- Cargo.lock | 5 +- Cargo.toml | 2 +- core/lib/da_client/Cargo.toml | 3 +- core/lib/da_client/src/clients/gcs/mod.rs | 7 +- core/lib/da_client/src/clients/no_da/mod.rs | 7 +- core/lib/zksync_core_leftovers/Cargo.toml | 2 + core/lib/zksync_core_leftovers/src/lib.rs | 24 +++++++ core/node/da_dispatcher/src/da_dispatcher.rs | 71 +++++++++---------- .../node/node_framework/examples/main_node.rs | 10 +-- core/node/shared_metrics/src/lib.rs | 2 + 10 files changed, 82 insertions(+), 51 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 40afe91535c0..59e129c826e6 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -2823,7 +2823,7 @@ dependencies = [ [[package]] name = "hyperchain_da" version = "0.1.0" -source = 
"git+https://github.com/matter-labs/hyperchain-da.git?rev=69dc63c9c91553a31c4dd48f47c30ca44433dea3#69dc63c9c91553a31c4dd48f47c30ca44433dea3" +source = "git+https://github.com/matter-labs/hyperchain-da.git?rev=5629d56ec3d851a7ff03e72c5e0765c55af595d5#5629d56ec3d851a7ff03e72c5e0765c55af595d5" dependencies = [ "async-trait", ] @@ -8400,6 +8400,8 @@ dependencies = [ "zksync_consensus_utils", "zksync_contract_verification_server", "zksync_contracts", + "zksync_da_client", + "zksync_da_dispatcher", "zksync_dal", "zksync_db_connection", "zksync_eth_client", @@ -8474,6 +8476,7 @@ version = "0.1.0" dependencies = [ "async-trait", "hyperchain_da", + "tracing", "zksync_config", "zksync_object_store", "zksync_types", diff --git a/Cargo.toml b/Cargo.toml index 6f36c6b9b026..9add4e2335c2 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -202,7 +202,7 @@ zksync_consensus_storage = { version = "0.1.0", git = "https://github.com/matter zksync_consensus_utils = { version = "0.1.0", git = "https://github.com/matter-labs/era-consensus.git", rev = "3e6f101ee4124308c4c974caaa259d524549b0c6" } zksync_protobuf = { version = "0.1.0", git = "https://github.com/matter-labs/era-consensus.git", rev = "3e6f101ee4124308c4c974caaa259d524549b0c6" } zksync_protobuf_build = { version = "0.1.0", git = "https://github.com/matter-labs/era-consensus.git", rev = "3e6f101ee4124308c4c974caaa259d524549b0c6" } -zksync_da_layers = { package = "hyperchain_da", git = "https://github.com/matter-labs/hyperchain-da.git", rev = "69dc63c9c91553a31c4dd48f47c30ca44433dea3" } +zksync_da_layers = { package = "hyperchain_da", git = "https://github.com/matter-labs/hyperchain-da.git", rev = "5629d56ec3d851a7ff03e72c5e0765c55af595d5" } # "Local" dependencies multivm = { path = "core/lib/multivm" } diff --git a/core/lib/da_client/Cargo.toml b/core/lib/da_client/Cargo.toml index 7320451b4fee..29eee728c994 100644 --- a/core/lib/da_client/Cargo.toml +++ b/core/lib/da_client/Cargo.toml @@ -17,4 +17,5 @@ async-trait = "0.1.74" zksync_config.workspace = true zksync_types.workspace = true zksync_da_layers.workspace = true -zksync_object_store.workspace = true \ No newline at end of file +zksync_object_store.workspace = true +tracing = "0.1.40" \ No newline at end of file diff --git a/core/lib/da_client/src/clients/gcs/mod.rs b/core/lib/da_client/src/clients/gcs/mod.rs index d41300710c31..888229ea0ac5 100644 --- a/core/lib/da_client/src/clients/gcs/mod.rs +++ b/core/lib/da_client/src/clients/gcs/mod.rs @@ -43,8 +43,11 @@ impl DataAvailabilityInterface for GCSDAClient { }) } - async fn get_inclusion_data(&self, _: Vec) -> Result { - return Ok(InclusionData::default()); + async fn get_inclusion_data( + &self, + _: Vec, + ) -> Result, DataAvailabilityError> { + return Ok(Some(InclusionData::default())); } } diff --git a/core/lib/da_client/src/clients/no_da/mod.rs b/core/lib/da_client/src/clients/no_da/mod.rs index 80e56f95eed1..9e0ab5b77c67 100644 --- a/core/lib/da_client/src/clients/no_da/mod.rs +++ b/core/lib/da_client/src/clients/no_da/mod.rs @@ -27,8 +27,11 @@ impl DataAvailabilityInterface for NoDAClient { Ok(DispatchResponse::default()) } - async fn get_inclusion_data(&self, _: Vec) -> Result { - return Ok(InclusionData::default()); + async fn get_inclusion_data( + &self, + _: Vec, + ) -> Result, DataAvailabilityError> { + return Ok(Some(InclusionData::default())); } } diff --git a/core/lib/zksync_core_leftovers/Cargo.toml b/core/lib/zksync_core_leftovers/Cargo.toml index c394342c6996..d53956713848 100644 --- a/core/lib/zksync_core_leftovers/Cargo.toml +++ 
b/core/lib/zksync_core_leftovers/Cargo.toml @@ -45,6 +45,8 @@ zksync_commitment_generator.workspace = true zksync_house_keeper.workspace = true zksync_node_genesis.workspace = true zksync_eth_sender.workspace = true +zksync_da_dispatcher.workspace = true +zksync_da_client.workspace = true zksync_node_fee_model.workspace = true zksync_state_keeper.workspace = true zksync_metadata_calculator.workspace = true diff --git a/core/lib/zksync_core_leftovers/src/lib.rs b/core/lib/zksync_core_leftovers/src/lib.rs index 5cccd0639c33..cad47a46a9ea 100644 --- a/core/lib/zksync_core_leftovers/src/lib.rs +++ b/core/lib/zksync_core_leftovers/src/lib.rs @@ -35,6 +35,7 @@ use zksync_config::{ ApiConfig, DBConfig, EthWatchConfig, GenesisConfig, }; use zksync_contracts::governance_contract; +use zksync_da_dispatcher::DataAvailabilityDispatcher; use zksync_dal::{metrics::PostgresMetrics, ConnectionPool, Core, CoreDal}; use zksync_db_connection::healthcheck::ConnectionPoolHealthCheck; use zksync_eth_client::{clients::PKSigningClient, BoundEthInterface}; @@ -153,6 +154,8 @@ pub enum Component { Consensus, /// Component generating commitment for L1 batches. CommitmentGenerator, + /// Component sending a pubdata to the DA layers. + DADispatcher, } #[derive(Debug)] @@ -189,6 +192,7 @@ impl FromStr for Components { "proof_data_handler" => Ok(Components(vec![Component::ProofDataHandler])), "consensus" => Ok(Components(vec![Component::Consensus])), "commitment_generator" => Ok(Components(vec![Component::CommitmentGenerator])), + "da_dispatcher" => Ok(Components(vec![Component::DADispatcher])), other => Err(format!("{} is not a valid component name", other)), } } @@ -738,6 +742,26 @@ pub async fn initialize_components( .context("add_tee_verifier_input_producer_to_task_futures()")?; } + if components.contains(&Component::DADispatcher) { + let started_at = Instant::now(); + let da_config = configs + .da_dispatcher_config + .clone() + .context("da_dispatcher_config")?; + let da_dispatcher_pool = ConnectionPool::::singleton(database_secrets.master_url()?) 
+ .build() + .await + .context("failed to build da_dispatcher_pool")?; + let da_client = zksync_da_client::new_da_client(da_config.clone()).await; + let da_dispatcher = + DataAvailabilityDispatcher::new(da_dispatcher_pool, da_config, da_client); + task_futures.push(tokio::spawn(da_dispatcher.run(stop_receiver.clone()))); + + let elapsed = started_at.elapsed(); + APP_METRICS.init_latency[&InitStage::DADispatcher].set(elapsed); + tracing::info!("initialized DA dispatcher in {elapsed:?}"); + } + if components.contains(&Component::Housekeeper) { add_house_keeper_to_task_futures( configs, diff --git a/core/node/da_dispatcher/src/da_dispatcher.rs b/core/node/da_dispatcher/src/da_dispatcher.rs index 05fcc5ec5312..c089e57524f0 100644 --- a/core/node/da_dispatcher/src/da_dispatcher.rs +++ b/core/node/da_dispatcher/src/da_dispatcher.rs @@ -33,24 +33,24 @@ impl DataAvailabilityDispatcher { pub async fn run(self, stop_receiver: watch::Receiver) -> anyhow::Result<()> { let pool = self.pool.clone(); loop { - let mut storage = pool.connection_tagged("da_dispatcher").await.unwrap(); + let mut conn = pool.connection_tagged("da_dispatcher").await.unwrap(); if *stop_receiver.borrow() { tracing::info!("Stop signal received, da_dispatcher is shutting down"); break; } - if let Err(err) = self.loop_iteration(&mut storage).await { - tracing::warn!("da_dispatcher error {err:?}"); - } + self.dispatch(&mut conn).await?; + self.poll_for_inclusion(&mut conn).await?; + drop(conn); tokio::time::sleep(self.config.polling_interval()).await; } Ok(()) } - async fn dispatch(&self, storage: &mut Connection<'_, Core>) -> anyhow::Result<()> { - let batches = storage + async fn dispatch(&self, conn: &mut Connection<'_, Core>) -> anyhow::Result<()> { + let batches = conn .blocks_dal() .get_ready_for_da_dispatch_l1_batches(self.config.query_rows_limit() as usize) .await?; @@ -65,8 +65,7 @@ impl DataAvailabilityDispatcher { .map_err(Error::msg)?; dispatch_latency.observe(); - storage - .blocks_dal() + conn.blocks_dal() .insert_l1_batch_da(batch.l1_batch_number, dispatch_response.blob_id) .await?; @@ -79,43 +78,37 @@ impl DataAvailabilityDispatcher { Ok(()) } - async fn poll_for_inclusion(&self, storage: &mut Connection<'_, Core>) -> anyhow::Result<()> { - let storage_da = storage - .blocks_dal() - .get_da_blob_awaiting_inclusion() - .await?; + async fn poll_for_inclusion(&self, conn: &mut Connection<'_, Core>) -> anyhow::Result<()> { + let storage_da = conn.blocks_dal().get_da_blob_awaiting_inclusion().await?; if let Some(storage_da) = storage_da { - let inclusion_data = retry(self.config.max_retries(), || { - self.client - .get_inclusion_data(storage_da.blob_id.clone().unwrap()) - }) - .await - .map_err(Error::msg)?; - - storage - .blocks_dal() - .save_l1_batch_inclusion_data( - L1BatchNumber(storage_da.l1_batch_number as u32), - inclusion_data.data, - ) - .await?; - - METRICS.inclusion_latency.observe(Duration::from_secs( - (Utc::now().timestamp() - storage_da.created_at.timestamp()) as u64, - )); + let inclusion_data = self + .client + .get_inclusion_data(storage_da.blob_id.clone().unwrap()) + .await + .map_err(Error::msg)?; + + if let Some(inclusion_data) = inclusion_data { + tracing::info!( + "Storing inclusion data for batch_id: {}", + storage_da.l1_batch_number + ); + + conn.blocks_dal() + .save_l1_batch_inclusion_data( + L1BatchNumber(storage_da.l1_batch_number as u32), + inclusion_data.data, + ) + .await?; + + METRICS.inclusion_latency.observe(Duration::from_secs( + (Utc::now().timestamp() - 
storage_da.created_at.timestamp()) as u64, + )); + } } Ok(()) } - - #[tracing::instrument(skip(self, storage))] - async fn loop_iteration(&self, storage: &mut Connection<'_, Core>) -> anyhow::Result<()> { - self.dispatch(storage).await?; - self.poll_for_inclusion(storage).await?; - - Ok(()) - } } async fn retry(max_retries: u16, mut f: F) -> Result diff --git a/core/node/node_framework/examples/main_node.rs b/core/node/node_framework/examples/main_node.rs index c46e7d7bd322..7c54c3f75a39 100644 --- a/core/node/node_framework/examples/main_node.rs +++ b/core/node/node_framework/examples/main_node.rs @@ -16,8 +16,8 @@ use zksync_config::{ DatabaseSecrets, FriProofCompressorConfig, FriProverConfig, FriWitnessGeneratorConfig, L1Secrets, ObservabilityConfig, ProofDataHandlerConfig, }, - ApiConfig, ContractVerifierConfig, ContractsConfig, DBConfig, EthConfig, EthWatchConfig, - GasAdjusterConfig, GenesisConfig, ObjectStoreConfig, PostgresConfig, + ApiConfig, ContractVerifierConfig, ContractsConfig, DADispatcherConfig, DBConfig, EthConfig, + EthWatchConfig, GasAdjusterConfig, GenesisConfig, ObjectStoreConfig, PostgresConfig, }; use zksync_core_leftovers::temp_config_store::decode_yaml_repr; use zksync_env_config::FromEnv; @@ -318,11 +318,10 @@ impl MainNodeBuilder { fn add_da_dispatcher_layer(mut self) -> anyhow::Result { let eth_sender_config = EthConfig::from_env()?; - let l1_batch_commit_data_generator_mode = - GenesisConfig::from_env()?.l1_batch_commit_data_generator_mode; + let da_config = DADispatcherConfig::from_env()?; self.node.add_layer(DataAvailabilityDispatcherLayer::new( + da_config, eth_sender_config, - l1_batch_commit_data_generator_mode, )); Ok(self) } @@ -437,6 +436,7 @@ fn main() -> anyhow::Result<()> { .add_eth_watch_layer()? .add_pk_signing_client_layer()? .add_eth_sender_layer()? + .add_da_dispatcher_layer()? .add_proof_data_handler_layer()? .add_healthcheck_layer()? .add_tx_sender_layer()? 
diff --git a/core/node/shared_metrics/src/lib.rs b/core/node/shared_metrics/src/lib.rs index 46e80c8410fd..811c9325ecf7 100644 --- a/core/node/shared_metrics/src/lib.rs +++ b/core/node/shared_metrics/src/lib.rs @@ -29,6 +29,7 @@ pub enum InitStage { Tree, TeeVerifierInputProducer, Consensus, + DADispatcher, } impl fmt::Display for InitStage { @@ -44,6 +45,7 @@ impl fmt::Display for InitStage { Self::Tree => formatter.write_str("tree"), Self::TeeVerifierInputProducer => formatter.write_str("tee_verifier_input_producer"), Self::Consensus => formatter.write_str("consensus"), + Self::DADispatcher => formatter.write_str("da_dispatcher"), } } } From cbc4680a8d83cd2e761122cb2b49e776e12ee8c2 Mon Sep 17 00:00:00 2001 From: dimazhornyk Date: Wed, 22 May 2024 18:21:35 +0200 Subject: [PATCH 12/69] fix spellcheck --- checks-config/era.dic | 1 + 1 file changed, 1 insertion(+) diff --git a/checks-config/era.dic b/checks-config/era.dic index 2b9b8ce7239a..0af153bb1250 100644 --- a/checks-config/era.dic +++ b/checks-config/era.dic @@ -964,3 +964,4 @@ delegator Bbellman Sbellman DCMAKE +blob_id From f8efc23ea72e3e452cdc88592c8cf2c3338b64bf Mon Sep 17 00:00:00 2001 From: dimazhornyk Date: Thu, 23 May 2024 14:33:13 +0200 Subject: [PATCH 13/69] fix db query --- ...e031b83e156836da437153c9fba11af1d2aefccca139d.json} | 4 ++-- core/lib/dal/src/blocks_dal.rs | 3 +-- core/node/da_dispatcher/src/da_dispatcher.rs | 10 ++++++++-- etc/env/configs/dev_validium.toml | 6 ++++++ 4 files changed, 17 insertions(+), 6 deletions(-) rename core/lib/dal/.sqlx/{query-0763b6872f2d611d0247b3b6d8cc4018b155242a3ef3da602616a9bce668860b.json => query-78d7a3b404a3acf5fe1e031b83e156836da437153c9fba11af1d2aefccca139d.json} (79%) diff --git a/core/lib/dal/.sqlx/query-0763b6872f2d611d0247b3b6d8cc4018b155242a3ef3da602616a9bce668860b.json b/core/lib/dal/.sqlx/query-78d7a3b404a3acf5fe1e031b83e156836da437153c9fba11af1d2aefccca139d.json similarity index 79% rename from core/lib/dal/.sqlx/query-0763b6872f2d611d0247b3b6d8cc4018b155242a3ef3da602616a9bce668860b.json rename to core/lib/dal/.sqlx/query-78d7a3b404a3acf5fe1e031b83e156836da437153c9fba11af1d2aefccca139d.json index b2914b9d0d78..452c2a4c3c97 100644 --- a/core/lib/dal/.sqlx/query-0763b6872f2d611d0247b3b6d8cc4018b155242a3ef3da602616a9bce668860b.json +++ b/core/lib/dal/.sqlx/query-78d7a3b404a3acf5fe1e031b83e156836da437153c9fba11af1d2aefccca139d.json @@ -1,6 +1,6 @@ { "db_name": "PostgreSQL", - "query": "\n SELECT\n number,\n pubdata_input\n FROM\n l1_batches\n LEFT JOIN commitments ON commitments.l1_batch_number = l1_batches.number\n LEFT JOIN data_availability ON data_availability.l1_batch_number = l1_batches.number\n WHERE\n eth_commit_tx_id IS NULL\n AND number != 0\n AND commitment IS NOT NULL\n AND events_queue_commitment IS NOT NULL\n AND bootloader_initial_content_commitment IS NOT NULL\n AND data_availability.blob_id IS NOT NULL\n AND data_availability.inclusion_data IS NOT NULL\n ORDER BY\n number\n LIMIT\n $1\n ", + "query": "\n SELECT\n number,\n pubdata_input\n FROM\n l1_batches\n LEFT JOIN commitments ON commitments.l1_batch_number = l1_batches.number\n LEFT JOIN data_availability ON data_availability.l1_batch_number = l1_batches.number\n WHERE\n eth_commit_tx_id IS NULL\n AND number != 0\n AND commitment IS NOT NULL\n AND events_queue_commitment IS NOT NULL\n AND bootloader_initial_content_commitment IS NOT NULL\n AND data_availability.blob_id IS NULL\n ORDER BY\n number\n LIMIT\n $1\n ", "describe": { "columns": [ { @@ -24,5 +24,5 @@ true ] }, - "hash": 
"0763b6872f2d611d0247b3b6d8cc4018b155242a3ef3da602616a9bce668860b" + "hash": "78d7a3b404a3acf5fe1e031b83e156836da437153c9fba11af1d2aefccca139d" } diff --git a/core/lib/dal/src/blocks_dal.rs b/core/lib/dal/src/blocks_dal.rs index bf38f89ed14a..1a414cca5ca2 100644 --- a/core/lib/dal/src/blocks_dal.rs +++ b/core/lib/dal/src/blocks_dal.rs @@ -1848,8 +1848,7 @@ impl BlocksDal<'_, '_> { AND commitment IS NOT NULL AND events_queue_commitment IS NOT NULL AND bootloader_initial_content_commitment IS NOT NULL - AND data_availability.blob_id IS NOT NULL - AND data_availability.inclusion_data IS NOT NULL + AND data_availability.blob_id IS NULL ORDER BY number LIMIT diff --git a/core/node/da_dispatcher/src/da_dispatcher.rs b/core/node/da_dispatcher/src/da_dispatcher.rs index c089e57524f0..3abdd0ff5f7d 100644 --- a/core/node/da_dispatcher/src/da_dispatcher.rs +++ b/core/node/da_dispatcher/src/da_dispatcher.rs @@ -40,8 +40,12 @@ impl DataAvailabilityDispatcher { break; } - self.dispatch(&mut conn).await?; - self.poll_for_inclusion(&mut conn).await?; + if let Err(err) = self.dispatch(&mut conn).await { + tracing::warn!("dispatch error {err:?}"); + } + if let Err(err) = self.poll_for_inclusion(&mut conn).await { + tracing::warn!("poll_for_inclusion error {err:?}"); + } drop(conn); tokio::time::sleep(self.config.polling_interval()).await; @@ -55,6 +59,8 @@ impl DataAvailabilityDispatcher { .get_ready_for_da_dispatch_l1_batches(self.config.query_rows_limit() as usize) .await?; + println!("batches: {:?}", batches.len()); + for batch in batches { let dispatch_latency = METRICS.blob_dispatch_latency.start(); let dispatch_response = retry(self.config.max_retries(), || { diff --git a/etc/env/configs/dev_validium.toml b/etc/env/configs/dev_validium.toml index d1b415180bce..b655e58bb030 100644 --- a/etc/env/configs/dev_validium.toml +++ b/etc/env/configs/dev_validium.toml @@ -10,9 +10,15 @@ max_pubdata_per_batch=100000 fee_model_version="V2" l1_batch_commit_data_generator_mode="Validium" +[eth_sender] +sender_pubdata_sending_mode="Custom" + # This override will be removed soon but it is needed for now. 
[eth_sender.gas_adjuster] max_blob_base_fee=0 +[da_dispatcher] +da_mode="NoDA" + [_metadata] base=['dev.toml'] From 57306e4b9d98adc756b17287a575765ac786c466 Mon Sep 17 00:00:00 2001 From: dimazhornyk Date: Thu, 23 May 2024 18:33:46 +0200 Subject: [PATCH 14/69] simplify query, remove unnecessary log --- ...90a0e3a24c41249150230482a4f66f98862638003c8e09c6cf73.json} | 4 ++-- core/lib/dal/src/blocks_dal.rs | 1 - core/node/da_dispatcher/src/da_dispatcher.rs | 2 -- 3 files changed, 2 insertions(+), 5 deletions(-) rename core/lib/dal/.sqlx/{query-ca6debc6ab140f26ced294a2ea91e5ed7e700b7c05eb11a85d0ca63bb58d3ddc.json => query-f1812bf0478c90a0e3a24c41249150230482a4f66f98862638003c8e09c6cf73.json} (95%) diff --git a/core/lib/dal/.sqlx/query-ca6debc6ab140f26ced294a2ea91e5ed7e700b7c05eb11a85d0ca63bb58d3ddc.json b/core/lib/dal/.sqlx/query-f1812bf0478c90a0e3a24c41249150230482a4f66f98862638003c8e09c6cf73.json similarity index 95% rename from core/lib/dal/.sqlx/query-ca6debc6ab140f26ced294a2ea91e5ed7e700b7c05eb11a85d0ca63bb58d3ddc.json rename to core/lib/dal/.sqlx/query-f1812bf0478c90a0e3a24c41249150230482a4f66f98862638003c8e09c6cf73.json index 0f4724111af7..3f2f956c913d 100644 --- a/core/lib/dal/.sqlx/query-ca6debc6ab140f26ced294a2ea91e5ed7e700b7c05eb11a85d0ca63bb58d3ddc.json +++ b/core/lib/dal/.sqlx/query-f1812bf0478c90a0e3a24c41249150230482a4f66f98862638003c8e09c6cf73.json @@ -1,6 +1,6 @@ { "db_name": "PostgreSQL", - "query": "\n SELECT\n number,\n l1_batches.timestamp,\n l1_tx_count,\n l2_tx_count,\n bloom,\n priority_ops_onchain_data,\n hash,\n commitment,\n eth_prove_tx_id,\n eth_commit_tx_id,\n eth_execute_tx_id,\n l2_to_l1_logs,\n l2_to_l1_messages,\n used_contract_hashes,\n compressed_initial_writes,\n compressed_repeated_writes,\n l2_l1_merkle_root,\n rollup_last_leaf_index,\n zkporter_is_available,\n l1_batches.bootloader_code_hash,\n l1_batches.default_aa_code_hash,\n aux_data_hash,\n pass_through_data_hash,\n meta_parameters_hash,\n protocol_version,\n compressed_state_diffs,\n system_logs,\n events_queue_commitment,\n bootloader_initial_content_commitment,\n pubdata_input\n FROM\n l1_batches\n LEFT JOIN commitments ON commitments.l1_batch_number = l1_batches.number\n LEFT JOIN data_availability ON data_availability.l1_batch_number = l1_batches.number\n JOIN protocol_versions ON protocol_versions.id = l1_batches.protocol_version\n WHERE\n eth_commit_tx_id IS NULL\n AND number != 0\n AND protocol_versions.bootloader_code_hash = $1\n AND protocol_versions.default_account_code_hash = $2\n AND commitment IS NOT NULL\n AND (\n protocol_versions.id = $3\n OR protocol_versions.upgrade_tx_hash IS NULL\n )\n AND events_queue_commitment IS NOT NULL\n AND bootloader_initial_content_commitment IS NOT NULL\n AND data_availability.blob_id IS NOT NULL\n AND data_availability.inclusion_data IS NOT NULL\n ORDER BY\n number\n LIMIT\n $4\n ", + "query": "\n SELECT\n number,\n l1_batches.timestamp,\n l1_tx_count,\n l2_tx_count,\n bloom,\n priority_ops_onchain_data,\n hash,\n commitment,\n eth_prove_tx_id,\n eth_commit_tx_id,\n eth_execute_tx_id,\n l2_to_l1_logs,\n l2_to_l1_messages,\n used_contract_hashes,\n compressed_initial_writes,\n compressed_repeated_writes,\n l2_l1_merkle_root,\n rollup_last_leaf_index,\n zkporter_is_available,\n l1_batches.bootloader_code_hash,\n l1_batches.default_aa_code_hash,\n aux_data_hash,\n pass_through_data_hash,\n meta_parameters_hash,\n protocol_version,\n compressed_state_diffs,\n system_logs,\n events_queue_commitment,\n bootloader_initial_content_commitment,\n pubdata_input\n 
FROM\n l1_batches\n LEFT JOIN commitments ON commitments.l1_batch_number = l1_batches.number\n LEFT JOIN data_availability ON data_availability.l1_batch_number = l1_batches.number\n JOIN protocol_versions ON protocol_versions.id = l1_batches.protocol_version\n WHERE\n eth_commit_tx_id IS NULL\n AND number != 0\n AND protocol_versions.bootloader_code_hash = $1\n AND protocol_versions.default_account_code_hash = $2\n AND commitment IS NOT NULL\n AND (\n protocol_versions.id = $3\n OR protocol_versions.upgrade_tx_hash IS NULL\n )\n AND events_queue_commitment IS NOT NULL\n AND bootloader_initial_content_commitment IS NOT NULL\n AND data_availability.inclusion_data IS NOT NULL\n ORDER BY\n number\n LIMIT\n $4\n ", "describe": { "columns": [ { @@ -195,5 +195,5 @@ true ] }, - "hash": "ca6debc6ab140f26ced294a2ea91e5ed7e700b7c05eb11a85d0ca63bb58d3ddc" + "hash": "f1812bf0478c90a0e3a24c41249150230482a4f66f98862638003c8e09c6cf73" } diff --git a/core/lib/dal/src/blocks_dal.rs b/core/lib/dal/src/blocks_dal.rs index 1a414cca5ca2..5e00adb58265 100644 --- a/core/lib/dal/src/blocks_dal.rs +++ b/core/lib/dal/src/blocks_dal.rs @@ -1776,7 +1776,6 @@ impl BlocksDal<'_, '_> { ) AND events_queue_commitment IS NOT NULL AND bootloader_initial_content_commitment IS NOT NULL - AND data_availability.blob_id IS NOT NULL AND data_availability.inclusion_data IS NOT NULL ORDER BY number diff --git a/core/node/da_dispatcher/src/da_dispatcher.rs b/core/node/da_dispatcher/src/da_dispatcher.rs index 3abdd0ff5f7d..42f6e8dfdc83 100644 --- a/core/node/da_dispatcher/src/da_dispatcher.rs +++ b/core/node/da_dispatcher/src/da_dispatcher.rs @@ -59,8 +59,6 @@ impl DataAvailabilityDispatcher { .get_ready_for_da_dispatch_l1_batches(self.config.query_rows_limit() as usize) .await?; - println!("batches: {:?}", batches.len()); - for batch in batches { let dispatch_latency = METRICS.blob_dispatch_latency.start(); let dispatch_response = retry(self.config.max_retries(), || { From 484e6ccdc40b1745b35170fc2b2a730c297a2f8a Mon Sep 17 00:00:00 2001 From: dimazhornyk Date: Fri, 24 May 2024 00:31:44 +0200 Subject: [PATCH 15/69] fix queries for Rollup, add da_dispatcher to CI tests --- .github/workflows/ci-core-reusable.yml | 4 ++-- core/bin/zksync_server/src/main.rs | 2 +- core/lib/config/src/testonly.rs | 5 +++-- ...d37540e1c47ff57db77d43de8f7a0a27f3e944de92da7.json} | 5 +++-- core/lib/dal/src/blocks_dal.rs | 10 ++++++++-- core/lib/zksync_core_leftovers/src/lib.rs | 5 ++++- core/node/eth_sender/src/aggregator.rs | 1 + 7 files changed, 22 insertions(+), 10 deletions(-) rename core/lib/dal/.sqlx/{query-f1812bf0478c90a0e3a24c41249150230482a4f66f98862638003c8e09c6cf73.json => query-ef88175ed0a57ec21f6d37540e1c47ff57db77d43de8f7a0a27f3e944de92da7.json} (95%) diff --git a/.github/workflows/ci-core-reusable.yml b/.github/workflows/ci-core-reusable.yml index 3d38cb38a08f..6818045b7c57 100644 --- a/.github/workflows/ci-core-reusable.yml +++ b/.github/workflows/ci-core-reusable.yml @@ -134,7 +134,7 @@ jobs: base_token: ["Eth", "Custom"] deployment_mode: ["Rollup", "Validium"] env: - SERVER_COMPONENTS: "api,tree,eth,state_keeper,housekeeper,commitment_generator${{ matrix.consensus && ',consensus' || '' }}" + SERVER_COMPONENTS: "api,tree,eth,state_keeper,housekeeper,commitment_generator${{ matrix.consensus && ',consensus' || '' }}${{ [matrix.deployment_mode == 'Validium'] && ',da_dispatcher' || '' }}" runs-on: [matterlabs-ci-runner] steps: @@ -301,7 +301,7 @@ jobs: runs-on: [matterlabs-ci-runner] env: - SERVER_COMPONENTS: 
"api,tree,eth,state_keeper,housekeeper,commitment_generator${{ matrix.consensus && ',consensus' || '' }}" + SERVER_COMPONENTS: "api,tree,eth,state_keeper,housekeeper,commitment_generator${{ matrix.consensus && ',consensus' || '' }}${{ [matrix.deployment_mode == 'Validium'] && ',da_dispatcher' || '' }}" EXT_NODE_FLAGS: "${{ matrix.consensus && '-- --enable-consensus' || '' }}" steps: diff --git a/core/bin/zksync_server/src/main.rs b/core/bin/zksync_server/src/main.rs index 0422a1f11fa2..83fc8a834f55 100644 --- a/core/bin/zksync_server/src/main.rs +++ b/core/bin/zksync_server/src/main.rs @@ -45,7 +45,7 @@ struct Cli { /// Comma-separated list of components to launch. #[arg( long, - default_value = "api,tree,eth,state_keeper,housekeeper,tee_verifier_input_producer,commitment_generator" + default_value = "api,tree,eth,state_keeper,housekeeper,tee_verifier_input_producer,commitment_generator,da_dispatcher" )] components: ComponentsToRun, /// Path to the yaml config. If set, it will be used instead of env vars. diff --git a/core/lib/config/src/testonly.rs b/core/lib/config/src/testonly.rs index f914a0390d43..3bc1ebfedccf 100644 --- a/core/lib/config/src/testonly.rs +++ b/core/lib/config/src/testonly.rs @@ -340,9 +340,10 @@ impl Distribution for EncodeDist { impl Distribution for EncodeDist { fn sample(&self, rng: &mut R) -> configs::eth_sender::PubdataSendingMode { type T = configs::eth_sender::PubdataSendingMode; - match rng.gen_range(0..2) { + match rng.gen_range(0..3) { 0 => T::Calldata, - _ => T::Blobs, + 1 => T::Blobs, + _ => T::Custom, } } } diff --git a/core/lib/dal/.sqlx/query-f1812bf0478c90a0e3a24c41249150230482a4f66f98862638003c8e09c6cf73.json b/core/lib/dal/.sqlx/query-ef88175ed0a57ec21f6d37540e1c47ff57db77d43de8f7a0a27f3e944de92da7.json similarity index 95% rename from core/lib/dal/.sqlx/query-f1812bf0478c90a0e3a24c41249150230482a4f66f98862638003c8e09c6cf73.json rename to core/lib/dal/.sqlx/query-ef88175ed0a57ec21f6d37540e1c47ff57db77d43de8f7a0a27f3e944de92da7.json index 3f2f956c913d..ea40ed3a3a64 100644 --- a/core/lib/dal/.sqlx/query-f1812bf0478c90a0e3a24c41249150230482a4f66f98862638003c8e09c6cf73.json +++ b/core/lib/dal/.sqlx/query-ef88175ed0a57ec21f6d37540e1c47ff57db77d43de8f7a0a27f3e944de92da7.json @@ -1,6 +1,6 @@ { "db_name": "PostgreSQL", - "query": "\n SELECT\n number,\n l1_batches.timestamp,\n l1_tx_count,\n l2_tx_count,\n bloom,\n priority_ops_onchain_data,\n hash,\n commitment,\n eth_prove_tx_id,\n eth_commit_tx_id,\n eth_execute_tx_id,\n l2_to_l1_logs,\n l2_to_l1_messages,\n used_contract_hashes,\n compressed_initial_writes,\n compressed_repeated_writes,\n l2_l1_merkle_root,\n rollup_last_leaf_index,\n zkporter_is_available,\n l1_batches.bootloader_code_hash,\n l1_batches.default_aa_code_hash,\n aux_data_hash,\n pass_through_data_hash,\n meta_parameters_hash,\n protocol_version,\n compressed_state_diffs,\n system_logs,\n events_queue_commitment,\n bootloader_initial_content_commitment,\n pubdata_input\n FROM\n l1_batches\n LEFT JOIN commitments ON commitments.l1_batch_number = l1_batches.number\n LEFT JOIN data_availability ON data_availability.l1_batch_number = l1_batches.number\n JOIN protocol_versions ON protocol_versions.id = l1_batches.protocol_version\n WHERE\n eth_commit_tx_id IS NULL\n AND number != 0\n AND protocol_versions.bootloader_code_hash = $1\n AND protocol_versions.default_account_code_hash = $2\n AND commitment IS NOT NULL\n AND (\n protocol_versions.id = $3\n OR protocol_versions.upgrade_tx_hash IS NULL\n )\n AND events_queue_commitment IS NOT NULL\n AND 
bootloader_initial_content_commitment IS NOT NULL\n AND data_availability.inclusion_data IS NOT NULL\n ORDER BY\n number\n LIMIT\n $4\n ", + "query": "\n SELECT\n number,\n l1_batches.timestamp,\n l1_tx_count,\n l2_tx_count,\n bloom,\n priority_ops_onchain_data,\n hash,\n commitment,\n eth_prove_tx_id,\n eth_commit_tx_id,\n eth_execute_tx_id,\n l2_to_l1_logs,\n l2_to_l1_messages,\n used_contract_hashes,\n compressed_initial_writes,\n compressed_repeated_writes,\n l2_l1_merkle_root,\n rollup_last_leaf_index,\n zkporter_is_available,\n l1_batches.bootloader_code_hash,\n l1_batches.default_aa_code_hash,\n aux_data_hash,\n pass_through_data_hash,\n meta_parameters_hash,\n protocol_version,\n compressed_state_diffs,\n system_logs,\n events_queue_commitment,\n bootloader_initial_content_commitment,\n pubdata_input\n FROM\n l1_batches\n LEFT JOIN commitments ON commitments.l1_batch_number = l1_batches.number\n LEFT JOIN data_availability ON data_availability.l1_batch_number = l1_batches.number\n JOIN protocol_versions ON protocol_versions.id = l1_batches.protocol_version\n WHERE\n eth_commit_tx_id IS NULL\n AND number != 0\n AND protocol_versions.bootloader_code_hash = $1\n AND protocol_versions.default_account_code_hash = $2\n AND commitment IS NOT NULL\n AND (\n protocol_versions.id = $3\n OR protocol_versions.upgrade_tx_hash IS NULL\n )\n AND events_queue_commitment IS NOT NULL\n AND bootloader_initial_content_commitment IS NOT NULL\n AND (data_availability.inclusion_data IS NOT NULL OR $4 IS TRUE)\n ORDER BY\n number\n LIMIT\n $5\n ", "describe": { "columns": [ { @@ -159,6 +159,7 @@ "Bytea", "Bytea", "Int4", + "Bool", "Int8" ] }, @@ -195,5 +196,5 @@ true ] }, - "hash": "f1812bf0478c90a0e3a24c41249150230482a4f66f98862638003c8e09c6cf73" + "hash": "ef88175ed0a57ec21f6d37540e1c47ff57db77d43de8f7a0a27f3e944de92da7" } diff --git a/core/lib/dal/src/blocks_dal.rs b/core/lib/dal/src/blocks_dal.rs index 5e00adb58265..2f7ca63eedef 100644 --- a/core/lib/dal/src/blocks_dal.rs +++ b/core/lib/dal/src/blocks_dal.rs @@ -1724,6 +1724,7 @@ impl BlocksDal<'_, '_> { bootloader_hash: H256, default_aa_hash: H256, protocol_version_id: ProtocolVersionId, + is_rollup: bool, ) -> anyhow::Result> { let raw_batches = sqlx::query_as!( StorageL1Batch, @@ -1776,15 +1777,19 @@ impl BlocksDal<'_, '_> { ) AND events_queue_commitment IS NOT NULL AND bootloader_initial_content_commitment IS NOT NULL - AND data_availability.inclusion_data IS NOT NULL + AND ( + data_availability.inclusion_data IS NOT NULL + OR $4 IS TRUE + ) ORDER BY number LIMIT - $4 + $5 "#, bootloader_hash.as_bytes(), default_aa_hash.as_bytes(), protocol_version_id as i32, + is_rollup, limit as i64, ) .instrument("get_ready_for_commit_l1_batches") @@ -1792,6 +1797,7 @@ impl BlocksDal<'_, '_> { .with_arg("bootloader_hash", &bootloader_hash) .with_arg("default_aa_hash", &default_aa_hash) .with_arg("protocol_version_id", &protocol_version_id) + .with_arg("is_rollup", &is_rollup) .fetch_all(self.storage) .await?; diff --git a/core/lib/zksync_core_leftovers/src/lib.rs b/core/lib/zksync_core_leftovers/src/lib.rs index cad47a46a9ea..214300a11c83 100644 --- a/core/lib/zksync_core_leftovers/src/lib.rs +++ b/core/lib/zksync_core_leftovers/src/lib.rs @@ -28,6 +28,7 @@ use zksync_config::{ chain::{CircuitBreakerConfig, MempoolConfig, OperationsManagerConfig, StateKeeperConfig}, consensus::ConsensusConfig, database::{MerkleTreeConfig, MerkleTreeMode}, + eth_sender::PubdataSendingMode, wallets, wallets::Wallets, ContractsConfig, DatabaseSecrets, GeneralConfig, Secrets, @@ 
-742,7 +743,9 @@ pub async fn initialize_components( .context("add_tee_verifier_input_producer_to_task_futures()")?; } - if components.contains(&Component::DADispatcher) { + if components.contains(&Component::DADispatcher) + && eth.sender?.pubdata_sending_mode == PubdataSendingMode::Custom + { let started_at = Instant::now(); let da_config = configs .da_dispatcher_config diff --git a/core/node/eth_sender/src/aggregator.rs b/core/node/eth_sender/src/aggregator.rs index aa9b31abd426..e18c6d5a2bd3 100644 --- a/core/node/eth_sender/src/aggregator.rs +++ b/core/node/eth_sender/src/aggregator.rs @@ -216,6 +216,7 @@ impl Aggregator { base_system_contracts_hashes.bootloader, base_system_contracts_hashes.default_aa, protocol_version_id, + self.commitment_mode == L1BatchCommitmentMode::Rollup, ) .await .unwrap() From c81abc0dce9c50556eaab3f3b44638dc17f3b988 Mon Sep 17 00:00:00 2001 From: dimazhornyk Date: Fri, 24 May 2024 00:36:54 +0200 Subject: [PATCH 16/69] sqlx --- ...591cbdeeddc398e1068650eba62c2478c08d5a6216c85014eb5f.json} | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) rename core/lib/dal/.sqlx/{query-ef88175ed0a57ec21f6d37540e1c47ff57db77d43de8f7a0a27f3e944de92da7.json => query-f0ea0bc1d776591cbdeeddc398e1068650eba62c2478c08d5a6216c85014eb5f.json} (95%) diff --git a/core/lib/dal/.sqlx/query-ef88175ed0a57ec21f6d37540e1c47ff57db77d43de8f7a0a27f3e944de92da7.json b/core/lib/dal/.sqlx/query-f0ea0bc1d776591cbdeeddc398e1068650eba62c2478c08d5a6216c85014eb5f.json similarity index 95% rename from core/lib/dal/.sqlx/query-ef88175ed0a57ec21f6d37540e1c47ff57db77d43de8f7a0a27f3e944de92da7.json rename to core/lib/dal/.sqlx/query-f0ea0bc1d776591cbdeeddc398e1068650eba62c2478c08d5a6216c85014eb5f.json index ea40ed3a3a64..c1ff828e33c9 100644 --- a/core/lib/dal/.sqlx/query-ef88175ed0a57ec21f6d37540e1c47ff57db77d43de8f7a0a27f3e944de92da7.json +++ b/core/lib/dal/.sqlx/query-f0ea0bc1d776591cbdeeddc398e1068650eba62c2478c08d5a6216c85014eb5f.json @@ -1,6 +1,6 @@ { "db_name": "PostgreSQL", - "query": "\n SELECT\n number,\n l1_batches.timestamp,\n l1_tx_count,\n l2_tx_count,\n bloom,\n priority_ops_onchain_data,\n hash,\n commitment,\n eth_prove_tx_id,\n eth_commit_tx_id,\n eth_execute_tx_id,\n l2_to_l1_logs,\n l2_to_l1_messages,\n used_contract_hashes,\n compressed_initial_writes,\n compressed_repeated_writes,\n l2_l1_merkle_root,\n rollup_last_leaf_index,\n zkporter_is_available,\n l1_batches.bootloader_code_hash,\n l1_batches.default_aa_code_hash,\n aux_data_hash,\n pass_through_data_hash,\n meta_parameters_hash,\n protocol_version,\n compressed_state_diffs,\n system_logs,\n events_queue_commitment,\n bootloader_initial_content_commitment,\n pubdata_input\n FROM\n l1_batches\n LEFT JOIN commitments ON commitments.l1_batch_number = l1_batches.number\n LEFT JOIN data_availability ON data_availability.l1_batch_number = l1_batches.number\n JOIN protocol_versions ON protocol_versions.id = l1_batches.protocol_version\n WHERE\n eth_commit_tx_id IS NULL\n AND number != 0\n AND protocol_versions.bootloader_code_hash = $1\n AND protocol_versions.default_account_code_hash = $2\n AND commitment IS NOT NULL\n AND (\n protocol_versions.id = $3\n OR protocol_versions.upgrade_tx_hash IS NULL\n )\n AND events_queue_commitment IS NOT NULL\n AND bootloader_initial_content_commitment IS NOT NULL\n AND (data_availability.inclusion_data IS NOT NULL OR $4 IS TRUE)\n ORDER BY\n number\n LIMIT\n $5\n ", + "query": "\n SELECT\n number,\n l1_batches.timestamp,\n l1_tx_count,\n l2_tx_count,\n bloom,\n priority_ops_onchain_data,\n 
hash,\n commitment,\n eth_prove_tx_id,\n eth_commit_tx_id,\n eth_execute_tx_id,\n l2_to_l1_logs,\n l2_to_l1_messages,\n used_contract_hashes,\n compressed_initial_writes,\n compressed_repeated_writes,\n l2_l1_merkle_root,\n rollup_last_leaf_index,\n zkporter_is_available,\n l1_batches.bootloader_code_hash,\n l1_batches.default_aa_code_hash,\n aux_data_hash,\n pass_through_data_hash,\n meta_parameters_hash,\n protocol_version,\n compressed_state_diffs,\n system_logs,\n events_queue_commitment,\n bootloader_initial_content_commitment,\n pubdata_input\n FROM\n l1_batches\n LEFT JOIN commitments ON commitments.l1_batch_number = l1_batches.number\n LEFT JOIN data_availability ON data_availability.l1_batch_number = l1_batches.number\n JOIN protocol_versions ON protocol_versions.id = l1_batches.protocol_version\n WHERE\n eth_commit_tx_id IS NULL\n AND number != 0\n AND protocol_versions.bootloader_code_hash = $1\n AND protocol_versions.default_account_code_hash = $2\n AND commitment IS NOT NULL\n AND (\n protocol_versions.id = $3\n OR protocol_versions.upgrade_tx_hash IS NULL\n )\n AND events_queue_commitment IS NOT NULL\n AND bootloader_initial_content_commitment IS NOT NULL\n AND (\n data_availability.inclusion_data IS NOT NULL\n OR $4 IS TRUE\n )\n ORDER BY\n number\n LIMIT\n $5\n ", "describe": { "columns": [ { @@ -196,5 +196,5 @@ true ] }, - "hash": "ef88175ed0a57ec21f6d37540e1c47ff57db77d43de8f7a0a27f3e944de92da7" + "hash": "f0ea0bc1d776591cbdeeddc398e1068650eba62c2478c08d5a6216c85014eb5f" } From 918aab15a7e7e05c2e28892d6660997bb97fa2d7 Mon Sep 17 00:00:00 2001 From: dimazhornyk Date: Fri, 24 May 2024 00:44:23 +0200 Subject: [PATCH 17/69] fix config unwrap --- core/lib/zksync_core_leftovers/src/lib.rs | 7 ++++++- 1 file changed, 6 insertions(+), 1 deletion(-) diff --git a/core/lib/zksync_core_leftovers/src/lib.rs b/core/lib/zksync_core_leftovers/src/lib.rs index 214300a11c83..9531bff7f3fb 100644 --- a/core/lib/zksync_core_leftovers/src/lib.rs +++ b/core/lib/zksync_core_leftovers/src/lib.rs @@ -744,7 +744,12 @@ pub async fn initialize_components( } if components.contains(&Component::DADispatcher) - && eth.sender?.pubdata_sending_mode == PubdataSendingMode::Custom + && eth + .sender + .clone() + .context("eth_sender")? 
+ .pubdata_sending_mode + == PubdataSendingMode::Custom { let started_at = Instant::now(); let da_config = configs From cd3ce03ff23bc8b531229e1cec32ee954c709fc0 Mon Sep 17 00:00:00 2001 From: dimazhornyk Date: Fri, 24 May 2024 13:52:10 +0200 Subject: [PATCH 18/69] simplify the workflow --- .github/workflows/ci-core-reusable.yml | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/.github/workflows/ci-core-reusable.yml b/.github/workflows/ci-core-reusable.yml index 95f0f1fdb935..663567ba904e 100644 --- a/.github/workflows/ci-core-reusable.yml +++ b/.github/workflows/ci-core-reusable.yml @@ -134,7 +134,7 @@ jobs: base_token: ["Eth", "Custom"] deployment_mode: ["Rollup", "Validium"] env: - SERVER_COMPONENTS: "api,tree,eth,state_keeper,housekeeper,commitment_generator${{ matrix.consensus && ',consensus' || '' }}${{ [matrix.deployment_mode == 'Validium'] && ',da_dispatcher' || '' }}" + SERVER_COMPONENTS: "api,tree,eth,state_keeper,housekeeper,commitment_generator,da_dispatcher${{ matrix.consensus && ',consensus' || '' }}" runs-on: [matterlabs-ci-runner] steps: @@ -301,7 +301,7 @@ jobs: runs-on: [matterlabs-ci-runner] env: - SERVER_COMPONENTS: "api,tree,eth,state_keeper,housekeeper,commitment_generator${{ matrix.consensus && ',consensus' || '' }}${{ [matrix.deployment_mode == 'Validium'] && ',da_dispatcher' || '' }}" + SERVER_COMPONENTS: "api,tree,eth,state_keeper,housekeeper,commitment_generator,da_dispatcher${{ matrix.consensus && ',consensus' || '' }}" EXT_NODE_FLAGS: "${{ matrix.consensus && '-- --enable-consensus' || '' }}" steps: From a924d8cdfe9a3810749609cc3dca89f7d5fa2e29 Mon Sep 17 00:00:00 2001 From: dimazhornyk Date: Fri, 24 May 2024 14:29:52 +0200 Subject: [PATCH 19/69] try fixing workflow --- .github/workflows/ci-core-reusable.yml | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/.github/workflows/ci-core-reusable.yml b/.github/workflows/ci-core-reusable.yml index 663567ba904e..2373f2298f39 100644 --- a/.github/workflows/ci-core-reusable.yml +++ b/.github/workflows/ci-core-reusable.yml @@ -134,7 +134,7 @@ jobs: base_token: ["Eth", "Custom"] deployment_mode: ["Rollup", "Validium"] env: - SERVER_COMPONENTS: "api,tree,eth,state_keeper,housekeeper,commitment_generator,da_dispatcher${{ matrix.consensus && ',consensus' || '' }}" + SERVER_COMPONENTS: "api,tree,eth,state_keeper,housekeeper,commitment_generator${{ matrix.consensus && ',consensus' || '' }}${{ matrix.deployment_mode == 'Validium' && ',da_dispatcher' || '' }}" runs-on: [matterlabs-ci-runner] steps: @@ -301,7 +301,7 @@ jobs: runs-on: [matterlabs-ci-runner] env: - SERVER_COMPONENTS: "api,tree,eth,state_keeper,housekeeper,commitment_generator,da_dispatcher${{ matrix.consensus && ',consensus' || '' }}" + SERVER_COMPONENTS: "api,tree,eth,state_keeper,housekeeper,commitment_generator${{ matrix.consensus && ',consensus' || '' }}${{ matrix.deployment_mode == 'Validium' && ',da_dispatcher' || '' }}" EXT_NODE_FLAGS: "${{ matrix.consensus && '-- --enable-consensus' || '' }}" steps: From 9635e611697aa435e960b183c9299e9de2e82aa0 Mon Sep 17 00:00:00 2001 From: dimazhornyk Date: Fri, 24 May 2024 14:50:04 +0200 Subject: [PATCH 20/69] update env for validium in docker --- etc/env/configs/dev_validium_docker.toml | 6 ++++++ 1 file changed, 6 insertions(+) diff --git a/etc/env/configs/dev_validium_docker.toml b/etc/env/configs/dev_validium_docker.toml index 4392ca8d2711..66ed37f320a6 100644 --- a/etc/env/configs/dev_validium_docker.toml +++ b/etc/env/configs/dev_validium_docker.toml @@ -19,6 +19,12 @@ 
fee_model_version = "V2" l1_batch_commit_data_generator_mode = "Validium" miniblock_iteration_interval = 50 +[eth_sender] +sender_pubdata_sending_mode="Custom" + +[da_dispatcher] +da_mode="NoDA" + [eth_client] web3_url = "http://reth:8545" From 3f55deb1fb7127fb56890bbdc92c0777e7b70c8b Mon Sep 17 00:00:00 2001 From: dimazhornyk Date: Fri, 24 May 2024 16:37:08 +0200 Subject: [PATCH 21/69] add PUBDATA_SOURCE_CUSTOM to consistency checker --- core/node/consistency_checker/src/lib.rs | 2 ++ 1 file changed, 2 insertions(+) diff --git a/core/node/consistency_checker/src/lib.rs b/core/node/consistency_checker/src/lib.rs index f1739bceec28..43dec4fa8e55 100644 --- a/core/node/consistency_checker/src/lib.rs +++ b/core/node/consistency_checker/src/lib.rs @@ -261,6 +261,7 @@ pub fn detect_da( /// These are used by the L1 Contracts to indicate what DA layer is used for pubdata const PUBDATA_SOURCE_CALLDATA: u8 = 0; const PUBDATA_SOURCE_BLOBS: u8 = 1; + const PUBDATA_SOURCE_CUSTOM: u8 = 2; fn parse_error(message: impl Into>) -> ethabi::Error { ethabi::Error::Other(message.into()) @@ -291,6 +292,7 @@ pub fn detect_da( match last_reference_token.first() { Some(&byte) if byte == PUBDATA_SOURCE_CALLDATA => Ok(PubdataDA::Calldata), Some(&byte) if byte == PUBDATA_SOURCE_BLOBS => Ok(PubdataDA::Blobs), + Some(&byte) if byte == PUBDATA_SOURCE_CUSTOM => Ok(PubdataDA::Custom), Some(&byte) => Err(parse_error(format!( "unexpected first byte of the last reference token; expected one of [{PUBDATA_SOURCE_CALLDATA}, {PUBDATA_SOURCE_BLOBS}], \ got {byte}" From 10180f5e9e32821d3f1ccb20a3aa6fffd03aa8c6 Mon Sep 17 00:00:00 2001 From: dimazhornyk Date: Fri, 24 May 2024 18:02:29 +0200 Subject: [PATCH 22/69] fix revert test --- core/node/da_dispatcher/src/da_dispatcher.rs | 9 +++++++++ core/tests/revert-test/tests/revert-and-restart.test.ts | 4 ++++ 2 files changed, 13 insertions(+) diff --git a/core/node/da_dispatcher/src/da_dispatcher.rs b/core/node/da_dispatcher/src/da_dispatcher.rs index 42f6e8dfdc83..2867d941712a 100644 --- a/core/node/da_dispatcher/src/da_dispatcher.rs +++ b/core/node/da_dispatcher/src/da_dispatcher.rs @@ -77,6 +77,10 @@ impl DataAvailabilityDispatcher { .last_known_l1_batch .set(batch.l1_batch_number.0 as usize); METRICS.blob_size.observe(batch.pubdata.len()); + tracing::info!( + "Dispatched a DA for batch_number: {}", + batch.l1_batch_number + ); } Ok(()) @@ -108,6 +112,11 @@ impl DataAvailabilityDispatcher { METRICS.inclusion_latency.observe(Duration::from_secs( (Utc::now().timestamp() - storage_da.created_at.timestamp()) as u64, )); + + tracing::info!( + "Received an inclusion data for a batch_number: {}", + storage_da.l1_batch_number + ); } } diff --git a/core/tests/revert-test/tests/revert-and-restart.test.ts b/core/tests/revert-test/tests/revert-and-restart.test.ts index 6381f696283b..1bafdbcab4e7 100644 --- a/core/tests/revert-test/tests/revert-and-restart.test.ts +++ b/core/tests/revert-test/tests/revert-and-restart.test.ts @@ -62,10 +62,14 @@ describe('Block reverting test', function () { let operatorAddress = process.env.ETH_SENDER_SENDER_OPERATOR_COMMIT_ETH_ADDR; let enable_consensus = process.env.ENABLE_CONSENSUS == 'true'; + let is_validium = process.env.DEPLOYMENT_MODE == 'Validium'; let components = 'api,tree,eth,state_keeper,commitment_generator'; if (enable_consensus) { components += ',consensus'; } + if (is_validium) { + components += ',da_dispatcher'; + } before('create test wallet', async () => { tester = await Tester.init( From 765a2c40b9a11ad7e4fc0f9e1bdd918f92cdafde Mon Sep 17 
00:00:00 2001 From: dimazhornyk Date: Fri, 24 May 2024 19:27:57 +0200 Subject: [PATCH 23/69] fix upgrade test --- core/tests/upgrade-test/tests/upgrade.test.ts | 9 +++++++-- 1 file changed, 7 insertions(+), 2 deletions(-) diff --git a/core/tests/upgrade-test/tests/upgrade.test.ts b/core/tests/upgrade-test/tests/upgrade.test.ts index 9e9458394ea3..21d47ab1825c 100644 --- a/core/tests/upgrade-test/tests/upgrade.test.ts +++ b/core/tests/upgrade-test/tests/upgrade.test.ts @@ -28,6 +28,11 @@ const STATE_TRANSITON_MANAGER = new ethers.utils.Interface( require(`${L1_CONTRACTS_FOLDER}/state-transition/StateTransitionManager.sol/StateTransitionManager.json`).abi ); +let serverComponents = 'api,tree,eth,state_keeper,commitment_generator'; +if (process.env.CHAIN_STATE_KEEPER_L1_BATCH_COMMIT_DATA_GENERATOR_MODE == 'Validium') { + serverComponents += ',da_dispatcher'; +} + const depositAmount = ethers.utils.parseEther('0.001'); describe('Upgrade test', function () { @@ -69,7 +74,7 @@ describe('Upgrade test', function () { process.env.CHAIN_STATE_KEEPER_BLOCK_COMMIT_DEADLINE_MS = '2000'; // Run server in background. utils.background( - 'cd $ZKSYNC_HOME && cargo run --bin zksync_server --release -- --components=api,tree,eth,state_keeper,commitment_generator', + `cd $ZKSYNC_HOME && cargo run --bin zksync_server --release -- --components=${serverComponents}`, [null, logs, logs] ); // Server may need some time to recompile if it's a cold run, so wait for it. @@ -281,7 +286,7 @@ describe('Upgrade test', function () { // Run again. utils.background( - 'cd $ZKSYNC_HOME && zk f cargo run --bin zksync_server --release -- --components=api,tree,eth,state_keeper,commitment_generator &> upgrade.log', + `cd $ZKSYNC_HOME && zk f cargo run --bin zksync_server --release -- --components=${serverComponents} &> upgrade.log`, [null, logs, logs] ); await utils.sleep(10); From c399625cf20053be2798b666f77446cc077479f4 Mon Sep 17 00:00:00 2001 From: dimazhornyk Date: Sat, 25 May 2024 21:13:33 +0200 Subject: [PATCH 24/69] start the server with da_dispatcher for Validium in EN tests --- core/node/da_dispatcher/src/da_dispatcher.rs | 5 ----- .../tests/revert-test/tests/revert-and-restart-en.test.ts | 5 +++++ core/tests/revert-test/tests/revert-and-restart.test.ts | 8 ++++---- 3 files changed, 9 insertions(+), 9 deletions(-) diff --git a/core/node/da_dispatcher/src/da_dispatcher.rs b/core/node/da_dispatcher/src/da_dispatcher.rs index 2867d941712a..c083a345d4e1 100644 --- a/core/node/da_dispatcher/src/da_dispatcher.rs +++ b/core/node/da_dispatcher/src/da_dispatcher.rs @@ -97,11 +97,6 @@ impl DataAvailabilityDispatcher { .map_err(Error::msg)?; if let Some(inclusion_data) = inclusion_data { - tracing::info!( - "Storing inclusion data for batch_id: {}", - storage_da.l1_batch_number - ); - conn.blocks_dal() .save_l1_batch_inclusion_data( L1BatchNumber(storage_da.l1_batch_number as u32), diff --git a/core/tests/revert-test/tests/revert-and-restart-en.test.ts b/core/tests/revert-test/tests/revert-and-restart-en.test.ts index 7e5931ac8ad3..08ded047cdc6 100644 --- a/core/tests/revert-test/tests/revert-and-restart-en.test.ts +++ b/core/tests/revert-test/tests/revert-and-restart-en.test.ts @@ -137,10 +137,15 @@ class MainNode { env.DATABASE_MERKLE_TREE_MODE = 'full'; console.log(`DATABASE_URL = ${env.DATABASE_URL}`); + const isValidium = process.env.DEPLOYMENT_MODE == 'Validium'; let components = 'api,tree,eth,state_keeper,commitment_generator'; if (enableConsensus) { components += ',consensus'; } + if (isValidium) { + components += 
',da_dispatcher'; + } + let proc = spawn('./target/release/zksync_server', ['--components', components], { cwd: env.ZKSYNC_HOME, stdio: [null, logs, logs], diff --git a/core/tests/revert-test/tests/revert-and-restart.test.ts b/core/tests/revert-test/tests/revert-and-restart.test.ts index 1bafdbcab4e7..fcf58ba8557e 100644 --- a/core/tests/revert-test/tests/revert-and-restart.test.ts +++ b/core/tests/revert-test/tests/revert-and-restart.test.ts @@ -61,13 +61,13 @@ describe('Block reverting test', function () { let logs: fs.WriteStream; let operatorAddress = process.env.ETH_SENDER_SENDER_OPERATOR_COMMIT_ETH_ADDR; - let enable_consensus = process.env.ENABLE_CONSENSUS == 'true'; - let is_validium = process.env.DEPLOYMENT_MODE == 'Validium'; + const enableConsensus = process.env.ENABLE_CONSENSUS == 'true'; + const isValidium = process.env.DEPLOYMENT_MODE == 'Validium'; let components = 'api,tree,eth,state_keeper,commitment_generator'; - if (enable_consensus) { + if (enableConsensus) { components += ',consensus'; } - if (is_validium) { + if (isValidium) { components += ',da_dispatcher'; } From 08affa0a9c1c27afc6a274a0b91291dd7fe5a70c Mon Sep 17 00:00:00 2001 From: dimazhornyk Date: Thu, 30 May 2024 14:34:05 +0200 Subject: [PATCH 25/69] code review fixes --- Cargo.lock | 11 +- Cargo.toml | 2 +- core/bin/zksync_server/src/node_builder.rs | 14 +- core/lib/config/Cargo.toml | 2 + core/lib/config/src/configs/da_dispatcher.rs | 48 +++-- core/lib/constants/src/data_availability.rs | 6 + core/lib/constants/src/lib.rs | 1 + core/lib/da_client/Cargo.toml | 5 +- core/lib/da_client/README.md | 13 ++ core/lib/da_client/src/clients/mod.rs | 2 - core/lib/da_client/src/clients/no_da/mod.rs | 42 ---- .../da_client/src/{clients => }/gcs/mod.rs | 27 +-- core/lib/da_client/src/lib.rs | 17 +- core/lib/da_client/src/no_da/mod.rs | 32 +++ ...e5c7fd0d06d506d4b6bdaa3b149b44a7b85ac.json | 2 +- ...4e765933f54bb3b4dd7ee73416b3e1bc1bd68.json | 28 +++ ...156836da437153c9fba11af1d2aefccca139d.json | 28 --- ...b7af3a771c82b5eef0857cadb47d2e5b10c12.json | 15 -- ...02c15663bcaf6ad1c42fcdb0ca2ab7930c987.json | 16 ++ ...2f215fa85b24deea1c71419ef945f7f9f7c6.json} | 14 +- ...1114_create_data_availability_table.up.sql | 5 +- core/lib/dal/src/blocks_dal.rs | 184 +--------------- core/lib/dal/src/data_availability_dal.rs | 201 ++++++++++++++++++ core/lib/dal/src/lib.rs | 14 +- .../src/models/storage_data_availability.rs | 26 ++- core/lib/env_config/Cargo.toml | 1 + core/lib/env_config/src/da_dispatcher.rs | 18 +- core/lib/protobuf_config/Cargo.toml | 1 + core/lib/protobuf_config/src/da_dispatcher.rs | 47 ++-- .../src/proto/config/da_dispatcher.proto | 1 + core/lib/types/src/commitment/mod.rs | 6 - core/lib/types/src/pubdata_da.rs | 15 ++ core/lib/zksync_core_leftovers/Cargo.toml | 1 + core/lib/zksync_core_leftovers/src/lib.rs | 26 ++- core/node/da_dispatcher/Cargo.toml | 1 + core/node/da_dispatcher/README.md | 14 ++ core/node/da_dispatcher/src/da_dispatcher.rs | 122 +++++++---- core/node/da_dispatcher/src/lib.rs | 2 +- core/node/da_dispatcher/src/metrics.rs | 13 +- .../node/node_framework/examples/main_node.rs | 17 +- .../src/implementations/layers/da_client.rs | 70 ++++++ .../implementations/layers/da_dispatcher.rs | 40 ++-- .../src/implementations/layers/mod.rs | 1 + .../implementations/resources/da_client.rs | 13 ++ .../src/implementations/resources/mod.rs | 1 + 45 files changed, 714 insertions(+), 451 deletions(-) create mode 100644 core/lib/constants/src/data_availability.rs create mode 100644 core/lib/da_client/README.md delete 
mode 100644 core/lib/da_client/src/clients/mod.rs delete mode 100644 core/lib/da_client/src/clients/no_da/mod.rs rename core/lib/da_client/src/{clients => }/gcs/mod.rs (62%) create mode 100644 core/lib/da_client/src/no_da/mod.rs create mode 100644 core/lib/dal/.sqlx/query-519a299c5fabd641b718b64945b4e765933f54bb3b4dd7ee73416b3e1bc1bd68.json delete mode 100644 core/lib/dal/.sqlx/query-78d7a3b404a3acf5fe1e031b83e156836da437153c9fba11af1d2aefccca139d.json delete mode 100644 core/lib/dal/.sqlx/query-7ae47f4bac9639e8b2ad2626891b7af3a771c82b5eef0857cadb47d2e5b10c12.json create mode 100644 core/lib/dal/.sqlx/query-7b569dddae6e8a766392183baa902c15663bcaf6ad1c42fcdb0ca2ab7930c987.json rename core/lib/dal/.sqlx/{query-b3a6f05f7e0023d878e9bef24041d9804016a8c1902e8c67e0fe72742048b555.json => query-c3759facd600c44ff628fe504a672f215fa85b24deea1c71419ef945f7f9f7c6.json} (53%) create mode 100644 core/lib/dal/src/data_availability_dal.rs create mode 100644 core/node/da_dispatcher/README.md create mode 100644 core/node/node_framework/src/implementations/layers/da_client.rs create mode 100644 core/node/node_framework/src/implementations/resources/da_client.rs diff --git a/Cargo.lock b/Cargo.lock index 6fca4fc9e1a2..f43ebc87b64c 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -2822,9 +2822,11 @@ dependencies = [ [[package]] name = "hyperchain_da" version = "0.1.0" -source = "git+https://github.com/matter-labs/hyperchain-da.git?rev=5629d56ec3d851a7ff03e72c5e0765c55af595d5#5629d56ec3d851a7ff03e72c5e0765c55af595d5" +source = "git+https://github.com/matter-labs/hyperchain-da.git?rev=ad24b39e90a4a32db53d0a46fac8bf2c995f7a51#ad24b39e90a4a32db53d0a46fac8bf2c995f7a51" dependencies = [ + "anyhow", "async-trait", + "serde", ] [[package]] @@ -8117,12 +8119,14 @@ name = "zksync_config" version = "0.1.0" dependencies = [ "anyhow", + "hyperchain_da", "rand 0.8.5", "secrecy", "serde", "zksync_basic_types", "zksync_consensus_utils", "zksync_crypto_primitives", + "zksync_system_constants", ] [[package]] @@ -8362,6 +8366,7 @@ dependencies = [ "futures 0.3.28", "governor", "hex", + "hyperchain_da", "itertools 0.10.5", "jsonrpsee", "lru", @@ -8476,6 +8481,7 @@ dependencies = [ name = "zksync_da_client" version = "0.1.0" dependencies = [ + "anyhow", "async-trait", "hyperchain_da", "tracing", @@ -8491,6 +8497,7 @@ dependencies = [ "anyhow", "chrono", "hyperchain_da", + "rand 0.8.5", "tokio", "tracing", "vise", @@ -8555,6 +8562,7 @@ version = "0.1.0" dependencies = [ "anyhow", "envy", + "hyperchain_da", "serde", "zksync_basic_types", "zksync_config", @@ -9142,6 +9150,7 @@ version = "0.1.0" dependencies = [ "anyhow", "hex", + "hyperchain_da", "prost 0.12.1", "rand 0.8.5", "secrecy", diff --git a/Cargo.toml b/Cargo.toml index 9add4e2335c2..96535bdb6a40 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -202,7 +202,7 @@ zksync_consensus_storage = { version = "0.1.0", git = "https://github.com/matter zksync_consensus_utils = { version = "0.1.0", git = "https://github.com/matter-labs/era-consensus.git", rev = "3e6f101ee4124308c4c974caaa259d524549b0c6" } zksync_protobuf = { version = "0.1.0", git = "https://github.com/matter-labs/era-consensus.git", rev = "3e6f101ee4124308c4c974caaa259d524549b0c6" } zksync_protobuf_build = { version = "0.1.0", git = "https://github.com/matter-labs/era-consensus.git", rev = "3e6f101ee4124308c4c974caaa259d524549b0c6" } -zksync_da_layers = { package = "hyperchain_da", git = "https://github.com/matter-labs/hyperchain-da.git", rev = "5629d56ec3d851a7ff03e72c5e0765c55af595d5" } +zksync_da_layers = { package = 
"hyperchain_da", git = "https://github.com/matter-labs/hyperchain-da.git", rev = "ad24b39e90a4a32db53d0a46fac8bf2c995f7a51" } # "Local" dependencies multivm = { path = "core/lib/multivm" } diff --git a/core/bin/zksync_server/src/node_builder.rs b/core/bin/zksync_server/src/node_builder.rs index e73ccbb70300..e3c6bc46f9d6 100644 --- a/core/bin/zksync_server/src/node_builder.rs +++ b/core/bin/zksync_server/src/node_builder.rs @@ -19,6 +19,7 @@ use zksync_node_framework::{ commitment_generator::CommitmentGeneratorLayer, consensus::{ConsensusLayer, Mode as ConsensusMode}, contract_verification_api::ContractVerificationApiLayer, + da_client::DataAvailabilityClientLayer, da_dispatcher::DataAvailabilityDispatcherLayer, eth_sender::{EthTxAggregatorLayer, EthTxManagerLayer}, eth_watch::EthWatchLayer, @@ -400,16 +401,23 @@ impl MainNodeBuilder { Ok(self) } - fn add_da_dispatcher_layer(mut self) -> anyhow::Result { + fn add_da_client_layer(mut self) -> anyhow::Result { let eth_sender_config = try_load_config!(self.configs.eth); let da_config = try_load_config!(self.configs.da_dispatcher_config); - self.node.add_layer(DataAvailabilityDispatcherLayer::new( + self.node.add_layer(DataAvailabilityClientLayer::new( da_config, eth_sender_config, )); Ok(self) } + fn add_da_dispatcher_layer(mut self) -> anyhow::Result { + let da_config = try_load_config!(self.configs.da_dispatcher_config); + self.node + .add_layer(DataAvailabilityDispatcherLayer::new(da_config)); + Ok(self) + } + pub fn build(mut self, mut components: Vec) -> anyhow::Result { // Add "base" layers (resources and helper tasks). self = self @@ -492,7 +500,7 @@ impl MainNodeBuilder { self = self.add_commitment_generator_layer()?; } Component::DADispatcher => { - self = self.add_da_dispatcher_layer()?; + self = self.add_da_client_layer()?.add_da_dispatcher_layer()?; } } } diff --git a/core/lib/config/Cargo.toml b/core/lib/config/Cargo.toml index 144843c2bab2..a11c6aefac5d 100644 --- a/core/lib/config/Cargo.toml +++ b/core/lib/config/Cargo.toml @@ -13,6 +13,8 @@ categories.workspace = true zksync_basic_types.workspace = true zksync_crypto_primitives.workspace = true zksync_consensus_utils.workspace = true +zksync_da_layers.workspace = true +zksync_system_constants.workspace = true anyhow.workspace = true rand.workspace = true diff --git a/core/lib/config/src/configs/da_dispatcher.rs b/core/lib/config/src/configs/da_dispatcher.rs index 0efde393f1cf..0c45d8d1c157 100644 --- a/core/lib/config/src/configs/da_dispatcher.rs +++ b/core/lib/config/src/configs/da_dispatcher.rs @@ -1,58 +1,70 @@ use std::time::Duration; use serde::Deserialize; +use zksync_da_layers::config::DALayerConfig; use crate::ObjectStoreConfig; -#[derive(Clone, Debug, PartialEq, Deserialize)] -pub struct DALayerInfo { - pub name: String, - #[serde(default)] - pub private_key: String, -} - #[derive(Clone, Debug, PartialEq, Deserialize)] #[serde(tag = "da_mode")] pub enum DataAvailabilityMode { - DALayer(DALayerInfo), + /// Uses the data availability layer to dispatch pubdata. + DALayer(DALayerConfig), + /// Stores the pubdata in the Google Cloud Storage. GCS(ObjectStoreConfig), + /// Does not store the pubdata. NoDA, } #[derive(Debug, Clone, PartialEq, Deserialize)] pub struct DADispatcherConfig { + /// The mode of the data availability layer. This defines the underlying client that will be + /// used, and the configuration for that client. #[serde(flatten)] pub da_mode: DataAvailabilityMode, + /// The interval at which the dispatcher will poll the DA layer for inclusion data. 
pub polling_interval: Option, + /// The maximum number of rows to query from the database in a single query. pub query_rows_limit: Option, + /// The maximum number of retries for the dispatching of a blob. pub max_retries: Option, } impl DADispatcherConfig { pub fn for_tests() -> Self { Self { - da_mode: DataAvailabilityMode::DALayer(DALayerInfo { - name: "zkDA".into(), - private_key: "0x0".into(), - }), - polling_interval: Some(5), - query_rows_limit: Some(100), - max_retries: Some(5), + da_mode: DataAvailabilityMode::DALayer(DALayerConfig::Celestia( + zksync_da_layers::clients::celestia::config::CelestiaConfig { + light_node_url: "localhost:12345".to_string(), + private_key: "0x0".to_string(), + }, + )), + polling_interval: Some( + zksync_system_constants::data_availability::DEFAULT_POLLING_INTERVAL, + ), + query_rows_limit: Some( + zksync_system_constants::data_availability::DEFAULT_QUERY_ROWS_LIMIT, + ), + max_retries: Some(zksync_system_constants::data_availability::DEFAULT_MAX_RETRIES), } } pub fn polling_interval(&self) -> Duration { match self.polling_interval { Some(interval) => Duration::from_secs(interval as u64), - None => Duration::from_secs(5), + None => Duration::from_secs( + zksync_system_constants::data_availability::DEFAULT_POLLING_INTERVAL as u64, + ), } } pub fn query_rows_limit(&self) -> u32 { - self.query_rows_limit.unwrap_or(100) + self.query_rows_limit + .unwrap_or(zksync_system_constants::data_availability::DEFAULT_QUERY_ROWS_LIMIT) } pub fn max_retries(&self) -> u16 { - self.max_retries.unwrap_or(5) + self.max_retries + .unwrap_or(zksync_system_constants::data_availability::DEFAULT_MAX_RETRIES) } } diff --git a/core/lib/constants/src/data_availability.rs b/core/lib/constants/src/data_availability.rs new file mode 100644 index 000000000000..9058b532524d --- /dev/null +++ b/core/lib/constants/src/data_availability.rs @@ -0,0 +1,6 @@ +/// An interval with which the dispatcher is polling the DA layer for the inclusion of the blobs. +pub const DEFAULT_POLLING_INTERVAL: u32 = 5; +/// The maximum number of rows that the dispatcher is fetching from the database. +pub const DEFAULT_QUERY_ROWS_LIMIT: u32 = 100; +/// The maximum number of retries for the dispatching of a blob. 
+pub const DEFAULT_MAX_RETRIES: u16 = 5; diff --git a/core/lib/constants/src/lib.rs b/core/lib/constants/src/lib.rs index 6aab79ad71f3..76a96a0cc9cf 100644 --- a/core/lib/constants/src/lib.rs +++ b/core/lib/constants/src/lib.rs @@ -1,6 +1,7 @@ pub mod blocks; pub mod contracts; pub mod crypto; +pub mod data_availability; pub mod ethereum; pub mod fees; pub mod system_context; diff --git a/core/lib/da_client/Cargo.toml b/core/lib/da_client/Cargo.toml index 29eee728c994..21ca99366b84 100644 --- a/core/lib/da_client/Cargo.toml +++ b/core/lib/da_client/Cargo.toml @@ -9,13 +9,12 @@ license.workspace = true keywords.workspace = true categories.workspace = true -# See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html - [dependencies] +tracing = "0.1.40" async-trait = "0.1.74" +anyhow.workspace = true zksync_config.workspace = true zksync_types.workspace = true zksync_da_layers.workspace = true zksync_object_store.workspace = true -tracing = "0.1.40" \ No newline at end of file diff --git a/core/lib/da_client/README.md b/core/lib/da_client/README.md new file mode 100644 index 000000000000..6c3e2734337c --- /dev/null +++ b/core/lib/da_client/README.md @@ -0,0 +1,13 @@ +# Data Availability clients + +This crate contains an implementations of the default DataAvailability clients. These are maintained within this repo +because they are tightly coupled with the codebase, and would cause the circular dependency if they were to be moved to +the [hyperchain-da](https://github.com/matter-labs/hyperchain-da) repository. + +## Overview + +Currently, the following DataAvailability clients are implemented: + +- `NoDA client` that does not send or store any pubdata, it is needed to run the zkSync network in the "no-DA" mode + utilizing the DA framework. +- `GCS client` that stores the pubdata in the GCS. 
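For anyone adding a third client alongside these two, here is a minimal sketch of what an implementation of the `DataAvailabilityClient` trait could look like, following the method shapes used by the GCS and NoDA clients in this patch (an in-memory map, suitable only for tests; the `anyhow::Error` return types and the exact trait signature are assumptions read off the diffs below, not guaranteed by the `hyperchain-da` crate):

```rust
use std::{
    collections::HashMap,
    sync::{Arc, Mutex},
};

use async_trait::async_trait;
use zksync_da_layers::{
    types::{DispatchResponse, InclusionData},
    DataAvailabilityClient,
};

/// Illustrative in-memory client; not part of this patch.
#[derive(Clone, Debug, Default)]
pub struct InMemoryDAClient {
    blobs: Arc<Mutex<HashMap<String, Vec<u8>>>>,
}

#[async_trait]
impl DataAvailabilityClient for InMemoryDAClient {
    async fn dispatch_blob(
        &self,
        batch_number: u32,
        data: Vec<u8>,
    ) -> Result<DispatchResponse, anyhow::Error> {
        // Derive a deterministic blob_id from the batch number and remember the payload.
        let blob_id = format!("in-memory-{batch_number}");
        self.blobs.lock().unwrap().insert(blob_id.clone(), data);
        Ok(DispatchResponse { blob_id })
    }

    async fn get_inclusion_data(
        &self,
        blob_id: String,
    ) -> Result<Option<InclusionData>, anyhow::Error> {
        // Like the GCS client, there is nothing to verify on L1, so return the
        // default inclusion data once the blob is known.
        Ok(self
            .blobs
            .lock()
            .unwrap()
            .get(&blob_id)
            .map(|_| InclusionData::default()))
    }

    fn clone_boxed(&self) -> Box<dyn DataAvailabilityClient> {
        Box::new(self.clone())
    }
}
```

Such a client would be plugged in through the same `DataAvailabilityMode` match used in `initialize_components` and in the `DataAvailabilityClientLayer` further down in this patch.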
diff --git a/core/lib/da_client/src/clients/mod.rs b/core/lib/da_client/src/clients/mod.rs deleted file mode 100644 index 6a4994931375..000000000000 --- a/core/lib/da_client/src/clients/mod.rs +++ /dev/null @@ -1,2 +0,0 @@ -pub mod gcs; -pub mod no_da; diff --git a/core/lib/da_client/src/clients/no_da/mod.rs b/core/lib/da_client/src/clients/no_da/mod.rs deleted file mode 100644 index 9e0ab5b77c67..000000000000 --- a/core/lib/da_client/src/clients/no_da/mod.rs +++ /dev/null @@ -1,42 +0,0 @@ -use std::{ - fmt, - fmt::{Debug, Formatter}, -}; - -use async_trait::async_trait; -use zksync_da_layers::{ - types::{DataAvailabilityError, DispatchResponse, InclusionData}, - DataAvailabilityInterface, -}; - -pub(crate) struct NoDAClient {} - -impl NoDAClient { - pub fn new() -> Self { - NoDAClient {} - } -} - -#[async_trait] -impl DataAvailabilityInterface for NoDAClient { - async fn dispatch_blob( - &self, - _: u32, - _: Vec, - ) -> Result { - Ok(DispatchResponse::default()) - } - - async fn get_inclusion_data( - &self, - _: Vec, - ) -> Result, DataAvailabilityError> { - return Ok(Some(InclusionData::default())); - } -} - -impl Debug for NoDAClient { - fn fmt(&self, formatter: &mut Formatter<'_>) -> fmt::Result { - formatter.debug_struct("NoDAClient").finish() - } -} diff --git a/core/lib/da_client/src/clients/gcs/mod.rs b/core/lib/da_client/src/gcs/mod.rs similarity index 62% rename from core/lib/da_client/src/clients/gcs/mod.rs rename to core/lib/da_client/src/gcs/mod.rs index 888229ea0ac5..580dde722047 100644 --- a/core/lib/da_client/src/clients/gcs/mod.rs +++ b/core/lib/da_client/src/gcs/mod.rs @@ -7,13 +7,15 @@ use std::{ use async_trait::async_trait; use zksync_config::ObjectStoreConfig; use zksync_da_layers::{ - types::{DataAvailabilityError, DispatchResponse, InclusionData}, - DataAvailabilityInterface, + types::{DispatchResponse, InclusionData}, + DataAvailabilityClient, }; use zksync_object_store::{ObjectStore, ObjectStoreFactory}; use zksync_types::{pubdata_da::StorablePubdata, L1BatchNumber}; -pub(crate) struct GCSDAClient { +/// An implementation of the DataAvailabilityClient trait that stores the pubdata in the GCS. +#[derive(Clone)] +pub struct GCSDAClient { object_store: Arc, } @@ -26,29 +28,30 @@ impl GCSDAClient { } #[async_trait] -impl DataAvailabilityInterface for GCSDAClient { +impl DataAvailabilityClient for GCSDAClient { async fn dispatch_blob( &self, batch_number: u32, data: Vec, - ) -> Result { + ) -> Result { let key = self .object_store .put(L1BatchNumber(batch_number), &StorablePubdata { data }) .await .unwrap(); - Ok(DispatchResponse { - blob_id: key.into_bytes(), - }) + Ok(DispatchResponse { blob_id: key }) } - async fn get_inclusion_data( - &self, - _: Vec, - ) -> Result, DataAvailabilityError> { + async fn get_inclusion_data(&self, _: String) -> Result, anyhow::Error> { + // Using default here because we don't get any inclusion data from GCS, thus there's + // nothing to check on L1. 
return Ok(Some(InclusionData::default())); } + + fn clone_boxed(&self) -> Box { + Box::new(self.clone()) + } } impl Debug for GCSDAClient { diff --git a/core/lib/da_client/src/lib.rs b/core/lib/da_client/src/lib.rs index 6e2a1c59eb18..6a4994931375 100644 --- a/core/lib/da_client/src/lib.rs +++ b/core/lib/da_client/src/lib.rs @@ -1,15 +1,2 @@ -use zksync_config::configs::da_dispatcher::{DADispatcherConfig, DataAvailabilityMode}; -use zksync_da_layers::DataAvailabilityInterface; - -mod clients; - -pub async fn new_da_client(config: DADispatcherConfig) -> Box { - match config.da_mode { - DataAvailabilityMode::GCS(config) => Box::new(clients::gcs::GCSDAClient::new(config).await), - DataAvailabilityMode::NoDA => Box::new(clients::no_da::NoDAClient::new()), - DataAvailabilityMode::DALayer(config) => { - zksync_da_layers::new_da_layer_client(config.name, config.private_key.into_bytes()) - .await - } - } -} +pub mod gcs; +pub mod no_da; diff --git a/core/lib/da_client/src/no_da/mod.rs b/core/lib/da_client/src/no_da/mod.rs new file mode 100644 index 000000000000..86996ebb9844 --- /dev/null +++ b/core/lib/da_client/src/no_da/mod.rs @@ -0,0 +1,32 @@ +use std::fmt::Debug; + +use async_trait::async_trait; +use zksync_da_layers::{ + types::{DispatchResponse, InclusionData}, + DataAvailabilityClient, +}; + +/// A no-op implementation of the DataAvailabilityClient trait, that doesn't store the pubdata. +#[derive(Clone, Debug)] +pub struct NoDAClient; + +impl NoDAClient { + pub fn new() -> Self { + NoDAClient {} + } +} + +#[async_trait] +impl DataAvailabilityClient for NoDAClient { + async fn dispatch_blob(&self, _: u32, _: Vec) -> Result { + Ok(DispatchResponse::default()) + } + + async fn get_inclusion_data(&self, _: String) -> Result, anyhow::Error> { + return Ok(Some(InclusionData::default())); + } + + fn clone_boxed(&self) -> Box { + Box::new(self.clone()) + } +} diff --git a/core/lib/dal/.sqlx/query-16fdd84e9819be6f4e46b1afc5fe5c7fd0d06d506d4b6bdaa3b149b44a7b85ac.json b/core/lib/dal/.sqlx/query-16fdd84e9819be6f4e46b1afc5fe5c7fd0d06d506d4b6bdaa3b149b44a7b85ac.json index df0440b64254..0dc59785a8b6 100644 --- a/core/lib/dal/.sqlx/query-16fdd84e9819be6f4e46b1afc5fe5c7fd0d06d506d4b6bdaa3b149b44a7b85ac.json +++ b/core/lib/dal/.sqlx/query-16fdd84e9819be6f4e46b1afc5fe5c7fd0d06d506d4b6bdaa3b149b44a7b85ac.json @@ -12,7 +12,7 @@ "parameters": { "Left": [ "Int8", - "Bytea" + "Text" ] }, "nullable": [ diff --git a/core/lib/dal/.sqlx/query-519a299c5fabd641b718b64945b4e765933f54bb3b4dd7ee73416b3e1bc1bd68.json b/core/lib/dal/.sqlx/query-519a299c5fabd641b718b64945b4e765933f54bb3b4dd7ee73416b3e1bc1bd68.json new file mode 100644 index 000000000000..80cba7150eae --- /dev/null +++ b/core/lib/dal/.sqlx/query-519a299c5fabd641b718b64945b4e765933f54bb3b4dd7ee73416b3e1bc1bd68.json @@ -0,0 +1,28 @@ +{ + "db_name": "PostgreSQL", + "query": "\n SELECT\n number,\n pubdata_input\n FROM\n l1_batches\n LEFT JOIN data_availability ON data_availability.l1_batch_number = l1_batches.number\n WHERE\n eth_commit_tx_id IS NULL\n AND number != 0\n AND data_availability.blob_id IS NULL\n ORDER BY\n number\n LIMIT\n $1\n ", + "describe": { + "columns": [ + { + "ordinal": 0, + "name": "number", + "type_info": "Int8" + }, + { + "ordinal": 1, + "name": "pubdata_input", + "type_info": "Bytea" + } + ], + "parameters": { + "Left": [ + "Int8" + ] + }, + "nullable": [ + false, + true + ] + }, + "hash": "519a299c5fabd641b718b64945b4e765933f54bb3b4dd7ee73416b3e1bc1bd68" +} diff --git 
a/core/lib/dal/.sqlx/query-78d7a3b404a3acf5fe1e031b83e156836da437153c9fba11af1d2aefccca139d.json b/core/lib/dal/.sqlx/query-78d7a3b404a3acf5fe1e031b83e156836da437153c9fba11af1d2aefccca139d.json deleted file mode 100644 index 452c2a4c3c97..000000000000 --- a/core/lib/dal/.sqlx/query-78d7a3b404a3acf5fe1e031b83e156836da437153c9fba11af1d2aefccca139d.json +++ /dev/null @@ -1,28 +0,0 @@ -{ - "db_name": "PostgreSQL", - "query": "\n SELECT\n number,\n pubdata_input\n FROM\n l1_batches\n LEFT JOIN commitments ON commitments.l1_batch_number = l1_batches.number\n LEFT JOIN data_availability ON data_availability.l1_batch_number = l1_batches.number\n WHERE\n eth_commit_tx_id IS NULL\n AND number != 0\n AND commitment IS NOT NULL\n AND events_queue_commitment IS NOT NULL\n AND bootloader_initial_content_commitment IS NOT NULL\n AND data_availability.blob_id IS NULL\n ORDER BY\n number\n LIMIT\n $1\n ", - "describe": { - "columns": [ - { - "ordinal": 0, - "name": "number", - "type_info": "Int8" - }, - { - "ordinal": 1, - "name": "pubdata_input", - "type_info": "Bytea" - } - ], - "parameters": { - "Left": [ - "Int8" - ] - }, - "nullable": [ - false, - true - ] - }, - "hash": "78d7a3b404a3acf5fe1e031b83e156836da437153c9fba11af1d2aefccca139d" -} diff --git a/core/lib/dal/.sqlx/query-7ae47f4bac9639e8b2ad2626891b7af3a771c82b5eef0857cadb47d2e5b10c12.json b/core/lib/dal/.sqlx/query-7ae47f4bac9639e8b2ad2626891b7af3a771c82b5eef0857cadb47d2e5b10c12.json deleted file mode 100644 index 595eaa63c817..000000000000 --- a/core/lib/dal/.sqlx/query-7ae47f4bac9639e8b2ad2626891b7af3a771c82b5eef0857cadb47d2e5b10c12.json +++ /dev/null @@ -1,15 +0,0 @@ -{ - "db_name": "PostgreSQL", - "query": "\n INSERT INTO\n data_availability (l1_batch_number, blob_id, created_at, updated_at)\n VALUES\n ($1, $2, NOW(), NOW())\n ", - "describe": { - "columns": [], - "parameters": { - "Left": [ - "Int8", - "Bytea" - ] - }, - "nullable": [] - }, - "hash": "7ae47f4bac9639e8b2ad2626891b7af3a771c82b5eef0857cadb47d2e5b10c12" -} diff --git a/core/lib/dal/.sqlx/query-7b569dddae6e8a766392183baa902c15663bcaf6ad1c42fcdb0ca2ab7930c987.json b/core/lib/dal/.sqlx/query-7b569dddae6e8a766392183baa902c15663bcaf6ad1c42fcdb0ca2ab7930c987.json new file mode 100644 index 000000000000..f5023abf2e4e --- /dev/null +++ b/core/lib/dal/.sqlx/query-7b569dddae6e8a766392183baa902c15663bcaf6ad1c42fcdb0ca2ab7930c987.json @@ -0,0 +1,16 @@ +{ + "db_name": "PostgreSQL", + "query": "\n INSERT INTO\n data_availability (l1_batch_number, blob_id, sent_at, created_at, updated_at)\n VALUES\n ($1, $2, $3, NOW(), NOW())\n ", + "describe": { + "columns": [], + "parameters": { + "Left": [ + "Int8", + "Text", + "Timestamp" + ] + }, + "nullable": [] + }, + "hash": "7b569dddae6e8a766392183baa902c15663bcaf6ad1c42fcdb0ca2ab7930c987" +} diff --git a/core/lib/dal/.sqlx/query-b3a6f05f7e0023d878e9bef24041d9804016a8c1902e8c67e0fe72742048b555.json b/core/lib/dal/.sqlx/query-c3759facd600c44ff628fe504a672f215fa85b24deea1c71419ef945f7f9f7c6.json similarity index 53% rename from core/lib/dal/.sqlx/query-b3a6f05f7e0023d878e9bef24041d9804016a8c1902e8c67e0fe72742048b555.json rename to core/lib/dal/.sqlx/query-c3759facd600c44ff628fe504a672f215fa85b24deea1c71419ef945f7f9f7c6.json index 567c6d626e35..5099527325ba 100644 --- a/core/lib/dal/.sqlx/query-b3a6f05f7e0023d878e9bef24041d9804016a8c1902e8c67e0fe72742048b555.json +++ b/core/lib/dal/.sqlx/query-c3759facd600c44ff628fe504a672f215fa85b24deea1c71419ef945f7f9f7c6.json @@ -1,6 +1,6 @@ { "db_name": "PostgreSQL", - "query": "\n SELECT\n l1_batch_number,\n 
blob_id,\n inclusion_data,\n created_at,\n updated_at\n FROM\n data_availability\n WHERE\n inclusion_data IS NULL\n AND blob_id IS NOT NULL\n ORDER BY\n l1_batch_number\n LIMIT\n 1\n ", + "query": "\n SELECT\n l1_batch_number,\n blob_id,\n inclusion_data,\n sent_at\n FROM\n data_availability\n WHERE\n inclusion_data IS NULL\n AND blob_id IS NOT NULL\n ORDER BY\n l1_batch_number\n LIMIT\n 1\n ", "describe": { "columns": [ { @@ -11,7 +11,7 @@ { "ordinal": 1, "name": "blob_id", - "type_info": "Bytea" + "type_info": "Text" }, { "ordinal": 2, @@ -20,12 +20,7 @@ }, { "ordinal": 3, - "name": "created_at", - "type_info": "Timestamp" - }, - { - "ordinal": 4, - "name": "updated_at", + "name": "sent_at", "type_info": "Timestamp" } ], @@ -36,9 +31,8 @@ false, false, true, - false, false ] }, - "hash": "b3a6f05f7e0023d878e9bef24041d9804016a8c1902e8c67e0fe72742048b555" + "hash": "c3759facd600c44ff628fe504a672f215fa85b24deea1c71419ef945f7f9f7c6" } diff --git a/core/lib/dal/migrations/20240522081114_create_data_availability_table.up.sql b/core/lib/dal/migrations/20240522081114_create_data_availability_table.up.sql index a1b0d82b45b4..ae98593a3766 100644 --- a/core/lib/dal/migrations/20240522081114_create_data_availability_table.up.sql +++ b/core/lib/dal/migrations/20240522081114_create_data_availability_table.up.sql @@ -2,10 +2,11 @@ CREATE TABLE data_availability ( l1_batch_number BIGINT PRIMARY KEY REFERENCES l1_batches (number) ON DELETE CASCADE, - -- the BYTEA used for this 2 columns because it is the most generic type + blob_id TEXT NOT NULL, -- blob here is an abstract term, unrelated to any DA implementation + -- the BYTEA used for this column as the most generic type -- the actual format of blob identifier and inclusion data is defined by the DA client implementation - blob_id BYTEA NOT NULL, -- blob here is an abstract term, unrelated to any DA implementation inclusion_data BYTEA, + sent_at TIMESTAMP NOT NULL, created_at TIMESTAMP NOT NULL, updated_at TIMESTAMP NOT NULL diff --git a/core/lib/dal/src/blocks_dal.rs b/core/lib/dal/src/blocks_dal.rs index 2f7ca63eedef..2c0d78d2fd3a 100644 --- a/core/lib/dal/src/blocks_dal.rs +++ b/core/lib/dal/src/blocks_dal.rs @@ -17,7 +17,7 @@ use zksync_types::{ aggregated_operations::AggregatedActionType, block::{BlockGasCount, L1BatchHeader, L1BatchTreeData, L2BlockHeader, StorageOracleInfo}, circuit::CircuitStatistic, - commitment::{L1BatchCommitmentArtifacts, L1BatchDA, L1BatchWithMetadata}, + commitment::{L1BatchCommitmentArtifacts, L1BatchWithMetadata}, Address, L1BatchNumber, L2BlockNumber, ProtocolVersionId, H256, U256, }; @@ -25,7 +25,6 @@ use crate::{ models::{ parse_protocol_version, storage_block::{StorageL1Batch, StorageL1BatchHeader, StorageL2BlockHeader}, - storage_data_availability::StorageDataAvailability, storage_oracle_info::DbStorageOracleInfo, }, Core, CoreDal, @@ -884,118 +883,6 @@ impl BlocksDal<'_, '_> { Ok(()) } - pub async fn save_l1_batch_inclusion_data( - &mut self, - number: L1BatchNumber, - da_inclusion_data: Vec, - ) -> anyhow::Result<()> { - let update_result = sqlx::query!( - r#" - UPDATE data_availability - SET - inclusion_data = $1, - updated_at = NOW() - WHERE - l1_batch_number = $2 - AND inclusion_data IS NULL - "#, - da_inclusion_data.as_slice(), - i64::from(number.0), - ) - .instrument("save_l1_batch_da_data") - .with_arg("number", &number) - .report_latency() - .execute(self.storage) - .await?; - - if update_result.rows_affected() == 0 { - tracing::debug!("L1 batch #{number}: DA data wasn't updated as it's already present"); 
- - // Batch was already processed. Verify that existing DA data matches - let matched: i64 = sqlx::query!( - r#" - SELECT - COUNT(*) AS "count!" - FROM - data_availability - WHERE - l1_batch_number = $1 - AND inclusion_data = $2 - "#, - i64::from(number.0), - da_inclusion_data.as_slice(), - ) - .instrument("get_matching_batch_da_data") - .with_arg("number", &number) - .report_latency() - .fetch_one(self.storage) - .await? - .count; - - anyhow::ensure!( - matched == 1, - "DA data verification failed. DA data for L1 batch #{number} does not match the expected value" - ); - } - Ok(()) - } - - pub async fn insert_l1_batch_da( - &mut self, - number: L1BatchNumber, - blob_id: Vec, - ) -> anyhow::Result<()> { - let update_result = sqlx::query!( - r#" - INSERT INTO - data_availability (l1_batch_number, blob_id, created_at, updated_at) - VALUES - ($1, $2, NOW(), NOW()) - "#, - i64::from(number.0), - blob_id.as_slice(), - ) - .instrument("insert_l1_batch_da") - .with_arg("number", &number) - .with_arg("blob_id", &blob_id) - .report_latency() - .execute(self.storage) - .await?; - - if update_result.rows_affected() == 0 { - tracing::debug!( - "L1 batch #{number}: DA blob_id wasn't updated as it's already present" - ); - - // Batch was already processed. Verify that existing DA blob_id matches - let matched: i64 = sqlx::query!( - r#" - SELECT - COUNT(*) AS "count!" - FROM - data_availability - WHERE - l1_batch_number = $1 - AND blob_id = $2 - "#, - i64::from(number.0), - blob_id.as_slice(), - ) - .instrument("get_matching_batch_da_blob_id") - .with_arg("number", &number) - .report_latency() - .fetch_one(self.storage) - .await? - .count; - - anyhow::ensure!( - matched == 1, - "DA blob_id verification failed. DA blob_id for L1 batch #{number} does not match the expected value" - ); - } - Ok(()) - } - pub async fn save_l1_batch_commitment_artifacts( &mut self, number: L1BatchNumber, @@ -1806,75 +1693,6 @@ impl BlocksDal<'_, '_> { .context("map_l1_batches()") } - pub async fn get_da_blob_awaiting_inclusion( - &mut self, - ) -> anyhow::Result> { - Ok(sqlx::query_as!( - StorageDataAvailability, - r#" - SELECT - l1_batch_number, - blob_id, - inclusion_data, - created_at, - updated_at - FROM - data_availability - WHERE - inclusion_data IS NULL - AND blob_id IS NOT NULL - ORDER BY - l1_batch_number - LIMIT - 1 - "#, - ) - .instrument("get_da_blob_awaiting_inclusion") - .fetch_optional(self.storage) - .await?) 
- } - - pub async fn get_ready_for_da_dispatch_l1_batches( - &mut self, - limit: usize, - ) -> anyhow::Result> { - let rows = sqlx::query!( - r#" - SELECT - number, - pubdata_input - FROM - l1_batches - LEFT JOIN commitments ON commitments.l1_batch_number = l1_batches.number - LEFT JOIN data_availability ON data_availability.l1_batch_number = l1_batches.number - WHERE - eth_commit_tx_id IS NULL - AND number != 0 - AND commitment IS NOT NULL - AND events_queue_commitment IS NOT NULL - AND bootloader_initial_content_commitment IS NOT NULL - AND data_availability.blob_id IS NULL - ORDER BY - number - LIMIT - $1 - "#, - limit as i64, - ) - .instrument("get_ready_for_da_dispatch_l1_batches") - .with_arg("limit", &limit) - .fetch_all(self.storage) - .await?; - - Ok(rows - .into_iter() - .map(|row| L1BatchDA { - pubdata: row.pubdata_input.unwrap(), - l1_batch_number: L1BatchNumber(row.number as u32), - }) - .collect()) - } - pub async fn get_l1_batch_state_root( &mut self, number: L1BatchNumber, diff --git a/core/lib/dal/src/data_availability_dal.rs b/core/lib/dal/src/data_availability_dal.rs new file mode 100644 index 000000000000..8c5e17ef3590 --- /dev/null +++ b/core/lib/dal/src/data_availability_dal.rs @@ -0,0 +1,201 @@ +use zksync_db_connection::{connection::Connection, error::DalResult, instrument::InstrumentExt}; +use zksync_types::{pubdata_da::DataAvailabilityBlob, L1BatchNumber}; + +use crate::{ + models::storage_data_availability::{L1BatchDA, StorageDABlob}, + Core, +}; + +#[derive(Debug)] +pub struct DataAvailabilityDal<'a, 'c> { + pub(crate) storage: &'a mut Connection<'c, Core>, +} + +impl DataAvailabilityDal<'_, '_> { + /// Inserts the blob_id for the given L1 batch. If the blob_id is already present, + /// verifies that it matches the one provided in the function arguments + /// (preventing the same L1 batch from being stored twice) + pub async fn insert_l1_batch_da( + &mut self, + number: L1BatchNumber, + blob_id: &str, + sent_at: chrono::NaiveDateTime, + ) -> anyhow::Result<()> { + let update_result = sqlx::query!( + r#" + INSERT INTO + data_availability (l1_batch_number, blob_id, sent_at, created_at, updated_at) + VALUES + ($1, $2, $3, NOW(), NOW()) + "#, + i64::from(number.0), + blob_id, + sent_at, + ) + .instrument("insert_l1_batch_da") + .with_arg("number", &number) + .with_arg("blob_id", &blob_id) + .report_latency() + .execute(self.storage) + .await?; + + if update_result.rows_affected() == 0 { + tracing::debug!( + "L1 batch #{number}: DA blob_id wasn't updated as it's already present" + ); + + // Batch was already processed. Verify that existing DA blob_id matches + let matched: i64 = sqlx::query!( + r#" + SELECT + COUNT(*) AS "count!" + FROM + data_availability + WHERE + l1_batch_number = $1 + AND blob_id = $2 + "#, + i64::from(number.0), + blob_id, + ) + .instrument("get_matching_batch_da_blob_id") + .with_arg("number", &number) + .report_latency() + .fetch_one(self.storage) + .await? + .count; + + anyhow::ensure!( + matched == 1, + "DA blob_id verification failed. DA blob_id for L1 batch #{number} does not match the expected value" + ); + } + Ok(()) + } + + /// Saves the inclusion data for the given L1 batch. 
If the inclusion data is already present, + /// verifies that it matches the one provided in the function arguments + /// (meaning that the inclusion data corresponds to the same DA blob) + pub async fn save_l1_batch_inclusion_data( + &mut self, + number: L1BatchNumber, + da_inclusion_data: &[u8], + ) -> anyhow::Result<()> { + let update_result = sqlx::query!( + r#" + UPDATE data_availability + SET + inclusion_data = $1, + updated_at = NOW() + WHERE + l1_batch_number = $2 + AND inclusion_data IS NULL + "#, + da_inclusion_data, + i64::from(number.0), + ) + .instrument("save_l1_batch_da_data") + .with_arg("number", &number) + .report_latency() + .execute(self.storage) + .await?; + + if update_result.rows_affected() == 0 { + tracing::debug!("L1 batch #{number}: DA data wasn't updated as it's already present or the row for the batch_number is missing"); + + // Batch was already processed. Verify that existing DA data matches + let matched: i64 = sqlx::query!( + r#" + SELECT + COUNT(*) AS "count!" + FROM + data_availability + WHERE + l1_batch_number = $1 + AND inclusion_data = $2 + "#, + i64::from(number.0), + da_inclusion_data, + ) + .instrument("get_matching_batch_da_data") + .with_arg("number", &number) + .report_latency() + .fetch_one(self.storage) + .await? + .count; + + anyhow::ensure!( + matched == 1, + "DA data verification failed. DA data for L1 batch #{number} does not match the one provided before" + ); + } + Ok(()) + } + + /// Assumes that the l1_batches are sorted by number, and returns the first one that is ready for DA dispatch. + pub async fn get_first_da_blob_awaiting_inclusion( + &mut self, + ) -> DalResult> { + Ok(sqlx::query_as!( + StorageDABlob, + r#" + SELECT + l1_batch_number, + blob_id, + inclusion_data, + sent_at + FROM + data_availability + WHERE + inclusion_data IS NULL + AND blob_id IS NOT NULL + ORDER BY + l1_batch_number + LIMIT + 1 + "#, + ) + .instrument("get_first_da_blob_awaiting_inclusion") + .fetch_optional(self.storage) + .await? + .map(DataAvailabilityBlob::from)) + } + + /// Fetches the pubdata and l1_batch_number for the l1_batches that are ready for DA dispatch. 
+ pub async fn get_ready_for_da_dispatch_l1_batches( + &mut self, + limit: usize, + ) -> DalResult> { + let rows = sqlx::query!( + r#" + SELECT + number, + pubdata_input + FROM + l1_batches + LEFT JOIN data_availability ON data_availability.l1_batch_number = l1_batches.number + WHERE + eth_commit_tx_id IS NULL + AND number != 0 + AND data_availability.blob_id IS NULL + ORDER BY + number + LIMIT + $1 + "#, + limit as i64, + ) + .instrument("get_ready_for_da_dispatch_l1_batches") + .with_arg("limit", &limit) + .fetch_all(self.storage) + .await?; + + Ok(rows + .into_iter() + .map(|row| L1BatchDA { + pubdata: row.pubdata_input.unwrap(), + l1_batch_number: L1BatchNumber(row.number as u32), + }) + .collect()) + } +} diff --git a/core/lib/dal/src/lib.rs b/core/lib/dal/src/lib.rs index f9c585758c4d..8ffcbaec0c35 100644 --- a/core/lib/dal/src/lib.rs +++ b/core/lib/dal/src/lib.rs @@ -13,9 +13,10 @@ pub use zksync_db_connection::{ use crate::{ blocks_dal::BlocksDal, blocks_web3_dal::BlocksWeb3Dal, consensus_dal::ConsensusDal, - contract_verification_dal::ContractVerificationDal, eth_sender_dal::EthSenderDal, - events_dal::EventsDal, events_web3_dal::EventsWeb3Dal, factory_deps_dal::FactoryDepsDal, - proof_generation_dal::ProofGenerationDal, protocol_versions_dal::ProtocolVersionsDal, + contract_verification_dal::ContractVerificationDal, data_availability_dal::DataAvailabilityDal, + eth_sender_dal::EthSenderDal, events_dal::EventsDal, events_web3_dal::EventsWeb3Dal, + factory_deps_dal::FactoryDepsDal, proof_generation_dal::ProofGenerationDal, + protocol_versions_dal::ProtocolVersionsDal, protocol_versions_web3_dal::ProtocolVersionsWeb3Dal, pruning_dal::PruningDal, snapshot_recovery_dal::SnapshotRecoveryDal, snapshots_creator_dal::SnapshotsCreatorDal, snapshots_dal::SnapshotsDal, storage_logs_dal::StorageLogsDal, @@ -31,6 +32,7 @@ pub mod blocks_web3_dal; pub mod consensus; pub mod consensus_dal; pub mod contract_verification_dal; +mod data_availability_dal; pub mod eth_sender_dal; pub mod events_dal; pub mod events_web3_dal; @@ -119,6 +121,8 @@ where fn snapshot_recovery_dal(&mut self) -> SnapshotRecoveryDal<'_, 'a>; fn pruning_dal(&mut self) -> PruningDal<'_, 'a>; + + fn data_availability_dal(&mut self) -> DataAvailabilityDal<'_, 'a>; } #[derive(Clone, Debug)] @@ -229,4 +233,8 @@ impl<'a> CoreDal<'a> for Connection<'a, Core> { fn pruning_dal(&mut self) -> PruningDal<'_, 'a> { PruningDal { storage: self } } + + fn data_availability_dal(&mut self) -> DataAvailabilityDal<'_, 'a> { + DataAvailabilityDal { storage: self } + } } diff --git a/core/lib/dal/src/models/storage_data_availability.rs b/core/lib/dal/src/models/storage_data_availability.rs index 220460673959..2d8a2d0183db 100644 --- a/core/lib/dal/src/models/storage_data_availability.rs +++ b/core/lib/dal/src/models/storage_data_availability.rs @@ -1,10 +1,28 @@ use chrono::NaiveDateTime; +use zksync_types::{pubdata_da::DataAvailabilityBlob, L1BatchNumber}; +/// Represents a blob in the data availability layer. 
#[derive(Debug, Clone)] -pub struct StorageDataAvailability { +pub struct StorageDABlob { pub l1_batch_number: i64, - pub blob_id: Option>, + pub blob_id: String, pub inclusion_data: Option>, - pub created_at: NaiveDateTime, - pub updated_at: NaiveDateTime, + pub sent_at: NaiveDateTime, +} + +impl From for DataAvailabilityBlob { + fn from(blob: StorageDABlob) -> DataAvailabilityBlob { + DataAvailabilityBlob { + l1_batch_number: L1BatchNumber(blob.l1_batch_number as u32), + blob_id: blob.blob_id, + inclusion_data: blob.inclusion_data, + sent_at: blob.sent_at.and_utc(), + } + } +} + +/// A small struct used to store a batch and its data availability, which are retrieved from the database. +pub struct L1BatchDA { + pub pubdata: Vec, + pub l1_batch_number: L1BatchNumber, } diff --git a/core/lib/env_config/Cargo.toml b/core/lib/env_config/Cargo.toml index c86621584010..344c91a2260c 100644 --- a/core/lib/env_config/Cargo.toml +++ b/core/lib/env_config/Cargo.toml @@ -12,6 +12,7 @@ categories.workspace = true [dependencies] zksync_basic_types.workspace = true zksync_config.workspace = true +zksync_da_layers.workspace = true anyhow.workspace = true serde.workspace = true diff --git a/core/lib/env_config/src/da_dispatcher.rs b/core/lib/env_config/src/da_dispatcher.rs index 6e7c652ce103..5f395d217c3d 100644 --- a/core/lib/env_config/src/da_dispatcher.rs +++ b/core/lib/env_config/src/da_dispatcher.rs @@ -11,9 +11,10 @@ impl FromEnv for DADispatcherConfig { #[cfg(test)] mod tests { use zksync_config::configs::{ - da_dispatcher::{DADispatcherConfig, DALayerInfo, DataAvailabilityMode}, + da_dispatcher::{DADispatcherConfig, DataAvailabilityMode}, object_store::{ObjectStoreConfig, ObjectStoreMode}, }; + use zksync_da_layers::{clients::celestia::config::CelestiaConfig, config::DALayerConfig}; use super::*; use crate::test_utils::EnvMutex; @@ -40,18 +41,17 @@ mod tests { } } - fn expected_da_layer_config( - name: &str, + fn expected_celestia_da_layer_config( pk: &str, interval: u32, rows_limit: u32, max_retries: u16, ) -> DADispatcherConfig { DADispatcherConfig { - da_mode: DataAvailabilityMode::DALayer(DALayerInfo { - name: name.to_owned(), + da_mode: DataAvailabilityMode::DALayer(DALayerConfig::Celestia(CelestiaConfig { + light_node_url: "localhost:12345".to_string(), private_key: pk.to_owned(), - }), + })), polling_interval: Some(interval), query_rows_limit: Some(rows_limit), max_retries: Some(max_retries), @@ -75,15 +75,15 @@ mod tests { DA_DISPATCHER_QUERY_ROWS_LIMIT=60 DA_DISPATCHER_MAX_RETRIES=7 DA_DISPATCHER_DA_MODE="DALayer" - DA_DISPATCHER_NAME="testDALayer" + DA_DISPATCHER_CLIENT_NAME="Celestia" + DA_DISPATCHER_LIGHT_NODE_URL="localhost:12345" DA_DISPATCHER_PRIVATE_KEY="0xf55baf7c0e4e33b1d78fbf52f069c426bc36cff1aceb9bc8f45d14c07f034d73" "#; lock.set_env(config); let actual = DADispatcherConfig::from_env().unwrap(); assert_eq!( actual, - expected_da_layer_config( - "testDALayer", + expected_celestia_da_layer_config( "0xf55baf7c0e4e33b1d78fbf52f069c426bc36cff1aceb9bc8f45d14c07f034d73", 5, 60, diff --git a/core/lib/protobuf_config/Cargo.toml b/core/lib/protobuf_config/Cargo.toml index ee52d8d5472f..3595413ff901 100644 --- a/core/lib/protobuf_config/Cargo.toml +++ b/core/lib/protobuf_config/Cargo.toml @@ -18,6 +18,7 @@ zksync_basic_types.workspace = true zksync_config.workspace = true zksync_protobuf.workspace = true zksync_types.workspace = true +zksync_da_layers.workspace = true anyhow.workspace = true prost.workspace = true diff --git a/core/lib/protobuf_config/src/da_dispatcher.rs 
b/core/lib/protobuf_config/src/da_dispatcher.rs index 1721de4f3376..b4cbb5f9d764 100644 --- a/core/lib/protobuf_config/src/da_dispatcher.rs +++ b/core/lib/protobuf_config/src/da_dispatcher.rs @@ -1,5 +1,6 @@ -use anyhow::Context; -use zksync_config::configs; +use anyhow::{Context, Error}; +use zksync_config::configs::{self, da_dispatcher::DataAvailabilityMode}; +use zksync_da_layers::config::DALayerConfig; use zksync_protobuf::{required, ProtoRepr}; use crate::proto::{da_dispatcher as proto, object_store::ObjectStore}; @@ -10,15 +11,26 @@ impl ProtoRepr for proto::DataAvailabilityDispatcher { fn read(&self) -> anyhow::Result { match &self.credentials { Some(proto::data_availability_dispatcher::Credentials::DaLayer(config)) => { - Ok(configs::da_dispatcher::DADispatcherConfig { - da_mode: configs::da_dispatcher::DataAvailabilityMode::DALayer( - configs::da_dispatcher::DALayerInfo { - name: required(&config.name).context("name")?.clone(), + let da_config = match required(&config.name).context("da_layer_name")?.as_str() { + "celestia" => DALayerConfig::Celestia( + zksync_da_layers::clients::celestia::config::CelestiaConfig { + light_node_url: required(&config.light_node_url) + .context("light_node_url")? + .clone(), private_key: required(&config.private_key) .context("private_key")? .clone(), }, ), + _ => { + return Err(Error::msg(format!( + "Unknown DA layer name: {}", + required(&config.name).context("da_layer_name")? + ))) + } + }; + Ok(configs::da_dispatcher::DADispatcherConfig { + da_mode: DataAvailabilityMode::DALayer(da_config), polling_interval: Some( *required(&self.polling_interval).context("polling_interval")?, ), @@ -32,7 +44,7 @@ impl ProtoRepr for proto::DataAvailabilityDispatcher { } Some(proto::data_availability_dispatcher::Credentials::ObjectStore(config)) => { Ok(configs::da_dispatcher::DADispatcherConfig { - da_mode: configs::da_dispatcher::DataAvailabilityMode::GCS(config.read()?), + da_mode: DataAvailabilityMode::GCS(config.read()?), polling_interval: Some( *required(&self.polling_interval).context("polling_interval")?, ), @@ -45,7 +57,7 @@ impl ProtoRepr for proto::DataAvailabilityDispatcher { }) } None => Ok(configs::da_dispatcher::DADispatcherConfig { - da_mode: configs::da_dispatcher::DataAvailabilityMode::NoDA, + da_mode: DataAvailabilityMode::NoDA, polling_interval: None, query_rows_limit: None, max_retries: None, @@ -55,18 +67,21 @@ impl ProtoRepr for proto::DataAvailabilityDispatcher { fn build(this: &Self::Type) -> Self { let credentials = match this.da_mode.clone() { - configs::da_dispatcher::DataAvailabilityMode::DALayer(info) => Some( - proto::data_availability_dispatcher::Credentials::DaLayer(proto::DaLayer { - name: Some(info.name.clone()), - private_key: Some(info.private_key.clone()), - }), - ), - configs::da_dispatcher::DataAvailabilityMode::GCS(config) => Some( + DataAvailabilityMode::DALayer(info) => match info { + DALayerConfig::Celestia(info) => Some( + proto::data_availability_dispatcher::Credentials::DaLayer(proto::DaLayer { + name: Some("celestia".to_string()), + private_key: Some(info.private_key.clone()), + light_node_url: Some(info.light_node_url.clone()), + }), + ), + }, + DataAvailabilityMode::GCS(config) => Some( proto::data_availability_dispatcher::Credentials::ObjectStore(ObjectStore::build( &config, )), ), - configs::da_dispatcher::DataAvailabilityMode::NoDA => None, + DataAvailabilityMode::NoDA => None, }; Self { diff --git a/core/lib/protobuf_config/src/proto/config/da_dispatcher.proto 
b/core/lib/protobuf_config/src/proto/config/da_dispatcher.proto index e1c944d5d2e8..8de0b9e28d01 100644 --- a/core/lib/protobuf_config/src/proto/config/da_dispatcher.proto +++ b/core/lib/protobuf_config/src/proto/config/da_dispatcher.proto @@ -7,6 +7,7 @@ import "zksync/config/object_store.proto"; message DALayer { optional string name = 1; // required optional string private_key = 2; // required + optional string light_node_url = 3; } message DataAvailabilityDispatcher { diff --git a/core/lib/types/src/commitment/mod.rs b/core/lib/types/src/commitment/mod.rs index c789a5706d18..c78188b2f7d7 100644 --- a/core/lib/types/src/commitment/mod.rs +++ b/core/lib/types/src/commitment/mod.rs @@ -10,7 +10,6 @@ use std::{collections::HashMap, convert::TryFrom}; use serde::{Deserialize, Serialize}; pub use zksync_basic_types::commitment::L1BatchCommitmentMode; -use zksync_basic_types::L1BatchNumber; use zksync_contracts::BaseSystemContractsHashes; use zksync_mini_merkle_tree::MiniMerkleTree; use zksync_system_constants::{ @@ -729,8 +728,3 @@ pub struct L1BatchCommitmentArtifacts { pub zkporter_is_available: bool, pub aux_commitments: Option, } - -pub struct L1BatchDA { - pub pubdata: Vec, - pub l1_batch_number: L1BatchNumber, -} diff --git a/core/lib/types/src/pubdata_da.rs b/core/lib/types/src/pubdata_da.rs index ab4058c50d01..fc8d1c6584a2 100644 --- a/core/lib/types/src/pubdata_da.rs +++ b/core/lib/types/src/pubdata_da.rs @@ -1,5 +1,7 @@ +use chrono::{DateTime, Utc}; use num_enum::TryFromPrimitive; use serde::{Deserialize, Serialize}; +use zksync_basic_types::L1BatchNumber; use zksync_config::configs::eth_sender::PubdataSendingMode; /// Enum holding the current values used for DA Layers. @@ -7,8 +9,11 @@ use zksync_config::configs::eth_sender::PubdataSendingMode; #[derive(Debug, Clone, Copy, Deserialize, PartialEq, Serialize)] #[derive(TryFromPrimitive)] pub enum PubdataDA { + /// Pubdata is sent to the L1 as a tx calldata. Calldata = 0, + /// Pubdata is sent to L1 as EIP-4844 blobs. Blobs, + /// Pubdata is sent to the external storage (GCS/DA layers) or not sent at all. Custom, } @@ -22,6 +27,16 @@ impl From for PubdataDA { } } +/// Used as a wrapper for the pubdata to be stored in the GCS. pub struct StorablePubdata { pub data: Vec, } + +/// Represents a blob in the data availability layer. 
+#[derive(Debug, Clone)] +pub struct DataAvailabilityBlob { + pub l1_batch_number: L1BatchNumber, + pub blob_id: String, + pub inclusion_data: Option>, + pub sent_at: DateTime, +} diff --git a/core/lib/zksync_core_leftovers/Cargo.toml b/core/lib/zksync_core_leftovers/Cargo.toml index d53956713848..d62d5220ac9f 100644 --- a/core/lib/zksync_core_leftovers/Cargo.toml +++ b/core/lib/zksync_core_leftovers/Cargo.toml @@ -47,6 +47,7 @@ zksync_node_genesis.workspace = true zksync_eth_sender.workspace = true zksync_da_dispatcher.workspace = true zksync_da_client.workspace = true +zksync_da_layers.workspace = true zksync_node_fee_model.workspace = true zksync_state_keeper.workspace = true zksync_metadata_calculator.workspace = true diff --git a/core/lib/zksync_core_leftovers/src/lib.rs b/core/lib/zksync_core_leftovers/src/lib.rs index ec1be1fac55b..64f6d9220caf 100644 --- a/core/lib/zksync_core_leftovers/src/lib.rs +++ b/core/lib/zksync_core_leftovers/src/lib.rs @@ -27,6 +27,7 @@ use zksync_config::{ api::{MerkleTreeApiConfig, Web3JsonRpcConfig}, chain::{CircuitBreakerConfig, MempoolConfig, OperationsManagerConfig, StateKeeperConfig}, consensus::ConsensusConfig, + da_dispatcher::DataAvailabilityMode, database::{MerkleTreeConfig, MerkleTreeMode}, eth_sender::PubdataSendingMode, wallets, @@ -36,7 +37,11 @@ use zksync_config::{ ApiConfig, DBConfig, EthWatchConfig, GenesisConfig, }; use zksync_contracts::governance_contract; +use zksync_da_client::{gcs::GCSDAClient, no_da::NoDAClient}; use zksync_da_dispatcher::DataAvailabilityDispatcher; +use zksync_da_layers::{ + clients::celestia::CelestiaClient, config::DALayerConfig, DataAvailabilityClient, +}; use zksync_dal::{metrics::PostgresMetrics, ConnectionPool, Core, CoreDal}; use zksync_db_connection::healthcheck::ConnectionPoolHealthCheck; use zksync_eth_client::{clients::PKSigningClient, BoundEthInterface}; @@ -743,14 +748,17 @@ pub async fn initialize_components( .context("add_tee_verifier_input_producer_to_task_futures()")?; } - if components.contains(&Component::DADispatcher) - && eth + if components.contains(&Component::DADispatcher) { + if eth .sender .clone() .context("eth_sender")? 
.pubdata_sending_mode - == PubdataSendingMode::Custom - { + != PubdataSendingMode::Custom + { + panic!("DA dispatcher requires custom pubdata sending mode"); + } + let started_at = Instant::now(); let da_config = configs .da_dispatcher_config @@ -760,7 +768,15 @@ pub async fn initialize_components( .build() .await .context("failed to build da_dispatcher_pool")?; - let da_client = zksync_da_client::new_da_client(da_config.clone()).await; + let da_client: Box = match da_config.clone().da_mode { + DataAvailabilityMode::GCS(config) => Box::new(GCSDAClient::new(config).await), + DataAvailabilityMode::NoDA => Box::new(NoDAClient::new()), + DataAvailabilityMode::DALayer(config) => match config { + DALayerConfig::Celestia(celestia_config) => { + Box::new(CelestiaClient::new(celestia_config)) + } + }, + }; let da_dispatcher = DataAvailabilityDispatcher::new(da_dispatcher_pool, da_config, da_client); task_futures.push(tokio::spawn(da_dispatcher.run(stop_receiver.clone()))); diff --git a/core/node/da_dispatcher/Cargo.toml b/core/node/da_dispatcher/Cargo.toml index 3f5656eeaa9b..930d45d9f675 100644 --- a/core/node/da_dispatcher/Cargo.toml +++ b/core/node/da_dispatcher/Cargo.toml @@ -23,3 +23,4 @@ tokio = { workspace = true, features = ["time"] } anyhow.workspace = true tracing.workspace = true chrono = "0.4.31" +rand = "0.8.5" diff --git a/core/node/da_dispatcher/README.md b/core/node/da_dispatcher/README.md new file mode 100644 index 000000000000..e8ab740cb213 --- /dev/null +++ b/core/node/da_dispatcher/README.md @@ -0,0 +1,14 @@ +# DA dispatcher + +This crate contains an implementation of the DataAvailability dispatcher component, which sends a blobs of data to the +corresponding DA layer. + +## Overview + +The implementation of the DA clients is abstracted away from the dispatcher. The dispatcher is responsible for storing +the DA blobs info in the Postgres database and use it to get the inclusion proofs for the blobs. The retries logic is +also part of the DA dispatcher. + +This component assumes that batches are being sent to the L1 sequentially and that there is no need to fetch the +inclusion data for their DA in parallel. Same with dispatching DA blobs, there is no need to do that in parallel unless +we are facing performance issues when the sequencer is trying to catch up after some outage. 
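To make the wiring concrete, here is a hand-rolled sketch that mirrors the `initialize_components` path above: a boxed client plus a `DADispatcherConfig` feed the dispatcher, and `run` is driven by a stop signal. The generic parameters (`ConnectionPool<Core>`, `watch::Receiver<bool>`) and exact field types are assumptions inferred from the surrounding diffs, not guaranteed by this patch:

```rust
use tokio::sync::watch;
use zksync_config::configs::da_dispatcher::{DADispatcherConfig, DataAvailabilityMode};
use zksync_da_client::no_da::NoDAClient;
use zksync_da_dispatcher::DataAvailabilityDispatcher;
use zksync_da_layers::DataAvailabilityClient;
use zksync_dal::{ConnectionPool, Core};

/// Illustrative only: spawn a dispatcher in "NoDA" mode, e.g. for a local run.
fn spawn_no_da_dispatcher(
    pool: ConnectionPool<Core>,
    stop_receiver: watch::Receiver<bool>,
) -> tokio::task::JoinHandle<anyhow::Result<()>> {
    let config = DADispatcherConfig {
        da_mode: DataAvailabilityMode::NoDA,
        polling_interval: Some(5),   // seconds between inclusion polls
        query_rows_limit: Some(100), // batches fetched per DB query
        max_retries: Some(5),        // dispatch retries before giving up
    };
    let client: Box<dyn DataAvailabilityClient> = Box::new(NoDAClient::new());
    let dispatcher = DataAvailabilityDispatcher::new(pool, config, client);
    tokio::spawn(dispatcher.run(stop_receiver))
}
```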
diff --git a/core/node/da_dispatcher/src/da_dispatcher.rs b/core/node/da_dispatcher/src/da_dispatcher.rs index c083a345d4e1..7a7d9ba88cd1 100644 --- a/core/node/da_dispatcher/src/da_dispatcher.rs +++ b/core/node/da_dispatcher/src/da_dispatcher.rs @@ -1,10 +1,11 @@ use std::{future::Future, time::Duration}; -use anyhow::Error; -use chrono::Utc; +use anyhow::Context; +use chrono::{NaiveDateTime, Utc}; +use rand::Rng; use tokio::sync::watch; use zksync_config::DADispatcherConfig; -use zksync_da_layers::DataAvailabilityInterface; +use zksync_da_layers::DataAvailabilityClient; use zksync_dal::{Connection, ConnectionPool, Core, CoreDal}; use zksync_types::L1BatchNumber; @@ -12,7 +13,7 @@ use crate::metrics::METRICS; #[derive(Debug)] pub struct DataAvailabilityDispatcher { - client: Box, + client: Box, pool: ConnectionPool, config: DADispatcherConfig, } @@ -21,7 +22,7 @@ impl DataAvailabilityDispatcher { pub fn new( pool: ConnectionPool, config: DADispatcherConfig, - client: Box, + client: Box, ) -> Self { Self { pool, @@ -33,13 +34,12 @@ impl DataAvailabilityDispatcher { pub async fn run(self, stop_receiver: watch::Receiver) -> anyhow::Result<()> { let pool = self.pool.clone(); loop { - let mut conn = pool.connection_tagged("da_dispatcher").await.unwrap(); - if *stop_receiver.borrow() { tracing::info!("Stop signal received, da_dispatcher is shutting down"); break; } + let mut conn = pool.connection_tagged("da_dispatcher").await?; if let Err(err) = self.dispatch(&mut conn).await { tracing::warn!("dispatch error {err:?}"); } @@ -53,64 +53,104 @@ impl DataAvailabilityDispatcher { Ok(()) } + /// Dispatches the blobs to the data availability layer, and saves the blob_id in the database. async fn dispatch(&self, conn: &mut Connection<'_, Core>) -> anyhow::Result<()> { let batches = conn - .blocks_dal() + .data_availability_dal() .get_ready_for_da_dispatch_l1_batches(self.config.query_rows_limit() as usize) .await?; for batch in batches { let dispatch_latency = METRICS.blob_dispatch_latency.start(); - let dispatch_response = retry(self.config.max_retries(), || { + let dispatch_response = retry(self.config.max_retries(), batch.l1_batch_number, || { self.client .dispatch_blob(batch.l1_batch_number.0, batch.pubdata.clone()) }) .await - .map_err(Error::msg)?; - dispatch_latency.observe(); - - conn.blocks_dal() - .insert_l1_batch_da(batch.l1_batch_number, dispatch_response.blob_id) - .await?; + .with_context(|| { + format!( + "failed to dispatch a blob with batch_number: {}, pubdata_len: {}", + batch.l1_batch_number, + batch.pubdata.len() + ) + })?; + let dispatch_latency_duration = dispatch_latency.observe(); + + let sent_at = + NaiveDateTime::from_timestamp_millis(Utc::now().timestamp_millis()).unwrap(); + conn.data_availability_dal() + .insert_l1_batch_da( + batch.l1_batch_number, + dispatch_response.blob_id.as_str(), + sent_at, + ) + .await + .with_context(|| { + format!( + "failed to save blob_id for batch_number: {}", + batch.l1_batch_number + ) + })?; METRICS - .last_known_l1_batch + .last_dispatched_l1_batch .set(batch.l1_batch_number.0 as usize); METRICS.blob_size.observe(batch.pubdata.len()); tracing::info!( - "Dispatched a DA for batch_number: {}", - batch.l1_batch_number + "Dispatched a DA for batch_number: {}, pubdata_size: {}, dispatch_latency ms: {}", + batch.l1_batch_number, + batch.pubdata.len(), + dispatch_latency_duration.as_millis() ); } Ok(()) } + /// Polls the data availability layer for inclusion data, and saves it in the database. 
async fn poll_for_inclusion(&self, conn: &mut Connection<'_, Core>) -> anyhow::Result<()> { - let storage_da = conn.blocks_dal().get_da_blob_awaiting_inclusion().await?; - - if let Some(storage_da) = storage_da { + if let Some(blob_info) = conn + .data_availability_dal() + .get_first_da_blob_awaiting_inclusion() + .await? + { let inclusion_data = self .client - .get_inclusion_data(storage_da.blob_id.clone().unwrap()) + .get_inclusion_data(blob_info.blob_id.clone()) .await - .map_err(Error::msg)?; + .with_context(|| { + format!( + "failed to get inclusion data for blob_id: {}, batch_number: {}", + blob_info.blob_id, blob_info.l1_batch_number + ) + })?; if let Some(inclusion_data) = inclusion_data { - conn.blocks_dal() + conn.data_availability_dal() .save_l1_batch_inclusion_data( - L1BatchNumber(storage_da.l1_batch_number as u32), - inclusion_data.data, + L1BatchNumber(blob_info.l1_batch_number.0), + inclusion_data.data.as_slice(), ) - .await?; - - METRICS.inclusion_latency.observe(Duration::from_secs( - (Utc::now().timestamp() - storage_da.created_at.timestamp()) as u64, - )); + .await + .with_context(|| { + format!( + "failed to save inclusion data for batch_number: {}", + blob_info.l1_batch_number + ) + })?; + + let inclusion_latency_seconds = + (Utc::now().timestamp() - blob_info.sent_at.timestamp()) as u64; + METRICS + .inclusion_latency + .observe(Duration::from_secs(inclusion_latency_seconds)); + METRICS + .last_included_l1_batch + .set(blob_info.l1_batch_number.0 as usize); tracing::info!( - "Received an inclusion data for a batch_number: {}", - storage_da.l1_batch_number + "Received an inclusion data for a batch_number: {}, inclusion_latency_seconds: {}", + blob_info.l1_batch_number, inclusion_latency_seconds ); } } @@ -119,14 +159,18 @@ impl DataAvailabilityDispatcher { } } -async fn retry(max_retries: u16, mut f: F) -> Result +async fn retry( + max_retries: u16, + batch_number: L1BatchNumber, + mut f: F, +) -> Result where E: std::fmt::Display, Fut: Future>, F: FnMut() -> Fut, { let mut retries = 1; - let mut backoff = 1; + let mut backoff_secs = 1; loop { match f().await { Ok(result) => { @@ -134,13 +178,15 @@ where return Ok(result); } Err(err) => { - tracing::warn!(%err, "Failed DA dispatch request {retries}/{max_retries}, retrying."); + tracing::warn!(%err, "Failed DA dispatch request {retries}/{max_retries} for batch {batch_number}, retrying in {backoff_secs} seconds."); if retries > max_retries { return Err(err); } retries += 1; - tokio::time::sleep(Duration::from_secs(backoff)).await; - backoff *= 2; + let sleep_duration = Duration::from_secs(backoff_secs) + .mul_f32(rand::thread_rng().gen_range(0.8..1.2)); + tokio::time::sleep(sleep_duration).await; + backoff_secs = (backoff_secs * 2).min(128); } } } diff --git a/core/node/da_dispatcher/src/lib.rs b/core/node/da_dispatcher/src/lib.rs index 7d3507238591..cb41ea1f7c25 100644 --- a/core/node/da_dispatcher/src/lib.rs +++ b/core/node/da_dispatcher/src/lib.rs @@ -1,4 +1,4 @@ pub use self::da_dispatcher::DataAvailabilityDispatcher; -pub mod da_dispatcher; +mod da_dispatcher; mod metrics; diff --git a/core/node/da_dispatcher/src/metrics.rs b/core/node/da_dispatcher/src/metrics.rs index 49adbbcf984f..35423297bb76 100644 --- a/core/node/da_dispatcher/src/metrics.rs +++ b/core/node/da_dispatcher/src/metrics.rs @@ -1,25 +1,28 @@ use std::time::Duration; -use vise::{Buckets, Gauge, Histogram, Metrics}; +use vise::{Buckets, Gauge, Histogram, Metrics, Unit}; #[derive(Debug, Metrics)] #[metrics(prefix = "server_da_dispatcher")] pub(super) 
struct DataAvailabilityDispatcherMetrics { /// Latency of the dispatch of the blob. - #[metrics(buckets = Buckets::LATENCIES)] + #[metrics(buckets = Buckets::LATENCIES, unit = Unit::Seconds)] pub blob_dispatch_latency: Histogram, /// The duration between the moment when the blob is dispatched and the moment when it is included. #[metrics(buckets = Buckets::LATENCIES)] pub inclusion_latency: Histogram, /// Size of the dispatched blob. + /// Buckets are bytes ranging from 1KB to 16MB, which has to satisfy all blob size values. #[metrics(buckets = Buckets::exponential(1_024.0..=16.0 * 1_024.0 * 1_024.0, 2.0))] pub blob_size: Histogram, - /// Number of transactions resent by the Ethereum sender. + /// Number of transactions resent by the DA dispatcher. #[metrics(buckets = Buckets::linear(0.0..=10.0, 1.0))] pub dispatch_call_retries: Histogram, - /// Last L1 batch number observed by the DA dispatcher. - pub last_known_l1_batch: Gauge, + /// Last L1 batch that was dispatched to the DA layer. + pub last_dispatched_l1_batch: Gauge, + /// Last L1 batch that has its inclusion finalized by DA layer. + pub last_included_l1_batch: Gauge, } #[vise::register] diff --git a/core/node/node_framework/examples/main_node.rs b/core/node/node_framework/examples/main_node.rs index 08622b64dc4b..0bc0a8c00d96 100644 --- a/core/node/node_framework/examples/main_node.rs +++ b/core/node/node_framework/examples/main_node.rs @@ -29,6 +29,7 @@ use zksync_node_framework::{ circuit_breaker_checker::CircuitBreakerCheckerLayer, commitment_generator::CommitmentGeneratorLayer, contract_verification_api::ContractVerificationApiLayer, + da_client::DataAvailabilityClientLayer, da_dispatcher::DataAvailabilityDispatcherLayer, eth_sender::{EthTxAggregatorLayer, EthTxManagerLayer}, eth_watch::EthWatchLayer, @@ -314,13 +315,18 @@ impl MainNodeBuilder { Ok(self) } + fn add_da_client_layer(mut self) -> anyhow::Result { + let da_config = DADispatcherConfig::from_env()?; + let eth_config = EthConfig::from_env()?; + self.node + .add_layer(DataAvailabilityClientLayer::new(da_config, eth_config)); + Ok(self) + } + fn add_da_dispatcher_layer(mut self) -> anyhow::Result { - let eth_sender_config = EthConfig::from_env()?; let da_config = DADispatcherConfig::from_env()?; - self.node.add_layer(DataAvailabilityDispatcherLayer::new( - da_config, - eth_sender_config, - )); + self.node + .add_layer(DataAvailabilityDispatcherLayer::new(da_config)); Ok(self) } @@ -393,6 +399,7 @@ fn main() -> anyhow::Result<()> { .add_eth_watch_layer()? .add_pk_signing_client_layer()? .add_eth_sender_layer()? + .add_da_client_layer()? .add_da_dispatcher_layer()? .add_proof_data_handler_layer()? .add_healthcheck_layer()? 
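The reworked `retry` helper in `core/node/da_dispatcher/src/da_dispatcher.rs` above starts from a one-second backoff, doubles it after every failed dispatch, caps it at 128 seconds, and multiplies each sleep by a random 0.8–1.2 jitter factor. A standalone sketch of the nominal (pre-jitter) schedule, handy for sanity-checking retry budgets; only the constants come from the patch, the function itself is illustrative:

```rust
/// Illustrative: nominal (pre-jitter) sleep durations produced by the dispatcher's
/// retry loop for a given `max_retries` setting.
fn nominal_backoff_secs(max_retries: u16) -> Vec<u64> {
    let mut delays = Vec::with_capacity(max_retries as usize);
    let mut backoff_secs: u64 = 1;
    for _ in 0..max_retries {
        delays.push(backoff_secs);
        // Same doubling-with-cap rule as the dispatcher's retry helper.
        backoff_secs = (backoff_secs * 2).min(128);
    }
    delays
}

fn main() {
    // With the default DA_DISPATCHER_MAX_RETRIES = 5 this prints [1, 2, 4, 8, 16],
    // i.e. roughly half a minute of retrying before the dispatch error is surfaced.
    println!("{:?}", nominal_backoff_secs(5));
}
```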
diff --git a/core/node/node_framework/src/implementations/layers/da_client.rs b/core/node/node_framework/src/implementations/layers/da_client.rs new file mode 100644 index 000000000000..d100fa7a0430 --- /dev/null +++ b/core/node/node_framework/src/implementations/layers/da_client.rs @@ -0,0 +1,70 @@ +use zksync_config::{ + configs::{ + da_dispatcher::{DADispatcherConfig, DataAvailabilityMode}, + eth_sender::PubdataSendingMode, + }, + EthConfig, +}; +use zksync_da_client::{gcs::GCSDAClient, no_da::NoDAClient}; +use zksync_da_layers::{ + clients::celestia::CelestiaClient, config::DALayerConfig, DataAvailabilityClient, +}; + +use crate::{ + implementations::resources::da_client::DAClientResource, + service::ServiceContext, + wiring_layer::{WiringError, WiringLayer}, +}; + +#[derive(Debug)] +pub struct DataAvailabilityClientLayer { + da_config: DADispatcherConfig, + eth_config: EthConfig, +} + +impl DataAvailabilityClientLayer { + pub fn new(da_config: DADispatcherConfig, eth_config: EthConfig) -> Self { + Self { + da_config, + eth_config, + } + } +} + +#[async_trait::async_trait] +impl WiringLayer for DataAvailabilityClientLayer { + fn layer_name(&self) -> &'static str { + "da_client_layer" + } + + async fn wire(self: Box, mut context: ServiceContext<'_>) -> Result<(), WiringError> { + if self + .eth_config + .sender + .ok_or(WiringError::Configuration( + "missing the eth_sender config".to_string(), + ))? + .pubdata_sending_mode + != PubdataSendingMode::Custom + { + panic!("DA client layer requires custom pubdata sending mode"); + } + + // this can be broken down into the separate layers, but that would require the operator to + // wire the right one manually, which is less convenient than the current approach, which + // uses the config to determine the right client + let client: Box = match self.da_config.da_mode { + DataAvailabilityMode::GCS(config) => Box::new(GCSDAClient::new(config).await), + DataAvailabilityMode::NoDA => Box::new(NoDAClient::new()), + DataAvailabilityMode::DALayer(config) => match config { + DALayerConfig::Celestia(celestia_config) => { + Box::new(CelestiaClient::new(celestia_config)) + } + }, + }; + + context.insert_resource(DAClientResource(client))?; + + Ok(()) + } +} diff --git a/core/node/node_framework/src/implementations/layers/da_dispatcher.rs b/core/node/node_framework/src/implementations/layers/da_dispatcher.rs index a4531e303214..9bad60935e46 100644 --- a/core/node/node_framework/src/implementations/layers/da_dispatcher.rs +++ b/core/node/node_framework/src/implementations/layers/da_dispatcher.rs @@ -1,30 +1,27 @@ -use zksync_config::{ - configs::{da_dispatcher::DADispatcherConfig, eth_sender::PubdataSendingMode}, - EthConfig, -}; -use zksync_da_layers::DataAvailabilityInterface; +use zksync_config::configs::da_dispatcher::DADispatcherConfig; +use zksync_da_layers::DataAvailabilityClient; use zksync_dal::Core; use zksync_db_connection::connection_pool::ConnectionPool; use crate::{ - implementations::resources::pools::{MasterPool, PoolResource}, + implementations::resources::{ + da_client::DAClientResource, + pools::{MasterPool, PoolResource}, + }, service::{ServiceContext, StopReceiver}, task::Task, wiring_layer::{WiringError, WiringLayer}, }; +/// A layer that wires the data availability dispatcher task. 
#[derive(Debug)] pub struct DataAvailabilityDispatcherLayer { da_config: DADispatcherConfig, - eth_config: EthConfig, } impl DataAvailabilityDispatcherLayer { - pub fn new(da_config: DADispatcherConfig, eth_config: EthConfig) -> Self { - Self { - da_config, - eth_config, - } + pub fn new(da_config: DADispatcherConfig) -> Self { + Self { da_config } } } @@ -36,17 +33,14 @@ impl WiringLayer for DataAvailabilityDispatcherLayer { async fn wire(self: Box, mut context: ServiceContext<'_>) -> Result<(), WiringError> { let master_pool_resource = context.get_resource::>().await?; - let master_pool = master_pool_resource.get().await.unwrap(); - - if self.eth_config.sender.unwrap().pubdata_sending_mode == PubdataSendingMode::Custom { - let da_client = zksync_da_client::new_da_client(self.da_config.clone()).await; + let master_pool = master_pool_resource.get().await?; + let da_client = context.get_resource::().await?.0; - context.add_task(Box::new(DataAvailabilityDispatcherTask { - main_pool: master_pool, - da_config: self.da_config, - client: da_client, - })); - } + context.add_task(Box::new(DataAvailabilityDispatcherTask { + main_pool: master_pool, + da_config: self.da_config, + client: da_client, + })); Ok(()) } @@ -56,7 +50,7 @@ impl WiringLayer for DataAvailabilityDispatcherLayer { struct DataAvailabilityDispatcherTask { main_pool: ConnectionPool, da_config: DADispatcherConfig, - client: Box, + client: Box, } #[async_trait::async_trait] diff --git a/core/node/node_framework/src/implementations/layers/mod.rs b/core/node/node_framework/src/implementations/layers/mod.rs index 4a2fd5d5ad26..1d8b03ce611f 100644 --- a/core/node/node_framework/src/implementations/layers/mod.rs +++ b/core/node/node_framework/src/implementations/layers/mod.rs @@ -3,6 +3,7 @@ pub mod commitment_generator; pub mod consensus; pub mod consistency_checker; pub mod contract_verification_api; +pub mod da_client; pub mod da_dispatcher; pub mod eth_sender; pub mod eth_watch; diff --git a/core/node/node_framework/src/implementations/resources/da_client.rs b/core/node/node_framework/src/implementations/resources/da_client.rs new file mode 100644 index 000000000000..90027220aa11 --- /dev/null +++ b/core/node/node_framework/src/implementations/resources/da_client.rs @@ -0,0 +1,13 @@ +use zksync_da_layers::DataAvailabilityClient; + +use crate::resource::Resource; + +/// Represents a client of a certain DA solution. 
+#[derive(Clone)] +pub struct DAClientResource(pub Box); + +impl Resource for DAClientResource { + fn name() -> String { + "common/da_client".into() + } +} diff --git a/core/node/node_framework/src/implementations/resources/mod.rs b/core/node/node_framework/src/implementations/resources/mod.rs index 17c939419985..b8176e148c6f 100644 --- a/core/node/node_framework/src/implementations/resources/mod.rs +++ b/core/node/node_framework/src/implementations/resources/mod.rs @@ -1,5 +1,6 @@ pub mod action_queue; pub mod circuit_breakers; +pub mod da_client; pub mod eth_interface; pub mod fee_input; pub mod healthcheck; From 3888d8ab0741adc96243f971ab86826ecf61c1c6 Mon Sep 17 00:00:00 2001 From: dimazhornyk Date: Thu, 30 May 2024 15:06:07 +0200 Subject: [PATCH 26/69] fix CI --- core/lib/da_client/src/gcs/mod.rs | 2 +- core/lib/da_client/src/no_da/mod.rs | 4 ++-- core/lib/dal/src/data_availability_dal.rs | 4 ++-- core/node/da_dispatcher/src/metrics.rs | 2 +- prover/Cargo.lock | 13 +++++++++++++ 5 files changed, 19 insertions(+), 6 deletions(-) diff --git a/core/lib/da_client/src/gcs/mod.rs b/core/lib/da_client/src/gcs/mod.rs index 580dde722047..7a9f9b3e25b1 100644 --- a/core/lib/da_client/src/gcs/mod.rs +++ b/core/lib/da_client/src/gcs/mod.rs @@ -13,7 +13,7 @@ use zksync_da_layers::{ use zksync_object_store::{ObjectStore, ObjectStoreFactory}; use zksync_types::{pubdata_da::StorablePubdata, L1BatchNumber}; -/// An implementation of the DataAvailabilityClient trait that stores the pubdata in the GCS. +/// An implementation of the `DataAvailabilityClient` trait that stores the pubdata in the GCS. #[derive(Clone)] pub struct GCSDAClient { object_store: Arc, diff --git a/core/lib/da_client/src/no_da/mod.rs b/core/lib/da_client/src/no_da/mod.rs index 86996ebb9844..051e4165b312 100644 --- a/core/lib/da_client/src/no_da/mod.rs +++ b/core/lib/da_client/src/no_da/mod.rs @@ -6,8 +6,8 @@ use zksync_da_layers::{ DataAvailabilityClient, }; -/// A no-op implementation of the DataAvailabilityClient trait, that doesn't store the pubdata. -#[derive(Clone, Debug)] +/// A no-op implementation of the `DataAvailabilityClient` trait, that doesn't store the pubdata. +#[derive(Clone, Debug, Default)] pub struct NoDAClient; impl NoDAClient { diff --git a/core/lib/dal/src/data_availability_dal.rs b/core/lib/dal/src/data_availability_dal.rs index 8c5e17ef3590..3074605d490b 100644 --- a/core/lib/dal/src/data_availability_dal.rs +++ b/core/lib/dal/src/data_availability_dal.rs @@ -132,7 +132,7 @@ impl DataAvailabilityDal<'_, '_> { Ok(()) } - /// Assumes that the l1_batches are sorted by number, and returns the first one that is ready for DA dispatch. + /// Assumes that the L1 batches are sorted by number, and returns the first one that is ready for DA dispatch. pub async fn get_first_da_blob_awaiting_inclusion( &mut self, ) -> DalResult> { @@ -161,7 +161,7 @@ impl DataAvailabilityDal<'_, '_> { .map(DataAvailabilityBlob::from)) } - /// Fetches the pubdata and l1_batch_number for the l1_batches that are ready for DA dispatch. + /// Fetches the pubdata and `l1_batch_number` for the L1 batches that are ready for DA dispatch. 
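+    /// At most `limit` batches are returned per call.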
pub async fn get_ready_for_da_dispatch_l1_batches( &mut self, limit: usize, diff --git a/core/node/da_dispatcher/src/metrics.rs b/core/node/da_dispatcher/src/metrics.rs index 35423297bb76..6c246cd447e8 100644 --- a/core/node/da_dispatcher/src/metrics.rs +++ b/core/node/da_dispatcher/src/metrics.rs @@ -12,7 +12,7 @@ pub(super) struct DataAvailabilityDispatcherMetrics { #[metrics(buckets = Buckets::LATENCIES)] pub inclusion_latency: Histogram, /// Size of the dispatched blob. - /// Buckets are bytes ranging from 1KB to 16MB, which has to satisfy all blob size values. + /// Buckets are bytes ranging from 1 KB to 16 MB, which has to satisfy all blob size values. #[metrics(buckets = Buckets::exponential(1_024.0..=16.0 * 1_024.0 * 1_024.0, 2.0))] pub blob_size: Histogram, diff --git a/prover/Cargo.lock b/prover/Cargo.lock index 89cb099cfa3c..b8b7b6ecfde1 100644 --- a/prover/Cargo.lock +++ b/prover/Cargo.lock @@ -2821,6 +2821,16 @@ dependencies = [ "tokio-native-tls", ] +[[package]] +name = "hyperchain_da" +version = "0.1.0" +source = "git+https://github.com/matter-labs/hyperchain-da.git?rev=ad24b39e90a4a32db53d0a46fac8bf2c995f7a51#ad24b39e90a4a32db53d0a46fac8bf2c995f7a51" +dependencies = [ + "anyhow", + "async-trait", + "serde", +] + [[package]] name = "iana-time-zone" version = "0.1.60" @@ -7931,12 +7941,14 @@ name = "zksync_config" version = "0.1.0" dependencies = [ "anyhow", + "hyperchain_da", "rand 0.8.5", "secrecy", "serde", "zksync_basic_types", "zksync_consensus_utils", "zksync_crypto_primitives", + "zksync_system_constants", ] [[package]] @@ -8104,6 +8116,7 @@ version = "0.1.0" dependencies = [ "anyhow", "envy", + "hyperchain_da", "serde", "zksync_basic_types", "zksync_config", From a60f9dbb80ff1f608ad7378b5a9ce2f280fa6238 Mon Sep 17 00:00:00 2001 From: dimazhornyk Date: Thu, 30 May 2024 16:41:46 +0200 Subject: [PATCH 27/69] regenerate sqlx folder --- ...8d26219c13bc176e8cfee696d4e9f6.json.nPBbNl | 119 ------------------ 1 file changed, 119 deletions(-) delete mode 100644 core/lib/dal/.sqlx/.query-05be1a2c5cefcb1a58af2e5113e89003638d26219c13bc176e8cfee696d4e9f6.json.nPBbNl diff --git a/core/lib/dal/.sqlx/.query-05be1a2c5cefcb1a58af2e5113e89003638d26219c13bc176e8cfee696d4e9f6.json.nPBbNl b/core/lib/dal/.sqlx/.query-05be1a2c5cefcb1a58af2e5113e89003638d26219c13bc176e8cfee696d4e9f6.json.nPBbNl deleted file mode 100644 index 69a1077452dd..000000000000 --- a/core/lib/dal/.sqlx/.query-05be1a2c5cefcb1a58af2e5113e89003638d26219c13bc176e8cfee696d4e9f6.json.nPBbNl +++ /dev/null @@ -1,119 +0,0 @@ -{ - "db_name": "PostgreSQL", - "query": "\n SELECT\n transactions.hash AS tx_hash,\n transactions.index_in_block AS index_in_block,\n miniblocks.number AS block_number,\n transactions.nonce AS nonce,\n transactions.signature AS signature,\n transactions.initiator_address AS initiator_address,\n transactions.tx_format AS tx_format,\n transactions.value AS value,\n transactions.gas_limit AS gas_limit,\n transactions.max_fee_per_gas AS max_fee_per_gas,\n transactions.max_priority_fee_per_gas AS max_priority_fee_per_gas,\n transactions.effective_gas_price AS effective_gas_price,\n transactions.l1_batch_number AS l1_batch_number,\n transactions.l1_batch_tx_index AS l1_batch_tx_index,\n transactions.data->'contractAddress' AS \"execute_contract_address\",\n transactions.data->'calldata' AS \"calldata\",\n miniblocks.hash AS \"block_hash\"\n FROM transactions\n LEFT JOIN miniblocks ON miniblocks.number = transactions.miniblock_number\n WHERE\n miniblocks.number = $1 AND transactions.index_in_block = $2 AND 
transactions.data != '{}'::jsonb", - "describe": { - "columns": [ - { - "ordinal": 0, - "name": "tx_hash", - "type_info": "Bytea" - }, - { - "ordinal": 1, - "name": "index_in_block", - "type_info": "Int4" - }, - { - "ordinal": 2, - "name": "block_number", - "type_info": "Int8" - }, - { - "ordinal": 3, - "name": "nonce", - "type_info": "Int8" - }, - { - "ordinal": 4, - "name": "signature", - "type_info": "Bytea" - }, - { - "ordinal": 5, - "name": "initiator_address", - "type_info": "Bytea" - }, - { - "ordinal": 6, - "name": "tx_format", - "type_info": "Int4" - }, - { - "ordinal": 7, - "name": "value", - "type_info": "Numeric" - }, - { - "ordinal": 8, - "name": "gas_limit", - "type_info": "Numeric" - }, - { - "ordinal": 9, - "name": "max_fee_per_gas", - "type_info": "Numeric" - }, - { - "ordinal": 10, - "name": "max_priority_fee_per_gas", - "type_info": "Numeric" - }, - { - "ordinal": 11, - "name": "effective_gas_price", - "type_info": "Numeric" - }, - { - "ordinal": 12, - "name": "l1_batch_number", - "type_info": "Int8" - }, - { - "ordinal": 13, - "name": "l1_batch_tx_index", - "type_info": "Int4" - }, - { - "ordinal": 14, - "name": "execute_contract_address", - "type_info": "Jsonb" - }, - { - "ordinal": 15, - "name": "calldata", - "type_info": "Jsonb" - }, - { - "ordinal": 16, - "name": "block_hash", - "type_info": "Bytea" - } - ], - "parameters": { - "Left": [ - "Int8", - "Int4" - ] - }, - "nullable": [ - false, - true, - false, - true, - true, - false, - true, - false, - true, - true, - true, - true, - true, - true, - null, - null, - false - ] - }, - "hash": "05be1a2c5cefcb1a58af2e5113e89003638d26219c13bc176e8cfee696d4e9f6" -} From e9310feed5948a2cdcd92b4576fe234c2c2f2eb7 Mon Sep 17 00:00:00 2001 From: dimazhornyk Date: Thu, 30 May 2024 16:49:15 +0200 Subject: [PATCH 28/69] cargo sqlx prepare --- ...2a77487df19d18a9e496d62d6d79e429e83a.json} | 5 +- ...1068650eba62c2478c08d5a6216c85014eb5f.json | 200 ------------------ 2 files changed, 3 insertions(+), 202 deletions(-) rename core/lib/dal/.sqlx/{query-71f82ea7c83cccab209e13ea388dbb6b0c35c756638588792ac9c1db221fef33.json => query-a1ad8005dbe847f7ea6c5def68e52a77487df19d18a9e496d62d6d79e429e83a.json} (80%) delete mode 100644 core/lib/dal/.sqlx/query-f0ea0bc1d776591cbdeeddc398e1068650eba62c2478c08d5a6216c85014eb5f.json diff --git a/core/lib/dal/.sqlx/query-71f82ea7c83cccab209e13ea388dbb6b0c35c756638588792ac9c1db221fef33.json b/core/lib/dal/.sqlx/query-a1ad8005dbe847f7ea6c5def68e52a77487df19d18a9e496d62d6d79e429e83a.json similarity index 80% rename from core/lib/dal/.sqlx/query-71f82ea7c83cccab209e13ea388dbb6b0c35c756638588792ac9c1db221fef33.json rename to core/lib/dal/.sqlx/query-a1ad8005dbe847f7ea6c5def68e52a77487df19d18a9e496d62d6d79e429e83a.json index afa7ac0e2111..1979ca8c1c3a 100644 --- a/core/lib/dal/.sqlx/query-71f82ea7c83cccab209e13ea388dbb6b0c35c756638588792ac9c1db221fef33.json +++ b/core/lib/dal/.sqlx/query-a1ad8005dbe847f7ea6c5def68e52a77487df19d18a9e496d62d6d79e429e83a.json @@ -1,6 +1,6 @@ { "db_name": "PostgreSQL", - "query": "\n SELECT\n number,\n l1_batches.timestamp,\n l1_tx_count,\n l2_tx_count,\n bloom,\n priority_ops_onchain_data,\n hash,\n commitment,\n l2_to_l1_logs,\n l2_to_l1_messages,\n used_contract_hashes,\n compressed_initial_writes,\n compressed_repeated_writes,\n l2_l1_merkle_root,\n rollup_last_leaf_index,\n zkporter_is_available,\n l1_batches.bootloader_code_hash,\n l1_batches.default_aa_code_hash,\n aux_data_hash,\n pass_through_data_hash,\n meta_parameters_hash,\n protocol_version,\n compressed_state_diffs,\n 
system_logs,\n events_queue_commitment,\n bootloader_initial_content_commitment,\n pubdata_input\n FROM\n l1_batches\n LEFT JOIN commitments ON commitments.l1_batch_number = l1_batches.number\n JOIN protocol_versions ON protocol_versions.id = l1_batches.protocol_version\n WHERE\n eth_commit_tx_id IS NULL\n AND number != 0\n AND protocol_versions.bootloader_code_hash = $1\n AND protocol_versions.default_account_code_hash = $2\n AND commitment IS NOT NULL\n AND (\n protocol_versions.id = $3\n OR protocol_versions.upgrade_tx_hash IS NULL\n )\n AND events_queue_commitment IS NOT NULL\n AND bootloader_initial_content_commitment IS NOT NULL\n ORDER BY\n number\n LIMIT\n $4\n ", + "query": "\n SELECT\n number,\n l1_batches.timestamp,\n l1_tx_count,\n l2_tx_count,\n bloom,\n priority_ops_onchain_data,\n hash,\n commitment,\n l2_to_l1_logs,\n l2_to_l1_messages,\n used_contract_hashes,\n compressed_initial_writes,\n compressed_repeated_writes,\n l2_l1_merkle_root,\n rollup_last_leaf_index,\n zkporter_is_available,\n l1_batches.bootloader_code_hash,\n l1_batches.default_aa_code_hash,\n aux_data_hash,\n pass_through_data_hash,\n meta_parameters_hash,\n protocol_version,\n compressed_state_diffs,\n system_logs,\n events_queue_commitment,\n bootloader_initial_content_commitment,\n pubdata_input\n FROM\n l1_batches\n LEFT JOIN commitments ON commitments.l1_batch_number = l1_batches.number\n LEFT JOIN data_availability ON data_availability.l1_batch_number = l1_batches.number\n JOIN protocol_versions ON protocol_versions.id = l1_batches.protocol_version\n WHERE\n eth_commit_tx_id IS NULL\n AND number != 0\n AND protocol_versions.bootloader_code_hash = $1\n AND protocol_versions.default_account_code_hash = $2\n AND commitment IS NOT NULL\n AND (\n protocol_versions.id = $3\n OR protocol_versions.upgrade_tx_hash IS NULL\n )\n AND events_queue_commitment IS NOT NULL\n AND bootloader_initial_content_commitment IS NOT NULL\n AND (\n data_availability.inclusion_data IS NOT NULL\n OR $4 IS TRUE\n )\n ORDER BY\n number\n LIMIT\n $5\n ", "describe": { "columns": [ { @@ -144,6 +144,7 @@ "Bytea", "Bytea", "Int4", + "Bool", "Int8" ] }, @@ -177,5 +178,5 @@ true ] }, - "hash": "71f82ea7c83cccab209e13ea388dbb6b0c35c756638588792ac9c1db221fef33" + "hash": "a1ad8005dbe847f7ea6c5def68e52a77487df19d18a9e496d62d6d79e429e83a" } diff --git a/core/lib/dal/.sqlx/query-f0ea0bc1d776591cbdeeddc398e1068650eba62c2478c08d5a6216c85014eb5f.json b/core/lib/dal/.sqlx/query-f0ea0bc1d776591cbdeeddc398e1068650eba62c2478c08d5a6216c85014eb5f.json deleted file mode 100644 index c1ff828e33c9..000000000000 --- a/core/lib/dal/.sqlx/query-f0ea0bc1d776591cbdeeddc398e1068650eba62c2478c08d5a6216c85014eb5f.json +++ /dev/null @@ -1,200 +0,0 @@ -{ - "db_name": "PostgreSQL", - "query": "\n SELECT\n number,\n l1_batches.timestamp,\n l1_tx_count,\n l2_tx_count,\n bloom,\n priority_ops_onchain_data,\n hash,\n commitment,\n eth_prove_tx_id,\n eth_commit_tx_id,\n eth_execute_tx_id,\n l2_to_l1_logs,\n l2_to_l1_messages,\n used_contract_hashes,\n compressed_initial_writes,\n compressed_repeated_writes,\n l2_l1_merkle_root,\n rollup_last_leaf_index,\n zkporter_is_available,\n l1_batches.bootloader_code_hash,\n l1_batches.default_aa_code_hash,\n aux_data_hash,\n pass_through_data_hash,\n meta_parameters_hash,\n protocol_version,\n compressed_state_diffs,\n system_logs,\n events_queue_commitment,\n bootloader_initial_content_commitment,\n pubdata_input\n FROM\n l1_batches\n LEFT JOIN commitments ON commitments.l1_batch_number = l1_batches.number\n LEFT JOIN 
data_availability ON data_availability.l1_batch_number = l1_batches.number\n JOIN protocol_versions ON protocol_versions.id = l1_batches.protocol_version\n WHERE\n eth_commit_tx_id IS NULL\n AND number != 0\n AND protocol_versions.bootloader_code_hash = $1\n AND protocol_versions.default_account_code_hash = $2\n AND commitment IS NOT NULL\n AND (\n protocol_versions.id = $3\n OR protocol_versions.upgrade_tx_hash IS NULL\n )\n AND events_queue_commitment IS NOT NULL\n AND bootloader_initial_content_commitment IS NOT NULL\n AND (\n data_availability.inclusion_data IS NOT NULL\n OR $4 IS TRUE\n )\n ORDER BY\n number\n LIMIT\n $5\n ", - "describe": { - "columns": [ - { - "ordinal": 0, - "name": "number", - "type_info": "Int8" - }, - { - "ordinal": 1, - "name": "timestamp", - "type_info": "Int8" - }, - { - "ordinal": 2, - "name": "l1_tx_count", - "type_info": "Int4" - }, - { - "ordinal": 3, - "name": "l2_tx_count", - "type_info": "Int4" - }, - { - "ordinal": 4, - "name": "bloom", - "type_info": "Bytea" - }, - { - "ordinal": 5, - "name": "priority_ops_onchain_data", - "type_info": "ByteaArray" - }, - { - "ordinal": 6, - "name": "hash", - "type_info": "Bytea" - }, - { - "ordinal": 7, - "name": "commitment", - "type_info": "Bytea" - }, - { - "ordinal": 8, - "name": "eth_prove_tx_id", - "type_info": "Int4" - }, - { - "ordinal": 9, - "name": "eth_commit_tx_id", - "type_info": "Int4" - }, - { - "ordinal": 10, - "name": "eth_execute_tx_id", - "type_info": "Int4" - }, - { - "ordinal": 11, - "name": "l2_to_l1_logs", - "type_info": "ByteaArray" - }, - { - "ordinal": 12, - "name": "l2_to_l1_messages", - "type_info": "ByteaArray" - }, - { - "ordinal": 13, - "name": "used_contract_hashes", - "type_info": "Jsonb" - }, - { - "ordinal": 14, - "name": "compressed_initial_writes", - "type_info": "Bytea" - }, - { - "ordinal": 15, - "name": "compressed_repeated_writes", - "type_info": "Bytea" - }, - { - "ordinal": 16, - "name": "l2_l1_merkle_root", - "type_info": "Bytea" - }, - { - "ordinal": 17, - "name": "rollup_last_leaf_index", - "type_info": "Int8" - }, - { - "ordinal": 18, - "name": "zkporter_is_available", - "type_info": "Bool" - }, - { - "ordinal": 19, - "name": "bootloader_code_hash", - "type_info": "Bytea" - }, - { - "ordinal": 20, - "name": "default_aa_code_hash", - "type_info": "Bytea" - }, - { - "ordinal": 21, - "name": "aux_data_hash", - "type_info": "Bytea" - }, - { - "ordinal": 22, - "name": "pass_through_data_hash", - "type_info": "Bytea" - }, - { - "ordinal": 23, - "name": "meta_parameters_hash", - "type_info": "Bytea" - }, - { - "ordinal": 24, - "name": "protocol_version", - "type_info": "Int4" - }, - { - "ordinal": 25, - "name": "compressed_state_diffs", - "type_info": "Bytea" - }, - { - "ordinal": 26, - "name": "system_logs", - "type_info": "ByteaArray" - }, - { - "ordinal": 27, - "name": "events_queue_commitment", - "type_info": "Bytea" - }, - { - "ordinal": 28, - "name": "bootloader_initial_content_commitment", - "type_info": "Bytea" - }, - { - "ordinal": 29, - "name": "pubdata_input", - "type_info": "Bytea" - } - ], - "parameters": { - "Left": [ - "Bytea", - "Bytea", - "Int4", - "Bool", - "Int8" - ] - }, - "nullable": [ - false, - false, - false, - false, - false, - false, - true, - true, - true, - true, - true, - false, - false, - false, - true, - true, - true, - true, - true, - true, - true, - true, - true, - true, - true, - true, - false, - true, - true, - true - ] - }, - "hash": "f0ea0bc1d776591cbdeeddc398e1068650eba62c2478c08d5a6216c85014eb5f" -} From 
b28ebb150fda872a7c0a830b44109d2fb6bd76f1 Mon Sep 17 00:00:00 2001 From: dimazhornyk Date: Thu, 30 May 2024 17:32:35 +0200 Subject: [PATCH 29/69] swap lines --- core/node/da_dispatcher/src/da_dispatcher.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/core/node/da_dispatcher/src/da_dispatcher.rs b/core/node/da_dispatcher/src/da_dispatcher.rs index 7a7d9ba88cd1..148ce3d7bc47 100644 --- a/core/node/da_dispatcher/src/da_dispatcher.rs +++ b/core/node/da_dispatcher/src/da_dispatcher.rs @@ -178,10 +178,10 @@ where return Ok(result); } Err(err) => { - tracing::warn!(%err, "Failed DA dispatch request {retries}/{max_retries} for batch {batch_number}, retrying in {backoff_secs} seconds."); if retries > max_retries { return Err(err); } + tracing::warn!(%err, "Failed DA dispatch request {retries}/{max_retries} for batch {batch_number}, retrying in {backoff_secs} seconds."); retries += 1; let sleep_duration = Duration::from_secs(backoff_secs) .mul_f32(rand::thread_rng().gen_range(0.8..1.2)); From 943d40a72a81d6a4686d28b44fe22a38b2941717 Mon Sep 17 00:00:00 2001 From: dimazhornyk Date: Thu, 30 May 2024 18:14:43 +0200 Subject: [PATCH 30/69] fix --- core/lib/da_client/src/gcs/mod.rs | 10 +++++++++- core/node/da_dispatcher/Cargo.toml | 1 - 2 files changed, 9 insertions(+), 2 deletions(-) diff --git a/core/lib/da_client/src/gcs/mod.rs b/core/lib/da_client/src/gcs/mod.rs index 7a9f9b3e25b1..2c384a7edef8 100644 --- a/core/lib/da_client/src/gcs/mod.rs +++ b/core/lib/da_client/src/gcs/mod.rs @@ -43,7 +43,15 @@ impl DataAvailabilityClient for GCSDAClient { Ok(DispatchResponse { blob_id: key }) } - async fn get_inclusion_data(&self, _: String) -> Result, anyhow::Error> { + async fn get_inclusion_data( + &self, + key: String, + ) -> Result, anyhow::Error> { + let key_u32 = key.parse::().unwrap(); + self.object_store + .get::(L1BatchNumber(key_u32)) + .await?; + // Using default here because we don't get any inclusion data from GCS, thus there's // nothing to check on L1. 
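+        // The `get` call above only checks that the pubdata blob for this batch is still
+        // retrievable from the object store; its contents are otherwise discarded.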
return Ok(Some(InclusionData::default())); diff --git a/core/node/da_dispatcher/Cargo.toml b/core/node/da_dispatcher/Cargo.toml index 930d45d9f675..4bee01e05118 100644 --- a/core/node/da_dispatcher/Cargo.toml +++ b/core/node/da_dispatcher/Cargo.toml @@ -9,7 +9,6 @@ license.workspace = true keywords.workspace = true categories.workspace = true -# See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html [dependencies] vise.workspace = true From c62146e91edd2c3b8ffdffdc16d727c1054da67e Mon Sep 17 00:00:00 2001 From: dimazhornyk Date: Thu, 30 May 2024 22:54:58 +0200 Subject: [PATCH 31/69] add transient errors --- Cargo.lock | 2 +- Cargo.toml | 2 +- core/lib/da_client/src/gcs/mod.rs | 33 +++++++++++++------- core/lib/da_client/src/no_da/mod.rs | 6 ++-- core/node/da_dispatcher/src/da_dispatcher.rs | 7 +++-- 5 files changed, 31 insertions(+), 19 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index d76b9b2864b8..23e99e12242c 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -2822,7 +2822,7 @@ dependencies = [ [[package]] name = "hyperchain_da" version = "0.1.0" -source = "git+https://github.com/matter-labs/hyperchain-da.git?rev=ad24b39e90a4a32db53d0a46fac8bf2c995f7a51#ad24b39e90a4a32db53d0a46fac8bf2c995f7a51" +source = "git+https://github.com/matter-labs/hyperchain-da.git?rev=6be8343f5cc0c22a3e153b2af72a8c7d0717a094#6be8343f5cc0c22a3e153b2af72a8c7d0717a094" dependencies = [ "anyhow", "async-trait", diff --git a/Cargo.toml b/Cargo.toml index 96535bdb6a40..fac5bf1a6ea1 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -202,7 +202,7 @@ zksync_consensus_storage = { version = "0.1.0", git = "https://github.com/matter zksync_consensus_utils = { version = "0.1.0", git = "https://github.com/matter-labs/era-consensus.git", rev = "3e6f101ee4124308c4c974caaa259d524549b0c6" } zksync_protobuf = { version = "0.1.0", git = "https://github.com/matter-labs/era-consensus.git", rev = "3e6f101ee4124308c4c974caaa259d524549b0c6" } zksync_protobuf_build = { version = "0.1.0", git = "https://github.com/matter-labs/era-consensus.git", rev = "3e6f101ee4124308c4c974caaa259d524549b0c6" } -zksync_da_layers = { package = "hyperchain_da", git = "https://github.com/matter-labs/hyperchain-da.git", rev = "ad24b39e90a4a32db53d0a46fac8bf2c995f7a51" } +zksync_da_layers = { package = "hyperchain_da", git = "https://github.com/matter-labs/hyperchain-da.git", rev = "6be8343f5cc0c22a3e153b2af72a8c7d0717a094" } # "Local" dependencies multivm = { path = "core/lib/multivm" } diff --git a/core/lib/da_client/src/gcs/mod.rs b/core/lib/da_client/src/gcs/mod.rs index 2c384a7edef8..8c8b7476d7a4 100644 --- a/core/lib/da_client/src/gcs/mod.rs +++ b/core/lib/da_client/src/gcs/mod.rs @@ -7,7 +7,7 @@ use std::{ use async_trait::async_trait; use zksync_config::ObjectStoreConfig; use zksync_da_layers::{ - types::{DispatchResponse, InclusionData}, + types::{DAError, DispatchResponse, InclusionData}, DataAvailabilityClient, }; use zksync_object_store::{ObjectStore, ObjectStoreFactory}; @@ -33,24 +33,35 @@ impl DataAvailabilityClient for GCSDAClient { &self, batch_number: u32, data: Vec, - ) -> Result { - let key = self + ) -> Result { + if let Err(err) = self .object_store .put(L1BatchNumber(batch_number), &StorablePubdata { data }) .await - .unwrap(); + { + return Err(DAError { + error: anyhow::Error::from(err), + is_transient: true, + }); + } - Ok(DispatchResponse { blob_id: key }) + Ok(DispatchResponse { + blob_id: batch_number.to_string(), + }) } - async fn get_inclusion_data( - &self, - key: String, - ) -> Result, 
anyhow::Error> { + async fn get_inclusion_data(&self, key: String) -> Result, DAError> { let key_u32 = key.parse::().unwrap(); - self.object_store + if let Err(err) = self + .object_store .get::(L1BatchNumber(key_u32)) - .await?; + .await + { + return Err(DAError { + error: anyhow::Error::from(err), + is_transient: true, + }); + } // Using default here because we don't get any inclusion data from GCS, thus there's // nothing to check on L1. diff --git a/core/lib/da_client/src/no_da/mod.rs b/core/lib/da_client/src/no_da/mod.rs index 051e4165b312..c3aed24b34ca 100644 --- a/core/lib/da_client/src/no_da/mod.rs +++ b/core/lib/da_client/src/no_da/mod.rs @@ -2,7 +2,7 @@ use std::fmt::Debug; use async_trait::async_trait; use zksync_da_layers::{ - types::{DispatchResponse, InclusionData}, + types::{DAError, DispatchResponse, InclusionData}, DataAvailabilityClient, }; @@ -18,11 +18,11 @@ impl NoDAClient { #[async_trait] impl DataAvailabilityClient for NoDAClient { - async fn dispatch_blob(&self, _: u32, _: Vec) -> Result { + async fn dispatch_blob(&self, _: u32, _: Vec) -> Result { Ok(DispatchResponse::default()) } - async fn get_inclusion_data(&self, _: String) -> Result, anyhow::Error> { + async fn get_inclusion_data(&self, _: String) -> Result, DAError> { return Ok(Some(InclusionData::default())); } diff --git a/core/node/da_dispatcher/src/da_dispatcher.rs b/core/node/da_dispatcher/src/da_dispatcher.rs index 148ce3d7bc47..dea938c51b4b 100644 --- a/core/node/da_dispatcher/src/da_dispatcher.rs +++ b/core/node/da_dispatcher/src/da_dispatcher.rs @@ -5,7 +5,7 @@ use chrono::{NaiveDateTime, Utc}; use rand::Rng; use tokio::sync::watch; use zksync_config::DADispatcherConfig; -use zksync_da_layers::DataAvailabilityClient; +use zksync_da_layers::{types::IsTransient, DataAvailabilityClient}; use zksync_dal::{Connection, ConnectionPool, Core, CoreDal}; use zksync_types::L1BatchNumber; @@ -165,7 +165,7 @@ async fn retry( mut f: F, ) -> Result where - E: std::fmt::Display, + E: std::fmt::Display + IsTransient, Fut: Future>, F: FnMut() -> Fut, { @@ -178,9 +178,10 @@ where return Ok(result); } Err(err) => { - if retries > max_retries { + if !err.is_transient() || retries > max_retries { return Err(err); } + tracing::warn!(%err, "Failed DA dispatch request {retries}/{max_retries} for batch {batch_number}, retrying in {backoff_secs} seconds."); retries += 1; let sleep_duration = Duration::from_secs(backoff_secs) From 0c0cd75c35d5d959a9e7cea0eeeaa08e8297f132 Mon Sep 17 00:00:00 2001 From: dimazhornyk Date: Thu, 30 May 2024 23:32:43 +0200 Subject: [PATCH 32/69] fix cargo lock --- prover/Cargo.lock | 2 +- zk_toolbox/crates/zk_inception/src/forge_utils.rs | 1 - 2 files changed, 1 insertion(+), 2 deletions(-) diff --git a/prover/Cargo.lock b/prover/Cargo.lock index d2412f3de349..a88ba5567e70 100644 --- a/prover/Cargo.lock +++ b/prover/Cargo.lock @@ -2824,7 +2824,7 @@ dependencies = [ [[package]] name = "hyperchain_da" version = "0.1.0" -source = "git+https://github.com/matter-labs/hyperchain-da.git?rev=ad24b39e90a4a32db53d0a46fac8bf2c995f7a51#ad24b39e90a4a32db53d0a46fac8bf2c995f7a51" +source = "git+https://github.com/matter-labs/hyperchain-da.git?rev=6be8343f5cc0c22a3e153b2af72a8c7d0717a094#6be8343f5cc0c22a3e153b2af72a8c7d0717a094" dependencies = [ "anyhow", "async-trait", diff --git a/zk_toolbox/crates/zk_inception/src/forge_utils.rs b/zk_toolbox/crates/zk_inception/src/forge_utils.rs index a9fa45c9f34d..f222daff39bc 100644 --- a/zk_toolbox/crates/zk_inception/src/forge_utils.rs +++ 
b/zk_toolbox/crates/zk_inception/src/forge_utils.rs @@ -2,7 +2,6 @@ use crate::messages::{msg_address_doesnt_have_enough_money_prompt, MSG_DEPLOYER_ use anyhow::anyhow; use common::forge::ForgeScript; use ethers::types::{H256, U256}; - use crate::consts::MINIMUM_BALANCE_FOR_WALLET; pub fn fill_forge_private_key( From ae91f3f8b673435fb05e5be1665d71b9d32dbe69 Mon Sep 17 00:00:00 2001 From: dimazhornyk Date: Thu, 30 May 2024 23:47:41 +0200 Subject: [PATCH 33/69] zk fmt --- .../zk_inception/src/accept_ownership.rs | 6 ++-- .../src/commands/chain/args/genesis.rs | 12 ++++---- .../src/commands/chain/args/init.rs | 11 +++++--- .../zk_inception/src/commands/chain/create.rs | 12 ++++---- .../src/commands/chain/deploy_paymaster.rs | 2 +- .../zk_inception/src/commands/chain/init.rs | 5 ++-- .../src/commands/chain/initialize_bridges.rs | 2 +- .../src/commands/ecosystem/args/create.rs | 2 +- .../src/commands/ecosystem/args/init.rs | 12 +++++--- .../src/commands/ecosystem/change_default.rs | 6 ++-- .../src/commands/ecosystem/create.rs | 28 ++++++++++--------- .../src/commands/ecosystem/create_configs.rs | 6 ++-- .../src/commands/ecosystem/init.rs | 13 +++++---- .../crates/zk_inception/src/forge_utils.rs | 7 +++-- zk_toolbox/crates/zk_inception/src/main.rs | 2 +- zk_toolbox/crates/zk_inception/src/server.rs | 6 ++-- 16 files changed, 76 insertions(+), 56 deletions(-) diff --git a/zk_toolbox/crates/zk_inception/src/accept_ownership.rs b/zk_toolbox/crates/zk_inception/src/accept_ownership.rs index b88167ca6d20..830da513d4f0 100644 --- a/zk_toolbox/crates/zk_inception/src/accept_ownership.rs +++ b/zk_toolbox/crates/zk_inception/src/accept_ownership.rs @@ -1,4 +1,3 @@ -use crate::messages::MSG_ACCEPTING_GOVERNANCE_SPINNER; use common::{ forge::{Forge, ForgeScript, ForgeScriptArgs}, spinner::Spinner, @@ -13,7 +12,10 @@ use config::{ use ethers::types::{Address, H256}; use xshell::Shell; -use crate::forge_utils::{check_the_balance, fill_forge_private_key}; +use crate::{ + forge_utils::{check_the_balance, fill_forge_private_key}, + messages::MSG_ACCEPTING_GOVERNANCE_SPINNER, +}; pub async fn accept_admin( shell: &Shell, diff --git a/zk_toolbox/crates/zk_inception/src/commands/chain/args/genesis.rs b/zk_toolbox/crates/zk_inception/src/commands/chain/args/genesis.rs index b8fdcab6a8cc..42c653b9bce1 100644 --- a/zk_toolbox/crates/zk_inception/src/commands/chain/args/genesis.rs +++ b/zk_toolbox/crates/zk_inception/src/commands/chain/args/genesis.rs @@ -4,11 +4,13 @@ use config::{ChainConfig, DatabaseConfig, DatabasesConfig}; use serde::{Deserialize, Serialize}; use url::Url; -use crate::defaults::{generate_db_names, DBNames, DATABASE_PROVER_URL, DATABASE_SERVER_URL}; -use crate::messages::{ - msg_prover_db_name_prompt, msg_prover_db_url_prompt, msg_server_db_name_prompt, - msg_server_db_url_prompt, MSG_GENESIS_USE_DEFAULT_HELP, MSG_PROVER_DB_NAME_HELP, - MSG_PROVER_DB_URL_HELP, MSG_SERVER_DB_NAME_HELP, MSG_SERVER_DB_URL_HELP, +use crate::{ + defaults::{generate_db_names, DBNames, DATABASE_PROVER_URL, DATABASE_SERVER_URL}, + messages::{ + msg_prover_db_name_prompt, msg_prover_db_url_prompt, msg_server_db_name_prompt, + msg_server_db_url_prompt, MSG_GENESIS_USE_DEFAULT_HELP, MSG_PROVER_DB_NAME_HELP, + MSG_PROVER_DB_URL_HELP, MSG_SERVER_DB_NAME_HELP, MSG_SERVER_DB_URL_HELP, + }, }; #[derive(Debug, Clone, Serialize, Deserialize, Parser, Default)] diff --git a/zk_toolbox/crates/zk_inception/src/commands/chain/args/init.rs b/zk_toolbox/crates/zk_inception/src/commands/chain/args/init.rs index e917136f9bdf..0700c96c76ec 
100644 --- a/zk_toolbox/crates/zk_inception/src/commands/chain/args/init.rs +++ b/zk_toolbox/crates/zk_inception/src/commands/chain/args/init.rs @@ -6,11 +6,14 @@ use types::L1Network; use url::Url; use super::genesis::GenesisArgsFinal; -use crate::messages::{ - MSG_DEPLOY_PAYMASTER_PROMPT, MSG_GENESIS_ARGS_HELP, MSG_L1_RPC_URL_HELP, - MSG_L1_RPC_URL_INVALID_ERR, MSG_L1_RPC_URL_PROMPT, +use crate::{ + commands::chain::args::genesis::GenesisArgs, + defaults::LOCAL_RPC_URL, + messages::{ + MSG_DEPLOY_PAYMASTER_PROMPT, MSG_GENESIS_ARGS_HELP, MSG_L1_RPC_URL_HELP, + MSG_L1_RPC_URL_INVALID_ERR, MSG_L1_RPC_URL_PROMPT, + }, }; -use crate::{commands::chain::args::genesis::GenesisArgs, defaults::LOCAL_RPC_URL}; #[derive(Debug, Clone, Serialize, Deserialize, Parser)] pub struct InitArgs { diff --git a/zk_toolbox/crates/zk_inception/src/commands/chain/create.rs b/zk_toolbox/crates/zk_inception/src/commands/chain/create.rs index d93f8969b0ce..e64b3eb281db 100644 --- a/zk_toolbox/crates/zk_inception/src/commands/chain/create.rs +++ b/zk_toolbox/crates/zk_inception/src/commands/chain/create.rs @@ -1,9 +1,5 @@ use std::cell::OnceCell; -use crate::messages::{ - MSG_CHAIN_CREATED, MSG_CREATING_CHAIN, MSG_CREATING_CHAIN_CONFIGURATIONS_SPINNER, - MSG_SELECTED_CONFIG, -}; use common::{logger, spinner::Spinner}; use config::{ create_local_configs_dir, create_wallets, traits::SaveConfigWithBasePath, ChainConfig, @@ -12,7 +8,13 @@ use config::{ use types::ChainId; use xshell::Shell; -use crate::commands::chain::args::create::{ChainCreateArgs, ChainCreateArgsFinal}; +use crate::{ + commands::chain::args::create::{ChainCreateArgs, ChainCreateArgsFinal}, + messages::{ + MSG_CHAIN_CREATED, MSG_CREATING_CHAIN, MSG_CREATING_CHAIN_CONFIGURATIONS_SPINNER, + MSG_SELECTED_CONFIG, + }, +}; pub fn run(args: ChainCreateArgs, shell: &Shell) -> anyhow::Result<()> { let mut ecosystem_config = EcosystemConfig::from_file(shell)?; diff --git a/zk_toolbox/crates/zk_inception/src/commands/chain/deploy_paymaster.rs b/zk_toolbox/crates/zk_inception/src/commands/chain/deploy_paymaster.rs index cd01ad054bf8..fe8dcdc562b2 100644 --- a/zk_toolbox/crates/zk_inception/src/commands/chain/deploy_paymaster.rs +++ b/zk_toolbox/crates/zk_inception/src/commands/chain/deploy_paymaster.rs @@ -1,4 +1,3 @@ -use crate::messages::{MSG_CHAIN_NOT_INITIALIZED, MSG_DEPLOYING_PAYMASTER}; use anyhow::Context; use common::{ config::global_config, @@ -18,6 +17,7 @@ use xshell::Shell; use crate::{ config_manipulations::update_paymaster, forge_utils::{check_the_balance, fill_forge_private_key}, + messages::{MSG_CHAIN_NOT_INITIALIZED, MSG_DEPLOYING_PAYMASTER}, }; pub async fn run(args: ForgeScriptArgs, shell: &Shell) -> anyhow::Result<()> { diff --git a/zk_toolbox/crates/zk_inception/src/commands/chain/init.rs b/zk_toolbox/crates/zk_inception/src/commands/chain/init.rs index 074592d70896..0c9ac8743eee 100644 --- a/zk_toolbox/crates/zk_inception/src/commands/chain/init.rs +++ b/zk_toolbox/crates/zk_inception/src/commands/chain/init.rs @@ -17,7 +17,6 @@ use config::{ use xshell::Shell; use super::args::init::InitArgsFinal; -use crate::messages::{MSG_CHAIN_NOT_FOUND_ERR, MSG_SELECTED_CONFIG}; use crate::{ accept_ownership::accept_admin, commands::chain::{ @@ -27,8 +26,8 @@ use crate::{ forge_utils::{check_the_balance, fill_forge_private_key}, messages::{ msg_initializing_chain, MSG_ACCEPTING_ADMIN_SPINNER, MSG_CHAIN_INITIALIZED, - MSG_CONTRACTS_CONFIG_NOT_FOUND_ERR, MSG_GENESIS_DATABASE_ERR, - MSG_REGISTERING_CHAIN_SPINNER, + MSG_CHAIN_NOT_FOUND_ERR, 
MSG_CONTRACTS_CONFIG_NOT_FOUND_ERR, MSG_GENESIS_DATABASE_ERR, + MSG_REGISTERING_CHAIN_SPINNER, MSG_SELECTED_CONFIG, }, }; diff --git a/zk_toolbox/crates/zk_inception/src/commands/chain/initialize_bridges.rs b/zk_toolbox/crates/zk_inception/src/commands/chain/initialize_bridges.rs index 206aff89d2ea..4a81a2b26f1b 100644 --- a/zk_toolbox/crates/zk_inception/src/commands/chain/initialize_bridges.rs +++ b/zk_toolbox/crates/zk_inception/src/commands/chain/initialize_bridges.rs @@ -1,6 +1,5 @@ use std::path::Path; -use crate::messages::{MSG_CHAIN_NOT_INITIALIZED, MSG_INITIALIZING_BRIDGES_SPINNER}; use anyhow::Context; use common::{ cmd::Cmd, @@ -21,6 +20,7 @@ use xshell::{cmd, Shell}; use crate::{ config_manipulations::update_l2_shared_bridge, forge_utils::{check_the_balance, fill_forge_private_key}, + messages::{MSG_CHAIN_NOT_INITIALIZED, MSG_INITIALIZING_BRIDGES_SPINNER}, }; pub async fn run(args: ForgeScriptArgs, shell: &Shell) -> anyhow::Result<()> { diff --git a/zk_toolbox/crates/zk_inception/src/commands/ecosystem/args/create.rs b/zk_toolbox/crates/zk_inception/src/commands/ecosystem/args/create.rs index ee609d3f8506..30b7d1cf1508 100644 --- a/zk_toolbox/crates/zk_inception/src/commands/ecosystem/args/create.rs +++ b/zk_toolbox/crates/zk_inception/src/commands/ecosystem/args/create.rs @@ -5,6 +5,7 @@ use common::{slugify, Prompt, PromptConfirm, PromptSelect}; use serde::{Deserialize, Serialize}; use strum::IntoEnumIterator; use strum_macros::EnumIter; +use types::{L1Network, WalletCreation}; use crate::{ commands::chain::{args::create::ChainCreateArgs, ChainCreateArgsFinal}, @@ -15,7 +16,6 @@ use crate::{ MSG_START_CONTAINERS_PROMPT, }, }; -use types::{L1Network, WalletCreation}; #[derive(Debug, Serialize, Deserialize, Parser)] pub struct EcosystemCreateArgs { diff --git a/zk_toolbox/crates/zk_inception/src/commands/ecosystem/args/init.rs b/zk_toolbox/crates/zk_inception/src/commands/ecosystem/args/init.rs index 46a76c933e2a..075435cf86f6 100644 --- a/zk_toolbox/crates/zk_inception/src/commands/ecosystem/args/init.rs +++ b/zk_toolbox/crates/zk_inception/src/commands/ecosystem/args/init.rs @@ -6,11 +6,15 @@ use serde::{Deserialize, Serialize}; use types::L1Network; use url::Url; -use crate::messages::{ - MSG_DEPLOY_ECOSYSTEM_PROMPT, MSG_DEPLOY_ERC20_PROMPT, MSG_DEPLOY_PAYMASTER_PROMPT, - MSG_GENESIS_ARGS_HELP, MSG_L1_RPC_URL_HELP, MSG_L1_RPC_URL_INVALID_ERR, MSG_L1_RPC_URL_PROMPT, +use crate::{ + commands::chain::args::genesis::GenesisArgs, + defaults::LOCAL_RPC_URL, + messages::{ + MSG_DEPLOY_ECOSYSTEM_PROMPT, MSG_DEPLOY_ERC20_PROMPT, MSG_DEPLOY_PAYMASTER_PROMPT, + MSG_GENESIS_ARGS_HELP, MSG_L1_RPC_URL_HELP, MSG_L1_RPC_URL_INVALID_ERR, + MSG_L1_RPC_URL_PROMPT, + }, }; -use crate::{commands::chain::args::genesis::GenesisArgs, defaults::LOCAL_RPC_URL}; #[derive(Debug, Clone, Serialize, Deserialize, Parser)] pub struct EcosystemArgs { diff --git a/zk_toolbox/crates/zk_inception/src/commands/ecosystem/change_default.rs b/zk_toolbox/crates/zk_inception/src/commands/ecosystem/change_default.rs index 80e72e8457d5..3bd392c0558d 100644 --- a/zk_toolbox/crates/zk_inception/src/commands/ecosystem/change_default.rs +++ b/zk_toolbox/crates/zk_inception/src/commands/ecosystem/change_default.rs @@ -2,8 +2,10 @@ use common::PromptSelect; use config::{traits::SaveConfigWithBasePath, EcosystemConfig}; use xshell::Shell; -use crate::commands::ecosystem::args::change_default::ChangeDefaultChain; -use crate::messages::{msg_chain_doesnt_exist_err, MSG_DEFAULT_CHAIN_PROMPT}; +use crate::{ + 
commands::ecosystem::args::change_default::ChangeDefaultChain, + messages::{msg_chain_doesnt_exist_err, MSG_DEFAULT_CHAIN_PROMPT}, +}; pub fn run(args: ChangeDefaultChain, shell: &Shell) -> anyhow::Result<()> { let mut ecosystem_config = EcosystemConfig::from_file(shell)?; diff --git a/zk_toolbox/crates/zk_inception/src/commands/ecosystem/create.rs b/zk_toolbox/crates/zk_inception/src/commands/ecosystem/create.rs index 1198ee413c21..4daab36c56b8 100644 --- a/zk_toolbox/crates/zk_inception/src/commands/ecosystem/create.rs +++ b/zk_toolbox/crates/zk_inception/src/commands/ecosystem/create.rs @@ -12,19 +12,21 @@ use config::{ }; use xshell::{cmd, Shell}; -use crate::commands::ecosystem::{ - args::create::EcosystemCreateArgs, - create_configs::{create_erc20_deployment_config, create_initial_deployments_config}, -}; -use crate::commands::{ - chain::create_chain_inner, - containers::{initialize_docker, start_containers}, -}; -use crate::messages::{ - MSG_CLONING_ERA_REPO_SPINNER, MSG_CREATED_ECOSYSTEM, MSG_CREATING_DEFAULT_CHAIN_SPINNER, - MSG_CREATING_ECOSYSTEM, MSG_CREATING_INITIAL_CONFIGURATIONS_SPINNER, - MSG_ECOSYSTEM_ALREADY_EXISTS_ERR, MSG_ECOSYSTEM_CONFIG_INVALID_ERR, MSG_SELECTED_CONFIG, - MSG_STARTING_CONTAINERS_SPINNER, +use crate::{ + commands::{ + chain::create_chain_inner, + containers::{initialize_docker, start_containers}, + ecosystem::{ + args::create::EcosystemCreateArgs, + create_configs::{create_erc20_deployment_config, create_initial_deployments_config}, + }, + }, + messages::{ + MSG_CLONING_ERA_REPO_SPINNER, MSG_CREATED_ECOSYSTEM, MSG_CREATING_DEFAULT_CHAIN_SPINNER, + MSG_CREATING_ECOSYSTEM, MSG_CREATING_INITIAL_CONFIGURATIONS_SPINNER, + MSG_ECOSYSTEM_ALREADY_EXISTS_ERR, MSG_ECOSYSTEM_CONFIG_INVALID_ERR, MSG_SELECTED_CONFIG, + MSG_STARTING_CONTAINERS_SPINNER, + }, }; pub fn run(args: EcosystemCreateArgs, shell: &Shell) -> anyhow::Result<()> { diff --git a/zk_toolbox/crates/zk_inception/src/commands/ecosystem/create_configs.rs b/zk_toolbox/crates/zk_inception/src/commands/ecosystem/create_configs.rs index 390df426348c..b4f42313e3d0 100644 --- a/zk_toolbox/crates/zk_inception/src/commands/ecosystem/create_configs.rs +++ b/zk_toolbox/crates/zk_inception/src/commands/ecosystem/create_configs.rs @@ -1,12 +1,12 @@ use std::path::Path; -use xshell::Shell; - -use crate::messages::{MSG_SAVE_ERC20_CONFIG_ATTENTION, MSG_SAVE_INITIAL_CONFIG_ATTENTION}; use config::{ forge_interface::deploy_ecosystem::input::{Erc20DeploymentConfig, InitialDeploymentConfig}, traits::SaveConfigWithCommentAndBasePath, }; +use xshell::Shell; + +use crate::messages::{MSG_SAVE_ERC20_CONFIG_ATTENTION, MSG_SAVE_INITIAL_CONFIG_ATTENTION}; pub fn create_initial_deployments_config( shell: &Shell, diff --git a/zk_toolbox/crates/zk_inception/src/commands/ecosystem/init.rs b/zk_toolbox/crates/zk_inception/src/commands/ecosystem/init.rs index ddbd62b1d0e5..951e8d116963 100644 --- a/zk_toolbox/crates/zk_inception/src/commands/ecosystem/init.rs +++ b/zk_toolbox/crates/zk_inception/src/commands/ecosystem/init.rs @@ -3,12 +3,6 @@ use std::{ str::FromStr, }; -use crate::messages::{ - msg_ecosystem_initialized, msg_initializing_chain, MSG_CHAIN_NOT_INITIALIZED, - MSG_DEPLOYING_ECOSYSTEM_CONTRACTS_SPINNER, MSG_DEPLOYING_ERC20, MSG_DEPLOYING_ERC20_SPINNER, - MSG_DISTRIBUTING_ETH_SPINNER, MSG_ECOSYSTEM_CONTRACTS_PATH_INVALID_ERR, - MSG_ECOSYSTEM_CONTRACTS_PATH_PROMPT, MSG_INITIALIZING_ECOSYSTEM, MSG_INTALLING_DEPS_SPINNER, -}; use anyhow::Context; use common::{ cmd::Cmd, @@ -48,6 +42,13 @@ use crate::{ }, 
consts::AMOUNT_FOR_DISTRIBUTION_TO_WALLETS, forge_utils::{check_the_balance, fill_forge_private_key}, + messages::{ + msg_ecosystem_initialized, msg_initializing_chain, MSG_CHAIN_NOT_INITIALIZED, + MSG_DEPLOYING_ECOSYSTEM_CONTRACTS_SPINNER, MSG_DEPLOYING_ERC20, + MSG_DEPLOYING_ERC20_SPINNER, MSG_DISTRIBUTING_ETH_SPINNER, + MSG_ECOSYSTEM_CONTRACTS_PATH_INVALID_ERR, MSG_ECOSYSTEM_CONTRACTS_PATH_PROMPT, + MSG_INITIALIZING_ECOSYSTEM, MSG_INTALLING_DEPS_SPINNER, + }, }; pub async fn run(args: EcosystemInitArgs, shell: &Shell) -> anyhow::Result<()> { diff --git a/zk_toolbox/crates/zk_inception/src/forge_utils.rs b/zk_toolbox/crates/zk_inception/src/forge_utils.rs index f222daff39bc..322722320e7b 100644 --- a/zk_toolbox/crates/zk_inception/src/forge_utils.rs +++ b/zk_toolbox/crates/zk_inception/src/forge_utils.rs @@ -1,8 +1,11 @@ -use crate::messages::{msg_address_doesnt_have_enough_money_prompt, MSG_DEPLOYER_PK_NOT_SET_ERR}; use anyhow::anyhow; use common::forge::ForgeScript; use ethers::types::{H256, U256}; -use crate::consts::MINIMUM_BALANCE_FOR_WALLET; + +use crate::{ + consts::MINIMUM_BALANCE_FOR_WALLET, + messages::{msg_address_doesnt_have_enough_money_prompt, MSG_DEPLOYER_PK_NOT_SET_ERR}, +}; pub fn fill_forge_private_key( mut forge: ForgeScript, diff --git a/zk_toolbox/crates/zk_inception/src/main.rs b/zk_toolbox/crates/zk_inception/src/main.rs index 5e62f3b9ae25..b0e8e8f4fd69 100644 --- a/zk_toolbox/crates/zk_inception/src/main.rs +++ b/zk_toolbox/crates/zk_inception/src/main.rs @@ -13,9 +13,9 @@ pub mod accept_ownership; mod commands; mod config_manipulations; mod consts; -mod messages; mod defaults; pub mod forge_utils; +mod messages; pub mod server; #[derive(Parser, Debug)] diff --git a/zk_toolbox/crates/zk_inception/src/server.rs b/zk_toolbox/crates/zk_inception/src/server.rs index f5ef53376f07..6773d224cba3 100644 --- a/zk_toolbox/crates/zk_inception/src/server.rs +++ b/zk_toolbox/crates/zk_inception/src/server.rs @@ -2,13 +2,13 @@ use std::path::PathBuf; use anyhow::Context; use common::cmd::Cmd; -use xshell::{cmd, Shell}; - -use crate::messages::MSG_FAILED_TO_RUN_SERVER_ERR; use config::{ traits::FileConfigWithDefaultName, ChainConfig, ContractsConfig, GeneralConfig, GenesisConfig, SecretsConfig, WalletsConfig, }; +use xshell::{cmd, Shell}; + +use crate::messages::MSG_FAILED_TO_RUN_SERVER_ERR; pub struct RunServer { components: Option>, From 21d9320ab599a8bb6908b9533538d20f529f4715 Mon Sep 17 00:00:00 2001 From: dimazhornyk Date: Fri, 31 May 2024 00:45:45 +0200 Subject: [PATCH 34/69] replace todo with a github issue --- core/node/fee_model/src/l1_gas_price/gas_adjuster/mod.rs | 5 ++--- 1 file changed, 2 insertions(+), 3 deletions(-) diff --git a/core/node/fee_model/src/l1_gas_price/gas_adjuster/mod.rs b/core/node/fee_model/src/l1_gas_price/gas_adjuster/mod.rs index 641b14ee49b2..1885159a23e1 100644 --- a/core/node/fee_model/src/l1_gas_price/gas_adjuster/mod.rs +++ b/core/node/fee_model/src/l1_gas_price/gas_adjuster/mod.rs @@ -201,9 +201,8 @@ impl GasAdjuster { self.estimate_effective_gas_price() * self.pubdata_byte_gas() } PubdataSendingMode::Custom => { - // TODO: at this point we are assuming that the operators are subsidizing the cost - // of the pubdata sent to external DA layers. Fix this when we have a better - // understanding of dynamic pricing for custom DA layers. + // Fix this when we have a better understanding of dynamic pricing for custom DA layers. 
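+                // Until then, the effective price per pubdata byte is reported as zero here,
+                // i.e. the fee model does not charge for pubdata sent to an external DA layer.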
+ // GitHub issue: https://github.com/matter-labs/zksync-era/issues/2105 0 } } From 5be906c64ca548a50d388c61167e266bd29d22a2 Mon Sep 17 00:00:00 2001 From: dimazhornyk Date: Fri, 31 May 2024 13:43:50 +0200 Subject: [PATCH 35/69] add some safeguards --- Cargo.lock | 2 +- Cargo.toml | 2 +- core/bin/zksync_server/src/node_builder.rs | 2 ++ core/lib/config/src/configs/chain.rs | 6 +++++- core/lib/da_client/src/gcs/mod.rs | 4 ++++ core/lib/da_client/src/no_da/mod.rs | 4 ++++ core/lib/zksync_core_leftovers/src/lib.rs | 16 ++++++++-------- core/node/node_framework/examples/main_node.rs | 8 ++++++-- .../src/implementations/layers/da_client.rs | 13 ++++++++++++- prover/Cargo.lock | 2 +- 10 files changed, 44 insertions(+), 15 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index b044d4c6445b..45e951dd4406 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -2822,7 +2822,7 @@ dependencies = [ [[package]] name = "hyperchain_da" version = "0.1.0" -source = "git+https://github.com/matter-labs/hyperchain-da.git?rev=6be8343f5cc0c22a3e153b2af72a8c7d0717a094#6be8343f5cc0c22a3e153b2af72a8c7d0717a094" +source = "git+https://github.com/matter-labs/hyperchain-da.git?rev=822542abff3e7c9e42c6e65f4ba29f289a979d3d#822542abff3e7c9e42c6e65f4ba29f289a979d3d" dependencies = [ "anyhow", "async-trait", diff --git a/Cargo.toml b/Cargo.toml index fac5bf1a6ea1..97c803a298c6 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -202,7 +202,7 @@ zksync_consensus_storage = { version = "0.1.0", git = "https://github.com/matter zksync_consensus_utils = { version = "0.1.0", git = "https://github.com/matter-labs/era-consensus.git", rev = "3e6f101ee4124308c4c974caaa259d524549b0c6" } zksync_protobuf = { version = "0.1.0", git = "https://github.com/matter-labs/era-consensus.git", rev = "3e6f101ee4124308c4c974caaa259d524549b0c6" } zksync_protobuf_build = { version = "0.1.0", git = "https://github.com/matter-labs/era-consensus.git", rev = "3e6f101ee4124308c4c974caaa259d524549b0c6" } -zksync_da_layers = { package = "hyperchain_da", git = "https://github.com/matter-labs/hyperchain-da.git", rev = "6be8343f5cc0c22a3e153b2af72a8c7d0717a094" } +zksync_da_layers = { package = "hyperchain_da", git = "https://github.com/matter-labs/hyperchain-da.git", rev = "822542abff3e7c9e42c6e65f4ba29f289a979d3d" } # "Local" dependencies multivm = { path = "core/lib/multivm" } diff --git a/core/bin/zksync_server/src/node_builder.rs b/core/bin/zksync_server/src/node_builder.rs index e3c6bc46f9d6..3aa1c9e15ee9 100644 --- a/core/bin/zksync_server/src/node_builder.rs +++ b/core/bin/zksync_server/src/node_builder.rs @@ -404,9 +404,11 @@ impl MainNodeBuilder { fn add_da_client_layer(mut self) -> anyhow::Result { let eth_sender_config = try_load_config!(self.configs.eth); let da_config = try_load_config!(self.configs.da_dispatcher_config); + let state_keeper_config = try_load_config!(self.configs.state_keeper_config); self.node.add_layer(DataAvailabilityClientLayer::new( da_config, eth_sender_config, + state_keeper_config, )); Ok(self) } diff --git a/core/lib/config/src/configs/chain.rs b/core/lib/config/src/configs/chain.rs index ade0f9d4226f..8fbf0172a344 100644 --- a/core/lib/config/src/configs/chain.rs +++ b/core/lib/config/src/configs/chain.rs @@ -105,7 +105,11 @@ pub struct StateKeeperConfig { pub batch_overhead_l1_gas: u64, /// The maximum amount of gas that can be used by the batch. This value is derived from the circuits limitation per batch. pub max_gas_per_batch: u64, - /// The maximum amount of pubdata that can be used by the batch. 
Note that if the calldata is used as pubdata, this variable should not exceed 128kb. + /// The maximum amount of pubdata that can be used by the batch. Note that if the calldata is used as pubdata. + /// This variable should not exceed: + /// - 128kb for calldata-based rollups + /// - 120kb*number_of_blobs for blob-based rollups + /// - the DA layer blob size limit for the DA layer-based validiums pub max_pubdata_per_batch: u64, /// The version of the fee model to use. diff --git a/core/lib/da_client/src/gcs/mod.rs b/core/lib/da_client/src/gcs/mod.rs index 8c8b7476d7a4..bf82bdc87492 100644 --- a/core/lib/da_client/src/gcs/mod.rs +++ b/core/lib/da_client/src/gcs/mod.rs @@ -71,6 +71,10 @@ impl DataAvailabilityClient for GCSDAClient { fn clone_boxed(&self) -> Box { Box::new(self.clone()) } + + fn blob_size_limit(&self) -> usize { + 100 * 1024 * 1024 // 100 MB, high enough to not be a problem + } } impl Debug for GCSDAClient { diff --git a/core/lib/da_client/src/no_da/mod.rs b/core/lib/da_client/src/no_da/mod.rs index c3aed24b34ca..5f233d623861 100644 --- a/core/lib/da_client/src/no_da/mod.rs +++ b/core/lib/da_client/src/no_da/mod.rs @@ -29,4 +29,8 @@ impl DataAvailabilityClient for NoDAClient { fn clone_boxed(&self) -> Box { Box::new(self.clone()) } + + fn blob_size_limit(&self) -> usize { + 100 * 1024 * 1024 // 100 MB, high enough to not be a problem + } } diff --git a/core/lib/zksync_core_leftovers/src/lib.rs b/core/lib/zksync_core_leftovers/src/lib.rs index 2ec4400f4fc0..cc5a692921b3 100644 --- a/core/lib/zksync_core_leftovers/src/lib.rs +++ b/core/lib/zksync_core_leftovers/src/lib.rs @@ -749,14 +749,14 @@ pub async fn initialize_components( .context("add_tee_verifier_input_producer_to_task_futures()")?; } - if components.contains(&Component::DADispatcher) { - if eth - .sender - .clone() - .context("eth_sender")? - .pubdata_sending_mode - != PubdataSendingMode::Custom - { + if eth + .sender + .clone() + .context("eth_sender")? 
+ .pubdata_sending_mode + == PubdataSendingMode::Custom + { + if !components.contains(&Component::DADispatcher) { panic!("DA dispatcher requires custom pubdata sending mode"); } diff --git a/core/node/node_framework/examples/main_node.rs b/core/node/node_framework/examples/main_node.rs index 0bc0a8c00d96..8a2f51d143b3 100644 --- a/core/node/node_framework/examples/main_node.rs +++ b/core/node/node_framework/examples/main_node.rs @@ -318,8 +318,12 @@ impl MainNodeBuilder { fn add_da_client_layer(mut self) -> anyhow::Result { let da_config = DADispatcherConfig::from_env()?; let eth_config = EthConfig::from_env()?; - self.node - .add_layer(DataAvailabilityClientLayer::new(da_config, eth_config)); + let state_keeper_config = StateKeeperConfig::from_env()?; + self.node.add_layer(DataAvailabilityClientLayer::new( + da_config, + eth_config, + state_keeper_config, + )); Ok(self) } diff --git a/core/node/node_framework/src/implementations/layers/da_client.rs b/core/node/node_framework/src/implementations/layers/da_client.rs index d100fa7a0430..14ee453249bb 100644 --- a/core/node/node_framework/src/implementations/layers/da_client.rs +++ b/core/node/node_framework/src/implementations/layers/da_client.rs @@ -1,5 +1,6 @@ use zksync_config::{ configs::{ + chain::StateKeeperConfig, da_dispatcher::{DADispatcherConfig, DataAvailabilityMode}, eth_sender::PubdataSendingMode, }, @@ -20,13 +21,19 @@ use crate::{ pub struct DataAvailabilityClientLayer { da_config: DADispatcherConfig, eth_config: EthConfig, + state_keeper_config: StateKeeperConfig, } impl DataAvailabilityClientLayer { - pub fn new(da_config: DADispatcherConfig, eth_config: EthConfig) -> Self { + pub fn new( + da_config: DADispatcherConfig, + eth_config: EthConfig, + state_keeper_config: StateKeeperConfig, + ) -> Self { Self { da_config, eth_config, + state_keeper_config, } } } @@ -63,6 +70,10 @@ impl WiringLayer for DataAvailabilityClientLayer { }, }; + if self.state_keeper_config.max_pubdata_per_batch > client.blob_size_limit() as u64 { + panic!("State keeper max pubdata per batch is greater than the client blob size limit"); + } + context.insert_resource(DAClientResource(client))?; Ok(()) diff --git a/prover/Cargo.lock b/prover/Cargo.lock index a88ba5567e70..991fcd0ac567 100644 --- a/prover/Cargo.lock +++ b/prover/Cargo.lock @@ -2824,7 +2824,7 @@ dependencies = [ [[package]] name = "hyperchain_da" version = "0.1.0" -source = "git+https://github.com/matter-labs/hyperchain-da.git?rev=6be8343f5cc0c22a3e153b2af72a8c7d0717a094#6be8343f5cc0c22a3e153b2af72a8c7d0717a094" +source = "git+https://github.com/matter-labs/hyperchain-da.git?rev=822542abff3e7c9e42c6e65f4ba29f289a979d3d#822542abff3e7c9e42c6e65f4ba29f289a979d3d" dependencies = [ "anyhow", "async-trait", From e68d9d60638fe3bbff7cba3799f0a6975248cde0 Mon Sep 17 00:00:00 2001 From: dimazhornyk Date: Fri, 31 May 2024 15:30:31 +0200 Subject: [PATCH 36/69] fix --- infrastructure/protocol-upgrade/src/transaction.ts | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/infrastructure/protocol-upgrade/src/transaction.ts b/infrastructure/protocol-upgrade/src/transaction.ts index 38f4ed1e91bd..dc9d5d190512 100644 --- a/infrastructure/protocol-upgrade/src/transaction.ts +++ b/infrastructure/protocol-upgrade/src/transaction.ts @@ -1,5 +1,5 @@ import { BigNumberish } from '@ethersproject/bignumber'; -import { Bytes, BytesLike, ethers } from 'ethers'; +import { BytesLike, ethers } from 'ethers'; import { ForceDeployUpgraderFactory as ForceDeployUpgraderFactoryL2 } from 'l2-contracts/typechain'; 
import { DefaultUpgradeFactory as DefaultUpgradeFactoryL1, From 35398cd23bc0b4f5e1d8787ac55412631933181c Mon Sep 17 00:00:00 2001 From: dimazhornyk Date: Mon, 3 Jun 2024 16:51:38 +0200 Subject: [PATCH 37/69] address comments --- .github/release-please/manifest.json | 2 +- Cargo.lock | 11 +- core/CHANGELOG.md | 13 + core/bin/block_reverter/src/main.rs | 20 +- core/bin/external_node/Cargo.toml | 2 +- core/bin/external_node/src/main.rs | 19 +- .../external_node/src/version_sync_task.rs | 131 ------- core/lib/basic_types/src/basic_fri_types.rs | 12 + core/lib/basic_types/src/protocol_version.rs | 18 - core/lib/basic_types/src/web3/mod.rs | 43 ++- core/lib/config/src/configs/chain.rs | 4 +- core/lib/config/src/configs/da_dispatcher.rs | 34 +- core/lib/constants/src/data_availability.rs | 6 - core/lib/constants/src/lib.rs | 1 - core/lib/da_client/src/gcs/mod.rs | 36 +- core/lib/env_config/src/da_dispatcher.rs | 10 +- core/lib/eth_client/src/clients/http/query.rs | 47 +-- .../eth_client/src/clients/http/signing.rs | 12 +- core/lib/eth_client/src/clients/mock.rs | 8 +- core/lib/eth_client/src/lib.rs | 53 +-- core/lib/eth_client/src/types.rs | 41 +-- core/lib/multivm/src/utils.rs | 31 +- .../src/versions/vm_latest/constants.rs | 4 +- core/lib/object_store/Cargo.toml | 2 + core/lib/object_store/src/file.rs | 45 ++- core/lib/object_store/src/gcs.rs | 127 ++++--- core/lib/object_store/src/raw.rs | 78 ++++- core/lib/protobuf_config/src/da_dispatcher.rs | 28 +- .../src/proto/config/da_dispatcher.proto | 2 +- core/lib/snapshots_applier/src/lib.rs | 11 +- core/lib/snapshots_applier/src/tests/mod.rs | 12 +- core/lib/types/src/transaction_request.rs | 57 ++-- core/lib/utils/src/env.rs | 6 +- core/lib/zksync_core_leftovers/src/lib.rs | 6 +- .../api_server/src/execution_sandbox/apply.rs | 6 +- .../src/execution_sandbox/execute.rs | 21 +- core/node/api_server/src/tx_sender/mod.rs | 20 +- .../api_server/src/web3/namespaces/debug.rs | 16 +- .../api_server/src/web3/namespaces/eth.rs | 21 +- core/node/block_reverter/Cargo.toml | 2 + core/node/block_reverter/src/lib.rs | 40 ++- core/node/block_reverter/src/tests.rs | 173 +++++++++- .../src/validation_task.rs | 8 +- core/node/consistency_checker/src/lib.rs | 8 +- core/node/da_dispatcher/src/da_dispatcher.rs | 45 ++- core/node/da_dispatcher/src/lib.rs | 2 + core/node/da_dispatcher/src/metrics.rs | 2 +- core/node/eth_sender/src/error.rs | 13 +- core/node/eth_sender/src/eth_tx_aggregator.rs | 63 ++-- core/node/eth_sender/src/eth_tx_manager.rs | 30 +- core/node/eth_sender/src/lib.rs | 2 +- core/node/eth_sender/src/tests.rs | 4 +- core/node/eth_watch/src/client.rs | 26 +- .../eth_watch/src/event_processors/mod.rs | 7 +- core/node/eth_watch/src/tests.rs | 15 +- .../src/l1_gas_price/gas_adjuster/mod.rs | 8 +- core/node/genesis/src/lib.rs | 2 +- core/node/house_keeper/src/prover/metrics.rs | 5 +- .../fri_proof_compressor_queue_reporter.rs | 4 +- .../fri_prover_queue_reporter.rs | 5 +- .../fri_witness_generator_queue_reporter.rs | 9 +- core/node/node_framework/Cargo.toml | 2 + core/node/node_framework/examples/showcase.rs | 14 +- .../layers/circuit_breaker_checker.rs | 6 +- .../layers/commitment_generator.rs | 6 +- .../src/implementations/layers/consensus.rs | 10 +- .../layers/consistency_checker.rs | 6 +- .../layers/contract_verification_api.rs | 6 +- .../src/implementations/layers/da_client.rs | 6 +- .../implementations/layers/da_dispatcher.rs | 6 +- .../src/implementations/layers/eth_sender.rs | 10 +- .../src/implementations/layers/eth_watch.rs | 6 +- 
.../layers/healtcheck_server.rs | 6 +- .../implementations/layers/house_keeper.rs | 46 +-- .../src/implementations/layers/l1_gas.rs | 6 +- .../layers/metadata_calculator.rs | 10 +- .../src/implementations/layers/mod.rs | 2 + .../layers/prometheus_exporter.rs | 6 +- .../layers/proof_data_handler.rs | 6 +- .../layers/reorg_detector_checker.rs | 71 ++++ .../layers/reorg_detector_runner.rs | 73 ++++ .../src/implementations/layers/sigint.rs | 6 +- .../layers/state_keeper/mempool_io.rs | 10 +- .../layers/state_keeper/mod.rs | 10 +- .../layers/tee_verifier_input_producer.rs | 6 +- .../implementations/layers/web3_api/caches.rs | 6 +- .../implementations/layers/web3_api/server.rs | 12 +- .../layers/web3_api/tx_sender.rs | 10 +- .../src/implementations/resources/mod.rs | 1 + .../src/implementations/resources/reverter.rs | 15 + core/node/node_framework/src/precondition.rs | 4 +- .../node_framework/src/service/context.rs | 10 +- .../node_framework/src/service/runnables.rs | 26 +- core/node/node_framework/src/service/tests.rs | 14 +- core/node/node_framework/src/task.rs | 44 ++- core/node/node_sync/Cargo.toml | 1 + .../node_sync/src/tree_data_fetcher/mod.rs | 95 +++--- .../src/tree_data_fetcher/provider/mod.rs | 321 ++++++++++++++++++ .../src/tree_data_fetcher/provider/tests.rs | 244 +++++++++++++ .../node_sync/src/tree_data_fetcher/tests.rs | 114 ++++--- core/tests/loadnext/src/sdk/ethereum/mod.rs | 13 +- .../interfaces/ISystemContext.sol | 61 ++++ core/tests/ts-integration/src/env.ts | 6 +- core/tests/ts-integration/src/helpers.ts | 2 + core/tests/ts-integration/tests/fees.test.ts | 12 + .../tests/ts-integration/tests/system.test.ts | 17 +- prover/Cargo.lock | 13 +- prover/proof_fri_compressor/src/main.rs | 4 +- ...e27807ede6b4db9541198cee2861b874b52f9.json | 32 -- ...f3ad13840d2c497760e9bd0513f68dc4271c.json} | 12 +- ...b99cf505662036f2dd7a9f1807c4c1bad7c7b.json | 38 +++ .../src/fri_proof_compressor_dal.rs | 11 +- prover/prover_dal/src/fri_prover_dal.rs | 7 +- .../src/fri_witness_generator_dal.rs | 11 +- prover/prover_fri/src/main.rs | 6 +- prover/prover_fri_types/src/lib.rs | 14 +- prover/prover_version/Cargo.toml | 2 +- prover/prover_version/src/main.rs | 4 +- prover/setup-data-gpu-keys.json | 6 +- .../data/commitments.json | 6 +- .../data/finalization_hints_basic_1.bin | Bin 276 -> 276 bytes .../snark_verification_scheduler_key.json | 32 +- .../data/verification_basic_1_key.json | 136 ++++---- .../data/verification_leaf_3_key.json | 128 +++---- .../data/verification_scheduler_key.json | 128 +++---- .../src/keystore.rs | 13 +- prover/witness_generator/src/main.rs | 4 +- prover/witness_vector_generator/src/main.rs | 4 +- zk_toolbox/Cargo.lock | 12 + zk_toolbox/Cargo.toml | 5 +- zk_toolbox/crates/common/Cargo.toml | 3 +- zk_toolbox/crates/common/src/cmd.rs | 28 +- zk_toolbox/crates/common/src/db.rs | 73 +++- zk_toolbox/crates/common/src/term/logger.rs | 6 +- zk_toolbox/crates/common/src/term/spinner.rs | 9 + .../forge_interface/deploy_ecosystem/input.rs | 9 +- zk_toolbox/crates/config/src/genesis.rs | 3 +- zk_toolbox/crates/config/src/secrets.rs | 29 +- zk_toolbox/crates/types/Cargo.toml | 1 + zk_toolbox/crates/types/src/lib.rs | 2 + .../crates/types/src/protocol_version.rs | 87 +++++ zk_toolbox/crates/zk_inception/Cargo.toml | 1 + .../src/commands/chain/args/genesis.rs | 42 +-- .../src/commands/chain/genesis.rs | 36 +- .../src/commands/ecosystem/init.rs | 2 +- .../src/commands/ecosystem/mod.rs | 1 + .../zk_inception/src/config_manipulations.rs | 10 +- .../crates/zk_inception/src/defaults.rs | 10 +- 
.../crates/zk_inception/src/messages.rs | 1 - zk_toolbox/crates/zk_supervisor/Cargo.toml | 9 + .../src/commands/database/args/mod.rs | 41 +++ .../commands/database/args/new_migration.rs | 49 +++ .../src/commands/database/check_sqlx_data.rs | 59 ++++ .../src/commands/database/drop.rs | 42 +++ .../src/commands/database/migrate.rs | 54 +++ .../src/commands/database/mod.rs | 48 +++ .../src/commands/database/new_migration.rs | 43 +++ .../src/commands/database/prepare.rs | 58 ++++ .../src/commands/database/reset.rs | 46 +++ .../src/commands/database/setup.rs | 56 +++ .../crates/zk_supervisor/src/commands/mod.rs | 1 + zk_toolbox/crates/zk_supervisor/src/dals.rs | 70 ++++ zk_toolbox/crates/zk_supervisor/src/main.rs | 112 +++++- .../crates/zk_supervisor/src/messages.rs | 59 ++++ 164 files changed, 3360 insertions(+), 1221 deletions(-) delete mode 100644 core/bin/external_node/src/version_sync_task.rs delete mode 100644 core/lib/constants/src/data_availability.rs create mode 100644 core/node/node_framework/src/implementations/layers/reorg_detector_checker.rs create mode 100644 core/node/node_framework/src/implementations/layers/reorg_detector_runner.rs create mode 100644 core/node/node_framework/src/implementations/resources/reverter.rs create mode 100644 core/node/node_sync/src/tree_data_fetcher/provider/mod.rs create mode 100644 core/node/node_sync/src/tree_data_fetcher/provider/tests.rs create mode 100644 core/tests/ts-integration/contracts/custom-account/interfaces/ISystemContext.sol delete mode 100644 prover/prover_dal/.sqlx/query-7496a9df12e409162338266085ce27807ede6b4db9541198cee2861b874b52f9.json rename prover/prover_dal/.sqlx/{query-5449963638944edc62b5ec293bcda37e6c0c6c85fe5f701ebc9919fcad749a23.json => query-a94fffdbc1827dc5df908ea1e99ef3ad13840d2c497760e9bd0513f68dc4271c.json} (52%) create mode 100644 prover/prover_dal/.sqlx/query-e9c9b69852fa68f463f17b6d63ab99cf505662036f2dd7a9f1807c4c1bad7c7b.json create mode 100644 zk_toolbox/crates/types/src/protocol_version.rs create mode 100644 zk_toolbox/crates/zk_supervisor/src/commands/database/args/mod.rs create mode 100644 zk_toolbox/crates/zk_supervisor/src/commands/database/args/new_migration.rs create mode 100644 zk_toolbox/crates/zk_supervisor/src/commands/database/check_sqlx_data.rs create mode 100644 zk_toolbox/crates/zk_supervisor/src/commands/database/drop.rs create mode 100644 zk_toolbox/crates/zk_supervisor/src/commands/database/migrate.rs create mode 100644 zk_toolbox/crates/zk_supervisor/src/commands/database/mod.rs create mode 100644 zk_toolbox/crates/zk_supervisor/src/commands/database/new_migration.rs create mode 100644 zk_toolbox/crates/zk_supervisor/src/commands/database/prepare.rs create mode 100644 zk_toolbox/crates/zk_supervisor/src/commands/database/reset.rs create mode 100644 zk_toolbox/crates/zk_supervisor/src/commands/database/setup.rs create mode 100644 zk_toolbox/crates/zk_supervisor/src/commands/mod.rs create mode 100644 zk_toolbox/crates/zk_supervisor/src/dals.rs create mode 100644 zk_toolbox/crates/zk_supervisor/src/messages.rs diff --git a/.github/release-please/manifest.json b/.github/release-please/manifest.json index 3a4443af38b3..d360ffb19dfa 100644 --- a/.github/release-please/manifest.json +++ b/.github/release-please/manifest.json @@ -1,4 +1,4 @@ { - "core": "24.5.1", + "core": "24.6.0", "prover": "14.4.0" } diff --git a/Cargo.lock b/Cargo.lock index d3a48a41662b..edfd97a49026 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -7938,7 +7938,7 @@ dependencies = [ [[package]] name = "zkevm_circuits" version = 
"1.5.0" -source = "git+https://github.com/matter-labs/era-zkevm_circuits.git?branch=v1.5.0#28fe577bbb2b95c18d3959ba3dd37ca8ce5bd865" +source = "git+https://github.com/matter-labs/era-zkevm_circuits.git?branch=v1.5.0#b7a86c739e8a8f88e788e90893c6e7496f6d7dfc" dependencies = [ "arrayvec 0.7.4", "boojum", @@ -8034,6 +8034,8 @@ version = "0.1.0" dependencies = [ "anyhow", "assert_matches", + "async-trait", + "futures 0.3.28", "serde", "tempfile", "test-casing", @@ -8651,7 +8653,7 @@ dependencies = [ [[package]] name = "zksync_external_node" -version = "24.5.1" +version = "24.6.0" dependencies = [ "anyhow", "assert_matches", @@ -8972,6 +8974,7 @@ dependencies = [ "tokio", "tracing", "vlog", + "zksync_block_reverter", "zksync_circuit_breaker", "zksync_commitment_generator", "zksync_concurrency", @@ -8998,6 +9001,7 @@ dependencies = [ "zksync_proof_data_handler", "zksync_protobuf_config", "zksync_queued_job_processor", + "zksync_reorg_detector", "zksync_state", "zksync_state_keeper", "zksync_storage", @@ -9036,6 +9040,7 @@ dependencies = [ "assert_matches", "async-trait", "chrono", + "once_cell", "serde", "test-casing", "thiserror", @@ -9078,6 +9083,7 @@ name = "zksync_object_store" version = "0.1.0" dependencies = [ "anyhow", + "assert_matches", "async-trait", "bincode", "flate2", @@ -9085,6 +9091,7 @@ dependencies = [ "google-cloud-storage", "http", "prost 0.12.1", + "rand 0.8.5", "serde_json", "tempfile", "tokio", diff --git a/core/CHANGELOG.md b/core/CHANGELOG.md index 18d74c9e4468..149c049c9ed7 100644 --- a/core/CHANGELOG.md +++ b/core/CHANGELOG.md @@ -1,5 +1,18 @@ # Changelog +## [24.6.0](https://github.com/matter-labs/zksync-era/compare/core-v24.5.1...core-v24.6.0) (2024-06-03) + + +### Features + +* **en:** Fetch old L1 batch hashes from L1 ([#2000](https://github.com/matter-labs/zksync-era/issues/2000)) ([dc5a918](https://github.com/matter-labs/zksync-era/commit/dc5a9188a44a51810c9b7609a0887090043507f2)) +* use semver for metrics, move constants to prover workspace ([#2098](https://github.com/matter-labs/zksync-era/issues/2098)) ([7a50a9f](https://github.com/matter-labs/zksync-era/commit/7a50a9f79e516ec150d1f30b9f1c781a5523375b)) + + +### Bug Fixes + +* **api:** correct default fee data in eth call ([#2072](https://github.com/matter-labs/zksync-era/issues/2072)) ([e71f6f9](https://github.com/matter-labs/zksync-era/commit/e71f6f96bda08f8330c643a31df4ef9e82c9afc2)) + ## [24.5.1](https://github.com/matter-labs/zksync-era/compare/core-v24.5.0...core-v24.5.1) (2024-05-31) diff --git a/core/bin/block_reverter/src/main.rs b/core/bin/block_reverter/src/main.rs index faacf15597ff..b5e5c4054a3a 100644 --- a/core/bin/block_reverter/src/main.rs +++ b/core/bin/block_reverter/src/main.rs @@ -69,6 +69,9 @@ enum Command { /// Flag that specifies if RocksDB with state keeper cache should be rolled back. #[arg(long)] rollback_sk_cache: bool, + /// Flag that specifies if snapshot files in GCS should be rolled back. + #[arg(long, requires = "rollback_postgres")] + rollback_snapshots: bool, /// Flag that allows to roll back already executed blocks. It's ultra dangerous and required only for fixing external nodes. 
#[arg(long)] allow_executed_block_reversion: bool, @@ -187,6 +190,7 @@ async fn main() -> anyhow::Result<()> { rollback_postgres, rollback_tree, rollback_sk_cache, + rollback_snapshots, allow_executed_block_reversion, } => { if !rollback_tree && rollback_postgres { @@ -219,13 +223,15 @@ async fn main() -> anyhow::Result<()> { if rollback_postgres { block_reverter.enable_rolling_back_postgres(); - let object_store_config = SnapshotsObjectStoreConfig::from_env() - .context("SnapshotsObjectStoreConfig::from_env()")?; - block_reverter.enable_rolling_back_snapshot_objects( - ObjectStoreFactory::new(object_store_config.0) - .create_store() - .await, - ); + if rollback_snapshots { + let object_store_config = SnapshotsObjectStoreConfig::from_env() + .context("SnapshotsObjectStoreConfig::from_env()")?; + block_reverter.enable_rolling_back_snapshot_objects( + ObjectStoreFactory::new(object_store_config.0) + .create_store() + .await, + ); + } } if rollback_tree { block_reverter.enable_rolling_back_merkle_tree(db_config.merkle_tree.path); diff --git a/core/bin/external_node/Cargo.toml b/core/bin/external_node/Cargo.toml index 8ca3abb23eae..e390a9d873e3 100644 --- a/core/bin/external_node/Cargo.toml +++ b/core/bin/external_node/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "zksync_external_node" -version = "24.5.1" # x-release-please-version +version = "24.6.0" # x-release-please-version edition.workspace = true authors.workspace = true homepage.workspace = true diff --git a/core/bin/external_node/src/main.rs b/core/bin/external_node/src/main.rs index 0f53e8983881..2c0e79c4a665 100644 --- a/core/bin/external_node/src/main.rs +++ b/core/bin/external_node/src/main.rs @@ -66,7 +66,6 @@ mod metadata; mod metrics; #[cfg(test)] mod tests; -mod version_sync_task; /// Creates the state keeper configured to work in the external node mode. #[allow(clippy::too_many_arguments)] @@ -629,7 +628,8 @@ async fn init_tasks( "Running tree data fetcher (allows a node to operate w/o a Merkle tree or w/o waiting the tree to catch up). \ This is an experimental feature; do not use unless you know what you're doing" ); - let fetcher = TreeDataFetcher::new(main_node_client.clone(), connection_pool.clone()); + let fetcher = TreeDataFetcher::new(main_node_client.clone(), connection_pool.clone()) + .with_l1_data(eth_client.clone(), config.remote.diamond_proxy_addr)?; app_health.insert_component(fetcher.health_check())?; task_handles.push(tokio::spawn(fetcher.run(stop_receiver.clone()))); } @@ -912,20 +912,7 @@ async fn run_node( ); let validate_chain_ids_task = tokio::spawn(validate_chain_ids_task.run(stop_receiver.clone())); - let version_sync_task_pool = connection_pool.clone(); - let version_sync_task_main_node_client = main_node_client.clone(); - let mut stop_receiver_for_version_sync = stop_receiver.clone(); - let version_sync_task = tokio::spawn(async move { - version_sync_task::sync_versions( - version_sync_task_pool, - version_sync_task_main_node_client, - ) - .await?; - - stop_receiver_for_version_sync.changed().await.ok(); - Ok(()) - }); - let mut task_handles = vec![metrics_task, validate_chain_ids_task, version_sync_task]; + let mut task_handles = vec![metrics_task, validate_chain_ids_task]; task_handles.extend(prometheus_task); // Make sure that the node storage is initialized either via genesis or snapshot recovery. 
diff --git a/core/bin/external_node/src/version_sync_task.rs b/core/bin/external_node/src/version_sync_task.rs deleted file mode 100644 index a62241d7ab35..000000000000 --- a/core/bin/external_node/src/version_sync_task.rs +++ /dev/null @@ -1,131 +0,0 @@ -use std::cmp::Ordering; - -use anyhow::Context; -use zksync_dal::{ConnectionPool, Core, CoreDal}; -use zksync_types::{L1BatchNumber, L2BlockNumber, ProtocolVersionId}; -use zksync_web3_decl::{ - client::{DynClient, L2}, - namespaces::{EnNamespaceClient, ZksNamespaceClient}, -}; - -pub async fn get_l1_batch_remote_protocol_version( - main_node_client: &DynClient, - l1_batch_number: L1BatchNumber, -) -> anyhow::Result> { - let Some((miniblock, _)) = main_node_client.get_l2_block_range(l1_batch_number).await? else { - return Ok(None); - }; - let sync_block = main_node_client - .sync_l2_block(L2BlockNumber(miniblock.as_u32()), false) - .await?; - Ok(sync_block.map(|b| b.protocol_version)) -} - -// Synchronizes protocol version in `l1_batches` and `miniblocks` tables between EN and main node. -pub async fn sync_versions( - connection_pool: ConnectionPool, - main_node_client: Box>, -) -> anyhow::Result<()> { - tracing::info!("Starting syncing protocol version of blocks"); - - let mut connection = connection_pool.connection().await?; - - // Load the first local batch number with version 22. - let Some(local_first_v22_l1_batch) = connection - .blocks_dal() - .get_first_l1_batch_number_for_version(ProtocolVersionId::Version22) - .await? - else { - return Ok(()); - }; - tracing::info!("First local v22 batch is #{local_first_v22_l1_batch}"); - - // Find the first remote batch with version 22, assuming it's less than or equal than local one. - // Uses binary search. - let mut left_bound = L1BatchNumber(0); - let mut right_bound = local_first_v22_l1_batch; - let snapshot_recovery = connection - .snapshot_recovery_dal() - .get_applied_snapshot_status() - .await?; - if let Some(snapshot_recovery) = snapshot_recovery { - left_bound = L1BatchNumber(snapshot_recovery.l1_batch_number.0 + 1) - } - - let right_bound_remote_version = - get_l1_batch_remote_protocol_version(main_node_client.as_ref(), right_bound).await?; - if right_bound_remote_version != Some(ProtocolVersionId::Version22) { - anyhow::bail!("Remote protocol versions should be v22 for the first local v22 batch, got {right_bound_remote_version:?}"); - } - - while left_bound < right_bound { - let mid_batch = L1BatchNumber((left_bound.0 + right_bound.0) / 2); - let (mid_miniblock, _) = connection - .blocks_dal() - .get_l2_block_range_of_l1_batch(mid_batch) - .await? - .with_context(|| { - format!("Postgres is inconsistent: missing miniblocks for L1 batch #{mid_batch}") - })?; - let mid_protocol_version = main_node_client - .sync_l2_block(mid_miniblock, false) - .await? - .with_context(|| format!("Main node missing data about miniblock #{mid_miniblock}"))? - .protocol_version; - - match mid_protocol_version.cmp(&ProtocolVersionId::Version22) { - Ordering::Less => { - left_bound = mid_batch + 1; - } - Ordering::Equal => { - right_bound = mid_batch; - } - Ordering::Greater => { - anyhow::bail!("Unexpected remote protocol version: {mid_protocol_version:?} for miniblock #{mid_miniblock}"); - } - } - } - - let remote_first_v22_l1_batch = left_bound; - let (remote_first_v22_miniblock, _) = connection - .blocks_dal() - .get_l2_block_range_of_l1_batch(remote_first_v22_l1_batch) - .await? 
- .with_context(|| { - format!("Postgres is inconsistent: missing miniblocks for L1 batch #{remote_first_v22_l1_batch}") - })?; - - let mut transaction = connection.start_transaction().await?; - - tracing::info!( - "Setting version 22 for batches {remote_first_v22_l1_batch}..={local_first_v22_l1_batch}" - ); - transaction - .blocks_dal() - .reset_protocol_version_for_l1_batches( - remote_first_v22_l1_batch..=local_first_v22_l1_batch, - ProtocolVersionId::Version22, - ) - .await?; - - let (local_first_v22_miniblock, _) = transaction - .blocks_dal() - .get_l2_block_range_of_l1_batch(local_first_v22_l1_batch) - .await? - .with_context(|| { - format!("Postgres is inconsistent: missing miniblocks for L1 batch #{local_first_v22_l1_batch}") - })?; - - tracing::info!("Setting version 22 for miniblocks {remote_first_v22_miniblock}..={local_first_v22_miniblock}"); - transaction - .blocks_dal() - .reset_protocol_version_for_l2_blocks( - remote_first_v22_miniblock..=local_first_v22_miniblock, - ProtocolVersionId::Version22, - ) - .await?; - - transaction.commit().await?; - - Ok(()) -} diff --git a/core/lib/basic_types/src/basic_fri_types.rs b/core/lib/basic_types/src/basic_fri_types.rs index 33d4fafa5905..a1563ff7e590 100644 --- a/core/lib/basic_types/src/basic_fri_types.rs +++ b/core/lib/basic_types/src/basic_fri_types.rs @@ -6,6 +6,8 @@ use std::{convert::TryFrom, str::FromStr}; use serde::{Deserialize, Serialize}; +use crate::protocol_version::{ProtocolSemanticVersion, ProtocolVersionId, VersionPatch}; + const BLOB_CHUNK_SIZE: usize = 31; const ELEMENTS_PER_4844_BLOCK: usize = 4096; pub const MAX_4844_BLOBS_PER_BLOCK: usize = 16; @@ -189,6 +191,16 @@ pub struct JobIdentifiers { pub circuit_id: u8, pub aggregation_round: u8, pub protocol_version: u16, + pub protocol_version_patch: u32, +} + +impl JobIdentifiers { + pub fn get_semantic_protocol_version(&self) -> ProtocolSemanticVersion { + ProtocolSemanticVersion::new( + ProtocolVersionId::try_from(self.protocol_version).unwrap(), + VersionPatch(self.protocol_version_patch), + ) + } } #[cfg(test)] diff --git a/core/lib/basic_types/src/protocol_version.rs b/core/lib/basic_types/src/protocol_version.rs index 4f29d936a73f..d8083c0f6a31 100644 --- a/core/lib/basic_types/src/protocol_version.rs +++ b/core/lib/basic_types/src/protocol_version.rs @@ -20,16 +20,6 @@ use crate::{ pub const PACKED_SEMVER_MINOR_OFFSET: u32 = 32; pub const PACKED_SEMVER_MINOR_MASK: u32 = 0xFFFF; -// These values should be manually updated for every protocol upgrade -// Otherwise, the prover will not be able to work with new versions. -// TODO(PLA-954): Move to prover workspace -pub const PROVER_PROTOCOL_VERSION: ProtocolVersionId = ProtocolVersionId::Version24; -pub const PROVER_PROTOCOL_PATCH: VersionPatch = VersionPatch(0); -pub const PROVER_PROTOCOL_SEMANTIC_VERSION: ProtocolSemanticVersion = ProtocolSemanticVersion { - minor: PROVER_PROTOCOL_VERSION, - patch: PROVER_PROTOCOL_PATCH, -}; - /// `ProtocolVersionId` is a unique identifier of the protocol version. /// Note, that it is an identifier of the `minor` semver version of the protocol, with /// the `major` version being `0`. 
Also, the protocol version on the contracts may contain @@ -85,10 +75,6 @@ impl ProtocolVersionId { Self::Version24 } - pub fn current_prover_version() -> Self { - PROVER_PROTOCOL_VERSION - } - pub fn next() -> Self { Self::Version25 } @@ -311,10 +297,6 @@ impl ProtocolSemanticVersion { Self { minor, patch } } - pub fn current_prover_version() -> Self { - PROVER_PROTOCOL_SEMANTIC_VERSION - } - pub fn try_from_packed(packed: U256) -> Result { let minor = ((packed >> U256::from(PACKED_SEMVER_MINOR_OFFSET)) & U256::from(PACKED_SEMVER_MINOR_MASK)) diff --git a/core/lib/basic_types/src/web3/mod.rs b/core/lib/basic_types/src/web3/mod.rs index bb4a24da55e7..d684b9b6c7b2 100644 --- a/core/lib/basic_types/src/web3/mod.rs +++ b/core/lib/basic_types/src/web3/mod.rs @@ -138,30 +138,36 @@ impl<'a> Visitor<'a> for BytesVisitor { // `Log`: from `web3::types::log` /// Filter -#[derive(Default, Debug, PartialEq, Clone, Serialize)] +#[derive(Default, Debug, PartialEq, Clone, Serialize, Deserialize)] pub struct Filter { /// From Block #[serde(rename = "fromBlock", skip_serializing_if = "Option::is_none")] - from_block: Option, + pub from_block: Option, /// To Block #[serde(rename = "toBlock", skip_serializing_if = "Option::is_none")] - to_block: Option, + pub to_block: Option, /// Block Hash #[serde(rename = "blockHash", skip_serializing_if = "Option::is_none")] - block_hash: Option, + pub block_hash: Option, /// Address #[serde(skip_serializing_if = "Option::is_none")] - address: Option>, + pub address: Option>, /// Topics #[serde(skip_serializing_if = "Option::is_none")] - topics: Option>>>, + pub topics: Option>>>, /// Limit #[serde(skip_serializing_if = "Option::is_none")] - limit: Option, + pub limit: Option, } #[derive(Default, Debug, PartialEq, Clone)] -struct ValueOrArray(Vec); +pub struct ValueOrArray(Vec); + +impl ValueOrArray { + pub fn flatten(self) -> Vec { + self.0 + } +} impl Serialize for ValueOrArray where @@ -179,6 +185,25 @@ where } } +impl<'de, T> Deserialize<'de> for ValueOrArray +where + T: Deserialize<'de>, +{ + fn deserialize>(deserializer: D) -> Result { + #[derive(Deserialize)] + #[serde(untagged)] + enum Repr { + Single(T), + Sequence(Vec), + } + + Ok(match Repr::::deserialize(deserializer)? { + Repr::Single(element) => Self(vec![element]), + Repr::Sequence(elements) => Self(elements), + }) + } +} + // Filter Builder #[derive(Default, Clone)] pub struct FilterBuilder { @@ -271,7 +296,7 @@ fn topic_to_option(topic: ethabi::Topic) -> Option> { } /// A log produced by a transaction. -#[derive(Debug, Clone, PartialEq, Serialize, Deserialize)] +#[derive(Debug, Clone, PartialEq, Default, Serialize, Deserialize)] pub struct Log { /// H160 pub address: H160, diff --git a/core/lib/config/src/configs/chain.rs b/core/lib/config/src/configs/chain.rs index 8fbf0172a344..20cd6b323a6d 100644 --- a/core/lib/config/src/configs/chain.rs +++ b/core/lib/config/src/configs/chain.rs @@ -105,10 +105,10 @@ pub struct StateKeeperConfig { pub batch_overhead_l1_gas: u64, /// The maximum amount of gas that can be used by the batch. This value is derived from the circuits limitation per batch. pub max_gas_per_batch: u64, - /// The maximum amount of pubdata that can be used by the batch. Note that if the calldata is used as pubdata. + /// The maximum amount of pubdata that can be used by the batch. 
/// This variable should not exceed: /// - 128kb for calldata-based rollups - /// - 120kb*number_of_blobs for blob-based rollups + /// - 120kb * n, where `n` is a number of blobs for blob-based rollups /// - the DA layer blob size limit for the DA layer-based validiums pub max_pubdata_per_batch: u64, diff --git a/core/lib/config/src/configs/da_dispatcher.rs b/core/lib/config/src/configs/da_dispatcher.rs index 0c45d8d1c157..e78816352796 100644 --- a/core/lib/config/src/configs/da_dispatcher.rs +++ b/core/lib/config/src/configs/da_dispatcher.rs @@ -5,13 +5,17 @@ use zksync_da_layers::config::DALayerConfig; use crate::ObjectStoreConfig; +pub const DEFAULT_POLLING_INTERVAL_MS: u32 = 5000; +pub const DEFAULT_QUERY_ROWS_LIMIT: u32 = 100; +pub const DEFAULT_MAX_RETRIES: u16 = 5; + #[derive(Clone, Debug, PartialEq, Deserialize)] #[serde(tag = "da_mode")] pub enum DataAvailabilityMode { /// Uses the data availability layer to dispatch pubdata. DALayer(DALayerConfig), - /// Stores the pubdata in the Google Cloud Storage. - GCS(ObjectStoreConfig), + /// Stores the pubdata in the Object Store(GCS/S3/...). + ObjectStore(ObjectStoreConfig), /// Does not store the pubdata. NoDA, } @@ -23,7 +27,7 @@ pub struct DADispatcherConfig { #[serde(flatten)] pub da_mode: DataAvailabilityMode, /// The interval at which the dispatcher will poll the DA layer for inclusion data. - pub polling_interval: Option, + pub polling_interval_ms: Option, /// The maximum number of rows to query from the database in a single query. pub query_rows_limit: Option, /// The maximum number of retries for the dispatching of a blob. @@ -39,32 +43,24 @@ impl DADispatcherConfig { private_key: "0x0".to_string(), }, )), - polling_interval: Some( - zksync_system_constants::data_availability::DEFAULT_POLLING_INTERVAL, - ), - query_rows_limit: Some( - zksync_system_constants::data_availability::DEFAULT_QUERY_ROWS_LIMIT, - ), - max_retries: Some(zksync_system_constants::data_availability::DEFAULT_MAX_RETRIES), + polling_interval_ms: Some(DEFAULT_POLLING_INTERVAL_MS), + query_rows_limit: Some(DEFAULT_QUERY_ROWS_LIMIT), + max_retries: Some(DEFAULT_MAX_RETRIES), } } pub fn polling_interval(&self) -> Duration { - match self.polling_interval { - Some(interval) => Duration::from_secs(interval as u64), - None => Duration::from_secs( - zksync_system_constants::data_availability::DEFAULT_POLLING_INTERVAL as u64, - ), + match self.polling_interval_ms { + Some(interval) => Duration::from_millis(interval as u64), + None => Duration::from_millis(DEFAULT_POLLING_INTERVAL_MS as u64), } } pub fn query_rows_limit(&self) -> u32 { - self.query_rows_limit - .unwrap_or(zksync_system_constants::data_availability::DEFAULT_QUERY_ROWS_LIMIT) + self.query_rows_limit.unwrap_or(DEFAULT_QUERY_ROWS_LIMIT) } pub fn max_retries(&self) -> u16 { - self.max_retries - .unwrap_or(zksync_system_constants::data_availability::DEFAULT_MAX_RETRIES) + self.max_retries.unwrap_or(DEFAULT_MAX_RETRIES) } } diff --git a/core/lib/constants/src/data_availability.rs b/core/lib/constants/src/data_availability.rs deleted file mode 100644 index 9058b532524d..000000000000 --- a/core/lib/constants/src/data_availability.rs +++ /dev/null @@ -1,6 +0,0 @@ -/// An interval with which the dispatcher is polling the DA layer for the inclusion of the blobs. -pub const DEFAULT_POLLING_INTERVAL: u32 = 5; -/// The maximum number of rows that the dispatcher is fetching from the database. -pub const DEFAULT_QUERY_ROWS_LIMIT: u32 = 100; -/// The maximum number of retries for the dispatching of a blob. 
-pub const DEFAULT_MAX_RETRIES: u16 = 5; diff --git a/core/lib/constants/src/lib.rs b/core/lib/constants/src/lib.rs index 76a96a0cc9cf..6aab79ad71f3 100644 --- a/core/lib/constants/src/lib.rs +++ b/core/lib/constants/src/lib.rs @@ -1,7 +1,6 @@ pub mod blocks; pub mod contracts; pub mod crypto; -pub mod data_availability; pub mod ethereum; pub mod fees; pub mod system_context; diff --git a/core/lib/da_client/src/gcs/mod.rs b/core/lib/da_client/src/gcs/mod.rs index bf82bdc87492..57c1085bf08c 100644 --- a/core/lib/da_client/src/gcs/mod.rs +++ b/core/lib/da_client/src/gcs/mod.rs @@ -15,20 +15,20 @@ use zksync_types::{pubdata_da::StorablePubdata, L1BatchNumber}; /// An implementation of the `DataAvailabilityClient` trait that stores the pubdata in the GCS. #[derive(Clone)] -pub struct GCSDAClient { +pub struct ObjectStoreDAClient { object_store: Arc, } -impl GCSDAClient { - pub async fn new(object_store_conf: ObjectStoreConfig) -> Self { - GCSDAClient { - object_store: ObjectStoreFactory::create_from_config(&object_store_conf).await, - } +impl ObjectStoreDAClient { + pub async fn new(object_store_conf: ObjectStoreConfig) -> anyhow::Result { + Ok(ObjectStoreDAClient { + object_store: ObjectStoreFactory::create_from_config(&object_store_conf).await?, + }) } } #[async_trait] -impl DataAvailabilityClient for GCSDAClient { +impl DataAvailabilityClient for ObjectStoreDAClient { async fn dispatch_blob( &self, batch_number: u32, @@ -40,8 +40,8 @@ impl DataAvailabilityClient for GCSDAClient { .await { return Err(DAError { + is_transient: err.is_transient(), error: anyhow::Error::from(err), - is_transient: true, }); } @@ -51,20 +51,28 @@ impl DataAvailabilityClient for GCSDAClient { } async fn get_inclusion_data(&self, key: String) -> Result, DAError> { - let key_u32 = key.parse::().unwrap(); + let key_u32 = key.parse::().map_err(|err| DAError { + error: anyhow::Error::from(err), + is_transient: false, + })?; + if let Err(err) = self .object_store .get::(L1BatchNumber(key_u32)) .await { + if let zksync_object_store::ObjectStoreError::KeyNotFound(_) = err { + return Ok(None); + } + return Err(DAError { + is_transient: err.is_transient(), error: anyhow::Error::from(err), - is_transient: true, }); } - // Using default here because we don't get any inclusion data from GCS, thus there's - // nothing to check on L1. + // Using default here because we don't get any inclusion data from object store, thus + // there's nothing to check on L1. 
return Ok(Some(InclusionData::default())); } @@ -77,10 +85,10 @@ impl DataAvailabilityClient for GCSDAClient { } } -impl Debug for GCSDAClient { +impl Debug for ObjectStoreDAClient { fn fmt(&self, formatter: &mut Formatter<'_>) -> fmt::Result { formatter - .debug_struct("GCSDAClient") + .debug_struct("ObjectStoreDAClient") .field("object_store", &self.object_store) .finish() } diff --git a/core/lib/env_config/src/da_dispatcher.rs b/core/lib/env_config/src/da_dispatcher.rs index 5f395d217c3d..fea67c8880b3 100644 --- a/core/lib/env_config/src/da_dispatcher.rs +++ b/core/lib/env_config/src/da_dispatcher.rs @@ -28,14 +28,14 @@ mod tests { max_retries: u16, ) -> DADispatcherConfig { DADispatcherConfig { - da_mode: DataAvailabilityMode::GCS(ObjectStoreConfig { + da_mode: DataAvailabilityMode::ObjectStore(ObjectStoreConfig { mode: ObjectStoreMode::GCSWithCredentialFile { bucket_base_url: bucket_base_url.to_owned(), gcs_credential_file_path: "/path/to/credentials.json".to_owned(), }, max_retries: 5, }), - polling_interval: Some(interval), + polling_interval_ms: Some(interval), query_rows_limit: Some(rows_limit), max_retries: Some(max_retries), } @@ -52,7 +52,7 @@ mod tests { light_node_url: "localhost:12345".to_string(), private_key: pk.to_owned(), })), - polling_interval: Some(interval), + polling_interval_ms: Some(interval), query_rows_limit: Some(rows_limit), max_retries: Some(max_retries), } @@ -61,7 +61,7 @@ mod tests { fn expected_no_da_config() -> DADispatcherConfig { DADispatcherConfig { da_mode: DataAvailabilityMode::NoDA, - polling_interval: None, + polling_interval_ms: None, query_rows_limit: None, max_retries: None, } @@ -104,7 +104,7 @@ mod tests { } #[test] - fn from_env_gcs() { + fn from_env_object_store() { let mut lock = MUTEX.lock(); let config = r#" DA_DISPATCHER_POLLING_INTERVAL=10 diff --git a/core/lib/eth_client/src/clients/http/query.rs b/core/lib/eth_client/src/clients/http/query.rs index 984804953f68..33d9838dc735 100644 --- a/core/lib/eth_client/src/clients/http/query.rs +++ b/core/lib/eth_client/src/clients/http/query.rs @@ -3,11 +3,11 @@ use std::fmt; use async_trait::async_trait; use jsonrpsee::core::ClientError; use zksync_types::{web3, Address, L1ChainId, H256, U256, U64}; -use zksync_web3_decl::error::{ClientRpcContext, EnrichedClientError}; +use zksync_web3_decl::error::{ClientRpcContext, EnrichedClientError, EnrichedClientResult}; use super::{decl::L1EthNamespaceClient, Method, COUNTERS, LATENCIES}; use crate::{ - types::{Error, ExecutedTxStatus, FailureInfo}, + types::{ExecutedTxStatus, FailureInfo}, EthInterface, RawTransactionBytes, }; @@ -16,15 +16,14 @@ impl EthInterface for T where T: L1EthNamespaceClient + fmt::Debug + Send + Sync, { - async fn fetch_chain_id(&self) -> Result { + async fn fetch_chain_id(&self) -> EnrichedClientResult { COUNTERS.call[&(Method::ChainId, self.component())].inc(); let latency = LATENCIES.direct[&Method::ChainId].start(); let raw_chain_id = self.chain_id().rpc_context("chain_id").await?; latency.observe(); let chain_id = u64::try_from(raw_chain_id).map_err(|err| { let err = ClientError::Custom(format!("invalid chainId: {err}")); - let err = EnrichedClientError::new(err, "chain_id").with_arg("chain_id", &raw_chain_id); - Error::EthereumGateway(err) + EnrichedClientError::new(err, "chain_id").with_arg("chain_id", &raw_chain_id) })?; Ok(L1ChainId(chain_id)) } @@ -33,7 +32,7 @@ where &self, account: Address, block: web3::BlockNumber, - ) -> Result { + ) -> EnrichedClientResult { COUNTERS.call[&(Method::NonceAtForAccount, 
self.component())].inc(); let latency = LATENCIES.direct[&Method::NonceAtForAccount].start(); let nonce = self @@ -46,7 +45,7 @@ where Ok(nonce) } - async fn block_number(&self) -> Result { + async fn block_number(&self) -> EnrichedClientResult { COUNTERS.call[&(Method::BlockNumber, self.component())].inc(); let latency = LATENCIES.direct[&Method::BlockNumber].start(); let block_number = self @@ -57,7 +56,7 @@ where Ok(block_number) } - async fn get_gas_price(&self) -> Result { + async fn get_gas_price(&self) -> EnrichedClientResult { COUNTERS.call[&(Method::GetGasPrice, self.component())].inc(); let latency = LATENCIES.direct[&Method::GetGasPrice].start(); let network_gas_price = self.gas_price().rpc_context("gas_price").await?; @@ -65,7 +64,7 @@ where Ok(network_gas_price) } - async fn send_raw_tx(&self, tx: RawTransactionBytes) -> Result { + async fn send_raw_tx(&self, tx: RawTransactionBytes) -> EnrichedClientResult { let latency = LATENCIES.direct[&Method::SendRawTx].start(); let tx = self .send_raw_transaction(web3::Bytes(tx.0)) @@ -79,7 +78,7 @@ where &self, upto_block: usize, block_count: usize, - ) -> Result, Error> { + ) -> EnrichedClientResult> { const MAX_REQUEST_CHUNK: usize = 1024; COUNTERS.call[&(Method::BaseFeeHistory, self.component())].inc(); @@ -111,7 +110,7 @@ where Ok(history.into_iter().map(|fee| fee.as_u64()).collect()) } - async fn get_pending_block_base_fee_per_gas(&self) -> Result { + async fn get_pending_block_base_fee_per_gas(&self) -> EnrichedClientResult { COUNTERS.call[&(Method::PendingBlockBaseFee, self.component())].inc(); let latency = LATENCIES.direct[&Method::PendingBlockBaseFee].start(); @@ -140,7 +139,7 @@ where Ok(block.base_fee_per_gas.unwrap()) } - async fn get_tx_status(&self, hash: H256) -> Result, Error> { + async fn get_tx_status(&self, hash: H256) -> EnrichedClientResult> { COUNTERS.call[&(Method::GetTxStatus, self.component())].inc(); let latency = LATENCIES.direct[&Method::GetTxStatus].start(); @@ -162,7 +161,7 @@ where Ok(res) } - async fn failure_reason(&self, tx_hash: H256) -> Result, Error> { + async fn failure_reason(&self, tx_hash: H256) -> EnrichedClientResult> { let latency = LATENCIES.direct[&Method::FailureReason].start(); let transaction = self .get_transaction_by_hash(tx_hash) @@ -218,7 +217,7 @@ where gas_limit, })) } else { - Err(err.into()) + Err(err) } } Ok(_) => Ok(None), @@ -231,7 +230,7 @@ where } } - async fn get_tx(&self, hash: H256) -> Result, Error> { + async fn get_tx(&self, hash: H256) -> EnrichedClientResult> { COUNTERS.call[&(Method::GetTx, self.component())].inc(); let tx = self .get_transaction_by_hash(hash) @@ -245,7 +244,7 @@ where &self, request: web3::CallRequest, block: Option, - ) -> Result { + ) -> EnrichedClientResult { let latency = LATENCIES.direct[&Method::CallContractFunction].start(); let block = block.unwrap_or_else(|| web3::BlockNumber::Latest.into()); let output_bytes = self @@ -258,7 +257,10 @@ where Ok(output_bytes) } - async fn tx_receipt(&self, tx_hash: H256) -> Result, Error> { + async fn tx_receipt( + &self, + tx_hash: H256, + ) -> EnrichedClientResult> { COUNTERS.call[&(Method::TxReceipt, self.component())].inc(); let latency = LATENCIES.direct[&Method::TxReceipt].start(); let receipt = self @@ -270,7 +272,7 @@ where Ok(receipt) } - async fn eth_balance(&self, address: Address) -> Result { + async fn eth_balance(&self, address: Address) -> EnrichedClientResult { COUNTERS.call[&(Method::EthBalance, self.component())].inc(); let latency = LATENCIES.direct[&Method::EthBalance].start(); let 
balance = self @@ -282,19 +284,22 @@ where Ok(balance) } - async fn logs(&self, filter: web3::Filter) -> Result, Error> { + async fn logs(&self, filter: &web3::Filter) -> EnrichedClientResult> { COUNTERS.call[&(Method::Logs, self.component())].inc(); let latency = LATENCIES.direct[&Method::Logs].start(); let logs = self .get_logs(filter.clone()) .rpc_context("get_logs") - .with_arg("filter", &filter) + .with_arg("filter", filter) .await?; latency.observe(); Ok(logs) } - async fn block(&self, block_id: web3::BlockId) -> Result>, Error> { + async fn block( + &self, + block_id: web3::BlockId, + ) -> EnrichedClientResult>> { COUNTERS.call[&(Method::Block, self.component())].inc(); let latency = LATENCIES.direct[&Method::Block].start(); let block = match block_id { diff --git a/core/lib/eth_client/src/clients/http/signing.rs b/core/lib/eth_client/src/clients/http/signing.rs index bdb7be8aea91..2b89af97a773 100644 --- a/core/lib/eth_client/src/clients/http/signing.rs +++ b/core/lib/eth_client/src/clients/http/signing.rs @@ -10,7 +10,7 @@ use zksync_web3_decl::client::{DynClient, L1}; use super::{Method, LATENCIES}; use crate::{ - types::{encode_blob_tx_with_sidecar, Error, SignedCallResult}, + types::{encode_blob_tx_with_sidecar, ContractCallError, SignedCallResult, SigningError}, BoundEthInterface, CallFunctionArgs, EthInterface, Options, RawTransactionBytes, }; @@ -114,7 +114,7 @@ impl BoundEthInterface for SigningClient { data: Vec, contract_addr: H160, options: Options, - ) -> Result { + ) -> Result { let latency = LATENCIES.direct[&Method::SignPreparedTx].start(); // Fetch current max priority fee per gas let max_priority_fee_per_gas = match options.max_priority_fee_per_gas { @@ -124,10 +124,10 @@ impl BoundEthInterface for SigningClient { if options.transaction_type == Some(EIP_4844_TX_TYPE.into()) { if options.max_fee_per_blob_gas.is_none() { - return Err(Error::Eip4844MissingMaxFeePerBlobGas); + return Err(SigningError::Eip4844MissingMaxFeePerBlobGas); } if options.blob_versioned_hashes.is_none() { - return Err(Error::Eip4844MissingBlobVersionedHashes); + return Err(SigningError::Eip4844MissingBlobVersionedHashes); } } @@ -140,7 +140,7 @@ impl BoundEthInterface for SigningClient { }; if max_fee_per_gas < max_priority_fee_per_gas { - return Err(Error::WrongFeeProvided( + return Err(SigningError::WrongFeeProvided( max_fee_per_gas, max_priority_fee_per_gas, )); @@ -197,7 +197,7 @@ impl BoundEthInterface for SigningClient { token_address: Address, address: Address, erc20_abi: ðabi::Contract, - ) -> Result { + ) -> Result { let latency = LATENCIES.direct[&Method::Allowance].start(); let allowance: U256 = CallFunctionArgs::new("allowance", (self.inner.sender_account, address)) diff --git a/core/lib/eth_client/src/clients/mock.rs b/core/lib/eth_client/src/clients/mock.rs index a6f8f391de73..a3f9dde7c6ea 100644 --- a/core/lib/eth_client/src/clients/mock.rs +++ b/core/lib/eth_client/src/clients/mock.rs @@ -13,7 +13,7 @@ use zksync_types::{ use zksync_web3_decl::client::{DynClient, MockClient, L1}; use crate::{ - types::{Error, SignedCallResult}, + types::{ContractCallError, SignedCallResult, SigningError}, BoundEthInterface, Options, RawTransactionBytes, }; @@ -474,7 +474,7 @@ impl MockEthereum { mut raw_tx: Vec, contract_addr: Address, options: Options, - ) -> Result { + ) -> Result { let max_fee_per_gas = options.max_fee_per_gas.unwrap_or(self.max_fee_per_gas); let max_priority_fee_per_gas = options .max_priority_fee_per_gas @@ -569,7 +569,7 @@ impl BoundEthInterface for MockEthereum { data: 
Vec, contract_addr: H160, options: Options, - ) -> Result { + ) -> Result { self.sign_prepared_tx(data, contract_addr, options) } @@ -578,7 +578,7 @@ impl BoundEthInterface for MockEthereum { _token_address: Address, _contract_address: Address, _erc20_abi: ðabi::Contract, - ) -> Result { + ) -> Result { unimplemented!("Not needed right now") } } diff --git a/core/lib/eth_client/src/lib.rs b/core/lib/eth_client/src/lib.rs index b2433df9d761..2adac587b66c 100644 --- a/core/lib/eth_client/src/lib.rs +++ b/core/lib/eth_client/src/lib.rs @@ -11,11 +11,14 @@ use zksync_types::{ Address, L1ChainId, H160, H256, U256, U64, }; use zksync_web3_decl::client::{DynClient, L1}; -pub use zksync_web3_decl::{error::EnrichedClientError, jsonrpsee::core::ClientError}; +pub use zksync_web3_decl::{ + error::{EnrichedClientError, EnrichedClientResult}, + jsonrpsee::core::ClientError, +}; pub use crate::types::{ - encode_blob_tx_with_sidecar, CallFunctionArgs, ContractCall, ContractError, Error, - ExecutedTxStatus, FailureInfo, RawTransactionBytes, SignedCallResult, + encode_blob_tx_with_sidecar, CallFunctionArgs, ContractCall, ContractCallError, + ExecutedTxStatus, FailureInfo, RawTransactionBytes, SignedCallResult, SigningError, }; pub mod clients; @@ -76,14 +79,14 @@ impl Options { pub trait EthInterface: Sync + Send { /// Fetches the L1 chain ID (in contrast to [`BoundEthInterface::chain_id()`] which returns /// the *expected* L1 chain ID). - async fn fetch_chain_id(&self) -> Result; + async fn fetch_chain_id(&self) -> EnrichedClientResult; /// Returns the nonce of the provided account at the specified block. async fn nonce_at_for_account( &self, account: Address, block: BlockNumber, - ) -> Result; + ) -> EnrichedClientResult; /// Collects the base fee history for the specified block range. /// @@ -93,25 +96,25 @@ pub trait EthInterface: Sync + Send { &self, from_block: usize, block_count: usize, - ) -> Result, Error>; + ) -> EnrichedClientResult>; /// Returns the `base_fee_per_gas` value for the currently pending L1 block. - async fn get_pending_block_base_fee_per_gas(&self) -> Result; + async fn get_pending_block_base_fee_per_gas(&self) -> EnrichedClientResult; /// Returns the current gas price. - async fn get_gas_price(&self) -> Result; + async fn get_gas_price(&self) -> EnrichedClientResult; /// Returns the current block number. - async fn block_number(&self) -> Result; + async fn block_number(&self) -> EnrichedClientResult; /// Sends a transaction to the Ethereum network. - async fn send_raw_tx(&self, tx: RawTransactionBytes) -> Result; + async fn send_raw_tx(&self, tx: RawTransactionBytes) -> EnrichedClientResult; /// Fetches the transaction status for a specified transaction hash. /// /// Returns `Ok(None)` if the transaction is either not found or not executed yet. /// Returns `Err` only if the request fails (e.g. due to network issues). - async fn get_tx_status(&self, hash: H256) -> Result, Error>; + async fn get_tx_status(&self, hash: H256) -> EnrichedClientResult>; /// For a reverted transaction, attempts to recover information on the revert reason. /// @@ -119,29 +122,29 @@ pub trait EthInterface: Sync + Send { /// Returns `Ok(None)` if the transaction isn't found, wasn't executed yet, or if it was /// executed successfully. /// Returns `Err` only if the request fails (e.g. due to network issues). - async fn failure_reason(&self, tx_hash: H256) -> Result, Error>; + async fn failure_reason(&self, tx_hash: H256) -> EnrichedClientResult>; /// Returns the transaction for the specified hash. 
- async fn get_tx(&self, hash: H256) -> Result, Error>; + async fn get_tx(&self, hash: H256) -> EnrichedClientResult>; /// Returns the receipt for the specified transaction hash. - async fn tx_receipt(&self, tx_hash: H256) -> Result, Error>; + async fn tx_receipt(&self, tx_hash: H256) -> EnrichedClientResult>; /// Returns the ETH balance of the specified token for the specified address. - async fn eth_balance(&self, address: Address) -> Result; + async fn eth_balance(&self, address: Address) -> EnrichedClientResult; /// Invokes a function on a contract specified by `contract_address` / `contract_abi` using `eth_call`. async fn call_contract_function( &self, request: web3::CallRequest, block: Option, - ) -> Result; + ) -> EnrichedClientResult; /// Returns the logs for the specified filter. - async fn logs(&self, filter: Filter) -> Result, Error>; + async fn logs(&self, filter: &Filter) -> EnrichedClientResult>; /// Returns the block header for the specified block number or hash. - async fn block(&self, block_id: BlockId) -> Result>, Error>; + async fn block(&self, block_id: BlockId) -> EnrichedClientResult>>; } /// An extension of `EthInterface` trait, which is used to perform queries that are bound to @@ -187,7 +190,7 @@ pub trait BoundEthInterface: AsRef> + 'static + Sync + Send + fmt: token_address: Address, address: Address, erc20_abi: ðabi::Contract, - ) -> Result; + ) -> Result; /// Signs the transaction and sends it to the Ethereum network. /// Expected to use credentials associated with `Self::sender_account()`. @@ -196,7 +199,7 @@ pub trait BoundEthInterface: AsRef> + 'static + Sync + Send + fmt: data: Vec, contract_addr: H160, options: Options, - ) -> Result; + ) -> Result; } impl Clone for Box { @@ -207,19 +210,19 @@ impl Clone for Box { impl dyn BoundEthInterface { /// Returns the nonce of the `Self::sender_account()` at the specified block. - pub async fn nonce_at(&self, block: BlockNumber) -> Result { + pub async fn nonce_at(&self, block: BlockNumber) -> EnrichedClientResult { self.as_ref() .nonce_at_for_account(self.sender_account(), block) .await } /// Returns the current nonce of the `Self::sender_account()`. - pub async fn current_nonce(&self) -> Result { + pub async fn current_nonce(&self) -> EnrichedClientResult { self.nonce_at(BlockNumber::Latest).await } /// Returns the pending nonce of the `Self::sender_account()`. - pub async fn pending_nonce(&self) -> Result { + pub async fn pending_nonce(&self) -> EnrichedClientResult { self.nonce_at(BlockNumber::Pending).await } @@ -228,13 +231,13 @@ impl dyn BoundEthInterface { &self, data: Vec, options: Options, - ) -> Result { + ) -> Result { self.sign_prepared_tx_for_addr(data, self.contract_addr(), options) .await } /// Returns the ETH balance of `Self::sender_account()`. 
- pub async fn sender_eth_balance(&self) -> Result { + pub async fn sender_eth_balance(&self) -> EnrichedClientResult { self.as_ref().eth_balance(self.sender_account()).await } diff --git a/core/lib/eth_client/src/types.rs b/core/lib/eth_client/src/types.rs index bb1a5f4b6a20..8ac5ff427fb8 100644 --- a/core/lib/eth_client/src/types.rs +++ b/core/lib/eth_client/src/types.rs @@ -79,18 +79,21 @@ impl ContractCall<'_> { &self.inner.params } - pub async fn call(&self, client: &DynClient) -> Result { + pub async fn call( + &self, + client: &DynClient, + ) -> Result { let func = self .contract_abi .function(&self.inner.name) - .map_err(ContractError::Function)?; - let encoded_input = - func.encode_input(&self.inner.params) - .map_err(|source| ContractError::EncodeInput { - signature: func.signature(), - input: self.inner.params.clone(), - source, - })?; + .map_err(ContractCallError::Function)?; + let encoded_input = func.encode_input(&self.inner.params).map_err(|source| { + ContractCallError::EncodeInput { + signature: func.signature(), + input: self.inner.params.clone(), + source, + } + })?; let request = web3::CallRequest { from: self.inner.from, @@ -110,25 +113,28 @@ impl ContractCall<'_> { .call_contract_function(request, self.inner.block) .await?; let output_tokens = func.decode_output(&encoded_output.0).map_err(|source| { - ContractError::DecodeOutput { + ContractCallError::DecodeOutput { signature: func.signature(), output: encoded_output, source, } })?; - Ok(Res::from_tokens(output_tokens.clone()).map_err(|source| { - ContractError::DetokenizeOutput { + Res::from_tokens(output_tokens.clone()).map_err(|source| { + ContractCallError::DetokenizeOutput { signature: func.signature(), output: output_tokens, source, } - })?) + }) } } /// Contract-related subset of Ethereum client errors. #[derive(Debug, thiserror::Error)] -pub enum ContractError { +pub enum ContractCallError { + /// Problem on the Ethereum client side (e.g. bad RPC call, network issues). + #[error("Request to ethereum gateway failed: {0}")] + EthereumGateway(#[from] EnrichedClientError), /// Failed resolving a function specified for the contract call in the contract ABI. #[error("failed resolving contract function: {0}")] Function(#[source] ethabi::Error), @@ -158,15 +164,12 @@ pub enum ContractError { }, } -/// Common error type exposed by the crate, +/// Common error type exposed by the crate. #[derive(Debug, thiserror::Error)] -pub enum Error { +pub enum SigningError { /// Problem on the Ethereum client side (e.g. bad RPC call, network issues). #[error("Request to ethereum gateway failed: {0}")] EthereumGateway(#[from] EnrichedClientError), - /// Problem with a contract call. - #[error("Call to contract failed: {0}")] - Contract(#[from] ContractError), /// Problem with transaction signer. 
#[error("Transaction signing failed: {0}")] Signer(#[from] zksync_eth_signer::SignerError), diff --git a/core/lib/multivm/src/utils.rs b/core/lib/multivm/src/utils.rs index 1f4d55ea66ae..a15fdba6b703 100644 --- a/core/lib/multivm/src/utils.rs +++ b/core/lib/multivm/src/utils.rs @@ -441,8 +441,35 @@ pub fn get_max_batch_gas_limit(version: VmVersion) -> u64 { } VmVersion::Vm1_4_1 => crate::vm_1_4_1::constants::BLOCK_GAS_LIMIT as u64, VmVersion::Vm1_4_2 => crate::vm_1_4_2::constants::BLOCK_GAS_LIMIT as u64, - VmVersion::Vm1_5_0SmallBootloaderMemory => crate::vm_latest::constants::BATCH_GAS_LIMIT, - VmVersion::Vm1_5_0IncreasedBootloaderMemory => crate::vm_latest::constants::BATCH_GAS_LIMIT, + VmVersion::Vm1_5_0SmallBootloaderMemory | VmVersion::Vm1_5_0IncreasedBootloaderMemory => { + crate::vm_latest::constants::BATCH_GAS_LIMIT + } + } +} + +pub fn get_eth_call_gas_limit(version: VmVersion) -> u64 { + match version { + VmVersion::M5WithRefunds | VmVersion::M5WithoutRefunds => { + crate::vm_m5::utils::ETH_CALL_GAS_LIMIT as u64 + } + VmVersion::M6Initial | VmVersion::M6BugWithCompressionFixed => { + crate::vm_m6::utils::ETH_CALL_GAS_LIMIT as u64 + } + VmVersion::Vm1_3_2 => crate::vm_1_3_2::utils::ETH_CALL_GAS_LIMIT as u64, + VmVersion::VmVirtualBlocks => { + crate::vm_virtual_blocks::constants::ETH_CALL_GAS_LIMIT as u64 + } + VmVersion::VmVirtualBlocksRefundsEnhancement => { + crate::vm_refunds_enhancement::constants::ETH_CALL_GAS_LIMIT as u64 + } + VmVersion::VmBoojumIntegration => { + crate::vm_boojum_integration::constants::ETH_CALL_GAS_LIMIT as u64 + } + VmVersion::Vm1_4_1 => crate::vm_1_4_1::constants::ETH_CALL_GAS_LIMIT as u64, + VmVersion::Vm1_4_2 => crate::vm_1_4_2::constants::ETH_CALL_GAS_LIMIT as u64, + VmVersion::Vm1_5_0SmallBootloaderMemory | VmVersion::Vm1_5_0IncreasedBootloaderMemory => { + crate::vm_latest::constants::ETH_CALL_GAS_LIMIT + } } } diff --git a/core/lib/multivm/src/versions/vm_latest/constants.rs b/core/lib/multivm/src/versions/vm_latest/constants.rs index 1f02162f7348..01f697ec91a2 100644 --- a/core/lib/multivm/src/versions/vm_latest/constants.rs +++ b/core/lib/multivm/src/versions/vm_latest/constants.rs @@ -3,7 +3,7 @@ use zk_evm_1_5_0::aux_structures::MemoryPage; pub use zk_evm_1_5_0::zkevm_opcode_defs::system_params::{ ERGS_PER_CIRCUIT, INITIAL_STORAGE_WRITE_PUBDATA_BYTES, }; -use zksync_system_constants::{MAX_L2_TX_GAS_LIMIT, MAX_NEW_FACTORY_DEPS}; +use zksync_system_constants::MAX_NEW_FACTORY_DEPS; use super::vm::MultiVMSubversion; use crate::vm_latest::old_vm::utils::heap_page_from_base; @@ -160,7 +160,7 @@ pub const BATCH_COMPUTATIONAL_GAS_LIMIT: u32 = pub const BATCH_GAS_LIMIT: u64 = 1 << 50; /// How many gas is allowed to spend on a single transaction in eth_call method -pub const ETH_CALL_GAS_LIMIT: u32 = MAX_L2_TX_GAS_LIMIT as u32; +pub const ETH_CALL_GAS_LIMIT: u64 = BATCH_GAS_LIMIT; /// ID of the transaction from L1 pub const L1_TX_TYPE: u8 = 255; diff --git a/core/lib/object_store/Cargo.toml b/core/lib/object_store/Cargo.toml index e8d5322765ec..3e33c9097153 100644 --- a/core/lib/object_store/Cargo.toml +++ b/core/lib/object_store/Cargo.toml @@ -22,9 +22,11 @@ google-cloud-auth.workspace = true http.workspace = true serde_json.workspace = true flate2.workspace = true +rand.workspace = true tokio = { workspace = true, features = ["full"] } tracing.workspace = true prost.workspace = true [dev-dependencies] +assert_matches.workspace = true tempfile.workspace = true diff --git a/core/lib/object_store/src/file.rs b/core/lib/object_store/src/file.rs index 
aea10cccd8e8..f641ab9c74a1 100644 --- a/core/lib/object_store/src/file.rs +++ b/core/lib/object_store/src/file.rs @@ -9,7 +9,10 @@ impl From for ObjectStoreError { fn from(err: io::Error) -> Self { match err.kind() { io::ErrorKind::NotFound => ObjectStoreError::KeyNotFound(err.into()), - _ => ObjectStoreError::Other(err.into()), + kind => ObjectStoreError::Other { + is_transient: matches!(kind, io::ErrorKind::Interrupted | io::ErrorKind::TimedOut), + source: err.into(), + }, } } } @@ -20,7 +23,7 @@ pub(crate) struct FileBackedObjectStore { } impl FileBackedObjectStore { - pub async fn new(base_dir: String) -> Self { + pub async fn new(base_dir: String) -> Result { for bucket in &[ Bucket::ProverJobs, Bucket::WitnessInput, @@ -36,13 +39,9 @@ impl FileBackedObjectStore { Bucket::TeeVerifierInput, ] { let bucket_path = format!("{base_dir}/{bucket}"); - fs::create_dir_all(&bucket_path) - .await - .unwrap_or_else(|err| { - panic!("failed creating bucket `{bucket_path}`: {err}"); - }); + fs::create_dir_all(&bucket_path).await?; } - FileBackedObjectStore { base_dir } + Ok(FileBackedObjectStore { base_dir }) } fn filename(&self, bucket: Bucket, key: &str) -> String { @@ -87,12 +86,12 @@ mod test { async fn test_get() { let dir = TempDir::new().unwrap(); let path = dir.into_path().into_os_string().into_string().unwrap(); - let object_store = FileBackedObjectStore::new(path).await; + let object_store = FileBackedObjectStore::new(path).await.unwrap(); let expected = vec![9, 0, 8, 9, 0, 7]; - let result = object_store + object_store .put_raw(Bucket::ProverJobs, "test-key.bin", expected.clone()) - .await; - assert!(result.is_ok(), "result must be OK"); + .await + .unwrap(); let bytes = object_store .get_raw(Bucket::ProverJobs, "test-key.bin") .await @@ -104,26 +103,26 @@ mod test { async fn test_put() { let dir = TempDir::new().unwrap(); let path = dir.into_path().into_os_string().into_string().unwrap(); - let object_store = FileBackedObjectStore::new(path).await; + let object_store = FileBackedObjectStore::new(path).await.unwrap(); let bytes = vec![9, 0, 8, 9, 0, 7]; - let result = object_store + object_store .put_raw(Bucket::ProverJobs, "test-key.bin", bytes) - .await; - assert!(result.is_ok(), "result must be OK"); + .await + .unwrap(); } #[tokio::test] async fn test_remove() { let dir = TempDir::new().unwrap(); let path = dir.into_path().into_os_string().into_string().unwrap(); - let object_store = FileBackedObjectStore::new(path).await; - let result = object_store + let object_store = FileBackedObjectStore::new(path).await.unwrap(); + object_store .put_raw(Bucket::ProverJobs, "test-key.bin", vec![0, 1]) - .await; - assert!(result.is_ok(), "result must be OK"); - let result = object_store + .await + .unwrap(); + object_store .remove_raw(Bucket::ProverJobs, "test-key.bin") - .await; - assert!(result.is_ok(), "result must be OK"); + .await + .unwrap(); } } diff --git a/core/lib/object_store/src/gcs.rs b/core/lib/object_store/src/gcs.rs index d2650a48ea50..8cd7b982a058 100644 --- a/core/lib/object_store/src/gcs.rs +++ b/core/lib/object_store/src/gcs.rs @@ -3,7 +3,7 @@ use std::{fmt, future::Future, time::Duration}; use async_trait::async_trait; -use google_cloud_auth::{credentials::CredentialsFile, error::Error}; +use google_cloud_auth::{credentials::CredentialsFile, error::Error as AuthError}; use google_cloud_storage::{ client::{Client, ClientConfig}, http::{ @@ -17,37 +17,45 @@ use google_cloud_storage::{ }, }; use http::StatusCode; +use rand::Rng; use crate::{ metrics::GCS_METRICS, raw::{Bucket, 
ObjectStore, ObjectStoreError}, }; -async fn retry(max_retries: u16, mut f: F) -> Result +async fn retry(max_retries: u16, mut f: F) -> Result where - E: fmt::Display, - Fut: Future>, + Fut: Future>, F: FnMut() -> Fut, { let mut retries = 1; - let mut backoff = 1; + let mut backoff_secs = 1; loop { match f().await { Ok(result) => return Ok(result), - Err(err) => { - tracing::warn!(%err, "Failed GCS request {retries}/{max_retries}, retrying."); + Err(err) if err.is_transient() => { if retries > max_retries { + tracing::warn!(%err, "Exhausted {max_retries} retries performing GCS request; returning last error"); return Err(err); } + tracing::info!(%err, "Failed GCS request {retries}/{max_retries}, retrying."); retries += 1; - tokio::time::sleep(Duration::from_secs(backoff)).await; - backoff *= 2; + // Randomize sleep duration to prevent stampeding the server if multiple requests are initiated at the same time. + let sleep_duration = Duration::from_secs(backoff_secs) + .mul_f32(rand::thread_rng().gen_range(0.8..1.2)); + tokio::time::sleep(sleep_duration).await; + backoff_secs *= 2; + } + Err(err) => { + tracing::warn!(%err, "Failed GCS request with a fatal error"); + return Err(err); } } } } -pub struct GoogleCloudStorage { +pub(crate) struct GoogleCloudStorage { bucket_prefix: String, max_retries: u16, client: Client, @@ -64,7 +72,7 @@ impl fmt::Debug for GoogleCloudStorage { } #[derive(Debug, Clone)] -pub enum GoogleCloudStorageAuthMode { +pub(crate) enum GoogleCloudStorageAuthMode { AuthenticatedWithCredentialFile(String), Authenticated, Anonymous, @@ -75,26 +83,27 @@ impl GoogleCloudStorage { auth_mode: GoogleCloudStorageAuthMode, bucket_prefix: String, max_retries: u16, - ) -> Self { - let client_config = retry(max_retries, || Self::get_client_config(auth_mode.clone())) - .await - .expect("failed fetching GCS client config after retries"); + ) -> Result { + let client_config = retry(max_retries, || async { + Self::get_client_config(auth_mode.clone()) + .await + .map_err(Into::into) + }) + .await?; - Self { + Ok(Self { client: Client::new(client_config), bucket_prefix, max_retries, - } + }) } async fn get_client_config( auth_mode: GoogleCloudStorageAuthMode, - ) -> Result { + ) -> Result { match auth_mode { GoogleCloudStorageAuthMode::AuthenticatedWithCredentialFile(path) => { - let cred_file = CredentialsFile::new_from_file(path) - .await - .expect("failed loading GCS credential file"); + let cred_file = CredentialsFile::new_from_file(path).await?; ClientConfig::default().with_credentials(cred_file).await } GoogleCloudStorageAuthMode::Authenticated => ClientConfig::default().with_auth().await, @@ -127,9 +136,24 @@ impl GoogleCloudStorage { ..DeleteObjectRequest::default() }; async move { - retry(self.max_retries, || self.client.delete_object(&request)) - .await - .map_err(ObjectStoreError::from) + retry(self.max_retries, || async { + self.client + .delete_object(&request) + .await + .map_err(ObjectStoreError::from) + }) + .await + } + } +} + +impl From for ObjectStoreError { + fn from(err: AuthError) -> Self { + let is_transient = + matches!(&err, AuthError::HttpError(err) if err.is_timeout() || err.is_connect()); + Self::Initialization { + source: err.into(), + is_transient, } } } @@ -147,7 +171,12 @@ impl From for ObjectStoreError { if is_not_found { ObjectStoreError::KeyNotFound(err.into()) } else { - ObjectStoreError::Other(err.into()) + let is_transient = + matches!(&err, HttpError::HttpClient(err) if err.is_timeout() || err.is_connect()); + ObjectStoreError::Other { + is_transient, 
+ source: err.into(), + } } } } @@ -168,8 +197,11 @@ impl ObjectStore for GoogleCloudStorage { ..GetObjectRequest::default() }; let range = Range::default(); - let blob = retry(self.max_retries, || { - self.client.download_object(&request, &range) + let blob = retry(self.max_retries, || async { + self.client + .download_object(&request, &range) + .await + .map_err(Into::into) }) .await; @@ -177,7 +209,7 @@ impl ObjectStore for GoogleCloudStorage { tracing::trace!( "Fetched data from GCS for key {key} from bucket {bucket} and it took: {elapsed:?}" ); - blob.map_err(ObjectStoreError::from) + blob } async fn put_raw( @@ -198,9 +230,11 @@ impl ObjectStore for GoogleCloudStorage { bucket: self.bucket_prefix.clone(), ..Default::default() }; - let object = retry(self.max_retries, || { + let object = retry(self.max_retries, || async { self.client .upload_object(&request, value.clone(), &upload_type) + .await + .map_err(Into::into) }) .await; @@ -208,7 +242,7 @@ impl ObjectStore for GoogleCloudStorage { tracing::trace!( "Stored data to GCS for key {key} from bucket {bucket} and it took: {elapsed:?}" ); - object.map(drop).map_err(ObjectStoreError::from) + object.map(drop) } async fn remove_raw(&self, bucket: Bucket, key: &str) -> Result<(), ObjectStoreError> { @@ -228,38 +262,47 @@ impl ObjectStore for GoogleCloudStorage { mod test { use std::sync::atomic::{AtomicU16, Ordering}; + use assert_matches::assert_matches; + use super::*; + fn transient_error() -> ObjectStoreError { + ObjectStoreError::Other { + is_transient: true, + source: "oops".into(), + } + } + #[tokio::test] async fn test_retry_success_immediate() { - let result = retry(2, || async { Ok::<_, &'static str>(42) }).await; - assert_eq!(result, Ok(42)); + let result = retry(2, || async { Ok(42) }).await.unwrap(); + assert_eq!(result, 42); } #[tokio::test] async fn test_retry_failure_exhausted() { - let result = retry(2, || async { Err::("oops") }).await; - assert_eq!(result, Err("oops")); + let err = retry(2, || async { Err::(transient_error()) }) + .await + .unwrap_err(); + assert_matches!(err, ObjectStoreError::Other { .. }); } - async fn retry_success_after_n_retries(n: u16) -> Result { + async fn retry_success_after_n_retries(n: u16) -> Result { let retries = AtomicU16::new(0); - let result = retry(n, || async { + retry(n, || async { let retries = retries.fetch_add(1, Ordering::Relaxed); if retries + 1 == n { Ok(42) } else { - Err("oops") + Err(transient_error()) } }) - .await; - - result.map_err(|_| "Retry failed".to_string()) + .await } #[tokio::test] async fn test_retry_success_after_retry() { - let result = retry(2, || retry_success_after_n_retries(2)).await; - assert_eq!(result, Ok(42)); + let result = retry(2, || retry_success_after_n_retries(2)).await.unwrap(); + assert_eq!(result, 42); } } diff --git a/core/lib/object_store/src/raw.rs b/core/lib/object_store/src/raw.rs index e30635f32836..7294bdcb10b8 100644 --- a/core/lib/object_store/src/raw.rs +++ b/core/lib/object_store/src/raw.rs @@ -59,21 +59,58 @@ pub type BoxedError = Box; /// Errors during [`ObjectStore`] operations. #[derive(Debug)] +#[non_exhaustive] pub enum ObjectStoreError { + /// Object store initialization failed. + Initialization { + source: BoxedError, + is_transient: bool, + }, /// An object with the specified key is not found. KeyNotFound(BoxedError), /// Object (de)serialization failed. Serialization(BoxedError), /// Other error has occurred when accessing the store (e.g., a network error). 
- Other(BoxedError), + Other { + source: BoxedError, + is_transient: bool, + }, +} + +impl ObjectStoreError { + /// Gives a best-effort estimate whether this error is transient. + pub fn is_transient(&self) -> bool { + match self { + Self::Initialization { is_transient, .. } | Self::Other { is_transient, .. } => { + *is_transient + } + Self::KeyNotFound(_) | Self::Serialization(_) => false, + } + } } impl fmt::Display for ObjectStoreError { fn fmt(&self, formatter: &mut fmt::Formatter<'_>) -> fmt::Result { match self { + Self::Initialization { + source, + is_transient, + } => { + let kind = if *is_transient { "transient" } else { "fatal" }; + write!( + formatter, + "{kind} error initializing object store: {source}" + ) + } Self::KeyNotFound(err) => write!(formatter, "key not found: {err}"), Self::Serialization(err) => write!(formatter, "serialization error: {err}"), - Self::Other(err) => write!(formatter, "other error: {err}"), + Self::Other { + source, + is_transient, + } => { + let kind = if *is_transient { "transient" } else { "fatal" }; + write!(formatter, "{kind} error accessing object store: {source}") + } } } } @@ -81,9 +118,10 @@ impl fmt::Display for ObjectStoreError { impl error::Error for ObjectStoreError { fn source(&self) -> Option<&(dyn error::Error + 'static)> { match self { - Self::KeyNotFound(err) | Self::Serialization(err) | Self::Other(err) => { - Some(err.as_ref()) + Self::Initialization { source, .. } | Self::Other { source, .. } => { + Some(source.as_ref()) } + Self::KeyNotFound(err) | Self::Serialization(err) => Some(err.as_ref()), } } } @@ -186,14 +224,26 @@ impl ObjectStoreFactory { } /// Creates an [`ObjectStore`]. + /// + /// # Panics + /// + /// Panics if store initialization fails (e.g., because of incorrect configuration). 
pub async fn create_store(&self) -> Arc { match &self.origin { - ObjectStoreOrigin::Config(config) => Self::create_from_config(config).await, + ObjectStoreOrigin::Config(config) => Self::create_from_config(config) + .await + .unwrap_or_else(|err| { + panic!( + "failed creating object store factory with configuration {config:?}: {err}" + ) + }), ObjectStoreOrigin::Mock(store) => Arc::new(Arc::clone(store)), } } - pub async fn create_from_config(config: &ObjectStoreConfig) -> Arc { + pub async fn create_from_config( + config: &ObjectStoreConfig, + ) -> Result, ObjectStoreError> { match &config.mode { ObjectStoreMode::GCS { bucket_base_url } => { tracing::trace!( @@ -204,8 +254,8 @@ impl ObjectStoreFactory { bucket_base_url.clone(), config.max_retries, ) - .await; - Arc::new(store) + .await?; + Ok(Arc::new(store)) } ObjectStoreMode::GCSWithCredentialFile { bucket_base_url, @@ -219,15 +269,15 @@ impl ObjectStoreFactory { bucket_base_url.clone(), config.max_retries, ) - .await; - Arc::new(store) + .await?; + Ok(Arc::new(store)) } ObjectStoreMode::FileBacked { file_backed_base_path, } => { tracing::trace!("Initialized FileBacked Object store"); - let store = FileBackedObjectStore::new(file_backed_base_path.clone()).await; - Arc::new(store) + let store = FileBackedObjectStore::new(file_backed_base_path.clone()).await?; + Ok(Arc::new(store)) } ObjectStoreMode::GCSAnonymousReadOnly { bucket_base_url } => { tracing::trace!("Initialized GoogleCloudStoragePublicReadOnly store"); @@ -236,8 +286,8 @@ impl ObjectStoreFactory { bucket_base_url.clone(), config.max_retries, ) - .await; - Arc::new(store) + .await?; + Ok(Arc::new(store)) } } } diff --git a/core/lib/protobuf_config/src/da_dispatcher.rs b/core/lib/protobuf_config/src/da_dispatcher.rs index b4cbb5f9d764..9681da9cf01f 100644 --- a/core/lib/protobuf_config/src/da_dispatcher.rs +++ b/core/lib/protobuf_config/src/da_dispatcher.rs @@ -9,8 +9,8 @@ impl ProtoRepr for proto::DataAvailabilityDispatcher { type Type = configs::da_dispatcher::DADispatcherConfig; fn read(&self) -> anyhow::Result { - match &self.credentials { - Some(proto::data_availability_dispatcher::Credentials::DaLayer(config)) => { + match &self.mode { + Some(proto::data_availability_dispatcher::Mode::DaLayer(config)) => { let da_config = match required(&config.name).context("da_layer_name")?.as_str() { "celestia" => DALayerConfig::Celestia( zksync_da_layers::clients::celestia::config::CelestiaConfig { @@ -31,7 +31,7 @@ impl ProtoRepr for proto::DataAvailabilityDispatcher { }; Ok(configs::da_dispatcher::DADispatcherConfig { da_mode: DataAvailabilityMode::DALayer(da_config), - polling_interval: Some( + polling_interval_ms: Some( *required(&self.polling_interval).context("polling_interval")?, ), query_rows_limit: Some( @@ -42,10 +42,10 @@ impl ProtoRepr for proto::DataAvailabilityDispatcher { ), }) } - Some(proto::data_availability_dispatcher::Credentials::ObjectStore(config)) => { + Some(proto::data_availability_dispatcher::Mode::ObjectStore(config)) => { Ok(configs::da_dispatcher::DADispatcherConfig { - da_mode: DataAvailabilityMode::GCS(config.read()?), - polling_interval: Some( + da_mode: DataAvailabilityMode::ObjectStore(config.read()?), + polling_interval_ms: Some( *required(&self.polling_interval).context("polling_interval")?, ), query_rows_limit: Some( @@ -58,7 +58,7 @@ impl ProtoRepr for proto::DataAvailabilityDispatcher { } None => Ok(configs::da_dispatcher::DADispatcherConfig { da_mode: DataAvailabilityMode::NoDA, - polling_interval: None, + polling_interval_ms: None, 
query_rows_limit: None, max_retries: None, }), @@ -66,27 +66,25 @@ impl ProtoRepr for proto::DataAvailabilityDispatcher { } fn build(this: &Self::Type) -> Self { - let credentials = match this.da_mode.clone() { + let mode = match this.da_mode.clone() { DataAvailabilityMode::DALayer(info) => match info { DALayerConfig::Celestia(info) => Some( - proto::data_availability_dispatcher::Credentials::DaLayer(proto::DaLayer { + proto::data_availability_dispatcher::Mode::DaLayer(proto::DaLayer { name: Some("celestia".to_string()), private_key: Some(info.private_key.clone()), light_node_url: Some(info.light_node_url.clone()), }), ), }, - DataAvailabilityMode::GCS(config) => Some( - proto::data_availability_dispatcher::Credentials::ObjectStore(ObjectStore::build( - &config, - )), + DataAvailabilityMode::ObjectStore(config) => Some( + proto::data_availability_dispatcher::Mode::ObjectStore(ObjectStore::build(&config)), ), DataAvailabilityMode::NoDA => None, }; Self { - credentials, - polling_interval: this.polling_interval, + mode, + polling_interval: this.polling_interval_ms, query_rows_limit: this.query_rows_limit, max_retries: this.max_retries.map(|x| x as u32), } diff --git a/core/lib/protobuf_config/src/proto/config/da_dispatcher.proto b/core/lib/protobuf_config/src/proto/config/da_dispatcher.proto index 8de0b9e28d01..ec575bbae60c 100644 --- a/core/lib/protobuf_config/src/proto/config/da_dispatcher.proto +++ b/core/lib/protobuf_config/src/proto/config/da_dispatcher.proto @@ -11,7 +11,7 @@ message DALayer { } message DataAvailabilityDispatcher { - oneof credentials { + oneof mode { config.object_store.ObjectStore object_store = 1; DALayer da_layer = 2; } diff --git a/core/lib/snapshots_applier/src/lib.rs b/core/lib/snapshots_applier/src/lib.rs index 8e6543a80958..bcf4b3c14329 100644 --- a/core/lib/snapshots_applier/src/lib.rs +++ b/core/lib/snapshots_applier/src/lib.rs @@ -78,13 +78,10 @@ enum SnapshotsApplierError { impl SnapshotsApplierError { fn object_store(err: ObjectStoreError, context: String) -> Self { - match err { - ObjectStoreError::KeyNotFound(_) | ObjectStoreError::Serialization(_) => { - Self::Fatal(anyhow::Error::from(err).context(context)) - } - ObjectStoreError::Other(_) => { - Self::Retryable(anyhow::Error::from(err).context(context)) - } + if err.is_transient() { + Self::Retryable(anyhow::Error::from(err).context(context)) + } else { + Self::Fatal(anyhow::Error::from(err).context(context)) } } } diff --git a/core/lib/snapshots_applier/src/tests/mod.rs b/core/lib/snapshots_applier/src/tests/mod.rs index 33ba37b55771..59a95792c1ca 100644 --- a/core/lib/snapshots_applier/src/tests/mod.rs +++ b/core/lib/snapshots_applier/src/tests/mod.rs @@ -50,7 +50,10 @@ async fn snapshots_creator_can_successfully_recover_db( if error_counter.fetch_add(1, Ordering::SeqCst) >= 3 { Ok(()) // "recover" after 3 retries } else { - Err(ObjectStoreError::Other("transient error".into())) + Err(ObjectStoreError::Other { + is_transient: true, + source: "transient error".into(), + }) } }); Arc::new(object_store_with_errors) @@ -315,7 +318,10 @@ async fn applier_returns_error_after_too_many_object_store_retries() { let storage_logs = random_storage_logs(expected_status.l1_batch_number, 100); let (object_store, client) = prepare_clients(&expected_status, &storage_logs).await; let object_store = ObjectStoreWithErrors::new(object_store, |_| { - Err(ObjectStoreError::Other("service not available".into())) + Err(ObjectStoreError::Other { + is_transient: true, + source: "service not available".into(), + }) }); let 
task = SnapshotsApplierTask::new( @@ -328,7 +334,7 @@ async fn applier_returns_error_after_too_many_object_store_retries() { assert!(err.chain().any(|cause| { matches!( cause.downcast_ref::(), - Some(ObjectStoreError::Other(_)) + Some(ObjectStoreError::Other { .. }) ) })); } diff --git a/core/lib/types/src/transaction_request.rs b/core/lib/types/src/transaction_request.rs index 72551d762d18..c2526cc3ed6f 100644 --- a/core/lib/types/src/transaction_request.rs +++ b/core/lib/types/src/transaction_request.rs @@ -66,11 +66,32 @@ pub struct CallRequest { pub eip712_meta: Option, } +/// While some default parameters are usually provided for the `eth_call` methods, +/// sometimes users may want to override those. +pub struct CallOverrides { + pub enforced_base_fee: Option, +} + impl CallRequest { /// Function to return a builder for a Call Request pub fn builder() -> CallRequestBuilder { CallRequestBuilder::default() } + + pub fn get_call_overrides(&self) -> Result { + let provided_gas_price = self.max_fee_per_gas.or(self.gas_price); + let enforced_base_fee = if let Some(provided_gas_price) = provided_gas_price { + Some( + provided_gas_price + .try_into() + .map_err(|_| SerializationTransactionError::MaxFeePerGasNotU64)?, + ) + } else { + None + }; + + Ok(CallOverrides { enforced_base_fee }) + } } /// Call Request Builder @@ -183,10 +204,16 @@ pub enum SerializationTransactionError { AccessListsNotSupported, #[error("nonce has max value")] TooBigNonce, - /// Sanity check error to avoid extremely big numbers specified + + /// Sanity checks to avoid extremely big numbers specified /// to gas and pubdata price. - #[error("{0}")] - TooHighGas(String), + #[error("max fee per gas higher than 2^64-1")] + MaxFeePerGasNotU64, + #[error("max fee per pubdata byte higher than 2^64-1")] + MaxFeePerPubdataByteNotU64, + #[error("max priority fee per gas higher than 2^64-1")] + MaxPriorityFeePerGasNotU64, + /// OversizedData is returned if the raw tx size is greater /// than some meaningful limit a user might use. This is not a consensus error /// making the transaction invalid, rather a DOS protection. 
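For context on the `CallOverrides` plumbing introduced above, here is a minimal usage sketch. It is an illustration only, not part of the patch: the helper name is hypothetical, while `CallRequest::get_call_overrides()` and the `enforced_base_fee` field are taken from the diff above.

use zksync_types::transaction_request::{CallOverrides, CallRequest};

// Derive the enforced base fee for an `eth_call` from an RPC request.
// `max_fee_per_gas` takes precedence over the legacy `gas_price`; values that
// do not fit into u64 make `get_call_overrides()` return a serialization
// error, which the API server surfaces to the RPC caller.
fn enforced_base_fee_for(request: &CallRequest) -> Option<u64> {
    request
        .get_call_overrides()
        .ok()
        .and_then(|overrides| overrides.enforced_base_fee)
}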
@@ -736,16 +763,12 @@ impl TransactionRequest { fn get_fee_data_checked(&self) -> Result { if self.gas_price > u64::MAX.into() { - return Err(SerializationTransactionError::TooHighGas( - "max fee per gas higher than 2^64-1".to_string(), - )); + return Err(SerializationTransactionError::MaxFeePerGasNotU64); } let gas_per_pubdata_limit = if let Some(meta) = &self.eip712_meta { if meta.gas_per_pubdata > u64::MAX.into() { - return Err(SerializationTransactionError::TooHighGas( - "max fee per pubdata byte higher than 2^64-1".to_string(), - )); + return Err(SerializationTransactionError::MaxFeePerPubdataByteNotU64); } else if meta.gas_per_pubdata == U256::zero() { return Err(SerializationTransactionError::GasPerPubDataLimitZero); } @@ -757,9 +780,7 @@ impl TransactionRequest { let max_priority_fee_per_gas = self.max_priority_fee_per_gas.unwrap_or(self.gas_price); if max_priority_fee_per_gas > u64::MAX.into() { - return Err(SerializationTransactionError::TooHighGas( - "max priority fee per gas higher than 2^64-1".to_string(), - )); + return Err(SerializationTransactionError::MaxPriorityFeePerGasNotU64); } Ok(Fee { @@ -1316,9 +1337,7 @@ mod tests { L2Tx::from_request(tx1, usize::MAX); assert_eq!( execute_tx1.unwrap_err(), - SerializationTransactionError::TooHighGas( - "max fee per gas higher than 2^64-1".to_string() - ) + SerializationTransactionError::MaxFeePerGasNotU64 ); let tx2 = TransactionRequest { @@ -1332,9 +1351,7 @@ mod tests { L2Tx::from_request(tx2, usize::MAX); assert_eq!( execute_tx2.unwrap_err(), - SerializationTransactionError::TooHighGas( - "max priority fee per gas higher than 2^64-1".to_string() - ) + SerializationTransactionError::MaxPriorityFeePerGasNotU64 ); let tx3 = TransactionRequest { @@ -1352,9 +1369,7 @@ mod tests { L2Tx::from_request(tx3, usize::MAX); assert_eq!( execute_tx3.unwrap_err(), - SerializationTransactionError::TooHighGas( - "max fee per pubdata byte higher than 2^64-1".to_string() - ) + SerializationTransactionError::MaxFeePerPubdataByteNotU64 ); } diff --git a/core/lib/utils/src/env.rs b/core/lib/utils/src/env.rs index fec413927929..0eddc6c2cd64 100644 --- a/core/lib/utils/src/env.rs +++ b/core/lib/utils/src/env.rs @@ -52,10 +52,12 @@ pub fn locate_workspace() -> Option<&'static Path> { WORKSPACE .get_or_init(|| { let result = locate_workspace_inner(); - if let Err(err) = &result { + if result.is_err() { // `get_or_init()` is guaranteed to call the provided closure once per `OnceCell`; // i.e., we won't spam logs here. - tracing::warn!("locate_workspace() failed: {err:?}"); + tracing::info!( + "locate_workspace() failed. 
You are using an already compiled version" + ); } result.ok() }) diff --git a/core/lib/zksync_core_leftovers/src/lib.rs b/core/lib/zksync_core_leftovers/src/lib.rs index cc5a692921b3..67bde30080d6 100644 --- a/core/lib/zksync_core_leftovers/src/lib.rs +++ b/core/lib/zksync_core_leftovers/src/lib.rs @@ -37,7 +37,7 @@ use zksync_config::{ ApiConfig, DBConfig, EthWatchConfig, GenesisConfig, }; use zksync_contracts::governance_contract; -use zksync_da_client::{gcs::GCSDAClient, no_da::NoDAClient}; +use zksync_da_client::{gcs::ObjectStoreDAClient, no_da::NoDAClient}; use zksync_da_dispatcher::DataAvailabilityDispatcher; use zksync_da_layers::{ clients::celestia::CelestiaClient, config::DALayerConfig, DataAvailabilityClient, @@ -770,7 +770,9 @@ pub async fn initialize_components( .await .context("failed to build da_dispatcher_pool")?; let da_client: Box = match da_config.clone().da_mode { - DataAvailabilityMode::GCS(config) => Box::new(GCSDAClient::new(config).await), + DataAvailabilityMode::ObjectStore(config) => { + Box::new(ObjectStoreDAClient::new(config).await?) + } DataAvailabilityMode::NoDA => Box::new(NoDAClient::new()), DataAvailabilityMode::DALayer(config) => match config { DALayerConfig::Celestia(celestia_config) => { diff --git a/core/node/api_server/src/execution_sandbox/apply.rs b/core/node/api_server/src/execution_sandbox/apply.rs index d3af1a5c9dd4..dc8b56f41967 100644 --- a/core/node/api_server/src/execution_sandbox/apply.rs +++ b/core/node/api_server/src/execution_sandbox/apply.rs @@ -403,12 +403,12 @@ impl StoredL2BlockInfo { } #[derive(Debug)] -struct ResolvedBlockInfo { +pub(crate) struct ResolvedBlockInfo { state_l2_block_number: L2BlockNumber, state_l2_block_hash: H256, vm_l1_batch_number: L1BatchNumber, l1_batch_timestamp: u64, - protocol_version: ProtocolVersionId, + pub(crate) protocol_version: ProtocolVersionId, historical_fee_input: Option, } @@ -429,7 +429,7 @@ impl BlockArgs { ) } - async fn resolve_block_info( + pub(crate) async fn resolve_block_info( &self, connection: &mut Connection<'_, Core>, ) -> anyhow::Result { diff --git a/core/node/api_server/src/execution_sandbox/execute.rs b/core/node/api_server/src/execution_sandbox/execute.rs index 2fd5b376acb3..72c94e2a428c 100644 --- a/core/node/api_server/src/execution_sandbox/execute.rs +++ b/core/node/api_server/src/execution_sandbox/execute.rs @@ -4,14 +4,13 @@ use anyhow::Context as _; use multivm::{ interface::{TxExecutionMode, VmExecutionResultAndLogs, VmInterface}, tracers::StorageInvocations, - vm_latest::constants::ETH_CALL_GAS_LIMIT, MultiVMTracer, }; use tracing::{span, Level}; use zksync_dal::{ConnectionPool, Core}; use zksync_types::{ - fee::TransactionExecutionMetrics, l2::L2Tx, ExecuteTransactionCommon, Nonce, - PackedEthSignature, Transaction, U256, + fee::TransactionExecutionMetrics, l2::L2Tx, transaction_request::CallOverrides, + ExecuteTransactionCommon, Nonce, PackedEthSignature, Transaction, U256, }; use super::{ @@ -40,7 +39,7 @@ impl TxExecutionArgs { } fn for_eth_call( - enforced_base_fee: u64, + enforced_base_fee: Option, vm_execution_cache_misses_limit: Option, ) -> Self { let missed_storage_invocation_limit = vm_execution_cache_misses_limit.unwrap_or(usize::MAX); @@ -48,7 +47,7 @@ impl TxExecutionArgs { execution_mode: TxExecutionMode::EthCall, enforced_nonce: None, added_balance: U256::zero(), - enforced_base_fee: Some(enforced_base_fee), + enforced_base_fee, missed_storage_invocation_limit, } } @@ -170,23 +169,21 @@ impl TransactionExecutor { vm_permit: VmPermit, shared_args: 
TxSharedArgs, connection_pool: ConnectionPool, + call_overrides: CallOverrides, mut tx: L2Tx, block_args: BlockArgs, vm_execution_cache_misses_limit: Option, custom_tracers: Vec, ) -> anyhow::Result { - let enforced_base_fee = tx.common_data.fee.max_fee_per_gas.as_u64(); - let execution_args = - TxExecutionArgs::for_eth_call(enforced_base_fee, vm_execution_cache_misses_limit); + let execution_args = TxExecutionArgs::for_eth_call( + call_overrides.enforced_base_fee, + vm_execution_cache_misses_limit, + ); if tx.common_data.signature.is_empty() { tx.common_data.signature = PackedEthSignature::default().serialize_packed().into(); } - // Protection against infinite-loop eth_calls and alike: - // limiting the amount of gas the call can use. - // We can't use `BLOCK_ERGS_LIMIT` here since the VM itself has some overhead. - tx.common_data.fee.gas_limit = ETH_CALL_GAS_LIMIT.into(); let output = self .execute_tx_in_sandbox( vm_permit, diff --git a/core/node/api_server/src/tx_sender/mod.rs b/core/node/api_server/src/tx_sender/mod.rs index 9e6bd86415f9..1b13e50b410f 100644 --- a/core/node/api_server/src/tx_sender/mod.rs +++ b/core/node/api_server/src/tx_sender/mod.rs @@ -7,7 +7,7 @@ use multivm::{ interface::VmExecutionResultAndLogs, utils::{ adjust_pubdata_price_for_tx, derive_base_fee_and_gas_per_pubdata, derive_overhead, - get_max_batch_gas_limit, + get_eth_call_gas_limit, get_max_batch_gas_limit, }, vm_latest::constants::BATCH_COMPUTATIONAL_GAS_LIMIT, }; @@ -28,6 +28,7 @@ use zksync_types::{ fee_model::BatchFeeInput, get_code_key, get_intrinsic_constants, l2::{error::TxCheckError::TxDuplication, L2Tx}, + transaction_request::CallOverrides, utils::storage_key_for_eth_balance, AccountTreeId, Address, ExecuteTransactionCommon, L2ChainId, Nonce, PackedEthSignature, ProtocolVersionId, Transaction, VmVersion, H160, H256, MAX_L2_TX_GAS_LIMIT, @@ -965,6 +966,7 @@ impl TxSender { pub(super) async fn eth_call( &self, block_args: BlockArgs, + call_overrides: CallOverrides, tx: L2Tx, ) -> Result, SubmitTxError> { let vm_permit = self.0.vm_concurrency_limiter.acquire().await; @@ -977,6 +979,7 @@ impl TxSender { vm_permit, self.shared_args().await?, self.0.replica_connection_pool.clone(), + call_overrides, tx, block_args, vm_execution_cache_misses_limit, @@ -1036,4 +1039,19 @@ impl TxSender { } Ok(()) } + + pub(crate) async fn get_default_eth_call_gas( + &self, + block_args: BlockArgs, + ) -> anyhow::Result { + let mut connection = self.acquire_replica_connection().await?; + + let protocol_version = block_args + .resolve_block_info(&mut connection) + .await + .context("failed to resolve block info")? + .protocol_version; + + Ok(get_eth_call_gas_limit(protocol_version.into())) + } } diff --git a/core/node/api_server/src/web3/namespaces/debug.rs b/core/node/api_server/src/web3/namespaces/debug.rs index 4b998adcfeb8..400711de8593 100644 --- a/core/node/api_server/src/web3/namespaces/debug.rs +++ b/core/node/api_server/src/web3/namespaces/debug.rs @@ -125,7 +125,7 @@ impl DebugNamespace { pub async fn debug_trace_call_impl( &self, - request: CallRequest, + mut request: CallRequest, block_id: Option, options: Option, ) -> Result { @@ -148,6 +148,19 @@ impl DebugNamespace { .last_sealed_l2_block .diff_with_block_args(&block_args), ); + + if request.gas.is_none() { + request.gas = Some( + self.state + .tx_sender + .get_default_eth_call_gas(block_args) + .await + .map_err(Web3Error::InternalError)? 
+ .into(), + ) + } + + let call_overrides = request.get_call_overrides()?; let tx = L2Tx::from_request(request.into(), MAX_ENCODED_TX_SIZE)?; let shared_args = self.shared_args().await; @@ -173,6 +186,7 @@ impl DebugNamespace { vm_permit, shared_args, self.state.connection_pool.clone(), + call_overrides, tx.clone(), block_args, self.sender_config().vm_execution_cache_misses_limit, diff --git a/core/node/api_server/src/web3/namespaces/eth.rs b/core/node/api_server/src/web3/namespaces/eth.rs index ff2403051de0..b1541f7261bf 100644 --- a/core/node/api_server/src/web3/namespaces/eth.rs +++ b/core/node/api_server/src/web3/namespaces/eth.rs @@ -52,7 +52,7 @@ impl EthNamespace { pub async fn call_impl( &self, - request: CallRequest, + mut request: CallRequest, block_id: Option, ) -> Result { let block_id = block_id.unwrap_or(BlockId::Number(BlockNumber::Pending)); @@ -70,8 +70,25 @@ impl EthNamespace { ); drop(connection); + if request.gas.is_none() { + request.gas = Some( + self.state + .tx_sender + .get_default_eth_call_gas(block_args) + .await + .map_err(Web3Error::InternalError)? + .into(), + ) + } + let call_overrides = request.get_call_overrides()?; let tx = L2Tx::from_request(request.into(), self.state.api_config.max_tx_size)?; - let call_result = self.state.tx_sender.eth_call(block_args, tx).await?; + + // It is assumed that the previous checks has already enforced that the `max_fee_per_gas` is at most u64. + let call_result: Vec = self + .state + .tx_sender + .eth_call(block_args, call_overrides, tx) + .await?; Ok(call_result.into()) } diff --git a/core/node/block_reverter/Cargo.toml b/core/node/block_reverter/Cargo.toml index 178e3da6c58a..68fdf72acd83 100644 --- a/core/node/block_reverter/Cargo.toml +++ b/core/node/block_reverter/Cargo.toml @@ -21,11 +21,13 @@ zksync_state.workspace = true zksync_merkle_tree.workspace = true anyhow.workspace = true +futures.workspace = true tokio = { workspace = true, features = ["time", "fs"] } serde.workspace = true tracing.workspace = true [dev-dependencies] assert_matches.workspace = true +async-trait.workspace = true tempfile.workspace = true test-casing.workspace = true diff --git a/core/node/block_reverter/src/lib.rs b/core/node/block_reverter/src/lib.rs index f9f8858a7b1c..baba02a559f0 100644 --- a/core/node/block_reverter/src/lib.rs +++ b/core/node/block_reverter/src/lib.rs @@ -2,7 +2,7 @@ use std::{path::Path, sync::Arc, time::Duration}; use anyhow::Context as _; use serde::Serialize; -use tokio::fs; +use tokio::{fs, sync::Semaphore}; use zksync_config::{configs::chain::NetworkConfig, ContractsConfig, EthConfig}; use zksync_contracts::hyperchain_contract; use zksync_dal::{ConnectionPool, Core, CoreDal}; @@ -382,6 +382,8 @@ impl BlockReverter { object_store: &dyn ObjectStore, deleted_snapshots: &[SnapshotMetadata], ) -> anyhow::Result<()> { + const CONCURRENT_REMOVE_REQUESTS: usize = 20; + fn ignore_not_found_errors(err: ObjectStoreError) -> Result<(), ObjectStoreError> { match err { ObjectStoreError::KeyNotFound(err) => { @@ -421,18 +423,46 @@ impl BlockReverter { }); combine_results(&mut overall_result, result); - for chunk_id in 0..snapshot.storage_logs_filepaths.len() as u64 { + let mut is_incomplete_snapshot = false; + let chunk_ids_iter = (0_u64..) + .zip(&snapshot.storage_logs_filepaths) + .filter_map(|(chunk_id, path)| { + if path.is_none() { + if !is_incomplete_snapshot { + is_incomplete_snapshot = true; + tracing::warn!( + "Snapshot for L1 batch #{} is incomplete (misses al least storage logs chunk ID {chunk_id}). 
\ + It is probable that it's currently being created, in which case you'll need to clean up produced files \ + manually afterwards", + snapshot.l1_batch_number + ); + } + return None; + } + Some(chunk_id) + }); + + let remove_semaphore = &Semaphore::new(CONCURRENT_REMOVE_REQUESTS); + let remove_futures = chunk_ids_iter.map(|chunk_id| async move { + let _permit = remove_semaphore + .acquire() + .await + .context("semaphore is never closed")?; + let key = SnapshotStorageLogsStorageKey { l1_batch_number: snapshot.l1_batch_number, chunk_id, }; tracing::info!("Removing storage logs chunk {key:?}"); - - let result = object_store + object_store .remove::(key) .await .or_else(ignore_not_found_errors) - .with_context(|| format!("failed removing storage logs chunk {key:?}")); + .with_context(|| format!("failed removing storage logs chunk {key:?}")) + }); + let remove_results = futures::future::join_all(remove_futures).await; + + for result in remove_results { combine_results(&mut overall_result, result); } } diff --git a/core/node/block_reverter/src/tests.rs b/core/node/block_reverter/src/tests.rs index d5510aac3bed..30ff24fa175b 100644 --- a/core/node/block_reverter/src/tests.rs +++ b/core/node/block_reverter/src/tests.rs @@ -1,11 +1,14 @@ //! Tests for block reverter. +use std::{collections::HashSet, sync::Mutex}; + use assert_matches::assert_matches; +use async_trait::async_trait; use test_casing::test_casing; use tokio::sync::watch; use zksync_dal::Connection; use zksync_merkle_tree::TreeInstruction; -use zksync_object_store::ObjectStoreFactory; +use zksync_object_store::{Bucket, ObjectStoreFactory}; use zksync_state::ReadStorage; use zksync_types::{ block::{L1BatchHeader, L2BlockHeader}, @@ -201,8 +204,13 @@ async fn create_mock_snapshot( storage: &mut Connection<'_, Core>, object_store: &dyn ObjectStore, l1_batch_number: L1BatchNumber, + chunk_ids: impl Iterator + Clone, ) { - let storage_logs_chunk_count = 5; + let storage_logs_chunk_count = chunk_ids + .clone() + .max() + .expect("`chunk_ids` cannot be empty") + + 1; let factory_deps_key = object_store .put( @@ -224,7 +232,7 @@ async fn create_mock_snapshot( .await .unwrap(); - for chunk_id in 0..storage_logs_chunk_count { + for chunk_id in chunk_ids { let key = SnapshotStorageLogsStorageKey { l1_batch_number, chunk_id, @@ -255,7 +263,7 @@ async fn reverting_snapshot(remove_objects: bool) { setup_storage(&mut storage, &storage_logs).await; let object_store = ObjectStoreFactory::mock().create_store().await; - create_mock_snapshot(&mut storage, &object_store, L1BatchNumber(7)).await; + create_mock_snapshot(&mut storage, &object_store, L1BatchNumber(7), 0..5).await; // Sanity check: snapshot should be visible. let all_snapshots = storage .snapshots_dal() @@ -304,3 +312,160 @@ async fn reverting_snapshot(remove_objects: bool) { } } } + +#[tokio::test] +async fn reverting_snapshot_ignores_not_found_object_store_errors() { + let storage_logs = gen_storage_logs(); + let pool = ConnectionPool::::test_pool().await; + let mut storage = pool.connection().await.unwrap(); + setup_storage(&mut storage, &storage_logs).await; + + let object_store = ObjectStoreFactory::mock().create_store().await; + create_mock_snapshot(&mut storage, &object_store, L1BatchNumber(7), 0..5).await; + + // Manually remove some data from the store. 
+ object_store + .remove::(L1BatchNumber(7)) + .await + .unwrap(); + let key = SnapshotStorageLogsStorageKey { + l1_batch_number: L1BatchNumber(7), + chunk_id: 1, + }; + object_store + .remove::(key) + .await + .unwrap(); + + let mut block_reverter = BlockReverter::new(NodeRole::External, pool.clone()); + block_reverter.enable_rolling_back_postgres(); + block_reverter.enable_rolling_back_snapshot_objects(object_store); + block_reverter.roll_back(L1BatchNumber(5)).await.unwrap(); + + // Check that snapshot metadata has been removed. + let all_snapshots = storage + .snapshots_dal() + .get_all_complete_snapshots() + .await + .unwrap(); + assert_eq!(all_snapshots.snapshots_l1_batch_numbers, []); +} + +#[derive(Debug, Default)] +struct ErroneousStore { + object_keys: Mutex>, +} + +#[async_trait] +impl ObjectStore for ErroneousStore { + async fn get_raw(&self, _bucket: Bucket, _key: &str) -> Result, ObjectStoreError> { + unreachable!("not called by reverter") + } + + async fn put_raw( + &self, + bucket: Bucket, + key: &str, + _value: Vec, + ) -> Result<(), ObjectStoreError> { + self.object_keys + .lock() + .unwrap() + .insert((bucket, key.to_owned())); + Ok(()) + } + + async fn remove_raw(&self, bucket: Bucket, key: &str) -> Result<(), ObjectStoreError> { + self.object_keys + .lock() + .unwrap() + .remove(&(bucket, key.to_owned())); + Err(ObjectStoreError::Other { + is_transient: false, + source: "fatal error".into(), + }) + } + + fn storage_prefix_raw(&self, bucket: Bucket) -> String { + bucket.to_string() + } +} + +#[tokio::test] +async fn reverting_snapshot_propagates_fatal_errors() { + let storage_logs = gen_storage_logs(); + let pool = ConnectionPool::::test_pool().await; + let mut storage = pool.connection().await.unwrap(); + setup_storage(&mut storage, &storage_logs).await; + + let object_store = Arc::new(ErroneousStore::default()); + create_mock_snapshot(&mut storage, &object_store, L1BatchNumber(7), 0..5).await; + + let mut block_reverter = BlockReverter::new(NodeRole::External, pool.clone()); + block_reverter.enable_rolling_back_postgres(); + block_reverter.enable_rolling_back_snapshot_objects(object_store.clone()); + let err = block_reverter + .roll_back(L1BatchNumber(5)) + .await + .unwrap_err(); + assert!(err.chain().any(|source| { + if let Some(err) = source.downcast_ref::() { + matches!(err, ObjectStoreError::Other { .. }) + } else { + false + } + })); + + // Check that snapshot metadata has been removed (it's not atomic with snapshot removal). + let all_snapshots = storage + .snapshots_dal() + .get_all_complete_snapshots() + .await + .unwrap(); + assert_eq!(all_snapshots.snapshots_l1_batch_numbers, []); + + // Check that removal was called for all objects (i.e., the reverter doesn't bail early). 
+ let retained_object_keys = object_store.object_keys.lock().unwrap(); + assert!(retained_object_keys.is_empty(), "{retained_object_keys:?}"); +} + +#[tokio::test] +async fn reverter_handles_incomplete_snapshot() { + let storage_logs = gen_storage_logs(); + let pool = ConnectionPool::::test_pool().await; + let mut storage = pool.connection().await.unwrap(); + setup_storage(&mut storage, &storage_logs).await; + + let object_store = ObjectStoreFactory::mock().create_store().await; + let chunk_ids = [0, 1, 4].into_iter(); + create_mock_snapshot( + &mut storage, + &object_store, + L1BatchNumber(7), + chunk_ids.clone(), + ) + .await; + + let mut block_reverter = BlockReverter::new(NodeRole::External, pool.clone()); + block_reverter.enable_rolling_back_postgres(); + block_reverter.enable_rolling_back_snapshot_objects(object_store.clone()); + block_reverter.roll_back(L1BatchNumber(5)).await.unwrap(); + + // Check that snapshot metadata has been removed. + let all_snapshots = storage + .snapshots_dal() + .get_all_complete_snapshots() + .await + .unwrap(); + assert_eq!(all_snapshots.snapshots_l1_batch_numbers, []); + + // Check that chunk files have been removed. + for chunk_id in chunk_ids { + let key = SnapshotStorageLogsStorageKey { + l1_batch_number: L1BatchNumber(7), + chunk_id, + }; + let chunk_result = object_store.get::(key).await; + assert_matches!(chunk_result.unwrap_err(), ObjectStoreError::KeyNotFound(_)); + } +} diff --git a/core/node/commitment_generator/src/validation_task.rs b/core/node/commitment_generator/src/validation_task.rs index 4488e0c2c56e..cf93a4899b89 100644 --- a/core/node/commitment_generator/src/validation_task.rs +++ b/core/node/commitment_generator/src/validation_task.rs @@ -3,7 +3,7 @@ use std::time::Duration; use tokio::sync::watch; use zksync_eth_client::{ clients::{DynClient, L1}, - CallFunctionArgs, ClientError, Error as EthClientError, + CallFunctionArgs, ClientError, ContractCallError, }; use zksync_types::{commitment::L1BatchCommitmentMode, Address}; @@ -66,14 +66,14 @@ impl L1BatchCommitmentModeValidationTask { // Getters contract does not support `getPubdataPricingMode` method. // This case is accepted for backwards compatibility with older contracts, but emits a // warning in case the wrong contract address was passed by the caller. 
- Err(EthClientError::EthereumGateway(err)) + Err(ContractCallError::EthereumGateway(err)) if matches!(err.as_ref(), ClientError::Call(_)) => { tracing::warn!("Contract {diamond_proxy_address:?} does not support getPubdataPricingMode method: {err}"); return Ok(()); } - Err(EthClientError::EthereumGateway(err)) if err.is_transient() => { + Err(ContractCallError::EthereumGateway(err)) if err.is_transient() => { tracing::warn!( "Transient error validating commitment mode, will retry after {:?}: {err}", self.retry_interval @@ -92,7 +92,7 @@ impl L1BatchCommitmentModeValidationTask { async fn get_pubdata_pricing_mode( diamond_proxy_address: Address, eth_client: &DynClient, - ) -> Result { + ) -> Result { CallFunctionArgs::new("getPubdataPricingMode", ()) .for_contract( diamond_proxy_address, diff --git a/core/node/consistency_checker/src/lib.rs b/core/node/consistency_checker/src/lib.rs index 6fdc3ad480df..8ff8d6993617 100644 --- a/core/node/consistency_checker/src/lib.rs +++ b/core/node/consistency_checker/src/lib.rs @@ -7,7 +7,7 @@ use zksync_contracts::PRE_BOOJUM_COMMIT_FUNCTION; use zksync_dal::{Connection, ConnectionPool, Core, CoreDal}; use zksync_eth_client::{ clients::{DynClient, L1}, - CallFunctionArgs, Error as L1ClientError, EthInterface, + CallFunctionArgs, ContractCallError, EnrichedClientError, EthInterface, }; use zksync_health_check::{Health, HealthStatus, HealthUpdater, ReactiveHealthCheck}; use zksync_l1_contract_interface::{ @@ -29,7 +29,9 @@ mod tests; #[derive(Debug, thiserror::Error)] enum CheckError { #[error("Web3 error communicating with L1")] - Web3(#[from] L1ClientError), + Web3(#[from] EnrichedClientError), + #[error("error calling L1 contract")] + ContractCall(#[from] ContractCallError), /// Error that is caused by the main node providing incorrect information etc. #[error("failed validating commit transaction")] Validation(anyhow::Error), @@ -42,7 +44,7 @@ impl CheckError { fn is_transient(&self) -> bool { matches!( self, - Self::Web3(L1ClientError::EthereumGateway(err)) if err.is_transient() + Self::Web3(err) if err.is_transient() ) } } diff --git a/core/node/da_dispatcher/src/da_dispatcher.rs b/core/node/da_dispatcher/src/da_dispatcher.rs index dea938c51b4b..c246c5a5103e 100644 --- a/core/node/da_dispatcher/src/da_dispatcher.rs +++ b/core/node/da_dispatcher/src/da_dispatcher.rs @@ -5,8 +5,11 @@ use chrono::{NaiveDateTime, Utc}; use rand::Rng; use tokio::sync::watch; use zksync_config::DADispatcherConfig; -use zksync_da_layers::{types::IsTransient, DataAvailabilityClient}; -use zksync_dal::{Connection, ConnectionPool, Core, CoreDal}; +use zksync_da_layers::{ + types::{DAError, IsTransient}, + DataAvailabilityClient, +}; +use zksync_dal::{ConnectionPool, Core, CoreDal}; use zksync_types::L1BatchNumber; use crate::metrics::METRICS; @@ -39,26 +42,27 @@ impl DataAvailabilityDispatcher { break; } - let mut conn = pool.connection_tagged("da_dispatcher").await?; - if let Err(err) = self.dispatch(&mut conn).await { + if let Err(err) = self.dispatch(&pool).await { tracing::warn!("dispatch error {err:?}"); } - if let Err(err) = self.poll_for_inclusion(&mut conn).await { + + if let Err(err) = self.poll_for_inclusion(&pool).await { tracing::warn!("poll_for_inclusion error {err:?}"); } - drop(conn); tokio::time::sleep(self.config.polling_interval()).await; } Ok(()) } /// Dispatches the blobs to the data availability layer, and saves the blob_id in the database. 
- async fn dispatch(&self, conn: &mut Connection<'_, Core>) -> anyhow::Result<()> { + async fn dispatch(&self, pool: &ConnectionPool) -> anyhow::Result<()> { + let mut conn = pool.connection_tagged("da_dispatcher").await?; let batches = conn .data_availability_dal() .get_ready_for_da_dispatch_l1_batches(self.config.query_rows_limit() as usize) .await?; + drop(conn); for batch in batches { let dispatch_latency = METRICS.blob_dispatch_latency.start(); @@ -78,6 +82,8 @@ impl DataAvailabilityDispatcher { let sent_at = NaiveDateTime::from_timestamp_millis(Utc::now().timestamp_millis()).unwrap(); + + let mut conn = pool.connection_tagged("da_dispatcher").await?; conn.data_availability_dal() .insert_l1_batch_da( batch.l1_batch_number, @@ -91,6 +97,7 @@ impl DataAvailabilityDispatcher { batch.l1_batch_number ) })?; + drop(conn); METRICS .last_dispatched_l1_batch @@ -108,12 +115,14 @@ impl DataAvailabilityDispatcher { } /// Polls the data availability layer for inclusion data, and saves it in the database. - async fn poll_for_inclusion(&self, conn: &mut Connection<'_, Core>) -> anyhow::Result<()> { + async fn poll_for_inclusion(&self, pool: &ConnectionPool) -> anyhow::Result<()> { + let mut conn = pool.connection_tagged("da_dispatcher").await?; if let Some(blob_info) = conn .data_availability_dal() .get_first_da_blob_awaiting_inclusion() .await? { + drop(conn); let inclusion_data = self .client .get_inclusion_data(blob_info.blob_id.clone()) @@ -125,6 +134,7 @@ impl DataAvailabilityDispatcher { ) })?; + let mut conn = pool.connection_tagged("da_dispatcher").await?; if let Some(inclusion_data) = inclusion_data { conn.data_availability_dal() .save_l1_batch_inclusion_data( @@ -138,19 +148,19 @@ impl DataAvailabilityDispatcher { blob_info.l1_batch_number ) })?; + drop(conn); - let inclusion_latency_seconds = - (Utc::now().timestamp() - blob_info.sent_at.timestamp()) as u64; + let inclusion_latency = Utc::now().signed_duration_since(blob_info.sent_at); METRICS .inclusion_latency - .observe(Duration::from_secs(inclusion_latency_seconds)); + .observe(inclusion_latency.to_std()?); METRICS .last_included_l1_batch .set(blob_info.l1_batch_number.0 as usize); tracing::info!( "Received an inclusion data for a batch_number: {}, inclusion_latency_seconds: {}", - blob_info.l1_batch_number, inclusion_latency_seconds + blob_info.l1_batch_number, inclusion_latency.num_seconds() ); } } @@ -159,14 +169,13 @@ impl DataAvailabilityDispatcher { } } -async fn retry( +async fn retry( max_retries: u16, batch_number: L1BatchNumber, mut f: F, -) -> Result +) -> Result where - E: std::fmt::Display + IsTransient, - Fut: Future>, + Fut: Future>, F: FnMut() -> Fut, { let mut retries = 1; @@ -182,12 +191,12 @@ where return Err(err); } - tracing::warn!(%err, "Failed DA dispatch request {retries}/{max_retries} for batch {batch_number}, retrying in {backoff_secs} seconds."); retries += 1; let sleep_duration = Duration::from_secs(backoff_secs) .mul_f32(rand::thread_rng().gen_range(0.8..1.2)); + tracing::warn!(%err, "Failed DA dispatch request {retries}/{max_retries} for batch {batch_number}, retrying in {} milliseconds.", sleep_duration.as_millis()); tokio::time::sleep(sleep_duration).await; - backoff_secs = (backoff_secs * 2).min(128); + backoff_secs = (backoff_secs * 2).min(128); // cap the backoff at 128 seconds } } } diff --git a/core/node/da_dispatcher/src/lib.rs b/core/node/da_dispatcher/src/lib.rs index cb41ea1f7c25..965a44626764 100644 --- a/core/node/da_dispatcher/src/lib.rs +++ b/core/node/da_dispatcher/src/lib.rs @@ -1,3 +1,5 
@@ +extern crate core; + pub use self::da_dispatcher::DataAvailabilityDispatcher; mod da_dispatcher; diff --git a/core/node/da_dispatcher/src/metrics.rs b/core/node/da_dispatcher/src/metrics.rs index 6c246cd447e8..d4e8c11ec2fe 100644 --- a/core/node/da_dispatcher/src/metrics.rs +++ b/core/node/da_dispatcher/src/metrics.rs @@ -13,7 +13,7 @@ pub(super) struct DataAvailabilityDispatcherMetrics { pub inclusion_latency: Histogram, /// Size of the dispatched blob. /// Buckets are bytes ranging from 1 KB to 16 MB, which has to satisfy all blob size values. - #[metrics(buckets = Buckets::exponential(1_024.0..=16.0 * 1_024.0 * 1_024.0, 2.0))] + #[metrics(buckets = Buckets::exponential(1_024.0..=16.0 * 1_024.0 * 1_024.0, 2.0), unit = Unit::Bytes)] pub blob_size: Histogram, /// Number of transactions resent by the DA dispatcher. diff --git a/core/node/eth_sender/src/error.rs b/core/node/eth_sender/src/error.rs index 206bbf2d583a..61d92bcbe132 100644 --- a/core/node/eth_sender/src/error.rs +++ b/core/node/eth_sender/src/error.rs @@ -1,9 +1,12 @@ +use zksync_eth_client::{ContractCallError, EnrichedClientError}; use zksync_types::web3::contract; #[derive(Debug, thiserror::Error)] -pub enum ETHSenderError { - #[error("Ethereum gateway Error {0}")] - EthereumGateWayError(#[from] zksync_eth_client::Error), - #[error("Token parsing Error: {0}")] - ParseError(#[from] contract::Error), +pub enum EthSenderError { + #[error("Ethereum gateway error: {0}")] + EthereumGateway(#[from] EnrichedClientError), + #[error("Contract call error: {0}")] + ContractCall(#[from] ContractCallError), + #[error("Token parsing error: {0}")] + Parse(#[from] contract::Error), } diff --git a/core/node/eth_sender/src/eth_tx_aggregator.rs b/core/node/eth_sender/src/eth_tx_aggregator.rs index 11c4f6362b7b..ee5806c72f54 100644 --- a/core/node/eth_sender/src/eth_tx_aggregator.rs +++ b/core/node/eth_sender/src/eth_tx_aggregator.rs @@ -29,7 +29,7 @@ use crate::{ metrics::{PubdataKind, METRICS}, utils::agg_l1_batch_base_cost, zksync_functions::ZkSyncFunctions, - Aggregator, ETHSenderError, + Aggregator, EthSenderError, }; /// Data queried from L1 using multicall contract. 
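The retry helper in the DA dispatcher above (like the GCS retry earlier in this patch) uses capped, jittered exponential backoff. A standalone sketch of that schedule, assuming the same `rand` and `std::time` APIs as the surrounding code (the function name is hypothetical):

use std::time::Duration;

use rand::Rng;

// The delay doubles after each failed attempt, is capped at 128 seconds, and
// is jittered by +/-20% so that concurrent dispatchers do not retry in lockstep.
fn backoff_schedule(attempts: u16) -> Vec<Duration> {
    let mut backoff_secs: u64 = 1;
    let mut rng = rand::thread_rng();
    (0..attempts)
        .map(|_| {
            let sleep = Duration::from_secs(backoff_secs).mul_f32(rng.gen_range(0.8..1.2));
            backoff_secs = (backoff_secs * 2).min(128);
            sleep
        })
        .collect()
}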
@@ -134,7 +134,7 @@ impl EthTxAggregator { Ok(()) } - pub(super) async fn get_multicall_data(&mut self) -> Result { + pub(super) async fn get_multicall_data(&mut self) -> Result { let calldata = self.generate_calldata_for_multicall(); let args = CallFunctionArgs::new(&self.functions.aggregate3.name, calldata).for_contract( self.l1_multicall3_address, @@ -221,14 +221,11 @@ impl EthTxAggregator { pub(super) fn parse_multicall_data( &self, token: Token, - ) -> Result { + ) -> Result { let parse_error = |tokens: &[Token]| { - Err(ETHSenderError::ParseError( - Web3ContractError::InvalidOutputType(format!( - "Failed to parse multicall token: {:?}", - tokens - )), - )) + Err(EthSenderError::Parse(Web3ContractError::InvalidOutputType( + format!("Failed to parse multicall token: {:?}", tokens), + ))) }; if let Token::Array(call_results) = token { @@ -242,24 +239,24 @@ impl EthTxAggregator { Multicall3Result::from_token(call_results_iterator.next().unwrap())?.return_data; if multicall3_bootloader.len() != 32 { - return Err(ETHSenderError::ParseError( - Web3ContractError::InvalidOutputType(format!( + return Err(EthSenderError::Parse(Web3ContractError::InvalidOutputType( + format!( "multicall3 bootloader hash data is not of the len of 32: {:?}", multicall3_bootloader - )), - )); + ), + ))); } let bootloader = H256::from_slice(&multicall3_bootloader); let multicall3_default_aa = Multicall3Result::from_token(call_results_iterator.next().unwrap())?.return_data; if multicall3_default_aa.len() != 32 { - return Err(ETHSenderError::ParseError( - Web3ContractError::InvalidOutputType(format!( + return Err(EthSenderError::Parse(Web3ContractError::InvalidOutputType( + format!( "multicall3 default aa hash data is not of the len of 32: {:?}", multicall3_default_aa - )), - )); + ), + ))); } let default_aa = H256::from_slice(&multicall3_default_aa); let base_system_contracts_hashes = BaseSystemContractsHashes { @@ -270,12 +267,12 @@ impl EthTxAggregator { let multicall3_verifier_params = Multicall3Result::from_token(call_results_iterator.next().unwrap())?.return_data; if multicall3_verifier_params.len() != 96 { - return Err(ETHSenderError::ParseError( - Web3ContractError::InvalidOutputType(format!( + return Err(EthSenderError::Parse(Web3ContractError::InvalidOutputType( + format!( "multicall3 verifier params data is not of the len of 96: {:?}", multicall3_default_aa - )), - )); + ), + ))); } let recursion_node_level_vk_hash = H256::from_slice(&multicall3_verifier_params[..32]); let recursion_leaf_level_vk_hash = @@ -291,24 +288,24 @@ impl EthTxAggregator { let multicall3_verifier_address = Multicall3Result::from_token(call_results_iterator.next().unwrap())?.return_data; if multicall3_verifier_address.len() != 32 { - return Err(ETHSenderError::ParseError( - Web3ContractError::InvalidOutputType(format!( + return Err(EthSenderError::Parse(Web3ContractError::InvalidOutputType( + format!( "multicall3 verifier address data is not of the len of 32: {:?}", multicall3_verifier_address - )), - )); + ), + ))); } let verifier_address = Address::from_slice(&multicall3_verifier_address[12..]); let multicall3_protocol_version = Multicall3Result::from_token(call_results_iterator.next().unwrap())?.return_data; if multicall3_protocol_version.len() != 32 { - return Err(ETHSenderError::ParseError( - Web3ContractError::InvalidOutputType(format!( + return Err(EthSenderError::Parse(Web3ContractError::InvalidOutputType( + format!( "multicall3 protocol version data is not of the len of 32: {:?}", multicall3_protocol_version - )), - )); + ), 
+ ))); } let protocol_version = U256::from_big_endian(&multicall3_protocol_version); @@ -334,7 +331,7 @@ impl EthTxAggregator { async fn get_recursion_scheduler_level_vk_hash( &mut self, verifier_address: Address, - ) -> Result { + ) -> Result { let get_vk_hash = &self.functions.verification_key_hash; let vk_hash: H256 = CallFunctionArgs::new(&get_vk_hash.name, ()) .for_contract(verifier_address, &self.functions.verifier_contract) @@ -347,7 +344,7 @@ impl EthTxAggregator { async fn loop_iteration( &mut self, storage: &mut Connection<'_, Core>, - ) -> Result<(), ETHSenderError> { + ) -> Result<(), EthSenderError> { let MulticallData { base_system_contracts_hashes, verifier_params, @@ -546,7 +543,7 @@ impl EthTxAggregator { storage: &mut Connection<'_, Core>, aggregated_op: &AggregatedOperation, contracts_are_pre_shared_bridge: bool, - ) -> Result { + ) -> Result { let mut transaction = storage.start_transaction().await.unwrap(); let op_type = aggregated_op.get_action_type(); // We may be using a custom sender for commit transactions, so use this @@ -595,7 +592,7 @@ impl EthTxAggregator { &self, storage: &mut Connection<'_, Core>, from_addr: Option
, - ) -> Result { + ) -> Result { let db_nonce = storage .eth_sender_dal() .get_next_nonce(from_addr) diff --git a/core/node/eth_sender/src/eth_tx_manager.rs b/core/node/eth_sender/src/eth_tx_manager.rs index 09b1f3885551..7958aad6d78f 100644 --- a/core/node/eth_sender/src/eth_tx_manager.rs +++ b/core/node/eth_sender/src/eth_tx_manager.rs @@ -6,8 +6,8 @@ use zksync_config::configs::eth_sender::SenderConfig; use zksync_dal::{Connection, ConnectionPool, Core, CoreDal}; use zksync_eth_client::{ clients::{DynClient, L1}, - encode_blob_tx_with_sidecar, BoundEthInterface, ClientError, EnrichedClientError, Error, - EthInterface, ExecutedTxStatus, Options, RawTransactionBytes, SignedCallResult, + encode_blob_tx_with_sidecar, BoundEthInterface, ClientError, EnrichedClientError, EthInterface, + ExecutedTxStatus, Options, RawTransactionBytes, SignedCallResult, }; use zksync_node_fee_model::l1_gas_price::L1TxParamsProvider; use zksync_shared_metrics::BlockL1Stage; @@ -19,7 +19,7 @@ use zksync_types::{ }; use zksync_utils::time::seconds_since_epoch; -use super::{metrics::METRICS, ETHSenderError}; +use super::{metrics::METRICS, EthSenderError}; #[derive(Debug)] struct EthFee { @@ -85,7 +85,7 @@ impl EthTxManager { async fn get_tx_status( &self, tx_hash: H256, - ) -> Result, ETHSenderError> { + ) -> Result, EthSenderError> { self.query_client() .get_tx_status(tx_hash) .await @@ -125,7 +125,7 @@ impl EthTxManager { storage: &mut Connection<'_, Core>, tx: &EthTx, time_in_mempool: u32, - ) -> Result { + ) -> Result { let base_fee_per_gas = self.gas_adjuster.get_base_fee(0); let priority_fee_per_gas = self.gas_adjuster.get_priority_fee(); let blob_base_fee_per_gas = Some(self.gas_adjuster.get_blob_base_fee()); @@ -200,7 +200,7 @@ impl EthTxManager { storage: &mut Connection<'_, Core>, eth_tx_id: u32, base_fee_per_gas: u64, - ) -> Result { + ) -> Result { let previous_sent_tx = storage .eth_sender_dal() .get_last_sent_eth_tx(eth_tx_id) @@ -228,7 +228,7 @@ impl EthTxManager { .with_arg("base_fee_per_gas", &base_fee_per_gas) .with_arg("previous_base_fee", &previous_base_fee) .with_arg("next_block_minimal_base_fee", &next_block_minimal_base_fee); - return Err(ETHSenderError::from(Error::EthereumGateway(err))); + return Err(err.into()); } // Increase `priority_fee_per_gas` by at least 20% to prevent "replacement transaction under-priced" error. 
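On the "replacement transaction under-priced" comment kept above: L1 nodes typically require at least a ~10% fee increase before accepting a replacement transaction, so bumping the priority fee by 20% leaves a margin. A hedged sketch of such a bump in integer arithmetic (illustration only, not the exact formula used by the manager):

// At least a 20% increase, rounding the bump up so that small fees still move.
fn bump_priority_fee(previous_priority_fee: u64) -> u64 {
    previous_priority_fee.saturating_add(previous_priority_fee.div_ceil(5))
}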
@@ -242,7 +242,7 @@ impl EthTxManager { tx: &EthTx, time_in_mempool: u32, current_block: L1BlockNumber, - ) -> Result { + ) -> Result { let EthFee { base_fee_per_gas, priority_fee_per_gas, @@ -310,7 +310,7 @@ impl EthTxManager { tx_history_id: u32, raw_tx: RawTransactionBytes, current_block: L1BlockNumber, - ) -> Result { + ) -> Result { match self.query_client().send_raw_tx(raw_tx).await { Ok(tx_hash) => { storage @@ -334,7 +334,7 @@ impl EthTxManager { async fn get_operator_nonce( &self, block_numbers: L1BlockNumbers, - ) -> Result { + ) -> Result { let finalized = self .ethereum_gateway .nonce_at(block_numbers.finalized.0.into()) @@ -354,7 +354,7 @@ impl EthTxManager { async fn get_blobs_operator_nonce( &self, block_numbers: L1BlockNumbers, - ) -> Result, ETHSenderError> { + ) -> Result, EthSenderError> { match &self.ethereum_gateway_blobs { None => Ok(None), Some(gateway) => { @@ -374,7 +374,7 @@ impl EthTxManager { } } - async fn get_l1_block_numbers(&self) -> Result { + async fn get_l1_block_numbers(&self) -> Result { let (finalized, safe) = if let Some(confirmations) = self.config.wait_confirmations { let latest_block_number = self.query_client().block_number().await?.as_u64(); @@ -418,7 +418,7 @@ impl EthTxManager { &mut self, storage: &mut Connection<'_, Core>, l1_block_numbers: L1BlockNumbers, - ) -> Result, ETHSenderError> { + ) -> Result, EthSenderError> { METRICS.track_block_numbers(&l1_block_numbers); let operator_nonce = self.get_operator_nonce(l1_block_numbers).await?; let blobs_operator_nonce = self.get_blobs_operator_nonce(l1_block_numbers).await?; @@ -458,7 +458,7 @@ impl EthTxManager { l1_block_numbers: L1BlockNumbers, operator_nonce: OperatorNonce, operator_address: Option
, - ) -> Result, ETHSenderError> { + ) -> Result, EthSenderError> { let inflight_txs = storage.eth_sender_dal().get_inflight_txs().await.unwrap(); METRICS.number_of_inflight_txs.set(inflight_txs.len()); @@ -799,7 +799,7 @@ impl EthTxManager { &mut self, storage: &mut Connection<'_, Core>, previous_block: L1BlockNumber, - ) -> Result { + ) -> Result { let l1_block_numbers = self.get_l1_block_numbers().await?; self.send_new_eth_txs(storage, l1_block_numbers.latest) diff --git a/core/node/eth_sender/src/lib.rs b/core/node/eth_sender/src/lib.rs index c0a4a892e52a..3ae29a520030 100644 --- a/core/node/eth_sender/src/lib.rs +++ b/core/node/eth_sender/src/lib.rs @@ -12,6 +12,6 @@ mod zksync_functions; mod tests; pub use self::{ - aggregator::Aggregator, error::ETHSenderError, eth_tx_aggregator::EthTxAggregator, + aggregator::Aggregator, error::EthSenderError, eth_tx_aggregator::EthTxAggregator, eth_tx_manager::EthTxManager, }; diff --git a/core/node/eth_sender/src/tests.rs b/core/node/eth_sender/src/tests.rs index 5090af08cf86..cd00f3af0883 100644 --- a/core/node/eth_sender/src/tests.rs +++ b/core/node/eth_sender/src/tests.rs @@ -29,7 +29,7 @@ use zksync_types::{ use crate::{ aggregated_operations::AggregatedOperation, eth_tx_manager::L1BlockNumbers, Aggregator, - ETHSenderError, EthTxAggregator, EthTxManager, + EthSenderError, EthTxAggregator, EthTxManager, }; // Alias to conveniently call static methods of `ETHSender`. @@ -1104,7 +1104,7 @@ async fn test_parse_multicall_data(commitment_mode: L1BatchCommitmentMode) { tester .aggregator .parse_multicall_data(wrong_data_instance.clone()), - Err(ETHSenderError::ParseError(Error::InvalidOutputType(_))) + Err(EthSenderError::Parse(Error::InvalidOutputType(_))) ); } } diff --git a/core/node/eth_watch/src/client.rs b/core/node/eth_watch/src/client.rs index 4e3e8e997361..604ea2f471cc 100644 --- a/core/node/eth_watch/src/client.rs +++ b/core/node/eth_watch/src/client.rs @@ -1,10 +1,10 @@ use std::fmt; use zksync_contracts::verifier_contract; -pub(super) use zksync_eth_client::Error as EthClientError; use zksync_eth_client::{ clients::{DynClient, L1}, - CallFunctionArgs, ClientError, EnrichedClientError, EthInterface, + CallFunctionArgs, ClientError, ContractCallError, EnrichedClientError, EnrichedClientResult, + EthInterface, }; use zksync_types::{ ethabi::Contract, @@ -21,11 +21,12 @@ pub trait EthClient: 'static + fmt::Debug + Send + Sync { from: BlockNumber, to: BlockNumber, retries_left: usize, - ) -> Result, EthClientError>; + ) -> EnrichedClientResult>; /// Returns finalized L1 block number. - async fn finalized_block_number(&self) -> Result; + async fn finalized_block_number(&self) -> EnrichedClientResult; /// Returns scheduler verification key hash by verifier address. - async fn scheduler_vk_hash(&self, verifier_address: Address) -> Result; + async fn scheduler_vk_hash(&self, verifier_address: Address) + -> Result; /// Sets list of topics to return events for. 
fn set_topics(&mut self, topics: Vec); } @@ -76,7 +77,7 @@ impl EthHttpQueryClient { from: BlockNumber, to: BlockNumber, topics: Vec, - ) -> Result, EthClientError> { + ) -> EnrichedClientResult> { let filter = FilterBuilder::default() .address( [ @@ -92,13 +93,16 @@ impl EthHttpQueryClient { .to_block(to) .topics(Some(topics), None, None, None) .build(); - self.client.logs(filter).await + self.client.logs(&filter).await } } #[async_trait::async_trait] impl EthClient for EthHttpQueryClient { - async fn scheduler_vk_hash(&self, verifier_address: Address) -> Result { + async fn scheduler_vk_hash( + &self, + verifier_address: Address, + ) -> Result { // New verifier returns the hash of the verification key. CallFunctionArgs::new("verificationKeyHash", ()) .for_contract(verifier_address, &self.verifier_contract_abi) @@ -111,12 +115,12 @@ impl EthClient for EthHttpQueryClient { from: BlockNumber, to: BlockNumber, retries_left: usize, - ) -> Result, EthClientError> { + ) -> EnrichedClientResult> { let mut result = self.get_filter_logs(from, to, self.topics.clone()).await; // This code is compatible with both Infura and Alchemy API providers. // Note: we don't handle rate-limits here - assumption is that we're never going to hit them. - if let Err(EthClientError::EthereumGateway(err)) = &result { + if let Err(err) = &result { tracing::warn!("Provider returned error message: {err}"); let err_message = err.as_ref().to_string(); let err_code = if let ClientError::Call(err) = err.as_ref() { @@ -181,7 +185,7 @@ impl EthClient for EthHttpQueryClient { result } - async fn finalized_block_number(&self) -> Result { + async fn finalized_block_number(&self) -> EnrichedClientResult { if let Some(confirmations) = self.confirmations_for_eth_event { let latest_block_number = self.client.block_number().await?.as_u64(); Ok(latest_block_number.saturating_sub(confirmations)) diff --git a/core/node/eth_watch/src/event_processors/mod.rs b/core/node/eth_watch/src/event_processors/mod.rs index 2a3a6344bdbc..396bcc2e1ca5 100644 --- a/core/node/eth_watch/src/event_processors/mod.rs +++ b/core/node/eth_watch/src/event_processors/mod.rs @@ -1,12 +1,13 @@ use std::fmt; use zksync_dal::{Connection, Core}; +use zksync_eth_client::{ContractCallError, EnrichedClientError}; use zksync_types::{web3::Log, H256}; pub(crate) use self::{ governance_upgrades::GovernanceUpgradesEventProcessor, priority_ops::PriorityOpsEventProcessor, }; -use crate::client::{EthClient, EthClientError}; +use crate::client::EthClient; mod governance_upgrades; mod priority_ops; @@ -21,7 +22,9 @@ pub(super) enum EventProcessorError { source: anyhow::Error, }, #[error("Eth client error: {0}")] - Client(#[from] EthClientError), + Client(#[from] EnrichedClientError), + #[error("Contract call error: {0}")] + ContractCall(#[from] ContractCallError), /// Internal errors are considered fatal (i.e., they bubble up and lead to the watcher termination). 
#[error("internal processing error: {0:?}")] Internal(#[from] anyhow::Error), diff --git a/core/node/eth_watch/src/tests.rs b/core/node/eth_watch/src/tests.rs index f6abe93b35f0..a93f58aa2ac2 100644 --- a/core/node/eth_watch/src/tests.rs +++ b/core/node/eth_watch/src/tests.rs @@ -3,6 +3,7 @@ use std::{collections::HashMap, convert::TryInto, sync::Arc}; use tokio::sync::RwLock; use zksync_contracts::{governance_contract, hyperchain_contract}; use zksync_dal::{Connection, ConnectionPool, Core, CoreDal}; +use zksync_eth_client::{ContractCallError, EnrichedClientResult}; use zksync_types::{ ethabi::{encode, Hash, Token}, l1::{L1Tx, OpProcessingType, PriorityQueueType}, @@ -13,10 +14,7 @@ use zksync_types::{ ProtocolVersionId, Transaction, H256, U256, }; -use crate::{ - client::{EthClient, EthClientError}, - EthWatch, -}; +use crate::{client::EthClient, EthWatch}; #[derive(Debug)] struct FakeEthClientData { @@ -106,7 +104,7 @@ impl EthClient for MockEthClient { from: BlockNumber, to: BlockNumber, _retries_left: usize, - ) -> Result, EthClientError> { + ) -> EnrichedClientResult> { let from = self.block_to_number(from).await; let to = self.block_to_number(to).await; let mut logs = vec![]; @@ -126,11 +124,14 @@ impl EthClient for MockEthClient { fn set_topics(&mut self, _topics: Vec) {} - async fn scheduler_vk_hash(&self, _verifier_address: Address) -> Result { + async fn scheduler_vk_hash( + &self, + _verifier_address: Address, + ) -> Result { Ok(H256::zero()) } - async fn finalized_block_number(&self) -> Result { + async fn finalized_block_number(&self) -> EnrichedClientResult { Ok(self.inner.read().await.last_finalized_block_number) } } diff --git a/core/node/fee_model/src/l1_gas_price/gas_adjuster/mod.rs b/core/node/fee_model/src/l1_gas_price/gas_adjuster/mod.rs index 1885159a23e1..8691c24ca94a 100644 --- a/core/node/fee_model/src/l1_gas_price/gas_adjuster/mod.rs +++ b/core/node/fee_model/src/l1_gas_price/gas_adjuster/mod.rs @@ -8,7 +8,7 @@ use std::{ use tokio::sync::watch; use zksync_config::{configs::eth_sender::PubdataSendingMode, GasAdjusterConfig}; -use zksync_eth_client::{Error, EthInterface}; +use zksync_eth_client::EthInterface; use zksync_types::{commitment::L1BatchCommitmentMode, L1_GAS_PER_PUBDATA_BYTE, U256, U64}; use zksync_web3_decl::client::{DynClient, L1}; @@ -41,7 +41,7 @@ impl GasAdjuster { config: GasAdjusterConfig, pubdata_sending_mode: PubdataSendingMode, commitment_mode: L1BatchCommitmentMode, - ) -> Result { + ) -> anyhow::Result { let eth_client = eth_client.for_component("gas_adjuster"); // Subtracting 1 from the "latest" block number to prevent errors in case @@ -81,7 +81,7 @@ impl GasAdjuster { /// Performs an actualization routine for `GasAdjuster`. /// This method is intended to be invoked periodically. - pub async fn keep_updated(&self) -> Result<(), Error> { + pub async fn keep_updated(&self) -> anyhow::Result<()> { // Subtracting 1 from the "latest" block number to prevent errors in case // the info about the latest block is not yet present on the node. // This sometimes happens on Infura. 
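The "latest minus one" pattern in the gas adjuster above guards against providers that advertise a block number before they can actually serve that block. As a standalone sketch (hypothetical helper; imports mirror the surrounding code):

use zksync_eth_client::EthInterface;
use zksync_web3_decl::client::{DynClient, L1};

// Query the provider's latest block and step one block back so follow-up
// requests never reference a block the provider has not fully indexed yet.
async fn safe_latest_block(eth_client: &DynClient<L1>) -> anyhow::Result<u64> {
    let latest = eth_client.block_number().await?.as_u64();
    Ok(latest.saturating_sub(1))
}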
@@ -234,7 +234,7 @@ impl GasAdjuster { async fn get_base_fees_history( eth_client: &DynClient, block_range: RangeInclusive, - ) -> Result<(Vec, Vec), Error> { + ) -> anyhow::Result<(Vec, Vec)> { let mut base_fee_history = Vec::new(); let mut blob_base_fee_history = Vec::new(); for block_number in block_range { diff --git a/core/node/genesis/src/lib.rs b/core/node/genesis/src/lib.rs index 12dd6afc68b3..bfa6b77cbfef 100644 --- a/core/node/genesis/src/lib.rs +++ b/core/node/genesis/src/lib.rs @@ -432,7 +432,7 @@ pub async fn save_set_chain_id_tx( .from_block(from.into()) .to_block(BlockNumber::Latest) .build(); - let mut logs = query_client.logs(filter).await?; + let mut logs = query_client.logs(&filter).await?; anyhow::ensure!( logs.len() == 1, "Expected a single set_chain_id event, got these {}: {:?}", diff --git a/core/node/house_keeper/src/prover/metrics.rs b/core/node/house_keeper/src/prover/metrics.rs index 4af13b61b0c5..7711c9c04a6b 100644 --- a/core/node/house_keeper/src/prover/metrics.rs +++ b/core/node/house_keeper/src/prover/metrics.rs @@ -1,6 +1,5 @@ use vise::{Counter, EncodeLabelSet, EncodeLabelValue, Family, Gauge, LabeledFamily, Metrics}; -use zksync_types::ProtocolVersionId; - +use zksync_types::protocol_version::ProtocolSemanticVersion; #[derive(Debug, Metrics)] #[metrics(prefix = "house_keeper")] pub(crate) struct HouseKeeperMetrics { @@ -63,7 +62,7 @@ impl FriProverMetrics { circuit_id: u8, aggregation_round: u8, prover_group_id: u8, - protocol_version: ProtocolVersionId, + protocol_version: ProtocolSemanticVersion, amount: u64, ) { self.prover_jobs[&ProverJobsLabels { diff --git a/core/node/house_keeper/src/prover/queue_reporter/fri_proof_compressor_queue_reporter.rs b/core/node/house_keeper/src/prover/queue_reporter/fri_proof_compressor_queue_reporter.rs index ce7d7467bcc9..886a4c116b89 100644 --- a/core/node/house_keeper/src/prover/queue_reporter/fri_proof_compressor_queue_reporter.rs +++ b/core/node/house_keeper/src/prover/queue_reporter/fri_proof_compressor_queue_reporter.rs @@ -3,7 +3,7 @@ use std::collections::HashMap; use async_trait::async_trait; use prover_dal::{Prover, ProverDal}; use zksync_dal::ConnectionPool; -use zksync_types::{prover_dal::JobCountStatistics, ProtocolVersionId}; +use zksync_types::{protocol_version::ProtocolSemanticVersion, prover_dal::JobCountStatistics}; use crate::{ periodic_job::PeriodicJob, @@ -28,7 +28,7 @@ impl FriProofCompressorQueueReporter { async fn get_job_statistics( pool: &ConnectionPool, - ) -> HashMap { + ) -> HashMap { pool.connection() .await .unwrap() diff --git a/core/node/house_keeper/src/prover/queue_reporter/fri_prover_queue_reporter.rs b/core/node/house_keeper/src/prover/queue_reporter/fri_prover_queue_reporter.rs index b3b04a519b29..1ae03c74b45e 100644 --- a/core/node/house_keeper/src/prover/queue_reporter/fri_prover_queue_reporter.rs +++ b/core/node/house_keeper/src/prover/queue_reporter/fri_prover_queue_reporter.rs @@ -2,7 +2,6 @@ use async_trait::async_trait; use prover_dal::{Prover, ProverDal}; use zksync_config::configs::fri_prover_group::FriProverGroupConfig; use zksync_dal::{ConnectionPool, Core, CoreDal}; -use zksync_types::ProtocolVersionId; use crate::{periodic_job::PeriodicJob, prover::metrics::FRI_PROVER_METRICS}; @@ -67,7 +66,7 @@ impl PeriodicJob for FriProverQueueReporter { circuit_id, job_identifiers.aggregation_round, group_id, - ProtocolVersionId::try_from(job_identifiers.protocol_version).unwrap(), + job_identifiers.get_semantic_protocol_version(), stats.queued as u64, ); @@ -76,7 +75,7 @@ impl 
PeriodicJob for FriProverQueueReporter { circuit_id, job_identifiers.aggregation_round, group_id, - ProtocolVersionId::try_from(job_identifiers.protocol_version).unwrap(), + job_identifiers.get_semantic_protocol_version(), stats.in_progress as u64, ); } diff --git a/core/node/house_keeper/src/prover/queue_reporter/fri_witness_generator_queue_reporter.rs b/core/node/house_keeper/src/prover/queue_reporter/fri_witness_generator_queue_reporter.rs index da44a34f145a..487b28491c43 100644 --- a/core/node/house_keeper/src/prover/queue_reporter/fri_witness_generator_queue_reporter.rs +++ b/core/node/house_keeper/src/prover/queue_reporter/fri_witness_generator_queue_reporter.rs @@ -4,7 +4,8 @@ use async_trait::async_trait; use prover_dal::{Prover, ProverDal}; use zksync_dal::ConnectionPool; use zksync_types::{ - basic_fri_types::AggregationRound, prover_dal::JobCountStatistics, ProtocolVersionId, + basic_fri_types::AggregationRound, protocol_version::ProtocolSemanticVersion, + prover_dal::JobCountStatistics, }; use crate::{periodic_job::PeriodicJob, prover::metrics::SERVER_METRICS}; @@ -27,7 +28,7 @@ impl FriWitnessGeneratorQueueReporter { async fn get_job_statistics( &self, - ) -> HashMap<(AggregationRound, ProtocolVersionId), JobCountStatistics> { + ) -> HashMap<(AggregationRound, ProtocolSemanticVersion), JobCountStatistics> { let mut conn = self.pool.connection().await.unwrap(); let mut result = HashMap::new(); result.extend( @@ -62,7 +63,7 @@ impl FriWitnessGeneratorQueueReporter { fn emit_metrics_for_round( round: AggregationRound, - protocol_version: ProtocolVersionId, + protocol_version: ProtocolSemanticVersion, stats: &JobCountStatistics, ) { if stats.queued > 0 || stats.in_progress > 0 { @@ -95,7 +96,7 @@ impl PeriodicJob for FriWitnessGeneratorQueueReporter { async fn run_routine_task(&mut self) -> anyhow::Result<()> { let stats_for_all_rounds = self.get_job_statistics().await; - let mut aggregated = HashMap::::new(); + let mut aggregated = HashMap::::new(); for ((round, protocol_version), stats) in stats_for_all_rounds { emit_metrics_for_round(round, protocol_version, &stats); diff --git a/core/node/node_framework/Cargo.toml b/core/node/node_framework/Cargo.toml index f223afcf50d7..5b454db4db40 100644 --- a/core/node/node_framework/Cargo.toml +++ b/core/node/node_framework/Cargo.toml @@ -36,6 +36,7 @@ zksync_eth_sender.workspace = true zksync_da_client.workspace = true zksync_da_dispatcher.workspace = true zksync_da_layers.workspace = true +zksync_block_reverter.workspace = true zksync_state_keeper.workspace = true zksync_consistency_checker.workspace = true zksync_metadata_calculator.workspace = true @@ -45,6 +46,7 @@ zksync_node_consensus.workspace = true zksync_contract_verification_server.workspace = true zksync_tee_verifier_input_producer.workspace = true zksync_queued_job_processor.workspace = true +zksync_reorg_detector.workspace = true tracing.workspace = true thiserror.workspace = true diff --git a/core/node/node_framework/examples/showcase.rs b/core/node/node_framework/examples/showcase.rs index 0a1552f33501..98baa5bc9683 100644 --- a/core/node/node_framework/examples/showcase.rs +++ b/core/node/node_framework/examples/showcase.rs @@ -10,7 +10,7 @@ use std::{ use zksync_node_framework::{ resource::Resource, service::{ServiceContext, StopReceiver, ZkStackServiceBuilder}, - task::Task, + task::{Task, TaskId}, wiring_layer::{WiringError, WiringLayer}, }; @@ -96,14 +96,14 @@ impl PutTask { #[async_trait::async_trait] impl Task for PutTask { - fn name(&self) -> &'static str { + fn 
id(&self) -> TaskId { // Task names simply have to be unique. They are used for logging and debugging. - "put_task" + "put_task".into() } /// This method will be invoked by the framework when the task is started. async fn run(self: Box, mut stop_receiver: StopReceiver) -> anyhow::Result<()> { - tracing::info!("Starting the task {}", self.name()); + tracing::info!("Starting the task {}", self.id()); // We have to respect the stop receiver and should exit as soon as we receive // a stop signal. @@ -138,12 +138,12 @@ impl CheckTask { #[async_trait::async_trait] impl Task for CheckTask { - fn name(&self) -> &'static str { - "check_task" + fn id(&self) -> TaskId { + "check_task".into() } async fn run(self: Box, mut stop_receiver: StopReceiver) -> anyhow::Result<()> { - tracing::info!("Starting the task {}", self.name()); + tracing::info!("Starting the task {}", self.id()); tokio::select! { _ = self.run_inner() => {}, diff --git a/core/node/node_framework/src/implementations/layers/circuit_breaker_checker.rs b/core/node/node_framework/src/implementations/layers/circuit_breaker_checker.rs index f493d8081ef9..b8fff34b7e92 100644 --- a/core/node/node_framework/src/implementations/layers/circuit_breaker_checker.rs +++ b/core/node/node_framework/src/implementations/layers/circuit_breaker_checker.rs @@ -4,7 +4,7 @@ use zksync_config::configs::chain::CircuitBreakerConfig; use crate::{ implementations::resources::circuit_breakers::CircuitBreakersResource, service::{ServiceContext, StopReceiver}, - task::UnconstrainedTask, + task::{TaskId, UnconstrainedTask}, wiring_layer::{WiringError, WiringLayer}, }; @@ -43,8 +43,8 @@ struct CircuitBreakerCheckerTask { #[async_trait::async_trait] impl UnconstrainedTask for CircuitBreakerCheckerTask { - fn name(&self) -> &'static str { - "circuit_breaker_checker" + fn id(&self) -> TaskId { + "circuit_breaker_checker".into() } async fn run_unconstrained( diff --git a/core/node/node_framework/src/implementations/layers/commitment_generator.rs b/core/node/node_framework/src/implementations/layers/commitment_generator.rs index aeb668dca178..5d2f63931295 100644 --- a/core/node/node_framework/src/implementations/layers/commitment_generator.rs +++ b/core/node/node_framework/src/implementations/layers/commitment_generator.rs @@ -7,7 +7,7 @@ use crate::{ pools::{MasterPool, PoolResource}, }, service::{ServiceContext, StopReceiver}, - task::Task, + task::{Task, TaskId}, wiring_layer::{WiringError, WiringLayer}, }; @@ -55,8 +55,8 @@ struct CommitmentGeneratorTask { #[async_trait::async_trait] impl Task for CommitmentGeneratorTask { - fn name(&self) -> &'static str { - "commitment_generator" + fn id(&self) -> TaskId { + "commitment_generator".into() } async fn run(self: Box, stop_receiver: StopReceiver) -> anyhow::Result<()> { diff --git a/core/node/node_framework/src/implementations/layers/consensus.rs b/core/node/node_framework/src/implementations/layers/consensus.rs index 5a91e796eb5f..06bca1bba3ae 100644 --- a/core/node/node_framework/src/implementations/layers/consensus.rs +++ b/core/node/node_framework/src/implementations/layers/consensus.rs @@ -14,7 +14,7 @@ use crate::{ sync_state::SyncStateResource, }, service::{ServiceContext, StopReceiver}, - task::Task, + task::{Task, TaskId}, wiring_layer::{WiringError, WiringLayer}, }; @@ -110,8 +110,8 @@ pub struct MainNodeConsensusTask { #[async_trait::async_trait] impl Task for MainNodeConsensusTask { - fn name(&self) -> &'static str { - "consensus" + fn id(&self) -> TaskId { + "consensus".into() } async fn run(self: Box, mut 
stop_receiver: StopReceiver) -> anyhow::Result<()> { @@ -147,8 +147,8 @@ pub struct FetcherTask { #[async_trait::async_trait] impl Task for FetcherTask { - fn name(&self) -> &'static str { - "consensus_fetcher" + fn id(&self) -> TaskId { + "consensus_fetcher".into() } async fn run(self: Box, mut stop_receiver: StopReceiver) -> anyhow::Result<()> { diff --git a/core/node/node_framework/src/implementations/layers/consistency_checker.rs b/core/node/node_framework/src/implementations/layers/consistency_checker.rs index 4f2ec2ededcc..a387fc19ead1 100644 --- a/core/node/node_framework/src/implementations/layers/consistency_checker.rs +++ b/core/node/node_framework/src/implementations/layers/consistency_checker.rs @@ -8,7 +8,7 @@ use crate::{ pools::{MasterPool, PoolResource}, }, service::{ServiceContext, StopReceiver}, - task::Task, + task::{Task, TaskId}, wiring_layer::{WiringError, WiringLayer}, }; @@ -75,8 +75,8 @@ pub struct ConsistencyCheckerTask { #[async_trait::async_trait] impl Task for ConsistencyCheckerTask { - fn name(&self) -> &'static str { - "consistency_checker" + fn id(&self) -> TaskId { + "consistency_checker".into() } async fn run(self: Box, stop_receiver: StopReceiver) -> anyhow::Result<()> { diff --git a/core/node/node_framework/src/implementations/layers/contract_verification_api.rs b/core/node/node_framework/src/implementations/layers/contract_verification_api.rs index 2e0dcf540ea6..5e76c32ddd53 100644 --- a/core/node/node_framework/src/implementations/layers/contract_verification_api.rs +++ b/core/node/node_framework/src/implementations/layers/contract_verification_api.rs @@ -4,7 +4,7 @@ use zksync_dal::{ConnectionPool, Core}; use crate::{ implementations::resources::pools::{MasterPool, PoolResource, ReplicaPool}, service::{ServiceContext, StopReceiver}, - task::Task, + task::{Task, TaskId}, wiring_layer::{WiringError, WiringLayer}, }; @@ -46,8 +46,8 @@ pub struct ContractVerificationApiTask { #[async_trait::async_trait] impl Task for ContractVerificationApiTask { - fn name(&self) -> &'static str { - "contract_verification_api" + fn id(&self) -> TaskId { + "contract_verification_api".into() } async fn run(self: Box, stop_receiver: StopReceiver) -> anyhow::Result<()> { diff --git a/core/node/node_framework/src/implementations/layers/da_client.rs b/core/node/node_framework/src/implementations/layers/da_client.rs index 14ee453249bb..91e9c006b6db 100644 --- a/core/node/node_framework/src/implementations/layers/da_client.rs +++ b/core/node/node_framework/src/implementations/layers/da_client.rs @@ -6,7 +6,7 @@ use zksync_config::{ }, EthConfig, }; -use zksync_da_client::{gcs::GCSDAClient, no_da::NoDAClient}; +use zksync_da_client::{gcs::ObjectStoreDAClient, no_da::NoDAClient}; use zksync_da_layers::{ clients::celestia::CelestiaClient, config::DALayerConfig, DataAvailabilityClient, }; @@ -61,7 +61,9 @@ impl WiringLayer for DataAvailabilityClientLayer { // wire the right one manually, which is less convenient than the current approach, which // uses the config to determine the right client let client: Box = match self.da_config.da_mode { - DataAvailabilityMode::GCS(config) => Box::new(GCSDAClient::new(config).await), + DataAvailabilityMode::ObjectStore(config) => { + Box::new(ObjectStoreDAClient::new(config).await?) 
+ } DataAvailabilityMode::NoDA => Box::new(NoDAClient::new()), DataAvailabilityMode::DALayer(config) => match config { DALayerConfig::Celestia(celestia_config) => { diff --git a/core/node/node_framework/src/implementations/layers/da_dispatcher.rs b/core/node/node_framework/src/implementations/layers/da_dispatcher.rs index 9bad60935e46..bbab646ecb0d 100644 --- a/core/node/node_framework/src/implementations/layers/da_dispatcher.rs +++ b/core/node/node_framework/src/implementations/layers/da_dispatcher.rs @@ -9,7 +9,7 @@ use crate::{ pools::{MasterPool, PoolResource}, }, service::{ServiceContext, StopReceiver}, - task::Task, + task::{Task, TaskId}, wiring_layer::{WiringError, WiringLayer}, }; @@ -55,8 +55,8 @@ struct DataAvailabilityDispatcherTask { #[async_trait::async_trait] impl Task for DataAvailabilityDispatcherTask { - fn name(&self) -> &'static str { - "da_dispatcher" + fn id(&self) -> TaskId { + "da_dispatcher".into() } async fn run(self: Box, stop_receiver: StopReceiver) -> anyhow::Result<()> { diff --git a/core/node/node_framework/src/implementations/layers/eth_sender.rs b/core/node/node_framework/src/implementations/layers/eth_sender.rs index ed27fe863214..3cf2cf597c31 100644 --- a/core/node/node_framework/src/implementations/layers/eth_sender.rs +++ b/core/node/node_framework/src/implementations/layers/eth_sender.rs @@ -14,7 +14,7 @@ use crate::{ pools::{MasterPool, PoolResource, ReplicaPool}, }, service::{ServiceContext, StopReceiver}, - task::Task, + task::{Task, TaskId}, wiring_layer::{WiringError, WiringLayer}, }; @@ -173,8 +173,8 @@ struct EthTxAggregatorTask { #[async_trait::async_trait] impl Task for EthTxAggregatorTask { - fn name(&self) -> &'static str { - "eth_tx_aggregator" + fn id(&self) -> TaskId { + "eth_tx_aggregator".into() } async fn run(self: Box, stop_receiver: StopReceiver) -> anyhow::Result<()> { @@ -189,8 +189,8 @@ struct EthTxManagerTask { #[async_trait::async_trait] impl Task for EthTxManagerTask { - fn name(&self) -> &'static str { - "eth_tx_manager" + fn id(&self) -> TaskId { + "eth_tx_manager".into() } async fn run(self: Box, stop_receiver: StopReceiver) -> anyhow::Result<()> { diff --git a/core/node/node_framework/src/implementations/layers/eth_watch.rs b/core/node/node_framework/src/implementations/layers/eth_watch.rs index c12d92907534..df9319013112 100644 --- a/core/node/node_framework/src/implementations/layers/eth_watch.rs +++ b/core/node/node_framework/src/implementations/layers/eth_watch.rs @@ -12,7 +12,7 @@ use crate::{ pools::{MasterPool, PoolResource}, }, service::{ServiceContext, StopReceiver}, - task::Task, + task::{Task, TaskId}, wiring_layer::{WiringError, WiringLayer}, }; @@ -75,8 +75,8 @@ struct EthWatchTask { #[async_trait::async_trait] impl Task for EthWatchTask { - fn name(&self) -> &'static str { - "eth_watch" + fn id(&self) -> TaskId { + "eth_watch".into() } async fn run(self: Box, stop_receiver: StopReceiver) -> anyhow::Result<()> { diff --git a/core/node/node_framework/src/implementations/layers/healtcheck_server.rs b/core/node/node_framework/src/implementations/layers/healtcheck_server.rs index 34c41fd70a97..c6138c711083 100644 --- a/core/node/node_framework/src/implementations/layers/healtcheck_server.rs +++ b/core/node/node_framework/src/implementations/layers/healtcheck_server.rs @@ -7,7 +7,7 @@ use zksync_node_api_server::healthcheck::HealthCheckHandle; use crate::{ implementations::resources::healthcheck::AppHealthCheckResource, service::{ServiceContext, StopReceiver}, - task::UnconstrainedTask, + task::{TaskId, 
UnconstrainedTask}, wiring_layer::{WiringError, WiringLayer}, }; @@ -53,8 +53,8 @@ struct HealthCheckTask { #[async_trait::async_trait] impl UnconstrainedTask for HealthCheckTask { - fn name(&self) -> &'static str { - "healthcheck_server" + fn id(&self) -> TaskId { + "healthcheck_server".into() } async fn run_unconstrained( diff --git a/core/node/node_framework/src/implementations/layers/house_keeper.rs b/core/node/node_framework/src/implementations/layers/house_keeper.rs index 1eb559ea5e1f..7b3e52c7ed5d 100644 --- a/core/node/node_framework/src/implementations/layers/house_keeper.rs +++ b/core/node/node_framework/src/implementations/layers/house_keeper.rs @@ -19,7 +19,7 @@ use zksync_house_keeper::{ use crate::{ implementations::resources::pools::{PoolResource, ProverPool, ReplicaPool}, service::{ServiceContext, StopReceiver}, - task::Task, + task::{Task, TaskId}, wiring_layer::{WiringError, WiringLayer}, }; @@ -179,8 +179,8 @@ struct PostgresMetricsScrapingTask { #[async_trait::async_trait] impl Task for PostgresMetricsScrapingTask { - fn name(&self) -> &'static str { - "postgres_metrics_scraping" + fn id(&self) -> TaskId { + "postgres_metrics_scraping".into() } async fn run(self: Box, mut stop_receiver: StopReceiver) -> anyhow::Result<()> { @@ -203,8 +203,8 @@ struct L1BatchMetricsReporterTask { #[async_trait::async_trait] impl Task for L1BatchMetricsReporterTask { - fn name(&self) -> &'static str { - "l1_batch_metrics_reporter" + fn id(&self) -> TaskId { + "l1_batch_metrics_reporter".into() } async fn run(self: Box, stop_receiver: StopReceiver) -> anyhow::Result<()> { @@ -219,8 +219,8 @@ struct FriProverJobRetryManagerTask { #[async_trait::async_trait] impl Task for FriProverJobRetryManagerTask { - fn name(&self) -> &'static str { - "fri_prover_job_retry_manager" + fn id(&self) -> TaskId { + "fri_prover_job_retry_manager".into() } async fn run(self: Box, stop_receiver: StopReceiver) -> anyhow::Result<()> { @@ -235,8 +235,8 @@ struct FriWitnessGeneratorJobRetryManagerTask { #[async_trait::async_trait] impl Task for FriWitnessGeneratorJobRetryManagerTask { - fn name(&self) -> &'static str { - "fri_witness_generator_job_retry_manager" + fn id(&self) -> TaskId { + "fri_witness_generator_job_retry_manager".into() } async fn run(self: Box, stop_receiver: StopReceiver) -> anyhow::Result<()> { @@ -253,8 +253,8 @@ struct WaitingToQueuedFriWitnessJobMoverTask { #[async_trait::async_trait] impl Task for WaitingToQueuedFriWitnessJobMoverTask { - fn name(&self) -> &'static str { - "waiting_to_queued_fri_witness_job_mover" + fn id(&self) -> TaskId { + "waiting_to_queued_fri_witness_job_mover".into() } async fn run(self: Box, stop_receiver: StopReceiver) -> anyhow::Result<()> { @@ -271,8 +271,8 @@ struct FriWitnessGeneratorStatsReporterTask { #[async_trait::async_trait] impl Task for FriWitnessGeneratorStatsReporterTask { - fn name(&self) -> &'static str { - "fri_witness_generator_stats_reporter" + fn id(&self) -> TaskId { + "fri_witness_generator_stats_reporter".into() } async fn run(self: Box, stop_receiver: StopReceiver) -> anyhow::Result<()> { @@ -289,8 +289,8 @@ struct FriProverStatsReporterTask { #[async_trait::async_trait] impl Task for FriProverStatsReporterTask { - fn name(&self) -> &'static str { - "fri_prover_stats_reporter" + fn id(&self) -> TaskId { + "fri_prover_stats_reporter".into() } async fn run(self: Box, stop_receiver: StopReceiver) -> anyhow::Result<()> { @@ -305,8 +305,8 @@ struct FriProofCompressorStatsReporterTask { #[async_trait::async_trait] impl Task for 
FriProofCompressorStatsReporterTask { - fn name(&self) -> &'static str { - "fri_proof_compressor_stats_reporter" + fn id(&self) -> TaskId { + "fri_proof_compressor_stats_reporter".into() } async fn run(self: Box, stop_receiver: StopReceiver) -> anyhow::Result<()> { @@ -323,8 +323,8 @@ struct FriProofCompressorJobRetryManagerTask { #[async_trait::async_trait] impl Task for FriProofCompressorJobRetryManagerTask { - fn name(&self) -> &'static str { - "fri_proof_compressor_job_retry_manager" + fn id(&self) -> TaskId { + "fri_proof_compressor_job_retry_manager".into() } async fn run(self: Box, stop_receiver: StopReceiver) -> anyhow::Result<()> { @@ -341,8 +341,8 @@ struct FriProverJobArchiverTask { #[async_trait::async_trait] impl Task for FriProverJobArchiverTask { - fn name(&self) -> &'static str { - "fri_prover_job_archiver" + fn id(&self) -> TaskId { + "fri_prover_job_archiver".into() } async fn run(self: Box, stop_receiver: StopReceiver) -> anyhow::Result<()> { @@ -356,8 +356,8 @@ struct FriProverGpuArchiverTask { #[async_trait::async_trait] impl Task for FriProverGpuArchiverTask { - fn name(&self) -> &'static str { - "fri_prover_gpu_archiver" + fn id(&self) -> TaskId { + "fri_prover_gpu_archiver".into() } async fn run(self: Box, stop_receiver: StopReceiver) -> anyhow::Result<()> { diff --git a/core/node/node_framework/src/implementations/layers/l1_gas.rs b/core/node/node_framework/src/implementations/layers/l1_gas.rs index d9e554aad04e..8deafd4e2949 100644 --- a/core/node/node_framework/src/implementations/layers/l1_gas.rs +++ b/core/node/node_framework/src/implementations/layers/l1_gas.rs @@ -14,7 +14,7 @@ use crate::{ l1_tx_params::L1TxParamsResource, }, service::{ServiceContext, StopReceiver}, - task::Task, + task::{Task, TaskId}, wiring_layer::{WiringError, WiringLayer}, }; @@ -80,8 +80,8 @@ struct GasAdjusterTask { #[async_trait::async_trait] impl Task for GasAdjusterTask { - fn name(&self) -> &'static str { - "gas_adjuster" + fn id(&self) -> TaskId { + "gas_adjuster".into() } async fn run(self: Box, stop_receiver: StopReceiver) -> anyhow::Result<()> { diff --git a/core/node/node_framework/src/implementations/layers/metadata_calculator.rs b/core/node/node_framework/src/implementations/layers/metadata_calculator.rs index 4b1e1d00cb5e..935bb283fe81 100644 --- a/core/node/node_framework/src/implementations/layers/metadata_calculator.rs +++ b/core/node/node_framework/src/implementations/layers/metadata_calculator.rs @@ -18,7 +18,7 @@ use crate::{ web3_api::TreeApiClientResource, }, service::{ServiceContext, StopReceiver}, - task::Task, + task::{Task, TaskId}, wiring_layer::{WiringError, WiringLayer}, }; @@ -118,8 +118,8 @@ pub struct MetadataCalculatorTask { #[async_trait::async_trait] impl Task for MetadataCalculatorTask { - fn name(&self) -> &'static str { - "metadata_calculator" + fn id(&self) -> TaskId { + "metadata_calculator".into() } async fn run(self: Box, stop_receiver: StopReceiver) -> anyhow::Result<()> { @@ -141,8 +141,8 @@ pub struct TreeApiTask { #[async_trait::async_trait] impl Task for TreeApiTask { - fn name(&self) -> &'static str { - "tree_api" + fn id(&self) -> TaskId { + "tree_api".into() } async fn run(self: Box, stop_receiver: StopReceiver) -> anyhow::Result<()> { diff --git a/core/node/node_framework/src/implementations/layers/mod.rs b/core/node/node_framework/src/implementations/layers/mod.rs index 1d8b03ce611f..57ab7fa21866 100644 --- a/core/node/node_framework/src/implementations/layers/mod.rs +++ b/core/node/node_framework/src/implementations/layers/mod.rs @@ 
-17,6 +17,8 @@ pub mod pools_layer; pub mod prometheus_exporter; pub mod proof_data_handler; pub mod query_eth_client; +pub mod reorg_detector_checker; +pub mod reorg_detector_runner; pub mod sigint; pub mod state_keeper; pub mod tee_verifier_input_producer; diff --git a/core/node/node_framework/src/implementations/layers/prometheus_exporter.rs b/core/node/node_framework/src/implementations/layers/prometheus_exporter.rs index 95477291e432..6c7d4f915df4 100644 --- a/core/node/node_framework/src/implementations/layers/prometheus_exporter.rs +++ b/core/node/node_framework/src/implementations/layers/prometheus_exporter.rs @@ -4,7 +4,7 @@ use zksync_health_check::{HealthStatus, HealthUpdater, ReactiveHealthCheck}; use crate::{ implementations::resources::healthcheck::AppHealthCheckResource, service::{ServiceContext, StopReceiver}, - task::Task, + task::{Task, TaskId}, wiring_layer::{WiringError, WiringLayer}, }; @@ -50,8 +50,8 @@ impl WiringLayer for PrometheusExporterLayer { #[async_trait::async_trait] impl Task for PrometheusExporterTask { - fn name(&self) -> &'static str { - "prometheus_exporter" + fn id(&self) -> TaskId { + "prometheus_exporter".into() } async fn run(self: Box, stop_receiver: StopReceiver) -> anyhow::Result<()> { diff --git a/core/node/node_framework/src/implementations/layers/proof_data_handler.rs b/core/node/node_framework/src/implementations/layers/proof_data_handler.rs index f9960036cec4..7952ca6a585f 100644 --- a/core/node/node_framework/src/implementations/layers/proof_data_handler.rs +++ b/core/node/node_framework/src/implementations/layers/proof_data_handler.rs @@ -11,7 +11,7 @@ use crate::{ pools::{MasterPool, PoolResource}, }, service::{ServiceContext, StopReceiver}, - task::Task, + task::{Task, TaskId}, wiring_layer::{WiringError, WiringLayer}, }; @@ -73,8 +73,8 @@ struct ProofDataHandlerTask { #[async_trait::async_trait] impl Task for ProofDataHandlerTask { - fn name(&self) -> &'static str { - "proof_data_handler" + fn id(&self) -> TaskId { + "proof_data_handler".into() } async fn run(self: Box, stop_receiver: StopReceiver) -> anyhow::Result<()> { diff --git a/core/node/node_framework/src/implementations/layers/reorg_detector_checker.rs b/core/node/node_framework/src/implementations/layers/reorg_detector_checker.rs new file mode 100644 index 000000000000..64454b63998b --- /dev/null +++ b/core/node/node_framework/src/implementations/layers/reorg_detector_checker.rs @@ -0,0 +1,71 @@ +use std::time::Duration; + +use anyhow::Context; +use zksync_reorg_detector::{self, ReorgDetector}; + +use crate::{ + implementations::resources::{ + main_node_client::MainNodeClientResource, + pools::{MasterPool, PoolResource}, + }, + precondition::Precondition, + service::{ServiceContext, StopReceiver}, + task::TaskId, + wiring_layer::{WiringError, WiringLayer}, +}; + +const REORG_DETECTED_SLEEP_INTERVAL: Duration = Duration::from_secs(1); + +/// The layer is responsible for integrating reorg checking into the system. +/// When a reorg is detected, the system will not start running until it is fixed. +#[derive(Debug)] +pub struct ReorgDetectorCheckerLayer; + +#[async_trait::async_trait] +impl WiringLayer for ReorgDetectorCheckerLayer { + fn layer_name(&self) -> &'static str { + "reorg_detector_checker_layer" + } + + async fn wire(self: Box, mut context: ServiceContext<'_>) -> Result<(), WiringError> { + // Get resources. 
+ let main_node_client = context.get_resource::().await?.0; + + let pool_resource = context.get_resource::>().await?; + let pool = pool_resource.get().await?; + + // Create and insert precondition. + context.add_precondition(Box::new(CheckerPrecondition { + reorg_detector: ReorgDetector::new(main_node_client, pool), + })); + + Ok(()) + } +} + +pub struct CheckerPrecondition { + reorg_detector: ReorgDetector, +} + +#[async_trait::async_trait] +impl Precondition for CheckerPrecondition { + fn id(&self) -> TaskId { + "reorg_detector_checker".into() + } + + async fn check(mut self: Box, stop_receiver: StopReceiver) -> anyhow::Result<()> { + loop { + match self.reorg_detector.run_once(stop_receiver.0.clone()).await { + Ok(()) => return Ok(()), + Err(zksync_reorg_detector::Error::ReorgDetected(last_correct_l1_batch)) => { + tracing::warn!( + "Reorg detected, last correct L1 batch #{}. Waiting till it will be resolved. Sleep for {} seconds and retry", + last_correct_l1_batch, REORG_DETECTED_SLEEP_INTERVAL.as_secs() + ); + tokio::time::sleep(REORG_DETECTED_SLEEP_INTERVAL).await; + } + Err(err) => return Err(err).context("reorg_detector.check_consistency()"), + } + } + } +} diff --git a/core/node/node_framework/src/implementations/layers/reorg_detector_runner.rs b/core/node/node_framework/src/implementations/layers/reorg_detector_runner.rs new file mode 100644 index 000000000000..55ee621c15b0 --- /dev/null +++ b/core/node/node_framework/src/implementations/layers/reorg_detector_runner.rs @@ -0,0 +1,73 @@ +use std::sync::Arc; + +use anyhow::Context; +use zksync_block_reverter::BlockReverter; +use zksync_reorg_detector::{self, ReorgDetector}; + +use crate::{ + implementations::resources::{ + main_node_client::MainNodeClientResource, + pools::{MasterPool, PoolResource}, + reverter::BlockReverterResource, + }, + service::{ServiceContext, StopReceiver}, + task::{TaskId, UnconstrainedOneshotTask}, + wiring_layer::{WiringError, WiringLayer}, +}; + +/// Layer responsible for detecting reorg and reverting blocks in case it was found. +#[derive(Debug)] +pub struct ReorgDetectorRunnerLayer; + +#[async_trait::async_trait] +impl WiringLayer for ReorgDetectorRunnerLayer { + fn layer_name(&self) -> &'static str { + "reorg_detector_runner_layer" + } + + async fn wire(self: Box, mut context: ServiceContext<'_>) -> Result<(), WiringError> { + // Get resources. + let main_node_client = context.get_resource::().await?.0; + + let pool_resource = context.get_resource::>().await?; + let pool = pool_resource.get().await?; + + let reverter = context.get_resource::().await?.0; + + // Create and insert task. 
+ context.add_unconstrained_oneshot_task(Box::new(RunnerUnconstrainedOneshotTask { + reorg_detector: ReorgDetector::new(main_node_client, pool), + reverter, + })); + + Ok(()) + } +} + +pub struct RunnerUnconstrainedOneshotTask { + reorg_detector: ReorgDetector, + reverter: Arc, +} + +#[async_trait::async_trait] +impl UnconstrainedOneshotTask for RunnerUnconstrainedOneshotTask { + fn id(&self) -> TaskId { + "reorg_detector_runner".into() + } + + async fn run_unconstrained_oneshot( + mut self: Box, + stop_receiver: StopReceiver, + ) -> anyhow::Result<()> { + match self.reorg_detector.run_once(stop_receiver.0.clone()).await { + Ok(()) => {} + Err(zksync_reorg_detector::Error::ReorgDetected(last_correct_l1_batch)) => { + tracing::info!("Reverting to l1 batch number {last_correct_l1_batch}"); + self.reverter.roll_back(last_correct_l1_batch).await?; + tracing::info!("Revert successfully completed"); + } + Err(err) => return Err(err).context("reorg_detector.check_consistency()"), + } + Ok(()) + } +} diff --git a/core/node/node_framework/src/implementations/layers/sigint.rs b/core/node/node_framework/src/implementations/layers/sigint.rs index a028be97995d..2d11f1525370 100644 --- a/core/node/node_framework/src/implementations/layers/sigint.rs +++ b/core/node/node_framework/src/implementations/layers/sigint.rs @@ -2,7 +2,7 @@ use tokio::sync::oneshot; use crate::{ service::{ServiceContext, StopReceiver}, - task::UnconstrainedTask, + task::{TaskId, UnconstrainedTask}, wiring_layer::{WiringError, WiringLayer}, }; @@ -29,8 +29,8 @@ struct SigintHandlerTask; #[async_trait::async_trait] impl UnconstrainedTask for SigintHandlerTask { - fn name(&self) -> &'static str { - "sigint_handler" + fn id(&self) -> TaskId { + "sigint_handler".into() } async fn run_unconstrained( diff --git a/core/node/node_framework/src/implementations/layers/state_keeper/mempool_io.rs b/core/node/node_framework/src/implementations/layers/state_keeper/mempool_io.rs index 91be11ea8a8e..65e86bef5204 100644 --- a/core/node/node_framework/src/implementations/layers/state_keeper/mempool_io.rs +++ b/core/node/node_framework/src/implementations/layers/state_keeper/mempool_io.rs @@ -22,7 +22,7 @@ use crate::{ }, resource::Unique, service::{ServiceContext, StopReceiver}, - task::Task, + task::{Task, TaskId}, wiring_layer::{WiringError, WiringLayer}, }; @@ -142,8 +142,8 @@ struct L2BlockSealerTask(zksync_state_keeper::L2BlockSealerTask); #[async_trait::async_trait] impl Task for L2BlockSealerTask { - fn name(&self) -> &'static str { - "state_keeper/l2_block_sealer" + fn id(&self) -> TaskId { + "state_keeper/l2_block_sealer".into() } async fn run(self: Box, _stop_receiver: StopReceiver) -> anyhow::Result<()> { @@ -157,8 +157,8 @@ struct MempoolFetcherTask(MempoolFetcher); #[async_trait::async_trait] impl Task for MempoolFetcherTask { - fn name(&self) -> &'static str { - "state_keeper/mempool_fetcher" + fn id(&self) -> TaskId { + "state_keeper/mempool_fetcher".into() } async fn run(self: Box, stop_receiver: StopReceiver) -> anyhow::Result<()> { diff --git a/core/node/node_framework/src/implementations/layers/state_keeper/mod.rs b/core/node/node_framework/src/implementations/layers/state_keeper/mod.rs index 8d56bdd671a4..edbe1d6e12f7 100644 --- a/core/node/node_framework/src/implementations/layers/state_keeper/mod.rs +++ b/core/node/node_framework/src/implementations/layers/state_keeper/mod.rs @@ -21,7 +21,7 @@ use crate::{ }, }, service::{ServiceContext, StopReceiver}, - task::Task, + task::{Task, TaskId}, wiring_layer::{WiringError, 
WiringLayer}, }; @@ -105,8 +105,8 @@ struct StateKeeperTask { #[async_trait::async_trait] impl Task for StateKeeperTask { - fn name(&self) -> &'static str { - "state_keeper" + fn id(&self) -> TaskId { + "state_keeper".into() } async fn run(self: Box, stop_receiver: StopReceiver) -> anyhow::Result<()> { @@ -134,8 +134,8 @@ struct RocksdbCatchupTask(AsyncCatchupTask); #[async_trait::async_trait] impl Task for RocksdbCatchupTask { - fn name(&self) -> &'static str { - "state_keeper/rocksdb_catchup_task" + fn id(&self) -> TaskId { + "state_keeper/rocksdb_catchup_task".into() } async fn run(self: Box, mut stop_receiver: StopReceiver) -> anyhow::Result<()> { diff --git a/core/node/node_framework/src/implementations/layers/tee_verifier_input_producer.rs b/core/node/node_framework/src/implementations/layers/tee_verifier_input_producer.rs index a595e2eeb20b..76ae0b26971f 100644 --- a/core/node/node_framework/src/implementations/layers/tee_verifier_input_producer.rs +++ b/core/node/node_framework/src/implementations/layers/tee_verifier_input_producer.rs @@ -8,7 +8,7 @@ use crate::{ pools::{MasterPool, PoolResource}, }, service::{ServiceContext, StopReceiver}, - task::Task, + task::{Task, TaskId}, wiring_layer::{WiringError, WiringLayer}, }; @@ -52,8 +52,8 @@ pub struct TeeVerifierInputProducerTask { #[async_trait::async_trait] impl Task for TeeVerifierInputProducerTask { - fn name(&self) -> &'static str { - "tee_verifier_input_producer" + fn id(&self) -> TaskId { + "tee_verifier_input_producer".into() } async fn run(self: Box, stop_receiver: StopReceiver) -> anyhow::Result<()> { diff --git a/core/node/node_framework/src/implementations/layers/web3_api/caches.rs b/core/node/node_framework/src/implementations/layers/web3_api/caches.rs index 7c6d160c3339..c01a62748fa4 100644 --- a/core/node/node_framework/src/implementations/layers/web3_api/caches.rs +++ b/core/node/node_framework/src/implementations/layers/web3_api/caches.rs @@ -8,7 +8,7 @@ use crate::{ web3_api::MempoolCacheResource, }, service::{ServiceContext, StopReceiver}, - task::Task, + task::{Task, TaskId}, wiring_layer::{WiringError, WiringLayer}, }; @@ -49,8 +49,8 @@ pub struct MempoolCacheUpdateTask(mempool_cache::MempoolCacheUpdateTask); #[async_trait::async_trait] impl Task for MempoolCacheUpdateTask { - fn name(&self) -> &'static str { - "mempool_cache_update_task" + fn id(&self) -> TaskId { + "mempool_cache_update_task".into() } async fn run(self: Box, stop_receiver: StopReceiver) -> anyhow::Result<()> { diff --git a/core/node/node_framework/src/implementations/layers/web3_api/server.rs b/core/node/node_framework/src/implementations/layers/web3_api/server.rs index 08eaa4b80444..c81b475c3ec4 100644 --- a/core/node/node_framework/src/implementations/layers/web3_api/server.rs +++ b/core/node/node_framework/src/implementations/layers/web3_api/server.rs @@ -14,7 +14,7 @@ use crate::{ web3_api::{MempoolCacheResource, TreeApiClientResource, TxSenderResource}, }, service::{ServiceContext, StopReceiver}, - task::Task, + task::{Task, TaskId}, wiring_layer::{WiringError, WiringLayer}, }; @@ -206,10 +206,10 @@ type ApiJoinHandle = JoinHandle>; #[async_trait::async_trait] impl Task for Web3ApiTask { - fn name(&self) -> &'static str { + fn id(&self) -> TaskId { match self.transport { - Transport::Http => "web3_http_server", - Transport::Ws => "web3_ws_server", + Transport::Http => "web3_http_server".into(), + Transport::Ws => "web3_ws_server".into(), } } @@ -232,8 +232,8 @@ struct ApiTaskGarbageCollector { #[async_trait::async_trait] impl Task for 
ApiTaskGarbageCollector { - fn name(&self) -> &'static str { - "api_task_garbage_collector" + fn id(&self) -> TaskId { + "api_task_garbage_collector".into() } async fn run(self: Box, _stop_receiver: StopReceiver) -> anyhow::Result<()> { diff --git a/core/node/node_framework/src/implementations/layers/web3_api/tx_sender.rs b/core/node/node_framework/src/implementations/layers/web3_api/tx_sender.rs index eea9148f6a6e..c7a568e5cb4d 100644 --- a/core/node/node_framework/src/implementations/layers/web3_api/tx_sender.rs +++ b/core/node/node_framework/src/implementations/layers/web3_api/tx_sender.rs @@ -14,7 +14,7 @@ use crate::{ web3_api::{TxSenderResource, TxSinkResource}, }, service::{ServiceContext, StopReceiver}, - task::Task, + task::{Task, TaskId}, wiring_layer::{WiringError, WiringLayer}, }; @@ -123,8 +123,8 @@ impl fmt::Debug for PostgresStorageCachesTask { #[async_trait::async_trait] impl Task for PostgresStorageCachesTask { - fn name(&self) -> &'static str { - "postgres_storage_caches" + fn id(&self) -> TaskId { + "postgres_storage_caches".into() } async fn run(self: Box, stop_receiver: StopReceiver) -> anyhow::Result<()> { @@ -138,8 +138,8 @@ struct VmConcurrencyBarrierTask { #[async_trait::async_trait] impl Task for VmConcurrencyBarrierTask { - fn name(&self) -> &'static str { - "vm_concurrency_barrier_task" + fn id(&self) -> TaskId { + "vm_concurrency_barrier_task".into() } async fn run(mut self: Box, mut stop_receiver: StopReceiver) -> anyhow::Result<()> { diff --git a/core/node/node_framework/src/implementations/resources/mod.rs b/core/node/node_framework/src/implementations/resources/mod.rs index b8176e148c6f..ac090d551316 100644 --- a/core/node/node_framework/src/implementations/resources/mod.rs +++ b/core/node/node_framework/src/implementations/resources/mod.rs @@ -8,6 +8,7 @@ pub mod l1_tx_params; pub mod main_node_client; pub mod object_store; pub mod pools; +pub mod reverter; pub mod state_keeper; pub mod sync_state; pub mod web3_api; diff --git a/core/node/node_framework/src/implementations/resources/reverter.rs b/core/node/node_framework/src/implementations/resources/reverter.rs new file mode 100644 index 000000000000..2a2bdb142a85 --- /dev/null +++ b/core/node/node_framework/src/implementations/resources/reverter.rs @@ -0,0 +1,15 @@ +use std::sync::Arc; + +use zksync_block_reverter::BlockReverter; + +use crate::resource::Resource; + +/// Wrapper for the block reverter. +#[derive(Debug, Clone)] +pub struct BlockReverterResource(pub Arc); + +impl Resource for BlockReverterResource { + fn name() -> String { + "common/block_reverter".into() + } +} diff --git a/core/node/node_framework/src/precondition.rs b/core/node/node_framework/src/precondition.rs index 0e47da6a631e..a612c5b90a8b 100644 --- a/core/node/node_framework/src/precondition.rs +++ b/core/node/node_framework/src/precondition.rs @@ -2,12 +2,12 @@ use std::sync::Arc; use tokio::sync::Barrier; -use crate::service::StopReceiver; +use crate::{service::StopReceiver, task::TaskId}; #[async_trait::async_trait] pub trait Precondition: 'static + Send + Sync { /// Unique name of the precondition. 
- fn name(&self) -> &'static str; + fn id(&self) -> TaskId; async fn check(self: Box, stop_receiver: StopReceiver) -> anyhow::Result<()>; } diff --git a/core/node/node_framework/src/service/context.rs b/core/node/node_framework/src/service/context.rs index 4ec76ca1d2a3..81d094630c32 100644 --- a/core/node/node_framework/src/service/context.rs +++ b/core/node/node_framework/src/service/context.rs @@ -39,7 +39,7 @@ impl<'a> ServiceContext<'a> { /// Added tasks will be launched after the wiring process will be finished and all the preconditions /// are met. pub fn add_task(&mut self, task: Box) -> &mut Self { - tracing::info!("Layer {} has added a new task: {}", self.layer, task.name()); + tracing::info!("Layer {} has added a new task: {}", self.layer, task.id()); self.service.runnables.tasks.push(task); self } @@ -50,7 +50,7 @@ impl<'a> ServiceContext<'a> { tracing::info!( "Layer {} has added a new unconstrained task: {}", self.layer, - task.name() + task.id() ); self.service.runnables.unconstrained_tasks.push(task); self @@ -61,7 +61,7 @@ impl<'a> ServiceContext<'a> { tracing::info!( "Layer {} has added a new precondition: {}", self.layer, - precondition.name() + precondition.id() ); self.service.runnables.preconditions.push(precondition); self @@ -72,7 +72,7 @@ impl<'a> ServiceContext<'a> { tracing::info!( "Layer {} has added a new oneshot task: {}", self.layer, - task.name() + task.id() ); self.service.runnables.oneshot_tasks.push(task); self @@ -86,7 +86,7 @@ impl<'a> ServiceContext<'a> { tracing::info!( "Layer {} has added a new unconstrained oneshot task: {}", self.layer, - task.name() + task.id() ); self.service .runnables diff --git a/core/node/node_framework/src/service/runnables.rs b/core/node/node_framework/src/service/runnables.rs index 7b3e3f7f43b7..7f35e384d6cc 100644 --- a/core/node/node_framework/src/service/runnables.rs +++ b/core/node/node_framework/src/service/runnables.rs @@ -27,22 +27,22 @@ pub(super) struct Runnables { impl fmt::Debug for Runnables { fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { - // Macro that iterates over a `Vec`, invokes `.name()` method and collects the results into a `Vec`. + // Macro that iterates over a `Vec`, invokes `.id()` method and collects the results into a `Vec`. // Returns a reference to created `Vec` to satisfy the `.field` method signature. - macro_rules! names { + macro_rules! 
ids { ($vec:expr) => { - &$vec.iter().map(|x| x.name()).collect::>() + &$vec.iter().map(|x| x.id()).collect::>() }; } f.debug_struct("Runnables") - .field("preconditions", names!(self.preconditions)) - .field("tasks", names!(self.tasks)) - .field("oneshot_tasks", names!(self.oneshot_tasks)) - .field("unconstrained_tasks", names!(self.unconstrained_tasks)) + .field("preconditions", ids!(self.preconditions)) + .field("tasks", ids!(self.tasks)) + .field("oneshot_tasks", ids!(self.oneshot_tasks)) + .field("unconstrained_tasks", ids!(self.unconstrained_tasks)) .field( "unconstrained_oneshot_tasks", - names!(self.unconstrained_oneshot_tasks), + ids!(self.unconstrained_oneshot_tasks), ) .finish() } @@ -127,7 +127,7 @@ impl Runnables { stop_receiver: StopReceiver, ) { for task in std::mem::take(&mut self.unconstrained_tasks) { - let name = task.name(); + let name = task.id(); let stop_receiver = stop_receiver.clone(); let task_future = Box::pin(async move { task.run_unconstrained(stop_receiver) @@ -145,7 +145,7 @@ impl Runnables { stop_receiver: StopReceiver, ) { for task in std::mem::take(&mut self.tasks) { - let name = task.name(); + let name = task.id(); let stop_receiver = stop_receiver.clone(); let task_barrier = task_barrier.clone(); let task_future = Box::pin(async move { @@ -164,7 +164,7 @@ impl Runnables { stop_receiver: StopReceiver, ) { for precondition in std::mem::take(&mut self.preconditions) { - let name = precondition.name(); + let name = precondition.id(); let stop_receiver = stop_receiver.clone(); let task_barrier = task_barrier.clone(); let task_future = Box::pin(async move { @@ -184,7 +184,7 @@ impl Runnables { stop_receiver: StopReceiver, ) { for oneshot_task in std::mem::take(&mut self.oneshot_tasks) { - let name = oneshot_task.name(); + let name = oneshot_task.id(); let stop_receiver = stop_receiver.clone(); let task_barrier = task_barrier.clone(); let task_future = Box::pin(async move { @@ -203,7 +203,7 @@ impl Runnables { stop_receiver: StopReceiver, ) { for unconstrained_oneshot_task in std::mem::take(&mut self.unconstrained_oneshot_tasks) { - let name = unconstrained_oneshot_task.name(); + let name = unconstrained_oneshot_task.id(); let stop_receiver = stop_receiver.clone(); let task_future = Box::pin(async move { unconstrained_oneshot_task diff --git a/core/node/node_framework/src/service/tests.rs b/core/node/node_framework/src/service/tests.rs index 81a7eaabdc6d..b5bcc3aaa255 100644 --- a/core/node/node_framework/src/service/tests.rs +++ b/core/node/node_framework/src/service/tests.rs @@ -9,7 +9,7 @@ use crate::{ ServiceContext, StopReceiver, WiringError, WiringLayer, ZkStackServiceBuilder, ZkStackServiceError, }, - task::Task, + task::{Task, TaskId}, }; // `ZkStack` Service's `new()` method has to have a check for nested runtime. 
@@ -127,8 +127,8 @@ struct ErrorTask; #[async_trait::async_trait] impl Task for ErrorTask { - fn name(&self) -> &'static str { - "error_task" + fn id(&self) -> TaskId { + "error_task".into() } async fn run(self: Box, _stop_receiver: StopReceiver) -> anyhow::Result<()> { anyhow::bail!("error task") @@ -178,8 +178,8 @@ struct SuccessfulTask(Arc, Arc>); #[async_trait::async_trait] impl Task for SuccessfulTask { - fn name(&self) -> &'static str { - "successful_task" + fn id(&self) -> TaskId { + "successful_task".into() } async fn run(self: Box, _stop_receiver: StopReceiver) -> anyhow::Result<()> { self.0.wait().await; @@ -196,8 +196,8 @@ struct RemainingTask(Arc, Arc>); #[async_trait::async_trait] impl Task for RemainingTask { - fn name(&self) -> &'static str { - "remaining_task" + fn id(&self) -> TaskId { + "remaining_task".into() } async fn run(self: Box, mut stop_receiver: StopReceiver) -> anyhow::Result<()> { diff --git a/core/node/node_framework/src/task.rs b/core/node/node_framework/src/task.rs index f5ba08de193a..a72d640731ea 100644 --- a/core/node/node_framework/src/task.rs +++ b/core/node/node_framework/src/task.rs @@ -28,12 +28,46 @@ //! - A task that must be started as soon as possible, e.g. healthcheck server. //! - A task that may be a driving force for some precondition to be met. -use std::sync::Arc; +use std::{ + fmt::{Display, Formatter}, + ops::Deref, + sync::Arc, +}; use tokio::sync::Barrier; use crate::service::StopReceiver; +/// A unique human-readable identifier of a task. +#[derive(Debug, Clone, PartialEq, Eq, Hash)] +pub struct TaskId(String); + +impl TaskId { + pub fn new(value: String) -> Self { + TaskId(value) + } +} + +impl Display for TaskId { + fn fmt(&self, f: &mut Formatter<'_>) -> std::fmt::Result { + f.write_str(&self.0) + } +} + +impl From<&str> for TaskId { + fn from(value: &str) -> Self { + TaskId(value.to_owned()) + } +} + +impl Deref for TaskId { + type Target = str; + + fn deref(&self) -> &Self::Target { + &self.0 + } +} + /// A task implementation. /// /// Note: any `Task` added to the service will only start after all the [preconditions](crate::precondition::Precondition) @@ -41,7 +75,7 @@ use crate::service::StopReceiver; #[async_trait::async_trait] pub trait Task: 'static + Send { /// Unique name of the task. - fn name(&self) -> &'static str; + fn id(&self) -> TaskId; /// Runs the task. /// @@ -85,7 +119,7 @@ impl dyn Task { #[async_trait::async_trait] pub trait OneshotTask: 'static + Send { /// Unique name of the task. - fn name(&self) -> &'static str; + fn id(&self) -> TaskId; /// Runs the task. /// @@ -130,7 +164,7 @@ impl dyn OneshotTask { #[async_trait::async_trait] pub trait UnconstrainedTask: 'static + Send { /// Unique name of the task. - fn name(&self) -> &'static str; + fn id(&self) -> TaskId; /// Runs the task without waiting for any precondition to be met. async fn run_unconstrained(self: Box, stop_receiver: StopReceiver) -> anyhow::Result<()>; @@ -141,7 +175,7 @@ pub trait UnconstrainedTask: 'static + Send { #[async_trait::async_trait] pub trait UnconstrainedOneshotTask: 'static + Send { /// Unique name of the task. - fn name(&self) -> &'static str; + fn id(&self) -> TaskId; /// Runs the task without waiting for any precondition to be met. 
async fn run_unconstrained_oneshot( diff --git a/core/node/node_sync/Cargo.toml b/core/node/node_sync/Cargo.toml index 248478abddf5..9fd0aad73094 100644 --- a/core/node/node_sync/Cargo.toml +++ b/core/node/node_sync/Cargo.toml @@ -38,4 +38,5 @@ thiserror.workspace = true zksync_node_test_utils.workspace = true assert_matches.workspace = true +once_cell.workspace = true test-casing.workspace = true diff --git a/core/node/node_sync/src/tree_data_fetcher/mod.rs b/core/node/node_sync/src/tree_data_fetcher/mod.rs index dfa1f8ffa2cc..f143cc79198a 100644 --- a/core/node/node_sync/src/tree_data_fetcher/mod.rs +++ b/core/node/node_sync/src/tree_data_fetcher/mod.rs @@ -1,51 +1,32 @@ //! Fetcher responsible for getting Merkle tree outputs from the main node. -use std::{fmt, time::Duration}; +use std::time::Duration; use anyhow::Context as _; -use async_trait::async_trait; use serde::Serialize; #[cfg(test)] use tokio::sync::mpsc; use tokio::sync::watch; use zksync_dal::{ConnectionPool, Core, CoreDal, DalError}; use zksync_health_check::{Health, HealthStatus, HealthUpdater, ReactiveHealthCheck}; -use zksync_types::{api, block::L1BatchTreeData, L1BatchNumber}; +use zksync_types::{block::L1BatchTreeData, Address, L1BatchNumber}; use zksync_web3_decl::{ - client::{DynClient, L2}, - error::{ClientRpcContext, EnrichedClientError, EnrichedClientResult}, - namespaces::ZksNamespaceClient, + client::{DynClient, L1, L2}, + error::EnrichedClientError, }; -use self::metrics::{ProcessingStage, TreeDataFetcherMetrics, METRICS}; +use self::{ + metrics::{ProcessingStage, TreeDataFetcherMetrics, METRICS}, + provider::{L1DataProvider, MissingData, TreeDataProvider}, +}; mod metrics; +mod provider; #[cfg(test)] mod tests; -#[async_trait] -trait MainNodeClient: fmt::Debug + Send + Sync + 'static { - async fn batch_details( - &self, - number: L1BatchNumber, - ) -> EnrichedClientResult>; -} - -#[async_trait] -impl MainNodeClient for Box> { - async fn batch_details( - &self, - number: L1BatchNumber, - ) -> EnrichedClientResult> { - self.get_l1_batch_details(number) - .rpc_context("get_l1_batch_details") - .with_arg("number", &number) - .await - } -} - #[derive(Debug, thiserror::Error)] -enum TreeDataFetcherError { +pub(crate) enum TreeDataFetcherError { #[error("error fetching data from main node")] Rpc(#[from] EnrichedClientError), #[error("internal error")] @@ -67,6 +48,8 @@ impl TreeDataFetcherError { } } +type TreeDataFetcherResult = Result; + #[derive(Debug, Serialize)] #[serde(untagged)] enum TreeDataFetcherHealth { @@ -108,7 +91,7 @@ enum StepOutcome { /// by Consistency checker. #[derive(Debug)] pub struct TreeDataFetcher { - main_node_client: Box, + data_provider: Box, pool: ConnectionPool, metrics: &'static TreeDataFetcherMetrics, health_updater: HealthUpdater, @@ -123,7 +106,7 @@ impl TreeDataFetcher { /// Creates a new fetcher connected to the main node. pub fn new(client: Box>, pool: ConnectionPool) -> Self { Self { - main_node_client: Box::new(client.for_component("tree_data_fetcher")), + data_provider: Box::new(client.for_component("tree_data_fetcher")), pool, metrics: &METRICS, health_updater: ReactiveHealthCheck::new("tree_data_fetcher").1, @@ -133,6 +116,23 @@ impl TreeDataFetcher { } } + /// Attempts to fetch root hashes from L1 (namely, `BlockCommit` events emitted by the diamond proxy) if possible. + /// The main node will still be used as a fallback in case communicating with L1 fails, or for newer batches, + /// which may not be committed on L1. 
+ pub fn with_l1_data( + mut self, + eth_client: Box>, + diamond_proxy_address: Address, + ) -> anyhow::Result { + let l1_provider = L1DataProvider::new( + self.pool.clone(), + eth_client.for_component("tree_data_fetcher"), + diamond_proxy_address, + )?; + self.data_provider = Box::new(l1_provider.with_fallback(self.data_provider)); + Ok(self) + } + /// Returns a health check for this fetcher. pub fn health_check(&self) -> ReactiveHealthCheck { self.health_updater.subscribe() @@ -169,29 +169,30 @@ impl TreeDataFetcher { }) } - async fn step(&self) -> Result { + async fn step(&mut self) -> Result { let Some(l1_batch_to_fetch) = self.get_batch_to_fetch().await? else { return Ok(StepOutcome::NoProgress); }; tracing::debug!("Fetching tree data for L1 batch #{l1_batch_to_fetch} from main node"); let stage_latency = self.metrics.stage_latency[&ProcessingStage::Fetch].start(); - let batch_details = self - .main_node_client - .batch_details(l1_batch_to_fetch) - .await? - .with_context(|| { - format!( + let root_hash_result = self.data_provider.batch_details(l1_batch_to_fetch).await?; + stage_latency.observe(); + let root_hash = match root_hash_result { + Ok(hash) => hash, + Err(MissingData::Batch) => { + let err = anyhow::anyhow!( "L1 batch #{l1_batch_to_fetch} is sealed locally, but is not present on the main node, \ which is assumed to store batch info indefinitely" - ) - })?; - stage_latency.observe(); - let Some(root_hash) = batch_details.base.root_hash else { - tracing::debug!( - "L1 batch #{l1_batch_to_fetch} does not have root hash computed on the main node" - ); - return Ok(StepOutcome::RemoteHashMissing); + ); + return Err(err.into()); + } + Err(MissingData::RootHash) => { + tracing::debug!( + "L1 batch #{l1_batch_to_fetch} does not have root hash computed on the main node" + ); + return Ok(StepOutcome::RemoteHashMissing); + } }; let stage_latency = self.metrics.stage_latency[&ProcessingStage::Persistence].start(); @@ -224,7 +225,7 @@ impl TreeDataFetcher { /// Runs this component until a fatal error occurs or a stop signal is received. Transient errors /// (e.g., no network connection) are handled gracefully by retrying after a delay. - pub async fn run(self, mut stop_receiver: watch::Receiver) -> anyhow::Result<()> { + pub async fn run(mut self, mut stop_receiver: watch::Receiver) -> anyhow::Result<()> { self.metrics.observe_info(&self); self.health_updater .update(Health::from(HealthStatus::Ready)); diff --git a/core/node/node_sync/src/tree_data_fetcher/provider/mod.rs b/core/node/node_sync/src/tree_data_fetcher/provider/mod.rs new file mode 100644 index 000000000000..ae13d0849726 --- /dev/null +++ b/core/node/node_sync/src/tree_data_fetcher/provider/mod.rs @@ -0,0 +1,321 @@ +use std::fmt; + +use anyhow::Context; +use async_trait::async_trait; +use zksync_dal::{ConnectionPool, Core, CoreDal}; +use zksync_eth_client::EthInterface; +use zksync_types::{web3, Address, L1BatchNumber, H256, U256, U64}; +use zksync_web3_decl::{ + client::{DynClient, L1, L2}, + error::{ClientRpcContext, EnrichedClientError, EnrichedClientResult}, + jsonrpsee::core::ClientError, + namespaces::ZksNamespaceClient, +}; + +use super::TreeDataFetcherResult; + +#[cfg(test)] +mod tests; + +#[derive(Debug, thiserror::Error)] +pub(crate) enum MissingData { + /// The provider lacks a requested L1 batch. + #[error("no requested L1 batch")] + Batch, + /// The provider lacks a root hash for a requested L1 batch; the batch itself is present on the provider. 
+ #[error("no root hash for L1 batch")] + RootHash, +} + +/// External provider of tree data, such as main node (via JSON-RPC). +#[async_trait] +pub(crate) trait TreeDataProvider: fmt::Debug + Send + Sync + 'static { + /// Fetches a state root hash for the L1 batch with the specified number. + /// + /// It is guaranteed that this method will be called with monotonically increasing `number`s (although not necessarily sequential ones). + async fn batch_details( + &mut self, + number: L1BatchNumber, + ) -> TreeDataFetcherResult>; +} + +#[async_trait] +impl TreeDataProvider for Box> { + async fn batch_details( + &mut self, + number: L1BatchNumber, + ) -> TreeDataFetcherResult> { + let Some(batch_details) = self + .get_l1_batch_details(number) + .rpc_context("get_l1_batch_details") + .with_arg("number", &number) + .await? + else { + return Ok(Err(MissingData::Batch)); + }; + Ok(batch_details.base.root_hash.ok_or(MissingData::RootHash)) + } +} + +#[derive(Debug, Clone, Copy)] +struct PastL1BatchInfo { + number: L1BatchNumber, + l1_commit_block_number: U64, + l1_commit_block_timestamp: U256, +} + +/// Provider of tree data loading it from L1 `BlockCommit` events emitted by the diamond proxy contract. +/// Should be used together with an L2 provider because L1 data can be missing for latest batches, +/// and the provider implementation uses assumptions that can break in some corner cases. +/// +/// # Implementation details +/// +/// To limit the range of L1 blocks for `eth_getLogs` calls, the provider assumes that an L1 block with a `BlockCommit` event +/// for a certain L1 batch is relatively close to L1 batch sealing. Thus, the provider finds an approximate L1 block number +/// for the event using binary search, or uses an L1 block number of the `BlockCommit` event for the previously queried L1 batch +/// (provided it's not too far behind the seal timestamp of the batch). +#[derive(Debug)] +pub(super) struct L1DataProvider { + pool: ConnectionPool, + eth_client: Box>, + diamond_proxy_address: Address, + block_commit_signature: H256, + past_l1_batch: Option, +} + +impl L1DataProvider { + /// Accuracy when guessing L1 block number by L1 batch timestamp. + const L1_BLOCK_ACCURACY: U64 = U64([1_000]); + /// Range of L1 blocks queried via `eth_getLogs`. Should be at least several times greater than + /// `L1_BLOCK_ACCURACY`, but not large enough to trigger request limiting on the L1 RPC provider. + const L1_BLOCK_RANGE: U64 = U64([20_000]); + + pub fn new( + pool: ConnectionPool, + eth_client: Box>, + diamond_proxy_address: Address, + ) -> anyhow::Result { + let block_commit_signature = zksync_contracts::hyperchain_contract() + .event("BlockCommit") + .context("missing `BlockCommit` event")? + .signature(); + Ok(Self { + pool, + eth_client, + diamond_proxy_address, + block_commit_signature, + past_l1_batch: None, + }) + } + + async fn l1_batch_seal_timestamp(&self, number: L1BatchNumber) -> anyhow::Result { + let mut storage = self.pool.connection_tagged("tree_data_fetcher").await?; + let (_, last_l2_block_number) = storage + .blocks_dal() + .get_l2_block_range_of_l1_batch(number) + .await? + .with_context(|| format!("L1 batch #{number} does not have L2 blocks"))?; + let block_header = storage + .blocks_dal() + .get_l2_block_header(last_l2_block_number) + .await? 
+ .with_context(|| format!("L2 block #{last_l2_block_number} (last block in L1 batch #{number}) disappeared"))?; + Ok(block_header.timestamp) + } + + /// Guesses the number of an L1 block with a `BlockCommit` event for the specified L1 batch. + /// The guess is based on the L1 batch seal timestamp. + async fn guess_l1_commit_block_number( + eth_client: &DynClient, + l1_batch_seal_timestamp: u64, + ) -> EnrichedClientResult { + let l1_batch_seal_timestamp = U256::from(l1_batch_seal_timestamp); + let (latest_number, latest_timestamp) = + Self::get_block(eth_client, web3::BlockNumber::Latest).await?; + if latest_timestamp < l1_batch_seal_timestamp { + return Ok(latest_number); // No better estimate at this point + } + let (earliest_number, earliest_timestamp) = + Self::get_block(eth_client, web3::BlockNumber::Earliest).await?; + if earliest_timestamp > l1_batch_seal_timestamp { + return Ok(earliest_number); // No better estimate at this point + } + + // At this point, we have `earliest_timestamp <= l1_batch_seal_timestamp <= latest_timestamp`. + // Binary-search the range until we're sort of accurate. + let mut left = earliest_number; + let mut right = latest_number; + while left + Self::L1_BLOCK_ACCURACY < right { + let middle = (left + right) / 2; + let (_, middle_timestamp) = + Self::get_block(eth_client, web3::BlockNumber::Number(middle)).await?; + if middle_timestamp <= l1_batch_seal_timestamp { + left = middle; + } else { + right = middle; + } + } + Ok(left) + } + + /// Gets a block that should be present on L1. + async fn get_block( + eth_client: &DynClient, + number: web3::BlockNumber, + ) -> EnrichedClientResult<(U64, U256)> { + let block = eth_client.block(number.into()).await?.ok_or_else(|| { + let err = "block is missing on L1 RPC provider"; + EnrichedClientError::new(ClientError::Custom(err.into()), "get_block") + .with_arg("number", &number) + })?; + let number = block.number.ok_or_else(|| { + let err = "block is missing a number"; + EnrichedClientError::new(ClientError::Custom(err.into()), "get_block") + .with_arg("number", &number) + })?; + Ok((number, block.timestamp)) + } + + pub fn with_fallback(self, fallback: Box) -> CombinedDataProvider { + CombinedDataProvider { + l1: Some(self), + fallback, + } + } +} + +#[async_trait] +impl TreeDataProvider for L1DataProvider { + async fn batch_details( + &mut self, + number: L1BatchNumber, + ) -> TreeDataFetcherResult> { + let l1_batch_seal_timestamp = self.l1_batch_seal_timestamp(number).await?; + let from_block = self.past_l1_batch.and_then(|info| { + assert!( + info.number < number, + "`batch_details()` must be called with monotonically increasing numbers" + ); + let threshold_timestamp = info.l1_commit_block_timestamp + Self::L1_BLOCK_RANGE.as_u64() / 2; + if U256::from(l1_batch_seal_timestamp) > threshold_timestamp { + tracing::debug!( + number = number.0, + "L1 batch #{number} seal timestamp ({l1_batch_seal_timestamp}) is too far ahead \ + of the previous processed L1 batch ({info:?}); not using L1 batch info" + ); + None + } else { + // This is an exact lower boundary: L1 batches are committed in order + Some(info.l1_commit_block_number) + } + }); + + let from_block = match from_block { + Some(number) => number, + None => { + let approximate_block = Self::guess_l1_commit_block_number( + self.eth_client.as_ref(), + l1_batch_seal_timestamp, + ) + .await?; + tracing::debug!( + number = number.0, + "Guessed L1 block number for L1 batch #{number} commit: {approximate_block}" + ); + // Subtract to account for imprecise L1 and L2 
timestamps etc. + approximate_block.saturating_sub(Self::L1_BLOCK_ACCURACY) + } + }; + + let number_topic = H256::from_low_u64_be(number.0.into()); + let filter = web3::FilterBuilder::default() + .address(vec![self.diamond_proxy_address]) + .from_block(web3::BlockNumber::Number(from_block)) + .to_block(web3::BlockNumber::Number(from_block + Self::L1_BLOCK_RANGE)) + .topics( + Some(vec![self.block_commit_signature]), + Some(vec![number_topic]), + None, + None, + ) + .build(); + let mut logs = self.eth_client.logs(&filter).await?; + logs.retain(|log| !log.is_removed() && log.block_number.is_some()); + + match logs.as_slice() { + [] => Ok(Err(MissingData::Batch)), + [log] => { + let root_hash_topic = log.topics.get(2).copied().ok_or_else(|| { + let err = "Bogus `BlockCommit` event, does not have the root hash topic"; + EnrichedClientError::new(ClientError::Custom(err.into()), "batch_details") + .with_arg("filter", &filter) + .with_arg("log", &log) + })?; + // `unwrap()` is safe due to the filtering above + let l1_commit_block_number = log.block_number.unwrap(); + + let l1_commit_block = self.eth_client.block(l1_commit_block_number.into()).await?; + let l1_commit_block = l1_commit_block.ok_or_else(|| { + let err = "Block disappeared from L1 RPC provider"; + EnrichedClientError::new(ClientError::Custom(err.into()), "batch_details") + .with_arg("number", &l1_commit_block_number) + })?; + self.past_l1_batch = Some(PastL1BatchInfo { + number, + l1_commit_block_number, + l1_commit_block_timestamp: l1_commit_block.timestamp, + }); + Ok(Ok(root_hash_topic)) + } + _ => { + tracing::warn!("Non-unique `BlockCommit` event for L1 batch #{number} queried using {filter:?}: {logs:?}"); + Ok(Err(MissingData::RootHash)) + } + } + } +} + +/// Data provider combining [`L1DataProvider`] with a fallback provider. +#[derive(Debug)] +pub(super) struct CombinedDataProvider { + l1: Option, + fallback: Box, +} + +#[async_trait] +impl TreeDataProvider for CombinedDataProvider { + async fn batch_details( + &mut self, + number: L1BatchNumber, + ) -> TreeDataFetcherResult> { + if let Some(l1) = &mut self.l1 { + match l1.batch_details(number).await { + Err(err) => { + if err.is_transient() { + tracing::info!( + number = number.0, + "Transient error calling L1 data provider: {err}" + ); + } else { + tracing::warn!( + number = number.0, + "Fatal error calling L1 data provider: {err}" + ); + self.l1 = None; + } + } + Ok(Ok(root_hash)) => return Ok(Ok(root_hash)), + Ok(Err(missing_data)) => { + tracing::debug!( + number = number.0, + "L1 data provider misses batch data: {missing_data}" + ); + // No sense of calling the L1 provider in the future; the L2 provider will very likely get information + // about batches significantly faster. + self.l1 = None; + } + } + } + self.fallback.batch_details(number).await + } +} diff --git a/core/node/node_sync/src/tree_data_fetcher/provider/tests.rs b/core/node/node_sync/src/tree_data_fetcher/provider/tests.rs new file mode 100644 index 000000000000..8bb5cc63390e --- /dev/null +++ b/core/node/node_sync/src/tree_data_fetcher/provider/tests.rs @@ -0,0 +1,244 @@ +//! Tests for tree data providers. 
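// A minimal, self-contained sketch (not part of this patch) of the block-number estimation that
// `L1DataProvider::guess_l1_commit_block_number` performs above, reduced to a plain slice of block
// timestamps. It assumes timestamps are non-decreasing in block number, which is what the binary
// search relies on; `accuracy` plays the role of `L1_BLOCK_ACCURACY`.
fn guess_block_by_timestamp(timestamps: &[u64], target: u64, accuracy: u64) -> u64 {
    assert!(!timestamps.is_empty());
    let (mut left, mut right) = (0_u64, timestamps.len() as u64 - 1);
    if timestamps[right as usize] < target {
        return right; // the latest block is the best available estimate
    }
    if timestamps[left as usize] > target {
        return left; // the earliest block is the best available estimate
    }
    // Invariant: timestamps[left] <= target <= timestamps[right].
    while left + accuracy < right {
        let middle = (left + right) / 2;
        if timestamps[middle as usize] <= target {
            left = middle;
        } else {
            right = middle;
        }
    }
    left
}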
+ +use assert_matches::assert_matches; +use once_cell::sync::Lazy; +use test_casing::test_casing; +use zksync_node_genesis::{insert_genesis_batch, GenesisParams}; +use zksync_web3_decl::client::MockClient; + +use super::*; +use crate::tree_data_fetcher::tests::{seal_l1_batch_with_timestamp, MockMainNodeClient}; + +const DIAMOND_PROXY_ADDRESS: Address = Address::repeat_byte(0x22); + +static BLOCK_COMMIT_SIGNATURE: Lazy = Lazy::new(|| { + zksync_contracts::hyperchain_contract() + .event("BlockCommit") + .expect("missing `BlockCommit` event") + .signature() +}); + +struct EthereumParameters { + block_number: U64, + // L1 block numbers in which L1 batches are committed starting from L1 batch #1 + l1_blocks_for_commits: Vec, +} + +impl EthereumParameters { + fn new(block_number: u64) -> Self { + Self { + block_number: block_number.into(), + l1_blocks_for_commits: vec![], + } + } + + fn push_commit(&mut self, l1_block_number: u64) { + assert!(l1_block_number <= self.block_number.as_u64()); + + let l1_block_number = U64::from(l1_block_number); + let last_commit = self.l1_blocks_for_commits.last().copied(); + let is_increasing = last_commit.map_or(true, |last_number| last_number <= l1_block_number); + assert!(is_increasing, "Invalid L1 block number for commit"); + + self.l1_blocks_for_commits.push(l1_block_number); + } + + fn filter_logs(logs: &[web3::Log], filter: web3::Filter) -> Vec { + let Some(web3::BlockNumber::Number(filter_from)) = filter.from_block else { + panic!("Unexpected filter: {filter:?}"); + }; + let Some(web3::BlockNumber::Number(filter_to)) = filter.to_block else { + panic!("Unexpected filter: {filter:?}"); + }; + let filter_block_range = filter_from..=filter_to; + + let filter_addresses = filter.address.unwrap().flatten(); + let filter_topics = filter.topics.unwrap(); + let filter_topics: Vec<_> = filter_topics + .into_iter() + .map(|topic| topic.map(web3::ValueOrArray::flatten)) + .collect(); + + let filtered_logs = logs.iter().filter(|log| { + if !filter_addresses.contains(&log.address) { + return false; + } + if !filter_block_range.contains(&log.block_number.unwrap()) { + return false; + } + filter_topics + .iter() + .zip(&log.topics) + .all(|(filter_topics, actual_topic)| match filter_topics { + Some(topics) => topics.contains(actual_topic), + None => true, + }) + }); + filtered_logs.cloned().collect() + } + + fn client(&self) -> MockClient { + let logs = self + .l1_blocks_for_commits + .iter() + .enumerate() + .map(|(i, &l1_block_number)| { + let l1_batch_number = H256::from_low_u64_be(i as u64 + 1); + let root_hash = H256::repeat_byte(i as u8 + 1); + web3::Log { + address: DIAMOND_PROXY_ADDRESS, + topics: vec![ + *BLOCK_COMMIT_SIGNATURE, + l1_batch_number, + root_hash, + H256::zero(), // commitment hash; not used + ], + block_number: Some(l1_block_number), + ..web3::Log::default() + } + }); + let logs: Vec<_> = logs.collect(); + let block_number = self.block_number; + + MockClient::builder(L1::default()) + .method("eth_blockNumber", move || Ok(block_number)) + .method( + "eth_getBlockByNumber", + move |number: web3::BlockNumber, with_txs: bool| { + assert!(!with_txs); + + let number = match number { + web3::BlockNumber::Number(number) => number, + web3::BlockNumber::Latest => block_number, + web3::BlockNumber::Earliest => U64::zero(), + _ => panic!("Unexpected number: {number:?}"), + }; + if number > block_number { + return Ok(None); + } + Ok(Some(web3::Block:: { + number: Some(number), + timestamp: U256::from(number.as_u64()), // timestamp == number + 
..web3::Block::default() + })) + }, + ) + .method("eth_getLogs", move |filter: web3::Filter| { + Ok(Self::filter_logs(&logs, filter)) + }) + .build() + } +} + +#[tokio::test] +async fn guessing_l1_commit_block_number() { + let eth_params = EthereumParameters::new(100_000); + let eth_client = eth_params.client(); + + for timestamp in [0, 100, 1_000, 5_000, 10_000, 100_000] { + let guessed_block_number = + L1DataProvider::guess_l1_commit_block_number(ð_client, timestamp) + .await + .unwrap(); + + assert!( + guessed_block_number.abs_diff(timestamp.into()) <= L1DataProvider::L1_BLOCK_ACCURACY, + "timestamp={timestamp}, guessed={guessed_block_number}" + ); + } +} + +async fn test_using_l1_data_provider(l1_batch_timestamps: &[u64]) { + let pool = ConnectionPool::::test_pool().await; + let mut storage = pool.connection().await.unwrap(); + insert_genesis_batch(&mut storage, &GenesisParams::mock()) + .await + .unwrap(); + + let mut eth_params = EthereumParameters::new(1_000_000); + for (number, &ts) in l1_batch_timestamps.iter().enumerate() { + let number = L1BatchNumber(number as u32 + 1); + seal_l1_batch_with_timestamp(&mut storage, number, ts).await; + eth_params.push_commit(ts + 1_000); // have a reasonable small diff between batch generation and commitment + } + drop(storage); + + let mut provider = + L1DataProvider::new(pool, Box::new(eth_params.client()), DIAMOND_PROXY_ADDRESS).unwrap(); + for i in 0..l1_batch_timestamps.len() { + let number = L1BatchNumber(i as u32 + 1); + let root_hash = provider + .batch_details(number) + .await + .unwrap() + .expect("no root hash"); + assert_eq!(root_hash, H256::repeat_byte(number.0 as u8)); + + let past_l1_batch = provider.past_l1_batch.unwrap(); + assert_eq!(past_l1_batch.number, number); + let expected_l1_block_number = eth_params.l1_blocks_for_commits[i]; + assert_eq!( + past_l1_batch.l1_commit_block_number, + expected_l1_block_number + ); + assert_eq!( + past_l1_batch.l1_commit_block_timestamp, + expected_l1_block_number.as_u64().into() + ); + } +} + +#[test_casing(4, [500, 1_500, 10_000, 30_000])] +#[tokio::test] +async fn using_l1_data_provider(batch_spacing: u64) { + let l1_batch_timestamps: Vec<_> = (0..10).map(|i| 50_000 + batch_spacing * i).collect(); + test_using_l1_data_provider(&l1_batch_timestamps).await; +} + +#[tokio::test] +async fn combined_data_provider_errors() { + let pool = ConnectionPool::::test_pool().await; + let mut storage = pool.connection().await.unwrap(); + insert_genesis_batch(&mut storage, &GenesisParams::mock()) + .await + .unwrap(); + + let mut eth_params = EthereumParameters::new(1_000_000); + seal_l1_batch_with_timestamp(&mut storage, L1BatchNumber(1), 50_000).await; + eth_params.push_commit(51_000); + seal_l1_batch_with_timestamp(&mut storage, L1BatchNumber(2), 52_000).await; + drop(storage); + + let mut main_node_client = MockMainNodeClient::default(); + main_node_client.insert_batch(L1BatchNumber(2), H256::repeat_byte(2)); + let mut provider = + L1DataProvider::new(pool, Box::new(eth_params.client()), DIAMOND_PROXY_ADDRESS) + .unwrap() + .with_fallback(Box::new(main_node_client)); + + // L1 batch #1 should be obtained from L1 + let root_hash = provider + .batch_details(L1BatchNumber(1)) + .await + .unwrap() + .expect("no root hash"); + assert_eq!(root_hash, H256::repeat_byte(1)); + assert!(provider.l1.is_some()); + + // L1 batch #2 should be obtained from L2 + let root_hash = provider + .batch_details(L1BatchNumber(2)) + .await + .unwrap() + .expect("no root hash"); + assert_eq!(root_hash, H256::repeat_byte(2)); + 
assert!(provider.l1.is_none()); + + // L1 batch #3 is not present anywhere. + let missing = provider + .batch_details(L1BatchNumber(3)) + .await + .unwrap() + .unwrap_err(); + assert_matches!(missing, MissingData::Batch); +} diff --git a/core/node/node_sync/src/tree_data_fetcher/tests.rs b/core/node/node_sync/src/tree_data_fetcher/tests.rs index d1192e3ea942..cb25842f0517 100644 --- a/core/node/node_sync/src/tree_data_fetcher/tests.rs +++ b/core/node/node_sync/src/tree_data_fetcher/tests.rs @@ -8,64 +8,78 @@ use std::{ }; use assert_matches::assert_matches; +use async_trait::async_trait; use test_casing::test_casing; use zksync_dal::Connection; use zksync_node_genesis::{insert_genesis_batch, GenesisParams}; -use zksync_node_test_utils::{create_l1_batch, prepare_recovery_snapshot}; +use zksync_node_test_utils::{create_l1_batch, create_l2_block, prepare_recovery_snapshot}; use zksync_types::{AccountTreeId, Address, L2BlockNumber, StorageKey, StorageLog, H256}; use zksync_web3_decl::jsonrpsee::core::ClientError; use super::{metrics::StepOutcomeLabel, *}; #[derive(Debug, Default)] -struct MockMainNodeClient { +pub(super) struct MockMainNodeClient { transient_error: Arc, - batch_details_responses: HashMap, + batch_details_responses: HashMap, +} + +impl MockMainNodeClient { + pub fn insert_batch(&mut self, number: L1BatchNumber, root_hash: H256) { + self.batch_details_responses.insert(number, root_hash); + } } #[async_trait] -impl MainNodeClient for MockMainNodeClient { +impl TreeDataProvider for MockMainNodeClient { async fn batch_details( - &self, + &mut self, number: L1BatchNumber, - ) -> EnrichedClientResult> { + ) -> TreeDataFetcherResult> { if self.transient_error.fetch_and(false, Ordering::Relaxed) { let err = ClientError::RequestTimeout; - return Err(EnrichedClientError::new(err, "batch_details")); + return Err(EnrichedClientError::new(err, "batch_details").into()); } - Ok(self.batch_details_responses.get(&number).cloned()) + Ok(self + .batch_details_responses + .get(&number) + .copied() + .ok_or(MissingData::Batch)) } } -fn mock_l1_batch_details(number: L1BatchNumber, root_hash: Option) -> api::L1BatchDetails { - api::L1BatchDetails { - number, - base: api::BlockDetailsBase { - timestamp: number.0.into(), - l1_tx_count: 0, - l2_tx_count: 10, - root_hash, - status: api::BlockStatus::Sealed, - commit_tx_hash: None, - committed_at: None, - prove_tx_hash: None, - proven_at: None, - execute_tx_hash: None, - executed_at: None, - l1_gas_price: 123, - l2_fair_gas_price: 456, - base_system_contracts_hashes: Default::default(), - }, - } +async fn seal_l1_batch(storage: &mut Connection<'_, Core>, number: L1BatchNumber) { + seal_l1_batch_with_timestamp(storage, number, number.0.into()).await; } -async fn seal_l1_batch(storage: &mut Connection<'_, Core>, number: L1BatchNumber) { +pub(super) async fn seal_l1_batch_with_timestamp( + storage: &mut Connection<'_, Core>, + number: L1BatchNumber, + timestamp: u64, +) { let mut transaction = storage.start_transaction().await.unwrap(); + // Insert a single L2 block belonging to the batch. 
+ let mut block_header = create_l2_block(number.0); + block_header.timestamp = timestamp; + transaction + .blocks_dal() + .insert_l2_block(&block_header) + .await + .unwrap(); + + let mut batch_header = create_l1_batch(number.0); + batch_header.timestamp = timestamp; + transaction + .blocks_dal() + .insert_mock_l1_batch(&batch_header) + .await + .unwrap(); transaction .blocks_dal() - .insert_mock_l1_batch(&create_l1_batch(number.0)) + .mark_l2_blocks_as_executed_in_l1_batch(batch_header.number) .await .unwrap(); + // One initial write per L1 batch let initial_writes = [StorageKey::new( AccountTreeId::new(Address::repeat_byte(1)), @@ -87,11 +101,11 @@ struct FetcherHarness { } impl FetcherHarness { - fn new(client: impl MainNodeClient, pool: ConnectionPool) -> Self { + fn new(client: impl TreeDataProvider, pool: ConnectionPool) -> Self { let (updates_sender, updates_receiver) = mpsc::unbounded_channel(); let metrics = &*Box::leak(Box::::default()); let fetcher = TreeDataFetcher { - main_node_client: Box::new(client), + data_provider: Box::new(client), pool: pool.clone(), metrics, health_updater: ReactiveHealthCheck::new("tree_data_fetcher").1, @@ -117,12 +131,13 @@ async fn tree_data_fetcher_steps() { let mut client = MockMainNodeClient::default(); for number in 1..=5 { let number = L1BatchNumber(number); - let details = mock_l1_batch_details(number, Some(H256::from_low_u64_be(number.0.into()))); - client.batch_details_responses.insert(number, details); + client + .batch_details_responses + .insert(number, H256::from_low_u64_be(number.0.into())); seal_l1_batch(&mut storage, number).await; } - let fetcher = FetcherHarness::new(client, pool.clone()).fetcher; + let mut fetcher = FetcherHarness::new(client, pool.clone()).fetcher; for number in 1..=5 { let step_outcome = fetcher.step().await.unwrap(); assert_matches!( @@ -181,12 +196,13 @@ async fn tree_data_fetcher_steps_after_snapshot_recovery() { let mut client = MockMainNodeClient::default(); for i in 1..=5 { let number = snapshot.l1_batch_number + i; - let details = mock_l1_batch_details(number, Some(H256::from_low_u64_be(number.0.into()))); - client.batch_details_responses.insert(number, details); + client + .batch_details_responses + .insert(number, H256::from_low_u64_be(number.0.into())); seal_l1_batch(&mut storage, number).await; } - let fetcher = FetcherHarness::new(client, pool.clone()).fetcher; + let mut fetcher = FetcherHarness::new(client, pool.clone()).fetcher; for i in 1..=5 { let step_outcome = fetcher.step().await.unwrap(); assert_matches!( @@ -212,8 +228,9 @@ async fn tree_data_fetcher_recovers_from_transient_errors() { let mut client = MockMainNodeClient::default(); for number in 1..=5 { let number = L1BatchNumber(number); - let details = mock_l1_batch_details(number, Some(H256::from_low_u64_be(number.0.into()))); - client.batch_details_responses.insert(number, details); + client + .batch_details_responses + .insert(number, H256::from_low_u64_be(number.0.into())); } let transient_error = client.transient_error.clone(); @@ -278,21 +295,20 @@ impl SlowMainNode { } #[async_trait] -impl MainNodeClient for SlowMainNode { +impl TreeDataProvider for SlowMainNode { async fn batch_details( - &self, + &mut self, number: L1BatchNumber, - ) -> EnrichedClientResult> { + ) -> TreeDataFetcherResult> { if number != L1BatchNumber(1) { - return Ok(None); + return Ok(Err(MissingData::Batch)); } let request_count = self.request_count.fetch_add(1, Ordering::Relaxed); - let root_hash = if request_count >= self.compute_root_hash_after { - 
Some(H256::repeat_byte(1)) + Ok(if request_count >= self.compute_root_hash_after { + Ok(H256::repeat_byte(1)) } else { - None - }; - Ok(Some(mock_l1_batch_details(number, root_hash))) + Err(MissingData::RootHash) + }) } } diff --git a/core/tests/loadnext/src/sdk/ethereum/mod.rs b/core/tests/loadnext/src/sdk/ethereum/mod.rs index 1c45d8b5b56a..6800fb75a7d3 100644 --- a/core/tests/loadnext/src/sdk/ethereum/mod.rs +++ b/core/tests/loadnext/src/sdk/ethereum/mod.rs @@ -4,7 +4,8 @@ use std::time::{Duration, Instant}; use serde_json::{Map, Value}; use zksync_eth_client::{ - clients::SigningClient, BoundEthInterface, CallFunctionArgs, Error, EthInterface, Options, + clients::SigningClient, BoundEthInterface, CallFunctionArgs, ContractCallError, EthInterface, + Options, }; use zksync_eth_signer::EthereumSigner; use zksync_types::{ @@ -158,7 +159,9 @@ impl EthereumProvider { .call(self.query_client()) .await .map_err(|err| match err { - Error::EthereumGateway(err) => ClientError::NetworkError(err.to_string()), + ContractCallError::EthereumGateway(err) => { + ClientError::NetworkError(err.to_string()) + } _ => ClientError::MalformedResponse(err.to_string()), }) } @@ -193,7 +196,9 @@ impl EthereumProvider { .call(self.query_client()) .await .map_err(|err| match err { - Error::EthereumGateway(err) => ClientError::NetworkError(err.to_string()), + ContractCallError::EthereumGateway(err) => { + ClientError::NetworkError(err.to_string()) + } _ => ClientError::MalformedResponse(err.to_string()), }) } @@ -360,7 +365,7 @@ impl EthereumProvider { gas_limit: U256, gas_per_pubdata_byte: u32, gas_price: Option, - ) -> Result { + ) -> Result { let gas_price = if let Some(gas_price) = gas_price { gas_price } else { diff --git a/core/tests/ts-integration/contracts/custom-account/interfaces/ISystemContext.sol b/core/tests/ts-integration/contracts/custom-account/interfaces/ISystemContext.sol new file mode 100644 index 000000000000..6b83f6d6ada4 --- /dev/null +++ b/core/tests/ts-integration/contracts/custom-account/interfaces/ISystemContext.sol @@ -0,0 +1,61 @@ +// SPDX-License-Identifier: MIT + +pragma solidity ^0.8.0; + +/** + * @author Matter Labs + * @custom:security-contact security@matterlabs.dev + * @notice Contract that stores some of the context variables, that may be either + * block-scoped, tx-scoped or system-wide. + */ +interface ISystemContext { + struct BlockInfo { + uint128 timestamp; + uint128 number; + } + + /// @notice A structure representing the timeline for the upgrade from the batch numbers to the L2 block numbers. + /// @dev It will be used for the L1 batch -> L2 block migration in Q3 2023 only. + struct VirtualBlockUpgradeInfo { + /// @notice In order to maintain consistent results for `blockhash` requests, we'll + /// have to remember the number of the batch when the upgrade to the virtual blocks has been done. + /// The hashes for virtual blocks before the upgrade are identical to the hashes of the corresponding batches. + uint128 virtualBlockStartBatch; + /// @notice L2 block when the virtual blocks have caught up with the L2 blocks. Starting from this block, + /// all the information returned to users for block.timestamp/number, etc should be the information about the L2 blocks and + /// not virtual blocks. 
+ uint128 virtualBlockFinishL2Block; + } + + function chainId() external view returns (uint256); + + function origin() external view returns (address); + + function gasPrice() external view returns (uint256); + + function blockGasLimit() external view returns (uint256); + + function coinbase() external view returns (address); + + function difficulty() external view returns (uint256); + + function baseFee() external view returns (uint256); + + function txNumberInBlock() external view returns (uint16); + + function getBlockHashEVM(uint256 _block) external view returns (bytes32); + + function getBatchHash(uint256 _batchNumber) external view returns (bytes32 hash); + + function getBlockNumber() external view returns (uint128); + + function getBlockTimestamp() external view returns (uint128); + + function getBatchNumberAndTimestamp() external view returns (uint128 blockNumber, uint128 blockTimestamp); + + function getL2BlockNumberAndTimestamp() external view returns (uint128 blockNumber, uint128 blockTimestamp); + + function gasPerPubdataByte() external view returns (uint256 gasPerPubdataByte); + + function getCurrentPubdataSpent() external view returns (uint256 currentPubdataSpent); +} diff --git a/core/tests/ts-integration/src/env.ts b/core/tests/ts-integration/src/env.ts index ada8a695e0aa..ddbb8227dc60 100644 --- a/core/tests/ts-integration/src/env.ts +++ b/core/tests/ts-integration/src/env.ts @@ -222,8 +222,10 @@ export async function loadTestEnvironmentFromEnv(): Promise { const baseTokenAddressL2 = L2_BASE_TOKEN_ADDRESS; const l2ChainId = parseInt(process.env.CHAIN_ETH_ZKSYNC_NETWORK_ID!); - const l1BatchCommitDataGeneratorMode = process.env - .CHAIN_STATE_KEEPER_L1_BATCH_COMMIT_DATA_GENERATOR_MODE! as DataAvailabityMode; + // If the `CHAIN_STATE_KEEPER_L1_BATCH_COMMIT_DATA_GENERATOR_MODE` is not set, the default value is `Rollup`. 
+ const l1BatchCommitDataGeneratorMode = (process.env.CHAIN_STATE_KEEPER_L1_BATCH_COMMIT_DATA_GENERATOR_MODE || + process.env.EN_L1_BATCH_COMMIT_DATA_GENERATOR_MODE || + 'Rollup') as DataAvailabityMode; let minimalL2GasPrice; if (process.env.CHAIN_STATE_KEEPER_MINIMAL_L2_GAS_PRICE !== undefined) { minimalL2GasPrice = ethers.BigNumber.from(process.env.CHAIN_STATE_KEEPER_MINIMAL_L2_GAS_PRICE!); diff --git a/core/tests/ts-integration/src/helpers.ts b/core/tests/ts-integration/src/helpers.ts index 966a77b3fb8e..d3464bc84bdd 100644 --- a/core/tests/ts-integration/src/helpers.ts +++ b/core/tests/ts-integration/src/helpers.ts @@ -4,6 +4,8 @@ import * as ethers from 'ethers'; import * as hre from 'hardhat'; import { ZkSyncArtifact } from '@matterlabs/hardhat-zksync-solc/dist/src/types'; +export const SYSTEM_CONTEXT_ADDRESS = '0x000000000000000000000000000000000000800b'; + /** * Loads the test contract * diff --git a/core/tests/ts-integration/tests/fees.test.ts b/core/tests/ts-integration/tests/fees.test.ts index a2a72cfa5be3..699b9e5e886b 100644 --- a/core/tests/ts-integration/tests/fees.test.ts +++ b/core/tests/ts-integration/tests/fees.test.ts @@ -16,8 +16,11 @@ import { TestMaster } from '../src/index'; import * as zksync from 'zksync-ethers'; import { BigNumber, ethers } from 'ethers'; import { DataAvailabityMode, Token } from '../src/types'; +import { keccak256 } from 'ethers/lib/utils'; +import { SYSTEM_CONTEXT_ADDRESS, getTestContract } from '../src/helpers'; const UINT32_MAX = BigNumber.from(2).pow(32).sub(1); +const MAX_GAS_PER_PUBDATA = 50_000; const logs = fs.createWriteStream('fees.log', { flags: 'a' }); @@ -168,6 +171,15 @@ testFees('Test fees', () => { const receipt = await tx.wait(); expect(receipt.gasUsed.gt(UINT32_MAX)).toBeTruthy(); + // Let's also check that the same transaction would work as eth_call + const systemContextArtifact = getTestContract('ISystemContext'); + const systemContext = new ethers.Contract(SYSTEM_CONTEXT_ADDRESS, systemContextArtifact.abi, alice.provider); + const systemContextGasPerPubdataByte = await systemContext.gasPerPubdataByte(); + expect(systemContextGasPerPubdataByte.toNumber()).toEqual(MAX_GAS_PER_PUBDATA); + + const dataHash = await l1Messenger.callStatic.sendToL1(largeData, { type: 0 }); + expect(dataHash).toEqual(keccak256(largeData)); + // Secondly, let's test an unsuccessful transaction with large refund. // The size of the data has increased, so the previous gas limit is not enough. 
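The `env.ts` change above falls back across two environment variables before defaulting to rollup mode. A minimal sketch of the same lookup order, written in Rust purely for illustration (the variable names are taken from the TypeScript change; the string values mirror the `DataAvailabityMode` variants):

fn l1_batch_commit_data_generator_mode() -> String {
    // Prefer the main-node variable, then the external-node one, then default to rollup mode.
    std::env::var("CHAIN_STATE_KEEPER_L1_BATCH_COMMIT_DATA_GENERATOR_MODE")
        .or_else(|_| std::env::var("EN_L1_BATCH_COMMIT_DATA_GENERATOR_MODE"))
        .unwrap_or_else(|_| "Rollup".to_string())
}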
diff --git a/core/tests/ts-integration/tests/system.test.ts b/core/tests/ts-integration/tests/system.test.ts index c46916c4ec67..2934226eed8f 100644 --- a/core/tests/ts-integration/tests/system.test.ts +++ b/core/tests/ts-integration/tests/system.test.ts @@ -13,7 +13,8 @@ import * as zksync from 'zksync-ethers'; import * as ethers from 'ethers'; import { BigNumberish, BytesLike } from 'ethers'; import { hashBytecode, serialize } from 'zksync-ethers/build/utils'; -import { getTestContract } from '../src/helpers'; +import { SYSTEM_CONTEXT_ADDRESS, getTestContract } from '../src/helpers'; +import { DataAvailabityMode } from '../src/types'; const contracts = { counter: getTestContract('Counter'), @@ -311,6 +312,20 @@ describe('System behavior checks', () => { ).toBeAccepted(); }); + test('Gas per pubdata byte getter should work', async () => { + const systemContextArtifact = getTestContract('ISystemContext'); + const systemContext = new ethers.Contract(SYSTEM_CONTEXT_ADDRESS, systemContextArtifact.abi, alice.provider); + const currentGasPerPubdata = await systemContext.gasPerPubdataByte(); + + // The current gas per pubdata depends on a lot of factors, so it wouldn't be sustainable to check the exact value. + // We'll just check that it is greater than zero. + if (testMaster.environment().l1BatchCommitDataGeneratorMode === DataAvailabityMode.Rollup) { + expect(currentGasPerPubdata.toNumber()).toBeGreaterThan(0); + } else { + expect(currentGasPerPubdata.toNumber()).toEqual(0); + } + }); + it('should reject transaction with huge gas limit', async () => { await expect( alice.sendTransaction({ to: alice.address, gasLimit: ethers.BigNumber.from(2).pow(51) }) diff --git a/prover/Cargo.lock b/prover/Cargo.lock index 991fcd0ac567..fcd32d3607a0 100644 --- a/prover/Cargo.lock +++ b/prover/Cargo.lock @@ -673,7 +673,7 @@ dependencies = [ "blake2 0.10.6 (registry+https://github.com/rust-lang/crates.io-index)", "const_format", "convert_case", - "crossbeam 0.8.4", + "crossbeam 0.7.3", "crypto-bigint 0.5.5", "cs_derive 0.1.0 (git+https://github.com/matter-labs/era-boojum?branch=main)", "derivative", @@ -2526,7 +2526,7 @@ dependencies = [ "crossbeam 0.7.3", "franklin-crypto 0.0.5 (git+https://github.com/matter-labs/franklin-crypto?branch=snark_wrapper)", "gpu-ffi", - "itertools 0.10.5", + "itertools 0.11.0", "num_cpus", "rand 0.4.6", "serde", @@ -4680,7 +4680,7 @@ dependencies = [ name = "prover_version" version = "0.1.0" dependencies = [ - "zksync_types", + "zksync_prover_fri_types", ] [[package]] @@ -7606,7 +7606,7 @@ dependencies = [ [[package]] name = "zk_evm" version = "1.5.0" -source = "git+https://github.com/matter-labs/era-zk_evm.git?branch=v1.5.0#9bbf7ffd2c38ee8b9667e96eaf0c111037fe976f" +source = "git+https://github.com/matter-labs/era-zk_evm.git?branch=v1.5.0#0c5cdca00cca4fa0a8c49147a11048c24f8a4b12" dependencies = [ "anyhow", "lazy_static", @@ -7757,7 +7757,7 @@ dependencies = [ [[package]] name = "zkevm_circuits" version = "1.5.0" -source = "git+https://github.com/matter-labs/era-zkevm_circuits.git?branch=v1.5.0#a93a3a5c34ec1ec31d73191d11ab00b4d8215a3f" +source = "git+https://github.com/matter-labs/era-zkevm_circuits.git?branch=v1.5.0#b7a86c739e8a8f88e788e90893c6e7496f6d7dfc" dependencies = [ "arrayvec 0.7.4", "boojum", @@ -7815,7 +7815,7 @@ dependencies = [ [[package]] name = "zkevm_opcode_defs" version = "1.5.0" -source = "git+https://github.com/matter-labs/era-zkevm_opcode_defs.git?branch=v1.5.0#109d9f734804a8b9dc0531c0b576e2a0f55a40de" +source = 
"git+https://github.com/matter-labs/era-zkevm_opcode_defs.git?branch=v1.5.0#28d2edabf902ea9b08f6a26a4506831fd89346b9" dependencies = [ "bitflags 2.4.2", "blake2 0.10.6 (registry+https://github.com/rust-lang/crates.io-index)", @@ -8192,6 +8192,7 @@ dependencies = [ "google-cloud-storage", "http", "prost 0.12.3", + "rand 0.8.5", "serde_json", "tokio", "tracing", diff --git a/prover/proof_fri_compressor/src/main.rs b/prover/proof_fri_compressor/src/main.rs index ec66515b6a35..9786170874ec 100644 --- a/prover/proof_fri_compressor/src/main.rs +++ b/prover/proof_fri_compressor/src/main.rs @@ -10,8 +10,8 @@ use tokio::sync::{oneshot, watch}; use zksync_config::configs::{DatabaseSecrets, FriProofCompressorConfig, ObservabilityConfig}; use zksync_env_config::{object_store::ProverObjectStoreConfig, FromEnv}; use zksync_object_store::ObjectStoreFactory; +use zksync_prover_fri_types::PROVER_PROTOCOL_SEMANTIC_VERSION; use zksync_queued_job_processor::JobProcessor; -use zksync_types::protocol_version::ProtocolSemanticVersion; use zksync_utils::wait_for_tasks::ManagedTasks; use crate::{ @@ -73,7 +73,7 @@ async fn main() -> anyhow::Result<()> { .create_store() .await; - let protocol_version = ProtocolSemanticVersion::current_prover_version(); + let protocol_version = PROVER_PROTOCOL_SEMANTIC_VERSION; let proof_compressor = ProofCompressor::new( blob_store, diff --git a/prover/prover_dal/.sqlx/query-7496a9df12e409162338266085ce27807ede6b4db9541198cee2861b874b52f9.json b/prover/prover_dal/.sqlx/query-7496a9df12e409162338266085ce27807ede6b4db9541198cee2861b874b52f9.json deleted file mode 100644 index 1a8ebf4e4253..000000000000 --- a/prover/prover_dal/.sqlx/query-7496a9df12e409162338266085ce27807ede6b4db9541198cee2861b874b52f9.json +++ /dev/null @@ -1,32 +0,0 @@ -{ - "db_name": "PostgreSQL", - "query": "\n SELECT\n protocol_version,\n COUNT(*) FILTER (\n WHERE\n status = 'queued'\n ) AS queued,\n COUNT(*) FILTER (\n WHERE\n status = 'in_progress'\n ) AS in_progress\n FROM\n proof_compression_jobs_fri\n WHERE\n protocol_version IS NOT NULL\n GROUP BY\n protocol_version\n ", - "describe": { - "columns": [ - { - "ordinal": 0, - "name": "protocol_version", - "type_info": "Int4" - }, - { - "ordinal": 1, - "name": "queued", - "type_info": "Int8" - }, - { - "ordinal": 2, - "name": "in_progress", - "type_info": "Int8" - } - ], - "parameters": { - "Left": [] - }, - "nullable": [ - true, - null, - null - ] - }, - "hash": "7496a9df12e409162338266085ce27807ede6b4db9541198cee2861b874b52f9" -} diff --git a/prover/prover_dal/.sqlx/query-5449963638944edc62b5ec293bcda37e6c0c6c85fe5f701ebc9919fcad749a23.json b/prover/prover_dal/.sqlx/query-a94fffdbc1827dc5df908ea1e99ef3ad13840d2c497760e9bd0513f68dc4271c.json similarity index 52% rename from prover/prover_dal/.sqlx/query-5449963638944edc62b5ec293bcda37e6c0c6c85fe5f701ebc9919fcad749a23.json rename to prover/prover_dal/.sqlx/query-a94fffdbc1827dc5df908ea1e99ef3ad13840d2c497760e9bd0513f68dc4271c.json index 01d32127608e..20db1e57aeb8 100644 --- a/prover/prover_dal/.sqlx/query-5449963638944edc62b5ec293bcda37e6c0c6c85fe5f701ebc9919fcad749a23.json +++ b/prover/prover_dal/.sqlx/query-a94fffdbc1827dc5df908ea1e99ef3ad13840d2c497760e9bd0513f68dc4271c.json @@ -1,6 +1,6 @@ { "db_name": "PostgreSQL", - "query": "\n SELECT\n COUNT(*) AS \"count!\",\n circuit_id AS \"circuit_id!\",\n aggregation_round AS \"aggregation_round!\",\n status AS \"status!\",\n protocol_version AS \"protocol_version!\"\n FROM\n prover_jobs_fri\n WHERE\n (\n status = 'queued'\n OR status = 'in_progress'\n )\n AND 
protocol_version IS NOT NULL\n GROUP BY\n circuit_id,\n aggregation_round,\n status,\n protocol_version\n ", + "query": "\n SELECT\n COUNT(*) AS \"count!\",\n circuit_id AS \"circuit_id!\",\n aggregation_round AS \"aggregation_round!\",\n status AS \"status!\",\n protocol_version AS \"protocol_version!\",\n protocol_version_patch AS \"protocol_version_patch!\"\n FROM\n prover_jobs_fri\n WHERE\n (\n status = 'queued'\n OR status = 'in_progress'\n )\n AND protocol_version IS NOT NULL\n GROUP BY\n circuit_id,\n aggregation_round,\n status,\n protocol_version,\n protocol_version_patch\n ", "describe": { "columns": [ { @@ -27,6 +27,11 @@ "ordinal": 4, "name": "protocol_version!", "type_info": "Int4" + }, + { + "ordinal": 5, + "name": "protocol_version_patch!", + "type_info": "Int4" } ], "parameters": { @@ -37,8 +42,9 @@ false, false, false, - true + true, + false ] }, - "hash": "5449963638944edc62b5ec293bcda37e6c0c6c85fe5f701ebc9919fcad749a23" + "hash": "a94fffdbc1827dc5df908ea1e99ef3ad13840d2c497760e9bd0513f68dc4271c" } diff --git a/prover/prover_dal/.sqlx/query-e9c9b69852fa68f463f17b6d63ab99cf505662036f2dd7a9f1807c4c1bad7c7b.json b/prover/prover_dal/.sqlx/query-e9c9b69852fa68f463f17b6d63ab99cf505662036f2dd7a9f1807c4c1bad7c7b.json new file mode 100644 index 000000000000..160eb31bf953 --- /dev/null +++ b/prover/prover_dal/.sqlx/query-e9c9b69852fa68f463f17b6d63ab99cf505662036f2dd7a9f1807c4c1bad7c7b.json @@ -0,0 +1,38 @@ +{ + "db_name": "PostgreSQL", + "query": "\n SELECT\n protocol_version,\n protocol_version_patch,\n COUNT(*) FILTER (\n WHERE\n status = 'queued'\n ) AS queued,\n COUNT(*) FILTER (\n WHERE\n status = 'in_progress'\n ) AS in_progress\n FROM\n proof_compression_jobs_fri\n WHERE\n protocol_version IS NOT NULL\n GROUP BY\n protocol_version,\n protocol_version_patch\n ", + "describe": { + "columns": [ + { + "ordinal": 0, + "name": "protocol_version", + "type_info": "Int4" + }, + { + "ordinal": 1, + "name": "protocol_version_patch", + "type_info": "Int4" + }, + { + "ordinal": 2, + "name": "queued", + "type_info": "Int8" + }, + { + "ordinal": 3, + "name": "in_progress", + "type_info": "Int8" + } + ], + "parameters": { + "Left": [] + }, + "nullable": [ + true, + false, + null, + null + ] + }, + "hash": "e9c9b69852fa68f463f17b6d63ab99cf505662036f2dd7a9f1807c4c1bad7c7b" +} diff --git a/prover/prover_dal/src/fri_proof_compressor_dal.rs b/prover/prover_dal/src/fri_proof_compressor_dal.rs index 35bb6329bdb8..38f09114f2bf 100644 --- a/prover/prover_dal/src/fri_proof_compressor_dal.rs +++ b/prover/prover_dal/src/fri_proof_compressor_dal.rs @@ -251,11 +251,12 @@ impl FriProofCompressorDal<'_, '_> { .unwrap(); } - pub async fn get_jobs_stats(&mut self) -> HashMap { + pub async fn get_jobs_stats(&mut self) -> HashMap { sqlx::query!( r#" SELECT protocol_version, + protocol_version_patch, COUNT(*) FILTER ( WHERE status = 'queued' @@ -269,7 +270,8 @@ impl FriProofCompressorDal<'_, '_> { WHERE protocol_version IS NOT NULL GROUP BY - protocol_version + protocol_version, + protocol_version_patch "#, ) .fetch_all(self.storage.conn()) @@ -277,7 +279,10 @@ impl FriProofCompressorDal<'_, '_> { .unwrap() .into_iter() .map(|row| { - let key = ProtocolVersionId::try_from(row.protocol_version.unwrap() as u16).unwrap(); + let key = ProtocolSemanticVersion::new( + ProtocolVersionId::try_from(row.protocol_version.unwrap() as u16).unwrap(), + VersionPatch(row.protocol_version_patch as u32), + ); let value = JobCountStatistics { queued: row.queued.unwrap() as usize, in_progress: row.in_progress.unwrap() as usize, 
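The hunk above reworks `get_jobs_stats` so that job counts are keyed by the full `ProtocolSemanticVersion` (minor version plus patch) rather than a bare protocol version. A minimal caller sketch, assuming `ProtocolSemanticVersion` implements `Display` and using only the fields visible in the hunk:

async fn report_compressor_queue(dal: &mut FriProofCompressorDal<'_, '_>) {
    // Each entry now distinguishes patch releases of the same minor protocol version.
    for (version, counts) in dal.get_jobs_stats().await {
        tracing::info!(
            "proof compression jobs for protocol {}: {} queued, {} in progress",
            version,
            counts.queued,
            counts.in_progress
        );
    }
}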
diff --git a/prover/prover_dal/src/fri_prover_dal.rs b/prover/prover_dal/src/fri_prover_dal.rs index 18d9ec9e14fa..35fb46e8aff3 100644 --- a/prover/prover_dal/src/fri_prover_dal.rs +++ b/prover/prover_dal/src/fri_prover_dal.rs @@ -409,7 +409,8 @@ impl FriProverDal<'_, '_> { circuit_id AS "circuit_id!", aggregation_round AS "aggregation_round!", status AS "status!", - protocol_version AS "protocol_version!" + protocol_version AS "protocol_version!", + protocol_version_patch AS "protocol_version_patch!" FROM prover_jobs_fri WHERE @@ -422,7 +423,8 @@ impl FriProverDal<'_, '_> { circuit_id, aggregation_round, status, - protocol_version + protocol_version, + protocol_version_patch "# ) .fetch_all(self.storage.conn()) @@ -437,6 +439,7 @@ impl FriProverDal<'_, '_> { circuit_id: row.circuit_id as u8, aggregation_round: row.aggregation_round as u8, protocol_version: row.protocol_version as u16, + protocol_version_patch: row.protocol_version_patch as u32, }) .or_default(); match row.status.as_ref() { diff --git a/prover/prover_dal/src/fri_witness_generator_dal.rs b/prover/prover_dal/src/fri_witness_generator_dal.rs index 4ce0122d7143..3c733623e477 100644 --- a/prover/prover_dal/src/fri_witness_generator_dal.rs +++ b/prover/prover_dal/src/fri_witness_generator_dal.rs @@ -1365,19 +1365,21 @@ impl FriWitnessGeneratorDal<'_, '_> { pub async fn get_witness_jobs_stats( &mut self, aggregation_round: AggregationRound, - ) -> HashMap<(AggregationRound, ProtocolVersionId), JobCountStatistics> { + ) -> HashMap<(AggregationRound, ProtocolSemanticVersion), JobCountStatistics> { let table_name = Self::input_table_name_for(aggregation_round); let sql = format!( r#" SELECT protocol_version, + protocol_version_patch, COUNT(*) FILTER (WHERE status = 'queued') as queued, COUNT(*) FILTER (WHERE status = 'in_progress') as in_progress FROM {} WHERE protocol_version IS NOT NULL GROUP BY - protocol_version + protocol_version, + protocol_version_patch "#, table_name, ); @@ -1387,11 +1389,12 @@ impl FriWitnessGeneratorDal<'_, '_> { .unwrap() .into_iter() .map(|row| { - let key = ( - aggregation_round, + let protocol_semantic_version = ProtocolSemanticVersion::new( ProtocolVersionId::try_from(row.get::("protocol_version") as u16) .unwrap(), + VersionPatch(row.get::("protocol_version_patch") as u32), ); + let key = (aggregation_round, protocol_semantic_version); let value = JobCountStatistics { queued: row.get::("queued") as usize, in_progress: row.get::("in_progress") as usize, diff --git a/prover/prover_fri/src/main.rs b/prover/prover_fri/src/main.rs index 4caceae13e9d..7bd658868258 100644 --- a/prover/prover_fri/src/main.rs +++ b/prover/prover_fri/src/main.rs @@ -18,11 +18,11 @@ use zksync_env_config::{ FromEnv, }; use zksync_object_store::{ObjectStore, ObjectStoreFactory}; +use zksync_prover_fri_types::PROVER_PROTOCOL_SEMANTIC_VERSION; use zksync_prover_fri_utils::{get_all_circuit_id_round_tuples_for, region_fetcher::get_zone}; use zksync_queued_job_processor::JobProcessor; use zksync_types::{ basic_fri_types::CircuitIdRoundTuple, - protocol_version::ProtocolSemanticVersion, prover_dal::{GpuProverInstanceStatus, SocketAddress}, }; use zksync_utils::wait_for_tasks::ManagedTasks; @@ -195,7 +195,7 @@ async fn get_prover_tasks( ) -> anyhow::Result>>> { use crate::prover_job_processor::{load_setup_data_cache, Prover}; - let protocol_version = ProtocolSemanticVersion::current_prover_version(); + let protocol_version = PROVER_PROTOCOL_SEMANTIC_VERSION; tracing::info!( "Starting CPU FRI proof generation for with protocol_version: 
{:?}", @@ -247,7 +247,7 @@ async fn get_prover_tasks( port: prover_config.witness_vector_receiver_port, }; - let protocol_version = ProtocolSemanticVersion::current_prover_version(); + let protocol_version = PROVER_PROTOCOL_SEMANTIC_VERSION; let prover = gpu_prover::Prover::new( store_factory.create_store().await, diff --git a/prover/prover_fri_types/src/lib.rs b/prover/prover_fri_types/src/lib.rs index 611702cd34f1..0c6557c27ffc 100644 --- a/prover/prover_fri_types/src/lib.rs +++ b/prover/prover_fri_types/src/lib.rs @@ -14,7 +14,11 @@ use circuit_definitions::{ }, }; use zksync_object_store::{serialize_using_bincode, Bucket, StoredObject}; -use zksync_types::{basic_fri_types::AggregationRound, L1BatchNumber}; +use zksync_types::{ + basic_fri_types::AggregationRound, + protocol_version::{ProtocolSemanticVersion, VersionPatch}, + L1BatchNumber, ProtocolVersionId, +}; use crate::keys::FriCircuitKey; @@ -23,6 +27,14 @@ pub mod queue; pub const EIP_4844_CIRCUIT_ID: u8 = 255; +// THESE VALUES SHOULD BE UPDATED ON ANY PROTOCOL UPGRADE OF PROVERS +pub const PROVER_PROTOCOL_VERSION: ProtocolVersionId = ProtocolVersionId::Version24; +pub const PROVER_PROTOCOL_PATCH: VersionPatch = VersionPatch(1); +pub const PROVER_PROTOCOL_SEMANTIC_VERSION: ProtocolSemanticVersion = ProtocolSemanticVersion { + minor: PROVER_PROTOCOL_VERSION, + patch: PROVER_PROTOCOL_PATCH, +}; + #[derive(serde::Serialize, serde::Deserialize, Clone)] #[allow(clippy::large_enum_variant)] pub enum CircuitWrapper { diff --git a/prover/prover_version/Cargo.toml b/prover/prover_version/Cargo.toml index af2c9936ec7f..0275b4169b72 100644 --- a/prover/prover_version/Cargo.toml +++ b/prover/prover_version/Cargo.toml @@ -4,4 +4,4 @@ version = "0.1.0" edition.workspace = true [dependencies] -zksync_types.workspace = true +zksync_prover_fri_types.workspace = true diff --git a/prover/prover_version/src/main.rs b/prover/prover_version/src/main.rs index 3ed931240d9f..f4b52801820a 100644 --- a/prover/prover_version/src/main.rs +++ b/prover/prover_version/src/main.rs @@ -1,5 +1,5 @@ -use zksync_types::ProtocolVersionId; +use zksync_prover_fri_types::PROVER_PROTOCOL_SEMANTIC_VERSION; fn main() { - println!("{}", ProtocolVersionId::current_prover_version()); + println!("{}", PROVER_PROTOCOL_SEMANTIC_VERSION); } diff --git a/prover/setup-data-gpu-keys.json b/prover/setup-data-gpu-keys.json index 600427385c79..4acc51b9add0 100644 --- a/prover/setup-data-gpu-keys.json +++ b/prover/setup-data-gpu-keys.json @@ -1,5 +1,5 @@ { - "us": "gs://matterlabs-setup-data-us/744b4e8-gpu/", - "europe": "gs://matterlabs-setup-data-europe/744b4e8-gpu/", - "asia": "gs://matterlabs-setup-data-asia/744b4e8-gpu/" + "us": "gs://matterlabs-setup-data-us/ffc5da2-gpu/", + "europe": "gs://matterlabs-setup-data-europe/ffc5da2-gpu/", + "asia": "gs://matterlabs-setup-data-asia/ffc5da2-gpu/" } diff --git a/prover/vk_setup_data_generator_server_fri/data/commitments.json b/prover/vk_setup_data_generator_server_fri/data/commitments.json index 00161454a9a2..086609a5822b 100644 --- a/prover/vk_setup_data_generator_server_fri/data/commitments.json +++ b/prover/vk_setup_data_generator_server_fri/data/commitments.json @@ -1,6 +1,6 @@ { - "leaf": "0xcc4ac1853353538a166f5c2dde2c24e7e6c461dce8e3dc47d81e9139e1719456", + "leaf": "0xf9664f4324c1400fa5c3822d667f30e873f53f1b8033180cd15fe41c1e2355c6", "node": "0xf520cd5b37e74e19fdb369c8d676a04dce8a19457497ac6686d2bb95d94109c8", - "scheduler": "0x8e58ecfdb4d987f32c45ed50f72a47dc5c46c262d83549c426a8fa6edacbc4dd", - "snark_wrapper": 
"0xb45190a52235abe353afd606a9144728f807804f5282df9247e27c56e817ccd6" + "scheduler": "0xe6ba9d6b042440c480fa1c7182be32387db6e90281e82f37398d3f98f63f098a", + "snark_wrapper": "0x14f97b81e54b35fe673d8708cc1a19e1ea5b5e348e12d31e39824ed4f42bbca2" } \ No newline at end of file diff --git a/prover/vk_setup_data_generator_server_fri/data/finalization_hints_basic_1.bin b/prover/vk_setup_data_generator_server_fri/data/finalization_hints_basic_1.bin index eeaee8f8a3b46870699f01aed8405bcd84329268..b1623bfe3ef1d593a5eb321903de9daafddce42f 100644 GIT binary patch delta 69 mcmbQjG=*ux9;P6+iTmU%*8b#Y00BlY$-n^PGokUB(f9!ICJRyk delta 69 mcmbQjG=*ux9;Rhn6ZgqkM1JLG00BlY$-n^PGokUB(f9z`f(l9i diff --git a/prover/vk_setup_data_generator_server_fri/data/snark_verification_scheduler_key.json b/prover/vk_setup_data_generator_server_fri/data/snark_verification_scheduler_key.json index 4313abe7616b..acb7e3fe8969 100644 --- a/prover/vk_setup_data_generator_server_fri/data/snark_verification_scheduler_key.json +++ b/prover/vk_setup_data_generator_server_fri/data/snark_verification_scheduler_key.json @@ -6,16 +6,16 @@ "gate_setup_commitments": [ { "x": [ - 3639645538835826981, - 13358681319193882915, - 14654814390686320869, - 2265744977747292559 + 14543631136906534221, + 11532161447842416044, + 11114175029926010938, + 1228896787564295039 ], "y": [ - 5699456119250210464, - 11698616611432786025, - 15205083455076303537, - 793062898509501988 + 13293602262342424489, + 8897930584356943159, + 13256028170406220369, + 3214939367598363288 ], "infinity": false }, @@ -96,16 +96,16 @@ }, { "x": [ - 8181305420893527265, - 8023799216002703714, - 15496213284243332216, - 770710052375668551 + 9586697317366528906, + 2325800863365957883, + 1243781259615311278, + 3048012003267036960 ], "y": [ - 1173987788591134762, - 3283714838474547428, - 15288445962933699259, - 953799583719157434 + 612821620743617231, + 1510385666449513894, + 9368337288452385056, + 2949736812933507034 ], "infinity": false }, diff --git a/prover/vk_setup_data_generator_server_fri/data/verification_basic_1_key.json b/prover/vk_setup_data_generator_server_fri/data/verification_basic_1_key.json index 1f219f9e876d..8459e87826ac 100644 --- a/prover/vk_setup_data_generator_server_fri/data/verification_basic_1_key.json +++ b/prover/vk_setup_data_generator_server_fri/data/verification_basic_1_key.json @@ -19,19 +19,19 @@ "public_inputs_locations": [ [ 0, - 1045849 + 1046957 ], [ 1, - 1045849 + 1046957 ], [ 2, - 1045849 + 1046957 ], [ 3, - 1045849 + 1046957 ] ], "extra_constant_polys_for_selectors": 3, @@ -183,100 +183,100 @@ }, "setup_merkle_tree_cap": [ [ - 7045554076696889632, - 16529088100684214116, - 6290514233821252509, - 3001343423260616923 + 9473487953399898748, + 16270419805909860203, + 7335367583540379607, + 18438161812709418982 ], [ - 2940766705131855345, - 4555670488918609622, - 5753494248126846134, - 6256617137189379231 + 12967681057814187922, + 15701035168973396898, + 11259967584839810575, + 10571912581839654023 ], [ - 11827587136011675723, - 10889029680830982431, - 13439167774157155113, - 2734855668043648738 + 5264981558950918922, + 7322263530084687711, + 17011319323793220700, + 14479065901870485923 ], [ - 15389434355711868094, - 11598886769225733235, - 8482571407659321701, - 1997900333773344820 + 15574099641370951434, + 17000829784989701584, + 15964436826107516267, + 11346203353481465805 ], [ - 4548024410962672141, - 4394433224146674864, - 13832051321856375918, - 18445586359141413559 + 5474255527556252767, + 16570571942564149566, + 11428025503403431038, + 
6617585440243326997 ], [ - 3613486671466248529, - 8630760380746238913, - 14296646559228531904, - 9397645087732339531 + 308081994977850819, + 8729962239283422104, + 14597407866734738386, + 14829347258931409833 ], [ - 840865276850212173, - 16736429831088322497, - 14611332307377976471, - 3907807757864441481 + 9980505926358439430, + 4909215529832368544, + 8351461288536129828, + 1249767629546599012 ], [ - 2637545975653412188, - 3660986788535112218, - 9902405273825560113, - 7195558443610319480 + 1807216890691480940, + 8617426931824195446, + 11002408656746191939, + 2928848780068318198 ], [ - 8393139460037640371, - 10765566899430361860, - 18329680108258922867, - 741850204565671783 + 11541179157141990516, + 12173830690959139035, + 2440341332114286947, + 12109090346106141232 ], [ - 4000428793481961239, - 15763840098880028026, - 10171423830051614055, - 13386566252539583097 + 11418690736500468651, + 16634379025633469741, + 15202881082421411217, + 1933046213639751324 ], [ - 998896299132355394, - 14206990988719530146, - 8999279144001525320, - 10626686453302503838 + 7447003196248321129, + 18332700323878037759, + 9559830827790696535, + 15476899088175820878 ], [ - 17426248181155971215, - 4962517775468765428, - 7032151950452105750, - 7025431744279194673 + 9516228739964317619, + 3715247844046085602, + 3402341140845153636, + 6208479534561471430 ], [ - 12275611679628867217, - 4758528062899618473, - 14082115197178538846, - 3896427251413045084 + 13129761831635161708, + 1199200173405945178, + 2225893329254814674, + 11792586660360798317 ], [ - 15483865238199990360, - 5691435570314737886, - 14756340954295671676, - 17828994026924671768 + 11807698182439073980, + 7978262413534788419, + 11140621065717310105, + 1380069160672719340 ], [ - 17160835723214490721, - 7256922695144660559, - 4901345145374519964, - 1493120881299167685 + 347840206922472862, + 10448076973761280929, + 6823062094681347787, + 15218544951788424466 ], [ - 1740794570609564600, - 609320811844141042, - 426822094057894482, - 6559582870374070860 + 13614576575170767970, + 7218359081103672230, + 15716723129949529907, + 15097061601049280170 ] ] } diff --git a/prover/vk_setup_data_generator_server_fri/data/verification_leaf_3_key.json b/prover/vk_setup_data_generator_server_fri/data/verification_leaf_3_key.json index 70823d429afd..a44d59cd38ec 100644 --- a/prover/vk_setup_data_generator_server_fri/data/verification_leaf_3_key.json +++ b/prover/vk_setup_data_generator_server_fri/data/verification_leaf_3_key.json @@ -162,100 +162,100 @@ }, "setup_merkle_tree_cap": [ [ - 14888709561675676412, - 9216741205039404929, - 9684149635019531913, - 13880860109035593219 + 17855141276447231405, + 7822266582101144460, + 13588292742840523493, + 6469182181208683317 ], [ - 15104809072293329963, - 1896126018678273430, - 12116942096160132903, - 7145610089866937425 + 4232699233227875249, + 16903438402968182485, + 6943950277201482792, + 2110689468668186473 ], [ - 5938467841458718442, - 13853503804678923615, - 9221120555920683684, - 15112098065915315318 + 7707237321810352304, + 6515546920961633488, + 12952446233485170717, + 15066548759710591627 ], [ - 10492005768294435976, - 10245537693158081259, - 17481852070620274887, - 9681223495665222888 + 4639470535288257573, + 9977204060471305820, + 13620252730672745323, + 13906174107064885101 ], [ - 2330970386857215037, - 4019699060591160553, - 1410714382025032836, - 13967465531165811113 + 3380569754818632951, + 14592200377838954179, + 4655944779251366596, + 10461459338163125811 ], [ - 2697285946544359790, - 10219469019881018060, - 
4617295552426676526, - 4165342253057202206 + 9505371692898482313, + 17672643349055132324, + 10968459678378506342, + 7203066191514731188 ], [ - 7573986049996963514, - 7859751312783523495, - 6058686987847329688, - 17050513781000134964 + 6361719037117192382, + 14180108541189529084, + 6222651441291357456, + 992683928102460932 ], [ - 7848395666220166703, - 1808170012978044134, - 12886183437176343290, - 9247906664812684040 + 533421257849918809, + 11687478703243746707, + 17923492118938261966, + 3240289105687966878 ], [ - 4758224957823408119, - 18390374702861572456, - 12054973031816727956, - 9964456186628666135 + 10537826768508055055, + 12735025794843706714, + 12285680957016823071, + 10987522679748444515 ], [ - 9913247106175321276, - 1133994713615747518, - 15467305915923599881, - 14137150334296727741 + 13934405620933279246, + 3346346012923536354, + 13038612823504141140, + 5021904630472945213 ], [ - 519510401159554954, - 671623465327617337, - 6946618752566126355, - 14839792343867641685 + 4317559511773342187, + 9030560588429997541, + 4631410576253261376, + 9787322710458812055 ], [ - 15769588697424611648, - 2044484567072981120, - 9195524138415042973, - 17683243399640174941 + 6546515965342993735, + 14693131313122528660, + 17792579751764566634, + 8313761089615939487 ], [ - 12667910057570482067, - 5348170454137185946, - 13596174350294476632, - 10205751496630857536 + 3974680093533741999, + 14912060828934556038, + 1881259422671526373, + 12651251867986376553 ], [ - 6454065087063181969, - 6868636153285926242, - 15096145533308286351, - 5607823324493271199 + 4700501802410133974, + 13415065184486663986, + 2400366378830519355, + 16672949145027127976 ], [ - 9258544726611497878, - 10424111256988796050, - 6681130502078897352, - 7923029268540343473 + 14532304468096502099, + 8898488667664282945, + 421877734780369270, + 18139574494023430530 ], [ - 1072638076145855116, - 5751602392190609095, - 10716732206422190696, - 12121400551621687065 + 2695266391937250139, + 8565247931723474329, + 8596490620847451819, + 2058702883352054572 ] ] } diff --git a/prover/vk_setup_data_generator_server_fri/data/verification_scheduler_key.json b/prover/vk_setup_data_generator_server_fri/data/verification_scheduler_key.json index 4c328cbfd819..8a52cc244bac 100644 --- a/prover/vk_setup_data_generator_server_fri/data/verification_scheduler_key.json +++ b/prover/vk_setup_data_generator_server_fri/data/verification_scheduler_key.json @@ -170,100 +170,100 @@ }, "setup_merkle_tree_cap": [ [ - 2680192913777199386, - 7877900777764568562, - 7967270885539056261, - 11491786516879257714 + 9887208323851505217, + 1123001217986730435, + 343259880253311786, + 2151140818520262118 ], [ - 1576848689219001454, - 2538042691131197824, - 16789498574115229290, - 3214129711903181558 + 12495904531249642919, + 17232615797756148395, + 3335544159309667561, + 6261962261160675850 ], [ - 856301905705619734, - 4331213335266799158, - 15267490766684530921, - 3265714654258242220 + 3290174806954782361, + 3957604867997030178, + 12129129725630125865, + 1636089896333385634 ], [ - 8865784570897245270, - 2362765988103793581, - 6943670874402562853, - 14632996114278721596 + 14645858759272203991, + 11653487901803110416, + 2499237237036147984, + 1841727833267838231 ], [ - 63247458005995468, - 12539771084927052853, - 13041512411442114569, - 9742813247561592554 + 18193008520821522692, + 14508611443656176962, + 15201308762805005611, + 16051075400380887227 ], [ - 16743936557271219178, - 14841453989210747254, - 12724413787690930702, - 10592542358880202219 + 4504987266706704494, + 
7397695837427186224, + 10067172051000661467, + 5044520361343796759 ], [ - 16695338323889693576, - 8527536001711027994, - 13212045085202022064, - 11071462626939596790 + 9408005523417633181, + 14924548137262927482, + 8927260223716946348, + 25087104176919469 ], [ - 18060750313558946749, - 15824434706098663517, - 775292596891170912, - 18445377984966327048 + 11857324568001808264, + 5783626311717767938, + 10769426771780222703, + 8523712547334248178 ], [ - 3549745875383468285, - 2238890537215251462, - 4591889095789072384, - 13012706980710418598 + 18394924697039022030, + 3773697459649116941, + 6013511991919985339, + 17810626771729638933 ], [ - 14771394899136640222, - 13143304103596416048, - 14456129193020560275, - 5740433968684323698 + 13290121767754155136, + 11225142773614876536, + 4764911669339622945, + 17476639133556434478 ], [ - 11651473654699970526, - 4694969877986805556, - 7029204199916750383, - 6916614362901685796 + 11822797557540925718, + 17521847674855164779, + 18126641713175128985, + 3215884914057380988 ], [ - 4368206191480113515, - 9562279231528697429, - 1907048590194817686, - 13209277185471975687 + 15220380051263546850, + 7948573237324556416, + 264360501330239312, + 16455579027557250339 ], [ - 14438342866286439870, - 383769026263703315, - 1077241575478137065, - 1158227982301730574 + 17738768733790921549, + 4021891743990340907, + 17352941271057641152, + 15584530612705924787 ], [ - 10868817472877525981, - 11920954565057859026, - 10684659491915725994, - 15343028344024922569 + 7157587680183062137, + 8837818432071888650, + 16467824236289155049, + 17557580094049845697 ], [ - 4969179907509861760, - 3560160134545277440, - 11797495979614319546, - 13436348584120593030 + 15526977922222496027, + 5885713491624121557, + 8813450728670527813, + 10234120825800411733 ], [ - 8873263215018682993, - 13828390019511310487, - 12329030402425507188, - 18004618114160314165 + 12554317685609787988, + 4789370247234643566, + 16370523223191414986, + 9108687955872827734 ] ] } diff --git a/prover/vk_setup_data_generator_server_fri/src/keystore.rs b/prover/vk_setup_data_generator_server_fri/src/keystore.rs index d1ba66e1fd2a..25aedeb089ff 100644 --- a/prover/vk_setup_data_generator_server_fri/src/keystore.rs +++ b/prover/vk_setup_data_generator_server_fri/src/keystore.rs @@ -44,7 +44,18 @@ pub struct Keystore { } fn get_base_path() -> PathBuf { - core_workspace_dir_or_current_dir().join("prover/vk_setup_data_generator_server_fri/data") + let path = core_workspace_dir_or_current_dir(); + + let new_path = path.join("prover/vk_setup_data_generator_server_fri/data"); + if new_path.exists() { + return new_path; + } + + let mut components = path.components(); + components.next_back().unwrap(); + components + .as_path() + .join("prover/vk_setup_data_generator_server_fri/data") } impl Default for Keystore { diff --git a/prover/witness_generator/src/main.rs b/prover/witness_generator/src/main.rs index e176347acafe..e0e39b442a83 100644 --- a/prover/witness_generator/src/main.rs +++ b/prover/witness_generator/src/main.rs @@ -41,7 +41,7 @@ mod utils; #[cfg(not(target_env = "msvc"))] use jemallocator::Jemalloc; use zksync_dal::Core; -use zksync_types::protocol_version::ProtocolSemanticVersion; +use zksync_prover_fri_types::PROVER_PROTOCOL_SEMANTIC_VERSION; #[cfg(not(target_env = "msvc"))] #[global_allocator] @@ -126,7 +126,7 @@ async fn main() -> anyhow::Result<()> { .context("failed to build a prover_connection_pool")?; let (stop_sender, stop_receiver) = watch::channel(false); - let protocol_version = 
ProtocolSemanticVersion::current_prover_version(); + let protocol_version = PROVER_PROTOCOL_SEMANTIC_VERSION; let vk_commitments_in_db = match prover_connection_pool .connection() .await diff --git a/prover/witness_vector_generator/src/main.rs b/prover/witness_vector_generator/src/main.rs index 843ae02530d2..2b8134d09e58 100644 --- a/prover/witness_vector_generator/src/main.rs +++ b/prover/witness_vector_generator/src/main.rs @@ -13,9 +13,9 @@ use zksync_config::configs::{ }; use zksync_env_config::{object_store::ProverObjectStoreConfig, FromEnv}; use zksync_object_store::ObjectStoreFactory; +use zksync_prover_fri_types::PROVER_PROTOCOL_SEMANTIC_VERSION; use zksync_prover_fri_utils::{get_all_circuit_id_round_tuples_for, region_fetcher::get_zone}; use zksync_queued_job_processor::JobProcessor; -use zksync_types::protocol_version::ProtocolSemanticVersion; use zksync_utils::wait_for_tasks::ManagedTasks; use crate::generator::WitnessVectorGenerator; @@ -87,7 +87,7 @@ async fn main() -> anyhow::Result<()> { let zone_url = &fri_prover_config.zone_read_url; let zone = get_zone(zone_url).await.context("get_zone()")?; - let protocol_version = ProtocolSemanticVersion::current_prover_version(); + let protocol_version = PROVER_PROTOCOL_SEMANTIC_VERSION; let witness_vector_generator = WitnessVectorGenerator::new( blob_store, diff --git a/zk_toolbox/Cargo.lock b/zk_toolbox/Cargo.lock index 1469b183152b..927ef514f324 100644 --- a/zk_toolbox/Cargo.lock +++ b/zk_toolbox/Cargo.lock @@ -531,6 +531,7 @@ dependencies = [ "serde_yaml", "sqlx", "strum_macros 0.26.2", + "tokio", "toml", "url", "xshell", @@ -3948,6 +3949,7 @@ dependencies = [ "serde", "strum 0.26.2", "strum_macros 0.26.2", + "thiserror", ] [[package]] @@ -4533,6 +4535,7 @@ dependencies = [ "console", "ethers", "human-panic", + "lazy_static", "serde", "serde_json", "serde_yaml", @@ -4550,7 +4553,16 @@ dependencies = [ name = "zk_supervisor" version = "0.1.0" dependencies = [ + "anyhow", + "clap", + "common", + "config", "human-panic", + "strum 0.26.2", + "strum_macros 0.26.2", + "tokio", + "url", + "xshell", ] [[package]] diff --git a/zk_toolbox/Cargo.toml b/zk_toolbox/Cargo.toml index 539c656292a4..ae4b40fa435e 100644 --- a/zk_toolbox/Cargo.toml +++ b/zk_toolbox/Cargo.toml @@ -32,7 +32,9 @@ clap = { version = "4.4", features = ["derive", "wrap_help"] } cliclack = "0.2.5" console = "0.15.8" ethers = "2.0" +futures = "0.3.30" human-panic = "2.0" +lazy_static = "1.4.0" once_cell = "1.19.0" rand = "0.8.5" serde = { version = "1.0", features = ["derive"] } @@ -41,9 +43,8 @@ serde_yaml = "0.9" sqlx = { version = "0.7.4", features = ["runtime-tokio", "migrate", "postgres"] } strum = "0.26.2" strum_macros = "0.26.2" +thiserror = "1.0.57" tokio = { version = "1.37", features = ["full"] } toml = "0.8.12" url = { version = "2.5.0", features = ["serde"] } xshell = "0.2.6" -futures = "0.3.30" -thiserror = "1.0.57" diff --git a/zk_toolbox/crates/common/Cargo.toml b/zk_toolbox/crates/common/Cargo.toml index efdde1cdfc18..00c3b7775112 100644 --- a/zk_toolbox/crates/common/Cargo.toml +++ b/zk_toolbox/crates/common/Cargo.toml @@ -16,13 +16,14 @@ clap.workspace = true cliclack.workspace = true console.workspace = true ethers.workspace = true +futures.workspace = true once_cell.workspace = true serde.workspace = true serde_json.workspace = true serde_yaml.workspace = true sqlx.workspace = true strum_macros.workspace = true +tokio.workspace = true toml.workspace = true url.workspace = true xshell.workspace = true -futures.workspace = true diff --git 
a/zk_toolbox/crates/common/src/cmd.rs b/zk_toolbox/crates/common/src/cmd.rs index 8b18c7733059..e39f1e18972c 100644 --- a/zk_toolbox/crates/common/src/cmd.rs +++ b/zk_toolbox/crates/common/src/cmd.rs @@ -1,3 +1,5 @@ +use std::process::Output; + use anyhow::bail; use console::style; @@ -31,13 +33,6 @@ impl<'a> Cmd<'a> { /// Run the command without capturing its output. pub fn run(&mut self) -> anyhow::Result<()> { - self.run_cmd()?; - Ok(()) - } - - /// Run the command and capture its output, logging the command - /// and its output if verbose selected. - fn run_cmd(&mut self) -> anyhow::Result<()> { if global_config().verbose || self.force_run { logger::debug(format!("Running: {}", self.inner)); logger::new_empty_line(); @@ -60,6 +55,25 @@ impl<'a> Cmd<'a> { Ok(()) } + /// Run the command and return its output. + pub fn run_with_output(&mut self) -> anyhow::Result { + if global_config().verbose || self.force_run { + logger::debug(format!("Running: {}", self.inner)); + logger::new_empty_line(); + } + + self.inner.set_ignore_status(true); + let output = self.inner.output()?; + + if global_config().verbose || self.force_run { + logger::raw(log_output(&output)); + logger::new_empty_line(); + logger::new_line(); + } + + Ok(output) + } + fn check_output_status(&self, output: &std::process::Output) -> anyhow::Result<()> { if !output.status.success() { logger::new_line(); diff --git a/zk_toolbox/crates/common/src/db.rs b/zk_toolbox/crates/common/src/db.rs index 887880b2c55c..c0a681bc74c0 100644 --- a/zk_toolbox/crates/common/src/db.rs +++ b/zk_toolbox/crates/common/src/db.rs @@ -1,5 +1,7 @@ use std::{collections::HashMap, path::PathBuf}; +use anyhow::anyhow; +use serde::{Deserialize, Serialize}; use sqlx::{ migrate::{Migrate, MigrateError, Migrator}, Connection, PgConnection, @@ -9,22 +11,63 @@ use xshell::Shell; use crate::{config::global_config, logger}; -pub async fn init_db(db_url: &Url, name: &str) -> anyhow::Result<()> { +/// Database configuration. +#[derive(Debug, Clone, Serialize, Deserialize)] +pub struct DatabaseConfig { + /// Database URL. + pub url: Url, + /// Database name. + pub name: String, +} + +impl DatabaseConfig { + /// Create a new `Db` instance. + pub fn new(url: Url, name: String) -> Self { + Self { url, name } + } + + /// Create a new `Db` instance from a URL. + pub fn from_url(url: Url) -> anyhow::Result { + let name = url + .path_segments() + .ok_or(anyhow!("Failed to parse database name from URL"))? + .last() + .ok_or(anyhow!("Failed to parse database name from URL"))?; + let url_without_db_name = { + let mut url = url.clone(); + url.set_path(""); + url + }; + Ok(Self { + url: url_without_db_name, + name: name.to_string(), + }) + } + + /// Get the full URL of the database. + pub fn full_url(&self) -> Url { + let mut url = self.url.clone(); + url.set_path(&self.name); + url + } +} + +pub async fn init_db(db: &DatabaseConfig) -> anyhow::Result<()> { // Connect to the database. - let mut connection = PgConnection::connect(db_url.as_ref()).await?; + let mut connection = PgConnection::connect(db.url.as_str()).await?; - let query = format!("CREATE DATABASE {}", name); + let query = format!("CREATE DATABASE {}", db.name); // Create DB. sqlx::query(&query).execute(&mut connection).await?; Ok(()) } -pub async fn drop_db_if_exists(db_url: &Url, name: &str) -> anyhow::Result<()> { +pub async fn drop_db_if_exists(db: &DatabaseConfig) -> anyhow::Result<()> { // Connect to the database. 
- let mut connection = PgConnection::connect(db_url.as_ref()).await?; + let mut connection = PgConnection::connect(db.url.as_str()).await?; - let query = format!("DROP DATABASE IF EXISTS {}", name); + let query = format!("DROP DATABASE IF EXISTS {}", db.name); // DROP DB. sqlx::query(&query).execute(&mut connection).await?; @@ -34,7 +77,7 @@ pub async fn drop_db_if_exists(db_url: &Url, name: &str) -> anyhow::Result<()> { pub async fn migrate_db( shell: &Shell, migrations_folder: PathBuf, - db_url: &str, + db_url: &Url, ) -> anyhow::Result<()> { // Most of this file is copy-pasted from SQLx CLI: // https://github.com/launchbadge/sqlx/blob/main/sqlx-cli/src/migrate.rs @@ -45,7 +88,7 @@ pub async fn migrate_db( } let migrator = Migrator::new(migrations_folder).await?; - let mut conn = PgConnection::connect(db_url).await?; + let mut conn = PgConnection::connect(db_url.as_str()).await?; conn.ensure_migrations_table().await?; let version = conn.dirty_version().await?; @@ -83,7 +126,7 @@ pub async fn migrate_db( let text = if skip { "Skipped" } else { "Applied" }; if global_config().verbose { - logger::raw(&format!( + logger::step(&format!( " {} {}/{} {} ({elapsed:?})", text, migration.version, @@ -104,3 +147,15 @@ pub async fn migrate_db( Ok(()) } + +pub async fn wait_for_db(db_url: &Url, tries: u32) -> anyhow::Result<()> { + for i in 0..tries { + if PgConnection::connect(db_url.as_str()).await.is_ok() { + return Ok(()); + } + if i < tries - 1 { + tokio::time::sleep(std::time::Duration::from_secs(1)).await; + } + } + anyhow::bail!("Unable to connect to Postgres, connection cannot be established"); +} diff --git a/zk_toolbox/crates/common/src/term/logger.rs b/zk_toolbox/crates/common/src/term/logger.rs index 9e13c2958078..b505123114be 100644 --- a/zk_toolbox/crates/common/src/term/logger.rs +++ b/zk_toolbox/crates/common/src/term/logger.rs @@ -43,10 +43,14 @@ pub fn success(msg: impl Display) { log::success(msg).unwrap(); } -pub fn raw(msg: impl Display) { +pub fn step(msg: impl Display) { log::step(msg).unwrap(); } +pub fn raw(msg: impl Display) { + term_write(msg); +} + pub fn note(msg: impl Display, content: impl Display) { cliclack::note(msg, content).unwrap(); } diff --git a/zk_toolbox/crates/common/src/term/spinner.rs b/zk_toolbox/crates/common/src/term/spinner.rs index 3e9322ba636c..dcfaaf44d44d 100644 --- a/zk_toolbox/crates/common/src/term/spinner.rs +++ b/zk_toolbox/crates/common/src/term/spinner.rs @@ -34,4 +34,13 @@ impl Spinner { self.time.elapsed().as_secs_f64() )); } + + /// Interrupt the spinner with a failed message. 
+ pub fn fail(self) { + self.pb.error(format!( + "{} failed in {} secs", + self.msg, + self.time.elapsed().as_secs_f64() + )); + } } diff --git a/zk_toolbox/crates/config/src/forge_interface/deploy_ecosystem/input.rs b/zk_toolbox/crates/config/src/forge_interface/deploy_ecosystem/input.rs index 87556d36795f..585ad407b67e 100644 --- a/zk_toolbox/crates/config/src/forge_interface/deploy_ecosystem/input.rs +++ b/zk_toolbox/crates/config/src/forge_interface/deploy_ecosystem/input.rs @@ -1,6 +1,9 @@ use std::{collections::HashMap, str::FromStr}; -use ethers::types::{Address, H256}; +use ethers::{ + prelude::U256, + types::{Address, H256}, +}; use rand::Rng; use serde::{Deserialize, Serialize}; use types::ChainId; @@ -146,7 +149,7 @@ impl DeployL1Config { genesis_batch_commitment: genesis_config.genesis_batch_commitment, genesis_rollup_leaf_index: genesis_config.genesis_rollup_leaf_index, genesis_root: genesis_config.genesis_root, - latest_protocol_version: genesis_config.genesis_protocol_version, + latest_protocol_version: genesis_config.genesis_protocol_semantic_version.pack(), recursion_circuits_set_vks_hash: H256::zero(), recursion_leaf_level_vk_hash: H256::zero(), recursion_node_level_vk_hash: H256::zero(), @@ -173,7 +176,7 @@ pub struct ContractsDeployL1Config { pub genesis_root: H256, pub genesis_rollup_leaf_index: u32, pub genesis_batch_commitment: H256, - pub latest_protocol_version: u64, + pub latest_protocol_version: U256, pub recursion_node_level_vk_hash: H256, pub recursion_leaf_level_vk_hash: H256, pub recursion_circuits_set_vks_hash: H256, diff --git a/zk_toolbox/crates/config/src/genesis.rs b/zk_toolbox/crates/config/src/genesis.rs index 16f44a45c2e7..4e3d931ea0f0 100644 --- a/zk_toolbox/crates/config/src/genesis.rs +++ b/zk_toolbox/crates/config/src/genesis.rs @@ -1,6 +1,6 @@ use ethers::types::{Address, H256}; use serde::{Deserialize, Serialize}; -use types::{ChainId, L1BatchCommitDataGeneratorMode}; +use types::{ChainId, L1BatchCommitDataGeneratorMode, ProtocolSemanticVersion}; use crate::{consts::GENESIS_FILE, traits::FileConfigWithDefaultName}; @@ -16,6 +16,7 @@ pub struct GenesisConfig { pub genesis_rollup_leaf_index: u32, pub genesis_root: H256, pub genesis_protocol_version: u64, + pub genesis_protocol_semantic_version: ProtocolSemanticVersion, #[serde(flatten)] pub other: serde_json::Value, } diff --git a/zk_toolbox/crates/config/src/secrets.rs b/zk_toolbox/crates/config/src/secrets.rs index 829d903adb66..ebacc5d437cb 100644 --- a/zk_toolbox/crates/config/src/secrets.rs +++ b/zk_toolbox/crates/config/src/secrets.rs @@ -5,8 +5,8 @@ use crate::{consts::SECRETS_FILE, traits::FileConfigWithDefaultName}; #[derive(Debug, Clone, Serialize, Deserialize)] pub struct DatabaseSecrets { - pub server_url: String, - pub prover_url: String, + pub server_url: Url, + pub prover_url: Url, #[serde(flatten)] pub other: serde_json::Value, } @@ -29,28 +29,3 @@ pub struct SecretsConfig { impl FileConfigWithDefaultName for SecretsConfig { const FILE_NAME: &'static str = SECRETS_FILE; } - -#[derive(Debug, Serialize)] -pub struct DatabaseConfig { - pub base_url: Url, - pub database_name: String, -} - -impl DatabaseConfig { - pub fn new(base_url: Url, database_name: String) -> Self { - Self { - base_url, - database_name, - } - } - - pub fn full_url(&self) -> String { - format!("{}/{}", self.base_url, self.database_name) - } -} - -#[derive(Debug, Serialize)] -pub struct DatabasesConfig { - pub server: DatabaseConfig, - pub prover: DatabaseConfig, -} diff --git a/zk_toolbox/crates/types/Cargo.toml 
b/zk_toolbox/crates/types/Cargo.toml
index 2c7ceedd1f08..efd8f84d7088 100644
--- a/zk_toolbox/crates/types/Cargo.toml
+++ b/zk_toolbox/crates/types/Cargo.toml
@@ -16,3 +16,4 @@ ethers.workspace = true
 serde.workspace = true
 strum.workspace = true
 strum_macros.workspace = true
+thiserror.workspace = true
diff --git a/zk_toolbox/crates/types/src/lib.rs b/zk_toolbox/crates/types/src/lib.rs
index a973f8bfc918..c405013990cf 100644
--- a/zk_toolbox/crates/types/src/lib.rs
+++ b/zk_toolbox/crates/types/src/lib.rs
@@ -2,6 +2,7 @@ mod base_token;
 mod chain_id;
 mod l1_batch_commit_data_generator_mode;
 mod l1_network;
+mod protocol_version;
 mod prover_mode;
 mod wallet_creation;
 
@@ -9,5 +10,6 @@ pub use base_token::*;
 pub use chain_id::*;
 pub use l1_batch_commit_data_generator_mode::*;
 pub use l1_network::*;
+pub use protocol_version::ProtocolSemanticVersion;
 pub use prover_mode::*;
 pub use wallet_creation::*;
diff --git a/zk_toolbox/crates/types/src/protocol_version.rs b/zk_toolbox/crates/types/src/protocol_version.rs
new file mode 100644
index 000000000000..5b619c883a3e
--- /dev/null
+++ b/zk_toolbox/crates/types/src/protocol_version.rs
@@ -0,0 +1,87 @@
+use std::{fmt, num::ParseIntError, str::FromStr};
+
+use ethers::prelude::U256;
+use serde::{de::Error, Deserialize, Deserializer, Serialize, Serializer};
+
+pub const PACKED_SEMVER_MINOR_OFFSET: u32 = 32;
+
+#[derive(Debug, Clone, Copy, PartialEq, Eq, Hash, PartialOrd, Ord)]
+pub struct ProtocolSemanticVersion {
+    pub minor: u16,
+    pub patch: u16,
+}
+
+impl ProtocolSemanticVersion {
+    const MAJOR_VERSION: u8 = 0;
+
+    pub fn new(minor: u16, patch: u16) -> Self {
+        Self { minor, patch }
+    }
+
+    pub fn pack(&self) -> U256 {
+        (U256::from(self.minor) << U256::from(PACKED_SEMVER_MINOR_OFFSET)) | U256::from(self.patch)
+    }
+}
+
+impl fmt::Display for ProtocolSemanticVersion {
+    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+        write!(f, "{}.{}.{}", Self::MAJOR_VERSION, self.minor, self.patch)
+    }
+}
+
+#[derive(Debug, thiserror::Error)]
+pub enum ParseProtocolSemanticVersionError {
+    #[error("invalid format")]
+    InvalidFormat,
+    #[error("non zero major version")]
+    NonZeroMajorVersion,
+    #[error("{0}")]
+    ParseIntError(ParseIntError),
+}
+
+impl FromStr for ProtocolSemanticVersion {
+    type Err = ParseProtocolSemanticVersionError;
+
+    fn from_str(s: &str) -> Result<Self, Self::Err> {
+        let parts: Vec<&str> = s.split('.').collect();
+        if parts.len() != 3 {
+            return Err(ParseProtocolSemanticVersionError::InvalidFormat);
+        }
+
+        let major = parts[0]
+            .parse::<u16>()
+            .map_err(ParseProtocolSemanticVersionError::ParseIntError)?;
+        if major != 0 {
+            return Err(ParseProtocolSemanticVersionError::NonZeroMajorVersion);
+        }
+
+        let minor = parts[1]
+            .parse::<u16>()
+            .map_err(ParseProtocolSemanticVersionError::ParseIntError)?;
+
+        let patch = parts[2]
+            .parse::<u16>()
+            .map_err(ParseProtocolSemanticVersionError::ParseIntError)?;
+
+        Ok(ProtocolSemanticVersion { minor, patch })
+    }
+}
+
+impl<'de> Deserialize<'de> for ProtocolSemanticVersion {
+    fn deserialize<D>(deserializer: D) -> Result<Self, D::Error>
+    where
+        D: Deserializer<'de>,
+    {
+        let s = String::deserialize(deserializer)?;
+        ProtocolSemanticVersion::from_str(&s).map_err(D::Error::custom)
+    }
+}
+
+impl Serialize for ProtocolSemanticVersion {
+    fn serialize<S>(&self, serializer: S) -> Result<S::Ok, S::Error>
+    where
+        S: Serializer,
+    {
+        serializer.serialize_str(&self.to_string())
+    }
+}
diff --git a/zk_toolbox/crates/zk_inception/Cargo.toml b/zk_toolbox/crates/zk_inception/Cargo.toml
index 8123746f1abf..ff22e982e3cc 100644
---
a/zk_toolbox/crates/zk_inception/Cargo.toml +++ b/zk_toolbox/crates/zk_inception/Cargo.toml @@ -17,6 +17,7 @@ cliclack.workspace = true config.workspace = true console.workspace = true human-panic.workspace = true +lazy_static.workspace = true serde_yaml.workspace = true serde.workspace = true serde_json.workspace = true diff --git a/zk_toolbox/crates/zk_inception/src/commands/chain/args/genesis.rs b/zk_toolbox/crates/zk_inception/src/commands/chain/args/genesis.rs index 42c653b9bce1..d835b1eb36a6 100644 --- a/zk_toolbox/crates/zk_inception/src/commands/chain/args/genesis.rs +++ b/zk_toolbox/crates/zk_inception/src/commands/chain/args/genesis.rs @@ -1,6 +1,6 @@ use clap::Parser; -use common::{slugify, Prompt}; -use config::{ChainConfig, DatabaseConfig, DatabasesConfig}; +use common::{db::DatabaseConfig, slugify, Prompt}; +use config::ChainConfig; use serde::{Deserialize, Serialize}; use url::Url; @@ -16,11 +16,11 @@ use crate::{ #[derive(Debug, Clone, Serialize, Deserialize, Parser, Default)] pub struct GenesisArgs { #[clap(long, help = MSG_SERVER_DB_URL_HELP)] - pub server_db_url: Option, + pub server_db_url: Option, #[clap(long, help = MSG_SERVER_DB_NAME_HELP)] pub server_db_name: Option, #[clap(long, help = MSG_PROVER_DB_URL_HELP)] - pub prover_db_url: Option, + pub prover_db_url: Option, #[clap(long, help = MSG_PROVER_DB_NAME_HELP)] pub prover_db_name: Option, #[clap(long, short, help = MSG_GENESIS_USE_DEFAULT_HELP)] @@ -38,16 +38,14 @@ impl GenesisArgs { let chain_name = config.name.clone(); if self.use_default { GenesisArgsFinal { - server_db_url: DATABASE_SERVER_URL.to_string(), - server_db_name: server_name, - prover_db_url: DATABASE_PROVER_URL.to_string(), - prover_db_name: prover_name, + server_db: DatabaseConfig::new(DATABASE_SERVER_URL.clone(), server_name), + prover_db: DatabaseConfig::new(DATABASE_PROVER_URL.clone(), prover_name), dont_drop: self.dont_drop, } } else { let server_db_url = self.server_db_url.unwrap_or_else(|| { Prompt::new(&msg_server_db_url_prompt(&chain_name)) - .default(DATABASE_SERVER_URL) + .default(DATABASE_SERVER_URL.as_str()) .ask() }); let server_db_name = slugify(&self.server_db_name.unwrap_or_else(|| { @@ -57,7 +55,7 @@ impl GenesisArgs { })); let prover_db_url = self.prover_db_url.unwrap_or_else(|| { Prompt::new(&msg_prover_db_url_prompt(&chain_name)) - .default(DATABASE_PROVER_URL) + .default(DATABASE_PROVER_URL.as_str()) .ask() }); let prover_db_name = slugify(&self.prover_db_name.unwrap_or_else(|| { @@ -66,10 +64,8 @@ impl GenesisArgs { .ask() })); GenesisArgsFinal { - server_db_url, - server_db_name, - prover_db_url, - prover_db_name, + server_db: DatabaseConfig::new(server_db_url, server_db_name), + prover_db: DatabaseConfig::new(prover_db_url, prover_db_name), dont_drop: self.dont_drop, } } @@ -78,21 +74,7 @@ impl GenesisArgs { #[derive(Debug, Clone, Serialize, Deserialize)] pub struct GenesisArgsFinal { - pub server_db_url: String, - pub server_db_name: String, - pub prover_db_url: String, - pub prover_db_name: String, + pub server_db: DatabaseConfig, + pub prover_db: DatabaseConfig, pub dont_drop: bool, } - -impl GenesisArgsFinal { - pub fn databases_config(&self) -> anyhow::Result { - let server_url = Url::parse(&self.server_db_url)?; - let prover_url = Url::parse(&self.prover_db_url)?; - - Ok(DatabasesConfig { - server: DatabaseConfig::new(server_url, self.server_db_name.clone()), - prover: DatabaseConfig::new(prover_url, self.prover_db_name.clone()), - }) - } -} diff --git a/zk_toolbox/crates/zk_inception/src/commands/chain/genesis.rs 
b/zk_toolbox/crates/zk_inception/src/commands/chain/genesis.rs index 4ac4c0014046..8c4edc88290d 100644 --- a/zk_toolbox/crates/zk_inception/src/commands/chain/genesis.rs +++ b/zk_toolbox/crates/zk_inception/src/commands/chain/genesis.rs @@ -3,11 +3,11 @@ use std::path::PathBuf; use anyhow::Context; use common::{ config::global_config, - db::{drop_db_if_exists, init_db, migrate_db}, + db::{drop_db_if_exists, init_db, migrate_db, DatabaseConfig}, logger, spinner::Spinner, }; -use config::{ChainConfig, DatabasesConfig, EcosystemConfig}; +use config::{ChainConfig, EcosystemConfig}; use xshell::Shell; use super::args::genesis::GenesisArgsFinal; @@ -17,9 +17,9 @@ use crate::{ messages::{ MSG_CHAIN_NOT_INITIALIZED, MSG_FAILED_TO_DROP_PROVER_DATABASE_ERR, MSG_FAILED_TO_DROP_SERVER_DATABASE_ERR, MSG_GENESIS_COMPLETED, - MSG_GENESIS_DATABASE_CONFIG_ERR, MSG_INITIALIZING_DATABASES_SPINNER, - MSG_INITIALIZING_PROVER_DATABASE, MSG_INITIALIZING_SERVER_DATABASE, MSG_SELECTED_CONFIG, - MSG_STARTING_GENESIS, MSG_STARTING_GENESIS_SPINNER, + MSG_INITIALIZING_DATABASES_SPINNER, MSG_INITIALIZING_PROVER_DATABASE, + MSG_INITIALIZING_SERVER_DATABASE, MSG_SELECTED_CONFIG, MSG_STARTING_GENESIS, + MSG_STARTING_GENESIS_SPINNER, }, server::{RunServer, ServerMode}, }; @@ -50,17 +50,15 @@ pub async fn genesis( shell.remove_path(&config.rocks_db_path)?; shell.create_dir(&config.rocks_db_path)?; - let db_config = args - .databases_config() - .context(MSG_GENESIS_DATABASE_CONFIG_ERR)?; update_general_config(shell, config)?; - update_database_secrets(shell, config, &db_config)?; + update_database_secrets(shell, config, &args.server_db, &args.prover_db)?; logger::note( MSG_SELECTED_CONFIG, logger::object_to_string(serde_json::json!({ "chain_config": config, - "db_config": db_config, + "server_db_config": args.server_db, + "prover_db_config": args.prover_db, })), ); logger::info(MSG_STARTING_GENESIS); @@ -68,7 +66,8 @@ pub async fn genesis( let spinner = Spinner::new(MSG_INITIALIZING_DATABASES_SPINNER); initialize_databases( shell, - db_config, + &args.server_db, + &args.prover_db, config.link_to_code.clone(), args.dont_drop, ) @@ -84,7 +83,8 @@ pub async fn genesis( async fn initialize_databases( shell: &Shell, - db_config: DatabasesConfig, + server_db_config: &DatabaseConfig, + prover_db_config: &DatabaseConfig, link_to_code: PathBuf, dont_drop: bool, ) -> anyhow::Result<()> { @@ -94,15 +94,15 @@ async fn initialize_databases( logger::debug(MSG_INITIALIZING_SERVER_DATABASE) } if !dont_drop { - drop_db_if_exists(&db_config.server.base_url, &db_config.server.database_name) + drop_db_if_exists(server_db_config) .await .context(MSG_FAILED_TO_DROP_SERVER_DATABASE_ERR)?; - init_db(&db_config.server.base_url, &db_config.server.database_name).await?; + init_db(server_db_config).await?; } migrate_db( shell, path_to_server_migration, - &db_config.server.full_url(), + &server_db_config.full_url(), ) .await?; @@ -110,16 +110,16 @@ async fn initialize_databases( logger::debug(MSG_INITIALIZING_PROVER_DATABASE) } if !dont_drop { - drop_db_if_exists(&db_config.prover.base_url, &db_config.prover.database_name) + drop_db_if_exists(prover_db_config) .await .context(MSG_FAILED_TO_DROP_PROVER_DATABASE_ERR)?; - init_db(&db_config.prover.base_url, &db_config.prover.database_name).await?; + init_db(prover_db_config).await?; } let path_to_prover_migration = link_to_code.join(PROVER_MIGRATIONS); migrate_db( shell, path_to_prover_migration, - &db_config.prover.full_url(), + &prover_db_config.full_url(), ) .await?; diff --git 
a/zk_toolbox/crates/zk_inception/src/commands/ecosystem/init.rs b/zk_toolbox/crates/zk_inception/src/commands/ecosystem/init.rs index 951e8d116963..fecda40c7760 100644 --- a/zk_toolbox/crates/zk_inception/src/commands/ecosystem/init.rs +++ b/zk_toolbox/crates/zk_inception/src/commands/ecosystem/init.rs @@ -97,7 +97,7 @@ pub async fn run(args: EcosystemInitArgs, shell: &Shell) -> anyhow::Result<()> { }; for chain_name in &list_of_chains { - logger::info(msg_initializing_chain(&chain_name)); + logger::info(msg_initializing_chain(chain_name)); let chain_config = ecosystem_config .load_chain(Some(chain_name.clone())) .context(MSG_CHAIN_NOT_INITIALIZED)?; diff --git a/zk_toolbox/crates/zk_inception/src/commands/ecosystem/mod.rs b/zk_toolbox/crates/zk_inception/src/commands/ecosystem/mod.rs index 1e232b5cf6c6..e2db65b213f8 100644 --- a/zk_toolbox/crates/zk_inception/src/commands/ecosystem/mod.rs +++ b/zk_toolbox/crates/zk_inception/src/commands/ecosystem/mod.rs @@ -12,6 +12,7 @@ pub mod create_configs; mod init; #[derive(Subcommand, Debug)] +#[allow(clippy::large_enum_variant)] pub enum EcosystemCommands { /// Create a new ecosystem and chain, /// setting necessary configurations for later initialization diff --git a/zk_toolbox/crates/zk_inception/src/config_manipulations.rs b/zk_toolbox/crates/zk_inception/src/config_manipulations.rs index a5edcb7bde4a..3c350fa8d894 100644 --- a/zk_toolbox/crates/zk_inception/src/config_manipulations.rs +++ b/zk_toolbox/crates/zk_inception/src/config_manipulations.rs @@ -1,10 +1,11 @@ +use common::db::DatabaseConfig; use config::{ forge_interface::{ initialize_bridges::output::InitializeBridgeOutput, paymaster::DeployPaymasterOutput, register_chain::output::RegisterChainOutput, }, traits::{ReadConfigWithBasePath, SaveConfigWithBasePath}, - ChainConfig, ContractsConfig, DatabasesConfig, GeneralConfig, GenesisConfig, SecretsConfig, + ChainConfig, ContractsConfig, GeneralConfig, GenesisConfig, SecretsConfig, }; use types::ProverMode; use xshell::Shell; @@ -25,11 +26,12 @@ pub(crate) fn update_genesis(shell: &Shell, config: &ChainConfig) -> anyhow::Res pub(crate) fn update_database_secrets( shell: &Shell, config: &ChainConfig, - db_config: &DatabasesConfig, + server_db_config: &DatabaseConfig, + prover_db_config: &DatabaseConfig, ) -> anyhow::Result<()> { let mut secrets = SecretsConfig::read_with_base_path(shell, &config.configs)?; - secrets.database.server_url = db_config.server.full_url(); - secrets.database.prover_url = db_config.prover.full_url(); + secrets.database.server_url = server_db_config.full_url(); + secrets.database.prover_url = prover_db_config.full_url(); secrets.save_with_base_path(shell, &config.configs)?; Ok(()) } diff --git a/zk_toolbox/crates/zk_inception/src/defaults.rs b/zk_toolbox/crates/zk_inception/src/defaults.rs index 4b768abe907d..04b735e02275 100644 --- a/zk_toolbox/crates/zk_inception/src/defaults.rs +++ b/zk_toolbox/crates/zk_inception/src/defaults.rs @@ -1,7 +1,13 @@ use config::ChainConfig; +use lazy_static::lazy_static; +use url::Url; -pub const DATABASE_SERVER_URL: &str = "postgres://postgres:notsecurepassword@localhost:5432"; -pub const DATABASE_PROVER_URL: &str = "postgres://postgres:notsecurepassword@localhost:5432"; +lazy_static! 
{ + pub static ref DATABASE_SERVER_URL: Url = + Url::parse("postgres://postgres:notsecurepassword@localhost:5432").unwrap(); + pub static ref DATABASE_PROVER_URL: Url = + Url::parse("postgres://postgres:notsecurepassword@localhost:5432").unwrap(); +} pub const ROCKS_DB_STATE_KEEPER: &str = "main/state_keeper"; pub const ROCKS_DB_TREE: &str = "main/tree"; diff --git a/zk_toolbox/crates/zk_inception/src/messages.rs b/zk_toolbox/crates/zk_inception/src/messages.rs index 5745212a6270..799f1a5e2d7a 100644 --- a/zk_toolbox/crates/zk_inception/src/messages.rs +++ b/zk_toolbox/crates/zk_inception/src/messages.rs @@ -117,7 +117,6 @@ pub(super) const MSG_PROVER_DB_URL_HELP: &str = "Prover database url without dat pub(super) const MSG_PROVER_DB_NAME_HELP: &str = "Prover database name"; pub(super) const MSG_GENESIS_USE_DEFAULT_HELP: &str = "Use default database urls and names"; pub(super) const MSG_GENESIS_COMPLETED: &str = "Genesis completed successfully"; -pub(super) const MSG_GENESIS_DATABASE_CONFIG_ERR: &str = "Database config was not fully generated"; pub(super) const MSG_STARTING_GENESIS: &str = "Starting genesis process"; pub(super) const MSG_INITIALIZING_DATABASES_SPINNER: &str = "Initializing databases..."; pub(super) const MSG_STARTING_GENESIS_SPINNER: &str = diff --git a/zk_toolbox/crates/zk_supervisor/Cargo.toml b/zk_toolbox/crates/zk_supervisor/Cargo.toml index 74e04fc68aac..79d2bac74905 100644 --- a/zk_toolbox/crates/zk_supervisor/Cargo.toml +++ b/zk_toolbox/crates/zk_supervisor/Cargo.toml @@ -11,4 +11,13 @@ description.workspace = true keywords.workspace = true [dependencies] +anyhow.workspace = true +clap.workspace = true +common.workspace = true +config.workspace = true human-panic.workspace = true +strum.workspace = true +strum_macros.workspace = true +tokio.workspace = true +url.workspace = true +xshell.workspace = true diff --git a/zk_toolbox/crates/zk_supervisor/src/commands/database/args/mod.rs b/zk_toolbox/crates/zk_supervisor/src/commands/database/args/mod.rs new file mode 100644 index 000000000000..1541e7f518d8 --- /dev/null +++ b/zk_toolbox/crates/zk_supervisor/src/commands/database/args/mod.rs @@ -0,0 +1,41 @@ +use clap::Parser; + +use crate::{ + dals::SelectedDals, + messages::{MSG_DATABASE_COMMON_CORE_HELP, MSG_DATABASE_COMMON_PROVER_HELP}, +}; + +pub mod new_migration; + +#[derive(Debug, Parser)] +pub struct DatabaseCommonArgs { + #[clap(short, long, default_missing_value = "true", num_args = 0..=1, help = MSG_DATABASE_COMMON_PROVER_HELP)] + pub prover: Option, + #[clap(short, long, default_missing_value = "true", num_args = 0..=1, help = MSG_DATABASE_COMMON_CORE_HELP)] + pub core: Option, +} + +impl DatabaseCommonArgs { + pub fn parse(self) -> DatabaseCommonArgsFinal { + if self.prover.is_none() && self.core.is_none() { + return DatabaseCommonArgsFinal { + selected_dals: SelectedDals { + prover: true, + core: true, + }, + }; + } + + DatabaseCommonArgsFinal { + selected_dals: SelectedDals { + prover: self.prover.unwrap_or(false), + core: self.core.unwrap_or(false), + }, + } + } +} + +#[derive(Debug)] +pub struct DatabaseCommonArgsFinal { + pub selected_dals: SelectedDals, +} diff --git a/zk_toolbox/crates/zk_supervisor/src/commands/database/args/new_migration.rs b/zk_toolbox/crates/zk_supervisor/src/commands/database/args/new_migration.rs new file mode 100644 index 000000000000..ef053ca50c77 --- /dev/null +++ b/zk_toolbox/crates/zk_supervisor/src/commands/database/args/new_migration.rs @@ -0,0 +1,49 @@ +use clap::{Parser, ValueEnum}; +use common::{Prompt, PromptSelect}; 
+use strum::IntoEnumIterator; +use strum_macros::{Display, EnumIter}; + +use crate::messages::{ + MSG_DATABASE_NEW_MIGRATION_DATABASE_HELP, MSG_DATABASE_NEW_MIGRATION_DB_PROMPT, + MSG_DATABASE_NEW_MIGRATION_NAME_HELP, MSG_DATABASE_NEW_MIGRATION_NAME_PROMPT, +}; + +#[derive(Debug, Parser)] +pub struct DatabaseNewMigrationArgs { + #[clap(long, help = MSG_DATABASE_NEW_MIGRATION_DATABASE_HELP)] + pub database: Option, + #[clap(long, help = MSG_DATABASE_NEW_MIGRATION_NAME_HELP)] + pub name: Option, +} + +impl DatabaseNewMigrationArgs { + pub fn fill_values_with_prompt(self) -> DatabaseNewMigrationArgsFinal { + let selected_database = self.database.unwrap_or_else(|| { + PromptSelect::new( + MSG_DATABASE_NEW_MIGRATION_DB_PROMPT, + SelectedDatabase::iter(), + ) + .ask() + }); + let name = self + .name + .unwrap_or_else(|| Prompt::new(MSG_DATABASE_NEW_MIGRATION_NAME_PROMPT).ask()); + + DatabaseNewMigrationArgsFinal { + selected_database, + name, + } + } +} + +#[derive(Debug)] +pub struct DatabaseNewMigrationArgsFinal { + pub selected_database: SelectedDatabase, + pub name: String, +} + +#[derive(Debug, Clone, ValueEnum, EnumIter, PartialEq, Eq, Display)] +pub enum SelectedDatabase { + Prover, + Core, +} diff --git a/zk_toolbox/crates/zk_supervisor/src/commands/database/check_sqlx_data.rs b/zk_toolbox/crates/zk_supervisor/src/commands/database/check_sqlx_data.rs new file mode 100644 index 000000000000..6a5bc663dc7f --- /dev/null +++ b/zk_toolbox/crates/zk_supervisor/src/commands/database/check_sqlx_data.rs @@ -0,0 +1,59 @@ +use std::path::Path; + +use common::{cmd::Cmd, logger, spinner::Spinner}; +use config::EcosystemConfig; +use xshell::{cmd, Shell}; + +use super::args::DatabaseCommonArgs; +use crate::{ + dals::{get_dals, Dal}, + messages::{ + msg_database_info, msg_database_loading, msg_database_success, + MSG_DATABASE_CHECK_SQLX_DATA_GERUND, MSG_DATABASE_CHECK_SQLX_DATA_PAST, + MSG_NO_DATABASES_SELECTED, + }, +}; + +pub fn run(shell: &Shell, args: DatabaseCommonArgs) -> anyhow::Result<()> { + let args = args.parse(); + if args.selected_dals.none() { + logger::outro(MSG_NO_DATABASES_SELECTED); + return Ok(()); + } + + let ecosystem_config = EcosystemConfig::from_file(shell)?; + + logger::info(msg_database_info(MSG_DATABASE_CHECK_SQLX_DATA_GERUND)); + + let dals = get_dals(shell, &args.selected_dals)?; + for dal in dals { + check_sqlx_data(shell, &ecosystem_config.link_to_code, dal)?; + } + + logger::outro(msg_database_success(MSG_DATABASE_CHECK_SQLX_DATA_PAST)); + + Ok(()) +} + +pub fn check_sqlx_data( + shell: &Shell, + link_to_code: impl AsRef, + dal: Dal, +) -> anyhow::Result<()> { + let dir = link_to_code.as_ref().join(&dal.path); + let _dir_guard = shell.push_dir(dir); + let url = dal.url.as_str(); + + let spinner = Spinner::new(&msg_database_loading( + MSG_DATABASE_CHECK_SQLX_DATA_GERUND, + &dal.path, + )); + Cmd::new(cmd!( + shell, + "cargo sqlx prepare --check --database-url {url} -- --tests" + )) + .run()?; + spinner.finish(); + + Ok(()) +} diff --git a/zk_toolbox/crates/zk_supervisor/src/commands/database/drop.rs b/zk_toolbox/crates/zk_supervisor/src/commands/database/drop.rs new file mode 100644 index 000000000000..fb6996b40ee3 --- /dev/null +++ b/zk_toolbox/crates/zk_supervisor/src/commands/database/drop.rs @@ -0,0 +1,42 @@ +use common::{ + db::{drop_db_if_exists, DatabaseConfig}, + logger, + spinner::Spinner, +}; +use xshell::Shell; + +use super::args::DatabaseCommonArgs; +use crate::{ + dals::{get_dals, Dal}, + messages::{ + msg_database_info, msg_database_loading, 
msg_database_success, MSG_DATABASE_DROP_GERUND, + MSG_DATABASE_DROP_PAST, MSG_NO_DATABASES_SELECTED, + }, +}; + +pub async fn run(shell: &Shell, args: DatabaseCommonArgs) -> anyhow::Result<()> { + let args = args.parse(); + if args.selected_dals.none() { + logger::outro(MSG_NO_DATABASES_SELECTED); + return Ok(()); + } + + logger::info(msg_database_info(MSG_DATABASE_DROP_GERUND)); + + let dals = get_dals(shell, &args.selected_dals)?; + for dal in dals { + drop_database(dal).await?; + } + + logger::outro(msg_database_success(MSG_DATABASE_DROP_PAST)); + + Ok(()) +} + +pub async fn drop_database(dal: Dal) -> anyhow::Result<()> { + let spinner = Spinner::new(&msg_database_loading(MSG_DATABASE_DROP_GERUND, &dal.path)); + let db = DatabaseConfig::from_url(dal.url)?; + drop_db_if_exists(&db).await?; + spinner.finish(); + Ok(()) +} diff --git a/zk_toolbox/crates/zk_supervisor/src/commands/database/migrate.rs b/zk_toolbox/crates/zk_supervisor/src/commands/database/migrate.rs new file mode 100644 index 000000000000..72bc7d59148e --- /dev/null +++ b/zk_toolbox/crates/zk_supervisor/src/commands/database/migrate.rs @@ -0,0 +1,54 @@ +use std::path::Path; + +use common::{cmd::Cmd, logger, spinner::Spinner}; +use config::EcosystemConfig; +use xshell::{cmd, Shell}; + +use super::args::DatabaseCommonArgs; +use crate::{ + dals::{get_dals, Dal}, + messages::{ + msg_database_info, msg_database_loading, msg_database_success, MSG_DATABASE_MIGRATE_GERUND, + MSG_DATABASE_MIGRATE_PAST, MSG_NO_DATABASES_SELECTED, + }, +}; + +pub fn run(shell: &Shell, args: DatabaseCommonArgs) -> anyhow::Result<()> { + let args = args.parse(); + if args.selected_dals.none() { + logger::outro(MSG_NO_DATABASES_SELECTED); + return Ok(()); + } + + logger::info(msg_database_info(MSG_DATABASE_MIGRATE_GERUND)); + let ecosystem_config = EcosystemConfig::from_file(shell)?; + + let dals = get_dals(shell, &args.selected_dals)?; + for dal in dals { + migrate_database(shell, &ecosystem_config.link_to_code, dal)?; + } + + logger::outro(msg_database_success(MSG_DATABASE_MIGRATE_PAST)); + + Ok(()) +} + +fn migrate_database(shell: &Shell, link_to_code: impl AsRef, dal: Dal) -> anyhow::Result<()> { + let dir = link_to_code.as_ref().join(&dal.path); + let _dir_guard = shell.push_dir(dir); + let url = dal.url.as_str(); + + let spinner = Spinner::new(&msg_database_loading( + MSG_DATABASE_MIGRATE_GERUND, + &dal.path, + )); + Cmd::new(cmd!( + shell, + "cargo sqlx database create --database-url {url}" + )) + .run()?; + Cmd::new(cmd!(shell, "cargo sqlx migrate run --database-url {url}")).run()?; + spinner.finish(); + + Ok(()) +} diff --git a/zk_toolbox/crates/zk_supervisor/src/commands/database/mod.rs b/zk_toolbox/crates/zk_supervisor/src/commands/database/mod.rs new file mode 100644 index 000000000000..74c4063a6974 --- /dev/null +++ b/zk_toolbox/crates/zk_supervisor/src/commands/database/mod.rs @@ -0,0 +1,48 @@ +use clap::Subcommand; +use xshell::Shell; + +use self::args::{new_migration::DatabaseNewMigrationArgs, DatabaseCommonArgs}; +use crate::messages::{ + MSG_DATABASE_CHECK_SQLX_DATA_ABOUT, MSG_DATABASE_DROP_ABOUT, MSG_DATABASE_MIGRATE_ABOUT, + MSG_DATABASE_NEW_MIGRATION_ABOUT, MSG_DATABASE_PREPARE_ABOUT, MSG_DATABASE_RESET_ABOUT, + MSG_DATABASE_SETUP_ABOUT, +}; + +mod args; +mod check_sqlx_data; +mod drop; +mod migrate; +mod new_migration; +mod prepare; +mod reset; +mod setup; + +#[derive(Subcommand, Debug)] +pub enum DatabaseCommands { + #[clap(about = MSG_DATABASE_CHECK_SQLX_DATA_ABOUT)] + CheckSqlxData(DatabaseCommonArgs), + #[clap(about = 
MSG_DATABASE_DROP_ABOUT)] + Drop(DatabaseCommonArgs), + #[clap(about = MSG_DATABASE_MIGRATE_ABOUT)] + Migrate(DatabaseCommonArgs), + #[clap(about = MSG_DATABASE_NEW_MIGRATION_ABOUT)] + NewMigration(DatabaseNewMigrationArgs), + #[clap(about = MSG_DATABASE_PREPARE_ABOUT)] + Prepare(DatabaseCommonArgs), + #[clap(about = MSG_DATABASE_RESET_ABOUT)] + Reset(DatabaseCommonArgs), + #[clap(about = MSG_DATABASE_SETUP_ABOUT)] + Setup(DatabaseCommonArgs), +} + +pub async fn run(shell: &Shell, args: DatabaseCommands) -> anyhow::Result<()> { + match args { + DatabaseCommands::CheckSqlxData(args) => check_sqlx_data::run(shell, args), + DatabaseCommands::Drop(args) => drop::run(shell, args).await, + DatabaseCommands::Migrate(args) => migrate::run(shell, args), + DatabaseCommands::NewMigration(args) => new_migration::run(shell, args), + DatabaseCommands::Prepare(args) => prepare::run(shell, args), + DatabaseCommands::Reset(args) => reset::run(shell, args).await, + DatabaseCommands::Setup(args) => setup::run(shell, args), + } +} diff --git a/zk_toolbox/crates/zk_supervisor/src/commands/database/new_migration.rs b/zk_toolbox/crates/zk_supervisor/src/commands/database/new_migration.rs new file mode 100644 index 000000000000..127e01bdc10f --- /dev/null +++ b/zk_toolbox/crates/zk_supervisor/src/commands/database/new_migration.rs @@ -0,0 +1,43 @@ +use std::path::Path; + +use common::{cmd::Cmd, logger, spinner::Spinner}; +use config::EcosystemConfig; +use xshell::{cmd, Shell}; + +use super::args::new_migration::{DatabaseNewMigrationArgs, SelectedDatabase}; +use crate::{ + dals::{get_core_dal, get_prover_dal, Dal}, + messages::{msg_database_new_migration_loading, MSG_DATABASE_NEW_MIGRATION_SUCCESS}, +}; + +pub fn run(shell: &Shell, args: DatabaseNewMigrationArgs) -> anyhow::Result<()> { + let args = args.fill_values_with_prompt(); + + let dal = match args.selected_database { + SelectedDatabase::Core => get_core_dal(shell)?, + SelectedDatabase::Prover => get_prover_dal(shell)?, + }; + let ecosystem_config = EcosystemConfig::from_file(shell)?; + + generate_migration(shell, ecosystem_config.link_to_code, dal, args.name)?; + + logger::outro(MSG_DATABASE_NEW_MIGRATION_SUCCESS); + + Ok(()) +} + +fn generate_migration( + shell: &Shell, + link_to_code: impl AsRef, + dal: Dal, + name: String, +) -> anyhow::Result<()> { + let dir = link_to_code.as_ref().join(&dal.path); + let _dir_guard = shell.push_dir(dir); + + let spinner = Spinner::new(&msg_database_new_migration_loading(&dal.path)); + Cmd::new(cmd!(shell, "cargo sqlx migrate add -r {name}")).run()?; + spinner.finish(); + + Ok(()) +} diff --git a/zk_toolbox/crates/zk_supervisor/src/commands/database/prepare.rs b/zk_toolbox/crates/zk_supervisor/src/commands/database/prepare.rs new file mode 100644 index 000000000000..48f32319ac55 --- /dev/null +++ b/zk_toolbox/crates/zk_supervisor/src/commands/database/prepare.rs @@ -0,0 +1,58 @@ +use std::path::Path; + +use common::{cmd::Cmd, logger, spinner::Spinner}; +use config::EcosystemConfig; +use xshell::{cmd, Shell}; + +use super::args::DatabaseCommonArgs; +use crate::{ + dals::{get_dals, Dal}, + messages::{ + msg_database_info, msg_database_loading, msg_database_success, MSG_DATABASE_PREPARE_GERUND, + MSG_DATABASE_PREPARE_PAST, MSG_NO_DATABASES_SELECTED, + }, +}; + +pub fn run(shell: &Shell, args: DatabaseCommonArgs) -> anyhow::Result<()> { + let args = args.parse(); + if args.selected_dals.none() { + logger::outro(MSG_NO_DATABASES_SELECTED); + return Ok(()); + } + + let ecosystem_config = EcosystemConfig::from_file(shell)?; + 
+    logger::info(msg_database_info(MSG_DATABASE_PREPARE_GERUND));
+
+    let dals = get_dals(shell, &args.selected_dals)?;
+    for dal in dals {
+        prepare_sqlx_data(shell, &ecosystem_config.link_to_code, dal)?;
+    }
+
+    logger::outro(msg_database_success(MSG_DATABASE_PREPARE_PAST));
+
+    Ok(())
+}
+
+pub fn prepare_sqlx_data(
+    shell: &Shell,
+    link_to_code: impl AsRef<Path>,
+    dal: Dal,
+) -> anyhow::Result<()> {
+    let dir = link_to_code.as_ref().join(&dal.path);
+    let _dir_guard = shell.push_dir(dir);
+    let url = dal.url.as_str();
+
+    let spinner = Spinner::new(&msg_database_loading(
+        MSG_DATABASE_PREPARE_GERUND,
+        &dal.path,
+    ));
+    Cmd::new(cmd!(
+        shell,
+        "cargo sqlx prepare --database-url {url} -- --tests"
+    ))
+    .run()?;
+    spinner.finish();
+
+    Ok(())
+}
diff --git a/zk_toolbox/crates/zk_supervisor/src/commands/database/reset.rs b/zk_toolbox/crates/zk_supervisor/src/commands/database/reset.rs
new file mode 100644
index 000000000000..aa813a155510
--- /dev/null
+++ b/zk_toolbox/crates/zk_supervisor/src/commands/database/reset.rs
@@ -0,0 +1,46 @@
+use std::path::Path;
+
+use common::logger;
+use config::EcosystemConfig;
+use xshell::Shell;
+
+use super::{args::DatabaseCommonArgs, drop::drop_database, setup::setup_database};
+use crate::{
+    dals::{get_dals, Dal},
+    messages::{
+        msg_database_info, msg_database_loading, msg_database_success, MSG_DATABASE_RESET_GERUND,
+        MSG_DATABASE_RESET_PAST, MSG_NO_DATABASES_SELECTED,
+    },
+};
+
+pub async fn run(shell: &Shell, args: DatabaseCommonArgs) -> anyhow::Result<()> {
+    let args = args.parse();
+    if args.selected_dals.none() {
+        logger::outro(MSG_NO_DATABASES_SELECTED);
+        return Ok(());
+    }
+
+    let ecosystem_config = EcosystemConfig::from_file(shell)?;
+
+    logger::info(msg_database_info(MSG_DATABASE_RESET_GERUND));
+
+    let dals = get_dals(shell, &args.selected_dals)?;
+    for dal in dals {
+        logger::info(&msg_database_loading(MSG_DATABASE_RESET_GERUND, &dal.path));
+        reset_database(shell, ecosystem_config.link_to_code.clone(), dal).await?;
+    }
+
+    logger::outro(msg_database_success(MSG_DATABASE_RESET_PAST));
+
+    Ok(())
+}
+
+async fn reset_database(
+    shell: &Shell,
+    link_to_code: impl AsRef<Path>,
+    dal: Dal,
+) -> anyhow::Result<()> {
+    drop_database(dal.clone()).await?;
+    setup_database(shell, link_to_code, dal)?;
+    Ok(())
+}
diff --git a/zk_toolbox/crates/zk_supervisor/src/commands/database/setup.rs b/zk_toolbox/crates/zk_supervisor/src/commands/database/setup.rs
new file mode 100644
index 000000000000..d9d37041774b
--- /dev/null
+++ b/zk_toolbox/crates/zk_supervisor/src/commands/database/setup.rs
@@ -0,0 +1,56 @@
+use std::path::Path;
+
+use common::{cmd::Cmd, logger, spinner::Spinner};
+use config::EcosystemConfig;
+use xshell::{cmd, Shell};
+
+use super::args::DatabaseCommonArgs;
+use crate::{
+    dals::{get_dals, Dal},
+    messages::{
+        msg_database_info, msg_database_loading, msg_database_success, MSG_DATABASE_SETUP_GERUND,
+        MSG_DATABASE_SETUP_PAST, MSG_NO_DATABASES_SELECTED,
+    },
+};
+
+pub fn run(shell: &Shell, args: DatabaseCommonArgs) -> anyhow::Result<()> {
+    let args = args.parse();
+    if args.selected_dals.none() {
+        logger::outro(MSG_NO_DATABASES_SELECTED);
+        return Ok(());
+    }
+
+    let ecosystem_config = EcosystemConfig::from_file(shell)?;
+
+    logger::info(msg_database_info(MSG_DATABASE_SETUP_GERUND));
+
+    let dals = get_dals(shell, &args.selected_dals)?;
+    for dal in dals {
+        setup_database(shell, &ecosystem_config.link_to_code, dal)?;
+    }
+
+    logger::outro(msg_database_success(MSG_DATABASE_SETUP_PAST));
+
+    Ok(())
+}
+
+pub fn setup_database(
+
shell: &Shell, + link_to_code: impl AsRef, + dal: Dal, +) -> anyhow::Result<()> { + let dir = link_to_code.as_ref().join(&dal.path); + let _dir_guard = shell.push_dir(dir); + let url = dal.url.as_str(); + + let spinner = Spinner::new(&msg_database_loading(MSG_DATABASE_SETUP_GERUND, &dal.path)); + Cmd::new(cmd!( + shell, + "cargo sqlx database create --database-url {url}" + )) + .run()?; + Cmd::new(cmd!(shell, "cargo sqlx migrate run --database-url {url}")).run()?; + spinner.finish(); + + Ok(()) +} diff --git a/zk_toolbox/crates/zk_supervisor/src/commands/mod.rs b/zk_toolbox/crates/zk_supervisor/src/commands/mod.rs new file mode 100644 index 000000000000..8fd0a6be869b --- /dev/null +++ b/zk_toolbox/crates/zk_supervisor/src/commands/mod.rs @@ -0,0 +1 @@ +pub mod database; diff --git a/zk_toolbox/crates/zk_supervisor/src/dals.rs b/zk_toolbox/crates/zk_supervisor/src/dals.rs new file mode 100644 index 000000000000..f2f6f86cfc61 --- /dev/null +++ b/zk_toolbox/crates/zk_supervisor/src/dals.rs @@ -0,0 +1,70 @@ +use anyhow::anyhow; +use common::config::global_config; +use config::{EcosystemConfig, SecretsConfig}; +use url::Url; +use xshell::Shell; + +use crate::messages::MSG_CHAIN_NOT_FOUND_ERR; + +const CORE_DAL_PATH: &str = "core/lib/dal"; +const PROVER_DAL_PATH: &str = "prover/prover_dal"; + +#[derive(Debug, Clone)] +pub struct SelectedDals { + pub prover: bool, + pub core: bool, +} + +impl SelectedDals { + /// Returns true if no databases are selected + pub fn none(&self) -> bool { + !self.prover && !self.core + } +} + +#[derive(Debug, Clone)] +pub struct Dal { + pub path: String, + pub url: Url, +} + +pub fn get_dals(shell: &Shell, selected_dals: &SelectedDals) -> anyhow::Result> { + let mut dals = vec![]; + + if selected_dals.prover { + dals.push(get_prover_dal(shell)?); + } + if selected_dals.core { + dals.push(get_core_dal(shell)?); + } + + Ok(dals) +} + +pub fn get_prover_dal(shell: &Shell) -> anyhow::Result { + let secrets = get_secrets(shell)?; + + Ok(Dal { + path: PROVER_DAL_PATH.to_string(), + url: secrets.database.prover_url.clone(), + }) +} + +pub fn get_core_dal(shell: &Shell) -> anyhow::Result { + let secrets = get_secrets(shell)?; + + Ok(Dal { + path: CORE_DAL_PATH.to_string(), + url: secrets.database.server_url.clone(), + }) +} + +fn get_secrets(shell: &Shell) -> anyhow::Result { + let ecosystem_config = EcosystemConfig::from_file(shell)?; + let chain_config = ecosystem_config + .load_chain(global_config().chain_name.clone()) + .ok_or(anyhow!(MSG_CHAIN_NOT_FOUND_ERR))?; + let secrets = chain_config.get_secrets_config()?; + + Ok(secrets) +} diff --git a/zk_toolbox/crates/zk_supervisor/src/main.rs b/zk_toolbox/crates/zk_supervisor/src/main.rs index 9936141be106..24daaba35347 100644 --- a/zk_toolbox/crates/zk_supervisor/src/main.rs +++ b/zk_toolbox/crates/zk_supervisor/src/main.rs @@ -1,4 +1,112 @@ -fn main() { +use clap::{Parser, Subcommand}; +use commands::database::DatabaseCommands; +use common::{ + check_prerequisites, + config::{global_config, init_global_config, GlobalConfig}, + init_prompt_theme, logger, +}; +use config::EcosystemConfig; +use messages::msg_global_chain_does_not_exist; +use xshell::Shell; + +mod commands; +mod dals; +mod messages; + +#[derive(Parser, Debug)] +#[command(version, about)] +struct Supervisor { + #[command(subcommand)] + command: SupervisorSubcommands, + #[clap(flatten)] + global: SupervisorGlobalArgs, +} + +#[derive(Subcommand, Debug)] +enum SupervisorSubcommands { + /// Database related commands + #[command(subcommand)] + 
Database(DatabaseCommands), +} + +#[derive(Parser, Debug)] +#[clap(next_help_heading = "Global options")] +struct SupervisorGlobalArgs { + /// Verbose mode + #[clap(short, long, global = true)] + verbose: bool, + /// Chain to use + #[clap(long, global = true)] + chain: Option, + /// Ignores prerequisites checks + #[clap(long, global = true)] + ignore_prerequisites: bool, +} + +#[tokio::main] +async fn main() -> anyhow::Result<()> { human_panic::setup_panic!(); - println!("Hello, world!"); + + init_prompt_theme(); + + logger::new_empty_line(); + logger::intro(); + + let shell = Shell::new().unwrap(); + let args = Supervisor::parse(); + + init_global_config_inner(&shell, &args.global)?; + + if !global_config().ignore_prerequisites { + check_prerequisites(&shell); + } + + match run_subcommand(args, &shell).await { + Ok(_) => {} + Err(e) => { + logger::error(e.to_string()); + + if e.chain().count() > 1 { + logger::error_note( + "Caused by:", + &e.chain() + .skip(1) + .enumerate() + .map(|(i, cause)| format!(" {i}: {}", cause)) + .collect::>() + .join("\n"), + ); + } + + logger::outro("Failed"); + std::process::exit(1); + } + } + + Ok(()) +} + +async fn run_subcommand(args: Supervisor, shell: &Shell) -> anyhow::Result<()> { + match args.command { + SupervisorSubcommands::Database(command) => commands::database::run(shell, command).await?, + } + Ok(()) +} + +fn init_global_config_inner(shell: &Shell, args: &SupervisorGlobalArgs) -> anyhow::Result<()> { + if let Some(name) = &args.chain { + if let Ok(config) = EcosystemConfig::from_file(shell) { + let chains = config.list_of_chains(); + if !chains.contains(name) { + anyhow::bail!(msg_global_chain_does_not_exist(name, &chains.join(", "))); + } + } + } + + init_global_config(GlobalConfig { + verbose: args.verbose, + chain_name: args.chain.clone(), + ignore_prerequisites: args.ignore_prerequisites, + }); + Ok(()) } diff --git a/zk_toolbox/crates/zk_supervisor/src/messages.rs b/zk_toolbox/crates/zk_supervisor/src/messages.rs new file mode 100644 index 000000000000..97152396b5e5 --- /dev/null +++ b/zk_toolbox/crates/zk_supervisor/src/messages.rs @@ -0,0 +1,59 @@ +// Ecosystem related messages +pub(super) const MSG_CHAIN_NOT_FOUND_ERR: &str = "Chain not found"; +pub(super) fn msg_global_chain_does_not_exist(chain: &str, available_chains: &str) -> String { + format!("Chain with name {chain} doesnt exist, please choose one of: {available_chains}") +} + +// Database related messages +pub(super) const MSG_NO_DATABASES_SELECTED: &str = "No databases selected"; +pub(super) fn msg_database_info(gerund_verb: &str) -> String { + format!("{gerund_verb} databases") +} +pub(super) fn msg_database_success(past_verb: &str) -> String { + format!("Databases {past_verb} successfully") +} +pub(super) fn msg_database_loading(gerund_verb: &str, dal: &str) -> String { + format!("{gerund_verb} database for dal {dal}...") +} + +pub(super) const MSG_DATABASE_CHECK_SQLX_DATA_GERUND: &str = "Checking"; +pub(super) const MSG_DATABASE_CHECK_SQLX_DATA_PAST: &str = "checked"; +pub(super) const MSG_DATABASE_DROP_GERUND: &str = "Dropping"; +pub(super) const MSG_DATABASE_DROP_PAST: &str = "dropped"; +pub(super) const MSG_DATABASE_MIGRATE_GERUND: &str = "Migrating"; +pub(super) const MSG_DATABASE_MIGRATE_PAST: &str = "migrated"; +pub(super) const MSG_DATABASE_PREPARE_GERUND: &str = "Preparing"; +pub(super) const MSG_DATABASE_PREPARE_PAST: &str = "prepared"; +pub(super) const MSG_DATABASE_RESET_GERUND: &str = "Resetting"; +pub(super) const MSG_DATABASE_RESET_PAST: &str = "reset"; 
+pub(super) const MSG_DATABASE_SETUP_GERUND: &str = "Setting up"; +pub(super) const MSG_DATABASE_SETUP_PAST: &str = "set up"; + +pub(super) const MSG_DATABASE_COMMON_PROVER_HELP: &str = "Prover database"; +pub(super) const MSG_DATABASE_COMMON_CORE_HELP: &str = "Core database"; +pub(super) const MSG_DATABASE_NEW_MIGRATION_DATABASE_HELP: &str = + "Database to create new migration for"; +pub(super) const MSG_DATABASE_NEW_MIGRATION_NAME_HELP: &str = "Migration name"; + +pub(super) const MSG_DATABASE_CHECK_SQLX_DATA_ABOUT: &str = "Check sqlx-data.json is up to date. If no databases are selected, all databases will be checked."; +pub(super) const MSG_DATABASE_DROP_ABOUT: &str = + "Drop databases. If no databases are selected, all databases will be dropped."; +pub(super) const MSG_DATABASE_MIGRATE_ABOUT: &str = + "Migrate databases. If no databases are selected, all databases will be migrated."; +pub(super) const MSG_DATABASE_NEW_MIGRATION_ABOUT: &str = "Create new migration"; +pub(super) const MSG_DATABASE_PREPARE_ABOUT: &str = + "Prepare sqlx-data.json. If no databases are selected, all databases will be prepared."; +pub(super) const MSG_DATABASE_RESET_ABOUT: &str = + "Reset databases. If no databases are selected, all databases will be reset."; +pub(super) const MSG_DATABASE_SETUP_ABOUT: &str = + "Setup databases. If no databases are selected, all databases will be setup."; + +// Database new_migration messages +pub(super) const MSG_DATABASE_NEW_MIGRATION_DB_PROMPT: &str = + "What database do you want to create a new migration for?"; +pub(super) const MSG_DATABASE_NEW_MIGRATION_NAME_PROMPT: &str = + "How do you want to name the migration?"; +pub(super) fn msg_database_new_migration_loading(dal: &str) -> String { + format!("Creating new database migration for dal {}...", dal) +} +pub(super) const MSG_DATABASE_NEW_MIGRATION_SUCCESS: &str = "Migration created successfully"; From 4aa8ef40ed6c4fd8119d86011ba3e1d097047b8b Mon Sep 17 00:00:00 2001 From: dimazhornyk Date: Tue, 4 Jun 2024 15:29:01 +0200 Subject: [PATCH 38/69] feat: fully utilize the node framework for external DA --- Cargo.lock | 37 ++++---- Cargo.toml | 3 +- core/bin/zksync_server/Cargo.toml | 1 + core/bin/zksync_server/src/node_builder.rs | 22 ++--- core/lib/config/Cargo.toml | 1 - core/lib/config/src/configs/da_dispatcher.rs | 25 ------ core/lib/da_client/Cargo.toml | 3 +- core/lib/da_client/src/lib.rs | 34 +++++++- core/lib/da_client/src/types.rs | 48 +++++++++++ core/lib/default_da_clients/Cargo.toml | 22 +++++ .../src/gcs/client.rs} | 2 +- core/lib/default_da_clients/src/gcs/mod.rs | 2 + .../src/gcs/wiring_layer.rs | 36 ++++++++ core/lib/default_da_clients/src/lib.rs | 3 + .../src/no_da/client.rs} | 2 +- core/lib/default_da_clients/src/no_da/mod.rs | 2 + .../src/no_da/wiring_layer.rs | 32 +++++++ core/lib/env_config/Cargo.toml | 1 - core/lib/env_config/src/da_dispatcher.rs | 84 +----------------- core/lib/protobuf_config/Cargo.toml | 1 - core/lib/protobuf_config/src/da_dispatcher.rs | 85 +++---------------- .../src/proto/config/da_dispatcher.proto | 16 +--- core/lib/zksync_core_leftovers/Cargo.toml | 2 +- core/lib/zksync_core_leftovers/src/lib.rs | 20 +---- core/node/da_dispatcher/Cargo.toml | 2 +- core/node/da_dispatcher/src/da_dispatcher.rs | 2 +- core/node/node_framework/Cargo.toml | 1 - .../node/node_framework/examples/main_node.rs | 7 +- .../src/implementations/layers/da_client.rs | 83 ------------------ .../implementations/layers/da_dispatcher.rs | 16 +++- .../src/implementations/layers/mod.rs | 1 - 
.../implementations/resources/da_client.rs | 2 +- 32 files changed, 252 insertions(+), 346 deletions(-) create mode 100644 core/lib/da_client/src/types.rs create mode 100644 core/lib/default_da_clients/Cargo.toml rename core/lib/{da_client/src/gcs/mod.rs => default_da_clients/src/gcs/client.rs} (99%) create mode 100644 core/lib/default_da_clients/src/gcs/mod.rs create mode 100644 core/lib/default_da_clients/src/gcs/wiring_layer.rs create mode 100644 core/lib/default_da_clients/src/lib.rs rename core/lib/{da_client/src/no_da/mod.rs => default_da_clients/src/no_da/client.rs} (97%) create mode 100644 core/lib/default_da_clients/src/no_da/mod.rs create mode 100644 core/lib/default_da_clients/src/no_da/wiring_layer.rs delete mode 100644 core/node/node_framework/src/implementations/layers/da_client.rs diff --git a/Cargo.lock b/Cargo.lock index edfd97a49026..286f95177a54 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -2819,16 +2819,6 @@ dependencies = [ "tokio-native-tls", ] -[[package]] -name = "hyperchain_da" -version = "0.1.0" -source = "git+https://github.com/matter-labs/hyperchain-da.git?rev=822542abff3e7c9e42c6e65f4ba29f289a979d3d#822542abff3e7c9e42c6e65f4ba29f289a979d3d" -dependencies = [ - "anyhow", - "async-trait", - "serde", -] - [[package]] name = "iai" version = "0.1.1" @@ -8122,7 +8112,6 @@ name = "zksync_config" version = "0.1.0" dependencies = [ "anyhow", - "hyperchain_da", "rand 0.8.5", "secrecy", "serde", @@ -8369,7 +8358,6 @@ dependencies = [ "futures 0.3.28", "governor", "hex", - "hyperchain_da", "itertools 0.10.5", "jsonrpsee", "lru", @@ -8414,6 +8402,7 @@ dependencies = [ "zksync_da_dispatcher", "zksync_dal", "zksync_db_connection", + "zksync_default_da_clients", "zksync_eth_client", "zksync_eth_sender", "zksync_eth_signer", @@ -8486,10 +8475,9 @@ version = "0.1.0" dependencies = [ "anyhow", "async-trait", - "hyperchain_da", + "serde", "tracing", "zksync_config", - "zksync_object_store", "zksync_types", ] @@ -8499,12 +8487,12 @@ version = "0.1.0" dependencies = [ "anyhow", "chrono", - "hyperchain_da", "rand 0.8.5", "tokio", "tracing", "vise", "zksync_config", + "zksync_da_client", "zksync_dal", "zksync_types", "zksync_utils", @@ -8559,13 +8547,27 @@ dependencies = [ "zksync_health_check", ] +[[package]] +name = "zksync_default_da_clients" +version = "0.1.0" +dependencies = [ + "anyhow", + "async-trait", + "serde", + "tracing", + "zksync_config", + "zksync_da_client", + "zksync_node_framework", + "zksync_object_store", + "zksync_types", +] + [[package]] name = "zksync_env_config" version = "0.1.0" dependencies = [ "anyhow", "envy", - "hyperchain_da", "serde", "zksync_basic_types", "zksync_config", @@ -8967,7 +8969,6 @@ dependencies = [ "async-trait", "ctrlc", "futures 0.3.28", - "hyperchain_da", "prometheus_exporter", "prover_dal", "thiserror", @@ -9159,7 +9160,6 @@ version = "0.1.0" dependencies = [ "anyhow", "hex", - "hyperchain_da", "prost 0.12.1", "rand 0.8.5", "secrecy", @@ -9239,6 +9239,7 @@ dependencies = [ "zksync_consensus_executor", "zksync_consensus_roles", "zksync_core_leftovers", + "zksync_default_da_clients", "zksync_env_config", "zksync_eth_client", "zksync_metadata_calculator", diff --git a/Cargo.toml b/Cargo.toml index 97c803a298c6..93a22889ad93 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -45,6 +45,7 @@ members = [ "core/lib/dal", "core/lib/env_config", "core/lib/da_client", + "core/lib/default_da_clients", "core/lib/eth_client", "core/lib/eth_signer", "core/lib/l1_contract_interface", @@ -202,7 +203,6 @@ zksync_consensus_storage = { version = "0.1.0", git = 
"https://github.com/matter zksync_consensus_utils = { version = "0.1.0", git = "https://github.com/matter-labs/era-consensus.git", rev = "3e6f101ee4124308c4c974caaa259d524549b0c6" } zksync_protobuf = { version = "0.1.0", git = "https://github.com/matter-labs/era-consensus.git", rev = "3e6f101ee4124308c4c974caaa259d524549b0c6" } zksync_protobuf_build = { version = "0.1.0", git = "https://github.com/matter-labs/era-consensus.git", rev = "3e6f101ee4124308c4c974caaa259d524549b0c6" } -zksync_da_layers = { package = "hyperchain_da", git = "https://github.com/matter-labs/hyperchain-da.git", rev = "822542abff3e7c9e42c6e65f4ba29f289a979d3d" } # "Local" dependencies multivm = { path = "core/lib/multivm" } @@ -223,6 +223,7 @@ zksync_db_connection = { path = "core/lib/db_connection" } zksync_env_config = { path = "core/lib/env_config" } zksync_eth_client = { path = "core/lib/eth_client" } zksync_da_client = { path = "core/lib/da_client" } +zksync_default_da_clients = { path = "core/lib/default_da_clients" } zksync_eth_signer = { path = "core/lib/eth_signer" } zksync_health_check = { path = "core/lib/health_check" } zksync_l1_contract_interface = { path = "core/lib/l1_contract_interface" } diff --git a/core/bin/zksync_server/Cargo.toml b/core/bin/zksync_server/Cargo.toml index a2f9067872e2..54bcdb93b979 100644 --- a/core/bin/zksync_server/Cargo.toml +++ b/core/bin/zksync_server/Cargo.toml @@ -20,6 +20,7 @@ zksync_utils.workspace = true zksync_types.workspace = true zksync_core_leftovers.workspace = true zksync_node_genesis.workspace = true +zksync_default_da_clients.workspace = true # Consensus dependenices zksync_consensus_crypto.workspace = true diff --git a/core/bin/zksync_server/src/node_builder.rs b/core/bin/zksync_server/src/node_builder.rs index 3aa1c9e15ee9..f0925a7849da 100644 --- a/core/bin/zksync_server/src/node_builder.rs +++ b/core/bin/zksync_server/src/node_builder.rs @@ -8,6 +8,7 @@ use zksync_config::{ ContractsConfig, GenesisConfig, }; use zksync_core_leftovers::Component; +use zksync_default_da_clients::no_da::wiring_layer::NoDAClientWiringLayer; use zksync_metadata_calculator::MetadataCalculatorConfig; use zksync_node_api_server::{ tx_sender::{ApiContracts, TxSenderConfig}, @@ -19,7 +20,6 @@ use zksync_node_framework::{ commitment_generator::CommitmentGeneratorLayer, consensus::{ConsensusLayer, Mode as ConsensusMode}, contract_verification_api::ContractVerificationApiLayer, - da_client::DataAvailabilityClientLayer, da_dispatcher::DataAvailabilityDispatcherLayer, eth_sender::{EthTxAggregatorLayer, EthTxManagerLayer}, eth_watch::EthWatchLayer, @@ -401,22 +401,18 @@ impl MainNodeBuilder { Ok(self) } - fn add_da_client_layer(mut self) -> anyhow::Result { - let eth_sender_config = try_load_config!(self.configs.eth); - let da_config = try_load_config!(self.configs.da_dispatcher_config); - let state_keeper_config = try_load_config!(self.configs.state_keeper_config); - self.node.add_layer(DataAvailabilityClientLayer::new( - da_config, - eth_sender_config, - state_keeper_config, - )); + fn add_no_da_client_layer(mut self) -> anyhow::Result { + self.node.add_layer(NoDAClientWiringLayer::new()); Ok(self) } fn add_da_dispatcher_layer(mut self) -> anyhow::Result { + let state_keeper_config = try_load_config!(self.configs.state_keeper_config); let da_config = try_load_config!(self.configs.da_dispatcher_config); - self.node - .add_layer(DataAvailabilityDispatcherLayer::new(da_config)); + self.node.add_layer(DataAvailabilityDispatcherLayer::new( + state_keeper_config, + da_config, + )); Ok(self) } 
@@ -502,7 +498,7 @@ impl MainNodeBuilder { self = self.add_commitment_generator_layer()?; } Component::DADispatcher => { - self = self.add_da_client_layer()?.add_da_dispatcher_layer()?; + self = self.add_no_da_client_layer()?.add_da_dispatcher_layer()?; } } } diff --git a/core/lib/config/Cargo.toml b/core/lib/config/Cargo.toml index a11c6aefac5d..24e05696fd5a 100644 --- a/core/lib/config/Cargo.toml +++ b/core/lib/config/Cargo.toml @@ -13,7 +13,6 @@ categories.workspace = true zksync_basic_types.workspace = true zksync_crypto_primitives.workspace = true zksync_consensus_utils.workspace = true -zksync_da_layers.workspace = true zksync_system_constants.workspace = true anyhow.workspace = true diff --git a/core/lib/config/src/configs/da_dispatcher.rs b/core/lib/config/src/configs/da_dispatcher.rs index e78816352796..288e93a03569 100644 --- a/core/lib/config/src/configs/da_dispatcher.rs +++ b/core/lib/config/src/configs/da_dispatcher.rs @@ -1,32 +1,13 @@ use std::time::Duration; use serde::Deserialize; -use zksync_da_layers::config::DALayerConfig; - -use crate::ObjectStoreConfig; pub const DEFAULT_POLLING_INTERVAL_MS: u32 = 5000; pub const DEFAULT_QUERY_ROWS_LIMIT: u32 = 100; pub const DEFAULT_MAX_RETRIES: u16 = 5; -#[derive(Clone, Debug, PartialEq, Deserialize)] -#[serde(tag = "da_mode")] -pub enum DataAvailabilityMode { - /// Uses the data availability layer to dispatch pubdata. - DALayer(DALayerConfig), - /// Stores the pubdata in the Object Store(GCS/S3/...). - ObjectStore(ObjectStoreConfig), - /// Does not store the pubdata. - NoDA, -} - #[derive(Debug, Clone, PartialEq, Deserialize)] pub struct DADispatcherConfig { - /// The mode of the data availability layer. This defines the underlying client that will be - /// used, and the configuration for that client. - #[serde(flatten)] - pub da_mode: DataAvailabilityMode, - /// The interval at which the dispatcher will poll the DA layer for inclusion data. pub polling_interval_ms: Option, /// The maximum number of rows to query from the database in a single query. pub query_rows_limit: Option, @@ -37,12 +18,6 @@ pub struct DADispatcherConfig { impl DADispatcherConfig { pub fn for_tests() -> Self { Self { - da_mode: DataAvailabilityMode::DALayer(DALayerConfig::Celestia( - zksync_da_layers::clients::celestia::config::CelestiaConfig { - light_node_url: "localhost:12345".to_string(), - private_key: "0x0".to_string(), - }, - )), polling_interval_ms: Some(DEFAULT_POLLING_INTERVAL_MS), query_rows_limit: Some(DEFAULT_QUERY_ROWS_LIMIT), max_retries: Some(DEFAULT_MAX_RETRIES), diff --git a/core/lib/da_client/Cargo.toml b/core/lib/da_client/Cargo.toml index 21ca99366b84..8dae145426b3 100644 --- a/core/lib/da_client/Cargo.toml +++ b/core/lib/da_client/Cargo.toml @@ -10,11 +10,10 @@ keywords.workspace = true categories.workspace = true [dependencies] +serde = { workspace = true, features = ["derive"] } tracing = "0.1.40" async-trait = "0.1.74" anyhow.workspace = true zksync_config.workspace = true zksync_types.workspace = true -zksync_da_layers.workspace = true -zksync_object_store.workspace = true diff --git a/core/lib/da_client/src/lib.rs b/core/lib/da_client/src/lib.rs index 6a4994931375..6fa31283ff73 100644 --- a/core/lib/da_client/src/lib.rs +++ b/core/lib/da_client/src/lib.rs @@ -1,2 +1,32 @@ -pub mod gcs; -pub mod no_da; +pub mod types; + +use std::fmt; + +use async_trait::async_trait; +use types::{DAError, DispatchResponse, InclusionData}; + +/// Trait that defines the interface for the data availability layer clients. 
+#[async_trait]
+pub trait DataAvailabilityClient: Sync + Send + fmt::Debug {
+    /// Dispatches a blob to the data availability layer.
+    async fn dispatch_blob(
+        &self,
+        batch_number: u32,
+        data: Vec<u8>,
+    ) -> Result<DispatchResponse, DAError>;
+
+    /// Fetches the inclusion data for a given blob_id.
+    async fn get_inclusion_data(&self, blob_id: String) -> Result<Option<InclusionData>, DAError>;
+
+    /// Clones the client and wraps it in a Box.
+    fn clone_boxed(&self) -> Box<dyn DataAvailabilityClient>;
+
+    /// Returns the maximum size of the blob (in bytes) that can be dispatched.
+    fn blob_size_limit(&self) -> usize;
+}
+
+impl Clone for Box<dyn DataAvailabilityClient> {
+    fn clone(&self) -> Box<dyn DataAvailabilityClient> {
+        self.clone_boxed()
+    }
+}
diff --git a/core/lib/da_client/src/types.rs b/core/lib/da_client/src/types.rs
new file mode 100644
index 000000000000..3d9ea6e85983
--- /dev/null
+++ b/core/lib/da_client/src/types.rs
@@ -0,0 +1,48 @@
+use std::{error, fmt::Display};
+
+use serde::Serialize;
+
+/// DAError is the error type returned by the DA clients.
+#[derive(Debug)]
+pub struct DAError {
+    pub error: anyhow::Error,
+    pub is_transient: bool,
+}
+
+impl Display for DAError {
+    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
+        write!(
+            f,
+            "DAError: {}, is_transient: {}",
+            self.error, self.is_transient
+        )
+    }
+}
+
+impl IsTransient for DAError {
+    fn is_transient(&self) -> bool {
+        self.is_transient
+    }
+}
+
+/// Trait that defines whether an error is transient or not, i.e. if it is safe to retry the operation.
+pub trait IsTransient {
+    fn is_transient(&self) -> bool;
+}
+
+impl error::Error for DAError {}
+
+/// DispatchResponse is the response received from the DA layer after dispatching a blob.
+#[derive(Default)]
+pub struct DispatchResponse {
+    /// The blob_id is needed to fetch the inclusion data.
+    pub blob_id: String,
+}
+
+/// InclusionData is the data needed to prove that a blob is included in the DA layer.
+#[derive(Default, Serialize)]
+pub struct InclusionData {
+    /// The inclusion data serialized by the DA client. Serialization is done in a way that allows
+    /// the deserialization of the data in Solidity contracts.
+ pub data: Vec, +} diff --git a/core/lib/default_da_clients/Cargo.toml b/core/lib/default_da_clients/Cargo.toml new file mode 100644 index 000000000000..ee668343875e --- /dev/null +++ b/core/lib/default_da_clients/Cargo.toml @@ -0,0 +1,22 @@ +[package] +name = "zksync_default_da_clients" +version.workspace = true +edition.workspace = true +authors.workspace = true +homepage.workspace = true +repository.workspace = true +license.workspace = true +keywords.workspace = true +categories.workspace = true + +[dependencies] +serde = { workspace = true, features = ["derive"] } +tracing = "0.1.40" +async-trait = "0.1.74" +anyhow.workspace = true + +zksync_config.workspace = true +zksync_types.workspace = true +zksync_object_store.workspace = true +zksync_da_client.workspace = true +zksync_node_framework.workspace = true diff --git a/core/lib/da_client/src/gcs/mod.rs b/core/lib/default_da_clients/src/gcs/client.rs similarity index 99% rename from core/lib/da_client/src/gcs/mod.rs rename to core/lib/default_da_clients/src/gcs/client.rs index 57c1085bf08c..78c086d50f8f 100644 --- a/core/lib/da_client/src/gcs/mod.rs +++ b/core/lib/default_da_clients/src/gcs/client.rs @@ -6,7 +6,7 @@ use std::{ use async_trait::async_trait; use zksync_config::ObjectStoreConfig; -use zksync_da_layers::{ +use zksync_da_client::{ types::{DAError, DispatchResponse, InclusionData}, DataAvailabilityClient, }; diff --git a/core/lib/default_da_clients/src/gcs/mod.rs b/core/lib/default_da_clients/src/gcs/mod.rs new file mode 100644 index 000000000000..814cf30c2cbd --- /dev/null +++ b/core/lib/default_da_clients/src/gcs/mod.rs @@ -0,0 +1,2 @@ +pub mod client; +pub mod wiring_layer; diff --git a/core/lib/default_da_clients/src/gcs/wiring_layer.rs b/core/lib/default_da_clients/src/gcs/wiring_layer.rs new file mode 100644 index 000000000000..71320ab1aa7c --- /dev/null +++ b/core/lib/default_da_clients/src/gcs/wiring_layer.rs @@ -0,0 +1,36 @@ +use zksync_config::ObjectStoreConfig; +use zksync_da_client::DataAvailabilityClient; +use zksync_node_framework::{ + implementations::resources::da_client::DAClientResource, + service::ServiceContext, + wiring_layer::{WiringError, WiringLayer}, +}; + +use crate::gcs::client::ObjectStoreDAClient; + +#[derive(Debug)] +pub struct ObjectStorageClientWiringLayer { + config: ObjectStoreConfig, +} + +impl ObjectStorageClientWiringLayer { + pub fn new(config: ObjectStoreConfig) -> Self { + Self { config } + } +} + +#[async_trait::async_trait] +impl WiringLayer for ObjectStorageClientWiringLayer { + fn layer_name(&self) -> &'static str { + "object_store_da_layer" + } + + async fn wire(self: Box, mut context: ServiceContext<'_>) -> Result<(), WiringError> { + let client: Box = + Box::new(ObjectStoreDAClient::new(self.config).await?); + + context.insert_resource(DAClientResource(client))?; + + Ok(()) + } +} diff --git a/core/lib/default_da_clients/src/lib.rs b/core/lib/default_da_clients/src/lib.rs new file mode 100644 index 000000000000..ac73de6deb41 --- /dev/null +++ b/core/lib/default_da_clients/src/lib.rs @@ -0,0 +1,3 @@ +pub mod gcs; + +pub mod no_da; diff --git a/core/lib/da_client/src/no_da/mod.rs b/core/lib/default_da_clients/src/no_da/client.rs similarity index 97% rename from core/lib/da_client/src/no_da/mod.rs rename to core/lib/default_da_clients/src/no_da/client.rs index 5f233d623861..6e939b9abceb 100644 --- a/core/lib/da_client/src/no_da/mod.rs +++ b/core/lib/default_da_clients/src/no_da/client.rs @@ -1,7 +1,7 @@ use std::fmt::Debug; use async_trait::async_trait; -use zksync_da_layers::{ 
+use zksync_da_client::{ types::{DAError, DispatchResponse, InclusionData}, DataAvailabilityClient, }; diff --git a/core/lib/default_da_clients/src/no_da/mod.rs b/core/lib/default_da_clients/src/no_da/mod.rs new file mode 100644 index 000000000000..814cf30c2cbd --- /dev/null +++ b/core/lib/default_da_clients/src/no_da/mod.rs @@ -0,0 +1,2 @@ +pub mod client; +pub mod wiring_layer; diff --git a/core/lib/default_da_clients/src/no_da/wiring_layer.rs b/core/lib/default_da_clients/src/no_da/wiring_layer.rs new file mode 100644 index 000000000000..f605eff35869 --- /dev/null +++ b/core/lib/default_da_clients/src/no_da/wiring_layer.rs @@ -0,0 +1,32 @@ +use zksync_da_client::DataAvailabilityClient; +use zksync_node_framework::{ + implementations::resources::da_client::DAClientResource, + service::ServiceContext, + wiring_layer::{WiringError, WiringLayer}, +}; + +use crate::no_da::client::NoDAClient; + +#[derive(Debug)] +pub struct NoDAClientWiringLayer {} + +impl NoDAClientWiringLayer { + pub fn new() -> Self { + Self {} + } +} + +#[async_trait::async_trait] +impl WiringLayer for NoDAClientWiringLayer { + fn layer_name(&self) -> &'static str { + "no_da_layer" + } + + async fn wire(self: Box, mut context: ServiceContext<'_>) -> Result<(), WiringError> { + let client: Box = Box::new(NoDAClient::new()); + + context.insert_resource(DAClientResource(client))?; + + Ok(()) + } +} diff --git a/core/lib/env_config/Cargo.toml b/core/lib/env_config/Cargo.toml index 344c91a2260c..c86621584010 100644 --- a/core/lib/env_config/Cargo.toml +++ b/core/lib/env_config/Cargo.toml @@ -12,7 +12,6 @@ categories.workspace = true [dependencies] zksync_basic_types.workspace = true zksync_config.workspace = true -zksync_da_layers.workspace = true anyhow.workspace = true serde.workspace = true diff --git a/core/lib/env_config/src/da_dispatcher.rs b/core/lib/env_config/src/da_dispatcher.rs index fea67c8880b3..ffbc71d68fc8 100644 --- a/core/lib/env_config/src/da_dispatcher.rs +++ b/core/lib/env_config/src/da_dispatcher.rs @@ -10,113 +10,35 @@ impl FromEnv for DADispatcherConfig { #[cfg(test)] mod tests { - use zksync_config::configs::{ - da_dispatcher::{DADispatcherConfig, DataAvailabilityMode}, - object_store::{ObjectStoreConfig, ObjectStoreMode}, - }; - use zksync_da_layers::{clients::celestia::config::CelestiaConfig, config::DALayerConfig}; + use zksync_config::configs::da_dispatcher::DADispatcherConfig; use super::*; use crate::test_utils::EnvMutex; static MUTEX: EnvMutex = EnvMutex::new(); - fn expected_gcs_config( - bucket_base_url: &str, - interval: u32, - rows_limit: u32, - max_retries: u16, - ) -> DADispatcherConfig { - DADispatcherConfig { - da_mode: DataAvailabilityMode::ObjectStore(ObjectStoreConfig { - mode: ObjectStoreMode::GCSWithCredentialFile { - bucket_base_url: bucket_base_url.to_owned(), - gcs_credential_file_path: "/path/to/credentials.json".to_owned(), - }, - max_retries: 5, - }), - polling_interval_ms: Some(interval), - query_rows_limit: Some(rows_limit), - max_retries: Some(max_retries), - } - } - fn expected_celestia_da_layer_config( - pk: &str, interval: u32, rows_limit: u32, max_retries: u16, ) -> DADispatcherConfig { DADispatcherConfig { - da_mode: DataAvailabilityMode::DALayer(DALayerConfig::Celestia(CelestiaConfig { - light_node_url: "localhost:12345".to_string(), - private_key: pk.to_owned(), - })), polling_interval_ms: Some(interval), query_rows_limit: Some(rows_limit), max_retries: Some(max_retries), } } - fn expected_no_da_config() -> DADispatcherConfig { - DADispatcherConfig { - da_mode: 
DataAvailabilityMode::NoDA, - polling_interval_ms: None, - query_rows_limit: None, - max_retries: None, - } - } - #[test] - fn from_env_da_layer() { + fn from_env_da_dispatcher() { let mut lock = MUTEX.lock(); let config = r#" DA_DISPATCHER_POLLING_INTERVAL=5 DA_DISPATCHER_QUERY_ROWS_LIMIT=60 DA_DISPATCHER_MAX_RETRIES=7 - DA_DISPATCHER_DA_MODE="DALayer" - DA_DISPATCHER_CLIENT_NAME="Celestia" - DA_DISPATCHER_LIGHT_NODE_URL="localhost:12345" - DA_DISPATCHER_PRIVATE_KEY="0xf55baf7c0e4e33b1d78fbf52f069c426bc36cff1aceb9bc8f45d14c07f034d73" - "#; - lock.set_env(config); - let actual = DADispatcherConfig::from_env().unwrap(); - assert_eq!( - actual, - expected_celestia_da_layer_config( - "0xf55baf7c0e4e33b1d78fbf52f069c426bc36cff1aceb9bc8f45d14c07f034d73", - 5, - 60, - 7 - ) - ); - } - - #[test] - fn from_env_no_da() { - let mut lock = MUTEX.lock(); - let config = r#" - DA_DISPATCHER_DA_MODE="NoDA" - "#; - lock.set_env(config); - let actual = DADispatcherConfig::from_env().unwrap(); - assert_eq!(actual, expected_no_da_config()); - } - - #[test] - fn from_env_object_store() { - let mut lock = MUTEX.lock(); - let config = r#" - DA_DISPATCHER_POLLING_INTERVAL=10 - DA_DISPATCHER_QUERY_ROWS_LIMIT=100 - DA_DISPATCHER_MAX_RETRIES=6 - DA_DISPATCHER_DA_MODE="GCS" - DA_DISPATCHER_MODE="GCSWithCredentialFile" - DA_DISPATCHER_GCS_CREDENTIAL_FILE_PATH="/path/to/credentials.json" - DA_DISPATCHER_BUCKET_BASE_URL="/base/url" "#; lock.set_env(config); let actual = DADispatcherConfig::from_env().unwrap(); - assert_eq!(actual, expected_gcs_config("/base/url", 10, 100, 6)); + assert_eq!(actual, expected_celestia_da_layer_config(5, 60, 7)); } } diff --git a/core/lib/protobuf_config/Cargo.toml b/core/lib/protobuf_config/Cargo.toml index 3595413ff901..ee52d8d5472f 100644 --- a/core/lib/protobuf_config/Cargo.toml +++ b/core/lib/protobuf_config/Cargo.toml @@ -18,7 +18,6 @@ zksync_basic_types.workspace = true zksync_config.workspace = true zksync_protobuf.workspace = true zksync_types.workspace = true -zksync_da_layers.workspace = true anyhow.workspace = true prost.workspace = true diff --git a/core/lib/protobuf_config/src/da_dispatcher.rs b/core/lib/protobuf_config/src/da_dispatcher.rs index 9681da9cf01f..670a2c802f38 100644 --- a/core/lib/protobuf_config/src/da_dispatcher.rs +++ b/core/lib/protobuf_config/src/da_dispatcher.rs @@ -1,89 +1,24 @@ -use anyhow::{Context, Error}; -use zksync_config::configs::{self, da_dispatcher::DataAvailabilityMode}; -use zksync_da_layers::config::DALayerConfig; +use anyhow::Context; +use zksync_config::configs::{self}; use zksync_protobuf::{required, ProtoRepr}; -use crate::proto::{da_dispatcher as proto, object_store::ObjectStore}; +use crate::proto::da_dispatcher as proto; impl ProtoRepr for proto::DataAvailabilityDispatcher { type Type = configs::da_dispatcher::DADispatcherConfig; fn read(&self) -> anyhow::Result { - match &self.mode { - Some(proto::data_availability_dispatcher::Mode::DaLayer(config)) => { - let da_config = match required(&config.name).context("da_layer_name")?.as_str() { - "celestia" => DALayerConfig::Celestia( - zksync_da_layers::clients::celestia::config::CelestiaConfig { - light_node_url: required(&config.light_node_url) - .context("light_node_url")? - .clone(), - private_key: required(&config.private_key) - .context("private_key")? - .clone(), - }, - ), - _ => { - return Err(Error::msg(format!( - "Unknown DA layer name: {}", - required(&config.name).context("da_layer_name")? 
- ))) - } - }; - Ok(configs::da_dispatcher::DADispatcherConfig { - da_mode: DataAvailabilityMode::DALayer(da_config), - polling_interval_ms: Some( - *required(&self.polling_interval).context("polling_interval")?, - ), - query_rows_limit: Some( - *required(&self.query_rows_limit).context("query_rows_limit")?, - ), - max_retries: Some( - *required(&self.max_retries).context("query_rows_limit")? as u16 - ), - }) - } - Some(proto::data_availability_dispatcher::Mode::ObjectStore(config)) => { - Ok(configs::da_dispatcher::DADispatcherConfig { - da_mode: DataAvailabilityMode::ObjectStore(config.read()?), - polling_interval_ms: Some( - *required(&self.polling_interval).context("polling_interval")?, - ), - query_rows_limit: Some( - *required(&self.query_rows_limit).context("query_rows_limit")?, - ), - max_retries: Some( - *required(&self.max_retries).context("query_rows_limit")? as u16 - ), - }) - } - None => Ok(configs::da_dispatcher::DADispatcherConfig { - da_mode: DataAvailabilityMode::NoDA, - polling_interval_ms: None, - query_rows_limit: None, - max_retries: None, - }), - } + Ok(configs::da_dispatcher::DADispatcherConfig { + polling_interval_ms: Some( + *required(&self.polling_interval).context("polling_interval")?, + ), + query_rows_limit: Some(*required(&self.query_rows_limit).context("query_rows_limit")?), + max_retries: Some(*required(&self.max_retries).context("query_rows_limit")? as u16), + }) } fn build(this: &Self::Type) -> Self { - let mode = match this.da_mode.clone() { - DataAvailabilityMode::DALayer(info) => match info { - DALayerConfig::Celestia(info) => Some( - proto::data_availability_dispatcher::Mode::DaLayer(proto::DaLayer { - name: Some("celestia".to_string()), - private_key: Some(info.private_key.clone()), - light_node_url: Some(info.light_node_url.clone()), - }), - ), - }, - DataAvailabilityMode::ObjectStore(config) => Some( - proto::data_availability_dispatcher::Mode::ObjectStore(ObjectStore::build(&config)), - ), - DataAvailabilityMode::NoDA => None, - }; - Self { - mode, polling_interval: this.polling_interval_ms, query_rows_limit: this.query_rows_limit, max_retries: this.max_retries.map(|x| x as u32), diff --git a/core/lib/protobuf_config/src/proto/config/da_dispatcher.proto b/core/lib/protobuf_config/src/proto/config/da_dispatcher.proto index ec575bbae60c..702b01c9ea22 100644 --- a/core/lib/protobuf_config/src/proto/config/da_dispatcher.proto +++ b/core/lib/protobuf_config/src/proto/config/da_dispatcher.proto @@ -4,18 +4,8 @@ package zksync.config.da_dispatcher; import "zksync/config/object_store.proto"; -message DALayer { - optional string name = 1; // required - optional string private_key = 2; // required - optional string light_node_url = 3; -} - message DataAvailabilityDispatcher { - oneof mode { - config.object_store.ObjectStore object_store = 1; - DALayer da_layer = 2; - } - optional uint32 polling_interval = 3; - optional uint32 query_rows_limit = 4; - optional uint32 max_retries = 5; + optional uint32 polling_interval = 1; + optional uint32 query_rows_limit = 2; + optional uint32 max_retries = 3; } diff --git a/core/lib/zksync_core_leftovers/Cargo.toml b/core/lib/zksync_core_leftovers/Cargo.toml index d62d5220ac9f..4742c6385f30 100644 --- a/core/lib/zksync_core_leftovers/Cargo.toml +++ b/core/lib/zksync_core_leftovers/Cargo.toml @@ -47,7 +47,7 @@ zksync_node_genesis.workspace = true zksync_eth_sender.workspace = true zksync_da_dispatcher.workspace = true zksync_da_client.workspace = true -zksync_da_layers.workspace = true +zksync_default_da_clients.workspace = 
true zksync_node_fee_model.workspace = true zksync_state_keeper.workspace = true zksync_metadata_calculator.workspace = true diff --git a/core/lib/zksync_core_leftovers/src/lib.rs b/core/lib/zksync_core_leftovers/src/lib.rs index 67bde30080d6..367a0095f01f 100644 --- a/core/lib/zksync_core_leftovers/src/lib.rs +++ b/core/lib/zksync_core_leftovers/src/lib.rs @@ -27,7 +27,6 @@ use zksync_config::{ api::{MerkleTreeApiConfig, Web3JsonRpcConfig}, chain::{CircuitBreakerConfig, MempoolConfig, OperationsManagerConfig, StateKeeperConfig}, consensus::ConsensusConfig, - da_dispatcher::DataAvailabilityMode, database::{MerkleTreeConfig, MerkleTreeMode}, eth_sender::PubdataSendingMode, wallets, @@ -37,13 +36,11 @@ use zksync_config::{ ApiConfig, DBConfig, EthWatchConfig, GenesisConfig, }; use zksync_contracts::governance_contract; -use zksync_da_client::{gcs::ObjectStoreDAClient, no_da::NoDAClient}; +use zksync_da_client::DataAvailabilityClient; use zksync_da_dispatcher::DataAvailabilityDispatcher; -use zksync_da_layers::{ - clients::celestia::CelestiaClient, config::DALayerConfig, DataAvailabilityClient, -}; use zksync_dal::{metrics::PostgresMetrics, ConnectionPool, Core, CoreDal}; use zksync_db_connection::healthcheck::ConnectionPoolHealthCheck; +use zksync_default_da_clients::no_da::client::NoDAClient; use zksync_eth_client::{clients::PKSigningClient, BoundEthInterface}; use zksync_eth_sender::{Aggregator, EthTxAggregator, EthTxManager}; use zksync_eth_watch::{EthHttpQueryClient, EthWatch}; @@ -769,17 +766,8 @@ pub async fn initialize_components( .build() .await .context("failed to build da_dispatcher_pool")?; - let da_client: Box = match da_config.clone().da_mode { - DataAvailabilityMode::ObjectStore(config) => { - Box::new(ObjectStoreDAClient::new(config).await?) 
- } - DataAvailabilityMode::NoDA => Box::new(NoDAClient::new()), - DataAvailabilityMode::DALayer(config) => match config { - DALayerConfig::Celestia(celestia_config) => { - Box::new(CelestiaClient::new(celestia_config)) - } - }, - }; + let da_client: Box = Box::new(NoDAClient::new()); // use the NoDAClient as a default one + let da_dispatcher = DataAvailabilityDispatcher::new(da_dispatcher_pool, da_config, da_client); task_futures.push(tokio::spawn(da_dispatcher.run(stop_receiver.clone()))); diff --git a/core/node/da_dispatcher/Cargo.toml b/core/node/da_dispatcher/Cargo.toml index 4bee01e05118..41999d8ff5c7 100644 --- a/core/node/da_dispatcher/Cargo.toml +++ b/core/node/da_dispatcher/Cargo.toml @@ -16,7 +16,7 @@ zksync_dal.workspace = true zksync_utils.workspace = true zksync_config.workspace = true zksync_types.workspace = true -zksync_da_layers.workspace = true +zksync_da_client.workspace = true tokio = { workspace = true, features = ["time"] } anyhow.workspace = true diff --git a/core/node/da_dispatcher/src/da_dispatcher.rs b/core/node/da_dispatcher/src/da_dispatcher.rs index c246c5a5103e..7ae71076234f 100644 --- a/core/node/da_dispatcher/src/da_dispatcher.rs +++ b/core/node/da_dispatcher/src/da_dispatcher.rs @@ -5,7 +5,7 @@ use chrono::{NaiveDateTime, Utc}; use rand::Rng; use tokio::sync::watch; use zksync_config::DADispatcherConfig; -use zksync_da_layers::{ +use zksync_da_client::{ types::{DAError, IsTransient}, DataAvailabilityClient, }; diff --git a/core/node/node_framework/Cargo.toml b/core/node/node_framework/Cargo.toml index 5b454db4db40..09c77ece6577 100644 --- a/core/node/node_framework/Cargo.toml +++ b/core/node/node_framework/Cargo.toml @@ -35,7 +35,6 @@ zksync_node_fee_model.workspace = true zksync_eth_sender.workspace = true zksync_da_client.workspace = true zksync_da_dispatcher.workspace = true -zksync_da_layers.workspace = true zksync_block_reverter.workspace = true zksync_state_keeper.workspace = true zksync_consistency_checker.workspace = true diff --git a/core/node/node_framework/examples/main_node.rs b/core/node/node_framework/examples/main_node.rs index 8a2f51d143b3..d4e769a82afc 100644 --- a/core/node/node_framework/examples/main_node.rs +++ b/core/node/node_framework/examples/main_node.rs @@ -328,9 +328,12 @@ impl MainNodeBuilder { } fn add_da_dispatcher_layer(mut self) -> anyhow::Result { + let state_keeper_config = StateKeeperConfig::from_env()?; let da_config = DADispatcherConfig::from_env()?; - self.node - .add_layer(DataAvailabilityDispatcherLayer::new(da_config)); + self.node.add_layer(DataAvailabilityDispatcherLayer::new( + state_keeper_config, + da_config, + )); Ok(self) } diff --git a/core/node/node_framework/src/implementations/layers/da_client.rs b/core/node/node_framework/src/implementations/layers/da_client.rs deleted file mode 100644 index 91e9c006b6db..000000000000 --- a/core/node/node_framework/src/implementations/layers/da_client.rs +++ /dev/null @@ -1,83 +0,0 @@ -use zksync_config::{ - configs::{ - chain::StateKeeperConfig, - da_dispatcher::{DADispatcherConfig, DataAvailabilityMode}, - eth_sender::PubdataSendingMode, - }, - EthConfig, -}; -use zksync_da_client::{gcs::ObjectStoreDAClient, no_da::NoDAClient}; -use zksync_da_layers::{ - clients::celestia::CelestiaClient, config::DALayerConfig, DataAvailabilityClient, -}; - -use crate::{ - implementations::resources::da_client::DAClientResource, - service::ServiceContext, - wiring_layer::{WiringError, WiringLayer}, -}; - -#[derive(Debug)] -pub struct DataAvailabilityClientLayer { - da_config: 
DADispatcherConfig, - eth_config: EthConfig, - state_keeper_config: StateKeeperConfig, -} - -impl DataAvailabilityClientLayer { - pub fn new( - da_config: DADispatcherConfig, - eth_config: EthConfig, - state_keeper_config: StateKeeperConfig, - ) -> Self { - Self { - da_config, - eth_config, - state_keeper_config, - } - } -} - -#[async_trait::async_trait] -impl WiringLayer for DataAvailabilityClientLayer { - fn layer_name(&self) -> &'static str { - "da_client_layer" - } - - async fn wire(self: Box, mut context: ServiceContext<'_>) -> Result<(), WiringError> { - if self - .eth_config - .sender - .ok_or(WiringError::Configuration( - "missing the eth_sender config".to_string(), - ))? - .pubdata_sending_mode - != PubdataSendingMode::Custom - { - panic!("DA client layer requires custom pubdata sending mode"); - } - - // this can be broken down into the separate layers, but that would require the operator to - // wire the right one manually, which is less convenient than the current approach, which - // uses the config to determine the right client - let client: Box = match self.da_config.da_mode { - DataAvailabilityMode::ObjectStore(config) => { - Box::new(ObjectStoreDAClient::new(config).await?) - } - DataAvailabilityMode::NoDA => Box::new(NoDAClient::new()), - DataAvailabilityMode::DALayer(config) => match config { - DALayerConfig::Celestia(celestia_config) => { - Box::new(CelestiaClient::new(celestia_config)) - } - }, - }; - - if self.state_keeper_config.max_pubdata_per_batch > client.blob_size_limit() as u64 { - panic!("State keeper max pubdata per batch is greater than the client blob size limit"); - } - - context.insert_resource(DAClientResource(client))?; - - Ok(()) - } -} diff --git a/core/node/node_framework/src/implementations/layers/da_dispatcher.rs b/core/node/node_framework/src/implementations/layers/da_dispatcher.rs index bbab646ecb0d..9ee39c97909e 100644 --- a/core/node/node_framework/src/implementations/layers/da_dispatcher.rs +++ b/core/node/node_framework/src/implementations/layers/da_dispatcher.rs @@ -1,5 +1,5 @@ -use zksync_config::configs::da_dispatcher::DADispatcherConfig; -use zksync_da_layers::DataAvailabilityClient; +use zksync_config::configs::{chain::StateKeeperConfig, da_dispatcher::DADispatcherConfig}; +use zksync_da_client::DataAvailabilityClient; use zksync_dal::Core; use zksync_db_connection::connection_pool::ConnectionPool; @@ -16,12 +16,16 @@ use crate::{ /// A layer that wires the data availability dispatcher task. 
#[derive(Debug)] pub struct DataAvailabilityDispatcherLayer { + state_keeper_config: StateKeeperConfig, da_config: DADispatcherConfig, } impl DataAvailabilityDispatcherLayer { - pub fn new(da_config: DADispatcherConfig) -> Self { - Self { da_config } + pub fn new(state_keeper_config: StateKeeperConfig, da_config: DADispatcherConfig) -> Self { + Self { + state_keeper_config, + da_config, + } } } @@ -36,6 +40,10 @@ impl WiringLayer for DataAvailabilityDispatcherLayer { let master_pool = master_pool_resource.get().await?; let da_client = context.get_resource::().await?.0; + if self.state_keeper_config.max_pubdata_per_batch > da_client.blob_size_limit() as u64 { + panic!("State keeper max pubdata per batch is greater than the client blob size limit"); + } + context.add_task(Box::new(DataAvailabilityDispatcherTask { main_pool: master_pool, da_config: self.da_config, diff --git a/core/node/node_framework/src/implementations/layers/mod.rs b/core/node/node_framework/src/implementations/layers/mod.rs index 57ab7fa21866..4eb13caf3f9d 100644 --- a/core/node/node_framework/src/implementations/layers/mod.rs +++ b/core/node/node_framework/src/implementations/layers/mod.rs @@ -3,7 +3,6 @@ pub mod commitment_generator; pub mod consensus; pub mod consistency_checker; pub mod contract_verification_api; -pub mod da_client; pub mod da_dispatcher; pub mod eth_sender; pub mod eth_watch; diff --git a/core/node/node_framework/src/implementations/resources/da_client.rs b/core/node/node_framework/src/implementations/resources/da_client.rs index 90027220aa11..525164cb9b10 100644 --- a/core/node/node_framework/src/implementations/resources/da_client.rs +++ b/core/node/node_framework/src/implementations/resources/da_client.rs @@ -1,4 +1,4 @@ -use zksync_da_layers::DataAvailabilityClient; +use zksync_da_client::DataAvailabilityClient; use crate::resource::Resource; From 683f739252ce8637370dd4188c9f70670daf6390 Mon Sep 17 00:00:00 2001 From: dimazhornyk Date: Wed, 5 Jun 2024 14:44:05 +0200 Subject: [PATCH 39/69] arrangements for better usage of node-framework --- Cargo.lock | 1 + core/bin/zksync_server/src/node_builder.rs | 14 +++++++++++++- core/lib/default_da_clients/Cargo.toml | 1 + core/lib/default_da_clients/src/gcs/config.rs | 14 ++++++++++++++ core/lib/default_da_clients/src/gcs/mod.rs | 1 + core/lib/env_config/src/da_dispatcher.rs | 4 ++-- prover/Cargo.lock | 12 ------------ 7 files changed, 32 insertions(+), 15 deletions(-) create mode 100644 core/lib/default_da_clients/src/gcs/config.rs diff --git a/Cargo.lock b/Cargo.lock index fad079a92bcc..141341fe4ae4 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -8557,6 +8557,7 @@ dependencies = [ "tracing", "zksync_config", "zksync_da_client", + "zksync_env_config", "zksync_node_framework", "zksync_object_store", "zksync_types", diff --git a/core/bin/zksync_server/src/node_builder.rs b/core/bin/zksync_server/src/node_builder.rs index 054e1276c49a..41a5d57c72c7 100644 --- a/core/bin/zksync_server/src/node_builder.rs +++ b/core/bin/zksync_server/src/node_builder.rs @@ -8,7 +8,10 @@ use zksync_config::{ ContractsConfig, GenesisConfig, }; use zksync_core_leftovers::Component; -use zksync_default_da_clients::no_da::wiring_layer::NoDAClientWiringLayer; +use zksync_default_da_clients::{ + gcs::{config::ObjectStoreDAConfig, wiring_layer::ObjectStorageClientWiringLayer}, + no_da::wiring_layer::NoDAClientWiringLayer, +}; use zksync_metadata_calculator::MetadataCalculatorConfig; use zksync_node_api_server::{ tx_sender::{ApiContracts, TxSenderConfig}, @@ -407,6 +410,15 @@ impl 
MainNodeBuilder { Ok(self) } + #[allow(dead_code)] + fn add_object_storage_da_client_layer(mut self) -> anyhow::Result { + let object_store_config = ObjectStoreDAConfig::from_env()?; + self.node.add_layer(ObjectStorageClientWiringLayer::new( + object_store_config.config, + )); + Ok(self) + } + fn add_da_dispatcher_layer(mut self) -> anyhow::Result { let state_keeper_config = try_load_config!(self.configs.state_keeper_config); let da_config = try_load_config!(self.configs.da_dispatcher_config); diff --git a/core/lib/default_da_clients/Cargo.toml b/core/lib/default_da_clients/Cargo.toml index ee668343875e..9682d4ba19c4 100644 --- a/core/lib/default_da_clients/Cargo.toml +++ b/core/lib/default_da_clients/Cargo.toml @@ -20,3 +20,4 @@ zksync_types.workspace = true zksync_object_store.workspace = true zksync_da_client.workspace = true zksync_node_framework.workspace = true +zksync_env_config.workspace = true diff --git a/core/lib/default_da_clients/src/gcs/config.rs b/core/lib/default_da_clients/src/gcs/config.rs new file mode 100644 index 000000000000..427aa3dd0d9c --- /dev/null +++ b/core/lib/default_da_clients/src/gcs/config.rs @@ -0,0 +1,14 @@ +use zksync_config::ObjectStoreConfig; +use zksync_env_config::envy_load; + +pub struct ObjectStoreDAConfig { + pub config: ObjectStoreConfig, +} + +impl ObjectStoreDAConfig { + pub fn from_env() -> anyhow::Result { + Ok(Self { + config: envy_load("object_store", "OBJECT_STORE_DA_CLIENT_")?, + }) + } +} diff --git a/core/lib/default_da_clients/src/gcs/mod.rs b/core/lib/default_da_clients/src/gcs/mod.rs index 814cf30c2cbd..bc66b1789845 100644 --- a/core/lib/default_da_clients/src/gcs/mod.rs +++ b/core/lib/default_da_clients/src/gcs/mod.rs @@ -1,2 +1,3 @@ pub mod client; +pub mod config; pub mod wiring_layer; diff --git a/core/lib/env_config/src/da_dispatcher.rs b/core/lib/env_config/src/da_dispatcher.rs index ffbc71d68fc8..be24c72b9b40 100644 --- a/core/lib/env_config/src/da_dispatcher.rs +++ b/core/lib/env_config/src/da_dispatcher.rs @@ -17,7 +17,7 @@ mod tests { static MUTEX: EnvMutex = EnvMutex::new(); - fn expected_celestia_da_layer_config( + fn expected_da_layer_config( interval: u32, rows_limit: u32, max_retries: u16, @@ -39,6 +39,6 @@ mod tests { "#; lock.set_env(config); let actual = DADispatcherConfig::from_env().unwrap(); - assert_eq!(actual, expected_celestia_da_layer_config(5, 60, 7)); + assert_eq!(actual, expected_da_layer_config(5, 60, 7)); } } diff --git a/prover/Cargo.lock b/prover/Cargo.lock index fcd32d3607a0..e052f86816da 100644 --- a/prover/Cargo.lock +++ b/prover/Cargo.lock @@ -2821,16 +2821,6 @@ dependencies = [ "tokio-native-tls", ] -[[package]] -name = "hyperchain_da" -version = "0.1.0" -source = "git+https://github.com/matter-labs/hyperchain-da.git?rev=822542abff3e7c9e42c6e65f4ba29f289a979d3d#822542abff3e7c9e42c6e65f4ba29f289a979d3d" -dependencies = [ - "anyhow", - "async-trait", - "serde", -] - [[package]] name = "iana-time-zone" version = "0.1.60" @@ -7949,7 +7939,6 @@ name = "zksync_config" version = "0.1.0" dependencies = [ "anyhow", - "hyperchain_da", "rand 0.8.5", "secrecy", "serde", @@ -8124,7 +8113,6 @@ version = "0.1.0" dependencies = [ "anyhow", "envy", - "hyperchain_da", "serde", "zksync_basic_types", "zksync_config", From 778ead71cf969f108d130098d909b56b22ce5025 Mon Sep 17 00:00:00 2001 From: dimazhornyk Date: Wed, 5 Jun 2024 14:47:45 +0200 Subject: [PATCH 40/69] fix spellcheck --- core/lib/da_client/src/types.rs | 6 +++--- core/lib/zksync_core_leftovers/src/lib.rs | 2 +- core/node/da_dispatcher/src/da_dispatcher.rs 
| 2 +- 3 files changed, 5 insertions(+), 5 deletions(-) diff --git a/core/lib/da_client/src/types.rs b/core/lib/da_client/src/types.rs index 3d9ea6e85983..14dfaa95e4de 100644 --- a/core/lib/da_client/src/types.rs +++ b/core/lib/da_client/src/types.rs @@ -2,7 +2,7 @@ use std::{error, fmt::Display}; use serde::Serialize; -/// DAError is the error type returned by the DA clients. +/// `DAError` is the error type returned by the DA clients. #[derive(Debug)] pub struct DAError { pub error: anyhow::Error, @@ -32,14 +32,14 @@ pub trait IsTransient { impl error::Error for DAError {} -/// DispatchResponse is the response received from the DA layer after dispatching a blob. +/// `DispatchResponse` is the response received from the DA layer after dispatching a blob. #[derive(Default)] pub struct DispatchResponse { /// The blob_id is needed to fetch the inclusion data. pub blob_id: String, } -/// InclusionData is the data needed to prove that a blob is included in the DA layer. +/// `InclusionData` is the data needed to prove that a blob is included in the DA layer. #[derive(Default, Serialize)] pub struct InclusionData { /// The inclusion data serialized by the DA client. Serialization is done in a way that allows diff --git a/core/lib/zksync_core_leftovers/src/lib.rs b/core/lib/zksync_core_leftovers/src/lib.rs index 85969c358088..b2e9dd94fccc 100644 --- a/core/lib/zksync_core_leftovers/src/lib.rs +++ b/core/lib/zksync_core_leftovers/src/lib.rs @@ -771,7 +771,7 @@ pub async fn initialize_components( .build() .await .context("failed to build da_dispatcher_pool")?; - let da_client: Box = Box::new(NoDAClient::new()); // use the NoDAClient as a default one + let da_client: Box = Box::new(NoDAClient::new()); // use the `NoDAClient` as a default one let da_dispatcher = DataAvailabilityDispatcher::new(da_dispatcher_pool, da_config, da_client); diff --git a/core/node/da_dispatcher/src/da_dispatcher.rs b/core/node/da_dispatcher/src/da_dispatcher.rs index 7ae71076234f..f48e5cf2e4d9 100644 --- a/core/node/da_dispatcher/src/da_dispatcher.rs +++ b/core/node/da_dispatcher/src/da_dispatcher.rs @@ -196,7 +196,7 @@ where .mul_f32(rand::thread_rng().gen_range(0.8..1.2)); tracing::warn!(%err, "Failed DA dispatch request {retries}/{max_retries} for batch {batch_number}, retrying in {} milliseconds.", sleep_duration.as_millis()); tokio::time::sleep(sleep_duration).await; - backoff_secs = (backoff_secs * 2).min(128); // cap the backoff at 128 seconds + backoff_secs = (backoff_secs * 2).min(128); // cap the back-off at 128 seconds } } } From 148c027fa182b9af72a24241ed734de921af23b7 Mon Sep 17 00:00:00 2001 From: dimazhornyk Date: Wed, 5 Jun 2024 15:26:40 +0200 Subject: [PATCH 41/69] remove unused code --- .../node/node_framework/examples/main_node.rs | 25 ------------------- 1 file changed, 25 deletions(-) diff --git a/core/node/node_framework/examples/main_node.rs b/core/node/node_framework/examples/main_node.rs index d4e769a82afc..e4791c7b6765 100644 --- a/core/node/node_framework/examples/main_node.rs +++ b/core/node/node_framework/examples/main_node.rs @@ -29,7 +29,6 @@ use zksync_node_framework::{ circuit_breaker_checker::CircuitBreakerCheckerLayer, commitment_generator::CommitmentGeneratorLayer, contract_verification_api::ContractVerificationApiLayer, - da_client::DataAvailabilityClientLayer, da_dispatcher::DataAvailabilityDispatcherLayer, eth_sender::{EthTxAggregatorLayer, EthTxManagerLayer}, eth_watch::EthWatchLayer, @@ -315,28 +314,6 @@ impl MainNodeBuilder { Ok(self) } - fn add_da_client_layer(mut self) -> 
anyhow::Result { - let da_config = DADispatcherConfig::from_env()?; - let eth_config = EthConfig::from_env()?; - let state_keeper_config = StateKeeperConfig::from_env()?; - self.node.add_layer(DataAvailabilityClientLayer::new( - da_config, - eth_config, - state_keeper_config, - )); - Ok(self) - } - - fn add_da_dispatcher_layer(mut self) -> anyhow::Result { - let state_keeper_config = StateKeeperConfig::from_env()?; - let da_config = DADispatcherConfig::from_env()?; - self.node.add_layer(DataAvailabilityDispatcherLayer::new( - state_keeper_config, - da_config, - )); - Ok(self) - } - fn add_house_keeper_layer(mut self) -> anyhow::Result { let house_keeper_config = HouseKeeperConfig::from_env()?; let fri_prover_config = FriProverConfig::from_env()?; @@ -406,8 +383,6 @@ fn main() -> anyhow::Result<()> { .add_eth_watch_layer()? .add_pk_signing_client_layer()? .add_eth_sender_layer()? - .add_da_client_layer()? - .add_da_dispatcher_layer()? .add_proof_data_handler_layer()? .add_healthcheck_layer()? .add_tx_sender_layer()? From abed58e9d164a206c6e8ace842a9f0182c5d6536 Mon Sep 17 00:00:00 2001 From: dimazhornyk Date: Wed, 5 Jun 2024 15:27:45 +0200 Subject: [PATCH 42/69] zk fmt --- core/node/node_framework/examples/main_node.rs | 5 ++--- 1 file changed, 2 insertions(+), 3 deletions(-) diff --git a/core/node/node_framework/examples/main_node.rs b/core/node/node_framework/examples/main_node.rs index e4791c7b6765..f42cf76d33a2 100644 --- a/core/node/node_framework/examples/main_node.rs +++ b/core/node/node_framework/examples/main_node.rs @@ -15,8 +15,8 @@ use zksync_config::{ DatabaseSecrets, FriProofCompressorConfig, FriProverConfig, FriWitnessGeneratorConfig, L1Secrets, ObservabilityConfig, ProofDataHandlerConfig, }, - ApiConfig, ContractVerifierConfig, ContractsConfig, DADispatcherConfig, DBConfig, EthConfig, - EthWatchConfig, GasAdjusterConfig, GenesisConfig, ObjectStoreConfig, PostgresConfig, + ApiConfig, ContractVerifierConfig, ContractsConfig, DBConfig, EthConfig, EthWatchConfig, + GasAdjusterConfig, GenesisConfig, ObjectStoreConfig, PostgresConfig, }; use zksync_env_config::FromEnv; use zksync_metadata_calculator::MetadataCalculatorConfig; @@ -29,7 +29,6 @@ use zksync_node_framework::{ circuit_breaker_checker::CircuitBreakerCheckerLayer, commitment_generator::CommitmentGeneratorLayer, contract_verification_api::ContractVerificationApiLayer, - da_dispatcher::DataAvailabilityDispatcherLayer, eth_sender::{EthTxAggregatorLayer, EthTxManagerLayer}, eth_watch::EthWatchLayer, healtcheck_server::HealthCheckLayer, From 51ffccc3c1b9ec2e3107b775ee0b547ef43d262c Mon Sep 17 00:00:00 2001 From: dimazhornyk Date: Wed, 5 Jun 2024 16:08:28 +0200 Subject: [PATCH 43/69] fix --- .github/workflows/ci-core-reusable.yml | 4 ++-- core/lib/protobuf_config/src/proto/config/general.proto | 4 ++-- 2 files changed, 4 insertions(+), 4 deletions(-) diff --git a/.github/workflows/ci-core-reusable.yml b/.github/workflows/ci-core-reusable.yml index ec548f9b91cc..d2598ce021a7 100644 --- a/.github/workflows/ci-core-reusable.yml +++ b/.github/workflows/ci-core-reusable.yml @@ -134,7 +134,7 @@ jobs: base_token: ["Eth", "Custom"] deployment_mode: ["Rollup", "Validium"] env: - SERVER_COMPONENTS: "api,tree,eth,state_keeper,housekeeper,commitment_generator,vm_runner_protective_reads$${{ matrix.consensus && ',consensus' || '' }}${{ matrix.deployment_mode == 'Validium' && ',da_dispatcher' || '' }}" + SERVER_COMPONENTS: "api,tree,eth,state_keeper,housekeeper,commitment_generator,vm_runner_protective_reads${{ matrix.consensus && 
',consensus' || '' }}${{ matrix.deployment_mode == 'Validium' && ',da_dispatcher' || '' }}" runs-on: [matterlabs-ci-runner] steps: @@ -302,7 +302,7 @@ jobs: runs-on: [matterlabs-ci-runner] env: - SERVER_COMPONENTS: "api,tree,eth,state_keeper,housekeeper,commitment_generator,vm_runner_protective_reads$${{ matrix.consensus && ',consensus' || '' }}${{ matrix.deployment_mode == 'Validium' && ',da_dispatcher' || '' }}" + SERVER_COMPONENTS: "api,tree,eth,state_keeper,housekeeper,commitment_generator,vm_runner_protective_reads${{ matrix.consensus && ',consensus' || '' }}${{ matrix.deployment_mode == 'Validium' && ',da_dispatcher' || '' }}" EXT_NODE_FLAGS: "${{ matrix.consensus && '-- --enable-consensus' || '' }}" steps: diff --git a/core/lib/protobuf_config/src/proto/config/general.proto b/core/lib/protobuf_config/src/proto/config/general.proto index 759335048b46..0e1c5e16f72d 100644 --- a/core/lib/protobuf_config/src/proto/config/general.proto +++ b/core/lib/protobuf_config/src/proto/config/general.proto @@ -37,6 +37,6 @@ message GeneralConfig { optional config.prover.ProverGateway prover_gateway = 30; optional config.snapshot_creator.SnapshotsCreator snapshot_creator = 31; optional config.observability.Observability observability = 32; - optional config.da_dispatcher.DataAvailabilityDispatcher da_dispatcher = 33; - optional config.vm_runner.ProtectiveReadsWriter protective_reads_writer = 34; + optional config.vm_runner.ProtectiveReadsWriter protective_reads_writer = 33; + optional config.da_dispatcher.DataAvailabilityDispatcher da_dispatcher = 34; } From 5d3f9efd679e930ba8765e05a40a0e46a75f3745 Mon Sep 17 00:00:00 2001 From: dimazhornyk Date: Wed, 5 Jun 2024 17:30:40 +0200 Subject: [PATCH 44/69] use different version of contracts --- contracts | 2 +- core/lib/env_config/src/da_dispatcher.rs | 4 ++-- core/lib/object_store/src/factory.rs | 5 +++++ core/lib/zksync_core_leftovers/src/lib.rs | 2 +- 4 files changed, 9 insertions(+), 4 deletions(-) diff --git a/contracts b/contracts index 32ca4e665da8..8a70bbbc4812 160000 --- a/contracts +++ b/contracts @@ -1 +1 @@ -Subproject commit 32ca4e665da89f5b4f2f705eee40d91024ad5b48 +Subproject commit 8a70bbbc48125f5bde6189b4e3c6a3ee79631678 diff --git a/core/lib/env_config/src/da_dispatcher.rs b/core/lib/env_config/src/da_dispatcher.rs index be24c72b9b40..6ed6223ae5b0 100644 --- a/core/lib/env_config/src/da_dispatcher.rs +++ b/core/lib/env_config/src/da_dispatcher.rs @@ -33,12 +33,12 @@ mod tests { fn from_env_da_dispatcher() { let mut lock = MUTEX.lock(); let config = r#" - DA_DISPATCHER_POLLING_INTERVAL=5 + DA_DISPATCHER_POLLING_INTERVAL_MS=5000 DA_DISPATCHER_QUERY_ROWS_LIMIT=60 DA_DISPATCHER_MAX_RETRIES=7 "#; lock.set_env(config); let actual = DADispatcherConfig::from_env().unwrap(); - assert_eq!(actual, expected_da_layer_config(5, 60, 7)); + assert_eq!(actual, expected_da_layer_config(5000, 60, 7)); } } diff --git a/core/lib/object_store/src/factory.rs b/core/lib/object_store/src/factory.rs index 8ae5fdbd0728..c9b530a7edc7 100644 --- a/core/lib/object_store/src/factory.rs +++ b/core/lib/object_store/src/factory.rs @@ -51,6 +51,11 @@ impl ObjectStoreFactory { .cloned() } + /// Creates an [`ObjectStore`] based on the provided `config`. + /// + /// # Errors + /// + /// Returns an error if store initialization fails (e.g., because of incorrect configuration). 
pub async fn create_from_config( config: &ObjectStoreConfig, ) -> Result, ObjectStoreError> { diff --git a/core/lib/zksync_core_leftovers/src/lib.rs b/core/lib/zksync_core_leftovers/src/lib.rs index c7376595b390..f8ee7facdc4a 100644 --- a/core/lib/zksync_core_leftovers/src/lib.rs +++ b/core/lib/zksync_core_leftovers/src/lib.rs @@ -771,7 +771,7 @@ pub async fn initialize_components( .build() .await .context("failed to build da_dispatcher_pool")?; - let da_client: Box = Box::new(NoDAClient::new()); // use the `NoDAClient` as a default one + let da_client: Box = Box::new(NoDAClient::new()); // use the `NoDAClient` as a default option for Validium let da_dispatcher = DataAvailabilityDispatcher::new(da_dispatcher_pool, da_config, da_client); From 3f62afc5edf28e5c766a77c09188f8600ef0fd5a Mon Sep 17 00:00:00 2001 From: dimazhornyk Date: Thu, 6 Jun 2024 15:19:05 +0200 Subject: [PATCH 45/69] minor tweaks --- core/node/da_dispatcher/Cargo.toml | 4 ++-- core/node/da_dispatcher/src/da_dispatcher.rs | 1 - etc/env/configs/dev_validium.toml | 3 --- etc/env/configs/dev_validium_docker.toml | 3 --- 4 files changed, 2 insertions(+), 9 deletions(-) diff --git a/core/node/da_dispatcher/Cargo.toml b/core/node/da_dispatcher/Cargo.toml index 41999d8ff5c7..bd24da3da6e8 100644 --- a/core/node/da_dispatcher/Cargo.toml +++ b/core/node/da_dispatcher/Cargo.toml @@ -21,5 +21,5 @@ zksync_da_client.workspace = true tokio = { workspace = true, features = ["time"] } anyhow.workspace = true tracing.workspace = true -chrono = "0.4.31" -rand = "0.8.5" +chrono.workspace = true +rand.workspace = true diff --git a/core/node/da_dispatcher/src/da_dispatcher.rs b/core/node/da_dispatcher/src/da_dispatcher.rs index f48e5cf2e4d9..df57ae446570 100644 --- a/core/node/da_dispatcher/src/da_dispatcher.rs +++ b/core/node/da_dispatcher/src/da_dispatcher.rs @@ -157,7 +157,6 @@ impl DataAvailabilityDispatcher { METRICS .last_included_l1_batch .set(blob_info.l1_batch_number.0 as usize); - tracing::info!( "Received an inclusion data for a batch_number: {}, inclusion_latency_seconds: {}", blob_info.l1_batch_number, inclusion_latency.num_seconds() diff --git a/etc/env/configs/dev_validium.toml b/etc/env/configs/dev_validium.toml index b655e58bb030..5ed4ccb38e41 100644 --- a/etc/env/configs/dev_validium.toml +++ b/etc/env/configs/dev_validium.toml @@ -17,8 +17,5 @@ sender_pubdata_sending_mode="Custom" [eth_sender.gas_adjuster] max_blob_base_fee=0 -[da_dispatcher] -da_mode="NoDA" - [_metadata] base=['dev.toml'] diff --git a/etc/env/configs/dev_validium_docker.toml b/etc/env/configs/dev_validium_docker.toml index 66ed37f320a6..7e985cb974ab 100644 --- a/etc/env/configs/dev_validium_docker.toml +++ b/etc/env/configs/dev_validium_docker.toml @@ -22,9 +22,6 @@ miniblock_iteration_interval = 50 [eth_sender] sender_pubdata_sending_mode="Custom" -[da_dispatcher] -da_mode="NoDA" - [eth_client] web3_url = "http://reth:8545" From 5f34a50894286083fc236d3989f20f6f27a2a4e0 Mon Sep 17 00:00:00 2001 From: dimazhornyk Date: Thu, 6 Jun 2024 15:53:44 +0200 Subject: [PATCH 46/69] fix lint --- core/lib/default_da_clients/src/no_da/wiring_layer.rs | 7 ++++--- 1 file changed, 4 insertions(+), 3 deletions(-) diff --git a/core/lib/default_da_clients/src/no_da/wiring_layer.rs b/core/lib/default_da_clients/src/no_da/wiring_layer.rs index f605eff35869..4688e214cc73 100644 --- a/core/lib/default_da_clients/src/no_da/wiring_layer.rs +++ b/core/lib/default_da_clients/src/no_da/wiring_layer.rs @@ -1,3 +1,4 @@ +use std::fmt::Debug; use zksync_da_client::DataAvailabilityClient; use 
zksync_node_framework::{ implementations::resources::da_client::DAClientResource, @@ -7,12 +8,12 @@ use zksync_node_framework::{ use crate::no_da::client::NoDAClient; -#[derive(Debug)] -pub struct NoDAClientWiringLayer {} +#[derive(Debug, Default)] +pub struct NoDAClientWiringLayer; impl NoDAClientWiringLayer { pub fn new() -> Self { - Self {} + Self } } From cc4009fe6d1504789190cce51d612efc5b9487cf Mon Sep 17 00:00:00 2001 From: dimazhornyk Date: Thu, 6 Jun 2024 19:19:35 +0200 Subject: [PATCH 47/69] fix lint and tests --- .github/workflows/ci-core-reusable.yml | 2 +- .../src/no_da/wiring_layer.rs | 1 + core/tests/ts-integration/tests/fees.test.ts | 3 + prover/Cargo.lock | 175 ++++++++++++++++++ 4 files changed, 180 insertions(+), 1 deletion(-) diff --git a/.github/workflows/ci-core-reusable.yml b/.github/workflows/ci-core-reusable.yml index d2598ce021a7..58c0b9aa7b89 100644 --- a/.github/workflows/ci-core-reusable.yml +++ b/.github/workflows/ci-core-reusable.yml @@ -242,7 +242,7 @@ jobs: ci_run yarn recovery-test genesis-recovery-test - name: Fee projection tests - run: ci_run zk test i fees + run: DEPLOYMENT_MODE=${{ matrix.deployment_mode }} ci_run zk test i fees - name: Run revert test run: | diff --git a/core/lib/default_da_clients/src/no_da/wiring_layer.rs b/core/lib/default_da_clients/src/no_da/wiring_layer.rs index 4688e214cc73..5706fcc1c7e6 100644 --- a/core/lib/default_da_clients/src/no_da/wiring_layer.rs +++ b/core/lib/default_da_clients/src/no_da/wiring_layer.rs @@ -1,4 +1,5 @@ use std::fmt::Debug; + use zksync_da_client::DataAvailabilityClient; use zksync_node_framework::{ implementations::resources::da_client::DAClientResource, diff --git a/core/tests/ts-integration/tests/fees.test.ts b/core/tests/ts-integration/tests/fees.test.ts index 796ff6d7daff..10ccc35a3ea0 100644 --- a/core/tests/ts-integration/tests/fees.test.ts +++ b/core/tests/ts-integration/tests/fees.test.ts @@ -299,6 +299,9 @@ async function setInternalL1GasPrice( // Run server in background. 
let command = 'zk server --components api,tree,eth,state_keeper'; + if (process.env.DEPLOYMENT_MODE == 'Validium') { + command += `,da_dispatcher`; + } command = `DATABASE_MERKLE_TREE_MODE=full ${command}`; if (newPubdataPrice) { diff --git a/prover/Cargo.lock b/prover/Cargo.lock index dd2722473c20..d396ba0c7d99 100644 --- a/prover/Cargo.lock +++ b/prover/Cargo.lock @@ -8403,6 +8403,26 @@ dependencies = [ "url", ] +[[package]] +name = "zksync_block_reverter" +version = "0.1.0" +dependencies = [ + "anyhow", + "futures 0.3.30", + "serde", + "tokio", + "tracing", + "zksync_config", + "zksync_contracts", + "zksync_dal", + "zksync_eth_client", + "zksync_merkle_tree", + "zksync_object_store", + "zksync_state", + "zksync_storage", + "zksync_types", +] + [[package]] name = "zksync_circuit_breaker" version = "0.1.0" @@ -8612,6 +8632,25 @@ dependencies = [ "zksync_concurrency", ] +[[package]] +name = "zksync_consistency_checker" +version = "0.1.0" +dependencies = [ + "anyhow", + "serde", + "thiserror", + "tokio", + "tracing", + "zksync_contracts", + "zksync_dal", + "zksync_eth_client", + "zksync_eth_sender", + "zksync_health_check", + "zksync_l1_contract_interface", + "zksync_shared_metrics", + "zksync_types", +] + [[package]] name = "zksync_contract_verification_server" version = "0.1.0" @@ -8691,8 +8730,11 @@ dependencies = [ "zksync_consensus_utils", "zksync_contract_verification_server", "zksync_contracts", + "zksync_da_client", + "zksync_da_dispatcher", "zksync_dal", "zksync_db_connection", + "zksync_default_da_clients", "zksync_eth_client", "zksync_eth_sender", "zksync_eth_signer", @@ -8756,6 +8798,35 @@ dependencies = [ "zksync_utils", ] +[[package]] +name = "zksync_da_client" +version = "0.1.0" +dependencies = [ + "anyhow", + "async-trait", + "serde", + "tracing", + "zksync_config", + "zksync_types", +] + +[[package]] +name = "zksync_da_dispatcher" +version = "0.1.0" +dependencies = [ + "anyhow", + "chrono", + "rand 0.8.5", + "tokio", + "tracing", + "vise", + "zksync_config", + "zksync_da_client", + "zksync_dal", + "zksync_types", + "zksync_utils", +] + [[package]] name = "zksync_dal" version = "0.1.0" @@ -8804,6 +8875,22 @@ dependencies = [ "zksync_health_check", ] +[[package]] +name = "zksync_default_da_clients" +version = "0.1.0" +dependencies = [ + "anyhow", + "async-trait", + "serde", + "tracing", + "zksync_config", + "zksync_da_client", + "zksync_env_config", + "zksync_node_framework", + "zksync_object_store", + "zksync_types", +] + [[package]] name = "zksync_env_config" version = "0.1.0" @@ -9079,6 +9166,56 @@ dependencies = [ "zksync_web3_decl", ] +[[package]] +name = "zksync_node_framework" +version = "0.1.0" +dependencies = [ + "anyhow", + "async-trait", + "ctrlc", + "futures 0.3.30", + "prometheus_exporter", + "prover_dal", + "thiserror", + "tokio", + "tracing", + "zksync_block_reverter", + "zksync_circuit_breaker", + "zksync_commitment_generator", + "zksync_concurrency", + "zksync_config", + "zksync_consistency_checker", + "zksync_contract_verification_server", + "zksync_contracts", + "zksync_da_client", + "zksync_da_dispatcher", + "zksync_dal", + "zksync_db_connection", + "zksync_eth_client", + "zksync_eth_sender", + "zksync_eth_watch", + "zksync_health_check", + "zksync_house_keeper", + "zksync_metadata_calculator", + "zksync_node_api_server", + "zksync_node_consensus", + "zksync_node_fee_model", + "zksync_node_sync", + "zksync_object_store", + "zksync_proof_data_handler", + "zksync_protobuf_config", + "zksync_queued_job_processor", + "zksync_reorg_detector", + "zksync_state", 
+ "zksync_state_keeper", + "zksync_storage", + "zksync_tee_verifier_input_producer", + "zksync_types", + "zksync_utils", + "zksync_vm_runner", + "zksync_web3_decl", +] + [[package]] name = "zksync_node_genesis" version = "0.1.0" @@ -9382,6 +9519,23 @@ dependencies = [ "zksync_utils", ] +[[package]] +name = "zksync_reorg_detector" +version = "0.1.0" +dependencies = [ + "anyhow", + "async-trait", + "serde_json", + "thiserror", + "tokio", + "tracing", + "zksync_dal", + "zksync_health_check", + "zksync_shared_metrics", + "zksync_types", + "zksync_web3_decl", +] + [[package]] name = "zksync_shared_metrics" version = "0.1.0" @@ -9560,6 +9714,27 @@ dependencies = [ "zksync_basic_types", ] +[[package]] +name = "zksync_vm_runner" +version = "0.1.0" +dependencies = [ + "anyhow", + "async-trait", + "dashmap", + "multivm", + "once_cell", + "tokio", + "tracing", + "vm_utils", + "zksync_contracts", + "zksync_dal", + "zksync_state", + "zksync_state_keeper", + "zksync_storage", + "zksync_types", + "zksync_utils", +] + [[package]] name = "zksync_web3_decl" version = "0.1.0" From ac05afbe18f8eea90440735620a19a7a3c91bc36 Mon Sep 17 00:00:00 2001 From: dimazhornyk Date: Thu, 6 Jun 2024 20:23:32 +0200 Subject: [PATCH 48/69] fix test --- .github/workflows/ci-core-reusable.yml | 2 +- core/tests/ts-integration/tests/fees.test.ts | 25 ++++++++++++++++---- 2 files changed, 21 insertions(+), 6 deletions(-) diff --git a/.github/workflows/ci-core-reusable.yml b/.github/workflows/ci-core-reusable.yml index 58c0b9aa7b89..d2598ce021a7 100644 --- a/.github/workflows/ci-core-reusable.yml +++ b/.github/workflows/ci-core-reusable.yml @@ -242,7 +242,7 @@ jobs: ci_run yarn recovery-test genesis-recovery-test - name: Fee projection tests - run: DEPLOYMENT_MODE=${{ matrix.deployment_mode }} ci_run zk test i fees + run: ci_run zk test i fees - name: Run revert test run: | diff --git a/core/tests/ts-integration/tests/fees.test.ts b/core/tests/ts-integration/tests/fees.test.ts index 10ccc35a3ea0..378ee6b52a2b 100644 --- a/core/tests/ts-integration/tests/fees.test.ts +++ b/core/tests/ts-integration/tests/fees.test.ts @@ -129,7 +129,8 @@ testFees('Test fees', () => { } ], gasPrice, - reports + reports, + testMaster.environment().l1BatchCommitDataGeneratorMode ); } @@ -158,6 +159,7 @@ testFees('Test fees', () => { await setInternalL1GasPrice( alice._providerL2(), + testMaster.environment().l1BatchCommitDataGeneratorMode, requiredPubdataPrice.toString(), requiredPubdataPrice.toString() ); @@ -201,7 +203,13 @@ testFees('Test fees', () => { afterAll(async () => { // Returning the pubdata price to the default one - await setInternalL1GasPrice(alice._providerL2(), undefined, undefined, true); + await setInternalL1GasPrice( + alice._providerL2(), + testMaster.environment().l1BatchCommitDataGeneratorMode, + undefined, + undefined, + true + ); await testMaster.deinitialize(); }); @@ -212,10 +220,16 @@ async function appendResults( originalL1Receipts: ethers.providers.TransactionReceipt[], transactionRequests: ethers.providers.TransactionRequest[], newL1GasPrice: number, - reports: string[] + reports: string[], + deploymentMode: DataAvailabityMode ): Promise { // For the sake of simplicity, we'll use the same pubdata price as the L1 gas price. 
- await setInternalL1GasPrice(sender._providerL2(), newL1GasPrice.toString(), newL1GasPrice.toString()); + await setInternalL1GasPrice( + sender._providerL2(), + deploymentMode, + newL1GasPrice.toString(), + newL1GasPrice.toString() + ); if (originalL1Receipts.length !== reports.length && originalL1Receipts.length !== transactionRequests.length) { throw new Error('The array of receipts and reports have different length'); @@ -288,6 +302,7 @@ async function killServerAndWaitForShutdown(provider: zksync.Provider) { async function setInternalL1GasPrice( provider: zksync.Provider, + deploymentMode: DataAvailabityMode, newL1GasPrice?: string, newPubdataPrice?: string, disconnect?: boolean @@ -299,7 +314,7 @@ async function setInternalL1GasPrice( // Run server in background. let command = 'zk server --components api,tree,eth,state_keeper'; - if (process.env.DEPLOYMENT_MODE == 'Validium') { + if (process.env.DEPLOYMENT_MODE == DataAvailabityMode.Validium) { command += `,da_dispatcher`; } command = `DATABASE_MERKLE_TREE_MODE=full ${command}`; From e65109afa2e6ec87163952c94d0fd83e43a51163 Mon Sep 17 00:00:00 2001 From: dimazhornyk Date: Thu, 6 Jun 2024 20:57:41 +0200 Subject: [PATCH 49/69] add fee test logs --- .github/workflows/ci-core-reusable.yml | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/.github/workflows/ci-core-reusable.yml b/.github/workflows/ci-core-reusable.yml index d2598ce021a7..242c062e93eb 100644 --- a/.github/workflows/ci-core-reusable.yml +++ b/.github/workflows/ci-core-reusable.yml @@ -285,6 +285,10 @@ jobs: if: always() run: ci_run cat core/tests/upgrade-test/upgrade.log || true + - name: Show fee-projection.log logs + if: always() + run: ci_run cat core/tests/ts-integration/fees.log || true + - name: Show sccache logs if: always() run: | From 1b998b44d97af06b1b9f1716db1db102831370f9 Mon Sep 17 00:00:00 2001 From: dimazhornyk Date: Thu, 6 Jun 2024 21:18:33 +0200 Subject: [PATCH 50/69] fix fee test --- core/tests/ts-integration/tests/fees.test.ts | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/core/tests/ts-integration/tests/fees.test.ts b/core/tests/ts-integration/tests/fees.test.ts index 378ee6b52a2b..81ac6034f4cd 100644 --- a/core/tests/ts-integration/tests/fees.test.ts +++ b/core/tests/ts-integration/tests/fees.test.ts @@ -314,7 +314,7 @@ async function setInternalL1GasPrice( // Run server in background. let command = 'zk server --components api,tree,eth,state_keeper'; - if (process.env.DEPLOYMENT_MODE == DataAvailabityMode.Validium) { + if (deploymentMode == DataAvailabityMode.Validium) { command += `,da_dispatcher`; } command = `DATABASE_MERKLE_TREE_MODE=full ${command}`; From c35b4a263add371507636d7c8afa4ff79bfb055d Mon Sep 17 00:00:00 2001 From: dimazhornyk Date: Fri, 7 Jun 2024 14:52:06 +0200 Subject: [PATCH 51/69] make da_dispatcher configs optional in proto --- core/lib/dal/src/data_availability_dal.rs | 2 +- core/lib/protobuf_config/src/da_dispatcher.rs | 8 +++----- core/node/da_dispatcher/src/da_dispatcher.rs | 1 + 3 files changed, 5 insertions(+), 6 deletions(-) diff --git a/core/lib/dal/src/data_availability_dal.rs b/core/lib/dal/src/data_availability_dal.rs index 3074605d490b..f90ffdf1faae 100644 --- a/core/lib/dal/src/data_availability_dal.rs +++ b/core/lib/dal/src/data_availability_dal.rs @@ -67,7 +67,7 @@ impl DataAvailabilityDal<'_, '_> { anyhow::ensure!( matched == 1, - "DA blob_id verification failed. DA blob_id for L1 batch #{number} does not match the expected value" + "DA blob_id verification failed. 
DA blob_id {blob_id} for L1 batch #{number} does not match the expected value" ); } Ok(()) diff --git a/core/lib/protobuf_config/src/da_dispatcher.rs b/core/lib/protobuf_config/src/da_dispatcher.rs index 670a2c802f38..9efcc3a24b18 100644 --- a/core/lib/protobuf_config/src/da_dispatcher.rs +++ b/core/lib/protobuf_config/src/da_dispatcher.rs @@ -9,11 +9,9 @@ impl ProtoRepr for proto::DataAvailabilityDispatcher { fn read(&self) -> anyhow::Result { Ok(configs::da_dispatcher::DADispatcherConfig { - polling_interval_ms: Some( - *required(&self.polling_interval).context("polling_interval")?, - ), - query_rows_limit: Some(*required(&self.query_rows_limit).context("query_rows_limit")?), - max_retries: Some(*required(&self.max_retries).context("query_rows_limit")? as u16), + polling_interval_ms: self.polling_interval, + query_rows_limit: self.query_rows_limit, + max_retries: self.max_retries.map(|x| x as u16), }) } diff --git a/core/node/da_dispatcher/src/da_dispatcher.rs b/core/node/da_dispatcher/src/da_dispatcher.rs index df57ae446570..f48e5cf2e4d9 100644 --- a/core/node/da_dispatcher/src/da_dispatcher.rs +++ b/core/node/da_dispatcher/src/da_dispatcher.rs @@ -157,6 +157,7 @@ impl DataAvailabilityDispatcher { METRICS .last_included_l1_batch .set(blob_info.l1_batch_number.0 as usize); + tracing::info!( "Received an inclusion data for a batch_number: {}, inclusion_latency_seconds: {}", blob_info.l1_batch_number, inclusion_latency.num_seconds() From bd88b51435b1374425613aff97fbe02fca001876 Mon Sep 17 00:00:00 2001 From: dimazhornyk Date: Fri, 7 Jun 2024 15:11:22 +0200 Subject: [PATCH 52/69] use DalError --- core/lib/dal/src/data_availability_dal.rs | 73 ++++++++++++------- core/lib/protobuf_config/src/da_dispatcher.rs | 3 +- core/node/da_dispatcher/src/da_dispatcher.rs | 16 +--- 3 files changed, 49 insertions(+), 43 deletions(-) diff --git a/core/lib/dal/src/data_availability_dal.rs b/core/lib/dal/src/data_availability_dal.rs index f90ffdf1faae..f22ebd174a5b 100644 --- a/core/lib/dal/src/data_availability_dal.rs +++ b/core/lib/dal/src/data_availability_dal.rs @@ -1,4 +1,8 @@ -use zksync_db_connection::{connection::Connection, error::DalResult, instrument::InstrumentExt}; +use zksync_db_connection::{ + connection::Connection, + error::DalResult, + instrument::{InstrumentExt, Instrumented}, +}; use zksync_types::{pubdata_da::DataAvailabilityBlob, L1BatchNumber}; use crate::{ @@ -20,7 +24,7 @@ impl DataAvailabilityDal<'_, '_> { number: L1BatchNumber, blob_id: &str, sent_at: chrono::NaiveDateTime, - ) -> anyhow::Result<()> { + ) -> DalResult<()> { let update_result = sqlx::query!( r#" INSERT INTO @@ -44,8 +48,12 @@ impl DataAvailabilityDal<'_, '_> { "L1 batch #{number}: DA blob_id wasn't updated as it's already present" ); + let instrumentation = Instrumented::new("get_matching_batch_da_blob_id") + .with_arg("number", &number) + .with_arg("blob_id", &blob_id); + // Batch was already processed. Verify that existing DA blob_id matches - let matched: i64 = sqlx::query!( + let query = sqlx::query!( r#" SELECT COUNT(*) AS "count!" @@ -57,18 +65,22 @@ impl DataAvailabilityDal<'_, '_> { "#, i64::from(number.0), blob_id, - ) - .instrument("get_matching_batch_da_blob_id") - .with_arg("number", &number) - .report_latency() - .fetch_one(self.storage) - .await? - .count; - - anyhow::ensure!( - matched == 1, - "DA blob_id verification failed. 
DA blob_id {blob_id} for L1 batch #{number} does not match the expected value" ); + + let matched: i64 = instrumentation + .clone() + .with(query) + .report_latency() + .fetch_one(self.storage) + .await? + .count; + + if matched != 1 { + let err = instrumentation.constraint_error(anyhow::anyhow!( + "DA blob_id verification failed. DA blob_id {blob_id} for L1 batch #{number} does not match the expected value" + )); + return Err(err); + } } Ok(()) } @@ -80,7 +92,7 @@ impl DataAvailabilityDal<'_, '_> { &mut self, number: L1BatchNumber, da_inclusion_data: &[u8], - ) -> anyhow::Result<()> { + ) -> DalResult<()> { let update_result = sqlx::query!( r#" UPDATE data_availability @@ -103,8 +115,11 @@ impl DataAvailabilityDal<'_, '_> { if update_result.rows_affected() == 0 { tracing::debug!("L1 batch #{number}: DA data wasn't updated as it's already present or the row for the batch_number is missing"); + let instrumentation = + Instrumented::new("get_matching_batch_da_data").with_arg("number", &number); + // Batch was already processed. Verify that existing DA data matches - let matched: i64 = sqlx::query!( + let query = sqlx::query!( r#" SELECT COUNT(*) AS "count!" @@ -116,18 +131,22 @@ impl DataAvailabilityDal<'_, '_> { "#, i64::from(number.0), da_inclusion_data, - ) - .instrument("get_matching_batch_da_data") - .with_arg("number", &number) - .report_latency() - .fetch_one(self.storage) - .await? - .count; - - anyhow::ensure!( - matched == 1, - "DA data verification failed. DA data for L1 batch #{number} does not match the one provided before" ); + + let matched: i64 = instrumentation + .clone() + .with(query) + .report_latency() + .fetch_one(self.storage) + .await? + .count; + + if matched != 1 { + let err = instrumentation.constraint_error(anyhow::anyhow!( + "DA data verification failed. 
DA data for L1 batch #{number} does not match the one provided before" + )); + return Err(err); + } } Ok(()) } diff --git a/core/lib/protobuf_config/src/da_dispatcher.rs b/core/lib/protobuf_config/src/da_dispatcher.rs index 9efcc3a24b18..bc3ef416c8e0 100644 --- a/core/lib/protobuf_config/src/da_dispatcher.rs +++ b/core/lib/protobuf_config/src/da_dispatcher.rs @@ -1,6 +1,5 @@ -use anyhow::Context; use zksync_config::configs::{self}; -use zksync_protobuf::{required, ProtoRepr}; +use zksync_protobuf::ProtoRepr; use crate::proto::da_dispatcher as proto; diff --git a/core/node/da_dispatcher/src/da_dispatcher.rs b/core/node/da_dispatcher/src/da_dispatcher.rs index f48e5cf2e4d9..6cdbbe3224f9 100644 --- a/core/node/da_dispatcher/src/da_dispatcher.rs +++ b/core/node/da_dispatcher/src/da_dispatcher.rs @@ -90,13 +90,7 @@ impl DataAvailabilityDispatcher { dispatch_response.blob_id.as_str(), sent_at, ) - .await - .with_context(|| { - format!( - "failed to save blob_id for batch_number: {}", - batch.l1_batch_number - ) - })?; + .await?; drop(conn); METRICS @@ -141,13 +135,7 @@ impl DataAvailabilityDispatcher { L1BatchNumber(blob_info.l1_batch_number.0), inclusion_data.data.as_slice(), ) - .await - .with_context(|| { - format!( - "failed to save inclusion data for batch_number: {}", - blob_info.l1_batch_number - ) - })?; + .await?; drop(conn); let inclusion_latency = Utc::now().signed_duration_since(blob_info.sent_at); From d14bab193588632131e50cdb3ccbab1631243771 Mon Sep 17 00:00:00 2001 From: dimazhornyk Date: Fri, 7 Jun 2024 15:28:47 +0200 Subject: [PATCH 53/69] fix prover config --- prover/config/src/lib.rs | 8 +++++--- 1 file changed, 5 insertions(+), 3 deletions(-) diff --git a/prover/config/src/lib.rs b/prover/config/src/lib.rs index 8614f1677bda..57156853c1eb 100644 --- a/prover/config/src/lib.rs +++ b/prover/config/src/lib.rs @@ -8,9 +8,10 @@ use zksync_config::{ }, fri_prover_group::FriProverGroupConfig, house_keeper::HouseKeeperConfig, - DatabaseSecrets, FriProofCompressorConfig, FriProverConfig, FriProverGatewayConfig, - FriWitnessGeneratorConfig, FriWitnessVectorGeneratorConfig, GeneralConfig, - ObservabilityConfig, PrometheusConfig, ProofDataHandlerConfig, ProtectiveReadsWriterConfig, + DADispatcherConfig, DatabaseSecrets, FriProofCompressorConfig, FriProverConfig, + FriProverGatewayConfig, FriWitnessGeneratorConfig, FriWitnessVectorGeneratorConfig, + GeneralConfig, ObservabilityConfig, PrometheusConfig, ProofDataHandlerConfig, + ProtectiveReadsWriterConfig, }, ApiConfig, ContractVerifierConfig, DBConfig, EthConfig, EthWatchConfig, GasAdjusterConfig, ObjectStoreConfig, PostgresConfig, SnapshotsCreatorConfig, @@ -48,6 +49,7 @@ fn load_env_config() -> anyhow::Result { object_store_config: ObjectStoreConfig::from_env().ok(), observability: ObservabilityConfig::from_env().ok(), snapshot_creator: SnapshotsCreatorConfig::from_env().ok(), + da_dispatcher_config: DADispatcherConfig::from_env().ok(), protective_reads_writer_config: ProtectiveReadsWriterConfig::from_env().ok(), }) } From 144defcae9d5b483705369a7ffc9c2dace431fcb Mon Sep 17 00:00:00 2001 From: dimazhornyk Date: Mon, 10 Jun 2024 19:10:24 +0200 Subject: [PATCH 54/69] address Shahar's comments --- core/bin/zksync_server/src/node_builder.rs | 2 +- core/lib/config/src/configs/chain.rs | 3 ++- core/lib/config/src/configs/da_dispatcher.rs | 14 ++++++++------ core/lib/config/src/configs/eth_sender.rs | 2 +- core/lib/da_client/README.md | 17 +++++++++-------- core/lib/da_client/src/types.rs | 2 +- core/lib/dal/src/data_availability_dal.rs | 4 
++-- core/lib/default_da_clients/README.md | 11 +++++++++++ core/lib/default_da_clients/src/lib.rs | 2 +- .../src/{gcs => object_store}/client.rs | 0 .../src/{gcs => object_store}/config.rs | 0 .../src/{gcs => object_store}/mod.rs | 0 .../src/{gcs => object_store}/wiring_layer.rs | 2 +- core/lib/env_config/src/da_dispatcher.rs | 2 +- .../i_executor/structures/commit_batch_info.rs | 2 +- core/lib/protobuf_config/src/da_dispatcher.rs | 4 ++-- .../src/proto/config/da_dispatcher.proto | 2 +- core/node/da_dispatcher/src/da_dispatcher.rs | 6 +++--- 18 files changed, 45 insertions(+), 30 deletions(-) create mode 100644 core/lib/default_da_clients/README.md rename core/lib/default_da_clients/src/{gcs => object_store}/client.rs (100%) rename core/lib/default_da_clients/src/{gcs => object_store}/config.rs (100%) rename core/lib/default_da_clients/src/{gcs => object_store}/mod.rs (100%) rename core/lib/default_da_clients/src/{gcs => object_store}/wiring_layer.rs (94%) diff --git a/core/bin/zksync_server/src/node_builder.rs b/core/bin/zksync_server/src/node_builder.rs index 01f7ccffc873..5c6084aef0ab 100644 --- a/core/bin/zksync_server/src/node_builder.rs +++ b/core/bin/zksync_server/src/node_builder.rs @@ -9,8 +9,8 @@ use zksync_config::{ }; use zksync_core_leftovers::Component; use zksync_default_da_clients::{ - gcs::{config::ObjectStoreDAConfig, wiring_layer::ObjectStorageClientWiringLayer}, no_da::wiring_layer::NoDAClientWiringLayer, + object_store::{config::ObjectStoreDAConfig, wiring_layer::ObjectStorageClientWiringLayer}, }; use zksync_metadata_calculator::MetadataCalculatorConfig; use zksync_node_api_server::{ diff --git a/core/lib/config/src/configs/chain.rs b/core/lib/config/src/configs/chain.rs index 20cd6b323a6d..a2b1ce9701df 100644 --- a/core/lib/config/src/configs/chain.rs +++ b/core/lib/config/src/configs/chain.rs @@ -109,7 +109,8 @@ pub struct StateKeeperConfig { /// This variable should not exceed: /// - 128kb for calldata-based rollups /// - 120kb * n, where `n` is a number of blobs for blob-based rollups - /// - the DA layer blob size limit for the DA layer-based validiums + /// - the DA layer's blob size limit for the DA layer-based validiums + /// - 100 MB for the object store-based or no-da validiums pub max_pubdata_per_batch: u64, /// The version of the fee model to use. diff --git a/core/lib/config/src/configs/da_dispatcher.rs b/core/lib/config/src/configs/da_dispatcher.rs index 288e93a03569..303a2c0b54c1 100644 --- a/core/lib/config/src/configs/da_dispatcher.rs +++ b/core/lib/config/src/configs/da_dispatcher.rs @@ -3,15 +3,16 @@ use std::time::Duration; use serde::Deserialize; pub const DEFAULT_POLLING_INTERVAL_MS: u32 = 5000; -pub const DEFAULT_QUERY_ROWS_LIMIT: u32 = 100; +pub const DEFAULT_MAX_ROWS_TO_DISPATCH: u32 = 100; pub const DEFAULT_MAX_RETRIES: u16 = 5; #[derive(Debug, Clone, PartialEq, Deserialize)] pub struct DADispatcherConfig { + /// The interval between the `da_dispatcher's` iterations. pub polling_interval_ms: Option, /// The maximum number of rows to query from the database in a single query. - pub query_rows_limit: Option, - /// The maximum number of retries for the dispatching of a blob. + pub max_rows_to_dispatch: Option, + /// The maximum number of retries for the dispatch of a blob. 
pub max_retries: Option<u16>, } @@ -19,7 +20,7 @@ impl DADispatcherConfig { pub fn for_tests() -> Self { Self { polling_interval_ms: Some(DEFAULT_POLLING_INTERVAL_MS), - query_rows_limit: Some(DEFAULT_QUERY_ROWS_LIMIT), + max_rows_to_dispatch: Some(DEFAULT_MAX_ROWS_TO_DISPATCH), max_retries: Some(DEFAULT_MAX_RETRIES), } } @@ -31,8 +32,9 @@ impl DADispatcherConfig { } } - pub fn query_rows_limit(&self) -> u32 { - self.query_rows_limit.unwrap_or(DEFAULT_QUERY_ROWS_LIMIT) + pub fn max_rows_to_dispatch(&self) -> u32 { + self.max_rows_to_dispatch + .unwrap_or(DEFAULT_MAX_ROWS_TO_DISPATCH) } pub fn max_retries(&self) -> u16 { diff --git a/core/lib/config/src/configs/eth_sender.rs b/core/lib/config/src/configs/eth_sender.rs index b4fa46132ada..92836c74b1c6 100644 --- a/core/lib/config/src/configs/eth_sender.rs +++ b/core/lib/config/src/configs/eth_sender.rs @@ -115,7 +115,7 @@ pub struct SenderConfig { // Max acceptable fee for sending tx it acts as a safeguard to prevent sending tx with very high fees. pub max_acceptable_priority_fee_in_gwei: u64, - /// The mode in which we send pubdata + /// The mode in which we send pubdata: Calldata, Blobs or Custom (DA layers, Object Store, etc.) pub pubdata_sending_mode: PubdataSendingMode, } diff --git a/core/lib/da_client/README.md b/core/lib/da_client/README.md index 6c3e2734337c..0bec6e05cf75 100644 --- a/core/lib/da_client/README.md +++ b/core/lib/da_client/README.md @@ -1,13 +1,14 @@ -# Data Availability clients +# Data Availability Client -This crate contains an implementations of the default DataAvailability clients. These are maintained within this repo -because they are tightly coupled with the codebase, and would cause the circular dependency if they were to be moved to -the [hyperchain-da](https://github.com/matter-labs/hyperchain-da) repository. +This crate contains a trait that has to be implemented by all the DA clients. ## Overview -Currently, the following DataAvailability clients are implemented: +Every implementation of this trait is expected to follow these assumptions: -- `NoDA client` that does not send or store any pubdata, it is needed to run the zkSync network in the "no-DA" mode - utilizing the DA framework. -- `GCS client` that stores the pubdata in the GCS. +- The DA client only serves as a connector between the ZK chain's sequencer and the DA layer. +- The DA client is not supposed to be a standalone application, but rather a library that is used by the + `da_dispatcher`. +- The retry logic is implemented in the `da_dispatcher`, not in the DA clients. +- `get_inclusion_data` has to return the data only after the state roots are relayed to the L1 verification contract + (if the DA solution has one). diff --git a/core/lib/da_client/src/types.rs b/core/lib/da_client/src/types.rs index 14dfaa95e4de..52809b53e9db 100644 --- a/core/lib/da_client/src/types.rs +++ b/core/lib/da_client/src/types.rs @@ -39,7 +39,7 @@ pub struct DispatchResponse { pub blob_id: String, } -/// `InclusionData` is the data needed to prove that a blob is included in the DA layer. +/// `InclusionData` is the data needed to verify on L1 that a blob is included in the DA layer. #[derive(Default, Serialize)] pub struct InclusionData { /// The inclusion data serialized by the DA client.
Serialization is done in a way that allows diff --git a/core/lib/dal/src/data_availability_dal.rs b/core/lib/dal/src/data_availability_dal.rs index f22ebd174a5b..06ff7d7904a2 100644 --- a/core/lib/dal/src/data_availability_dal.rs +++ b/core/lib/dal/src/data_availability_dal.rs @@ -77,7 +77,7 @@ impl DataAvailabilityDal<'_, '_> { if matched != 1 { let err = instrumentation.constraint_error(anyhow::anyhow!( - "DA blob_id verification failed. DA blob_id {blob_id} for L1 batch #{number} does not match the expected value" + "Error storing DA blob id. DA blob_id {blob_id} for L1 batch #{number} does not match the expected value" )); return Err(err); } @@ -143,7 +143,7 @@ impl DataAvailabilityDal<'_, '_> { if matched != 1 { let err = instrumentation.constraint_error(anyhow::anyhow!( - "DA data verification failed. DA data for L1 batch #{number} does not match the one provided before" + "Error storing DA inclusion data. DA data for L1 batch #{number} does not match the one provided before" )); return Err(err); } diff --git a/core/lib/default_da_clients/README.md b/core/lib/default_da_clients/README.md new file mode 100644 index 000000000000..17ced715b268 --- /dev/null +++ b/core/lib/default_da_clients/README.md @@ -0,0 +1,11 @@ +# Default DA Clients + +This crate contains the default implementations of the Data Availability clients. Default clients are maintained within +this repo because they are tightly coupled with the codebase and would cause a circular dependency if they were to be +moved to the [hyperchain-da](https://github.com/matter-labs/hyperchain-da) repository. + +Currently, the following DataAvailability clients are implemented: + +- `NoDA client` that does not send or store any pubdata; it is needed to run the zkSync network in the "no-DA" mode + utilizing the DA framework. +- `Object Store client` that stores the pubdata in the Object Store (GCS).
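The two default clients above are thin implementations of the `DataAvailabilityClient` trait from `zksync_da_client`. Purely for orientation — this sketch is not part of the patch series — a third-party client could look roughly like the following, assuming the trait surface visible elsewhere in these diffs (`dispatch_blob`, `get_inclusion_data`, `clone_boxed`, `blob_size_limit`) and the `DAError`/`DispatchResponse`/`InclusionData` types; the `InMemoryDAClient` name and its behavior are illustrative only.

// Illustrative sketch only: a hypothetical in-memory DA client, not part of this change.
use std::{
    collections::HashMap,
    sync::{Arc, Mutex},
};

use async_trait::async_trait;
use zksync_da_client::{
    types::{DAError, DispatchResponse, InclusionData},
    DataAvailabilityClient,
};

/// Keeps dispatched pubdata in memory; useful only for local experiments and tests.
#[derive(Clone, Debug, Default)]
pub struct InMemoryDAClient {
    blobs: Arc<Mutex<HashMap<String, Vec<u8>>>>,
}

#[async_trait]
impl DataAvailabilityClient for InMemoryDAClient {
    async fn dispatch_blob(
        &self,
        batch_number: u32,
        data: Vec<u8>,
    ) -> Result<DispatchResponse, DAError> {
        // Derive a deterministic blob id from the batch number and remember the payload.
        let blob_id = format!("in-memory-{batch_number}");
        let mut blobs = self.blobs.lock().map_err(|err| DAError {
            error: anyhow::anyhow!("blob storage lock poisoned: {err}"),
            is_transient: false,
        })?;
        blobs.insert(blob_id.clone(), data);
        Ok(DispatchResponse { blob_id })
    }

    async fn get_inclusion_data(&self, key: String) -> Result<Option<InclusionData>, DAError> {
        // An in-memory blob is "included" as soon as it is stored, so empty inclusion data suffices.
        let blobs = self.blobs.lock().map_err(|err| DAError {
            error: anyhow::anyhow!("blob storage lock poisoned: {err}"),
            is_transient: false,
        })?;
        Ok(blobs.contains_key(&key).then(InclusionData::default))
    }

    fn clone_boxed(&self) -> Box<dyn DataAvailabilityClient> {
        Box::new(self.clone())
    }

    fn blob_size_limit(&self) -> Option<usize> {
        // No practical limit for an in-memory store.
        None
    }
}

As with the object store client in this series, retries and batching stay in the `da_dispatcher`; the client only moves bytes and reports whether a blob is known.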
diff --git a/core/lib/default_da_clients/src/lib.rs b/core/lib/default_da_clients/src/lib.rs index ac73de6deb41..96cffc760774 100644 --- a/core/lib/default_da_clients/src/lib.rs +++ b/core/lib/default_da_clients/src/lib.rs @@ -1,3 +1,3 @@ -pub mod gcs; +pub mod object_store; pub mod no_da; diff --git a/core/lib/default_da_clients/src/gcs/client.rs b/core/lib/default_da_clients/src/object_store/client.rs similarity index 100% rename from core/lib/default_da_clients/src/gcs/client.rs rename to core/lib/default_da_clients/src/object_store/client.rs diff --git a/core/lib/default_da_clients/src/gcs/config.rs b/core/lib/default_da_clients/src/object_store/config.rs similarity index 100% rename from core/lib/default_da_clients/src/gcs/config.rs rename to core/lib/default_da_clients/src/object_store/config.rs diff --git a/core/lib/default_da_clients/src/gcs/mod.rs b/core/lib/default_da_clients/src/object_store/mod.rs similarity index 100% rename from core/lib/default_da_clients/src/gcs/mod.rs rename to core/lib/default_da_clients/src/object_store/mod.rs diff --git a/core/lib/default_da_clients/src/gcs/wiring_layer.rs b/core/lib/default_da_clients/src/object_store/wiring_layer.rs similarity index 94% rename from core/lib/default_da_clients/src/gcs/wiring_layer.rs rename to core/lib/default_da_clients/src/object_store/wiring_layer.rs index 71320ab1aa7c..7af7e4d04fa6 100644 --- a/core/lib/default_da_clients/src/gcs/wiring_layer.rs +++ b/core/lib/default_da_clients/src/object_store/wiring_layer.rs @@ -6,7 +6,7 @@ use zksync_node_framework::{ wiring_layer::{WiringError, WiringLayer}, }; -use crate::gcs::client::ObjectStoreDAClient; +use crate::object_store::client::ObjectStoreDAClient; #[derive(Debug)] pub struct ObjectStorageClientWiringLayer { diff --git a/core/lib/env_config/src/da_dispatcher.rs b/core/lib/env_config/src/da_dispatcher.rs index 6ed6223ae5b0..afe6b6d6492c 100644 --- a/core/lib/env_config/src/da_dispatcher.rs +++ b/core/lib/env_config/src/da_dispatcher.rs @@ -24,7 +24,7 @@ mod tests { ) -> DADispatcherConfig { DADispatcherConfig { polling_interval_ms: Some(interval), - query_rows_limit: Some(rows_limit), + max_rows_to_dispatch: Some(rows_limit), max_retries: Some(max_retries), } } diff --git a/core/lib/l1_contract_interface/src/i_executor/structures/commit_batch_info.rs b/core/lib/l1_contract_interface/src/i_executor/structures/commit_batch_info.rs index 2ff3a1472fc0..b5d77ff60c16 100644 --- a/core/lib/l1_contract_interface/src/i_executor/structures/commit_batch_info.rs +++ b/core/lib/l1_contract_interface/src/i_executor/structures/commit_batch_info.rs @@ -210,7 +210,7 @@ impl Tokenizable for CommitBatchInfo<'_> { } (L1BatchCommitmentMode::Rollup, PubdataDA::Custom) => { - panic!("Custom pubdata DA is not supported for rollup mode") + panic!("Custom pubdata DA is incompatible with Rollup mode") } (L1BatchCommitmentMode::Validium, PubdataDA::Custom) => { vec![PUBDATA_SOURCE_CUSTOM] diff --git a/core/lib/protobuf_config/src/da_dispatcher.rs b/core/lib/protobuf_config/src/da_dispatcher.rs index bc3ef416c8e0..5d94cdd607b6 100644 --- a/core/lib/protobuf_config/src/da_dispatcher.rs +++ b/core/lib/protobuf_config/src/da_dispatcher.rs @@ -9,7 +9,7 @@ impl ProtoRepr for proto::DataAvailabilityDispatcher { fn read(&self) -> anyhow::Result { Ok(configs::da_dispatcher::DADispatcherConfig { polling_interval_ms: self.polling_interval, - query_rows_limit: self.query_rows_limit, + max_rows_to_dispatch: self.max_rows_to_dispatch, max_retries: self.max_retries.map(|x| x as u16), }) } @@ -17,7 +17,7 @@ 
impl ProtoRepr for proto::DataAvailabilityDispatcher { fn build(this: &Self::Type) -> Self { Self { polling_interval: this.polling_interval_ms, - query_rows_limit: this.query_rows_limit, + max_rows_to_dispatch: this.max_rows_to_dispatch, max_retries: this.max_retries.map(|x| x as u32), } } diff --git a/core/lib/protobuf_config/src/proto/config/da_dispatcher.proto b/core/lib/protobuf_config/src/proto/config/da_dispatcher.proto index 702b01c9ea22..ffe4af7a73cd 100644 --- a/core/lib/protobuf_config/src/proto/config/da_dispatcher.proto +++ b/core/lib/protobuf_config/src/proto/config/da_dispatcher.proto @@ -6,6 +6,6 @@ import "zksync/config/object_store.proto"; message DataAvailabilityDispatcher { optional uint32 polling_interval = 1; - optional uint32 query_rows_limit = 2; + optional uint32 max_rows_to_dispatch = 2; optional uint32 max_retries = 3; } diff --git a/core/node/da_dispatcher/src/da_dispatcher.rs b/core/node/da_dispatcher/src/da_dispatcher.rs index 6cdbbe3224f9..1c496b985e34 100644 --- a/core/node/da_dispatcher/src/da_dispatcher.rs +++ b/core/node/da_dispatcher/src/da_dispatcher.rs @@ -43,11 +43,11 @@ impl DataAvailabilityDispatcher { } if let Err(err) = self.dispatch(&pool).await { - tracing::warn!("dispatch error {err:?}"); + tracing::error!("dispatch error {err:?}"); } if let Err(err) = self.poll_for_inclusion(&pool).await { - tracing::warn!("poll_for_inclusion error {err:?}"); + tracing::error!("poll_for_inclusion error {err:?}"); } tokio::time::sleep(self.config.polling_interval()).await; @@ -60,7 +60,7 @@ impl DataAvailabilityDispatcher { let mut conn = pool.connection_tagged("da_dispatcher").await?; let batches = conn .data_availability_dal() - .get_ready_for_da_dispatch_l1_batches(self.config.query_rows_limit() as usize) + .get_ready_for_da_dispatch_l1_batches(self.config.max_rows_to_dispatch() as usize) .await?; drop(conn); From a884df4fc206cb293da515a9b14a0e3152c1b590 Mon Sep 17 00:00:00 2001 From: dimazhornyk Date: Mon, 10 Jun 2024 19:46:43 +0200 Subject: [PATCH 55/69] run dispatch and poll_for_inclusion in parallel --- core/node/da_dispatcher/src/da_dispatcher.rs | 19 ++++++++++++------- 1 file changed, 12 insertions(+), 7 deletions(-) diff --git a/core/node/da_dispatcher/src/da_dispatcher.rs b/core/node/da_dispatcher/src/da_dispatcher.rs index 1c496b985e34..18dd76d46e59 100644 --- a/core/node/da_dispatcher/src/da_dispatcher.rs +++ b/core/node/da_dispatcher/src/da_dispatcher.rs @@ -42,13 +42,18 @@ impl DataAvailabilityDispatcher { break; } - if let Err(err) = self.dispatch(&pool).await { - tracing::error!("dispatch error {err:?}"); - } - - if let Err(err) = self.poll_for_inclusion(&pool).await { - tracing::error!("poll_for_inclusion error {err:?}"); - } + tokio::join!( + async { + if let Err(err) = self.dispatch(&pool).await { + tracing::error!("dispatch error {err:?}"); + } + }, + async { + if let Err(err) = self.poll_for_inclusion(&pool).await { + tracing::error!("poll_for_inclusion error {err:?}"); + } + } + ); tokio::time::sleep(self.config.polling_interval()).await; } From 743f982b87565fc57959caca0e13b28bea659ae7 Mon Sep 17 00:00:00 2001 From: dimazhornyk Date: Tue, 11 Jun 2024 00:04:41 +0200 Subject: [PATCH 56/69] fix unit test --- core/lib/env_config/src/da_dispatcher.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/core/lib/env_config/src/da_dispatcher.rs b/core/lib/env_config/src/da_dispatcher.rs index afe6b6d6492c..194e4185b286 100644 --- a/core/lib/env_config/src/da_dispatcher.rs +++ b/core/lib/env_config/src/da_dispatcher.rs @@ -34,7 +34,7 
@@ mod tests { let mut lock = MUTEX.lock(); let config = r#" DA_DISPATCHER_POLLING_INTERVAL_MS=5000 - DA_DISPATCHER_QUERY_ROWS_LIMIT=60 + DA_DISPATCHER_MAX_ROWS_TO_DISPATCH=60 DA_DISPATCHER_MAX_RETRIES=7 "#; lock.set_env(config); From d3a4cd506326d7b4659711fe09eca68f93347835 Mon Sep 17 00:00:00 2001 From: dimazhornyk Date: Tue, 11 Jun 2024 17:58:36 +0200 Subject: [PATCH 57/69] address review comments --- core/lib/da_client/src/types.rs | 17 ++++------ ...39300ad3b80ac9e70c00864c3d9f6521b028.json} | 4 +-- core/lib/dal/src/data_availability_dal.rs | 6 ++-- .../src/models/storage_data_availability.rs | 2 +- .../src/object_store/client.rs | 23 ++++--------- core/lib/object_store/src/factory.rs | 2 +- core/lib/zksync_core_leftovers/src/lib.rs | 7 ++-- core/node/da_dispatcher/src/da_dispatcher.rs | 33 ++++++++++--------- core/node/da_dispatcher/src/metrics.rs | 6 +++- .../implementations/layers/da_dispatcher.rs | 6 +++- 10 files changed, 52 insertions(+), 54 deletions(-) rename core/lib/dal/.sqlx/{query-519a299c5fabd641b718b64945b4e765933f54bb3b4dd7ee73416b3e1bc1bd68.json => query-928139bf23bd0d57b8dbdb3283b139300ad3b80ac9e70c00864c3d9f6521b028.json} (77%) diff --git a/core/lib/da_client/src/types.rs b/core/lib/da_client/src/types.rs index 52809b53e9db..fef5c6a0a2e7 100644 --- a/core/lib/da_client/src/types.rs +++ b/core/lib/da_client/src/types.rs @@ -9,6 +9,12 @@ pub struct DAError { pub is_transient: bool, } +impl DAError { + pub fn is_transient(&self) -> bool { + self.is_transient + } +} + impl Display for DAError { fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { write!( @@ -19,17 +25,6 @@ impl Display for DAError { } } -impl IsTransient for DAError { - fn is_transient(&self) -> bool { - self.is_transient - } -} - -/// Trait that defines whether an error is transient or not, i.e. if it is safe to retry the operation. -pub trait IsTransient { - fn is_transient(&self) -> bool; -} - impl error::Error for DAError {} /// `DispatchResponse` is the response received from the DA layer after dispatching a blob. 
diff --git a/core/lib/dal/.sqlx/query-519a299c5fabd641b718b64945b4e765933f54bb3b4dd7ee73416b3e1bc1bd68.json b/core/lib/dal/.sqlx/query-928139bf23bd0d57b8dbdb3283b139300ad3b80ac9e70c00864c3d9f6521b028.json similarity index 77% rename from core/lib/dal/.sqlx/query-519a299c5fabd641b718b64945b4e765933f54bb3b4dd7ee73416b3e1bc1bd68.json rename to core/lib/dal/.sqlx/query-928139bf23bd0d57b8dbdb3283b139300ad3b80ac9e70c00864c3d9f6521b028.json index 80cba7150eae..e192763b189b 100644 --- a/core/lib/dal/.sqlx/query-519a299c5fabd641b718b64945b4e765933f54bb3b4dd7ee73416b3e1bc1bd68.json +++ b/core/lib/dal/.sqlx/query-928139bf23bd0d57b8dbdb3283b139300ad3b80ac9e70c00864c3d9f6521b028.json @@ -1,6 +1,6 @@ { "db_name": "PostgreSQL", - "query": "\n SELECT\n number,\n pubdata_input\n FROM\n l1_batches\n LEFT JOIN data_availability ON data_availability.l1_batch_number = l1_batches.number\n WHERE\n eth_commit_tx_id IS NULL\n AND number != 0\n AND data_availability.blob_id IS NULL\n ORDER BY\n number\n LIMIT\n $1\n ", + "query": "\n SELECT\n number,\n pubdata_input\n FROM\n l1_batches\n LEFT JOIN data_availability ON data_availability.l1_batch_number = l1_batches.number\n WHERE\n eth_commit_tx_id IS NULL\n AND number != 0\n AND data_availability.blob_id IS NULL\n AND pubdata_input IS NOT NULL\n ORDER BY\n number\n LIMIT\n $1\n ", "describe": { "columns": [ { @@ -24,5 +24,5 @@ true ] }, - "hash": "519a299c5fabd641b718b64945b4e765933f54bb3b4dd7ee73416b3e1bc1bd68" + "hash": "928139bf23bd0d57b8dbdb3283b139300ad3b80ac9e70c00864c3d9f6521b028" } diff --git a/core/lib/dal/src/data_availability_dal.rs b/core/lib/dal/src/data_availability_dal.rs index 06ff7d7904a2..184f3aed3433 100644 --- a/core/lib/dal/src/data_availability_dal.rs +++ b/core/lib/dal/src/data_availability_dal.rs @@ -77,8 +77,8 @@ impl DataAvailabilityDal<'_, '_> { if matched != 1 { let err = instrumentation.constraint_error(anyhow::anyhow!( - "Error storing DA blob id. DA blob_id {blob_id} for L1 batch #{number} does not match the expected value" - )); + "Error storing DA blob id. DA blob_id {blob_id} for L1 batch #{number} does not match the expected value" + )); return Err(err); } } @@ -197,6 +197,7 @@ impl DataAvailabilityDal<'_, '_> { eth_commit_tx_id IS NULL AND number != 0 AND data_availability.blob_id IS NULL + AND pubdata_input IS NOT NULL ORDER BY number LIMIT @@ -212,6 +213,7 @@ impl DataAvailabilityDal<'_, '_> { Ok(rows .into_iter() .map(|row| L1BatchDA { + // `unwrap` is safe here because we have a `WHERE` clause that filters out `NULL` values pubdata: row.pubdata_input.unwrap(), l1_batch_number: L1BatchNumber(row.number as u32), }) diff --git a/core/lib/dal/src/models/storage_data_availability.rs b/core/lib/dal/src/models/storage_data_availability.rs index 2d8a2d0183db..3285334e8373 100644 --- a/core/lib/dal/src/models/storage_data_availability.rs +++ b/core/lib/dal/src/models/storage_data_availability.rs @@ -3,7 +3,7 @@ use zksync_types::{pubdata_da::DataAvailabilityBlob, L1BatchNumber}; /// Represents a blob in the data availability layer. 
#[derive(Debug, Clone)] -pub struct StorageDABlob { +pub(crate) struct StorageDABlob { pub l1_batch_number: i64, pub blob_id: String, pub inclusion_data: Option>, diff --git a/core/lib/default_da_clients/src/object_store/client.rs b/core/lib/default_da_clients/src/object_store/client.rs index 78c086d50f8f..5a69407b4a73 100644 --- a/core/lib/default_da_clients/src/object_store/client.rs +++ b/core/lib/default_da_clients/src/object_store/client.rs @@ -1,8 +1,4 @@ -use std::{ - fmt, - fmt::{Debug, Formatter}, - sync::Arc, -}; +use std::{fmt::Debug, sync::Arc}; use async_trait::async_trait; use zksync_config::ObjectStoreConfig; @@ -14,7 +10,7 @@ use zksync_object_store::{ObjectStore, ObjectStoreFactory}; use zksync_types::{pubdata_da::StorablePubdata, L1BatchNumber}; /// An implementation of the `DataAvailabilityClient` trait that stores the pubdata in the GCS. -#[derive(Clone)] +#[derive(Clone, Debug)] pub struct ObjectStoreDAClient { object_store: Arc, } @@ -22,7 +18,9 @@ pub struct ObjectStoreDAClient { impl ObjectStoreDAClient { pub async fn new(object_store_conf: ObjectStoreConfig) -> anyhow::Result { Ok(ObjectStoreDAClient { - object_store: ObjectStoreFactory::create_from_config(&object_store_conf).await?, + object_store: ObjectStoreFactory::new(object_store_conf) + .create_store() + .await?, }) } } @@ -52,7 +50,7 @@ impl DataAvailabilityClient for ObjectStoreDAClient { async fn get_inclusion_data(&self, key: String) -> Result, DAError> { let key_u32 = key.parse::().map_err(|err| DAError { - error: anyhow::Error::from(err), + error: anyhow::Error::from(err).context("Failed to parse blob key"), is_transient: false, })?; @@ -84,12 +82,3 @@ impl DataAvailabilityClient for ObjectStoreDAClient { 100 * 1024 * 1024 // 100 MB, high enough to not be a problem } } - -impl Debug for ObjectStoreDAClient { - fn fmt(&self, formatter: &mut Formatter<'_>) -> fmt::Result { - formatter - .debug_struct("ObjectStoreDAClient") - .field("object_store", &self.object_store) - .finish() - } -} diff --git a/core/lib/object_store/src/factory.rs b/core/lib/object_store/src/factory.rs index f24fd5dad275..af00a8193d7f 100644 --- a/core/lib/object_store/src/factory.rs +++ b/core/lib/object_store/src/factory.rs @@ -57,7 +57,7 @@ impl ObjectStoreFactory { /// # Errors /// /// Returns an error if store initialization fails (e.g., because of incorrect configuration). 
- pub async fn create_from_config( + async fn create_from_config( config: &ObjectStoreConfig, ) -> Result, ObjectStoreError> { tracing::trace!("Initializing object store with configuration {config:?}"); diff --git a/core/lib/zksync_core_leftovers/src/lib.rs b/core/lib/zksync_core_leftovers/src/lib.rs index 487584d2873c..11bf355adf37 100644 --- a/core/lib/zksync_core_leftovers/src/lib.rs +++ b/core/lib/zksync_core_leftovers/src/lib.rs @@ -755,9 +755,10 @@ pub async fn initialize_components( .pubdata_sending_mode == PubdataSendingMode::Custom { - if !components.contains(&Component::DADispatcher) { - panic!("DA dispatcher requires custom pubdata sending mode"); - } + anyhow::ensure!( + components.contains(&Component::DADispatcher), + "Custom pubdata sending mode requires the DA dispatcher component to be enabled" + ); let started_at = Instant::now(); let da_config = configs diff --git a/core/node/da_dispatcher/src/da_dispatcher.rs b/core/node/da_dispatcher/src/da_dispatcher.rs index 18dd76d46e59..57ffe84c74a9 100644 --- a/core/node/da_dispatcher/src/da_dispatcher.rs +++ b/core/node/da_dispatcher/src/da_dispatcher.rs @@ -5,10 +5,7 @@ use chrono::{NaiveDateTime, Utc}; use rand::Rng; use tokio::sync::watch; use zksync_config::DADispatcherConfig; -use zksync_da_client::{ - types::{DAError, IsTransient}, - DataAvailabilityClient, -}; +use zksync_da_client::{types::DAError, DataAvailabilityClient}; use zksync_dal::{ConnectionPool, Core, CoreDal}; use zksync_types::L1BatchNumber; @@ -34,7 +31,7 @@ impl DataAvailabilityDispatcher { } } - pub async fn run(self, stop_receiver: watch::Receiver) -> anyhow::Result<()> { + pub async fn run(self, mut stop_receiver: watch::Receiver) -> anyhow::Result<()> { let pool = self.pool.clone(); loop { if *stop_receiver.borrow() { @@ -55,7 +52,13 @@ impl DataAvailabilityDispatcher { } ); - tokio::time::sleep(self.config.polling_interval()).await; + if tokio::time::timeout(self.config.polling_interval(), stop_receiver.changed()) + .await + .is_ok() + { + tracing::info!("Stop signal received, da_dispatcher is shutting down"); + break; + } } Ok(()) } @@ -103,10 +106,9 @@ impl DataAvailabilityDispatcher { .set(batch.l1_batch_number.0 as usize); METRICS.blob_size.observe(batch.pubdata.len()); tracing::info!( - "Dispatched a DA for batch_number: {}, pubdata_size: {}, dispatch_latency ms: {}", + "Dispatched a DA for batch_number: {}, pubdata_size: {}, dispatch_latency: {dispatch_latency_duration:?}", batch.l1_batch_number, batch.pubdata.len(), - dispatch_latency_duration.as_millis() ); } @@ -116,12 +118,13 @@ impl DataAvailabilityDispatcher { /// Polls the data availability layer for inclusion data, and saves it in the database. async fn poll_for_inclusion(&self, pool: &ConnectionPool) -> anyhow::Result<()> { let mut conn = pool.connection_tagged("da_dispatcher").await?; - if let Some(blob_info) = conn + let blob_info = conn .data_availability_dal() .get_first_da_blob_awaiting_inclusion() - .await? 
- { - drop(conn); + .await?; + drop(conn); + + if let Some(blob_info) = blob_info { let inclusion_data = self .client .get_inclusion_data(blob_info.blob_id.clone()) @@ -144,9 +147,9 @@ impl DataAvailabilityDispatcher { drop(conn); let inclusion_latency = Utc::now().signed_duration_since(blob_info.sent_at); - METRICS - .inclusion_latency - .observe(inclusion_latency.to_std()?); + if let Ok(latency) = inclusion_latency.to_std() { + METRICS.inclusion_latency.observe(latency); + } METRICS .last_included_l1_batch .set(blob_info.l1_batch_number.0 as usize); diff --git a/core/node/da_dispatcher/src/metrics.rs b/core/node/da_dispatcher/src/metrics.rs index d4e8c11ec2fe..67ac5ed68222 100644 --- a/core/node/da_dispatcher/src/metrics.rs +++ b/core/node/da_dispatcher/src/metrics.rs @@ -2,11 +2,15 @@ use std::time::Duration; use vise::{Buckets, Gauge, Histogram, Metrics, Unit}; +/// Buckets for `blob_dispatch_latency` (from 0.1 to 120 seconds). +const DISPATCH_LATENCIES: Buckets = + Buckets::values(&[0.1, 0.5, 1.0, 2.0, 5.0, 10.0, 30.0, 60.0, 120.0]); + #[derive(Debug, Metrics)] #[metrics(prefix = "server_da_dispatcher")] pub(super) struct DataAvailabilityDispatcherMetrics { /// Latency of the dispatch of the blob. - #[metrics(buckets = Buckets::LATENCIES, unit = Unit::Seconds)] + #[metrics(buckets = DISPATCH_LATENCIES, unit = Unit::Seconds)] pub blob_dispatch_latency: Histogram, /// The duration between the moment when the blob is dispatched and the moment when it is included. #[metrics(buckets = Buckets::LATENCIES)] diff --git a/core/node/node_framework/src/implementations/layers/da_dispatcher.rs b/core/node/node_framework/src/implementations/layers/da_dispatcher.rs index 9ee39c97909e..ab95c42461d0 100644 --- a/core/node/node_framework/src/implementations/layers/da_dispatcher.rs +++ b/core/node/node_framework/src/implementations/layers/da_dispatcher.rs @@ -41,7 +41,11 @@ impl WiringLayer for DataAvailabilityDispatcherLayer { let da_client = context.get_resource::().await?.0; if self.state_keeper_config.max_pubdata_per_batch > da_client.blob_size_limit() as u64 { - panic!("State keeper max pubdata per batch is greater than the client blob size limit"); + return Err(WiringError::Configuration(format!( + "Max pubdata per batch is greater than the blob size limit: {} > {}", + self.state_keeper_config.max_pubdata_per_batch, + da_client.blob_size_limit() + ))); } context.add_task(Box::new(DataAvailabilityDispatcherTask { From 3cf6f2e68cd3ec0384cf5e5c9121835c5a9dbfe9 Mon Sep 17 00:00:00 2001 From: dimazhornyk Date: Tue, 11 Jun 2024 18:01:27 +0200 Subject: [PATCH 58/69] add key to the context --- core/lib/default_da_clients/src/object_store/client.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/core/lib/default_da_clients/src/object_store/client.rs b/core/lib/default_da_clients/src/object_store/client.rs index 5a69407b4a73..e60a58003335 100644 --- a/core/lib/default_da_clients/src/object_store/client.rs +++ b/core/lib/default_da_clients/src/object_store/client.rs @@ -50,7 +50,7 @@ impl DataAvailabilityClient for ObjectStoreDAClient { async fn get_inclusion_data(&self, key: String) -> Result, DAError> { let key_u32 = key.parse::().map_err(|err| DAError { - error: anyhow::Error::from(err).context("Failed to parse blob key"), + error: anyhow::Error::from(err).context(format!("Failed to parse blob key: {}", key)), is_transient: false, })?; From b6abd12b26a8fc8dad80c4a9144c3f1c316a0a9f Mon Sep 17 00:00:00 2001 From: dimazhornyk Date: Wed, 12 Jun 2024 02:01:35 +0200 Subject: [PATCH 59/69] 
remove unneeded line --- ...74b489bae4799177b9a22283340a8c9fb4c28d2d76de921ca77b.json} | 4 ++-- core/lib/dal/src/data_availability_dal.rs | 1 - 2 files changed, 2 insertions(+), 3 deletions(-) rename core/lib/dal/.sqlx/{query-c3759facd600c44ff628fe504a672f215fa85b24deea1c71419ef945f7f9f7c6.json => query-0ccfbde0df7c74b489bae4799177b9a22283340a8c9fb4c28d2d76de921ca77b.json} (78%) diff --git a/core/lib/dal/.sqlx/query-c3759facd600c44ff628fe504a672f215fa85b24deea1c71419ef945f7f9f7c6.json b/core/lib/dal/.sqlx/query-0ccfbde0df7c74b489bae4799177b9a22283340a8c9fb4c28d2d76de921ca77b.json similarity index 78% rename from core/lib/dal/.sqlx/query-c3759facd600c44ff628fe504a672f215fa85b24deea1c71419ef945f7f9f7c6.json rename to core/lib/dal/.sqlx/query-0ccfbde0df7c74b489bae4799177b9a22283340a8c9fb4c28d2d76de921ca77b.json index 5099527325ba..f4bd9fdfb765 100644 --- a/core/lib/dal/.sqlx/query-c3759facd600c44ff628fe504a672f215fa85b24deea1c71419ef945f7f9f7c6.json +++ b/core/lib/dal/.sqlx/query-0ccfbde0df7c74b489bae4799177b9a22283340a8c9fb4c28d2d76de921ca77b.json @@ -1,6 +1,6 @@ { "db_name": "PostgreSQL", - "query": "\n SELECT\n l1_batch_number,\n blob_id,\n inclusion_data,\n sent_at\n FROM\n data_availability\n WHERE\n inclusion_data IS NULL\n AND blob_id IS NOT NULL\n ORDER BY\n l1_batch_number\n LIMIT\n 1\n ", + "query": "\n SELECT\n l1_batch_number,\n blob_id,\n inclusion_data,\n sent_at\n FROM\n data_availability\n WHERE\n inclusion_data IS NULL\n ORDER BY\n l1_batch_number\n LIMIT\n 1\n ", "describe": { "columns": [ { @@ -34,5 +34,5 @@ false ] }, - "hash": "c3759facd600c44ff628fe504a672f215fa85b24deea1c71419ef945f7f9f7c6" + "hash": "0ccfbde0df7c74b489bae4799177b9a22283340a8c9fb4c28d2d76de921ca77b" } diff --git a/core/lib/dal/src/data_availability_dal.rs b/core/lib/dal/src/data_availability_dal.rs index 184f3aed3433..9d5fb6eaa6c2 100644 --- a/core/lib/dal/src/data_availability_dal.rs +++ b/core/lib/dal/src/data_availability_dal.rs @@ -167,7 +167,6 @@ impl DataAvailabilityDal<'_, '_> { data_availability WHERE inclusion_data IS NULL - AND blob_id IS NOT NULL ORDER BY l1_batch_number LIMIT From 1323d9ef52f9eeb838f72b198aec433fed8b9afc Mon Sep 17 00:00:00 2001 From: dimazhornyk Date: Wed, 12 Jun 2024 10:59:03 +0200 Subject: [PATCH 60/69] update prover/Cargo.lock --- prover/Cargo.lock | 1 + 1 file changed, 1 insertion(+) diff --git a/prover/Cargo.lock b/prover/Cargo.lock index a95984059770..3443c1e149c8 100644 --- a/prover/Cargo.lock +++ b/prover/Cargo.lock @@ -9738,6 +9738,7 @@ dependencies = [ "once_cell", "tokio", "tracing", + "vise", "vm_utils", "zksync_contracts", "zksync_dal", From 59cbc451280a9f369758bee12babdf0ab5d11a09 Mon Sep 17 00:00:00 2001 From: dimazhornyk Date: Thu, 13 Jun 2024 18:13:36 +0200 Subject: [PATCH 61/69] fix comments --- core/bin/zksync_server/src/node_builder.rs | 2 +- core/lib/da_client/src/lib.rs | 4 ++-- core/lib/dal/src/data_availability_dal.rs | 2 +- .../src/models/storage_data_availability.rs | 1 + .../lib/default_da_clients/src/no_da/client.rs | 12 ++---------- .../src/no_da/wiring_layer.rs | 8 +------- .../src/object_store/client.rs | 4 ++-- core/lib/zksync_core_leftovers/src/lib.rs | 18 ++++++++---------- core/node/da_dispatcher/README.md | 4 ++++ core/node/da_dispatcher/src/da_dispatcher.rs | 12 ++++++------ .../implementations/layers/da_dispatcher.rs | 13 +++++++------ 11 files changed, 35 insertions(+), 45 deletions(-) diff --git a/core/bin/zksync_server/src/node_builder.rs b/core/bin/zksync_server/src/node_builder.rs index 5c6084aef0ab..517110c56e91 100644 --- 
a/core/bin/zksync_server/src/node_builder.rs +++ b/core/bin/zksync_server/src/node_builder.rs @@ -404,7 +404,7 @@ impl MainNodeBuilder { } fn add_no_da_client_layer(mut self) -> anyhow::Result { - self.node.add_layer(NoDAClientWiringLayer::new()); + self.node.add_layer(NoDAClientWiringLayer); Ok(self) } diff --git a/core/lib/da_client/src/lib.rs b/core/lib/da_client/src/lib.rs index 6fa31283ff73..6912c2fadbb8 100644 --- a/core/lib/da_client/src/lib.rs +++ b/core/lib/da_client/src/lib.rs @@ -21,8 +21,8 @@ pub trait DataAvailabilityClient: Sync + Send + fmt::Debug { /// Clones the client and wraps it in a Box. fn clone_boxed(&self) -> Box; - /// Returns the maximum size of the blob (in bytes) that can be dispatched. - fn blob_size_limit(&self) -> usize; + /// Returns the maximum size of the blob (in bytes) that can be dispatched. None means no limit. + fn blob_size_limit(&self) -> Option; } impl Clone for Box { diff --git a/core/lib/dal/src/data_availability_dal.rs b/core/lib/dal/src/data_availability_dal.rs index 9d5fb6eaa6c2..5880146e0a9c 100644 --- a/core/lib/dal/src/data_availability_dal.rs +++ b/core/lib/dal/src/data_availability_dal.rs @@ -113,7 +113,7 @@ impl DataAvailabilityDal<'_, '_> { .await?; if update_result.rows_affected() == 0 { - tracing::debug!("L1 batch #{number}: DA data wasn't updated as it's already present or the row for the batch_number is missing"); + tracing::debug!("L1 batch #{number}: DA data wasn't updated as it's already present"); let instrumentation = Instrumented::new("get_matching_batch_da_data").with_arg("number", &number); diff --git a/core/lib/dal/src/models/storage_data_availability.rs b/core/lib/dal/src/models/storage_data_availability.rs index 3285334e8373..2a1b39845e69 100644 --- a/core/lib/dal/src/models/storage_data_availability.rs +++ b/core/lib/dal/src/models/storage_data_availability.rs @@ -22,6 +22,7 @@ impl From for DataAvailabilityBlob { } /// A small struct used to store a batch and its data availability, which are retrieved from the database. 
+#[derive(Debug)] pub struct L1BatchDA { pub pubdata: Vec, pub l1_batch_number: L1BatchNumber, diff --git a/core/lib/default_da_clients/src/no_da/client.rs b/core/lib/default_da_clients/src/no_da/client.rs index 6e939b9abceb..cc73b8744a0d 100644 --- a/core/lib/default_da_clients/src/no_da/client.rs +++ b/core/lib/default_da_clients/src/no_da/client.rs @@ -1,5 +1,3 @@ -use std::fmt::Debug; - use async_trait::async_trait; use zksync_da_client::{ types::{DAError, DispatchResponse, InclusionData}, @@ -10,12 +8,6 @@ use zksync_da_client::{ #[derive(Clone, Debug, Default)] pub struct NoDAClient; -impl NoDAClient { - pub fn new() -> Self { - NoDAClient {} - } -} - #[async_trait] impl DataAvailabilityClient for NoDAClient { async fn dispatch_blob(&self, _: u32, _: Vec) -> Result { @@ -30,7 +22,7 @@ impl DataAvailabilityClient for NoDAClient { Box::new(self.clone()) } - fn blob_size_limit(&self) -> usize { - 100 * 1024 * 1024 // 100 MB, high enough to not be a problem + fn blob_size_limit(&self) -> Option { + None } } diff --git a/core/lib/default_da_clients/src/no_da/wiring_layer.rs b/core/lib/default_da_clients/src/no_da/wiring_layer.rs index 5706fcc1c7e6..c1332da9a97e 100644 --- a/core/lib/default_da_clients/src/no_da/wiring_layer.rs +++ b/core/lib/default_da_clients/src/no_da/wiring_layer.rs @@ -12,12 +12,6 @@ use crate::no_da::client::NoDAClient; #[derive(Debug, Default)] pub struct NoDAClientWiringLayer; -impl NoDAClientWiringLayer { - pub fn new() -> Self { - Self - } -} - #[async_trait::async_trait] impl WiringLayer for NoDAClientWiringLayer { fn layer_name(&self) -> &'static str { @@ -25,7 +19,7 @@ impl WiringLayer for NoDAClientWiringLayer { } async fn wire(self: Box, mut context: ServiceContext<'_>) -> Result<(), WiringError> { - let client: Box = Box::new(NoDAClient::new()); + let client: Box = Box::new(NoDAClient); context.insert_resource(DAClientResource(client))?; diff --git a/core/lib/default_da_clients/src/object_store/client.rs b/core/lib/default_da_clients/src/object_store/client.rs index e60a58003335..e6b2ab2c8485 100644 --- a/core/lib/default_da_clients/src/object_store/client.rs +++ b/core/lib/default_da_clients/src/object_store/client.rs @@ -78,7 +78,7 @@ impl DataAvailabilityClient for ObjectStoreDAClient { Box::new(self.clone()) } - fn blob_size_limit(&self) -> usize { - 100 * 1024 * 1024 // 100 MB, high enough to not be a problem + fn blob_size_limit(&self) -> Option { + None } } diff --git a/core/lib/zksync_core_leftovers/src/lib.rs b/core/lib/zksync_core_leftovers/src/lib.rs index 11bf355adf37..89e37ab53b64 100644 --- a/core/lib/zksync_core_leftovers/src/lib.rs +++ b/core/lib/zksync_core_leftovers/src/lib.rs @@ -748,16 +748,14 @@ pub async fn initialize_components( .context("add_tee_verifier_input_producer_to_task_futures()")?; } - if eth - .sender - .clone() - .context("eth_sender")? - .pubdata_sending_mode - == PubdataSendingMode::Custom - { + if components.contains(&Component::DADispatcher) { anyhow::ensure!( - components.contains(&Component::DADispatcher), - "Custom pubdata sending mode requires the DA dispatcher component to be enabled" + eth.sender + .clone() + .context("eth_sender")? 
+ .pubdata_sending_mode + == PubdataSendingMode::Custom, + "DA dispatcher component has to be used with custom pubdata sending mode" ); let started_at = Instant::now(); @@ -769,7 +767,7 @@ pub async fn initialize_components( .build() .await .context("failed to build da_dispatcher_pool")?; - let da_client: Box = Box::new(NoDAClient::new()); // use the `NoDAClient` as a default option for Validium + let da_client: Box = Box::new(NoDAClient); // use the `NoDAClient` as a default option for Validium let da_dispatcher = DataAvailabilityDispatcher::new(da_dispatcher_pool, da_config, da_client); diff --git a/core/node/da_dispatcher/README.md b/core/node/da_dispatcher/README.md index e8ab740cb213..a7ea6351a5ed 100644 --- a/core/node/da_dispatcher/README.md +++ b/core/node/da_dispatcher/README.md @@ -12,3 +12,7 @@ also part of the DA dispatcher. This component assumes that batches are being sent to the L1 sequentially and that there is no need to fetch the inclusion data for their DA in parallel. Same with dispatching DA blobs, there is no need to do that in parallel unless we are facing performance issues when the sequencer is trying to catch up after some outage. + +This is a singleton component, only one instance of the DA dispatcher should be running at a time. In case multiple +instances are started, they will be dispatching the same pubdata blobs to the DA layer. It is not going to cause any +critical issues, but it is wasteful. diff --git a/core/node/da_dispatcher/src/da_dispatcher.rs b/core/node/da_dispatcher/src/da_dispatcher.rs index 57ffe84c74a9..204716cd83d1 100644 --- a/core/node/da_dispatcher/src/da_dispatcher.rs +++ b/core/node/da_dispatcher/src/da_dispatcher.rs @@ -35,13 +35,12 @@ impl DataAvailabilityDispatcher { let pool = self.pool.clone(); loop { if *stop_receiver.borrow() { - tracing::info!("Stop signal received, da_dispatcher is shutting down"); break; } tokio::join!( async { - if let Err(err) = self.dispatch(&pool).await { + if let Err(err) = self.dispatch().await { tracing::error!("dispatch error {err:?}"); } }, @@ -56,16 +55,17 @@ impl DataAvailabilityDispatcher { .await .is_ok() { - tracing::info!("Stop signal received, da_dispatcher is shutting down"); break; } } + + tracing::info!("Stop signal received, da_dispatcher is shutting down"); Ok(()) } /// Dispatches the blobs to the data availability layer, and saves the blob_id in the database. 
- async fn dispatch(&self, pool: &ConnectionPool) -> anyhow::Result<()> { - let mut conn = pool.connection_tagged("da_dispatcher").await?; + async fn dispatch(&self) -> anyhow::Result<()> { + let mut conn = self.pool.connection_tagged("da_dispatcher").await?; let batches = conn .data_availability_dal() .get_ready_for_da_dispatch_l1_batches(self.config.max_rows_to_dispatch() as usize) @@ -91,7 +91,7 @@ impl DataAvailabilityDispatcher { let sent_at = NaiveDateTime::from_timestamp_millis(Utc::now().timestamp_millis()).unwrap(); - let mut conn = pool.connection_tagged("da_dispatcher").await?; + let mut conn = self.pool.connection_tagged("da_dispatcher").await?; conn.data_availability_dal() .insert_l1_batch_da( batch.l1_batch_number, diff --git a/core/node/node_framework/src/implementations/layers/da_dispatcher.rs b/core/node/node_framework/src/implementations/layers/da_dispatcher.rs index ab95c42461d0..bc05bd343169 100644 --- a/core/node/node_framework/src/implementations/layers/da_dispatcher.rs +++ b/core/node/node_framework/src/implementations/layers/da_dispatcher.rs @@ -40,12 +40,13 @@ impl WiringLayer for DataAvailabilityDispatcherLayer { let master_pool = master_pool_resource.get().await?; let da_client = context.get_resource::().await?.0; - if self.state_keeper_config.max_pubdata_per_batch > da_client.blob_size_limit() as u64 { - return Err(WiringError::Configuration(format!( - "Max pubdata per batch is greater than the blob size limit: {} > {}", - self.state_keeper_config.max_pubdata_per_batch, - da_client.blob_size_limit() - ))); + if let Some(limit) = da_client.blob_size_limit() { + if self.state_keeper_config.max_pubdata_per_batch > limit as u64 { + return Err(WiringError::Configuration(format!( + "Max pubdata per batch is greater than the blob size limit: {} > {}", + self.state_keeper_config.max_pubdata_per_batch, limit + ))); + } } context.add_task(Box::new(DataAvailabilityDispatcherTask { From 1d2bf4762118befc9b7b09a49e72913e54aab2df Mon Sep 17 00:00:00 2001 From: dimazhornyk Date: Fri, 14 Jun 2024 00:04:09 +0200 Subject: [PATCH 62/69] enable da_dispather by default and skip the initialization if not Custom mode --- .github/workflows/ci-core-reusable.yml | 4 +- core/lib/zksync_core_leftovers/src/lib.rs | 55 ++++++++++--------- .../tests/revert-and-restart-en.test.ts | 6 +- .../tests/revert-and-restart.test.ts | 6 +- core/tests/ts-integration/tests/fees.test.ts | 28 ++-------- core/tests/upgrade-test/tests/upgrade.test.ts | 5 +- 6 files changed, 40 insertions(+), 64 deletions(-) diff --git a/.github/workflows/ci-core-reusable.yml b/.github/workflows/ci-core-reusable.yml index 242c062e93eb..5e735b8aa650 100644 --- a/.github/workflows/ci-core-reusable.yml +++ b/.github/workflows/ci-core-reusable.yml @@ -134,7 +134,7 @@ jobs: base_token: ["Eth", "Custom"] deployment_mode: ["Rollup", "Validium"] env: - SERVER_COMPONENTS: "api,tree,eth,state_keeper,housekeeper,commitment_generator,vm_runner_protective_reads${{ matrix.consensus && ',consensus' || '' }}${{ matrix.deployment_mode == 'Validium' && ',da_dispatcher' || '' }}" + SERVER_COMPONENTS: "api,tree,eth,state_keeper,housekeeper,commitment_generator,vm_runner_protective_reads,da_dispatcher${{ matrix.consensus && ',consensus' || '' }}" runs-on: [matterlabs-ci-runner] steps: @@ -306,7 +306,7 @@ jobs: runs-on: [matterlabs-ci-runner] env: - SERVER_COMPONENTS: "api,tree,eth,state_keeper,housekeeper,commitment_generator,vm_runner_protective_reads${{ matrix.consensus && ',consensus' || '' }}${{ matrix.deployment_mode == 'Validium' && 
',da_dispatcher' || '' }}" + SERVER_COMPONENTS: "api,tree,eth,state_keeper,housekeeper,commitment_generator,vm_runner_protective_reads,da_dispatcher${{ matrix.consensus && ',consensus' || '' }}" EXT_NODE_FLAGS: "${{ matrix.consensus && '-- --enable-consensus' || '' }}" steps: diff --git a/core/lib/zksync_core_leftovers/src/lib.rs b/core/lib/zksync_core_leftovers/src/lib.rs index 89e37ab53b64..b283b7f4f950 100644 --- a/core/lib/zksync_core_leftovers/src/lib.rs +++ b/core/lib/zksync_core_leftovers/src/lib.rs @@ -749,33 +749,38 @@ pub async fn initialize_components( } if components.contains(&Component::DADispatcher) { - anyhow::ensure!( - eth.sender - .clone() - .context("eth_sender")? - .pubdata_sending_mode - == PubdataSendingMode::Custom, - "DA dispatcher component has to be used with custom pubdata sending mode" - ); - - let started_at = Instant::now(); - let da_config = configs - .da_dispatcher_config + if eth + .sender .clone() - .context("da_dispatcher_config")?; - let da_dispatcher_pool = ConnectionPool::::singleton(database_secrets.master_url()?) - .build() - .await - .context("failed to build da_dispatcher_pool")?; - let da_client: Box = Box::new(NoDAClient); // use the `NoDAClient` as a default option for Validium - - let da_dispatcher = - DataAvailabilityDispatcher::new(da_dispatcher_pool, da_config, da_client); - task_futures.push(tokio::spawn(da_dispatcher.run(stop_receiver.clone()))); + .context("eth_sender")? + .pubdata_sending_mode + != PubdataSendingMode::Custom + { + // Warning instead of returning an error is appropriate here because the DA dispatcher + // is not a critical component. It's more convenient for tests and local setup to enable + // it by default, but don't start the component if the pubdata sending mode is not `Custom` + tracing::warn!("DA dispatcher is enabled, but the pubdata sending mode is not `Custom`. DA dispatcher will not be started."); + } else { + let started_at = Instant::now(); + let da_config = configs + .da_dispatcher_config + .clone() + .context("da_dispatcher_config")?; + let da_dispatcher_pool = + ConnectionPool::::singleton(database_secrets.master_url()?) 
+ .build() + .await + .context("failed to build da_dispatcher_pool")?; + let da_client: Box = Box::new(NoDAClient); // use the `NoDAClient` as a default option for Validium + + let da_dispatcher = + DataAvailabilityDispatcher::new(da_dispatcher_pool, da_config, da_client); + task_futures.push(tokio::spawn(da_dispatcher.run(stop_receiver.clone()))); - let elapsed = started_at.elapsed(); - APP_METRICS.init_latency[&InitStage::DADispatcher].set(elapsed); - tracing::info!("initialized DA dispatcher in {elapsed:?}"); + let elapsed = started_at.elapsed(); + APP_METRICS.init_latency[&InitStage::DADispatcher].set(elapsed); + tracing::info!("initialized DA dispatcher in {elapsed:?}"); + } } if components.contains(&Component::Housekeeper) { diff --git a/core/tests/revert-test/tests/revert-and-restart-en.test.ts b/core/tests/revert-test/tests/revert-and-restart-en.test.ts index c32aaf80b966..a46412ad1c8c 100644 --- a/core/tests/revert-test/tests/revert-and-restart-en.test.ts +++ b/core/tests/revert-test/tests/revert-and-restart-en.test.ts @@ -137,14 +137,10 @@ class MainNode { env.DATABASE_MERKLE_TREE_MODE = 'full'; console.log(`DATABASE_URL = ${env.DATABASE_URL}`); - const isValidium = process.env.DEPLOYMENT_MODE == 'Validium'; - let components = 'api,tree,eth,state_keeper,commitment_generator'; + let components = 'api,tree,eth,state_keeper,commitment_generator,da_dispatcher'; if (enableConsensus) { components += ',consensus'; } - if (isValidium) { - components += ',da_dispatcher'; - } let proc = spawn('./target/release/zksync_server', ['--components', components], { cwd: env.ZKSYNC_HOME, diff --git a/core/tests/revert-test/tests/revert-and-restart.test.ts b/core/tests/revert-test/tests/revert-and-restart.test.ts index 405c8c7e890b..fcf905007320 100644 --- a/core/tests/revert-test/tests/revert-and-restart.test.ts +++ b/core/tests/revert-test/tests/revert-and-restart.test.ts @@ -62,14 +62,10 @@ describe('Block reverting test', function () { let operatorAddress = process.env.ETH_SENDER_SENDER_OPERATOR_COMMIT_ETH_ADDR; const enableConsensus = process.env.ENABLE_CONSENSUS == 'true'; - const isValidium = process.env.DEPLOYMENT_MODE == 'Validium'; - let components = 'api,tree,eth,state_keeper,commitment_generator'; + let components = 'api,tree,eth,state_keeper,commitment_generator,da_dispatcher'; if (enableConsensus) { components += ',consensus'; } - if (isValidium) { - components += ',da_dispatcher'; - } before('create test wallet', async () => { tester = await Tester.init( diff --git a/core/tests/ts-integration/tests/fees.test.ts b/core/tests/ts-integration/tests/fees.test.ts index 81ac6034f4cd..3a87ca3d7f5a 100644 --- a/core/tests/ts-integration/tests/fees.test.ts +++ b/core/tests/ts-integration/tests/fees.test.ts @@ -129,8 +129,7 @@ testFees('Test fees', () => { } ], gasPrice, - reports, - testMaster.environment().l1BatchCommitDataGeneratorMode + reports ); } @@ -159,7 +158,6 @@ testFees('Test fees', () => { await setInternalL1GasPrice( alice._providerL2(), - testMaster.environment().l1BatchCommitDataGeneratorMode, requiredPubdataPrice.toString(), requiredPubdataPrice.toString() ); @@ -203,13 +201,7 @@ testFees('Test fees', () => { afterAll(async () => { // Returning the pubdata price to the default one - await setInternalL1GasPrice( - alice._providerL2(), - testMaster.environment().l1BatchCommitDataGeneratorMode, - undefined, - undefined, - true - ); + await setInternalL1GasPrice(alice._providerL2(), undefined, undefined, true); await testMaster.deinitialize(); }); @@ -220,16 +212,10 @@ async 
function appendResults( originalL1Receipts: ethers.providers.TransactionReceipt[], transactionRequests: ethers.providers.TransactionRequest[], newL1GasPrice: number, - reports: string[], - deploymentMode: DataAvailabityMode + reports: string[] ): Promise { // For the sake of simplicity, we'll use the same pubdata price as the L1 gas price. - await setInternalL1GasPrice( - sender._providerL2(), - deploymentMode, - newL1GasPrice.toString(), - newL1GasPrice.toString() - ); + await setInternalL1GasPrice(sender._providerL2(), newL1GasPrice.toString(), newL1GasPrice.toString()); if (originalL1Receipts.length !== reports.length && originalL1Receipts.length !== transactionRequests.length) { throw new Error('The array of receipts and reports have different length'); @@ -302,7 +288,6 @@ async function killServerAndWaitForShutdown(provider: zksync.Provider) { async function setInternalL1GasPrice( provider: zksync.Provider, - deploymentMode: DataAvailabityMode, newL1GasPrice?: string, newPubdataPrice?: string, disconnect?: boolean @@ -313,10 +298,7 @@ async function setInternalL1GasPrice( } catch (_) {} // Run server in background. - let command = 'zk server --components api,tree,eth,state_keeper'; - if (deploymentMode == DataAvailabityMode.Validium) { - command += `,da_dispatcher`; - } + let command = 'zk server --components api,tree,eth,state_keeper,da_dispatcher'; command = `DATABASE_MERKLE_TREE_MODE=full ${command}`; if (newPubdataPrice) { diff --git a/core/tests/upgrade-test/tests/upgrade.test.ts b/core/tests/upgrade-test/tests/upgrade.test.ts index b2c8d4ea8408..0afdb3bb2e7a 100644 --- a/core/tests/upgrade-test/tests/upgrade.test.ts +++ b/core/tests/upgrade-test/tests/upgrade.test.ts @@ -28,10 +28,7 @@ const STATE_TRANSITON_MANAGER = new ethers.utils.Interface( require(`${L1_CONTRACTS_FOLDER}/state-transition/StateTransitionManager.sol/StateTransitionManager.json`).abi ); -let serverComponents = 'api,tree,eth,state_keeper,commitment_generator'; -if (process.env.CHAIN_STATE_KEEPER_L1_BATCH_COMMIT_DATA_GENERATOR_MODE == 'Validium') { - serverComponents += ',da_dispatcher'; -} +let serverComponents = 'api,tree,eth,state_keeper,commitment_generator,da_dispatcher'; const depositAmount = ethers.utils.parseEther('0.001'); From 3b620222863127df392bbc3628975e6e2f199de7 Mon Sep 17 00:00:00 2001 From: dimazhornyk Date: Sun, 16 Jun 2024 11:40:24 +0200 Subject: [PATCH 63/69] improve naming, imports and error message --- Cargo.lock | 1 - core/bin/zksync_server/src/node_builder.rs | 22 ++++++++++++++----- core/lib/config/Cargo.toml | 1 - core/lib/da_client/Cargo.toml | 4 ++-- core/lib/da_client/src/lib.rs | 2 +- core/lib/da_client/src/types.rs | 11 +++++----- .../default_da_clients/src/no_da/client.rs | 2 +- .../src/object_store/client.rs | 2 +- .../src/object_store/config.rs | 12 +++++----- core/lib/protobuf_config/src/da_dispatcher.rs | 4 ++-- .../src/proto/config/da_dispatcher.proto | 2 +- core/lib/zksync_core_leftovers/src/lib.rs | 2 +- core/node/da_dispatcher/src/da_dispatcher.rs | 11 +++++----- .../implementations/layers/da_dispatcher.rs | 2 +- 14 files changed, 42 insertions(+), 36 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 8667242d8c39..594ae5236f20 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -8097,7 +8097,6 @@ dependencies = [ "zksync_basic_types", "zksync_consensus_utils", "zksync_crypto_primitives", - "zksync_system_constants", ] [[package]] diff --git a/core/bin/zksync_server/src/node_builder.rs b/core/bin/zksync_server/src/node_builder.rs index 517110c56e91..0d3b5b019797 100644 --- 
a/core/bin/zksync_server/src/node_builder.rs +++ b/core/bin/zksync_server/src/node_builder.rs @@ -4,13 +4,16 @@ use anyhow::Context; use prometheus_exporter::PrometheusExporterConfig; use zksync_config::{ - configs::{consensus::ConsensusConfig, wallets::Wallets, GeneralConfig, Secrets}, + configs::{ + consensus::ConsensusConfig, eth_sender::PubdataSendingMode, wallets::Wallets, + GeneralConfig, Secrets, + }, ContractsConfig, GenesisConfig, }; use zksync_core_leftovers::Component; use zksync_default_da_clients::{ no_da::wiring_layer::NoDAClientWiringLayer, - object_store::{config::ObjectStoreDAConfig, wiring_layer::ObjectStorageClientWiringLayer}, + object_store::{config::DAObjectStoreConfig, wiring_layer::ObjectStorageClientWiringLayer}, }; use zksync_metadata_calculator::MetadataCalculatorConfig; use zksync_node_api_server::{ @@ -410,14 +413,21 @@ impl MainNodeBuilder { #[allow(dead_code)] fn add_object_storage_da_client_layer(mut self) -> anyhow::Result { - let object_store_config = ObjectStoreDAConfig::from_env()?; - self.node.add_layer(ObjectStorageClientWiringLayer::new( - object_store_config.config, - )); + let object_store_config = DAObjectStoreConfig::from_env()?; + self.node + .add_layer(ObjectStorageClientWiringLayer::new(object_store_config.0)); Ok(self) } fn add_da_dispatcher_layer(mut self) -> anyhow::Result { + let eth_sender_config = try_load_config!(self.configs.eth); + if let Some(sender_config) = eth_sender_config.sender { + if sender_config.pubdata_sending_mode != PubdataSendingMode::Custom { + tracing::warn!("DA dispatcher is enabled, but the pubdata sending mode is not `Custom`. DA dispatcher will not be started."); + return Ok(self); + } + } + let state_keeper_config = try_load_config!(self.configs.state_keeper_config); let da_config = try_load_config!(self.configs.da_dispatcher_config); self.node.add_layer(DataAvailabilityDispatcherLayer::new( diff --git a/core/lib/config/Cargo.toml b/core/lib/config/Cargo.toml index 24e05696fd5a..144843c2bab2 100644 --- a/core/lib/config/Cargo.toml +++ b/core/lib/config/Cargo.toml @@ -13,7 +13,6 @@ categories.workspace = true zksync_basic_types.workspace = true zksync_crypto_primitives.workspace = true zksync_consensus_utils.workspace = true -zksync_system_constants.workspace = true anyhow.workspace = true rand.workspace = true diff --git a/core/lib/da_client/Cargo.toml b/core/lib/da_client/Cargo.toml index 8dae145426b3..da118058eab5 100644 --- a/core/lib/da_client/Cargo.toml +++ b/core/lib/da_client/Cargo.toml @@ -11,8 +11,8 @@ categories.workspace = true [dependencies] serde = { workspace = true, features = ["derive"] } -tracing = "0.1.40" -async-trait = "0.1.74" +tracing.workspace = true +async-trait.workspace = true anyhow.workspace = true zksync_config.workspace = true diff --git a/core/lib/da_client/src/lib.rs b/core/lib/da_client/src/lib.rs index 6912c2fadbb8..7e4a2643a259 100644 --- a/core/lib/da_client/src/lib.rs +++ b/core/lib/da_client/src/lib.rs @@ -16,7 +16,7 @@ pub trait DataAvailabilityClient: Sync + Send + fmt::Debug { ) -> Result; /// Fetches the inclusion data for a given blob_id. - async fn get_inclusion_data(&self, blob_id: String) -> Result, DAError>; + async fn get_inclusion_data(&self, blob_id: &str) -> Result, DAError>; /// Clones the client and wraps it in a Box. 
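// `clone_boxed` is the usual workaround for cloning a `Box<dyn Trait>`: a `Clone`
// supertrait would break object safety, so the trait exposes a method that returns a
// fresh box instead. A self-contained sketch of the pattern on a stand-in trait
// (the names below are illustrative, not this crate's API):

trait BoxedClone: Send + Sync {
    fn clone_boxed(&self) -> Box<dyn BoxedClone>;
}

// `Box<dyn BoxedClone>` becomes `Clone` by delegating to `clone_boxed`.
impl Clone for Box<dyn BoxedClone> {
    fn clone(&self) -> Self {
        self.clone_boxed()
    }
}

#[derive(Clone)]
struct StandInClient;

impl BoxedClone for StandInClient {
    fn clone_boxed(&self) -> Box<dyn BoxedClone> {
        Box::new(self.clone())
    }
}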
fn clone_boxed(&self) -> Box; diff --git a/core/lib/da_client/src/types.rs b/core/lib/da_client/src/types.rs index fef5c6a0a2e7..e339111bb51a 100644 --- a/core/lib/da_client/src/types.rs +++ b/core/lib/da_client/src/types.rs @@ -17,11 +17,12 @@ impl DAError { impl Display for DAError { fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { - write!( - f, - "DAError: {}, is_transient: {}", - self.error, self.is_transient - ) + let kind = if self.is_transient { + "transient" + } else { + "fatal" + }; + write!(f, "{kind} data availability client error: {}", self.error) } } diff --git a/core/lib/default_da_clients/src/no_da/client.rs b/core/lib/default_da_clients/src/no_da/client.rs index cc73b8744a0d..2710c9ce9d9b 100644 --- a/core/lib/default_da_clients/src/no_da/client.rs +++ b/core/lib/default_da_clients/src/no_da/client.rs @@ -14,7 +14,7 @@ impl DataAvailabilityClient for NoDAClient { Ok(DispatchResponse::default()) } - async fn get_inclusion_data(&self, _: String) -> Result, DAError> { + async fn get_inclusion_data(&self, _: &str) -> Result, DAError> { return Ok(Some(InclusionData::default())); } diff --git a/core/lib/default_da_clients/src/object_store/client.rs b/core/lib/default_da_clients/src/object_store/client.rs index e6b2ab2c8485..35d4b23d0fa3 100644 --- a/core/lib/default_da_clients/src/object_store/client.rs +++ b/core/lib/default_da_clients/src/object_store/client.rs @@ -48,7 +48,7 @@ impl DataAvailabilityClient for ObjectStoreDAClient { }) } - async fn get_inclusion_data(&self, key: String) -> Result, DAError> { + async fn get_inclusion_data(&self, key: &str) -> Result, DAError> { let key_u32 = key.parse::().map_err(|err| DAError { error: anyhow::Error::from(err).context(format!("Failed to parse blob key: {}", key)), is_transient: false, diff --git a/core/lib/default_da_clients/src/object_store/config.rs b/core/lib/default_da_clients/src/object_store/config.rs index 427aa3dd0d9c..285c39827c79 100644 --- a/core/lib/default_da_clients/src/object_store/config.rs +++ b/core/lib/default_da_clients/src/object_store/config.rs @@ -1,14 +1,12 @@ use zksync_config::ObjectStoreConfig; use zksync_env_config::envy_load; -pub struct ObjectStoreDAConfig { - pub config: ObjectStoreConfig, -} +#[derive(Debug)] +pub struct DAObjectStoreConfig(pub ObjectStoreConfig); -impl ObjectStoreDAConfig { +impl DAObjectStoreConfig { pub fn from_env() -> anyhow::Result { - Ok(Self { - config: envy_load("object_store", "OBJECT_STORE_DA_CLIENT_")?, - }) + let config = envy_load("object_store", "DA_CLIENT_OBJECT_STORE_")?; + Ok(Self(config)) } } diff --git a/core/lib/protobuf_config/src/da_dispatcher.rs b/core/lib/protobuf_config/src/da_dispatcher.rs index 5d94cdd607b6..27e8eb3a4316 100644 --- a/core/lib/protobuf_config/src/da_dispatcher.rs +++ b/core/lib/protobuf_config/src/da_dispatcher.rs @@ -8,7 +8,7 @@ impl ProtoRepr for proto::DataAvailabilityDispatcher { fn read(&self) -> anyhow::Result { Ok(configs::da_dispatcher::DADispatcherConfig { - polling_interval_ms: self.polling_interval, + polling_interval_ms: self.polling_interval_ms, max_rows_to_dispatch: self.max_rows_to_dispatch, max_retries: self.max_retries.map(|x| x as u16), }) @@ -16,7 +16,7 @@ impl ProtoRepr for proto::DataAvailabilityDispatcher { fn build(this: &Self::Type) -> Self { Self { - polling_interval: this.polling_interval_ms, + polling_interval_ms: this.polling_interval_ms, max_rows_to_dispatch: this.max_rows_to_dispatch, max_retries: this.max_retries.map(|x| x as u32), } diff --git 
a/core/lib/protobuf_config/src/proto/config/da_dispatcher.proto b/core/lib/protobuf_config/src/proto/config/da_dispatcher.proto index ffe4af7a73cd..d1d913498a4e 100644 --- a/core/lib/protobuf_config/src/proto/config/da_dispatcher.proto +++ b/core/lib/protobuf_config/src/proto/config/da_dispatcher.proto @@ -5,7 +5,7 @@ package zksync.config.da_dispatcher; import "zksync/config/object_store.proto"; message DataAvailabilityDispatcher { - optional uint32 polling_interval = 1; + optional uint32 polling_interval_ms = 1; optional uint32 max_rows_to_dispatch = 2; optional uint32 max_retries = 3; } diff --git a/core/lib/zksync_core_leftovers/src/lib.rs b/core/lib/zksync_core_leftovers/src/lib.rs index b283b7f4f950..a1aa0ae74584 100644 --- a/core/lib/zksync_core_leftovers/src/lib.rs +++ b/core/lib/zksync_core_leftovers/src/lib.rs @@ -767,7 +767,7 @@ pub async fn initialize_components( .clone() .context("da_dispatcher_config")?; let da_dispatcher_pool = - ConnectionPool::::singleton(database_secrets.master_url()?) + ConnectionPool::::builder(database_secrets.master_url()?, 2) .build() .await .context("failed to build da_dispatcher_pool")?; diff --git a/core/node/da_dispatcher/src/da_dispatcher.rs b/core/node/da_dispatcher/src/da_dispatcher.rs index 204716cd83d1..7e3e1494d2cb 100644 --- a/core/node/da_dispatcher/src/da_dispatcher.rs +++ b/core/node/da_dispatcher/src/da_dispatcher.rs @@ -32,7 +32,6 @@ impl DataAvailabilityDispatcher { } pub async fn run(self, mut stop_receiver: watch::Receiver) -> anyhow::Result<()> { - let pool = self.pool.clone(); loop { if *stop_receiver.borrow() { break; @@ -45,7 +44,7 @@ impl DataAvailabilityDispatcher { } }, async { - if let Err(err) = self.poll_for_inclusion(&pool).await { + if let Err(err) = self.poll_for_inclusion().await { tracing::error!("poll_for_inclusion error {err:?}"); } } @@ -116,8 +115,8 @@ impl DataAvailabilityDispatcher { } /// Polls the data availability layer for inclusion data, and saves it in the database. 
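// The cadence of this polling comes from `DADispatcherConfig`, whose protobuf field
// is now explicitly named `polling_interval_ms`. A rough sketch of how such a
// millisecond field usually maps to the `polling_interval()` accessor used in the
// loop above (the field shape and the 5000 ms fallback are assumptions, not the
// exact config code):

#[derive(Debug, Clone)]
struct DispatcherConfigSketch {
    polling_interval_ms: Option<u32>,
}

impl DispatcherConfigSketch {
    fn polling_interval(&self) -> std::time::Duration {
        // Fall back to a default interval when the optional field is not set.
        std::time::Duration::from_millis(self.polling_interval_ms.unwrap_or(5_000).into())
    }
}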
- async fn poll_for_inclusion(&self, pool: &ConnectionPool) -> anyhow::Result<()> { - let mut conn = pool.connection_tagged("da_dispatcher").await?; + async fn poll_for_inclusion(&self) -> anyhow::Result<()> { + let mut conn = self.pool.connection_tagged("da_dispatcher").await?; let blob_info = conn .data_availability_dal() .get_first_da_blob_awaiting_inclusion() @@ -127,7 +126,7 @@ impl DataAvailabilityDispatcher { if let Some(blob_info) = blob_info { let inclusion_data = self .client - .get_inclusion_data(blob_info.blob_id.clone()) + .get_inclusion_data(blob_info.blob_id.as_str()) .await .with_context(|| { format!( @@ -136,7 +135,7 @@ impl DataAvailabilityDispatcher { ) })?; - let mut conn = pool.connection_tagged("da_dispatcher").await?; + let mut conn = self.pool.connection_tagged("da_dispatcher").await?; if let Some(inclusion_data) = inclusion_data { conn.data_availability_dal() .save_l1_batch_inclusion_data( diff --git a/core/node/node_framework/src/implementations/layers/da_dispatcher.rs b/core/node/node_framework/src/implementations/layers/da_dispatcher.rs index bc05bd343169..d15da3cc533f 100644 --- a/core/node/node_framework/src/implementations/layers/da_dispatcher.rs +++ b/core/node/node_framework/src/implementations/layers/da_dispatcher.rs @@ -37,7 +37,7 @@ impl WiringLayer for DataAvailabilityDispatcherLayer { async fn wire(self: Box, mut context: ServiceContext<'_>) -> Result<(), WiringError> { let master_pool_resource = context.get_resource::>().await?; - let master_pool = master_pool_resource.get().await?; + let master_pool = master_pool_resource.get_custom(2).await?; let da_client = context.get_resource::().await?.0; if let Some(limit) = da_client.blob_size_limit() { From a2c26bca9123b0e12894b3edaae1b683885cef10 Mon Sep 17 00:00:00 2001 From: dimazhornyk Date: Sun, 16 Jun 2024 13:01:37 +0200 Subject: [PATCH 64/69] fix prover/Cargo.lock --- prover/Cargo.lock | 1 - 1 file changed, 1 deletion(-) diff --git a/prover/Cargo.lock b/prover/Cargo.lock index 36f74674e7be..05d02d745f53 100644 --- a/prover/Cargo.lock +++ b/prover/Cargo.lock @@ -8496,7 +8496,6 @@ dependencies = [ "zksync_basic_types", "zksync_consensus_utils", "zksync_crypto_primitives", - "zksync_system_constants", ] [[package]] From 7ce721ab3d3014363cef8548632b9567b383d02e Mon Sep 17 00:00:00 2001 From: dimazhornyk Date: Tue, 18 Jun 2024 17:56:52 +0200 Subject: [PATCH 65/69] chore: fix code review nits --- .../20240522081114_create_data_availability_table.down.sql | 2 +- .../20240522081114_create_data_availability_table.up.sql | 2 +- core/lib/default_da_clients/Cargo.toml | 4 ++-- core/lib/default_da_clients/src/lib.rs | 3 +-- core/lib/protobuf_config/src/da_dispatcher.rs | 2 +- core/lib/zksync_core_leftovers/src/lib.rs | 1 + .../src/implementations/layers/da_dispatcher.rs | 1 + 7 files changed, 8 insertions(+), 7 deletions(-) diff --git a/core/lib/dal/migrations/20240522081114_create_data_availability_table.down.sql b/core/lib/dal/migrations/20240522081114_create_data_availability_table.down.sql index fd4c3d62f315..b6993d850ea5 100644 --- a/core/lib/dal/migrations/20240522081114_create_data_availability_table.down.sql +++ b/core/lib/dal/migrations/20240522081114_create_data_availability_table.down.sql @@ -1 +1 @@ -DROP TABLE data_availability; +DROP TABLE IF EXISTS data_availability; diff --git a/core/lib/dal/migrations/20240522081114_create_data_availability_table.up.sql b/core/lib/dal/migrations/20240522081114_create_data_availability_table.up.sql index ae98593a3766..037398021da6 100644 --- 
a/core/lib/dal/migrations/20240522081114_create_data_availability_table.up.sql +++ b/core/lib/dal/migrations/20240522081114_create_data_availability_table.up.sql @@ -1,4 +1,4 @@ -CREATE TABLE data_availability +CREATE TABLE IF NOT EXISTS data_availability ( l1_batch_number BIGINT PRIMARY KEY REFERENCES l1_batches (number) ON DELETE CASCADE, diff --git a/core/lib/default_da_clients/Cargo.toml b/core/lib/default_da_clients/Cargo.toml index 9682d4ba19c4..d376d8641800 100644 --- a/core/lib/default_da_clients/Cargo.toml +++ b/core/lib/default_da_clients/Cargo.toml @@ -11,8 +11,8 @@ categories.workspace = true [dependencies] serde = { workspace = true, features = ["derive"] } -tracing = "0.1.40" -async-trait = "0.1.74" +tracing.workspace = true +async-trait.workspace = true anyhow.workspace = true zksync_config.workspace = true diff --git a/core/lib/default_da_clients/src/lib.rs b/core/lib/default_da_clients/src/lib.rs index 96cffc760774..3aa2a18cdcec 100644 --- a/core/lib/default_da_clients/src/lib.rs +++ b/core/lib/default_da_clients/src/lib.rs @@ -1,3 +1,2 @@ -pub mod object_store; - pub mod no_da; +pub mod object_store; diff --git a/core/lib/protobuf_config/src/da_dispatcher.rs b/core/lib/protobuf_config/src/da_dispatcher.rs index 27e8eb3a4316..1cafa37a1e19 100644 --- a/core/lib/protobuf_config/src/da_dispatcher.rs +++ b/core/lib/protobuf_config/src/da_dispatcher.rs @@ -18,7 +18,7 @@ impl ProtoRepr for proto::DataAvailabilityDispatcher { Self { polling_interval_ms: this.polling_interval_ms, max_rows_to_dispatch: this.max_rows_to_dispatch, - max_retries: this.max_retries.map(|x| x as u32), + max_retries: this.max_retries.map(Into::into), } } } diff --git a/core/lib/zksync_core_leftovers/src/lib.rs b/core/lib/zksync_core_leftovers/src/lib.rs index a1aa0ae74584..9a77f595e87b 100644 --- a/core/lib/zksync_core_leftovers/src/lib.rs +++ b/core/lib/zksync_core_leftovers/src/lib.rs @@ -766,6 +766,7 @@ pub async fn initialize_components( .da_dispatcher_config .clone() .context("da_dispatcher_config")?; + // A pool with size 2 is used here because there are 2 functions within a task that execute in parallel let da_dispatcher_pool = ConnectionPool::::builder(database_secrets.master_url()?, 2) .build() diff --git a/core/node/node_framework/src/implementations/layers/da_dispatcher.rs b/core/node/node_framework/src/implementations/layers/da_dispatcher.rs index d15da3cc533f..419d33bc570c 100644 --- a/core/node/node_framework/src/implementations/layers/da_dispatcher.rs +++ b/core/node/node_framework/src/implementations/layers/da_dispatcher.rs @@ -37,6 +37,7 @@ impl WiringLayer for DataAvailabilityDispatcherLayer { async fn wire(self: Box, mut context: ServiceContext<'_>) -> Result<(), WiringError> { let master_pool_resource = context.get_resource::>().await?; + // A pool with size 2 is used here because there are 2 functions within a task that execute in parallel let master_pool = master_pool_resource.get_custom(2).await?; let da_client = context.get_resource::().await?.0; From 1f1c18fc3841e98145d3db369649113e2a4b0da3 Mon Sep 17 00:00:00 2001 From: dimazhornyk Date: Mon, 1 Jul 2024 15:25:09 +0200 Subject: [PATCH 66/69] address code review comments --- Cargo.lock | 1 + ...897edf8c868094ad029e2e8fcf286d44fd55.json} | 4 +- ...e5c7fd0d06d506d4b6bdaa3b149b44a7b85ac.json | 23 ----------- ...2c62033a7f690353f01b2978ef9b30d52c94e.json | 22 +++++++++++ ...8f2dce89f7b700896fcc0f242e0e15ba058e.json} | 4 +- ...670ab55ca94647e0caa92adab7c18260f18ff.json | 22 +++++++++++ ...b1e2580f9f0ded642dd3085b9bf8d101bdb15.json | 23 
----------- core/lib/dal/src/blocks_dal.rs | 11 ++++-- core/lib/dal/src/data_availability_dal.rs | 26 ++++++------- core/lib/default_da_clients/Cargo.toml | 1 + .../src/object_store/client.rs | 4 +- .../src/object_store/mod.rs | 1 + .../src/object_store/types.rs | 38 +++++++++++++++++++ core/lib/object_store/src/objects.rs | 28 -------------- core/lib/types/src/pubdata_da.rs | 5 --- core/node/da_dispatcher/src/da_dispatcher.rs | 33 +++++++++++----- core/node/eth_sender/src/aggregator.rs | 2 +- .../implementations/layers/da_dispatcher.rs | 31 ++++----------- 18 files changed, 143 insertions(+), 136 deletions(-) rename core/lib/dal/.sqlx/{query-7b569dddae6e8a766392183baa902c15663bcaf6ad1c42fcdb0ca2ab7930c987.json => query-0b5d5efeac95d429cf6a5be22153897edf8c868094ad029e2e8fcf286d44fd55.json} (74%) delete mode 100644 core/lib/dal/.sqlx/query-16fdd84e9819be6f4e46b1afc5fe5c7fd0d06d506d4b6bdaa3b149b44a7b85ac.json create mode 100644 core/lib/dal/.sqlx/query-3ecd408294c93a5ee7dbbe128c52c62033a7f690353f01b2978ef9b30d52c94e.json rename core/lib/dal/.sqlx/{query-1a086d79340f332c13d8929f1710d386b7abbbb6cb56da3f7595a2fd82037197.json => query-63f95c6cdcfd933e2cf8f62c0d408f2dce89f7b700896fcc0f242e0e15ba058e.json} (95%) create mode 100644 core/lib/dal/.sqlx/query-6f003ee0311b9ff1f42d3a74587670ab55ca94647e0caa92adab7c18260f18ff.json delete mode 100644 core/lib/dal/.sqlx/query-e05fb44a407c9b0289aaf6f2addb1e2580f9f0ded642dd3085b9bf8d101bdb15.json create mode 100644 core/lib/default_da_clients/src/object_store/types.rs diff --git a/Cargo.lock b/Cargo.lock index a5b17973e0ec..9e59027b9ade 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -8435,6 +8435,7 @@ version = "0.1.0" dependencies = [ "anyhow", "async-trait", + "flate2", "serde", "tracing", "zksync_config", diff --git a/core/lib/dal/.sqlx/query-7b569dddae6e8a766392183baa902c15663bcaf6ad1c42fcdb0ca2ab7930c987.json b/core/lib/dal/.sqlx/query-0b5d5efeac95d429cf6a5be22153897edf8c868094ad029e2e8fcf286d44fd55.json similarity index 74% rename from core/lib/dal/.sqlx/query-7b569dddae6e8a766392183baa902c15663bcaf6ad1c42fcdb0ca2ab7930c987.json rename to core/lib/dal/.sqlx/query-0b5d5efeac95d429cf6a5be22153897edf8c868094ad029e2e8fcf286d44fd55.json index f5023abf2e4e..822a6967f6db 100644 --- a/core/lib/dal/.sqlx/query-7b569dddae6e8a766392183baa902c15663bcaf6ad1c42fcdb0ca2ab7930c987.json +++ b/core/lib/dal/.sqlx/query-0b5d5efeac95d429cf6a5be22153897edf8c868094ad029e2e8fcf286d44fd55.json @@ -1,6 +1,6 @@ { "db_name": "PostgreSQL", - "query": "\n INSERT INTO\n data_availability (l1_batch_number, blob_id, sent_at, created_at, updated_at)\n VALUES\n ($1, $2, $3, NOW(), NOW())\n ", + "query": "\n INSERT INTO\n data_availability (l1_batch_number, blob_id, sent_at, created_at, updated_at)\n VALUES\n ($1, $2, $3, NOW(), NOW())\n ON CONFLICT DO NOTHING\n ", "describe": { "columns": [], "parameters": { @@ -12,5 +12,5 @@ }, "nullable": [] }, - "hash": "7b569dddae6e8a766392183baa902c15663bcaf6ad1c42fcdb0ca2ab7930c987" + "hash": "0b5d5efeac95d429cf6a5be22153897edf8c868094ad029e2e8fcf286d44fd55" } diff --git a/core/lib/dal/.sqlx/query-16fdd84e9819be6f4e46b1afc5fe5c7fd0d06d506d4b6bdaa3b149b44a7b85ac.json b/core/lib/dal/.sqlx/query-16fdd84e9819be6f4e46b1afc5fe5c7fd0d06d506d4b6bdaa3b149b44a7b85ac.json deleted file mode 100644 index 0dc59785a8b6..000000000000 --- a/core/lib/dal/.sqlx/query-16fdd84e9819be6f4e46b1afc5fe5c7fd0d06d506d4b6bdaa3b149b44a7b85ac.json +++ /dev/null @@ -1,23 +0,0 @@ -{ - "db_name": "PostgreSQL", - "query": "\n SELECT\n COUNT(*) AS \"count!\"\n FROM\n 
data_availability\n WHERE\n l1_batch_number = $1\n AND blob_id = $2\n ", - "describe": { - "columns": [ - { - "ordinal": 0, - "name": "count!", - "type_info": "Int8" - } - ], - "parameters": { - "Left": [ - "Int8", - "Text" - ] - }, - "nullable": [ - null - ] - }, - "hash": "16fdd84e9819be6f4e46b1afc5fe5c7fd0d06d506d4b6bdaa3b149b44a7b85ac" -} diff --git a/core/lib/dal/.sqlx/query-3ecd408294c93a5ee7dbbe128c52c62033a7f690353f01b2978ef9b30d52c94e.json b/core/lib/dal/.sqlx/query-3ecd408294c93a5ee7dbbe128c52c62033a7f690353f01b2978ef9b30d52c94e.json new file mode 100644 index 000000000000..a64b8e06628f --- /dev/null +++ b/core/lib/dal/.sqlx/query-3ecd408294c93a5ee7dbbe128c52c62033a7f690353f01b2978ef9b30d52c94e.json @@ -0,0 +1,22 @@ +{ + "db_name": "PostgreSQL", + "query": "\n SELECT\n inclusion_data\n FROM\n data_availability\n WHERE\n l1_batch_number = $1\n ", + "describe": { + "columns": [ + { + "ordinal": 0, + "name": "inclusion_data", + "type_info": "Bytea" + } + ], + "parameters": { + "Left": [ + "Int8" + ] + }, + "nullable": [ + true + ] + }, + "hash": "3ecd408294c93a5ee7dbbe128c52c62033a7f690353f01b2978ef9b30d52c94e" +} diff --git a/core/lib/dal/.sqlx/query-1a086d79340f332c13d8929f1710d386b7abbbb6cb56da3f7595a2fd82037197.json b/core/lib/dal/.sqlx/query-63f95c6cdcfd933e2cf8f62c0d408f2dce89f7b700896fcc0f242e0e15ba058e.json similarity index 95% rename from core/lib/dal/.sqlx/query-1a086d79340f332c13d8929f1710d386b7abbbb6cb56da3f7595a2fd82037197.json rename to core/lib/dal/.sqlx/query-63f95c6cdcfd933e2cf8f62c0d408f2dce89f7b700896fcc0f242e0e15ba058e.json index d150ea03348f..cb68e7622524 100644 --- a/core/lib/dal/.sqlx/query-1a086d79340f332c13d8929f1710d386b7abbbb6cb56da3f7595a2fd82037197.json +++ b/core/lib/dal/.sqlx/query-63f95c6cdcfd933e2cf8f62c0d408f2dce89f7b700896fcc0f242e0e15ba058e.json @@ -1,6 +1,6 @@ { "db_name": "PostgreSQL", - "query": "\n SELECT\n number,\n l1_batches.timestamp,\n l1_tx_count,\n l2_tx_count,\n bloom,\n priority_ops_onchain_data,\n hash,\n commitment,\n l2_to_l1_messages,\n used_contract_hashes,\n compressed_initial_writes,\n compressed_repeated_writes,\n l2_l1_merkle_root,\n rollup_last_leaf_index,\n zkporter_is_available,\n l1_batches.bootloader_code_hash,\n l1_batches.default_aa_code_hash,\n aux_data_hash,\n pass_through_data_hash,\n meta_parameters_hash,\n protocol_version,\n compressed_state_diffs,\n system_logs,\n events_queue_commitment,\n bootloader_initial_content_commitment,\n pubdata_input\n FROM\n l1_batches\n LEFT JOIN commitments ON commitments.l1_batch_number = l1_batches.number\n LEFT JOIN data_availability ON data_availability.l1_batch_number = l1_batches.number\n JOIN protocol_versions ON protocol_versions.id = l1_batches.protocol_version\n WHERE\n eth_commit_tx_id IS NULL\n AND number != 0\n AND protocol_versions.bootloader_code_hash = $1\n AND protocol_versions.default_account_code_hash = $2\n AND commitment IS NOT NULL\n AND (\n protocol_versions.id = $3\n OR protocol_versions.upgrade_tx_hash IS NULL\n )\n AND events_queue_commitment IS NOT NULL\n AND bootloader_initial_content_commitment IS NOT NULL\n AND (\n data_availability.inclusion_data IS NOT NULL\n OR $4 IS TRUE\n )\n ORDER BY\n number\n LIMIT\n $5\n ", + "query": "\n SELECT\n number,\n l1_batches.timestamp,\n l1_tx_count,\n l2_tx_count,\n bloom,\n priority_ops_onchain_data,\n hash,\n commitment,\n l2_to_l1_messages,\n used_contract_hashes,\n compressed_initial_writes,\n compressed_repeated_writes,\n l2_l1_merkle_root,\n rollup_last_leaf_index,\n zkporter_is_available,\n 
l1_batches.bootloader_code_hash,\n l1_batches.default_aa_code_hash,\n aux_data_hash,\n pass_through_data_hash,\n meta_parameters_hash,\n protocol_version,\n compressed_state_diffs,\n system_logs,\n events_queue_commitment,\n bootloader_initial_content_commitment,\n pubdata_input\n FROM\n l1_batches\n LEFT JOIN commitments ON commitments.l1_batch_number = l1_batches.number\n LEFT JOIN data_availability ON data_availability.l1_batch_number = l1_batches.number\n JOIN protocol_versions ON protocol_versions.id = l1_batches.protocol_version\n WHERE\n eth_commit_tx_id IS NULL\n AND number != 0\n AND protocol_versions.bootloader_code_hash = $1\n AND protocol_versions.default_account_code_hash = $2\n AND commitment IS NOT NULL\n AND (\n protocol_versions.id = $3\n OR protocol_versions.upgrade_tx_hash IS NULL\n )\n AND events_queue_commitment IS NOT NULL\n AND bootloader_initial_content_commitment IS NOT NULL\n AND (\n data_availability.inclusion_data IS NOT NULL\n OR $4 IS FALSE\n )\n ORDER BY\n number\n LIMIT\n $5\n ", "describe": { "columns": [ { @@ -172,5 +172,5 @@ true ] }, - "hash": "1a086d79340f332c13d8929f1710d386b7abbbb6cb56da3f7595a2fd82037197" + "hash": "63f95c6cdcfd933e2cf8f62c0d408f2dce89f7b700896fcc0f242e0e15ba058e" } diff --git a/core/lib/dal/.sqlx/query-6f003ee0311b9ff1f42d3a74587670ab55ca94647e0caa92adab7c18260f18ff.json b/core/lib/dal/.sqlx/query-6f003ee0311b9ff1f42d3a74587670ab55ca94647e0caa92adab7c18260f18ff.json new file mode 100644 index 000000000000..768089b083a1 --- /dev/null +++ b/core/lib/dal/.sqlx/query-6f003ee0311b9ff1f42d3a74587670ab55ca94647e0caa92adab7c18260f18ff.json @@ -0,0 +1,22 @@ +{ + "db_name": "PostgreSQL", + "query": "\n SELECT\n blob_id\n FROM\n data_availability\n WHERE\n l1_batch_number = $1\n ", + "describe": { + "columns": [ + { + "ordinal": 0, + "name": "blob_id", + "type_info": "Text" + } + ], + "parameters": { + "Left": [ + "Int8" + ] + }, + "nullable": [ + false + ] + }, + "hash": "6f003ee0311b9ff1f42d3a74587670ab55ca94647e0caa92adab7c18260f18ff" +} diff --git a/core/lib/dal/.sqlx/query-e05fb44a407c9b0289aaf6f2addb1e2580f9f0ded642dd3085b9bf8d101bdb15.json b/core/lib/dal/.sqlx/query-e05fb44a407c9b0289aaf6f2addb1e2580f9f0ded642dd3085b9bf8d101bdb15.json deleted file mode 100644 index 6bd046eb9d19..000000000000 --- a/core/lib/dal/.sqlx/query-e05fb44a407c9b0289aaf6f2addb1e2580f9f0ded642dd3085b9bf8d101bdb15.json +++ /dev/null @@ -1,23 +0,0 @@ -{ - "db_name": "PostgreSQL", - "query": "\n SELECT\n COUNT(*) AS \"count!\"\n FROM\n data_availability\n WHERE\n l1_batch_number = $1\n AND inclusion_data = $2\n ", - "describe": { - "columns": [ - { - "ordinal": 0, - "name": "count!", - "type_info": "Int8" - } - ], - "parameters": { - "Left": [ - "Int8", - "Bytea" - ] - }, - "nullable": [ - null - ] - }, - "hash": "e05fb44a407c9b0289aaf6f2addb1e2580f9f0ded642dd3085b9bf8d101bdb15" -} diff --git a/core/lib/dal/src/blocks_dal.rs b/core/lib/dal/src/blocks_dal.rs index 0f955e98ef86..4f4b3e99ff7b 100644 --- a/core/lib/dal/src/blocks_dal.rs +++ b/core/lib/dal/src/blocks_dal.rs @@ -1578,13 +1578,16 @@ impl BlocksDal<'_, '_> { .context("map_l1_batches()") } + /// When `with_da_inclusion_info` is true, only batches for which custom DA inclusion + /// information has already been provided will be included pub async fn get_ready_for_commit_l1_batches( &mut self, limit: usize, bootloader_hash: H256, default_aa_hash: H256, protocol_version_id: ProtocolVersionId, - is_rollup: bool, + + with_da_inclusion_info: bool, ) -> anyhow::Result> { let raw_batches = sqlx::query_as!( 
StorageL1Batch, @@ -1635,7 +1638,7 @@ impl BlocksDal<'_, '_> { AND bootloader_initial_content_commitment IS NOT NULL AND ( data_availability.inclusion_data IS NOT NULL - OR $4 IS TRUE + OR $4 IS FALSE ) ORDER BY number @@ -1645,7 +1648,7 @@ impl BlocksDal<'_, '_> { bootloader_hash.as_bytes(), default_aa_hash.as_bytes(), protocol_version_id as i32, - is_rollup, + with_da_inclusion_info, limit as i64, ) .instrument("get_ready_for_commit_l1_batches") @@ -1653,7 +1656,7 @@ impl BlocksDal<'_, '_> { .with_arg("bootloader_hash", &bootloader_hash) .with_arg("default_aa_hash", &default_aa_hash) .with_arg("protocol_version_id", &protocol_version_id) - .with_arg("is_rollup", &is_rollup) + .with_arg("with_da_inclusion_info", &with_da_inclusion_info) .fetch_all(self.storage) .await?; diff --git a/core/lib/dal/src/data_availability_dal.rs b/core/lib/dal/src/data_availability_dal.rs index 5757688789bc..24048ec4fa19 100644 --- a/core/lib/dal/src/data_availability_dal.rs +++ b/core/lib/dal/src/data_availability_dal.rs @@ -31,6 +31,7 @@ impl DataAvailabilityDal<'_, '_> { data_availability (l1_batch_number, blob_id, sent_at, created_at, updated_at) VALUES ($1, $2, $3, NOW(), NOW()) + ON CONFLICT DO NOTHING "#, i64::from(number.0), blob_id, @@ -48,34 +49,31 @@ impl DataAvailabilityDal<'_, '_> { "L1 batch #{number}: DA blob_id wasn't updated as it's already present" ); - let instrumentation = Instrumented::new("get_matching_batch_da_blob_id") - .with_arg("number", &number) - .with_arg("blob_id", &blob_id); + let instrumentation = + Instrumented::new("get_matching_batch_da_blob_id").with_arg("number", &number); // Batch was already processed. Verify that existing DA blob_id matches let query = sqlx::query!( r#" SELECT - COUNT(*) AS "count!" + blob_id FROM data_availability WHERE l1_batch_number = $1 - AND blob_id = $2 "#, i64::from(number.0), - blob_id, ); - let matched: i64 = instrumentation + let matched: String = instrumentation .clone() .with(query) .report_latency() .fetch_one(self.storage) .await? - .count; + .blob_id; - if matched != 1 { + if matched != *blob_id.to_string() { let err = instrumentation.constraint_error(anyhow::anyhow!( "Error storing DA blob id. DA blob_id {blob_id} for L1 batch #{number} does not match the expected value" )); @@ -122,26 +120,24 @@ impl DataAvailabilityDal<'_, '_> { let query = sqlx::query!( r#" SELECT - COUNT(*) AS "count!" + inclusion_data FROM data_availability WHERE l1_batch_number = $1 - AND inclusion_data = $2 "#, i64::from(number.0), - da_inclusion_data, ); - let matched: i64 = instrumentation + let matched: Option> = instrumentation .clone() .with(query) .report_latency() .fetch_one(self.storage) .await? - .count; + .inclusion_data; - if matched != 1 { + if matched.unwrap_or_default() != da_inclusion_data.to_vec() { let err = instrumentation.constraint_error(anyhow::anyhow!( "Error storing DA inclusion data. 
DA data for L1 batch #{number} does not match the one provided before" )); diff --git a/core/lib/default_da_clients/Cargo.toml b/core/lib/default_da_clients/Cargo.toml index d376d8641800..c19af34681a8 100644 --- a/core/lib/default_da_clients/Cargo.toml +++ b/core/lib/default_da_clients/Cargo.toml @@ -14,6 +14,7 @@ serde = { workspace = true, features = ["derive"] } tracing.workspace = true async-trait.workspace = true anyhow.workspace = true +flate2.workspace = true zksync_config.workspace = true zksync_types.workspace = true diff --git a/core/lib/default_da_clients/src/object_store/client.rs b/core/lib/default_da_clients/src/object_store/client.rs index 081426ecb99a..fc17a842a099 100644 --- a/core/lib/default_da_clients/src/object_store/client.rs +++ b/core/lib/default_da_clients/src/object_store/client.rs @@ -7,7 +7,9 @@ use zksync_da_client::{ DataAvailabilityClient, }; use zksync_object_store::{ObjectStore, ObjectStoreFactory}; -use zksync_types::{pubdata_da::StorablePubdata, L1BatchNumber}; +use zksync_types::L1BatchNumber; + +use crate::object_store::types::StorablePubdata; /// An implementation of the `DataAvailabilityClient` trait that stores the pubdata in the GCS. #[derive(Clone, Debug)] diff --git a/core/lib/default_da_clients/src/object_store/mod.rs b/core/lib/default_da_clients/src/object_store/mod.rs index bc66b1789845..1600941b0572 100644 --- a/core/lib/default_da_clients/src/object_store/mod.rs +++ b/core/lib/default_da_clients/src/object_store/mod.rs @@ -1,3 +1,4 @@ pub mod client; pub mod config; +mod types; pub mod wiring_layer; diff --git a/core/lib/default_da_clients/src/object_store/types.rs b/core/lib/default_da_clients/src/object_store/types.rs new file mode 100644 index 000000000000..b8ec9303e71e --- /dev/null +++ b/core/lib/default_da_clients/src/object_store/types.rs @@ -0,0 +1,38 @@ +use std::io::{Read, Write}; + +use flate2::{read::GzDecoder, write::GzEncoder, Compression}; +use zksync_object_store::{Bucket, StoredObject, _reexports::BoxedError}; +use zksync_types::L1BatchNumber; + +/// Used as a wrapper for the pubdata to be stored in the GCS. 
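// The `data_availability_dal` hunks above make both writes idempotent: the INSERT is
// `ON CONFLICT DO NOTHING`, and a repeated call fetches the already-stored value and
// rejects a mismatch instead of comparing row counts. A tiny in-memory model of that
// rule (a `HashMap` standing in for the `data_availability` table; everything here is
// illustrative, not DAL code):

fn save_blob_id(
    table: &mut std::collections::HashMap<u32, String>, // l1_batch_number -> blob_id
    batch: u32,
    blob_id: &str,
) -> anyhow::Result<()> {
    match table.get(&batch) {
        // First write for this batch: store it (the "ON CONFLICT DO NOTHING" path).
        None => {
            table.insert(batch, blob_id.to_owned());
            Ok(())
        }
        // Repeated write with the same value: accept silently.
        Some(existing) if existing == blob_id => Ok(()),
        // Repeated write with a different value: refuse, the stored value wins.
        Some(existing) => anyhow::bail!(
            "blob_id {blob_id} for L1 batch #{batch} does not match stored {existing}"
        ),
    }
}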
+#[derive(Debug)] +pub struct StorablePubdata { + pub data: Vec, +} + +impl StoredObject for StorablePubdata { + const BUCKET: Bucket = Bucket::DataAvailability; + type Key<'a> = L1BatchNumber; + + fn encode_key(key: Self::Key<'_>) -> String { + format!("l1_batch_{key}_pubdata.gzip") + } + + fn serialize(&self) -> Result, BoxedError> { + let mut encoder = GzEncoder::new(Vec::new(), Compression::default()); + encoder.write_all(&self.data[..])?; + encoder.finish().map_err(From::from) + } + + fn deserialize(bytes: Vec) -> Result { + let mut decoder = GzDecoder::new(&bytes[..]); + let mut decompressed_bytes = Vec::new(); + decoder + .read_to_end(&mut decompressed_bytes) + .map_err(BoxedError::from)?; + + Ok(Self { + data: decompressed_bytes, + }) + } +} diff --git a/core/lib/object_store/src/objects.rs b/core/lib/object_store/src/objects.rs index d28277b4e99b..897c93e0b6f8 100644 --- a/core/lib/object_store/src/objects.rs +++ b/core/lib/object_store/src/objects.rs @@ -7,7 +7,6 @@ use flate2::{read::GzDecoder, write::GzEncoder, Compression}; use prost::Message; use zksync_protobuf::{decode, ProtoFmt}; use zksync_types::{ - pubdata_da::StorablePubdata, snapshots::{ SnapshotFactoryDependencies, SnapshotStorageLogsChunk, SnapshotStorageLogsStorageKey, }, @@ -88,33 +87,6 @@ impl StoredObject for SnapshotFactoryDependencies { } } -impl StoredObject for StorablePubdata { - const BUCKET: Bucket = Bucket::DataAvailability; - type Key<'a> = L1BatchNumber; - - fn encode_key(key: Self::Key<'_>) -> String { - format!("l1_batch_{key}_pubdata.gzip") - } - - fn serialize(&self) -> Result, BoxedError> { - let mut encoder = GzEncoder::new(Vec::new(), Compression::default()); - encoder.write_all(&self.data[..])?; - encoder.finish().map_err(From::from) - } - - fn deserialize(bytes: Vec) -> Result { - let mut decoder = GzDecoder::new(&bytes[..]); - let mut decompressed_bytes = Vec::new(); - decoder - .read_to_end(&mut decompressed_bytes) - .map_err(BoxedError::from)?; - - Ok(Self { - data: decompressed_bytes, - }) - } -} - impl StoredObject for SnapshotStorageLogsChunk where Self: ProtoFmt, diff --git a/core/lib/types/src/pubdata_da.rs b/core/lib/types/src/pubdata_da.rs index fc8d1c6584a2..6705fdc29530 100644 --- a/core/lib/types/src/pubdata_da.rs +++ b/core/lib/types/src/pubdata_da.rs @@ -27,11 +27,6 @@ impl From for PubdataDA { } } -/// Used as a wrapper for the pubdata to be stored in the GCS. -pub struct StorablePubdata { - pub data: Vec, -} - /// Represents a blob in the data availability layer. 
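// `StorablePubdata` above gzips the pubdata in `StoredObject::serialize` and
// transparently decompresses it in `deserialize`. Since the `types` module is private,
// a round-trip check along these lines could sit next to the type itself; this is a
// sketch of a possible test, not an existing one:

#[cfg(test)]
mod tests {
    use zksync_object_store::StoredObject;

    use super::StorablePubdata;

    #[test]
    fn pubdata_gzip_round_trip() {
        let original = StorablePubdata { data: vec![1, 2, 3, 42] };

        // `serialize` returns the gzip-compressed bytes that end up in the bucket...
        let compressed = original.serialize().unwrap();
        // ...and `deserialize` restores the original pubdata.
        let restored = StorablePubdata::deserialize(compressed).unwrap();

        assert_eq!(restored.data, vec![1, 2, 3, 42]);
    }
}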
#[derive(Debug, Clone)] pub struct DataAvailabilityBlob { diff --git a/core/node/da_dispatcher/src/da_dispatcher.rs b/core/node/da_dispatcher/src/da_dispatcher.rs index 91a065e7196a..241bd68a346e 100644 --- a/core/node/da_dispatcher/src/da_dispatcher.rs +++ b/core/node/da_dispatcher/src/da_dispatcher.rs @@ -3,7 +3,7 @@ use std::{future::Future, time::Duration}; use anyhow::Context; use chrono::{NaiveDateTime, Utc}; use rand::Rng; -use tokio::sync::watch; +use tokio::sync::watch::Receiver; use zksync_config::DADispatcherConfig; use zksync_da_client::{types::DAError, DataAvailabilityClient}; use zksync_dal::{ConnectionPool, Core, CoreDal}; @@ -31,7 +31,7 @@ impl DataAvailabilityDispatcher { } } - pub async fn run(self, mut stop_receiver: watch::Receiver) -> anyhow::Result<()> { + pub async fn run(self, mut stop_receiver: Receiver) -> anyhow::Result<()> { loop { if *stop_receiver.borrow() { break; @@ -39,7 +39,7 @@ impl DataAvailabilityDispatcher { tokio::join!( async { - if let Err(err) = self.dispatch().await { + if let Err(err) = self.dispatch(&mut stop_receiver).await { tracing::error!("dispatch error {err:?}"); } }, @@ -63,7 +63,7 @@ impl DataAvailabilityDispatcher { } /// Dispatches the blobs to the data availability layer, and saves the blob_id in the database. - async fn dispatch(&self) -> anyhow::Result<()> { + async fn dispatch(&self, stop_receiver: &mut Receiver) -> anyhow::Result<()> { let mut conn = self.pool.connection_tagged("da_dispatcher").await?; let batches = conn .data_availability_dal() @@ -73,10 +73,15 @@ impl DataAvailabilityDispatcher { for batch in batches { let dispatch_latency = METRICS.blob_dispatch_latency.start(); - let dispatch_response = retry(self.config.max_retries(), batch.l1_batch_number, || { - self.client - .dispatch_blob(batch.l1_batch_number.0, batch.pubdata.clone()) - }) + let dispatch_response = retry( + self.config.max_retries(), + batch.l1_batch_number, + stop_receiver, + || { + self.client + .dispatch_blob(batch.l1_batch_number.0, batch.pubdata.clone()) + }, + ) .await .with_context(|| { format!( @@ -172,6 +177,7 @@ impl DataAvailabilityDispatcher { async fn retry( max_retries: u16, batch_number: L1BatchNumber, + stop_receiver: &mut Receiver, mut f: F, ) -> Result where @@ -195,7 +201,16 @@ where let sleep_duration = Duration::from_secs(backoff_secs) .mul_f32(rand::thread_rng().gen_range(0.8..1.2)); tracing::warn!(%err, "Failed DA dispatch request {retries}/{max_retries} for batch {batch_number}, retrying in {} milliseconds.", sleep_duration.as_millis()); - tokio::time::sleep(sleep_duration).await; + if tokio::time::timeout(sleep_duration, stop_receiver.changed()) + .await + .is_ok() + { + return Err(DAError { + error: anyhow::anyhow!("stop signal received"), + is_transient: true, + }); + } + backoff_secs = (backoff_secs * 2).min(128); // cap the back-off at 128 seconds } } diff --git a/core/node/eth_sender/src/aggregator.rs b/core/node/eth_sender/src/aggregator.rs index 1a36b7493008..de6a6982088b 100644 --- a/core/node/eth_sender/src/aggregator.rs +++ b/core/node/eth_sender/src/aggregator.rs @@ -216,7 +216,7 @@ impl Aggregator { base_system_contracts_hashes.bootloader, base_system_contracts_hashes.default_aa, protocol_version_id, - self.commitment_mode == L1BatchCommitmentMode::Rollup, + self.commitment_mode != L1BatchCommitmentMode::Rollup, ) .await .unwrap() diff --git a/core/node/node_framework/src/implementations/layers/da_dispatcher.rs b/core/node/node_framework/src/implementations/layers/da_dispatcher.rs index 419d33bc570c..26b4e18477a0 
100644 --- a/core/node/node_framework/src/implementations/layers/da_dispatcher.rs +++ b/core/node/node_framework/src/implementations/layers/da_dispatcher.rs @@ -1,7 +1,5 @@ use zksync_config::configs::{chain::StateKeeperConfig, da_dispatcher::DADispatcherConfig}; -use zksync_da_client::DataAvailabilityClient; -use zksync_dal::Core; -use zksync_db_connection::connection_pool::ConnectionPool; +use zksync_da_dispatcher::DataAvailabilityDispatcher; use crate::{ implementations::resources::{ @@ -50,36 +48,23 @@ impl WiringLayer for DataAvailabilityDispatcherLayer { } } - context.add_task(Box::new(DataAvailabilityDispatcherTask { - main_pool: master_pool, - da_config: self.da_config, - client: da_client, - })); + context.add_task(Box::new(DataAvailabilityDispatcher::new( + master_pool, + self.da_config, + da_client, + ))); Ok(()) } } -#[derive(Debug)] -struct DataAvailabilityDispatcherTask { - main_pool: ConnectionPool, - da_config: DADispatcherConfig, - client: Box, -} - #[async_trait::async_trait] -impl Task for DataAvailabilityDispatcherTask { +impl Task for DataAvailabilityDispatcher { fn id(&self) -> TaskId { "da_dispatcher".into() } async fn run(self: Box, stop_receiver: StopReceiver) -> anyhow::Result<()> { - let da_dispatcher = zksync_da_dispatcher::DataAvailabilityDispatcher::new( - self.main_pool, - self.da_config, - self.client, - ); - - da_dispatcher.run(stop_receiver.0).await + (*self).run(stop_receiver.0).await } } From 9beda54b464ecac3d37b080ffedc53437068a710 Mon Sep 17 00:00:00 2001 From: dimazhornyk Date: Mon, 1 Jul 2024 16:14:19 +0200 Subject: [PATCH 67/69] fix scripts --- core/tests/upgrade-test/tests/upgrade.test.ts | 16 ++++++++-------- 1 file changed, 8 insertions(+), 8 deletions(-) diff --git a/core/tests/upgrade-test/tests/upgrade.test.ts b/core/tests/upgrade-test/tests/upgrade.test.ts index 0afdb3bb2e7a..d08319c6e334 100644 --- a/core/tests/upgrade-test/tests/upgrade.test.ts +++ b/core/tests/upgrade-test/tests/upgrade.test.ts @@ -69,10 +69,10 @@ describe('Upgrade test', function () { // Must be > 1s, because bootloader requires l1 batch timestamps to be incremental. process.env.CHAIN_STATE_KEEPER_BLOCK_COMMIT_DEADLINE_MS = '2000'; // Run server in background. - utils.background( - `cd $ZKSYNC_HOME && cargo run --bin zksync_server --release -- --components=${serverComponents}`, - [null, logs, logs] - ); + utils.background({ + command: `cd $ZKSYNC_HOME && cargo run --bin zksync_server --release -- --components=${serverComponents}`, + stdio: [null, logs, logs] + }); // Server may need some time to recompile if it's a cold run, so wait for it. let iter = 0; while (iter < 30 && !mainContract) { @@ -265,10 +265,10 @@ describe('Upgrade test', function () { await utils.sleep(10); // Run again. 
- utils.background( - `cd $ZKSYNC_HOME && zk f cargo run --bin zksync_server --release -- --components=${serverComponents} &> upgrade.log`, - [null, logs, logs] - ); + utils.background({ + command: `cd $ZKSYNC_HOME && zk f cargo run --bin zksync_server --release -- --components=${serverComponents} &> upgrade.log`, + stdio: [null, logs, logs] + }); await utils.sleep(10); // Trying to send a transaction from the same address again From 4072e4b3a125d931afcd7d75d94d3f7ed685fd6a Mon Sep 17 00:00:00 2001 From: dimazhornyk Date: Tue, 2 Jul 2024 10:28:58 +0200 Subject: [PATCH 68/69] use stop_receiver with tokio::select --- Cargo.lock | 1 + core/node/da_dispatcher/Cargo.toml | 1 + core/node/da_dispatcher/src/da_dispatcher.rs | 39 ++++++++------------ 3 files changed, 18 insertions(+), 23 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 9e59027b9ade..4e6aca66775e 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -8367,6 +8367,7 @@ version = "0.1.0" dependencies = [ "anyhow", "chrono", + "futures 0.3.28", "rand 0.8.5", "tokio", "tracing", diff --git a/core/node/da_dispatcher/Cargo.toml b/core/node/da_dispatcher/Cargo.toml index bd24da3da6e8..159c8f40ef47 100644 --- a/core/node/da_dispatcher/Cargo.toml +++ b/core/node/da_dispatcher/Cargo.toml @@ -23,3 +23,4 @@ anyhow.workspace = true tracing.workspace = true chrono.workspace = true rand.workspace = true +futures.workspace = true diff --git a/core/node/da_dispatcher/src/da_dispatcher.rs b/core/node/da_dispatcher/src/da_dispatcher.rs index 241bd68a346e..80c030dff338 100644 --- a/core/node/da_dispatcher/src/da_dispatcher.rs +++ b/core/node/da_dispatcher/src/da_dispatcher.rs @@ -37,9 +37,9 @@ impl DataAvailabilityDispatcher { break; } - tokio::join!( + let subtasks = futures::future::join( async { - if let Err(err) = self.dispatch(&mut stop_receiver).await { + if let Err(err) = self.dispatch().await { tracing::error!("dispatch error {err:?}"); } }, @@ -47,9 +47,16 @@ impl DataAvailabilityDispatcher { if let Err(err) = self.poll_for_inclusion().await { tracing::error!("poll_for_inclusion error {err:?}"); } - } + }, ); + tokio::select! { + _ = subtasks => {}, + _ = stop_receiver.changed() => { + break; + } + } + if tokio::time::timeout(self.config.polling_interval(), stop_receiver.changed()) .await .is_ok() @@ -63,7 +70,7 @@ impl DataAvailabilityDispatcher { } /// Dispatches the blobs to the data availability layer, and saves the blob_id in the database. 
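// The `run` loop now drives both subtasks through `futures::future::join` and races
// that pair against the stop signal with `tokio::select!`. A minimal standalone sketch
// of the same shape (the subtask bodies are placeholders, not the real dispatch /
// inclusion-polling code):

async fn run_one_iteration(stop_receiver: &mut tokio::sync::watch::Receiver<bool>) {
    let subtasks = futures::future::join(
        async { /* dispatch pending blobs */ },
        async { /* poll the DA layer for inclusion data */ },
    );

    tokio::select! {
        // Both subtasks finished their current round; fall through to the sleep.
        _ = subtasks => {},
        // The stop signal flipped while the subtasks were running; bail out early.
        _ = stop_receiver.changed() => {}
    }
}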
- async fn dispatch(&self, stop_receiver: &mut Receiver) -> anyhow::Result<()> { + async fn dispatch(&self) -> anyhow::Result<()> { let mut conn = self.pool.connection_tagged("da_dispatcher").await?; let batches = conn .data_availability_dal() @@ -73,15 +80,10 @@ impl DataAvailabilityDispatcher { for batch in batches { let dispatch_latency = METRICS.blob_dispatch_latency.start(); - let dispatch_response = retry( - self.config.max_retries(), - batch.l1_batch_number, - stop_receiver, - || { - self.client - .dispatch_blob(batch.l1_batch_number.0, batch.pubdata.clone()) - }, - ) + let dispatch_response = retry(self.config.max_retries(), batch.l1_batch_number, || { + self.client + .dispatch_blob(batch.l1_batch_number.0, batch.pubdata.clone()) + }) .await .with_context(|| { format!( @@ -177,7 +179,6 @@ impl DataAvailabilityDispatcher { async fn retry( max_retries: u16, batch_number: L1BatchNumber, - stop_receiver: &mut Receiver, mut f: F, ) -> Result where @@ -201,15 +202,7 @@ where let sleep_duration = Duration::from_secs(backoff_secs) .mul_f32(rand::thread_rng().gen_range(0.8..1.2)); tracing::warn!(%err, "Failed DA dispatch request {retries}/{max_retries} for batch {batch_number}, retrying in {} milliseconds.", sleep_duration.as_millis()); - if tokio::time::timeout(sleep_duration, stop_receiver.changed()) - .await - .is_ok() - { - return Err(DAError { - error: anyhow::anyhow!("stop signal received"), - is_transient: true, - }); - } + tokio::time::sleep(sleep_duration).await; backoff_secs = (backoff_secs * 2).min(128); // cap the back-off at 128 seconds } From be7277062d4b5150cfbca491828a24a490163502 Mon Sep 17 00:00:00 2001 From: dimazhornyk Date: Tue, 2 Jul 2024 11:04:54 +0200 Subject: [PATCH 69/69] fix da_client node_framework --- core/node/da_dispatcher/src/lib.rs | 2 -- .../src/implementations/layers/da_dispatcher.rs | 8 ++++---- 2 files changed, 4 insertions(+), 6 deletions(-) diff --git a/core/node/da_dispatcher/src/lib.rs b/core/node/da_dispatcher/src/lib.rs index 965a44626764..cb41ea1f7c25 100644 --- a/core/node/da_dispatcher/src/lib.rs +++ b/core/node/da_dispatcher/src/lib.rs @@ -1,5 +1,3 @@ -extern crate core; - pub use self::da_dispatcher::DataAvailabilityDispatcher; mod da_dispatcher; diff --git a/core/node/node_framework/src/implementations/layers/da_dispatcher.rs b/core/node/node_framework/src/implementations/layers/da_dispatcher.rs index 26b4e18477a0..d1ba66b6ddd3 100644 --- a/core/node/node_framework/src/implementations/layers/da_dispatcher.rs +++ b/core/node/node_framework/src/implementations/layers/da_dispatcher.rs @@ -34,10 +34,10 @@ impl WiringLayer for DataAvailabilityDispatcherLayer { } async fn wire(self: Box, mut context: ServiceContext<'_>) -> Result<(), WiringError> { - let master_pool_resource = context.get_resource::>().await?; + let master_pool_resource = context.get_resource::>()?; // A pool with size 2 is used here because there are 2 functions within a task that execute in parallel let master_pool = master_pool_resource.get_custom(2).await?; - let da_client = context.get_resource::().await?.0; + let da_client = context.get_resource::()?.0; if let Some(limit) = da_client.blob_size_limit() { if self.state_keeper_config.max_pubdata_per_batch > limit as u64 { @@ -48,11 +48,11 @@ impl WiringLayer for DataAvailabilityDispatcherLayer { } } - context.add_task(Box::new(DataAvailabilityDispatcher::new( + context.add_task(DataAvailabilityDispatcher::new( master_pool, self.da_config, da_client, - ))); + )); Ok(()) }
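The retry helper that the last patches settle on keeps DA dispatch requests bounded: transient errors are retried with exponential back-off, the sleep is jittered by +/-20%, and the back-off is capped at 128 seconds. A self-contained sketch of that policy, with a stand-in error type instead of `DAError` and illustrative names (the jitter range and the 128-second cap mirror the code above; the exact retry-count semantics are simplified):

    use std::{future::Future, time::Duration};

    use rand::Rng;

    /// Stand-in for `DAError`: only the transiency flag matters for the retry policy.
    #[derive(Debug)]
    struct RetryableError {
        is_transient: bool,
    }

    async fn retry_with_backoff<T, Fut, F>(max_retries: u16, mut f: F) -> Result<T, RetryableError>
    where
        Fut: Future<Output = Result<T, RetryableError>>,
        F: FnMut() -> Fut,
    {
        let mut backoff_secs = 1u64;
        let mut retries = 0u16;
        loop {
            match f().await {
                Ok(value) => return Ok(value),
                // Fatal errors and exhausted retry budgets are returned to the caller as-is.
                Err(err) if !err.is_transient || retries >= max_retries => return Err(err),
                Err(_) => {
                    retries += 1;
                    // Jitter the sleep by +/-20% so concurrent retries do not synchronize.
                    let sleep_duration = Duration::from_secs(backoff_secs)
                        .mul_f32(rand::thread_rng().gen_range(0.8..1.2));
                    tokio::time::sleep(sleep_duration).await;
                    // Exponential back-off, capped at 128 seconds as in the dispatcher.
                    backoff_secs = (backoff_secs * 2).min(128);
                }
            }
        }
    }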