From 34179412aa9bb11b8b2809d4028fbc200cf4d712 Mon Sep 17 00:00:00 2001 From: Alex Ostrovski Date: Wed, 24 Apr 2024 21:56:25 +0300 Subject: [PATCH 1/7] fix(en): Remove duplicate reorg detector (#1783) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit ## What ❔ - Removes duplicate reorg detector initialization that sneaked in #1627 when updating the PR. Changes the health check API to detect duplicate components. - Fixes a race condition in the snapshot recovery test. ## Why ❔ Running multiple reorg detectors (or multiple component instances in general) doesn't make sense. ## Checklist - [x] PR title corresponds to the body of PR (we generate changelog entries from PRs). - [x] Tests for the changes have been added / updated. - [x] Documentation comments have been added / updated. - [x] Code has been formatted via `zk fmt` and `zk lint`. - [x] Spellcheck has been run via `zk spellcheck`. - [x] Linkcheck has been run via `zk linkcheck`. --- Cargo.lock | 1 + core/bin/external_node/src/init.rs | 2 +- core/bin/external_node/src/main.rs | 34 ++++++------------- core/lib/health_check/Cargo.toml | 1 + core/lib/health_check/src/lib.rs | 34 +++++++++++++++---- core/lib/health_check/src/tests.rs | 14 ++++++++ core/lib/zksync_core/src/lib.rs | 16 ++++----- .../layers/commitment_generator.rs | 4 ++- .../layers/consistency_checker.rs | 4 ++- .../layers/metadata_calculator.rs | 4 ++- .../layers/prometheus_exporter.rs | 4 ++- .../implementations/layers/web3_api/server.rs | 4 ++- .../layers/web3_api/tree_api_client.rs | 4 ++- core/node/node_framework/src/wiring_layer.rs | 7 ++++ .../tests/snapshot-recovery.test.ts | 8 ++--- prover/Cargo.lock | 1 + 16 files changed, 93 insertions(+), 49 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 54bb4af27a26..85e0b8bbced8 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -8705,6 +8705,7 @@ dependencies = [ "futures 0.3.28", "serde", "serde_json", + "thiserror", "tokio", "tracing", "vise", diff --git a/core/bin/external_node/src/init.rs b/core/bin/external_node/src/init.rs index 9cb5fb81c187..b53f4c550649 100644 --- a/core/bin/external_node/src/init.rs +++ b/core/bin/external_node/src/init.rs @@ -98,7 +98,7 @@ pub(crate) async fn ensure_storage_initialized( Box::new(main_node_client.for_component("snapshot_recovery")), blob_store, ); - app_health.insert_component(snapshots_applier_task.health_check()); + app_health.insert_component(snapshots_applier_task.health_check())?; let recovery_started_at = Instant::now(); let stats = snapshots_applier_task diff --git a/core/bin/external_node/src/main.rs b/core/bin/external_node/src/main.rs index e34e663a3ba0..63be52168821 100644 --- a/core/bin/external_node/src/main.rs +++ b/core/bin/external_node/src/main.rs @@ -166,7 +166,7 @@ async fn run_tree( .with_recovery_pool(recovery_pool); let tree_reader = Arc::new(metadata_calculator.tree_reader()); - app_health.insert_component(metadata_calculator.tree_health_check()); + app_health.insert_component(metadata_calculator.tree_health_check())?; if let Some(api_config) = api_config { let address = (Ipv4Addr::UNSPECIFIED, api_config.port).into(); @@ -201,7 +201,7 @@ async fn run_core( ) -> anyhow::Result { // Create components. 
let sync_state = SyncState::default(); - app_health.insert_custom_component(Arc::new(sync_state.clone())); + app_health.insert_custom_component(Arc::new(sync_state.clone()))?; let (action_queue_sender, action_queue) = ActionQueue::new(); let (persistence, miniblock_sealer) = StateKeeperPersistence::new( @@ -299,18 +299,6 @@ async fn run_core( task_handles.push(tokio::spawn(db_pruner.run(stop_receiver.clone()))); } - let reorg_detector = ReorgDetector::new(main_node_client.clone(), connection_pool.clone()); - app_health.insert_component(reorg_detector.health_check().clone()); - task_handles.push(tokio::spawn({ - let stop = stop_receiver.clone(); - async move { - reorg_detector - .run(stop) - .await - .context("reorg_detector.run()") - } - })); - let sk_handle = task::spawn(state_keeper.run()); let fee_params_fetcher_handle = tokio::spawn(fee_params_fetcher.clone().run(stop_receiver.clone())); @@ -359,7 +347,7 @@ async fn run_core( .context("cannot initialize consistency checker")? .with_diamond_proxy_addr(diamond_proxy_addr); - app_health.insert_component(consistency_checker.health_check().clone()); + app_health.insert_component(consistency_checker.health_check().clone())?; let consistency_checker_handle = tokio::spawn(consistency_checker.run(stop_receiver.clone())); let batch_status_updater = BatchStatusUpdater::new( @@ -369,14 +357,14 @@ async fn run_core( .await .context("failed to build a connection pool for BatchStatusUpdater")?, ); - app_health.insert_component(batch_status_updater.health_check()); + app_health.insert_component(batch_status_updater.health_check())?; let commitment_generator_pool = singleton_pool_builder .build() .await .context("failed to build a commitment_generator_pool")?; let commitment_generator = CommitmentGenerator::new(commitment_generator_pool); - app_health.insert_component(commitment_generator.health_check()); + app_health.insert_component(commitment_generator.health_check())?; let commitment_generator_handle = tokio::spawn(commitment_generator.run(stop_receiver.clone())); let updater_handle = task::spawn(batch_status_updater.run(stop_receiver.clone())); @@ -533,7 +521,7 @@ async fn run_api( .run(stop_receiver.clone()) .await .context("Failed initializing HTTP JSON-RPC server")?; - app_health.insert_component(http_server_handles.health_check); + app_health.insert_component(http_server_handles.health_check)?; task_handles.extend(http_server_handles.tasks); } @@ -562,7 +550,7 @@ async fn run_api( .run(stop_receiver.clone()) .await .context("Failed initializing WS JSON-RPC server")?; - app_health.insert_component(ws_server_handles.health_check); + app_health.insert_component(ws_server_handles.health_check)?; task_handles.extend(ws_server_handles.tasks); } @@ -674,7 +662,7 @@ async fn init_tasks( if let Some(port) = config.optional.prometheus_port { let (prometheus_health_check, prometheus_health_updater) = ReactiveHealthCheck::new("prometheus_exporter"); - app_health.insert_component(prometheus_health_check); + app_health.insert_component(prometheus_health_check)?; task_handles.push(tokio::spawn(async move { prometheus_health_updater.update(HealthStatus::Ready.into()); let result = PrometheusExporterConfig::pull(port) @@ -887,10 +875,10 @@ async fn run_node( )); app_health.insert_custom_component(Arc::new(MainNodeHealthCheck::from( main_node_client.clone(), - ))); + )))?; app_health.insert_custom_component(Arc::new(ConnectionPoolHealthCheck::new( connection_pool.clone(), - ))); + )))?; // Start the health check server early into the node lifecycle so that its 
health can be monitored from the very start.
let healthcheck_handle = HealthCheckHandle::spawn_server(
@@ -983,7 +971,7 @@ async fn run_node(
tracing::info!("Rollback successfully completed");
}
- app_health.insert_component(reorg_detector.health_check().clone());
+ app_health.insert_component(reorg_detector.health_check().clone())?;
task_handles.push(tokio::spawn({
let stop = stop_receiver.clone();
async move {
diff --git a/core/lib/health_check/Cargo.toml b/core/lib/health_check/Cargo.toml
index dc2ea71584b6..c2d4e85d209e 100644
--- a/core/lib/health_check/Cargo.toml
+++ b/core/lib/health_check/Cargo.toml
@@ -16,6 +16,7 @@ async-trait.workspace = true
futures.workspace = true
serde = { workspace = true, features = ["derive"] }
serde_json.workspace = true
+thiserror.workspace = true
tokio = { workspace = true, features = ["sync", "time"] }
tracing.workspace = true
diff --git a/core/lib/health_check/src/lib.rs b/core/lib/health_check/src/lib.rs
index 1b2247a8227f..07f3af330765 100644
--- a/core/lib/health_check/src/lib.rs
+++ b/core/lib/health_check/src/lib.rs
@@ -90,6 +90,14 @@ impl From<HealthStatus> for Health {
}
}
+#[derive(Debug, thiserror::Error)]
+#[non_exhaustive]
+pub enum AppHealthCheckError {
+    /// Component is redefined.
+    #[error("cannot insert health check for component `{0}`: it is redefined")]
+    RedefinedComponent(&'static str),
+}
+
/// Application health check aggregating health from multiple components.
#[derive(Debug)]
pub struct AppHealthCheck {
@@ -132,24 +140,36 @@ impl AppHealthCheck {
}
/// Inserts health check for a component.
- pub fn insert_component(&self, health_check: ReactiveHealthCheck) {
- self.insert_custom_component(Arc::new(health_check));
+ ///
+ /// # Errors
+ ///
+ /// Returns an error if the component with the same name is already defined.
+ pub fn insert_component(
+ &self,
+ health_check: ReactiveHealthCheck,
+ ) -> Result<(), AppHealthCheckError> {
+ self.insert_custom_component(Arc::new(health_check))
}
/// Inserts a custom health check for a component.
- pub fn insert_custom_component(&self, health_check: Arc<dyn CheckHealth>) {
+ ///
+ /// # Errors
+ ///
+ /// Returns an error if the component with the same name is already defined.
+ pub fn insert_custom_component(
+ &self,
+ health_check: Arc<dyn CheckHealth>,
+ ) -> Result<(), AppHealthCheckError> {
let health_check_name = health_check.name();
let mut guard = self
.components
.lock()
.expect("`AppHealthCheck` is poisoned");
if guard.iter().any(|check| check.name() == health_check_name) {
- tracing::warn!(
- "Health check with name `{health_check_name}` is redefined; only the last mention \
- will be present in `/health` endpoint output"
- );
+ return Err(AppHealthCheckError::RedefinedComponent(health_check_name));
}
guard.push(health_check);
+ Ok(())
}
/// Checks the overall application health. This will query all component checks concurrently.
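For reference, a minimal usage sketch of the API change above (illustrative only, not part of the patch; the crate import path is assumed). Duplicate registration is now a hard error rather than a logged warning, which is exactly what surfaces bugs like the duplicate reorg detector:

```rust
use std::sync::Arc;

use zksync_health_check::{AppHealthCheck, AppHealthCheckError, ReactiveHealthCheck};

fn register() -> Result<(), AppHealthCheckError> {
    let app_health = AppHealthCheck::default();
    let (reorg_check, _updater) = ReactiveHealthCheck::new("reorg_detector");

    // The first registration succeeds.
    app_health.insert_component(reorg_check.clone())?;

    // Registering a component under the same name again now fails loudly
    // instead of silently shadowing the earlier check in `/health` output.
    let err = app_health
        .insert_custom_component(Arc::new(reorg_check))
        .unwrap_err();
    assert!(matches!(
        err,
        AppHealthCheckError::RedefinedComponent("reorg_detector")
    ));
    Ok(())
}
```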
diff --git a/core/lib/health_check/src/tests.rs b/core/lib/health_check/src/tests.rs index 70a49964b00e..46c276372ae1 100644 --- a/core/lib/health_check/src/tests.rs +++ b/core/lib/health_check/src/tests.rs @@ -134,3 +134,17 @@ async fn aggregating_health_checks() { HealthStatus::Affected ); } + +#[test] +fn adding_duplicate_component() { + let checks = AppHealthCheck::default(); + let (health_check, _health_updater) = ReactiveHealthCheck::new("test"); + checks.insert_component(health_check.clone()).unwrap(); + + let err = checks.insert_component(health_check.clone()).unwrap_err(); + assert_matches!(err, AppHealthCheckError::RedefinedComponent("test")); + let err = checks + .insert_custom_component(Arc::new(health_check)) + .unwrap_err(); + assert_matches!(err, AppHealthCheckError::RedefinedComponent("test")); +} diff --git a/core/lib/zksync_core/src/lib.rs b/core/lib/zksync_core/src/lib.rs index b16c088ef428..9608f20fa266 100644 --- a/core/lib/zksync_core/src/lib.rs +++ b/core/lib/zksync_core/src/lib.rs @@ -333,7 +333,7 @@ pub async fn initialize_components( let (prometheus_health_check, prometheus_health_updater) = ReactiveHealthCheck::new("prometheus_exporter"); - app_health.insert_component(prometheus_health_check); + app_health.insert_component(prometheus_health_check)?; let prometheus_task = prom_config.run(stop_receiver.clone()); let prometheus_task = tokio::spawn(async move { prometheus_health_updater.update(HealthStatus::Ready.into()); @@ -802,7 +802,7 @@ pub async fn initialize_components( .await .context("failed to build commitment_generator_pool")?; let commitment_generator = CommitmentGenerator::new(commitment_generator_pool); - app_health.insert_component(commitment_generator.health_check()); + app_health.insert_component(commitment_generator.health_check())?; task_futures.push(tokio::spawn( commitment_generator.run(stop_receiver.clone()), )); @@ -810,7 +810,7 @@ pub async fn initialize_components( // Run healthcheck server for all components. 
let db_health_check = ConnectionPoolHealthCheck::new(replica_connection_pool); - app_health.insert_custom_component(Arc::new(db_health_check)); + app_health.insert_custom_component(Arc::new(db_health_check))?; let health_check_handle = HealthCheckHandle::spawn_server(health_check_config.bind_addr(), app_health); @@ -1035,7 +1035,7 @@ async fn run_tree( } let tree_health_check = metadata_calculator.tree_health_check(); - app_health.insert_component(tree_health_check); + app_health.insert_component(tree_health_check)?; let tree_task = tokio::spawn(metadata_calculator.run(stop_receiver)); task_futures.push(tree_task); @@ -1331,7 +1331,7 @@ async fn run_http_api( if let Some(tree_api_url) = api_config.web3_json_rpc.tree_api_url() { let tree_api = Arc::new(TreeApiHttpClient::new(tree_api_url)); api_builder = api_builder.with_tree_api(tree_api.clone()); - app_health.insert_custom_component(tree_api); + app_health.insert_custom_component(tree_api)?; } let server_handles = api_builder @@ -1340,7 +1340,7 @@ async fn run_http_api( .run(stop_receiver) .await?; task_futures.extend(server_handles.tasks); - app_health.insert_component(server_handles.health_check); + app_health.insert_component(server_handles.health_check)?; Ok(()) } @@ -1399,7 +1399,7 @@ async fn run_ws_api( if let Some(tree_api_url) = api_config.web3_json_rpc.tree_api_url() { let tree_api = Arc::new(TreeApiHttpClient::new(tree_api_url)); api_builder = api_builder.with_tree_api(tree_api.clone()); - app_health.insert_custom_component(tree_api); + app_health.insert_custom_component(tree_api)?; } let server_handles = api_builder @@ -1408,7 +1408,7 @@ async fn run_ws_api( .run(stop_receiver) .await?; task_futures.extend(server_handles.tasks); - app_health.insert_component(server_handles.health_check); + app_health.insert_component(server_handles.health_check)?; Ok(()) } diff --git a/core/node/node_framework/src/implementations/layers/commitment_generator.rs b/core/node/node_framework/src/implementations/layers/commitment_generator.rs index 9dbb7b7a4c6a..8afdba9a1d5f 100644 --- a/core/node/node_framework/src/implementations/layers/commitment_generator.rs +++ b/core/node/node_framework/src/implementations/layers/commitment_generator.rs @@ -22,7 +22,9 @@ impl WiringLayer for CommitmentGeneratorLayer { let commitment_generator = CommitmentGenerator::new(main_pool); let AppHealthCheckResource(app_health) = context.get_resource_or_default().await; - app_health.insert_component(commitment_generator.health_check()); + app_health + .insert_component(commitment_generator.health_check()) + .map_err(WiringError::internal)?; context.add_task(Box::new(CommitmentGeneratorTask { commitment_generator, diff --git a/core/node/node_framework/src/implementations/layers/consistency_checker.rs b/core/node/node_framework/src/implementations/layers/consistency_checker.rs index 01c174847e43..870732d7c9ec 100644 --- a/core/node/node_framework/src/implementations/layers/consistency_checker.rs +++ b/core/node/node_framework/src/implementations/layers/consistency_checker.rs @@ -58,7 +58,9 @@ impl WiringLayer for ConsistencyCheckerLayer { .with_diamond_proxy_addr(self.diamond_proxy_addr); let AppHealthCheckResource(app_health) = context.get_resource_or_default().await; - app_health.insert_component(consistency_checker.health_check().clone()); + app_health + .insert_component(consistency_checker.health_check().clone()) + .map_err(WiringError::internal)?; // Create and add tasks. 
context.add_task(Box::new(ConsistencyCheckerTask { diff --git a/core/node/node_framework/src/implementations/layers/metadata_calculator.rs b/core/node/node_framework/src/implementations/layers/metadata_calculator.rs index 7277dfa666a5..8746af995824 100644 --- a/core/node/node_framework/src/implementations/layers/metadata_calculator.rs +++ b/core/node/node_framework/src/implementations/layers/metadata_calculator.rs @@ -62,7 +62,9 @@ impl WiringLayer for MetadataCalculatorLayer { .with_recovery_pool(recovery_pool); let AppHealthCheckResource(app_health) = context.get_resource_or_default().await; - app_health.insert_component(metadata_calculator.tree_health_check()); + app_health + .insert_component(metadata_calculator.tree_health_check()) + .map_err(WiringError::internal)?; let task = Box::new(MetadataCalculatorTask { metadata_calculator, diff --git a/core/node/node_framework/src/implementations/layers/prometheus_exporter.rs b/core/node/node_framework/src/implementations/layers/prometheus_exporter.rs index 0a7e2ee84898..95477291e432 100644 --- a/core/node/node_framework/src/implementations/layers/prometheus_exporter.rs +++ b/core/node/node_framework/src/implementations/layers/prometheus_exporter.rs @@ -34,7 +34,9 @@ impl WiringLayer for PrometheusExporterLayer { ReactiveHealthCheck::new("prometheus_exporter"); let AppHealthCheckResource(app_health) = node.get_resource_or_default().await; - app_health.insert_component(prometheus_health_check); + app_health + .insert_component(prometheus_health_check) + .map_err(WiringError::internal)?; let task = Box::new(PrometheusExporterTask { config: self.0, diff --git a/core/node/node_framework/src/implementations/layers/web3_api/server.rs b/core/node/node_framework/src/implementations/layers/web3_api/server.rs index ab5aaf9382c0..e24ac1e87f9e 100644 --- a/core/node/node_framework/src/implementations/layers/web3_api/server.rs +++ b/core/node/node_framework/src/implementations/layers/web3_api/server.rs @@ -154,7 +154,9 @@ impl WiringLayer for Web3ServerLayer { // Insert healthcheck. let api_health_check = server.health_check(); let AppHealthCheckResource(app_health) = context.get_resource_or_default().await; - app_health.insert_component(api_health_check); + app_health + .insert_component(api_health_check) + .map_err(WiringError::internal)?; // Insert circuit breaker. 
let circuit_breaker_resource = context diff --git a/core/node/node_framework/src/implementations/layers/web3_api/tree_api_client.rs b/core/node/node_framework/src/implementations/layers/web3_api/tree_api_client.rs index ac28c13049bc..6380e0bb445a 100644 --- a/core/node/node_framework/src/implementations/layers/web3_api/tree_api_client.rs +++ b/core/node/node_framework/src/implementations/layers/web3_api/tree_api_client.rs @@ -31,7 +31,9 @@ impl WiringLayer for TreeApiClientLayer { if let Some(url) = &self.url { let client = Arc::new(TreeApiHttpClient::new(url)); let AppHealthCheckResource(app_health) = context.get_resource_or_default().await; - app_health.insert_custom_component(client.clone()); + app_health + .insert_custom_component(client.clone()) + .map_err(WiringError::internal)?; context.insert_resource(TreeApiClientResource(client))?; } Ok(()) diff --git a/core/node/node_framework/src/wiring_layer.rs b/core/node/node_framework/src/wiring_layer.rs index 3fc9c92b65d4..e37bb1c9d487 100644 --- a/core/node/node_framework/src/wiring_layer.rs +++ b/core/node/node_framework/src/wiring_layer.rs @@ -38,3 +38,10 @@ pub enum WiringError { #[error(transparent)] Internal(#[from] anyhow::Error), } + +impl WiringError { + /// Wraps the specified internal error. + pub fn internal(err: impl Into) -> Self { + Self::Internal(err.into()) + } +} diff --git a/core/tests/snapshot-recovery-test/tests/snapshot-recovery.test.ts b/core/tests/snapshot-recovery-test/tests/snapshot-recovery.test.ts index addbf6b8d9e5..dda7dd449a9a 100644 --- a/core/tests/snapshot-recovery-test/tests/snapshot-recovery.test.ts +++ b/core/tests/snapshot-recovery-test/tests/snapshot-recovery.test.ts @@ -267,9 +267,9 @@ describe('snapshot recovery', () => { if (!consistencyCheckerSucceeded) { const status = health.components.consistency_checker?.status; - expect(status).to.be.oneOf([undefined, 'ready']); + expect(status).to.be.oneOf([undefined, 'not_ready', 'ready']); const details = health.components.consistency_checker?.details; - if (details !== undefined) { + if (status === 'ready' && details !== undefined) { console.log('Received consistency checker health details', details); if (details.first_checked_batch !== undefined && details.last_checked_batch !== undefined) { expect(details.first_checked_batch).to.equal(snapshotMetadata.l1BatchNumber + 1); @@ -281,9 +281,9 @@ describe('snapshot recovery', () => { if (!reorgDetectorSucceeded) { const status = health.components.reorg_detector?.status; - expect(status).to.be.oneOf([undefined, 'ready']); + expect(status).to.be.oneOf([undefined, 'not_ready', 'ready']); const details = health.components.reorg_detector?.details; - if (details !== undefined) { + if (status === 'ready' && details !== undefined) { console.log('Received reorg detector health details', details); if (details.last_correct_l1_batch !== undefined && details.last_correct_miniblock !== undefined) { expect(details.last_correct_l1_batch).to.be.greaterThan(snapshotMetadata.l1BatchNumber); diff --git a/prover/Cargo.lock b/prover/Cargo.lock index f19ff517d9df..268dab9e5264 100644 --- a/prover/Cargo.lock +++ b/prover/Cargo.lock @@ -7724,6 +7724,7 @@ dependencies = [ "futures 0.3.30", "serde", "serde_json", + "thiserror", "tokio", "tracing", "vise", From a784ea64c847f31010af0ee71b1e64e9961dc5e1 Mon Sep 17 00:00:00 2001 From: Artem Fomiuk <88630083+Artemka374@users.noreply.github.com> Date: Thu, 25 Apr 2024 12:08:44 +0300 Subject: [PATCH 2/7] feat(prover): remove redundant config fields (#1787) MIME-Version: 1.0 Content-Type: 
text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit ## What ❔ Remove config fields which are meant for sampling Update buckets for witness generation time metrics ## Why ❔ They are not used now anyway ## Checklist - [x] PR title corresponds to the body of PR (we generate changelog entries from PRs). - [x] Tests for the changes have been added / updated. - [x] Documentation comments have been added / updated. - [x] Code has been formatted via `zk fmt` and `zk lint`. - [x] Spellcheck has been run via `zk spellcheck`. - [x] Linkcheck has been run via `zk linkcheck`. --- .../src/configs/fri_witness_generator.rs | 6 - core/lib/config/src/testonly.rs | 3 - .../env_config/src/fri_witness_generator.rs | 9 - .../src/proto/config/prover.proto | 7 +- core/lib/protobuf_config/src/prover.rs | 10 -- etc/env/base/fri_witness_generator.toml | 2 - etc/env/file_based/general.yaml | 2 - .../witness_generator/src/basic_circuits.rs | 157 +----------------- prover/witness_generator/src/metrics.rs | 7 +- 9 files changed, 14 insertions(+), 189 deletions(-) diff --git a/core/lib/config/src/configs/fri_witness_generator.rs b/core/lib/config/src/configs/fri_witness_generator.rs index c4225bf63e02..14ae3637a0da 100644 --- a/core/lib/config/src/configs/fri_witness_generator.rs +++ b/core/lib/config/src/configs/fri_witness_generator.rs @@ -15,16 +15,10 @@ pub struct FriWitnessGeneratorConfig { pub node_generation_timeout_in_secs: Option, /// Max attempts for generating witness pub max_attempts: u32, - // Percentage of the blocks that gets proven in the range [0.0, 1.0] - // when 0.0 implies all blocks are skipped and 1.0 implies all blocks are proven. - pub blocks_proving_percentage: Option, - pub dump_arguments_for_blocks: Vec, // Optional l1 batch number to process block until(inclusive). // This parameter is used in case of performing circuit upgrades(VK/Setup keys), // to not let witness-generator pick new job and finish all the existing jobs with old circuit. pub last_l1_batch_to_process: Option, - // Force process block with specified number when sampling is enabled. 
- pub force_process_block: Option, // whether to write to public GCS bucket for https://github.com/matter-labs/era-boojum-validator-cli pub shall_save_to_public_bucket: bool, diff --git a/core/lib/config/src/testonly.rs b/core/lib/config/src/testonly.rs index 15d22be73e78..2ef04a16e036 100644 --- a/core/lib/config/src/testonly.rs +++ b/core/lib/config/src/testonly.rs @@ -542,10 +542,7 @@ impl Distribution for EncodeDist { node_generation_timeout_in_secs: self.sample(rng), scheduler_generation_timeout_in_secs: self.sample(rng), max_attempts: self.sample(rng), - blocks_proving_percentage: self.sample(rng), - dump_arguments_for_blocks: self.sample_collect(rng), last_l1_batch_to_process: self.sample(rng), - force_process_block: self.sample(rng), shall_save_to_public_bucket: self.sample(rng), } } diff --git a/core/lib/env_config/src/fri_witness_generator.rs b/core/lib/env_config/src/fri_witness_generator.rs index e5f9f0b6c1da..8e0fff922333 100644 --- a/core/lib/env_config/src/fri_witness_generator.rs +++ b/core/lib/env_config/src/fri_witness_generator.rs @@ -23,10 +23,7 @@ mod tests { node_generation_timeout_in_secs: Some(800u16), scheduler_generation_timeout_in_secs: Some(900u16), max_attempts: 4, - blocks_proving_percentage: Some(30), - dump_arguments_for_blocks: vec![2, 3], last_l1_batch_to_process: None, - force_process_block: Some(1), shall_save_to_public_bucket: true, } } @@ -41,9 +38,6 @@ mod tests { FRI_WITNESS_NODE_GENERATION_TIMEOUT_IN_SECS=800 FRI_WITNESS_SCHEDULER_GENERATION_TIMEOUT_IN_SECS=900 FRI_WITNESS_MAX_ATTEMPTS=4 - FRI_WITNESS_DUMP_ARGUMENTS_FOR_BLOCKS="2,3" - FRI_WITNESS_BLOCKS_PROVING_PERCENTAGE="30" - FRI_WITNESS_FORCE_PROCESS_BLOCK="1" FRI_WITNESS_SHALL_SAVE_TO_PUBLIC_BUCKET=true "#; lock.set_env(config); @@ -64,9 +58,6 @@ mod tests { FRI_WITNESS_BASIC_GENERATION_TIMEOUT_IN_SECS=100 FRI_WITNESS_SCHEDULER_GENERATION_TIMEOUT_IN_SECS=200 FRI_WITNESS_MAX_ATTEMPTS=4 - FRI_WITNESS_DUMP_ARGUMENTS_FOR_BLOCKS="2,3" - FRI_WITNESS_BLOCKS_PROVING_PERCENTAGE="30" - FRI_WITNESS_FORCE_PROCESS_BLOCK="1" FRI_WITNESS_SHALL_SAVE_TO_PUBLIC_BUCKET=true "#; lock.set_env(config); diff --git a/core/lib/protobuf_config/src/proto/config/prover.proto b/core/lib/protobuf_config/src/proto/config/prover.proto index dac7d1221c49..a419b2fab943 100644 --- a/core/lib/protobuf_config/src/proto/config/prover.proto +++ b/core/lib/protobuf_config/src/proto/config/prover.proto @@ -70,16 +70,15 @@ message ProverGateway { message WitnessGenerator { optional uint32 generation_timeout_in_secs = 1; // required; - optional uint32 max_attempts = 2; // required - optional uint32 blocks_proving_percentage = 3; // optional; 0-100 - repeated uint32 dump_arguments_for_blocks = 4; + optional uint32 max_attempts = 2; // required; optional uint32 last_l1_batch_to_process = 5; // optional - optional uint32 force_process_block = 6; // optional optional bool shall_save_to_public_bucket = 7; // required optional uint32 basic_generation_timeout_in_secs = 8; // optional; optional uint32 leaf_generation_timeout_in_secs = 9; // optional; optional uint32 node_generation_timeout_in_secs = 10; // optional; optional uint32 scheduler_generation_timeout_in_secs = 11; // optional; + reserved 3, 4, 6; + reserved "dump_arguments_for_blocks", "force_process_block", "blocks_proving_percentage"; } message WitnessVectorGenerator { diff --git a/core/lib/protobuf_config/src/prover.rs b/core/lib/protobuf_config/src/prover.rs index f82685fdb905..a02f1737ac8e 100644 --- a/core/lib/protobuf_config/src/prover.rs +++ 
b/core/lib/protobuf_config/src/prover.rs @@ -161,14 +161,7 @@ impl ProtoRepr for proto::WitnessGenerator { .and_then(|x| Ok((*x).try_into()?)) .context("generation_timeout_in_secs")?, max_attempts: *required(&self.max_attempts).context("max_attempts")?, - blocks_proving_percentage: self - .blocks_proving_percentage - .map(|x| x.try_into()) - .transpose() - .context("blocks_proving_percentage")?, - dump_arguments_for_blocks: self.dump_arguments_for_blocks.clone(), last_l1_batch_to_process: self.last_l1_batch_to_process, - force_process_block: self.force_process_block, shall_save_to_public_bucket: *required(&self.shall_save_to_public_bucket) .context("shall_save_to_public_bucket")?, basic_generation_timeout_in_secs: self @@ -198,10 +191,7 @@ impl ProtoRepr for proto::WitnessGenerator { Self { generation_timeout_in_secs: Some(this.generation_timeout_in_secs.into()), max_attempts: Some(this.max_attempts), - blocks_proving_percentage: this.blocks_proving_percentage.map(|x| x.into()), - dump_arguments_for_blocks: this.dump_arguments_for_blocks.clone(), last_l1_batch_to_process: this.last_l1_batch_to_process, - force_process_block: this.force_process_block, shall_save_to_public_bucket: Some(this.shall_save_to_public_bucket), basic_generation_timeout_in_secs: this .basic_generation_timeout_in_secs diff --git a/etc/env/base/fri_witness_generator.toml b/etc/env/base/fri_witness_generator.toml index 184a264ceb66..4ed950ebded6 100644 --- a/etc/env/base/fri_witness_generator.toml +++ b/etc/env/base/fri_witness_generator.toml @@ -5,6 +5,4 @@ leaf_generation_timeout_in_secs = 900 node_generation_timeout_in_secs = 900 scheduler_generation_timeout_in_secs = 900 max_attempts = 10 -dump_arguments_for_blocks = "1" -force_process_block = 1 shall_save_to_public_bucket = true diff --git a/etc/env/file_based/general.yaml b/etc/env/file_based/general.yaml index 4b071054bceb..a3565a497cbe 100644 --- a/etc/env/file_based/general.yaml +++ b/etc/env/file_based/general.yaml @@ -174,10 +174,8 @@ prover: zone_read_url: http://metadata.google.internal/computeMetadata/v1/instance/zone shall_save_to_public_bucket: true witness_generator: - dump_arguments_for_blocks: [ 2,3 ] generation_timeout_in_secs: 900 max_attempts: 10 - force_process_block: 1 shall_save_to_public_bucket: true witness_vector_generator: prover_instance_wait_timeout_in_secs: 200 diff --git a/prover/witness_generator/src/basic_circuits.rs b/prover/witness_generator/src/basic_circuits.rs index 6f99d88cda16..c649f6e0fbf7 100644 --- a/prover/witness_generator/src/basic_circuits.rs +++ b/prover/witness_generator/src/basic_circuits.rs @@ -1,5 +1,5 @@ use std::{ - collections::{hash_map::DefaultHasher, HashMap, HashSet}, + collections::{hash_map::DefaultHasher, HashSet}, hash::{Hash, Hasher}, sync::Arc, time::Instant, @@ -19,20 +19,15 @@ use circuit_definitions::{ use multivm::vm_1_4_2::{ constants::MAX_CYCLES_FOR_TX, HistoryDisabled, StorageOracle as VmStorageOracle, }; -use prover_dal::{ - fri_witness_generator_dal::FriWitnessJobStatus, ConnectionPool, Prover, ProverDal, -}; -use rand::Rng; -use serde::{Deserialize, Serialize}; +use prover_dal::{ConnectionPool, Prover, ProverDal}; use tracing::Instrument; use zkevm_test_harness::{ - geometry_config::get_geometry_config, toolset::GeometryConfig, - utils::generate_eip4844_circuit_and_witness, + geometry_config::get_geometry_config, utils::generate_eip4844_circuit_and_witness, zkevm_circuits::eip_4844::input::EIP4844OutputDataWitness, }; use zksync_config::configs::FriWitnessGeneratorConfig; use zksync_dal::{Core, 
CoreDal}; -use zksync_object_store::{Bucket, ObjectStore, ObjectStoreFactory, StoredObject}; +use zksync_object_store::{ObjectStore, ObjectStoreFactory}; use zksync_prover_fri_types::{ circuit_definitions::{ boojum::{ @@ -56,7 +51,7 @@ use zksync_types::{ AggregationRound, Eip4844Blobs, EIP_4844_BLOB_SIZE, MAX_4844_BLOBS_PER_BLOCK, }, protocol_version::ProtocolVersionId, - Address, L1BatchNumber, BOOTLOADER_ADDRESS, H256, U256, + Address, L1BatchNumber, BOOTLOADER_ADDRESS, H256, }; use zksync_utils::{bytes_to_chunks, h256_to_u256, u256_to_h256}; @@ -134,49 +129,15 @@ impl BasicWitnessGenerator { async fn process_job_impl( object_store: Arc, connection_pool: ConnectionPool, - prover_connection_pool: ConnectionPool, basic_job: BasicWitnessGeneratorJob, started_at: Instant, - config: Arc, ) -> Option { let BasicWitnessGeneratorJob { block_number, job, eip_4844_blobs, } = basic_job; - let shall_force_process_block = config - .force_process_block - .map_or(false, |block| block == block_number.0); - - if let Some(blocks_proving_percentage) = config.blocks_proving_percentage { - // Generate random number in (0; 100). - let threshold = rand::thread_rng().gen_range(1..100); - // We get value higher than `blocks_proving_percentage` with prob = `1 - blocks_proving_percentage`. - // In this case job should be skipped. - if threshold > blocks_proving_percentage && !shall_force_process_block { - WITNESS_GENERATOR_METRICS.skipped_blocks.inc(); - tracing::info!( - "Skipping witness generation for block {}, blocks_proving_percentage: {}", - block_number.0, - blocks_proving_percentage - ); - let mut prover_storage = prover_connection_pool.connection().await.unwrap(); - let mut transaction = prover_storage.start_transaction().await.unwrap(); - transaction - .fri_proof_compressor_dal() - .skip_proof_compression_job(block_number) - .await; - transaction - .fri_witness_generator_dal() - .mark_witness_job(FriWitnessJobStatus::Skipped, block_number) - .await; - transaction.commit().await.unwrap(); - return None; - } - } - - WITNESS_GENERATOR_METRICS.sampled_blocks.inc(); tracing::info!( "Starting witness generation of type {:?} for block {}", AggregationRound::BasicCircuits, @@ -186,7 +147,6 @@ impl BasicWitnessGenerator { Some( process_basic_circuits_job( &*object_store, - config, connection_pool, started_at, block_number, @@ -254,22 +214,15 @@ impl JobProcessor for BasicWitnessGenerator { job: BasicWitnessGeneratorJob, started_at: Instant, ) -> tokio::task::JoinHandle>> { - let config = Arc::clone(&self.config); let object_store = Arc::clone(&self.object_store); let connection_pool = self.connection_pool.clone(); - let prover_connection_pool = self.prover_connection_pool.clone(); tokio::spawn(async move { let block_number = job.block_number; - Ok(Self::process_job_impl( - object_store, - connection_pool, - prover_connection_pool, - job, - started_at, - config, + Ok( + Self::process_job_impl(object_store, connection_pool, job, started_at) + .instrument(tracing::info_span!("basic_circuit", %block_number)) + .await, ) - .instrument(tracing::info_span!("basic_circuit", %block_number)) - .await) }) } @@ -335,7 +288,6 @@ impl JobProcessor for BasicWitnessGenerator { #[allow(clippy::too_many_arguments)] async fn process_basic_circuits_job( object_store: &dyn ObjectStore, - config: Arc, connection_pool: ConnectionPool, started_at: Instant, block_number: L1BatchNumber, @@ -348,7 +300,6 @@ async fn process_basic_circuits_job( generate_witness( block_number, object_store, - config, connection_pool, witness_gen_input, 
eip_4844_blobs, @@ -535,7 +486,6 @@ async fn build_basic_circuits_witness_generator_input( async fn generate_witness( block_number: L1BatchNumber, object_store: &dyn ObjectStore, - config: Arc, connection_pool: ConnectionPool, input: BasicCircuitWitnessGeneratorInput, eip_4844_blobs: Eip4844Blobs, @@ -643,29 +593,6 @@ async fn generate_witness( hasher.finish() ); - let should_dump_arguments = config - .dump_arguments_for_blocks - .contains(&input.block_number.0); - if should_dump_arguments { - save_run_with_fixed_params_args_to_gcs( - object_store, - input.block_number.0, - last_miniblock_number.0, - Address::zero(), - BOOTLOADER_ADDRESS, - bootloader_code.clone(), - bootloader_contents.clone(), - false, - account_code_hash, - used_bytecodes.clone(), - Vec::default(), - MAX_CYCLES_FOR_TX as usize, - geometry_config, - tree.clone(), - ) - .await; - } - // The following part is CPU-heavy, so we move it to a separate thread. let rt_handle = tokio::runtime::Handle::current(); @@ -781,69 +708,3 @@ async fn generate_witness( block_aux_witness, ) } - -#[allow(clippy::too_many_arguments)] -async fn save_run_with_fixed_params_args_to_gcs( - object_store: &dyn ObjectStore, - l1_batch_number: u32, - last_miniblock_number: u32, - caller: Address, - entry_point_address: Address, - entry_point_code: Vec<[u8; 32]>, - initial_heap_content: Vec, - zk_porter_is_available: bool, - default_aa_code_hash: U256, - used_bytecodes: HashMap>, - ram_verification_queries: Vec<(u32, U256)>, - cycle_limit: usize, - geometry: GeometryConfig, - tree: PrecalculatedMerklePathsProvider, -) { - let run_with_fixed_params_input = RunWithFixedParamsInput { - l1_batch_number, - last_miniblock_number, - caller, - entry_point_address, - entry_point_code, - initial_heap_content, - zk_porter_is_available, - default_aa_code_hash, - used_bytecodes, - ram_verification_queries, - cycle_limit, - geometry, - tree, - }; - object_store - .put(L1BatchNumber(l1_batch_number), &run_with_fixed_params_input) - .await - .unwrap(); -} - -#[derive(Debug, Serialize, Deserialize, Clone, PartialEq)] -pub struct RunWithFixedParamsInput { - pub l1_batch_number: u32, - pub last_miniblock_number: u32, - pub caller: Address, - pub entry_point_address: Address, - pub entry_point_code: Vec<[u8; 32]>, - pub initial_heap_content: Vec, - pub zk_porter_is_available: bool, - pub default_aa_code_hash: U256, - pub used_bytecodes: HashMap>, - pub ram_verification_queries: Vec<(u32, U256)>, - pub cycle_limit: usize, - pub geometry: GeometryConfig, - pub tree: PrecalculatedMerklePathsProvider, -} - -impl StoredObject for RunWithFixedParamsInput { - const BUCKET: Bucket = Bucket::WitnessInput; - type Key<'a> = L1BatchNumber; - - fn encode_key(key: Self::Key<'_>) -> String { - format!("run_with_fixed_params_input_{}.bin", key) - } - - zksync_object_store::serialize_using_bincode!(); -} diff --git a/prover/witness_generator/src/metrics.rs b/prover/witness_generator/src/metrics.rs index f0497dd23a13..dd1ad30a6924 100644 --- a/prover/witness_generator/src/metrics.rs +++ b/prover/witness_generator/src/metrics.rs @@ -1,6 +1,6 @@ use std::time::Duration; -use vise::{Buckets, Counter, Family, Gauge, Histogram, LabeledFamily, Metrics}; +use vise::{Buckets, Family, Gauge, Histogram, LabeledFamily, Metrics}; use zksync_prover_fri_utils::metrics::StageLabel; #[derive(Debug, Metrics)] @@ -10,13 +10,10 @@ pub(crate) struct WitnessGeneratorMetrics { pub blob_fetch_time: Family>, #[metrics(buckets = Buckets::LATENCIES)] pub prepare_job_time: Family>, - #[metrics(buckets = 
Buckets::LATENCIES)] + #[metrics(buckets = Buckets::exponential(60.0..=61440.0, 2.0))] pub witness_generation_time: Family>, #[metrics(buckets = Buckets::LATENCIES)] pub blob_save_time: Family>, - - pub sampled_blocks: Counter, - pub skipped_blocks: Counter, } #[vise::register] From d1b963749e73ec1956ada16d5eefecd7d3f9f539 Mon Sep 17 00:00:00 2001 From: Alex Ostrovski Date: Thu, 25 Apr 2024 13:02:34 +0300 Subject: [PATCH 3/7] =?UTF-8?q?refactor:=20Rename=20"miniblock"=20to=20"L2?= =?UTF-8?q?=20block"=20=E2=80=93=20core=20components=20(#1738)?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit ## What ❔ Renames "miniblock" to "L2 block" in core components (ones remaining in `zksync_core`). ## Why ❔ See https://github.com/matter-labs/zksync-era/pull/1696 for reasoning. ## Checklist - [x] PR title corresponds to the body of PR (we generate changelog entries from PRs). - [x] Tests for the changes have been added / updated. - [x] Documentation comments have been added / updated. - [x] Code has been formatted via `zk fmt` and `zk lint`. - [x] Spellcheck has been run via `zk spellcheck`. - [x] Linkcheck has been run via `zk linkcheck`. --- .../external_node/src/version_sync_task.rs | 5 +- .../src/interface/types/inputs/l2_block.rs | 2 +- core/lib/web3_decl/src/namespaces/zks.rs | 2 +- .../src/api_server/execution_sandbox/apply.rs | 57 ++++---- .../src/api_server/execution_sandbox/mod.rs | 22 +-- .../src/api_server/execution_sandbox/tests.rs | 20 +-- core/lib/zksync_core/src/api_server/mod.rs | 2 +- .../src/api_server/tx_sender/mod.rs | 2 +- .../src/api_server/tx_sender/proxy.rs | 2 +- .../src/api_server/tx_sender/tests.rs | 14 +- .../web3/backend_jsonrpsee/metadata.rs | 2 +- .../web3/backend_jsonrpsee/namespaces/zks.rs | 4 +- .../src/api_server/web3/metrics.rs | 4 +- .../zksync_core/src/api_server/web3/mod.rs | 32 ++--- .../src/api_server/web3/namespaces/debug.rs | 4 +- .../src/api_server/web3/namespaces/eth.rs | 24 ++-- .../api_server/web3/namespaces/snapshots.rs | 6 +- .../src/api_server/web3/namespaces/zks.rs | 8 +- .../zksync_core/src/api_server/web3/pubsub.rs | 17 ++- .../zksync_core/src/api_server/web3/state.rs | 42 +++--- .../src/api_server/web3/tests/debug.rs | 22 +-- .../src/api_server/web3/tests/filters.rs | 12 +- .../src/api_server/web3/tests/mod.rs | 84 +++++------ .../src/api_server/web3/tests/snapshots.rs | 2 +- .../src/api_server/web3/tests/vm.rs | 20 +-- .../src/api_server/web3/tests/ws.rs | 32 ++--- .../src/basic_witness_input_producer/mod.rs | 25 ++-- core/lib/zksync_core/src/consensus/fetcher.rs | 4 +- core/lib/zksync_core/src/consensus/mod.rs | 2 +- .../zksync_core/src/consensus/storage/mod.rs | 27 ++-- .../src/consensus/storage/testonly.rs | 5 +- .../lib/zksync_core/src/consensus/testonly.rs | 20 +-- core/lib/zksync_core/src/consensus/tests.rs | 8 +- core/lib/zksync_core/src/db_pruner/README.md | 2 +- core/lib/zksync_core/src/db_pruner/mod.rs | 64 ++++----- core/lib/zksync_core/src/fee_model.rs | 6 +- core/lib/zksync_core/src/genesis.rs | 4 +- core/lib/zksync_core/src/lib.rs | 22 +-- .../src/metadata_calculator/recovery/mod.rs | 25 ++-- .../src/metadata_calculator/recovery/tests.rs | 2 +- .../src/metadata_calculator/tests.rs | 12 +- .../lib/zksync_core/src/reorg_detector/mod.rs | 59 ++++---- .../zksync_core/src/reorg_detector/tests.rs | 134 +++++++++--------- .../src/state_keeper/io/common/tests.rs | 14 +- .../src/state_keeper/io/mempool.rs | 6 +- .../src/state_keeper/io/tests/tester.rs | 4 +- .../sync_layer/batch_status_updater/mod.rs 
| 22 +-- .../sync_layer/batch_status_updater/tests.rs | 10 +- .../zksync_core/src/sync_layer/external_io.rs | 54 ++++--- .../lib/zksync_core/src/sync_layer/fetcher.rs | 26 ++-- .../lib/zksync_core/src/sync_layer/metrics.rs | 5 +- .../zksync_core/src/sync_layer/sync_action.rs | 118 ++++++++------- .../zksync_core/src/sync_layer/sync_state.rs | 32 ++--- core/lib/zksync_core/src/sync_layer/tests.rs | 124 ++++++++-------- core/lib/zksync_core/src/utils/mod.rs | 20 +-- core/lib/zksync_core/src/utils/testonly.rs | 49 ++++--- .../zksync_core/src/vm_runner/tests/mod.rs | 6 +- core/node/shared_metrics/src/lib.rs | 4 +- .../tests/snapshot-recovery.test.ts | 10 +- 59 files changed, 664 insertions(+), 704 deletions(-) diff --git a/core/bin/external_node/src/version_sync_task.rs b/core/bin/external_node/src/version_sync_task.rs index 15f8d8740b66..01926cad373b 100644 --- a/core/bin/external_node/src/version_sync_task.rs +++ b/core/bin/external_node/src/version_sync_task.rs @@ -13,10 +13,7 @@ pub async fn get_l1_batch_remote_protocol_version( main_node_client: &BoxedL2Client, l1_batch_number: L1BatchNumber, ) -> anyhow::Result> { - let Some((miniblock, _)) = main_node_client - .get_miniblock_range(l1_batch_number) - .await? - else { + let Some((miniblock, _)) = main_node_client.get_l2_block_range(l1_batch_number).await? else { return Ok(None); }; let sync_block = main_node_client diff --git a/core/lib/multivm/src/interface/types/inputs/l2_block.rs b/core/lib/multivm/src/interface/types/inputs/l2_block.rs index b832266cae9d..249a9a63445e 100644 --- a/core/lib/multivm/src/interface/types/inputs/l2_block.rs +++ b/core/lib/multivm/src/interface/types/inputs/l2_block.rs @@ -9,7 +9,7 @@ pub struct L2BlockEnv { } impl L2BlockEnv { - pub fn from_miniblock_data(miniblock_execution_data: &L2BlockExecutionData) -> Self { + pub fn from_l2_block_data(miniblock_execution_data: &L2BlockExecutionData) -> Self { Self { number: miniblock_execution_data.number.0, timestamp: miniblock_execution_data.timestamp, diff --git a/core/lib/web3_decl/src/namespaces/zks.rs b/core/lib/web3_decl/src/namespaces/zks.rs index 1ec78676ca3c..a31d74600b0f 100644 --- a/core/lib/web3_decl/src/namespaces/zks.rs +++ b/core/lib/web3_decl/src/namespaces/zks.rs @@ -78,7 +78,7 @@ pub trait ZksNamespace { async fn get_l1_batch_number(&self) -> RpcResult; #[method(name = "getL1BatchBlockRange")] - async fn get_miniblock_range(&self, batch: L1BatchNumber) -> RpcResult>; + async fn get_l2_block_range(&self, batch: L1BatchNumber) -> RpcResult>; #[method(name = "getBlockDetails")] async fn get_block_details( diff --git a/core/lib/zksync_core/src/api_server/execution_sandbox/apply.rs b/core/lib/zksync_core/src/api_server/execution_sandbox/apply.rs index 7a06c471d914..aa7a541588f5 100644 --- a/core/lib/zksync_core/src/api_server/execution_sandbox/apply.rs +++ b/core/lib/zksync_core/src/api_server/execution_sandbox/apply.rs @@ -67,7 +67,7 @@ impl<'a> Sandbox<'a> { tracing::debug!("Resolved block numbers (took {resolve_time:?})"); } - if block_args.resolves_to_latest_sealed_miniblock() { + if block_args.resolves_to_latest_sealed_l2_block() { shared_args .caches .schedule_values_update(resolved_block_info.state_l2_block_number); @@ -75,7 +75,7 @@ impl<'a> Sandbox<'a> { let (next_l2_block_info, l2_block_info_to_reset) = Self::load_l2_block_info( &mut connection, - block_args.is_pending_miniblock(), + block_args.is_pending_l2_block(), &resolved_block_info, ) .await?; @@ -126,7 +126,7 @@ impl<'a> Sandbox<'a> { number: current_l2_block_info.l2_block_number + 1, 
timestamp: resolved_block_info.l1_batch_timestamp, prev_block_hash: current_l2_block_info.l2_block_hash, - // For simplicity we assume each miniblock create one virtual block. + // For simplicity, we assume each L2 block create one virtual block. // This may be wrong only during transition period. max_virtual_blocks_to_create: 1, } @@ -352,11 +352,11 @@ struct StoredL2BlockInfo { } impl StoredL2BlockInfo { - /// If `miniblock_hash` is `None`, it needs to be fetched from the storage. + /// If `l2_block_hash` is `None`, it needs to be fetched from the storage. async fn new( connection: &mut Connection<'_, Core>, - miniblock_number: L2BlockNumber, - miniblock_hash: Option, + l2_block_number: L2BlockNumber, + l2_block_hash: Option, ) -> anyhow::Result { let l2_block_info_key = StorageKey::new( AccountTreeId::new(SYSTEM_CONTEXT_ADDRESS), @@ -364,10 +364,11 @@ impl StoredL2BlockInfo { ); let l2_block_info = connection .storage_web3_dal() - .get_historical_value_unchecked(&l2_block_info_key, miniblock_number) + .get_historical_value_unchecked(&l2_block_info_key, l2_block_number) .await .context("failed reading L2 block info from VM state")?; - let (l2_block_number, l2_block_timestamp) = unpack_block_info(h256_to_u256(l2_block_info)); + let (l2_block_number_from_state, l2_block_timestamp) = + unpack_block_info(h256_to_u256(l2_block_info)); let l2_block_txs_rolling_hash_key = StorageKey::new( AccountTreeId::new(SYSTEM_CONTEXT_ADDRESS), @@ -375,23 +376,23 @@ impl StoredL2BlockInfo { ); let txs_rolling_hash = connection .storage_web3_dal() - .get_historical_value_unchecked(&l2_block_txs_rolling_hash_key, miniblock_number) + .get_historical_value_unchecked(&l2_block_txs_rolling_hash_key, l2_block_number) .await .context("failed reading transaction rolling hash from VM state")?; - let l2_block_hash = if let Some(hash) = miniblock_hash { + let l2_block_hash = if let Some(hash) = l2_block_hash { hash } else { connection .blocks_web3_dal() - .get_l2_block_hash(miniblock_number) + .get_l2_block_hash(l2_block_number) .await .map_err(DalError::generalize)? - .with_context(|| format!("miniblock #{miniblock_number} not present in storage"))? + .with_context(|| format!("L2 block #{l2_block_number} not present in storage"))? }; Ok(Self { - l2_block_number: l2_block_number as u32, + l2_block_number: l2_block_number_from_state as u32, l2_block_timestamp, l2_block_hash, txs_rolling_hash, @@ -410,7 +411,7 @@ struct ResolvedBlockInfo { } impl BlockArgs { - fn is_pending_miniblock(&self) -> bool { + fn is_pending_l2_block(&self) -> bool { matches!( self.block_id, api::BlockId::Number(api::BlockNumber::Pending) @@ -432,28 +433,28 @@ impl BlockArgs { ) -> anyhow::Result { let (state_l2_block_number, vm_l1_batch_number, l1_batch_timestamp); - let miniblock_header = if self.is_pending_miniblock() { + let l2_block_header = if self.is_pending_l2_block() { vm_l1_batch_number = connection .blocks_dal() .get_sealed_l1_batch_number() .await? .context("no L1 batches in storage")?; - let sealed_miniblock_header = connection + let sealed_l2_block_header = connection .blocks_dal() .get_last_sealed_l2_block_header() .await? - .context("no miniblocks in storage")?; + .context("no L2 blocks in storage")?; - state_l2_block_number = sealed_miniblock_header.number; - // Timestamp of the next L1 batch must be greater than the timestamp of the last miniblock. 
- l1_batch_timestamp = seconds_since_epoch().max(sealed_miniblock_header.timestamp + 1); - sealed_miniblock_header + state_l2_block_number = sealed_l2_block_header.number; + // Timestamp of the next L1 batch must be greater than the timestamp of the last L2 block. + l1_batch_timestamp = seconds_since_epoch().max(sealed_l2_block_header.timestamp + 1); + sealed_l2_block_header } else { vm_l1_batch_number = connection .storage_web3_dal() .resolve_l1_batch_number_of_l2_block(self.resolved_block_number) .await - .context("failed resolving L1 batch for miniblock")? + .context("failed resolving L1 batch for L2 block")? .expected_l1_batch(); l1_batch_timestamp = self .l1_batch_timestamp_s @@ -464,29 +465,29 @@ impl BlockArgs { .blocks_dal() .get_l2_block_header(self.resolved_block_number) .await? - .context("resolved miniblock disappeared from storage")? + .context("resolved L2 block disappeared from storage")? }; let historical_fee_input = if !self.is_estimate_like() { - let miniblock_header = connection + let l2_block_header = connection .blocks_dal() .get_l2_block_header(self.resolved_block_number) .await? - .context("resolved miniblock is not in storage")?; - Some(miniblock_header.batch_fee_input) + .context("resolved L2 block is not in storage")?; + Some(l2_block_header.batch_fee_input) } else { None }; // Blocks without version specified are considered to be of `Version9`. // TODO: remove `unwrap_or` when protocol version ID will be assigned for each block. - let protocol_version = miniblock_header + let protocol_version = l2_block_header .protocol_version .unwrap_or(ProtocolVersionId::last_potentially_undefined()); Ok(ResolvedBlockInfo { state_l2_block_number, - state_l2_block_hash: miniblock_header.hash, + state_l2_block_hash: l2_block_header.hash, vm_l1_batch_number, l1_batch_timestamp, protocol_version, diff --git a/core/lib/zksync_core/src/api_server/execution_sandbox/mod.rs b/core/lib/zksync_core/src/api_server/execution_sandbox/mod.rs index 706aa57e4ed3..c1659c9fd25e 100644 --- a/core/lib/zksync_core/src/api_server/execution_sandbox/mod.rs +++ b/core/lib/zksync_core/src/api_server/execution_sandbox/mod.rs @@ -214,7 +214,7 @@ impl BlockStartInfoInner { } } -/// Information about first L1 batch / miniblock in the node storage. +/// Information about first L1 batch / L2 block in the node storage. #[derive(Debug, Clone)] pub(crate) struct BlockStartInfo { cached_pruning_info: Arc>, @@ -275,7 +275,7 @@ impl BlockStartInfo { } } - pub async fn first_miniblock( + pub async fn first_l2_block( &self, storage: &mut Connection<'_, Core>, ) -> anyhow::Result { @@ -300,26 +300,26 @@ impl BlockStartInfo { } /// Checks whether a block with the specified ID is pruned and returns an error if it is. - /// The `Err` variant wraps the first non-pruned miniblock. + /// The `Err` variant wraps the first non-pruned L2 block. 
pub async fn ensure_not_pruned_block( &self, block: api::BlockId, storage: &mut Connection<'_, Core>, ) -> Result<(), BlockArgsError> { - let first_miniblock = self - .first_miniblock(storage) + let first_l2_block = self + .first_l2_block(storage) .await .map_err(BlockArgsError::Database)?; match block { api::BlockId::Number(api::BlockNumber::Number(number)) - if number < first_miniblock.0.into() => + if number < first_l2_block.0.into() => { - Err(BlockArgsError::Pruned(first_miniblock)) + Err(BlockArgsError::Pruned(first_l2_block)) } api::BlockId::Number(api::BlockNumber::Earliest) - if first_miniblock > L2BlockNumber(0) => + if first_l2_block > L2BlockNumber(0) => { - Err(BlockArgsError::Pruned(first_miniblock)) + Err(BlockArgsError::Pruned(first_l2_block)) } _ => Ok(()), } @@ -385,7 +385,7 @@ impl BlockArgs { .resolve_l1_batch_number_of_l2_block(resolved_block_number) .await .with_context(|| { - format!("failed resolving L1 batch number of miniblock #{resolved_block_number}") + format!("failed resolving L1 batch number of L2 block #{resolved_block_number}") })?; let l1_batch_timestamp = connection .blocks_web3_dal() @@ -404,7 +404,7 @@ impl BlockArgs { self.resolved_block_number } - pub fn resolves_to_latest_sealed_miniblock(&self) -> bool { + pub fn resolves_to_latest_sealed_l2_block(&self) -> bool { matches!( self.block_id, api::BlockId::Number( diff --git a/core/lib/zksync_core/src/api_server/execution_sandbox/tests.rs b/core/lib/zksync_core/src/api_server/execution_sandbox/tests.rs index 27709307c48a..1281b82c68f6 100644 --- a/core/lib/zksync_core/src/api_server/execution_sandbox/tests.rs +++ b/core/lib/zksync_core/src/api_server/execution_sandbox/tests.rs @@ -7,7 +7,7 @@ use super::*; use crate::{ api_server::{execution_sandbox::apply::apply_vm_in_sandbox, tx_sender::ApiContracts}, genesis::{insert_genesis_batch, GenesisParams}, - utils::testonly::{create_l2_transaction, create_miniblock, prepare_recovery_snapshot}, + utils::testonly::{create_l2_block, create_l2_transaction, prepare_recovery_snapshot}, }; #[tokio::test] @@ -17,10 +17,10 @@ async fn creating_block_args() { insert_genesis_batch(&mut storage, &GenesisParams::mock()) .await .unwrap(); - let miniblock = create_miniblock(1); + let l2_block = create_l2_block(1); storage .blocks_dal() - .insert_l2_block(&miniblock) + .insert_l2_block(&l2_block) .await .unwrap(); @@ -34,7 +34,7 @@ async fn creating_block_args() { let start_info = BlockStartInfo::new(&mut storage).await.unwrap(); assert_eq!( - start_info.first_miniblock(&mut storage).await.unwrap(), + start_info.first_l2_block(&mut storage).await.unwrap(), L2BlockNumber(0) ); assert_eq!( @@ -50,7 +50,7 @@ async fn creating_block_args() { assert_eq!(latest_block_args.resolved_block_number, L2BlockNumber(1)); assert_eq!( latest_block_args.l1_batch_timestamp_s, - Some(miniblock.timestamp) + Some(l2_block.timestamp) ); let earliest_block = api::BlockId::Number(api::BlockNumber::Earliest); @@ -88,7 +88,7 @@ async fn creating_block_args_after_snapshot_recovery() { let start_info = BlockStartInfo::new(&mut storage).await.unwrap(); assert_eq!( - start_info.first_miniblock(&mut storage).await.unwrap(), + start_info.first_l2_block(&mut storage).await.unwrap(), snapshot_recovery.l2_block_number + 1 ); assert_eq!( @@ -127,10 +127,10 @@ async fn creating_block_args_after_snapshot_recovery() { assert_matches!(err, BlockArgsError::Missing); } - let miniblock = create_miniblock(snapshot_recovery.l2_block_number.0 + 1); + let l2_block = create_l2_block(snapshot_recovery.l2_block_number.0 + 
1); storage .blocks_dal() - .insert_l2_block(&miniblock) + .insert_l2_block(&l2_block) .await .unwrap(); @@ -138,10 +138,10 @@ async fn creating_block_args_after_snapshot_recovery() { .await .unwrap(); assert_eq!(latest_block_args.block_id, latest_block); - assert_eq!(latest_block_args.resolved_block_number, miniblock.number); + assert_eq!(latest_block_args.resolved_block_number, l2_block.number); assert_eq!( latest_block_args.l1_batch_timestamp_s, - Some(miniblock.timestamp) + Some(l2_block.timestamp) ); for pruned_block in pruned_blocks { diff --git a/core/lib/zksync_core/src/api_server/mod.rs b/core/lib/zksync_core/src/api_server/mod.rs index b05214e69724..fb036fbaa484 100644 --- a/core/lib/zksync_core/src/api_server/mod.rs +++ b/core/lib/zksync_core/src/api_server/mod.rs @@ -1,4 +1,4 @@ -// Everywhere in this module the word "block" actually means "miniblock". +// Everywhere in this module the word "block" actually means "L2 block". pub mod contract_verification; pub mod execution_sandbox; diff --git a/core/lib/zksync_core/src/api_server/tx_sender/mod.rs b/core/lib/zksync_core/src/api_server/tx_sender/mod.rs index e37fd6cf62e4..9c1c83cbc8cf 100644 --- a/core/lib/zksync_core/src/api_server/tx_sender/mod.rs +++ b/core/lib/zksync_core/src/api_server/tx_sender/mod.rs @@ -537,7 +537,7 @@ impl TxSender { .get_address_historical_nonce(initiator_account, latest_block_number) .await .with_context(|| { - format!("failed getting nonce for address {initiator_account:?} at miniblock #{latest_block_number}") + format!("failed getting nonce for address {initiator_account:?} at L2 block #{latest_block_number}") })?; let nonce = u32::try_from(nonce) .map_err(|err| anyhow::anyhow!("failed converting nonce to u32: {err}"))?; diff --git a/core/lib/zksync_core/src/api_server/tx_sender/proxy.rs b/core/lib/zksync_core/src/api_server/tx_sender/proxy.rs index f6f87a4d51ab..eeb429940175 100644 --- a/core/lib/zksync_core/src/api_server/tx_sender/proxy.rs +++ b/core/lib/zksync_core/src/api_server/tx_sender/proxy.rs @@ -59,7 +59,7 @@ impl TxCache { async fn remove_tx(&self, tx_hash: H256) { self.inner.write().await.tx_cache.remove(&tx_hash); - // We intentionally don't change `nonces_by_account`; they should only be changed in response to new miniblocks + // We intentionally don't change `nonces_by_account`; they should only be changed in response to new L2 blocks } async fn run_updates( diff --git a/core/lib/zksync_core/src/api_server/tx_sender/tests.rs b/core/lib/zksync_core/src/api_server/tx_sender/tests.rs index 4544b30ffa1d..3cfd6593688d 100644 --- a/core/lib/zksync_core/src/api_server/tx_sender/tests.rs +++ b/core/lib/zksync_core/src/api_server/tx_sender/tests.rs @@ -7,7 +7,7 @@ use super::*; use crate::{ api_server::execution_sandbox::{testonly::MockTransactionExecutor, VmConcurrencyBarrier}, genesis::{insert_genesis_batch, GenesisParams}, - utils::testonly::{create_miniblock, prepare_recovery_snapshot, MockBatchFeeParamsProvider}, + utils::testonly::{create_l2_block, prepare_recovery_snapshot, MockBatchFeeParamsProvider}, }; pub(crate) async fn create_test_tx_sender( @@ -66,10 +66,10 @@ async fn getting_nonce_for_account() { let nonce = tx_sender.get_expected_nonce(test_address).await.unwrap(); assert_eq!(nonce, Nonce(123)); - // Insert another miniblock with a new nonce log. + // Insert another L2 block with a new nonce log. 
storage .blocks_dal() - .insert_l2_block(&create_miniblock(1)) + .insert_l2_block(&create_l2_block(1)) .await .unwrap(); let nonce_log = StorageLog { @@ -91,7 +91,7 @@ async fn getting_nonce_for_account() { #[tokio::test] async fn getting_nonce_for_account_after_snapshot_recovery() { - const SNAPSHOT_MINIBLOCK_NUMBER: L2BlockNumber = L2BlockNumber(42); + const SNAPSHOT_L2_BLOCK_NUMBER: L2BlockNumber = L2BlockNumber(42); let pool = ConnectionPool::::test_pool().await; let mut storage = pool.connection().await.unwrap(); @@ -104,7 +104,7 @@ async fn getting_nonce_for_account_after_snapshot_recovery() { prepare_recovery_snapshot( &mut storage, L1BatchNumber(23), - SNAPSHOT_MINIBLOCK_NUMBER, + SNAPSHOT_L2_BLOCK_NUMBER, &nonce_logs, ) .await; @@ -115,7 +115,7 @@ async fn getting_nonce_for_account_after_snapshot_recovery() { storage .blocks_dal() - .insert_l2_block(&create_miniblock(SNAPSHOT_MINIBLOCK_NUMBER.0 + 1)) + .insert_l2_block(&create_l2_block(SNAPSHOT_L2_BLOCK_NUMBER.0 + 1)) .await .unwrap(); let new_nonce_logs = vec![StorageLog::new_write_log( @@ -125,7 +125,7 @@ async fn getting_nonce_for_account_after_snapshot_recovery() { storage .storage_logs_dal() .insert_storage_logs( - SNAPSHOT_MINIBLOCK_NUMBER + 1, + SNAPSHOT_L2_BLOCK_NUMBER + 1, &[(H256::default(), new_nonce_logs)], ) .await diff --git a/core/lib/zksync_core/src/api_server/web3/backend_jsonrpsee/metadata.rs b/core/lib/zksync_core/src/api_server/web3/backend_jsonrpsee/metadata.rs index 6fb1975bb242..a2f8082aeeaa 100644 --- a/core/lib/zksync_core/src/api_server/web3/backend_jsonrpsee/metadata.rs +++ b/core/lib/zksync_core/src/api_server/web3/backend_jsonrpsee/metadata.rs @@ -77,7 +77,7 @@ impl MethodTracer { } } - /// Sets the difference between the latest sealed miniblock and the requested miniblock for the current JSON-RPC method call. + /// Sets the difference between the latest sealed L2 block and the requested L2 block for the current JSON-RPC method call. /// It will be used as a metric label for method latency etc. /// /// This should be called inside JSON-RPC method handlers; otherwise, this method is a no-op. diff --git a/core/lib/zksync_core/src/api_server/web3/backend_jsonrpsee/namespaces/zks.rs b/core/lib/zksync_core/src/api_server/web3/backend_jsonrpsee/namespaces/zks.rs index ebcb9ca19702..087de5e20d88 100644 --- a/core/lib/zksync_core/src/api_server/web3/backend_jsonrpsee/namespaces/zks.rs +++ b/core/lib/zksync_core/src/api_server/web3/backend_jsonrpsee/namespaces/zks.rs @@ -95,8 +95,8 @@ impl ZksNamespaceServer for ZksNamespace { .map_err(|err| self.current_method().map_err(err)) } - async fn get_miniblock_range(&self, batch: L1BatchNumber) -> RpcResult> { - self.get_miniblock_range_impl(batch) + async fn get_l2_block_range(&self, batch: L1BatchNumber) -> RpcResult> { + self.get_l2_block_range_impl(batch) .await .map_err(|err| self.current_method().map_err(err)) } diff --git a/core/lib/zksync_core/src/api_server/web3/metrics.rs b/core/lib/zksync_core/src/api_server/web3/metrics.rs index 96ea96b03eee..78efa331eecf 100644 --- a/core/lib/zksync_core/src/api_server/web3/metrics.rs +++ b/core/lib/zksync_core/src/api_server/web3/metrics.rs @@ -282,12 +282,12 @@ pub(in crate::api_server) struct ApiMetrics { web3_info: Family>, /// Latency of a Web3 call. Calls that take block ID as an input have block ID and block diff - /// labels (the latter is the difference between the latest sealed miniblock and the resolved miniblock). 
+ /// labels (the latter is the difference between the latest sealed L2 block and the resolved L2 block). #[metrics(buckets = Buckets::LATENCIES)] web3_call: Family>, #[metrics(buckets = Buckets::LATENCIES, unit = Unit::Seconds)] web3_dropped_call_latency: Family>, - /// Difference between the latest sealed miniblock and the resolved miniblock for a web3 call. + /// Difference between the latest sealed L2 block and the resolved L2 block for a web3 call. #[metrics(buckets = BLOCK_DIFF_BUCKETS, labels = ["method"])] web3_call_block_diff: LabeledFamily<&'static str, Histogram>, /// Serialized response size in bytes. Only recorded for successful responses. diff --git a/core/lib/zksync_core/src/api_server/web3/mod.rs b/core/lib/zksync_core/src/api_server/web3/mod.rs index 7a3a46f6d2f4..9aa8cda6cade 100644 --- a/core/lib/zksync_core/src/api_server/web3/mod.rs +++ b/core/lib/zksync_core/src/api_server/web3/mod.rs @@ -39,7 +39,7 @@ use self::{ ZksNamespace, }, pubsub::{EthSubscribe, EthSubscriptionIdProvider, PubSubEvent}, - state::{Filters, InternalApiConfig, RpcState, SealedMiniblockNumber}, + state::{Filters, InternalApiConfig, RpcState, SealedL2BlockNumber}, }; use crate::{ api_server::{ @@ -324,7 +324,7 @@ impl ApiServer { async fn build_rpc_state( self, - last_sealed_miniblock: SealedMiniblockNumber, + last_sealed_l2_block: SealedL2BlockNumber, ) -> anyhow::Result { let mut storage = self.updaters_pool.connection_tagged("api").await?; let start_info = BlockStartInfo::new(&mut storage).await?; @@ -349,7 +349,7 @@ impl ApiServer { api_config: self.config, start_info, mempool_cache: self.optional.mempool_cache, - last_sealed_miniblock, + last_sealed_l2_block, tree_api: self.optional.tree_api, }) } @@ -357,11 +357,11 @@ impl ApiServer { async fn build_rpc_module( self, pub_sub: Option, - last_sealed_miniblock: SealedMiniblockNumber, + last_sealed_l2_block: SealedL2BlockNumber, ) -> anyhow::Result> { let namespaces = self.namespaces.clone(); let zksync_network_id = self.config.l2_chain_id; - let rpc_state = self.build_rpc_state(last_sealed_miniblock).await?; + let rpc_state = self.build_rpc_state(last_sealed_l2_block).await?; // Collect all the methods into a single RPC module. let mut rpc = RpcModule::new(()); @@ -455,21 +455,21 @@ impl ApiServer { self, stop_receiver: watch::Receiver, ) -> anyhow::Result { - // Chosen to be significantly smaller than the interval between miniblocks, but larger than - // the latency of getting the latest sealed miniblock number from Postgres. If the API server - // processes enough requests, information about the latest sealed miniblock will be updated + // Chosen to be significantly smaller than the interval between L2 blocks, but larger than + // the latency of getting the latest sealed L2 block number from Postgres. If the API server + // processes enough requests, information about the latest sealed L2 block will be updated // by reporting block difference metrics, so the actual update lag would be much smaller than this value. 
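The interval comment above describes the update task that `SealedL2BlockNumber::new` spawns: poll Postgres on a short interval, publish the result through an atomic, and exit on the stop signal. A minimal sketch of that task shape, with a closure standing in for the Postgres query (`run_updater` and `fetch_latest` are illustrative names, not the actual API):

```rust
use std::{
    sync::{atomic::{AtomicU32, Ordering}, Arc},
    time::Duration,
};
use tokio::sync::watch;

/// Spawn-able updater: polls `fetch_latest` on an interval and publishes the
/// result via an atomic, mirroring the task shape described above.
async fn run_updater(
    number: Arc<AtomicU32>,
    mut fetch_latest: impl FnMut() -> Option<u32>,
    update_interval: Duration,
    stop_receiver: watch::Receiver<bool>,
) {
    loop {
        if *stop_receiver.borrow() {
            return; // graceful shutdown on the stop signal
        }
        if let Some(latest) = fetch_latest() {
            // `fetch_max` keeps the value monotonic even if a concurrent
            // request handler already reported a newer block.
            number.fetch_max(latest, Ordering::Relaxed);
        }
        tokio::time::sleep(update_interval).await;
    }
}

#[tokio::main]
async fn main() {
    let number = Arc::new(AtomicU32::new(0));
    let (stop_sender, stop_receiver) = watch::channel(false);
    let mut fake_db = 0u32;
    let task = tokio::spawn(run_updater(
        Arc::clone(&number),
        move || { fake_db += 1; Some(fake_db) }, // stand-in for the DB query
        Duration::from_millis(25),
        stop_receiver,
    ));
    tokio::time::sleep(Duration::from_millis(100)).await;
    stop_sender.send(true).unwrap();
    task.await.unwrap();
    assert!(number.load(Ordering::Relaxed) >= 1);
}
```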
- const SEALED_MINIBLOCK_UPDATE_INTERVAL: Duration = Duration::from_millis(25); + const SEALED_L2_BLOCK_UPDATE_INTERVAL: Duration = Duration::from_millis(25); let transport = self.transport; - let (last_sealed_miniblock, sealed_miniblock_update_task) = SealedMiniblockNumber::new( + let (last_sealed_l2_block, sealed_l2_block_update_task) = SealedL2BlockNumber::new( self.updaters_pool.clone(), - SEALED_MINIBLOCK_UPDATE_INTERVAL, + SEALED_L2_BLOCK_UPDATE_INTERVAL, stop_receiver.clone(), ); - let mut tasks = vec![tokio::spawn(sealed_miniblock_update_task)]; + let mut tasks = vec![tokio::spawn(sealed_l2_block_update_task)]; let pub_sub = if matches!(transport, ApiTransport::WebSocket(_)) && self.namespaces.contains(&Namespace::Pubsub) { @@ -495,7 +495,7 @@ impl ApiServer { let server_task = tokio::spawn(self.run_jsonrpsee_server( stop_receiver, pub_sub, - last_sealed_miniblock, + last_sealed_l2_block, local_addr_sender, )); @@ -566,7 +566,7 @@ impl ApiServer { self, mut stop_receiver: watch::Receiver, pub_sub: Option, - last_sealed_miniblock: SealedMiniblockNumber, + last_sealed_l2_block: SealedL2BlockNumber, local_addr_sender: oneshot::Sender, ) -> anyhow::Result<()> { let transport = self.transport; @@ -622,9 +622,7 @@ impl ApiServer { tracing::info!("Enabled extended call tracing for {transport_str} API server; this might negatively affect performance"); } - let rpc = self - .build_rpc_module(pub_sub, last_sealed_miniblock) - .await?; + let rpc = self.build_rpc_module(pub_sub, last_sealed_l2_block).await?; let registered_method_names = Arc::new(rpc.method_names().collect::>()); tracing::debug!( "Built RPC module for {transport_str} server with {} methods: {registered_method_names:?}", diff --git a/core/lib/zksync_core/src/api_server/web3/namespaces/debug.rs b/core/lib/zksync_core/src/api_server/web3/namespaces/debug.rs index 7feede944194..b3b0e9ce8ab5 100644 --- a/core/lib/zksync_core/src/api_server/web3/namespaces/debug.rs +++ b/core/lib/zksync_core/src/api_server/web3/namespaces/debug.rs @@ -70,7 +70,7 @@ impl DebugNamespace { let mut connection = self.state.acquire_connection().await?; let block_number = self.state.resolve_block(&mut connection, block_id).await?; self.current_method() - .set_block_diff(self.state.last_sealed_miniblock.diff(block_number)); + .set_block_diff(self.state.last_sealed_l2_block.diff(block_number)); let call_traces = connection .blocks_web3_dal() @@ -148,7 +148,7 @@ impl DebugNamespace { self.current_method().set_block_diff( self.state - .last_sealed_miniblock + .last_sealed_l2_block .diff_with_block_args(&block_args), ); let tx = L2Tx::from_request(request.into(), MAX_ENCODED_TX_SIZE)?; diff --git a/core/lib/zksync_core/src/api_server/web3/namespaces/eth.rs b/core/lib/zksync_core/src/api_server/web3/namespaces/eth.rs index 8d8e7fb3aefa..21b4d1ece25d 100644 --- a/core/lib/zksync_core/src/api_server/web3/namespaces/eth.rs +++ b/core/lib/zksync_core/src/api_server/web3/namespaces/eth.rs @@ -70,7 +70,7 @@ impl EthNamespace { .await?; self.current_method().set_block_diff( self.state - .last_sealed_miniblock + .last_sealed_l2_block .diff_with_block_args(&block_args), ); drop(connection); @@ -164,7 +164,7 @@ impl EthNamespace { } fn set_block_diff(&self, block_number: L2BlockNumber) { - let diff = self.state.last_sealed_miniblock.diff(block_number); + let diff = self.state.last_sealed_l2_block.diff(block_number); self.current_method().set_block_diff(diff); } @@ -303,7 +303,7 @@ impl EthNamespace { .map_err(DalError::generalize)?; if tx_count.is_some() { - 
self.set_block_diff(block_number); // only report block diff for existing miniblocks + self.set_block_diff(block_number); // only report block diff for existing L2 blocks } Ok(tx_count.map(Into::into)) } @@ -336,7 +336,7 @@ impl EthNamespace { else { return Ok(None); }; - self.set_block_diff(block_number); // only report block diff for existing miniblocks + self.set_block_diff(block_number); // only report block diff for existing L2 blocks let mut receipts = storage .transactions_web3_dal() @@ -509,7 +509,7 @@ impl EthNamespace { .get_sealed_l2_block_number() .await .map_err(DalError::generalize)? - .context("no miniblocks in storage")?; + .context("no L2 blocks in storage")?; let next_block_number = last_block_number + 1; drop(storage); @@ -652,21 +652,21 @@ impl EthNamespace { .max(1); let mut connection = self.state.acquire_connection().await?; - let newest_miniblock = self + let newest_l2_block = self .state .resolve_block(&mut connection, BlockId::Number(newest_block)) .await?; - self.set_block_diff(newest_miniblock); + self.set_block_diff(newest_l2_block); let mut base_fee_per_gas = connection .blocks_web3_dal() - .get_fee_history(newest_miniblock, block_count) + .get_fee_history(newest_l2_block, block_count) .await .map_err(DalError::generalize)?; // DAL method returns fees in DESC order while we need ASC. base_fee_per_gas.reverse(); - let oldest_block = newest_miniblock.0 + 1 - base_fee_per_gas.len() as u32; + let oldest_block = newest_l2_block.0 + 1 - base_fee_per_gas.len() as u32; // We do not store gas used ratio for blocks, returns array of zeroes as a placeholder. let gas_used_ratio = vec![0.0; base_fee_per_gas.len()]; // Effective priority gas price is currently 0. @@ -675,7 +675,7 @@ impl EthNamespace { base_fee_per_gas.len() ]); - // `base_fee_per_gas` for next miniblock cannot be calculated, appending last fee as a placeholder. + // `base_fee_per_gas` for next L2 block cannot be calculated, appending last fee as a placeholder. base_fee_per_gas.push(*base_fee_per_gas.last().unwrap()); Ok(FeeHistory { oldest_block: web3::types::BlockNumber::Number(oldest_block.into()), @@ -783,7 +783,7 @@ impl EthNamespace { // Check if there is more than one block in range and there are more than `req_entities_limit` logs that satisfies filter. // In this case we should return error and suggest requesting logs with smaller block range. if *from_block != to_block { - if let Some(miniblock_number) = storage + if let Some(l2_block_number) = storage .events_web3_dal() .get_log_block_number( &get_logs_filter, @@ -795,7 +795,7 @@ impl EthNamespace { return Err(Web3Error::LogsLimitExceeded( self.state.api_config.req_entities_limit, from_block.0, - miniblock_number.0 - 1, + l2_block_number.0 - 1, )); } } diff --git a/core/lib/zksync_core/src/api_server/web3/namespaces/snapshots.rs b/core/lib/zksync_core/src/api_server/web3/namespaces/snapshots.rs index 34855083f751..9397d24479df 100644 --- a/core/lib/zksync_core/src/api_server/web3/namespaces/snapshots.rs +++ b/core/lib/zksync_core/src/api_server/web3/namespaces/snapshots.rs @@ -63,17 +63,17 @@ impl SnapshotsNamespace { }) }) .collect(); - let (_, miniblock_number) = storage_processor + let (_, l2_block_number) = storage_processor .blocks_dal() .get_l2_block_range_of_l1_batch(l1_batch_number) .await .map_err(DalError::generalize)? 
- .with_context(|| format!("missing miniblocks for L1 batch #{l1_batch_number}"))?; + .with_context(|| format!("missing L2 blocks for L1 batch #{l1_batch_number}"))?; Ok(Some(SnapshotHeader { version: snapshot_metadata.version.into(), l1_batch_number: snapshot_metadata.l1_batch_number, - l2_block_number: miniblock_number, + l2_block_number, storage_logs_chunks: chunks, factory_deps_filepath: snapshot_metadata.factory_deps_filepath, })) diff --git a/core/lib/zksync_core/src/api_server/web3/namespaces/zks.rs b/core/lib/zksync_core/src/api_server/web3/namespaces/zks.rs index 294de2009a57..6e3aa80cf806 100644 --- a/core/lib/zksync_core/src/api_server/web3/namespaces/zks.rs +++ b/core/lib/zksync_core/src/api_server/web3/namespaces/zks.rs @@ -225,12 +225,12 @@ impl ZksNamespace { else { return Ok(None); }; - let (first_miniblock_of_l1_batch, _) = storage + let (first_l2_block_of_l1_batch, _) = storage .blocks_web3_dal() .get_l2_block_range_of_l1_batch(l1_batch_number) .await .map_err(DalError::generalize)? - .context("L1 batch should contain at least one miniblock")?; + .context("L1 batch should contain at least one L2 block")?; // Position of l1 log in L1 batch relative to logs with identical data let l1_log_relative_position = if let Some(l2_log_position) = l2_log_position { @@ -238,7 +238,7 @@ impl ZksNamespace { .events_web3_dal() .get_logs( GetLogsFilter { - from_block: first_miniblock_of_l1_batch, + from_block: first_l2_block_of_l1_batch, to_block: block_number, addresses: vec![L1_MESSENGER_ADDRESS], topics: vec![(2, vec![address_to_h256(&sender)]), (3, vec![msg])], @@ -361,7 +361,7 @@ impl ZksNamespace { } #[tracing::instrument(skip(self))] - pub async fn get_miniblock_range_impl( + pub async fn get_l2_block_range_impl( &self, batch: L1BatchNumber, ) -> Result, Web3Error> { diff --git a/core/lib/zksync_core/src/api_server/web3/pubsub.rs b/core/lib/zksync_core/src/api_server/web3/pubsub.rs index 8a52c1dfa5c2..b8dc46c85e71 100644 --- a/core/lib/zksync_core/src/api_server/web3/pubsub.rs +++ b/core/lib/zksync_core/src/api_server/web3/pubsub.rs @@ -43,7 +43,7 @@ impl IdProvider for EthSubscriptionIdProvider { pub(super) enum PubSubEvent { Subscribed(SubscriptionType), NotifyIterationFinished(SubscriptionType), - MiniblockAdvanced(SubscriptionType, L2BlockNumber), + L2BlockAdvanced(SubscriptionType, L2BlockNumber), } /// Manager of notifications for a certain type of subscriptions. @@ -58,16 +58,15 @@ struct PubSubNotifier { impl PubSubNotifier { // Notifier tasks are spawned independently of the main server task, so we need to wait for // Postgres to be non-empty separately. - async fn get_starting_miniblock_number( + async fn get_starting_l2_block_number( &self, stop_receiver: &mut watch::Receiver, ) -> anyhow::Result> { while !*stop_receiver.borrow_and_update() { let mut storage = self.connection_pool.connection_tagged("api").await?; - if let Some(miniblock_number) = - storage.blocks_dal().get_sealed_l2_block_number().await? + if let Some(l2_block_number) = storage.blocks_dal().get_sealed_l2_block_number().await? { - return Ok(Some(miniblock_number)); + return Ok(Some(l2_block_number)); } drop(storage); @@ -91,7 +90,7 @@ impl PubSubNotifier { impl PubSubNotifier { async fn notify_blocks(self, mut stop_receiver: watch::Receiver) -> anyhow::Result<()> { let Some(mut last_block_number) = self - .get_starting_miniblock_number(&mut stop_receiver) + .get_starting_l2_block_number(&mut stop_receiver) .await? 
else { tracing::info!("Stop signal received, pubsub_block_notifier is shutting down"); @@ -114,7 +113,7 @@ impl PubSubNotifier { last_block_number = L2BlockNumber(last_block.number.unwrap().as_u32()); let new_blocks = new_blocks.into_iter().map(PubSubResult::Header).collect(); self.send_pub_sub_results(new_blocks, SubscriptionType::Blocks); - self.emit_event(PubSubEvent::MiniblockAdvanced( + self.emit_event(PubSubEvent::L2BlockAdvanced( SubscriptionType::Blocks, last_block_number, )); @@ -187,7 +186,7 @@ impl PubSubNotifier { async fn notify_logs(self, mut stop_receiver: watch::Receiver) -> anyhow::Result<()> { let Some(mut last_block_number) = self - .get_starting_miniblock_number(&mut stop_receiver) + .get_starting_l2_block_number(&mut stop_receiver) .await? else { tracing::info!("Stop signal received, pubsub_logs_notifier is shutting down"); @@ -210,7 +209,7 @@ impl PubSubNotifier { last_block_number = L2BlockNumber(last_log.block_number.unwrap().as_u32()); let new_logs = new_logs.into_iter().map(PubSubResult::Log).collect(); self.send_pub_sub_results(new_logs, SubscriptionType::Logs); - self.emit_event(PubSubEvent::MiniblockAdvanced( + self.emit_event(PubSubEvent::L2BlockAdvanced( SubscriptionType::Logs, last_block_number, )); diff --git a/core/lib/zksync_core/src/api_server/web3/state.rs b/core/lib/zksync_core/src/api_server/web3/state.rs index 52ada6d34069..31433a623c3c 100644 --- a/core/lib/zksync_core/src/api_server/web3/state.rs +++ b/core/lib/zksync_core/src/api_server/web3/state.rs @@ -65,7 +65,7 @@ impl From for PruneQuery { impl From for Web3Error { fn from(value: BlockArgsError) -> Self { match value { - BlockArgsError::Pruned(miniblock) => Web3Error::PrunedBlock(miniblock), + BlockArgsError::Pruned(l2_block) => Web3Error::PrunedBlock(l2_block), BlockArgsError::Missing => Web3Error::NoBlock, BlockArgsError::Database(error) => Web3Error::InternalError(error), } @@ -161,16 +161,16 @@ impl InternalApiConfig { } } -/// Thread-safe updatable information about the last sealed miniblock number. +/// Thread-safe updatable information about the last sealed L2 block number. /// /// The information may be temporarily outdated and thus should only be used where this is OK /// (e.g., for metrics reporting). The value is updated by [`Self::diff()`] and [`Self::diff_with_block_args()`] /// and on an interval specified when creating an instance. #[derive(Debug, Clone)] -pub(crate) struct SealedMiniblockNumber(Arc); +pub(crate) struct SealedL2BlockNumber(Arc); -impl SealedMiniblockNumber { - /// Creates a handle to the last sealed miniblock number together with a task that will update +impl SealedL2BlockNumber { + /// Creates a handle to the last sealed L2 block number together with a task that will update /// it on a schedule. pub fn new( connection_pool: ConnectionPool, @@ -183,12 +183,12 @@ impl SealedMiniblockNumber { let update_task = async move { loop { if *stop_receiver.borrow() { - tracing::debug!("Stopping latest sealed miniblock updates"); + tracing::debug!("Stopping latest sealed L2 block updates"); return Ok(()); } let mut connection = connection_pool.connection_tagged("api").await.unwrap(); - let Some(last_sealed_miniblock) = + let Some(last_sealed_l2_block) = connection.blocks_dal().get_sealed_l2_block_number().await? 
else { tokio::time::sleep(update_interval).await; @@ -196,7 +196,7 @@ impl SealedMiniblockNumber { }; drop(connection); - number_updater.update(last_sealed_miniblock); + number_updater.update(last_sealed_l2_block); tokio::time::sleep(update_interval).await; } }; @@ -204,29 +204,29 @@ impl SealedMiniblockNumber { (this, update_task) } - /// Potentially updates the last sealed miniblock number by comparing it to the provided - /// sealed miniblock number (not necessarily the last one). + /// Potentially updates the last sealed L2 block number by comparing it to the provided + /// sealed L2 block number (not necessarily the last one). /// - /// Returns the last sealed miniblock number after the update. - fn update(&self, maybe_newer_miniblock_number: L2BlockNumber) -> L2BlockNumber { + /// Returns the last sealed L2 block number after the update. + fn update(&self, maybe_newer_l2_block_number: L2BlockNumber) -> L2BlockNumber { let prev_value = self .0 - .fetch_max(maybe_newer_miniblock_number.0, Ordering::Relaxed); - L2BlockNumber(prev_value).max(maybe_newer_miniblock_number) + .fetch_max(maybe_newer_l2_block_number.0, Ordering::Relaxed); + L2BlockNumber(prev_value).max(maybe_newer_l2_block_number) } - pub fn diff(&self, miniblock_number: L2BlockNumber) -> u32 { - let sealed_miniblock_number = self.update(miniblock_number); - sealed_miniblock_number.0.saturating_sub(miniblock_number.0) + pub fn diff(&self, l2_block_number: L2BlockNumber) -> u32 { + let sealed_l2_block_number = self.update(l2_block_number); + sealed_l2_block_number.0.saturating_sub(l2_block_number.0) } - /// Returns the difference between the latest miniblock number and the resolved miniblock number + /// Returns the difference between the latest L2 block number and the resolved L2 block number /// from `block_args`. pub fn diff_with_block_args(&self, block_args: &BlockArgs) -> u32 { // We compute the difference in any case, since it may update the stored value. let diff = self.diff(block_args.resolved_block_number()); - if block_args.resolves_to_latest_sealed_miniblock() { + if block_args.resolves_to_latest_sealed_l2_block() { 0 // Overwrite potentially inaccurate value } else { diff @@ -244,11 +244,11 @@ pub(crate) struct RpcState { pub(super) tx_sender: TxSender, pub(super) sync_state: Option, pub(super) api_config: InternalApiConfig, - /// Number of the first locally available miniblock / L1 batch. May differ from 0 if the node state was recovered + /// Number of the first locally available L2 block / L1 batch. May differ from 0 if the node state was recovered /// from a snapshot. 
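Two details of the `update`/`diff` pair above are easy to miss: `fetch_max` returns the *previous* value, so the post-update maximum has to be recomputed with `max`, and `saturating_sub` clamps the diff to zero when the queried block is ahead of the tracker (which also bumps the tracker as a side effect). A standalone check of both behaviors, using plain `u32`s in place of `L2BlockNumber`:

```rust
use std::sync::atomic::{AtomicU32, Ordering};

struct Tracker(AtomicU32);

impl Tracker {
    /// Monotonically raises the stored number and returns the value after
    /// the update; note that `fetch_max` itself returns the previous value.
    fn update(&self, maybe_newer: u32) -> u32 {
        let prev = self.0.fetch_max(maybe_newer, Ordering::Relaxed);
        prev.max(maybe_newer)
    }

    /// Difference between the tracked (last sealed) number and `block`,
    /// clamped to 0 when `block` is ahead of the tracker.
    fn diff(&self, block: u32) -> u32 {
        self.update(block).saturating_sub(block)
    }
}

fn main() {
    let tracker = Tracker(AtomicU32::new(10));
    assert_eq!(tracker.diff(7), 3);  // older block: positive lag
    assert_eq!(tracker.diff(12), 0); // newer block also raises the tracker...
    assert_eq!(tracker.diff(11), 1); // ...as this follow-up diff shows
}
```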
pub(super) start_info: BlockStartInfo, pub(super) mempool_cache: Option, - pub(super) last_sealed_miniblock: SealedMiniblockNumber, + pub(super) last_sealed_l2_block: SealedL2BlockNumber, } impl RpcState { diff --git a/core/lib/zksync_core/src/api_server/web3/tests/debug.rs b/core/lib/zksync_core/src/api_server/web3/tests/debug.rs index 4ffba5f0ff87..a025aff3085b 100644 --- a/core/lib/zksync_core/src/api_server/web3/tests/debug.rs +++ b/core/lib/zksync_core/src/api_server/web3/tests/debug.rs @@ -37,13 +37,13 @@ impl HttpTest for TraceBlockTest { async fn test(&self, client: &HttpClient, pool: &ConnectionPool) -> anyhow::Result<()> { let tx_results = [0, 1, 2].map(execute_l2_transaction_with_traces); let mut storage = pool.connection().await?; - let new_miniblock = store_miniblock(&mut storage, self.0, &tx_results).await?; + let new_l2_block = store_l2_block(&mut storage, self.0, &tx_results).await?; drop(storage); let block_ids = [ api::BlockId::Number((*self.0).into()), api::BlockId::Number(api::BlockNumber::Latest), - api::BlockId::Hash(new_miniblock.hash), + api::BlockId::Hash(new_l2_block.hash), ]; for block_id in block_ids { @@ -100,7 +100,7 @@ impl HttpTest for TraceBlockFlatTest { async fn test(&self, client: &HttpClient, pool: &ConnectionPool) -> anyhow::Result<()> { let tx_results = [0, 1, 2].map(execute_l2_transaction_with_traces); let mut storage = pool.connection().await?; - let _new_miniblock = store_miniblock(&mut storage, self.0, &tx_results).await?; + store_l2_block(&mut storage, self.0, &tx_results).await?; drop(storage); let block_ids = [ @@ -176,7 +176,7 @@ impl HttpTest for TraceTransactionTest { async fn test(&self, client: &HttpClient, pool: &ConnectionPool) -> anyhow::Result<()> { let tx_results = [execute_l2_transaction_with_traces(0)]; let mut storage = pool.connection().await?; - store_miniblock(&mut storage, L2BlockNumber(1), &tx_results).await?; + store_l2_block(&mut storage, L2BlockNumber(1), &tx_results).await?; drop(storage); let expected_calls: Vec<_> = tx_results[0] @@ -213,22 +213,22 @@ impl HttpTest for TraceBlockTestWithSnapshotRecovery { } async fn test(&self, client: &HttpClient, pool: &ConnectionPool) -> anyhow::Result<()> { - let snapshot_miniblock_number = StorageInitialization::SNAPSHOT_RECOVERY_BLOCK; - let missing_miniblock_numbers = [ + let snapshot_l2_block_number = StorageInitialization::SNAPSHOT_RECOVERY_BLOCK; + let missing_l2_block_numbers = [ L2BlockNumber(0), - snapshot_miniblock_number - 1, - snapshot_miniblock_number, + snapshot_l2_block_number - 1, + snapshot_l2_block_number, ]; - for number in missing_miniblock_numbers { + for number in missing_l2_block_numbers { let error = client .trace_block_by_number(number.0.into(), None) .await .unwrap_err(); - assert_pruned_block_error(&error, snapshot_miniblock_number + 1); + assert_pruned_block_error(&error, snapshot_l2_block_number + 1); } - TraceBlockTest(snapshot_miniblock_number + 2) + TraceBlockTest(snapshot_l2_block_number + 2) .test(client, pool) .await?; Ok(()) diff --git a/core/lib/zksync_core/src/api_server/web3/tests/filters.rs b/core/lib/zksync_core/src/api_server/web3/tests/filters.rs index 83f336da0ba8..7fb384253101 100644 --- a/core/lib/zksync_core/src/api_server/web3/tests/filters.rs +++ b/core/lib/zksync_core/src/api_server/web3/tests/filters.rs @@ -31,7 +31,7 @@ impl HttpTest for BasicFilterChangesTest { let tx_result = execute_l2_transaction(create_l2_transaction(1, 2)); let new_tx_hash = tx_result.hash; - let new_miniblock = store_miniblock( + let new_l2_block = 
store_l2_block( &mut pool.connection().await?, if self.snapshot_recovery { StorageInitialization::SNAPSHOT_RECOVERY_BLOCK + 2 @@ -45,7 +45,7 @@ impl HttpTest for BasicFilterChangesTest { let block_filter_changes = client.get_filter_changes(block_filter_id).await?; assert_matches!( block_filter_changes, - FilterChanges::Hashes(hashes) if hashes == [new_miniblock.hash] + FilterChanges::Hashes(hashes) if hashes == [new_l2_block.hash] ); let block_filter_changes = client.get_filter_changes(block_filter_id).await?; assert_matches!(block_filter_changes, FilterChanges::Hashes(hashes) if hashes.is_empty()); @@ -118,12 +118,12 @@ impl HttpTest for LogFilterChangesTest { let topics_filter_id = client.new_filter(topics_filter).await?; let mut storage = pool.connection().await?; - let next_local_miniblock = if self.snapshot_recovery { + let next_local_l2_block = if self.snapshot_recovery { StorageInitialization::SNAPSHOT_RECOVERY_BLOCK.0 + 2 } else { 1 }; - let (_, events) = store_events(&mut storage, next_local_miniblock, 0).await?; + let (_, events) = store_events(&mut storage, next_local_l2_block, 0).await?; drop(storage); let events: Vec<_> = events.iter().collect(); @@ -217,7 +217,7 @@ impl HttpTest for LogFilterChangesWithBlockBoundariesTest { }; assert_eq!(bounded_logs, upper_bound_logs); - // Add another miniblock with events to the storage. + // Add another L2 block with events to the storage. let mut storage = pool.connection().await?; let (_, new_events) = store_events(&mut storage, 2, 4).await?; drop(storage); @@ -234,7 +234,7 @@ impl HttpTest for LogFilterChangesWithBlockBoundariesTest { let new_bounded_logs = client.get_filter_changes(bounded_filter_id).await?; assert_matches!(new_bounded_logs, FilterChanges::Hashes(hashes) if hashes.is_empty()); - // Add miniblock #3. It should not be picked up by the bounded and upper bound filters, + // Add L2 block #3. It should not be picked up by the bounded and upper bound filters, // and should be picked up by the lower bound filter. let mut storage = pool.connection().await?; let (_, new_events) = store_events(&mut storage, 3, 8).await?; diff --git a/core/lib/zksync_core/src/api_server/web3/tests/mod.rs b/core/lib/zksync_core/src/api_server/web3/tests/mod.rs index e97d5bf753e8..45258d92074d 100644 --- a/core/lib/zksync_core/src/api_server/web3/tests/mod.rs +++ b/core/lib/zksync_core/src/api_server/web3/tests/mod.rs @@ -55,7 +55,7 @@ use crate::{ }, genesis::{insert_genesis_batch, mock_genesis_config, GenesisParams}, utils::testonly::{ - create_l1_batch, create_l1_batch_metadata, create_l2_transaction, create_miniblock, + create_l1_batch, create_l1_batch_metadata, create_l2_block, create_l2_transaction, l1_batch_metadata_to_commitment_artifacts, prepare_recovery_snapshot, }, }; @@ -329,7 +329,7 @@ impl StorageInitialization { .await?; // Insert the next L1 batch in the storage so that the API server doesn't hang up. - store_miniblock(storage, Self::SNAPSHOT_RECOVERY_BLOCK + 1, &[]).await?; + store_l2_block(storage, Self::SNAPSHOT_RECOVERY_BLOCK + 1, &[]).await?; seal_l1_batch(storage, Self::SNAPSHOT_RECOVERY_BATCH + 1).await?; } } @@ -408,8 +408,8 @@ fn execute_l2_transaction(transaction: L2Tx) -> TransactionExecutionResult { } } -/// Stores miniblock #1 with a single transaction and returns the miniblock header + transaction hash. -async fn store_miniblock( +/// Stores L2 block with a single transaction and returns the L2 block header + transaction hash. 
+async fn store_l2_block( storage: &mut Connection<'_, Core>, number: L2BlockNumber, transaction_results: &[TransactionExecutionResult], @@ -424,13 +424,13 @@ async fn store_miniblock( assert_matches!(tx_submission_result, L2TxSubmissionResult::Added); } - let new_miniblock = create_miniblock(number.0); - storage.blocks_dal().insert_l2_block(&new_miniblock).await?; + let new_l2_block = create_l2_block(number.0); + storage.blocks_dal().insert_l2_block(&new_l2_block).await?; storage .transactions_dal() - .mark_txs_as_executed_in_l2_block(new_miniblock.number, transaction_results, 1.into()) + .mark_txs_as_executed_in_l2_block(new_l2_block.number, transaction_results, 1.into()) .await?; - Ok(new_miniblock) + Ok(new_l2_block) } async fn seal_l1_batch( @@ -460,12 +460,12 @@ async fn seal_l1_batch( async fn store_events( storage: &mut Connection<'_, Core>, - miniblock_number: u32, + l2_block_number: u32, start_idx: u32, ) -> anyhow::Result<(IncludedTxLocation, Vec)> { - let new_miniblock = create_miniblock(miniblock_number); - let l1_batch_number = L1BatchNumber(miniblock_number); - storage.blocks_dal().insert_l2_block(&new_miniblock).await?; + let new_l2_block = create_l2_block(l2_block_number); + let l1_batch_number = L1BatchNumber(l2_block_number); + storage.blocks_dal().insert_l2_block(&new_l2_block).await?; let tx_location = IncludedTxLocation { tx_hash: H256::repeat_byte(1), tx_index_in_l2_block: 0, @@ -504,7 +504,7 @@ async fn store_events( storage .events_dal() .save_events( - L2BlockNumber(miniblock_number), + L2BlockNumber(l2_block_number), &[(tx_location, events.iter().collect())], ) .await?; @@ -620,27 +620,27 @@ impl HttpTest for L1BatchMethodsWithSnapshotRecovery { } async fn test(&self, client: &HttpClient, _pool: &ConnectionPool) -> anyhow::Result<()> { - let miniblock_number = StorageInitialization::SNAPSHOT_RECOVERY_BLOCK + 1; + let l2_block_number = StorageInitialization::SNAPSHOT_RECOVERY_BLOCK + 1; let l1_batch_number = StorageInitialization::SNAPSHOT_RECOVERY_BATCH + 1; assert_eq!( client.get_l1_batch_number().await?, l1_batch_number.0.into() ); - // `get_miniblock_range` method - let miniblock_range = client - .get_miniblock_range(l1_batch_number) + // `get_l2_block_range` method + let l2_block_range = client + .get_l2_block_range(l1_batch_number) .await? 
.context("no range for sealed L1 batch")?; - assert_eq!(miniblock_range.0, miniblock_number.0.into()); - assert_eq!(miniblock_range.1, miniblock_number.0.into()); + assert_eq!(l2_block_range.0, l2_block_number.0.into()); + assert_eq!(l2_block_range.1, l2_block_number.0.into()); - let miniblock_range_for_future_batch = - client.get_miniblock_range(l1_batch_number + 1).await?; - assert_eq!(miniblock_range_for_future_batch, None); + let l2_block_range_for_future_batch = + client.get_l2_block_range(l1_batch_number + 1).await?; + assert_eq!(l2_block_range_for_future_batch, None); let error = client - .get_miniblock_range(l1_batch_number - 1) + .get_l2_block_range(l1_batch_number - 1) .await .unwrap_err(); assert_pruned_l1_batch_error(&error, l1_batch_number); @@ -712,21 +712,21 @@ impl HttpTest for StorageAccessWithSnapshotRecovery { async fn test(&self, client: &HttpClient, _pool: &ConnectionPool) -> anyhow::Result<()> { let address = Address::repeat_byte(1); - let first_local_miniblock = StorageInitialization::SNAPSHOT_RECOVERY_BLOCK + 1; - for number in [0, 1, first_local_miniblock.0 - 1] { + let first_local_l2_block = StorageInitialization::SNAPSHOT_RECOVERY_BLOCK + 1; + for number in [0, 1, first_local_l2_block.0 - 1] { let number = api::BlockIdVariant::BlockNumber(number.into()); let error = client.get_code(address, Some(number)).await.unwrap_err(); - assert_pruned_block_error(&error, first_local_miniblock); + assert_pruned_block_error(&error, first_local_l2_block); let error = client.get_balance(address, Some(number)).await.unwrap_err(); - assert_pruned_block_error(&error, first_local_miniblock); + assert_pruned_block_error(&error, first_local_l2_block); let error = client .get_storage_at(address, 0.into(), Some(number)) .await .unwrap_err(); - assert_pruned_block_error(&error, first_local_miniblock); + assert_pruned_block_error(&error, first_local_l2_block); } - for number in [api::BlockNumber::Latest, first_local_miniblock.0.into()] { + for number in [api::BlockNumber::Latest, first_local_l2_block.0.into()] { let number = api::BlockIdVariant::BlockNumber(number); let code = client.get_code(address, Some(number)).await?; assert_eq!(code.0, b"code"); @@ -754,15 +754,15 @@ impl HttpTest for TransactionCountTest { async fn test(&self, client: &HttpClient, pool: &ConnectionPool) -> anyhow::Result<()> { let test_address = Address::repeat_byte(11); let mut storage = pool.connection().await?; - let mut miniblock_number = L2BlockNumber(0); + let mut l2_block_number = L2BlockNumber(0); for nonce in [0, 1] { let mut committed_tx = create_l2_transaction(10, 200); committed_tx.common_data.initiator_address = test_address; committed_tx.common_data.nonce = Nonce(nonce); - miniblock_number += 1; - store_miniblock( + l2_block_number += 1; + store_l2_block( &mut storage, - miniblock_number, + l2_block_number, &[execute_l2_transaction(committed_tx)], ) .await?; @@ -772,7 +772,7 @@ impl HttpTest for TransactionCountTest { ); storage .storage_logs_dal() - .insert_storage_logs(miniblock_number, &[(H256::zero(), vec![nonce_log])]) + .insert_storage_logs(l2_block_number, &[(H256::zero(), vec![nonce_log])]) .await?; } @@ -791,7 +791,7 @@ impl HttpTest for TransactionCountTest { let pending_count = client.get_transaction_count(test_address, None).await?; assert_eq!(pending_count, 3.into()); - let latest_block_numbers = [api::BlockNumber::Latest, miniblock_number.0.into()]; + let latest_block_numbers = [api::BlockNumber::Latest, l2_block_number.0.into()]; for number in latest_block_numbers { let number = 
api::BlockIdVariant::BlockNumber(number); let latest_count = client @@ -881,8 +881,8 @@ impl HttpTest for TransactionCountAfterSnapshotRecoveryTest { assert_pruned_block_error(&error, StorageInitialization::SNAPSHOT_RECOVERY_BLOCK + 1); } - let latest_miniblock_number = StorageInitialization::SNAPSHOT_RECOVERY_BLOCK + 1; - let latest_block_numbers = [api::BlockNumber::Latest, latest_miniblock_number.0.into()]; + let latest_l2_block_number = StorageInitialization::SNAPSHOT_RECOVERY_BLOCK + 1; + let latest_block_numbers = [api::BlockNumber::Latest, latest_l2_block_number.0.into()]; for number in latest_block_numbers { let number = api::BlockIdVariant::BlockNumber(number); let latest_count = client @@ -906,7 +906,7 @@ struct TransactionReceiptsTest; impl HttpTest for TransactionReceiptsTest { async fn test(&self, client: &HttpClient, pool: &ConnectionPool) -> anyhow::Result<()> { let mut storage = pool.connection().await?; - let miniblock_number = L2BlockNumber(1); + let l2_block_number = L2BlockNumber(1); let tx1 = create_l2_transaction(10, 200); let tx2 = create_l2_transaction(10, 200); @@ -914,7 +914,7 @@ impl HttpTest for TransactionReceiptsTest { execute_l2_transaction(tx1.clone()), execute_l2_transaction(tx2.clone()), ]; - store_miniblock(&mut storage, miniblock_number, &tx_results).await?; + store_l2_block(&mut storage, l2_block_number, &tx_results).await?; let mut expected_receipts = Vec::new(); for tx in &tx_results { @@ -930,7 +930,7 @@ impl HttpTest for TransactionReceiptsTest { } let receipts = client - .get_block_receipts(api::BlockId::Number(miniblock_number.0.into())) + .get_block_receipts(api::BlockId::Number(l2_block_number.0.into())) .await? .context("no receipts")?; assert_eq!(receipts.len(), 2); @@ -967,7 +967,7 @@ impl HttpTest for AllAccountBalancesTest { assert_eq!(balances, HashMap::new()); let mut storage = pool.connection().await?; - store_miniblock(&mut storage, L2BlockNumber(1), &[]).await?; + store_l2_block(&mut storage, L2BlockNumber(1), &[]).await?; let eth_balance_key = storage_key_for_eth_balance(&Self::ADDRESS); let eth_balance = U256::one() << 64; @@ -990,7 +990,7 @@ impl HttpTest for AllAccountBalancesTest { let balances = client.get_all_account_balances(Self::ADDRESS).await?; assert_eq!(balances, HashMap::from([(Address::zero(), eth_balance)])); - store_miniblock(&mut storage, L2BlockNumber(2), &[]).await?; + store_l2_block(&mut storage, L2BlockNumber(2), &[]).await?; let token_balance_key = storage_key_for_standard_token_balance( AccountTreeId::new(custom_token.l2_address), &Self::ADDRESS, diff --git a/core/lib/zksync_core/src/api_server/web3/tests/snapshots.rs b/core/lib/zksync_core/src/api_server/web3/tests/snapshots.rs index 63c616f321c7..d8af509dd6bd 100644 --- a/core/lib/zksync_core/src/api_server/web3/tests/snapshots.rs +++ b/core/lib/zksync_core/src/api_server/web3/tests/snapshots.rs @@ -30,7 +30,7 @@ impl SnapshotBasicsTest { impl HttpTest for SnapshotBasicsTest { async fn test(&self, client: &HttpClient, pool: &ConnectionPool) -> anyhow::Result<()> { let mut storage = pool.connection().await.unwrap(); - store_miniblock( + store_l2_block( &mut storage, L2BlockNumber(1), &[execute_l2_transaction(create_l2_transaction(1, 2))], diff --git a/core/lib/zksync_core/src/api_server/web3/tests/vm.rs b/core/lib/zksync_core/src/api_server/web3/tests/vm.rs index 334ab6de8b43..d2070aa1602f 100644 --- a/core/lib/zksync_core/src/api_server/web3/tests/vm.rs +++ b/core/lib/zksync_core/src/api_server/web3/tests/vm.rs @@ -98,8 +98,8 @@ impl HttpTest for 
CallTestAfterSnapshotRecovery { } fn transaction_executor(&self) -> MockTransactionExecutor { - let first_local_miniblock = StorageInitialization::SNAPSHOT_RECOVERY_BLOCK + 1; - CallTest::create_executor(first_local_miniblock) + let first_local_l2_block = StorageInitialization::SNAPSHOT_RECOVERY_BLOCK + 1; + CallTest::create_executor(first_local_l2_block) } async fn test(&self, client: &HttpClient, _pool: &ConnectionPool) -> anyhow::Result<()> { @@ -116,7 +116,7 @@ impl HttpTest for CallTestAfterSnapshotRecovery { .await?; assert_eq!(call_result.0, b"output"); - let first_local_miniblock = StorageInitialization::SNAPSHOT_RECOVERY_BLOCK + 1; + let first_local_l2_block = StorageInitialization::SNAPSHOT_RECOVERY_BLOCK + 1; let pruned_block_numbers = [0, 1, StorageInitialization::SNAPSHOT_RECOVERY_BLOCK.0]; for number in pruned_block_numbers { let number = api::BlockIdVariant::BlockNumber(number.into()); @@ -124,11 +124,11 @@ impl HttpTest for CallTestAfterSnapshotRecovery { .call(CallTest::call_request(b"pruned"), Some(number)) .await .unwrap_err(); - assert_pruned_block_error(&error, first_local_miniblock); + assert_pruned_block_error(&error, first_local_l2_block); } - let first_miniblock_numbers = [api::BlockNumber::Latest, first_local_miniblock.0.into()]; - for number in first_miniblock_numbers { + let first_l2_block_numbers = [api::BlockNumber::Latest, first_local_l2_block.0.into()]; + for number in first_l2_block_numbers { let number = api::BlockIdVariant::BlockNumber(number); let call_result = client .call(CallTest::call_request(b"first"), Some(number)) @@ -355,7 +355,7 @@ impl HttpTest for TraceCallTestAfterSnapshotRecovery { .await?; TraceCallTest::assert_debug_call(&call_request, &call_result); - let first_local_miniblock = StorageInitialization::SNAPSHOT_RECOVERY_BLOCK + 1; + let first_local_l2_block = StorageInitialization::SNAPSHOT_RECOVERY_BLOCK + 1; let pruned_block_numbers = [0, 1, StorageInitialization::SNAPSHOT_RECOVERY_BLOCK.0]; for number in pruned_block_numbers { let number = api::BlockIdVariant::BlockNumber(number.into()); @@ -363,12 +363,12 @@ impl HttpTest for TraceCallTestAfterSnapshotRecovery { .call(CallTest::call_request(b"pruned"), Some(number)) .await .unwrap_err(); - assert_pruned_block_error(&error, first_local_miniblock); + assert_pruned_block_error(&error, first_local_l2_block); } let call_request = CallTest::call_request(b"first"); - let first_miniblock_numbers = [api::BlockNumber::Latest, first_local_miniblock.0.into()]; - for number in first_miniblock_numbers { + let first_l2_block_numbers = [api::BlockNumber::Latest, first_local_l2_block.0.into()]; + for number in first_l2_block_numbers { let number = api::BlockId::Number(number); let call_result = client .trace_call(call_request.clone(), Some(number), None) diff --git a/core/lib/zksync_core/src/api_server/web3/tests/ws.rs b/core/lib/zksync_core/src/api_server/web3/tests/ws.rs index 88e0603848ff..646fb96d4780 100644 --- a/core/lib/zksync_core/src/api_server/web3/tests/ws.rs +++ b/core/lib/zksync_core/src/api_server/web3/tests/ws.rs @@ -71,7 +71,7 @@ async fn wait_for_notifiers( } #[allow(clippy::needless_pass_by_ref_mut)] // false positive -async fn wait_for_notifier_miniblock( +async fn wait_for_notifier_l2_block( events: &mut mpsc::UnboundedReceiver, sub_type: SubscriptionType, expected: L2BlockNumber, @@ -82,7 +82,7 @@ async fn wait_for_notifier_miniblock( .recv() .await .expect("Events emitter unexpectedly dropped"); - if let PubSubEvent::MiniblockAdvanced(ty, number) = event { + if let 
PubSubEvent::L2BlockAdvanced(ty, number) = event { if ty == sub_type && number >= expected { break; } @@ -120,9 +120,9 @@ async fn notifiers_start_after_snapshot_recovery() { assert!(!handle.is_finished()); } - // Emulate creating the first miniblock; check that notifiers react to it. - let first_local_miniblock = StorageInitialization::SNAPSHOT_RECOVERY_BLOCK + 1; - store_miniblock(&mut storage, first_local_miniblock, &[]) + // Emulate creating the first L2 block; check that notifiers react to it. + let first_local_l2_block = StorageInitialization::SNAPSHOT_RECOVERY_BLOCK + 1; + store_l2_block(&mut storage, first_local_l2_block, &[]) .await .unwrap(); @@ -270,12 +270,12 @@ impl WsTest for BasicSubscriptionsTest { let mut storage = pool.connection().await?; let tx_result = execute_l2_transaction(create_l2_transaction(1, 2)); let new_tx_hash = tx_result.hash; - let miniblock_number = if self.snapshot_recovery { + let l2_block_number = if self.snapshot_recovery { StorageInitialization::SNAPSHOT_RECOVERY_BLOCK + 2 } else { L2BlockNumber(1) }; - let new_miniblock = store_miniblock(&mut storage, miniblock_number, &[tx_result]).await?; + let new_l2_block = store_l2_block(&mut storage, l2_block_number, &[tx_result]).await?; drop(storage); let received_tx_hash = tokio::time::timeout(TEST_TIMEOUT, txs_subscription.next()) @@ -289,12 +289,12 @@ impl WsTest for BasicSubscriptionsTest { .context("New blocks subscription terminated")??; assert_eq!( received_block_header.number, - Some(new_miniblock.number.0.into()) + Some(new_l2_block.number.0.into()) ); - assert_eq!(received_block_header.hash, Some(new_miniblock.hash)); + assert_eq!(received_block_header.hash, Some(new_l2_block.hash)); assert_eq!( received_block_header.timestamp, - new_miniblock.timestamp.into() + new_l2_block.timestamp.into() ); blocks_subscription.unsubscribe().await?; Ok(()) @@ -393,12 +393,12 @@ impl WsTest for LogSubscriptionsTest { } = LogSubscriptions::new(client, &mut pub_sub_events).await?; let mut storage = pool.connection().await?; - let next_miniblock_number = if self.snapshot_recovery { + let next_l2_block_number = if self.snapshot_recovery { StorageInitialization::SNAPSHOT_RECOVERY_BLOCK.0 + 2 } else { 1 }; - let (tx_location, events) = store_events(&mut storage, next_miniblock_number, 0).await?; + let (tx_location, events) = store_events(&mut storage, next_l2_block_number, 0).await?; drop(storage); let events: Vec<_> = events.iter().collect(); @@ -407,7 +407,7 @@ impl WsTest for LogSubscriptionsTest { assert_eq!(log.transaction_index, Some(0.into())); assert_eq!(log.log_index, Some(i.into())); assert_eq!(log.transaction_hash, Some(tx_location.tx_hash)); - assert_eq!(log.block_number, Some(next_miniblock_number.into())); + assert_eq!(log.block_number, Some(next_l2_block_number.into())); } assert_logs_match(&all_logs, &events); @@ -572,13 +572,13 @@ impl WsTest for LogSubscriptionsWithDelayTest { // Wait until notifiers are initialized. wait_for_notifiers(&mut pub_sub_events, &[SubscriptionType::Logs]).await; - // Store a miniblock w/o subscriptions being present. + // Store an L2 block w/o subscriptions being present. let mut storage = pool.connection().await?; store_events(&mut storage, 1, 0).await?; drop(storage); - // Wait for the log notifier to process the new miniblock. - wait_for_notifier_miniblock( + // Wait for the log notifier to process the new L2 block. 
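`wait_for_notifier_l2_block` above follows a standard test pattern: drain an event channel until an event satisfies a predicate, bounded by a timeout. A generic sketch of the pattern (tokio `mpsc` plus `tokio::time::timeout`; the `TEST_TIMEOUT` value here is a stand-in, not the constant these tests actually use):

```rust
use std::time::Duration;
use tokio::sync::mpsc;

const TEST_TIMEOUT: Duration = Duration::from_secs(10); // stand-in value

/// Receives events until one satisfies `matches`, panicking on timeout or
/// channel closure, the usual shape for notifier tests.
async fn wait_for_event<T>(
    events: &mut mpsc::UnboundedReceiver<T>,
    mut matches: impl FnMut(&T) -> bool,
) -> T {
    let wait = async {
        loop {
            let event = events.recv().await.expect("event emitter dropped");
            if matches(&event) {
                return event;
            }
        }
    };
    tokio::time::timeout(TEST_TIMEOUT, wait)
        .await
        .expect("timed out waiting for event")
}

#[tokio::main]
async fn main() {
    let (tx, mut rx) = mpsc::unbounded_channel();
    for number in [1u32, 2, 3] {
        tx.send(number).unwrap();
    }
    // E.g. wait until the notifier reports block >= 3.
    let got = wait_for_event(&mut rx, |&n| n >= 3).await;
    assert_eq!(got, 3);
}
```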
+ wait_for_notifier_l2_block( &mut pub_sub_events, SubscriptionType::Logs, L2BlockNumber(1), diff --git a/core/lib/zksync_core/src/basic_witness_input_producer/mod.rs b/core/lib/zksync_core/src/basic_witness_input_producer/mod.rs index e97ed12900ab..34d54e3d449d 100644 --- a/core/lib/zksync_core/src/basic_witness_input_producer/mod.rs +++ b/core/lib/zksync_core/src/basic_witness_input_producer/mod.rs @@ -51,7 +51,7 @@ impl BasicWitnessInputProducer { .block_on(connection_pool.connection()) .context("failed to get connection for BasicWitnessInputProducer")?; - let miniblocks_execution_data = rt_handle.block_on( + let l2_blocks_execution_data = rt_handle.block_on( connection .transactions_dal() .get_l2_blocks_to_execute_for_l1_batch(l1_batch_number), @@ -63,33 +63,30 @@ impl BasicWitnessInputProducer { tracing::info!("Started execution of l1_batch: {l1_batch_number:?}"); - let next_miniblocks_data = miniblocks_execution_data + let next_l2_blocks_data = l2_blocks_execution_data .iter() .skip(1) .map(Some) .chain([None]); - let miniblocks_data = miniblocks_execution_data.iter().zip(next_miniblocks_data); + let l2_blocks_data = l2_blocks_execution_data.iter().zip(next_l2_blocks_data); - for (miniblock_data, next_miniblock_data) in miniblocks_data { + for (l2_block_data, next_l2_block_data) in l2_blocks_data { tracing::debug!( - "Started execution of miniblock: {:?}, executing {:?} transactions", - miniblock_data.number, - miniblock_data.txs.len(), + "Started execution of L2 block: {:?}, executing {:?} transactions", + l2_block_data.number, + l2_block_data.txs.len(), ); - for tx in &miniblock_data.txs { + for tx in &l2_block_data.txs { tracing::trace!("Started execution of tx: {tx:?}"); execute_tx(tx, &mut vm) .context("failed to execute transaction in BasicWitnessInputProducer")?; tracing::trace!("Finished execution of tx: {tx:?}"); } - if let Some(next_miniblock_data) = next_miniblock_data { - vm.start_new_l2_block(L2BlockEnv::from_miniblock_data(next_miniblock_data)); + if let Some(next_l2_block_data) = next_l2_block_data { + vm.start_new_l2_block(L2BlockEnv::from_l2_block_data(next_l2_block_data)); } - tracing::debug!( - "Finished execution of miniblock: {:?}", - miniblock_data.number - ); + tracing::debug!("Finished execution of L2 block: {:?}", l2_block_data.number); } vm.finish_batch(); tracing::info!("Finished execution of l1_batch: {l1_batch_number:?}"); diff --git a/core/lib/zksync_core/src/consensus/fetcher.rs b/core/lib/zksync_core/src/consensus/fetcher.rs index febd9140d5d9..f83700787c5d 100644 --- a/core/lib/zksync_core/src/consensus/fetcher.rs +++ b/core/lib/zksync_core/src/consensus/fetcher.rs @@ -14,7 +14,7 @@ use crate::{ pub type P2PConfig = executor::Config; -/// Miniblock fetcher. +/// L2 block fetcher. pub struct Fetcher { pub store: Store, pub sync_state: SyncState, @@ -89,7 +89,7 @@ impl Fetcher { } } - /// Task fetching miniblocks using json RPC endpoint of the main node. + /// Task fetching L2 blocks using JSON-RPC endpoint of the main node. pub async fn run_centralized( self, ctx: &ctx::Ctx, diff --git a/core/lib/zksync_core/src/consensus/mod.rs b/core/lib/zksync_core/src/consensus/mod.rs index 5fa684cce02a..fc6471afd57d 100644 --- a/core/lib/zksync_core/src/consensus/mod.rs +++ b/core/lib/zksync_core/src/consensus/mod.rs @@ -26,7 +26,7 @@ pub struct MainNodeConfig { } impl MainNodeConfig { - /// Task generating consensus certificates for the miniblocks generated by `StateKeeper`. 
+ /// Task generating consensus certificates for the L2 blocks generated by `StateKeeper`. /// Broadcasts the blocks with certificates to gossip network peers. pub async fn run(self, ctx: &ctx::Ctx, store: Store) -> anyhow::Result<()> { scope::run!(&ctx, |ctx, s| async { diff --git a/core/lib/zksync_core/src/consensus/storage/mod.rs b/core/lib/zksync_core/src/consensus/storage/mod.rs index df0ffeae5856..22685538dfbe 100644 --- a/core/lib/zksync_core/src/consensus/storage/mod.rs +++ b/core/lib/zksync_core/src/consensus/storage/mod.rs @@ -203,7 +203,7 @@ impl Store { )) } - /// Waits for the `number` miniblock. + /// Waits for the `number` L2 block. pub async fn wait_for_payload( &self, ctx: &ctx::Ctx, @@ -240,7 +240,7 @@ impl Store { async fn certificates_range(&self, ctx: &ctx::Ctx) -> ctx::Result { let mut conn = self.access(ctx).await.wrap("access()")?; - // Fetch the range of miniblocks in storage. + // Fetch the range of L2 blocks in storage. let block_range = conn.block_range(ctx).await.context("block_range")?; // Fetch the range of certificates in storage. @@ -258,7 +258,7 @@ impl Store { .as_ref() .map_or(first_expected_cert, |cert| cert.header().number.next()); - // Check that the first certificate in storage has the expected miniblock number. + // Check that the first certificate in storage has the expected L2 block number. if let Some(got) = conn .first_certificate(ctx) .await @@ -275,7 +275,7 @@ impl Store { // Check that the node has all the blocks before the next expected certificate, because // the node needs to know the state of the chain up to block `X` to process block `X+1`. if block_range.end < next_expected_cert { - return Err(anyhow::format_err!("inconsistent storage: cannot start consensus for miniblock {next_expected_cert}, because earlier blocks are missing").into()); + return Err(anyhow::format_err!("inconsistent storage: cannot start consensus for L2 block {next_expected_cert}, because earlier blocks are missing").into()); } Ok(storage::BlockStoreState { first: first_expected_cert, @@ -296,14 +296,14 @@ impl Store { .payload(ctx, number) .await .wrap("payload()")? - .context("miniblock disappeared from storage")?; + .context("L2 block disappeared from storage")?; Ok(Some(validator::FinalBlock { payload: payload.encode(), justification, })) } - /// Initializes consensus genesis (with 1 validator) to start at the last miniblock in storage. + /// Initializes consensus genesis (with 1 validator) to start at the last L2 block in storage. /// No-op if db already contains a genesis. pub(super) async fn try_init_genesis( &self, @@ -433,11 +433,11 @@ impl storage::PersistentBlockStore for BlockStore { /// If actions queue is set (and the block has not been stored yet), /// the block will be translated into a sequence of actions. /// The received actions should be fed - /// to `ExternalIO`, so that `StateKeeper` will store the corresponding miniblock in the db. + /// to `ExternalIO`, so that `StateKeeper` will store the corresponding L2 block in the db. /// - /// `store_next_block()` call will wait synchronously for the miniblock. - /// Once miniblock is observed in storage, `store_next_block()` will store a cert for this - /// miniblock. + /// `store_next_block()` call will wait synchronously for the L2 block. + /// Once the L2 block is observed in storage, `store_next_block()` will store a cert for this + /// L2 block. 
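The `certificates_range` checks above boil down to one invariant: certificates may lag arbitrarily far behind stored blocks, but consensus cannot be started for block `N` unless every block before `N` is present. A simplified version of the check over plain integers (a sketch of the invariant only, not the actual storage types):

```rust
/// Next certificate to produce: one past the last stored certificate, or the
/// first expected one if no certificates are stored yet.
fn next_expected_cert(first_expected: u64, last_stored_cert: Option<u64>) -> u64 {
    last_stored_cert.map_or(first_expected, |cert| cert + 1)
}

/// The node must know the chain up to block `X` to process block `X + 1`,
/// so every block before the next expected certificate must be stored.
fn check_consistency(block_range_end: u64, next_expected_cert: u64) -> Result<(), String> {
    if block_range_end < next_expected_cert {
        return Err(format!(
            "inconsistent storage: cannot start consensus for L2 block \
             {next_expected_cert}, because earlier blocks are missing"
        ));
    }
    Ok(())
}

fn main() {
    assert_eq!(next_expected_cert(5, None), 5);
    assert_eq!(next_expected_cert(5, Some(41)), 42);
    assert!(check_consistency(100, 100).is_ok()); // certs fully caught up
    assert!(check_consistency(100, 42).is_ok());  // certs lag behind blocks
    assert!(check_consistency(100, 101).is_err()); // a block is missing
}
```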
async fn queue_next_block( &self, ctx: &ctx::Ctx, @@ -501,8 +501,7 @@ impl storage::ReplicaStore for Store { #[async_trait::async_trait] impl PayloadManager for Store {
- /// Currently (for the main node) proposing is implemented as just converting a miniblock from db (without a cert) into a
- /// payload.
+ /// Currently (for the main node) proposing is implemented as just converting an L2 block from db (without a cert) into a payload.
async fn propose( &self, ctx: &ctx::Ctx, @@ -524,8 +523,8 @@ impl PayloadManager for Store { }
/// Verify that `payload` is a correct proposal for the block `block_number`.
- /// Currently (for the main node) it is implemented as checking whether the received payload
- /// matches the miniblock in the db.
+ /// Currently (for the main node), it is implemented as checking whether the received payload
+ /// matches the L2 block in the db.
async fn verify( &self, ctx: &ctx::Ctx, diff --git a/core/lib/zksync_core/src/consensus/storage/testonly.rs b/core/lib/zksync_core/src/consensus/storage/testonly.rs index 23f7f1eade28..e0a802294e16 100644
--- a/core/lib/zksync_core/src/consensus/storage/testonly.rs
+++ b/core/lib/zksync_core/src/consensus/storage/testonly.rs
@@ -1,4 +1,5 @@
//! Storage test helpers.
+
use anyhow::Context as _;
use zksync_concurrency::{ctx, error::Wrap as _, time};
use zksync_consensus_roles::validator;
@@ -11,7 +12,7 @@ use crate::{ };
impl Store {
- /// Waits for the `number` miniblock to have a certificate.
+ /// Waits for the `number` L2 block to have a certificate.
pub async fn wait_for_certificate( &self, ctx: &ctx::Ctx, @@ -60,7 +61,7 @@ impl Store { Self(pool) }
- /// Waits for `want_last` block to have certificate then fetches all miniblocks with certificates.
+ /// Waits for `want_last` block to have a certificate, then fetches all L2 blocks with certificates.
pub async fn wait_for_certificates( &self, ctx: &ctx::Ctx, diff --git a/core/lib/zksync_core/src/consensus/testonly.rs b/core/lib/zksync_core/src/consensus/testonly.rs index ddb8814a9bf7..e0823f241530 100644
--- a/core/lib/zksync_core/src/consensus/testonly.rs
+++ b/core/lib/zksync_core/src/consensus/testonly.rs
@@ -47,7 +47,7 @@ pub(crate) struct MockMainNodeClient {
impl MockMainNodeClient {
pub fn for_snapshot_recovery(snapshot: &SnapshotRecoveryStatus) -> Self {
// This block may be requested during node initialization
- let last_miniblock_in_snapshot_batch = api::en::SyncBlock {
+ let last_l2_block_in_snapshot_batch = api::en::SyncBlock {
number: snapshot.l2_block_number, l1_batch_number: snapshot.l1_batch_number, last_in_batch: true, @@ -64,7 +64,7 @@ impl MockMainNodeClient { };
Self {
- l2_blocks: vec![last_miniblock_in_snapshot_batch],
+ l2_blocks: vec![last_l2_block_in_snapshot_batch],
block_number_offset: snapshot.l2_block_number.0, ..Self::default() } @@ -222,12 +222,12 @@ impl StateKeeper { }, }, number: self.last_batch,
- first_miniblock_number: self.last_block,
+ first_l2_block_number: self.last_block,
} } else { self.last_block += 1; self.last_timestamp += 2;
- SyncAction::Miniblock {
+ SyncAction::L2Block {
params: L2BlockParams { timestamp: self.last_timestamp, virtual_blocks: 0, @@ -237,7 +237,7 @@ impl StateKeeper { } }
- /// Pushes a new miniblock with `transactions` transactions to the `StateKeeper`.
+ /// Pushes a new L2 block with `transactions` transactions to the `StateKeeper`.
pub async fn push_block(&mut self, transactions: usize) {
assert!(transactions > 0);
let mut actions = vec![self.open_block()];
@@ -245,7 +245,7 @@ impl StateKeeper {
let tx = create_l2_transaction(self.fee_per_gas, self.gas_per_pubdata);
actions.push(FetchedTransaction::new(tx.into()).into());
}
- actions.push(SyncAction::SealMiniblock);
+ actions.push(SyncAction::SealL2Block);
self.actions_sender.push_actions(actions).await;
}
@@ -258,7 +258,7 @@ impl StateKeeper {
self.batch_sealed = true;
}
- /// Pushes `count` random miniblocks to the StateKeeper.
+ /// Pushes `count` random L2 blocks to the StateKeeper.
pub async fn push_random_blocks(&mut self, rng: &mut impl Rng, count: usize) {
for _ in 0..count {
// 20% chance to seal an L1 batch.
@@ -360,7 +360,7 @@ impl StateKeeperRunner {
pub async fn run(self, ctx: &ctx::Ctx) -> anyhow::Result<()> {
let res = scope::run!(ctx, |ctx, s| async {
let (stop_send, stop_recv) = sync::watch::channel(false);
- let (persistence, miniblock_sealer) =
+ let (persistence, l2_block_sealer) =
StateKeeperPersistence::new(self.store.0.clone(), Address::repeat_byte(11), 5);
let io = ExternalIO::new( @@ -371,10 +371,10 @@ impl StateKeeperRunner { ) .await?;
s.spawn_bg(async {
- Ok(miniblock_sealer
+ Ok(l2_block_sealer
.run()
.await
- .context("miniblock_sealer.run()")?)
+ .context("l2_block_sealer.run()")?)
});
s.spawn_bg::<()>(async {
loop {
diff --git a/core/lib/zksync_core/src/consensus/tests.rs b/core/lib/zksync_core/src/consensus/tests.rs index f52b7a855362..2aa6bb05bf5c 100644
--- a/core/lib/zksync_core/src/consensus/tests.rs
+++ b/core/lib/zksync_core/src/consensus/tests.rs
@@ -28,7 +28,7 @@ async fn test_validator_block_store() {
let rng = &mut ctx.rng();
let store = new_store(false).await;
- // Fill storage with unsigned miniblocks.
+ // Fill storage with unsigned L2 blocks.
// Fetch a suffix of blocks that we will generate (fake) certs for.
let want = scope::run!(ctx, |ctx, s| async {
// Start state keeper.
@@ -95,8 +95,8 @@ fn executor_config(cfg: &network::Config) -> executor::Config { }
// In the current implementation, consensus certificates are created asynchronously
-// for the miniblocks constructed by the StateKeeper. This means that consensus actor
-// is effectively just back filling the consensus certificates for the miniblocks in storage.
+// for the L2 blocks constructed by the StateKeeper. This means that the consensus actor
+// is effectively just backfilling the consensus certificates for the L2 blocks in storage.
#[test_casing(2, [false, true])]
#[tokio::test(flavor = "multi_thread")]
async fn test_validator(from_snapshot: bool) {
@@ -117,7 +117,7 @@ async fn test_validator(from_snapshot: bool) {
store
.wait_for_payload(ctx, sk.last_block())
.await
- .context("sk.wait_for_miniblocks(<1st phase>)")?
+ .context("sk.wait_for_payload(<1st phase>)")?
tracing::info!("Restart consensus actor a couple times, making it process a bunch of blocks each time.");
for iteration in 0..3 {
diff --git a/core/lib/zksync_core/src/db_pruner/README.md b/core/lib/zksync_core/src/db_pruner/README.md index 8ffcc8ce367c..7192a544af82 100644
--- a/core/lib/zksync_core/src/db_pruner/README.md
+++ b/core/lib/zksync_core/src/db_pruner/README.md
@@ -1,7 +1,7 @@
# Db pruner
Database pruner is a component that regularly removes the oldest l1 batches from the database together with
-corresponding miniblocks, events, etc.
+corresponding L2 blocks, events, etc.
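The pruning flow that `db_pruner` implements below is two-phase: a *soft* prune first records the prune boundary so readers start treating the range as gone, and a later *hard* prune physically deletes the rows and reports stats. A toy in-memory model of that flow (a `BTreeMap` in place of Postgres; all names here are illustrative, not the actual DAL API):

```rust
use std::collections::BTreeMap;

#[derive(Default)]
struct ToyDb {
    /// L1 batch number -> payload (stand-in for batches plus their L2 blocks, events, ...).
    batches: BTreeMap<u32, String>,
    /// Boundary recorded by the soft phase; nothing at or below it may be served.
    soft_pruned_up_to: Option<u32>,
}

impl ToyDb {
    /// Phase 1: only record intent, so readers treat the range as pruned.
    fn soft_prune_up_to(&mut self, l1_batch: u32) {
        self.soft_pruned_up_to = Some(l1_batch);
    }

    /// Phase 2: physically delete everything up to the soft-pruned boundary.
    /// Returns the number of deleted batches (cf. the hard-pruning stats).
    fn hard_prune(&mut self) -> usize {
        let Some(boundary) = self.soft_pruned_up_to else { return 0 };
        let keep = self.batches.split_off(&(boundary + 1));
        let deleted = self.batches.len();
        self.batches = keep;
        deleted
    }
}

fn main() {
    let mut db = ToyDb::default();
    for n in 0..10 {
        db.batches.insert(n, format!("batch #{n}"));
    }
    db.soft_prune_up_to(2); // batches 0..=2 are logically gone from here on
    assert_eq!(db.hard_prune(), 3); // ...and physically gone after this call
    assert_eq!(db.batches.len(), 7);
}
```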
**There are two types of objects that are not fully cleaned:** diff --git a/core/lib/zksync_core/src/db_pruner/mod.rs b/core/lib/zksync_core/src/db_pruner/mod.rs index f28c9900a3a3..8024cc93cff9 100644 --- a/core/lib/zksync_core/src/db_pruner/mod.rs +++ b/core/lib/zksync_core/src/db_pruner/mod.rs @@ -144,21 +144,21 @@ impl DbPruner { return Ok(false); } - let (_, next_miniblock_to_prune) = transaction + let (_, next_l2_block_to_prune) = transaction .blocks_dal() .get_l2_block_range_of_l1_batch(next_l1_batch_to_prune) .await? - .with_context(|| format!("L1 batch #{next_l1_batch_to_prune} is ready to be pruned, but has no miniblocks"))?; + .with_context(|| format!("L1 batch #{next_l1_batch_to_prune} is ready to be pruned, but has no L2 blocks"))?; transaction .pruning_dal() - .soft_prune_batches_range(next_l1_batch_to_prune, next_miniblock_to_prune) + .soft_prune_batches_range(next_l1_batch_to_prune, next_l2_block_to_prune) .await?; transaction.commit().await?; let latency = latency.observe(); tracing::info!( - "Soft pruned db l1_batches up to {next_l1_batch_to_prune} and miniblocks up to {next_miniblock_to_prune}, operation took {latency:?}", + "Soft pruned db l1_batches up to {next_l1_batch_to_prune} and L2 blocks up to {next_l2_block_to_prune}, operation took {latency:?}", ); Ok(true) @@ -173,14 +173,14 @@ impl DbPruner { current_pruning_info.last_soft_pruned_l1_batch.with_context(|| { format!("bogus pruning info {current_pruning_info:?}: trying to hard-prune data, but there is no soft-pruned L1 batch") })?; - let last_soft_pruned_miniblock = + let last_soft_pruned_l2_block = current_pruning_info.last_soft_pruned_l2_block.with_context(|| { - format!("bogus pruning info {current_pruning_info:?}: trying to hard-prune data, but there is no soft-pruned miniblock") + format!("bogus pruning info {current_pruning_info:?}: trying to hard-prune data, but there is no soft-pruned L2 block") })?; let stats = transaction .pruning_dal() - .hard_prune_batches_range(last_soft_pruned_l1_batch, last_soft_pruned_miniblock) + .hard_prune_batches_range(last_soft_pruned_l1_batch, last_soft_pruned_l2_block) .await?; Self::report_hard_pruning_stats(stats); transaction.commit().await?; @@ -193,7 +193,7 @@ impl DbPruner { let latency = latency.observe(); tracing::info!( - "Hard pruned db l1_batches up to {last_soft_pruned_l1_batch} and miniblocks up to {last_soft_pruned_miniblock}, \ + "Hard pruned db l1_batches up to {last_soft_pruned_l1_batch} and L2 blocks up to {last_soft_pruned_l2_block}, \ operation took {latency:?}" ); Ok(()) @@ -202,7 +202,7 @@ impl DbPruner { fn report_hard_pruning_stats(stats: HardPruningStats) { let HardPruningStats { deleted_l1_batches, - deleted_l2_blocks: deleted_miniblocks, + deleted_l2_blocks, deleted_storage_logs_from_past_batches, deleted_storage_logs_from_pruned_batches, deleted_events, @@ -212,7 +212,7 @@ impl DbPruner { let deleted_storage_logs = deleted_storage_logs_from_past_batches + deleted_storage_logs_from_pruned_batches; tracing::info!( - "Performed pruning of database, deleted {deleted_l1_batches} L1 batches, {deleted_miniblocks} miniblocks, \ + "Performed pruning of database, deleted {deleted_l1_batches} L1 batches, {deleted_l2_blocks} L2 blocks, \ {deleted_storage_logs} storage logs ({deleted_storage_logs_from_pruned_batches} from pruned batches + \ {deleted_storage_logs_from_past_batches} from past batches), \ {deleted_events} events, {deleted_call_traces} call traces, {deleted_l2_to_l1_logs} L2-to-L1 logs" @@ -277,13 +277,13 @@ mod tests { use 
std::collections::HashMap; use anyhow::anyhow; - use multivm::zk_evm_latest::ethereum_types::H256; use test_log::test; use zksync_dal::pruning_dal::PruningInfo; use zksync_db_connection::connection::Connection; - use zksync_types::{block::L2BlockHeader, Address, L2BlockNumber, ProtocolVersion}; + use zksync_types::{L2BlockNumber, ProtocolVersion}; use super::*; + use crate::utils::testonly::create_l2_block; #[derive(Debug)] struct ConditionMock { @@ -364,10 +364,10 @@ mod tests { assert!(pruner.is_l1_batch_prunable(L1BatchNumber(4)).await); } - async fn insert_miniblocks( + async fn insert_l2_blocks( conn: &mut Connection<'_, Core>, - l1_batches_count: u64, - miniblocks_per_batch: u64, + l1_batches_count: u32, + l2_blocks_per_batch: u32, ) { conn.protocol_versions_dal() .save_protocol_version_with_tx(&ProtocolVersion::default()) @@ -375,32 +375,16 @@ mod tests { .unwrap(); for l1_batch_number in 0..l1_batches_count { - for miniblock_index in 0..miniblocks_per_batch { - let miniblock_number = L2BlockNumber( - (l1_batch_number * miniblocks_per_batch + miniblock_index) as u32, - ); - let miniblock_header = L2BlockHeader { - number: miniblock_number, - timestamp: 0, - hash: H256::from_low_u64_be(u64::from(miniblock_number.0)), - l1_tx_count: 0, - l2_tx_count: 0, - fee_account_address: Address::repeat_byte(1), - base_fee_per_gas: 0, - gas_per_pubdata_limit: 0, - batch_fee_input: Default::default(), - base_system_contracts_hashes: Default::default(), - protocol_version: Some(Default::default()), - virtual_blocks: 0, - gas_limit: 0, - }; + for l2_block_index in 0..l2_blocks_per_batch { + let l2_block_number = l1_batch_number * l2_blocks_per_batch + l2_block_index; + let l2_block_header = create_l2_block(l2_block_number); conn.blocks_dal() - .insert_l2_block(&miniblock_header) + .insert_l2_block(&l2_block_header) .await .unwrap(); conn.blocks_dal() - .mark_l2_blocks_as_executed_in_l1_batch(L1BatchNumber(l1_batch_number as u32)) + .mark_l2_blocks_as_executed_in_l1_batch(L1BatchNumber(l1_batch_number)) .await .unwrap(); } @@ -412,7 +396,7 @@ mod tests { let pool = ConnectionPool::::test_pool().await; let mut conn = pool.connection().await.unwrap(); - insert_miniblocks(&mut conn, 10, 2).await; + insert_l2_blocks(&mut conn, 10, 2).await; conn.pruning_dal() .soft_prune_batches_range(L1BatchNumber(2), L2BlockNumber(5)) .await @@ -448,7 +432,7 @@ mod tests { let pool = ConnectionPool::::test_pool().await; let mut conn = pool.connection().await.unwrap(); - insert_miniblocks(&mut conn, 10, 2).await; + insert_l2_blocks(&mut conn, 10, 2).await; conn.pruning_dal() .soft_prune_batches_range(L1BatchNumber(2), L2BlockNumber(5)) .await @@ -493,7 +477,7 @@ mod tests { let pool = ConnectionPool::::test_pool().await; let mut conn = pool.connection().await.unwrap(); - insert_miniblocks(&mut conn, 10, 2).await; + insert_l2_blocks(&mut conn, 10, 2).await; let pruner = DbPruner::with_conditions( DbPrunerConfig { @@ -535,7 +519,7 @@ mod tests { let pool = ConnectionPool::::test_pool().await; let mut conn = pool.connection().await.unwrap(); - insert_miniblocks(&mut conn, 10, 2).await; + insert_l2_blocks(&mut conn, 10, 2).await; let first_chunk_prunable_check = Arc::new( ConditionMock::name("first chunk prunable").with_response(L1BatchNumber(3), true), diff --git a/core/lib/zksync_core/src/fee_model.rs b/core/lib/zksync_core/src/fee_model.rs index 50ba4f5669a0..d905e22f3564 100644 --- a/core/lib/zksync_core/src/fee_model.rs +++ b/core/lib/zksync_core/src/fee_model.rs @@ -80,7 +80,7 @@ impl MainNodeFeeInputProvider { } /// 
The fee model provider to be used in the API. It returns the maximal batch fee input between the projected main node one and -/// the one from the last sealed miniblock. +/// the one from the last sealed L2 block. #[derive(Debug)] pub(crate) struct ApiFeeInputProvider { inner: Arc, @@ -110,7 +110,7 @@ impl BatchFeeModelInputProvider for ApiFeeInputProvider { .inner .get_batch_fee_input_scaled(l1_gas_price_scale_factor, l1_pubdata_price_scale_factor) .await; - let last_miniblock_params = self + let last_l2_block_params = self .connection_pool .connection_tagged("api_fee_input_provider") .await @@ -120,7 +120,7 @@ impl BatchFeeModelInputProvider for ApiFeeInputProvider { .await .unwrap(); - last_miniblock_params + last_l2_block_params .map(|header| inner_input.stricter(header.batch_fee_input)) .unwrap_or(inner_input) } diff --git a/core/lib/zksync_core/src/genesis.rs b/core/lib/zksync_core/src/genesis.rs index 160e167e536c..cdf1b2718021 100644 --- a/core/lib/zksync_core/src/genesis.rs +++ b/core/lib/zksync_core/src/genesis.rs @@ -485,7 +485,7 @@ pub(crate) async fn create_genesis_l1_batch( protocol_version, ); - let genesis_miniblock_header = L2BlockHeader { + let genesis_l2_block_header = L2BlockHeader { number: L2BlockNumber(0), timestamp: 0, hash: L2BlockHasher::legacy_hash(L2BlockNumber(0)), @@ -520,7 +520,7 @@ pub(crate) async fn create_genesis_l1_batch( .await?; transaction .blocks_dal() - .insert_l2_block(&genesis_miniblock_header) + .insert_l2_block(&genesis_l2_block_header) .await?; transaction .blocks_dal() diff --git a/core/lib/zksync_core/src/lib.rs b/core/lib/zksync_core/src/lib.rs index 9608f20fa266..dc6e3dc016ba 100644 --- a/core/lib/zksync_core/src/lib.rs +++ b/core/lib/zksync_core/src/lib.rs @@ -187,7 +187,7 @@ pub enum Component { Housekeeper, /// Component for exposing APIs to prover for providing proof generation data and accepting proofs. ProofDataHandler, - /// Component generating BFT consensus certificates for miniblocks. + /// Component generating BFT consensus certificates for L2 blocks. Consensus, /// Component generating commitment for L1 batches. CommitmentGenerator, @@ -848,18 +848,18 @@ async fn add_state_keeper_to_task_futures( mempool }; - let miniblock_sealer_pool = ConnectionPool::::singleton(postgres_config.master_url()?) + let l2_block_sealer_pool = ConnectionPool::::singleton(postgres_config.master_url()?) .build() .await - .context("failed to build miniblock_sealer_pool")?; - let (persistence, miniblock_sealer) = StateKeeperPersistence::new( - miniblock_sealer_pool, + .context("failed to build l2_block_sealer_pool")?; + let (persistence, l2_block_sealer) = StateKeeperPersistence::new( + l2_block_sealer_pool, contracts_config .l2_shared_bridge_addr - .expect("`l2_shared_bridge_addr` config is missing"), + .context("`l2_shared_bridge_addr` config is missing")?, state_keeper_config.l2_block_seal_queue_capacity, ); - task_futures.push(tokio::spawn(miniblock_sealer.run())); + task_futures.push(tokio::spawn(l2_block_sealer.run())); // One (potentially held long-term) connection for `AsyncCatchupTask` and another connection // to access `AsyncRocksdbCache` as a storage. 
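One change in the hunk above is easy to miss: `l2_shared_bridge_addr` is now unwrapped with `context` instead of `expect`, so a missing config value becomes a propagated error rather than a panic. With `anyhow::Context` in scope this works directly on `Option`; a standalone sketch (the config struct here is made up for illustration):

```rust
use anyhow::Context as _;

struct ContractsConfig {
    l2_shared_bridge_addr: Option<[u8; 20]>,
}

fn l2_shared_bridge_addr(config: &ContractsConfig) -> anyhow::Result<[u8; 20]> {
    // Unlike `expect`, this surfaces a descriptive error to the caller
    // instead of aborting the process.
    config
        .l2_shared_bridge_addr
        .context("`l2_shared_bridge_addr` config is missing")
}
```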
@@ -1315,7 +1315,7 @@ async fn run_http_api( let updaters_pool = ConnectionPool::::builder(postgres_config.replica_url()?, 2) .build() .await - .context("failed to build last_miniblock_pool")?; + .context("failed to build updaters_pool")?; let mut api_builder = web3::ApiBuilder::jsonrpsee_backend(internal_api.clone(), replica_connection_pool) @@ -1370,10 +1370,10 @@ async fn run_ws_api( storage_caches, ) .await; - let last_miniblock_pool = ConnectionPool::::singleton(postgres_config.replica_url()?) + let updaters_pool = ConnectionPool::::singleton(postgres_config.replica_url()?) .build() .await - .context("failed to build last_miniblock_pool")?; + .context("failed to build updaters_pool")?; let mut namespaces = Namespace::DEFAULT.to_vec(); namespaces.push(Namespace::Snapshots); @@ -1381,7 +1381,7 @@ async fn run_ws_api( let mut api_builder = web3::ApiBuilder::jsonrpsee_backend(internal_api.clone(), replica_connection_pool) .ws(api_config.web3_json_rpc.ws_port) - .with_updaters_pool(last_miniblock_pool) + .with_updaters_pool(updaters_pool) .with_filter_limit(api_config.web3_json_rpc.filters_limit()) .with_subscriptions_limit(api_config.web3_json_rpc.subscriptions_limit()) .with_batch_request_size_limit(api_config.web3_json_rpc.max_batch_request_size()) diff --git a/core/lib/zksync_core/src/metadata_calculator/recovery/mod.rs b/core/lib/zksync_core/src/metadata_calculator/recovery/mod.rs index f660315f5341..7e621531dc86 100644 --- a/core/lib/zksync_core/src/metadata_calculator/recovery/mod.rs +++ b/core/lib/zksync_core/src/metadata_calculator/recovery/mod.rs @@ -112,7 +112,7 @@ impl HandleRecoveryEvent for RecoveryHealthUpdater<'_> { #[derive(Debug, Clone, Copy)] struct SnapshotParameters { - miniblock: L2BlockNumber, + l2_block: L2BlockNumber, expected_root_hash: H256, log_count: u64, } @@ -126,18 +126,17 @@ impl SnapshotParameters { pool: &ConnectionPool, recovery: &SnapshotRecoveryStatus, ) -> anyhow::Result { - let miniblock = recovery.l2_block_number; + let l2_block = recovery.l2_block_number; let expected_root_hash = recovery.l1_batch_root_hash; let mut storage = pool.connection().await?; let log_count = storage .storage_logs_dal() - .get_storage_logs_row_count(miniblock) - .await - .with_context(|| format!("Failed getting number of logs for miniblock #{miniblock}"))?; + .get_storage_logs_row_count(l2_block) + .await?; Ok(Self { - miniblock, + l2_block, expected_root_hash, log_count, }) @@ -240,7 +239,7 @@ impl AsyncTreeRecovery { let mut storage = pool.connection().await?; let remaining_chunks = self - .filter_chunks(&mut storage, snapshot.miniblock, &chunks) + .filter_chunks(&mut storage, snapshot.l2_block, &chunks) .await?; drop(storage); options @@ -259,7 +258,7 @@ impl AsyncTreeRecovery { .await .context("semaphore is never closed")?; options.events.chunk_started().await; - Self::recover_key_chunk(&tree, snapshot.miniblock, chunk, pool, stop_receiver).await?; + Self::recover_key_chunk(&tree, snapshot.l2_block, chunk, pool, stop_receiver).await?; options.events.chunk_recovered().await; anyhow::Ok(()) }); @@ -290,14 +289,14 @@ impl AsyncTreeRecovery { async fn filter_chunks( &mut self, storage: &mut Connection<'_, Core>, - snapshot_miniblock: L2BlockNumber, + snapshot_l2_block: L2BlockNumber, key_chunks: &[ops::RangeInclusive], ) -> anyhow::Result>> { let chunk_starts_latency = RECOVERY_METRICS.latency[&RecoveryStage::LoadChunkStarts].start(); let chunk_starts = storage .storage_logs_dal() - .get_chunk_starts_for_l2_block(snapshot_miniblock, key_chunks) + 
.get_chunk_starts_for_l2_block(snapshot_l2_block, key_chunks) .await?; let chunk_starts_latency = chunk_starts_latency.observe(); tracing::debug!( @@ -323,7 +322,7 @@ impl AsyncTreeRecovery { } anyhow::ensure!( tree_entry.value == db_entry.value && tree_entry.leaf_index == db_entry.leaf_index, - "Mismatch between entry for key {:0>64x} in Postgres snapshot for miniblock #{snapshot_miniblock} \ + "Mismatch between entry for key {:0>64x} in Postgres snapshot for L2 block #{snapshot_l2_block} \ ({db_entry:?}) and tree ({tree_entry:?}); the recovery procedure may be corrupted", db_entry.key ); @@ -333,7 +332,7 @@ impl AsyncTreeRecovery { async fn recover_key_chunk( tree: &Mutex, - snapshot_miniblock: L2BlockNumber, + snapshot_l2_block: L2BlockNumber, key_chunk: ops::RangeInclusive, pool: &ConnectionPool, stop_receiver: &watch::Receiver, @@ -351,7 +350,7 @@ impl AsyncTreeRecovery { RECOVERY_METRICS.chunk_latency[&ChunkRecoveryStage::LoadEntries].start(); let all_entries = storage .storage_logs_dal() - .get_tree_entries_for_l2_block(snapshot_miniblock, key_chunk.clone()) + .get_tree_entries_for_l2_block(snapshot_l2_block, key_chunk.clone()) .await?; drop(storage); let entries_latency = entries_latency.observe(); diff --git a/core/lib/zksync_core/src/metadata_calculator/recovery/tests.rs b/core/lib/zksync_core/src/metadata_calculator/recovery/tests.rs index ac7225409e39..b7b8f4acf067 100644 --- a/core/lib/zksync_core/src/metadata_calculator/recovery/tests.rs +++ b/core/lib/zksync_core/src/metadata_calculator/recovery/tests.rs @@ -32,7 +32,7 @@ use crate::{ #[test] fn calculating_chunk_count() { let mut snapshot = SnapshotParameters { - miniblock: L2BlockNumber(1), + l2_block: L2BlockNumber(1), log_count: 160_000_000, expected_root_hash: H256::zero(), }; diff --git a/core/lib/zksync_core/src/metadata_calculator/tests.rs b/core/lib/zksync_core/src/metadata_calculator/tests.rs index fd942bc5a351..a554d2f7d8ca 100644 --- a/core/lib/zksync_core/src/metadata_calculator/tests.rs +++ b/core/lib/zksync_core/src/metadata_calculator/tests.rs @@ -24,7 +24,7 @@ use zksync_utils::u32_to_h256; use super::{GenericAsyncTree, L1BatchWithLogs, MetadataCalculator, MetadataCalculatorConfig}; use crate::{ genesis::{insert_genesis_batch, GenesisParams}, - utils::testonly::{create_l1_batch, create_miniblock}, + utils::testonly::{create_l1_batch, create_l2_block}, }; const RUN_TIMEOUT: Duration = Duration::from_secs(30); @@ -512,9 +512,9 @@ pub(super) async fn extend_db_state_from_l1_batch( for (idx, batch_logs) in (next_l1_batch.0..).zip(new_logs) { let header = create_l1_batch(idx); let batch_number = header.number; - // Assumes that L1 batch consists of only one miniblock. - let miniblock_header = create_miniblock(idx); - let miniblock_number = miniblock_header.number; + // Assumes that an L1 batch consists of only one L2 block.
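+ // Hence the L2 block number below simply reuses the L1 batch index `idx`.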
+ let l2_block_header = create_l2_block(idx); + let l2_block_number = l2_block_header.number; storage .blocks_dal() @@ -523,12 +523,12 @@ pub(super) async fn extend_db_state_from_l1_batch( .unwrap(); storage .blocks_dal() - .insert_l2_block(&miniblock_header) + .insert_l2_block(&l2_block_header) .await .unwrap(); storage .storage_logs_dal() - .insert_storage_logs(miniblock_number, &[(H256::zero(), batch_logs)]) + .insert_storage_logs(l2_block_number, &[(H256::zero(), batch_logs)]) .await .unwrap(); storage diff --git a/core/lib/zksync_core/src/reorg_detector/mod.rs b/core/lib/zksync_core/src/reorg_detector/mod.rs index a5de7112a3d6..9561649c0c8c 100644 --- a/core/lib/zksync_core/src/reorg_detector/mod.rs +++ b/core/lib/zksync_core/src/reorg_detector/mod.rs @@ -80,11 +80,11 @@ impl From for Error { #[async_trait] trait MainNodeClient: fmt::Debug + Send + Sync { - async fn sealed_miniblock_number(&self) -> EnrichedClientResult; + async fn sealed_l2_block_number(&self) -> EnrichedClientResult; async fn sealed_l1_batch_number(&self) -> EnrichedClientResult; - async fn miniblock_hash(&self, number: L2BlockNumber) -> EnrichedClientResult>; + async fn l2_block_hash(&self, number: L2BlockNumber) -> EnrichedClientResult>; async fn l1_batch_root_hash(&self, number: L1BatchNumber) -> EnrichedClientResult>; @@ -92,10 +92,10 @@ trait MainNodeClient: fmt::Debug + Send + Sync { #[async_trait] impl MainNodeClient for BoxedL2Client { - async fn sealed_miniblock_number(&self) -> EnrichedClientResult { + async fn sealed_l2_block_number(&self) -> EnrichedClientResult { let number = self .get_block_number() - .rpc_context("sealed_miniblock_number") + .rpc_context("sealed_l2_block_number") .await?; let number = u32::try_from(number).map_err(|err| { EnrichedClientError::custom(err, "u32::try_from").with_arg("number", &number) @@ -114,10 +114,10 @@ impl MainNodeClient for BoxedL2Client { Ok(L1BatchNumber(number)) } - async fn miniblock_hash(&self, number: L2BlockNumber) -> EnrichedClientResult> { + async fn l2_block_hash(&self, number: L2BlockNumber) -> EnrichedClientResult> { Ok(self .get_block_by_number(number.0.into(), false) - .rpc_context("miniblock_hash") + .rpc_context("l2_block_hash") .with_arg("number", &number) .await? 
.map(|block| block.hash)) @@ -141,7 +141,7 @@ trait HandleReorgDetectorEvent: fmt::Debug + Send + Sync { fn update_correct_block( &mut self, - last_correct_miniblock: L2BlockNumber, + last_correct_l2_block: L2BlockNumber, last_correct_l1_batch: L1BatchNumber, ); @@ -158,15 +158,15 @@ impl HandleReorgDetectorEvent for HealthUpdater { fn update_correct_block( &mut self, - last_correct_miniblock: L2BlockNumber, + last_correct_l2_block: L2BlockNumber, last_correct_l1_batch: L1BatchNumber, ) { - let last_correct_miniblock = last_correct_miniblock.0.into(); - let prev_checked_miniblock = EN_METRICS.last_correct_miniblock + let last_correct_l2_block = last_correct_l2_block.0.into(); + let prev_checked_l2_block = EN_METRICS.last_correct_l2_block [&CheckerComponent::ReorgDetector] - .set(last_correct_miniblock); - if prev_checked_miniblock != last_correct_miniblock { - tracing::debug!("No reorg at miniblock #{last_correct_miniblock}"); + .set(last_correct_l2_block); + if prev_checked_l2_block != last_correct_l2_block { + tracing::debug!("No reorg at L2 block #{last_correct_l2_block}"); } let last_correct_l1_batch = last_correct_l1_batch.0.into(); @@ -177,7 +177,7 @@ impl HandleReorgDetectorEvent for HealthUpdater { } let health_details = serde_json::json!({ - "last_correct_miniblock": last_correct_miniblock, + "last_correct_l2_block": last_correct_l2_block, "last_correct_l1_batch": last_correct_l1_batch, }); self.update(Health::from(HealthStatus::Ready).with_details(health_details)); @@ -248,7 +248,7 @@ impl ReorgDetector { else { return Ok(()); }; - let Some(local_miniblock) = storage + let Some(local_l2_block) = storage .blocks_dal() .get_sealed_l2_block_number() .await @@ -259,23 +259,23 @@ impl ReorgDetector { drop(storage); let remote_l1_batch = self.client.sealed_l1_batch_number().await?; - let remote_miniblock = self.client.sealed_miniblock_number().await?; + let remote_l2_block = self.client.sealed_l2_block_number().await?; let checked_l1_batch = local_l1_batch.min(remote_l1_batch); - let checked_miniblock = local_miniblock.min(remote_miniblock); + let checked_l2_block = local_l2_block.min(remote_l2_block); let root_hashes_match = self.root_hashes_match(checked_l1_batch).await?; - let miniblock_hashes_match = self.miniblock_hashes_match(checked_miniblock).await?; + let l2_block_hashes_match = self.l2_block_hashes_match(checked_l2_block).await?; // The only event that triggers re-org detection and node rollback is if the - // hash mismatch at the same block height is detected, be it miniblocks or batches. + // hash mismatch at the same block height is detected, be it L2 blocks or batches. // // In other cases either there is only a height mismatch which means that one of // the nodes needs to do catching up; however, it is not certain that there is actually // a re-org taking place. - if root_hashes_match && miniblock_hashes_match { + if root_hashes_match && l2_block_hashes_match { self.event_handler - .update_correct_block(checked_miniblock, checked_l1_batch); + .update_correct_block(checked_l2_block, checked_l1_batch); return Ok(()); } let diverged_l1_batch = checked_l1_batch + (root_hashes_match as u32); @@ -306,33 +306,30 @@ impl ReorgDetector { Err(Error::ReorgDetected(last_correct_l1_batch)) } - /// Compares hashes of the given local miniblock and the same miniblock from main node. - async fn miniblock_hashes_match( - &self, - miniblock: L2BlockNumber, - ) -> Result { + /// Compares hashes of the given local L2 block and the same L2 block from main node. 
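+ /// A missing remote block is surfaced as `HashMatchError::RemoteHashMissing` rather than treated as a mismatch.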
+ async fn l2_block_hashes_match(&self, l2_block: L2BlockNumber) -> Result { let mut storage = self.pool.connection().await.context("connection()")?; let local_hash = storage .blocks_dal() - .get_l2_block_header(miniblock) + .get_l2_block_header(l2_block) .await .map_err(DalError::generalize)? - .with_context(|| format!("Header does not exist for local miniblock #{miniblock}"))? + .with_context(|| format!("Header does not exist for local L2 block #{l2_block}"))? .hash; drop(storage); - let Some(remote_hash) = self.client.miniblock_hash(miniblock).await? else { + let Some(remote_hash) = self.client.l2_block_hash(l2_block).await? else { // Due to reorg, locally we may be ahead of the main node. // Lack of the hash on the main node is treated as a hash match; // we need to wait for our knowledge of the main node to catch up. - tracing::info!("Remote miniblock #{miniblock} is missing"); + tracing::info!("Remote L2 block #{l2_block} is missing"); return Err(HashMatchError::RemoteHashMissing); }; if remote_hash != local_hash { tracing::warn!( "Reorg detected: local hash {local_hash:?} doesn't match the hash from - main node {remote_hash:?} (miniblock #{miniblock})" + main node {remote_hash:?} (L2 block #{l2_block})" ); } Ok(remote_hash == local_hash) diff --git a/core/lib/zksync_core/src/reorg_detector/tests.rs b/core/lib/zksync_core/src/reorg_detector/tests.rs index d0106dcf7c10..32b768b9c81c 100644 --- a/core/lib/zksync_core/src/reorg_detector/tests.rs +++ b/core/lib/zksync_core/src/reorg_detector/tests.rs @@ -18,13 +18,13 @@ use zksync_web3_decl::jsonrpsee::core::ClientError as RpcError; use super::*; use crate::{ genesis::{insert_genesis_batch, GenesisParams}, - utils::testonly::{create_l1_batch, create_miniblock}, + utils::testonly::{create_l1_batch, create_l2_block}, }; -async fn store_miniblock(storage: &mut Connection<'_, Core>, number: u32, hash: H256) { +async fn store_l2_block(storage: &mut Connection<'_, Core>, number: u32, hash: H256) { let header = L2BlockHeader { hash, - ..create_miniblock(number) + ..create_l2_block(number) }; storage.blocks_dal().insert_l2_block(&header).await.unwrap(); } @@ -75,7 +75,7 @@ impl From for RpcError { #[derive(Debug, Default)] struct MockMainNodeClient { - miniblock_hashes: BTreeMap, + l2_block_hashes: BTreeMap, l1_batch_root_hashes: BTreeMap, error_kind: Arc>>, } @@ -91,10 +91,10 @@ impl MockMainNodeClient { #[async_trait] impl MainNodeClient for MockMainNodeClient { - async fn sealed_miniblock_number(&self) -> EnrichedClientResult { - self.check_error("sealed_miniblock_number")?; + async fn sealed_l2_block_number(&self) -> EnrichedClientResult { + self.check_error("sealed_l2_block_number")?; Ok(self - .miniblock_hashes + .l2_block_hashes .last_key_value() .map(|x| *x.0) .unwrap_or_default()) @@ -109,10 +109,10 @@ impl MainNodeClient for MockMainNodeClient { .unwrap_or_default()) } - async fn miniblock_hash(&self, number: L2BlockNumber) -> EnrichedClientResult> { - self.check_error("miniblock_hash") + async fn l2_block_hash(&self, number: L2BlockNumber) -> EnrichedClientResult> { + self.check_error("l2_block_hash") .map_err(|err| err.with_arg("number", &number))?; - Ok(self.miniblock_hashes.get(&number).copied()) + Ok(self.l2_block_hashes.get(&number).copied()) } async fn l1_batch_root_hash( &self, @@ -132,10 +132,10 @@ impl HandleReorgDetectorEvent for mpsc::UnboundedSender<(L2BlockNumber, L1BatchN fn update_correct_block( &mut self, - last_correct_miniblock: L2BlockNumber, + last_correct_l2_block: L2BlockNumber, last_correct_l1_batch: L1BatchNumber, ) { -
self.send((last_correct_miniblock, last_correct_l1_batch)) + self.send((last_correct_l2_block, last_correct_l1_batch)) .ok(); } @@ -175,7 +175,7 @@ async fn normal_reorg_function(snapshot_recovery: bool, with_transient_errors: b let genesis_batch = insert_genesis_batch(&mut storage, &GenesisParams::mock()) .await .unwrap(); - client.miniblock_hashes.insert( + client.l2_block_hashes.insert( L2BlockNumber(0), L2BlockHasher::legacy_hash(L2BlockNumber(0)), ); @@ -190,18 +190,18 @@ async fn normal_reorg_function(snapshot_recovery: bool, with_transient_errors: b 1_u32..=10 }; let last_l1_batch_number = L1BatchNumber(*l1_batch_numbers.end()); - let last_miniblock_number = L2BlockNumber(*l1_batch_numbers.end()); - let miniblock_and_l1_batch_hashes: Vec<_> = l1_batch_numbers + let last_l2_block_number = L2BlockNumber(*l1_batch_numbers.end()); + let l2_block_and_l1_batch_hashes: Vec<_> = l1_batch_numbers .map(|number| { - let miniblock_hash = H256::from_low_u64_be(number.into()); + let l2_block_hash = H256::from_low_u64_be(number.into()); client - .miniblock_hashes - .insert(L2BlockNumber(number), miniblock_hash); + .l2_block_hashes + .insert(L2BlockNumber(number), l2_block_hash); let l1_batch_hash = H256::repeat_byte(number as u8); client .l1_batch_root_hashes .insert(L1BatchNumber(number), l1_batch_hash); - (number, miniblock_hash, l1_batch_hash) + (number, l2_block_hash, l1_batch_hash) }) .collect(); @@ -224,17 +224,17 @@ async fn normal_reorg_function(snapshot_recovery: bool, with_transient_errors: b }; let detector_task = tokio::spawn(detector.run(stop_receiver)); - for (number, miniblock_hash, l1_batch_hash) in miniblock_and_l1_batch_hashes { - store_miniblock(&mut storage, number, miniblock_hash).await; + for (number, l2_block_hash, l1_batch_hash) in l2_block_and_l1_batch_hashes { + store_l2_block(&mut storage, number, l2_block_hash).await; tokio::time::sleep(Duration::from_millis(10)).await; seal_l1_batch(&mut storage, number, l1_batch_hash).await; tokio::time::sleep(Duration::from_millis(10)).await; } - while let Some((miniblock, l1_batch)) = block_update_receiver.recv().await { - assert!(miniblock <= last_miniblock_number); + while let Some((l2_block, l1_batch)) = block_update_receiver.recv().await { + assert!(l2_block <= last_l2_block_number); assert!(l1_batch <= last_l1_batch_number); - if miniblock == last_miniblock_number && l1_batch == last_l1_batch_number { + if l2_block == last_l2_block_number && l1_batch == last_l1_batch_number { break; } } @@ -269,7 +269,7 @@ async fn reorg_is_detected_on_batch_hash_mismatch() { .await .unwrap(); let mut client = MockMainNodeClient::default(); - client.miniblock_hashes.insert( + client.l2_block_hashes.insert( L2BlockNumber(0), L2BlockHasher::legacy_hash(L2BlockNumber(0)), ); @@ -277,25 +277,25 @@ async fn reorg_is_detected_on_batch_hash_mismatch() { .l1_batch_root_hashes .insert(L1BatchNumber(0), genesis_batch.root_hash); - let miniblock_hash = H256::from_low_u64_be(23); + let l2_block_hash = H256::from_low_u64_be(23); client - .miniblock_hashes - .insert(L2BlockNumber(1), miniblock_hash); + .l2_block_hashes + .insert(L2BlockNumber(1), l2_block_hash); client .l1_batch_root_hashes .insert(L1BatchNumber(1), H256::repeat_byte(1)); client - .miniblock_hashes - .insert(L2BlockNumber(2), miniblock_hash); + .l2_block_hashes + .insert(L2BlockNumber(2), l2_block_hash); client .l1_batch_root_hashes .insert(L1BatchNumber(2), H256::repeat_byte(2)); let mut detector = create_mock_detector(client, pool.clone()); - store_miniblock(&mut storage, 1, 
miniblock_hash).await; + store_l2_block(&mut storage, 1, l2_block_hash).await; seal_l1_batch(&mut storage, 1, H256::repeat_byte(1)).await; - store_miniblock(&mut storage, 2, miniblock_hash).await; + store_l2_block(&mut storage, 2, l2_block_hash).await; detector.check_consistency().await.unwrap(); seal_l1_batch(&mut storage, 2, H256::repeat_byte(0xff)).await; @@ -307,14 +307,14 @@ async fn reorg_is_detected_on_batch_hash_mismatch() { } #[tokio::test] -async fn reorg_is_detected_on_miniblock_hash_mismatch() { +async fn reorg_is_detected_on_l2_block_hash_mismatch() { let pool = ConnectionPool::::test_pool().await; let mut storage = pool.connection().await.unwrap(); let mut client = MockMainNodeClient::default(); let genesis_batch = insert_genesis_batch(&mut storage, &GenesisParams::mock()) .await .unwrap(); - client.miniblock_hashes.insert( + client.l2_block_hashes.insert( L2BlockNumber(0), L2BlockHasher::legacy_hash(L2BlockNumber(0)), ); @@ -322,29 +322,29 @@ async fn reorg_is_detected_on_miniblock_hash_mismatch() { .l1_batch_root_hashes .insert(L1BatchNumber(0), genesis_batch.root_hash); - let miniblock_hash = H256::from_low_u64_be(23); + let l2_block_hash = H256::from_low_u64_be(23); client - .miniblock_hashes - .insert(L2BlockNumber(1), miniblock_hash); + .l2_block_hashes + .insert(L2BlockNumber(1), l2_block_hash); client .l1_batch_root_hashes .insert(L1BatchNumber(1), H256::repeat_byte(1)); client - .miniblock_hashes - .insert(L2BlockNumber(2), miniblock_hash); + .l2_block_hashes + .insert(L2BlockNumber(2), l2_block_hash); client - .miniblock_hashes - .insert(L2BlockNumber(3), miniblock_hash); + .l2_block_hashes + .insert(L2BlockNumber(3), l2_block_hash); let mut detector = create_mock_detector(client, pool.clone()); - store_miniblock(&mut storage, 1, miniblock_hash).await; + store_l2_block(&mut storage, 1, l2_block_hash).await; seal_l1_batch(&mut storage, 1, H256::repeat_byte(1)).await; - store_miniblock(&mut storage, 2, miniblock_hash).await; + store_l2_block(&mut storage, 2, l2_block_hash).await; detector.check_consistency().await.unwrap(); - store_miniblock(&mut storage, 3, H256::repeat_byte(42)).await; - // ^ Hash of the miniblock #3 differs from that on the main node. + store_l2_block(&mut storage, 3, H256::repeat_byte(42)).await; + // ^ Hash of the L2 block #3 differs from that on the main node. 
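+ // The diverging L2 block #3 is above the last sealed L1 batch, so batch #1 is reported as the last correct one.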
assert_matches!( detector.check_consistency().await, Err(Error::ReorgDetected(L1BatchNumber(1))) @@ -387,39 +387,39 @@ async fn reorg_is_detected_on_historic_batch_hash_mismatch( .save_protocol_version_with_tx(&ProtocolVersion::default()) .await .unwrap(); - store_miniblock(&mut storage, earliest_l1_batch_number, H256::zero()).await; + store_l2_block(&mut storage, earliest_l1_batch_number, H256::zero()).await; seal_l1_batch(&mut storage, earliest_l1_batch_number, H256::zero()).await; } let mut client = MockMainNodeClient::default(); client - .miniblock_hashes + .l2_block_hashes .insert(L2BlockNumber(earliest_l1_batch_number), H256::zero()); client .l1_batch_root_hashes .insert(L1BatchNumber(earliest_l1_batch_number), H256::zero()); - let miniblock_and_l1_batch_hashes = l1_batch_numbers.clone().map(|number| { - let mut miniblock_hash = H256::from_low_u64_be(number.into()); + let l2_block_and_l1_batch_hashes = l1_batch_numbers.clone().map(|number| { + let mut l2_block_hash = H256::from_low_u64_be(number.into()); client - .miniblock_hashes - .insert(L2BlockNumber(number), miniblock_hash); + .l2_block_hashes + .insert(L2BlockNumber(number), l2_block_hash); let mut l1_batch_hash = H256::repeat_byte(number as u8); client .l1_batch_root_hashes .insert(L1BatchNumber(number), l1_batch_hash); if number > last_correct_batch { - miniblock_hash = H256::zero(); + l2_block_hash = H256::zero(); l1_batch_hash = H256::zero(); } - (number, miniblock_hash, l1_batch_hash) + (number, l2_block_hash, l1_batch_hash) }); - let mut miniblock_and_l1_batch_hashes: Vec<_> = miniblock_and_l1_batch_hashes.collect(); + let mut l2_block_and_l1_batch_hashes: Vec<_> = l2_block_and_l1_batch_hashes.collect(); if matches!(storage_update_strategy, StorageUpdateStrategy::Prefill) { let mut storage = pool.connection().await.unwrap(); - for &(number, miniblock_hash, l1_batch_hash) in &miniblock_and_l1_batch_hashes { - store_miniblock(&mut storage, number, miniblock_hash).await; + for &(number, l2_block_hash, l1_batch_hash) in &l2_block_and_l1_batch_hashes { + store_l2_block(&mut storage, number, l2_block_hash).await; seal_l1_batch(&mut storage, number, l1_batch_hash).await; } } @@ -435,13 +435,13 @@ async fn reorg_is_detected_on_historic_batch_hash_mismatch( tokio::spawn(async move { let mut storage = pool.connection().await.unwrap(); let mut last_number = earliest_l1_batch_number; - while let Some((miniblock, l1_batch)) = block_update_receiver.recv().await { - if miniblock == L2BlockNumber(last_number) && l1_batch == L1BatchNumber(last_number) + while let Some((l2_block, l1_batch)) = block_update_receiver.recv().await { + if l2_block == L2BlockNumber(last_number) && l1_batch == L1BatchNumber(last_number) { - let (number, miniblock_hash, l1_batch_hash) = - miniblock_and_l1_batch_hashes.remove(0); + let (number, l2_block_hash, l1_batch_hash) = + l2_block_and_l1_batch_hashes.remove(0); assert_eq!(number, last_number + 1); - store_miniblock(&mut storage, number, miniblock_hash).await; + store_l2_block(&mut storage, number, l2_block_hash).await; seal_l1_batch(&mut storage, number, l1_batch_hash).await; last_number = number; } @@ -484,7 +484,7 @@ async fn detector_errors_on_earliest_batch_hash_mismatch() { .l1_batch_root_hashes .insert(L1BatchNumber(0), H256::zero()); client - .miniblock_hashes + .l2_block_hashes .insert(L2BlockNumber(0), H256::zero()); let mut detector = create_mock_detector(client, pool.clone()); @@ -502,7 +502,7 @@ async fn detector_errors_on_earliest_batch_hash_mismatch_with_snapshot_recovery( .l1_batch_root_hashes 
.insert(L1BatchNumber(3), H256::zero()); client - .miniblock_hashes + .l2_block_hashes .insert(L2BlockNumber(3), H256::zero()); let detector = create_mock_detector(client, pool.clone()); @@ -514,7 +514,7 @@ async fn detector_errors_on_earliest_batch_hash_mismatch_with_snapshot_recovery( .save_protocol_version_with_tx(&ProtocolVersion::default()) .await .unwrap(); - store_miniblock(&mut storage, 3, H256::from_low_u64_be(3)).await; + store_l2_block(&mut storage, 3, H256::from_low_u64_be(3)).await; seal_l1_batch(&mut storage, 3, H256::from_low_u64_be(3)).await; }); @@ -533,7 +533,7 @@ async fn reorg_is_detected_without_waiting_for_main_node_to_catch_up() { .unwrap(); // Fill in local storage with some data, so that it's ahead of the main node. for number in 1..5 { - store_miniblock(&mut storage, number, H256::zero()).await; + store_l2_block(&mut storage, number, H256::zero()).await; seal_l1_batch(&mut storage, number, H256::zero()).await; } drop(storage); @@ -544,14 +544,14 @@ async fn reorg_is_detected_without_waiting_for_main_node_to_catch_up() { .insert(L1BatchNumber(0), genesis_batch.root_hash); for number in 1..3 { client - .miniblock_hashes + .l2_block_hashes .insert(L2BlockNumber(number), H256::zero()); client .l1_batch_root_hashes .insert(L1BatchNumber(number), H256::zero()); } client - .miniblock_hashes + .l2_block_hashes .insert(L2BlockNumber(3), H256::zero()); client .l1_batch_root_hashes diff --git a/core/lib/zksync_core/src/state_keeper/io/common/tests.rs b/core/lib/zksync_core/src/state_keeper/io/common/tests.rs index 5f1467c014e4..5eaa122bcadc 100644 --- a/core/lib/zksync_core/src/state_keeper/io/common/tests.rs +++ b/core/lib/zksync_core/src/state_keeper/io/common/tests.rs @@ -19,7 +19,7 @@ use super::*; use crate::{ genesis::{insert_genesis_batch, mock_genesis_config, GenesisParams}, utils::testonly::{ - create_l1_batch, create_l2_transaction, create_miniblock, execute_l2_transaction, + create_l1_batch, create_l2_block, create_l2_transaction, execute_l2_transaction, prepare_recovery_snapshot, }, }; @@ -51,7 +51,7 @@ async fn creating_io_cursor_with_genesis() { L2BlockHasher::legacy_hash(L2BlockNumber(0)) ); - let l2_block = create_miniblock(1); + let l2_block = create_l2_block(1); storage .blocks_dal() .insert_l2_block(&l2_block) @@ -82,7 +82,7 @@ async fn creating_io_cursor_with_snapshot_recovery() { assert_eq!(cursor.prev_l2_block_hash, snapshot_recovery.l2_block_hash); // Add an L2 block so that we have L2 blocks (but not an L1 batch) in the storage. 
- let l2_block = create_miniblock(snapshot_recovery.l2_block_number.0 + 1); + let l2_block = create_l2_block(snapshot_recovery.l2_block_number.0 + 1); storage .blocks_dal() .insert_l2_block(&l2_block) @@ -200,13 +200,13 @@ async fn getting_first_l2_block_in_batch_with_genesis() { ]); assert_first_l2_block_numbers(&provider, &mut storage, &batches_and_l2_blocks).await; - let new_l2_block = create_miniblock(1); + let new_l2_block = create_l2_block(1); storage .blocks_dal() .insert_l2_block(&new_l2_block) .await .unwrap(); - let new_l2_block = create_miniblock(2); + let new_l2_block = create_l2_block(2); storage .blocks_dal() .insert_l2_block(&new_l2_block) @@ -275,7 +275,7 @@ async fn getting_first_l2_block_in_batch_after_snapshot_recovery() { ]); assert_first_l2_block_numbers(&provider, &mut storage, &batches_and_l2_blocks).await; - let new_l2_block = create_miniblock(snapshot_recovery.l2_block_number.0 + 1); + let new_l2_block = create_l2_block(snapshot_recovery.l2_block_number.0 + 1); storage .blocks_dal() .insert_l2_block(&new_l2_block) @@ -362,7 +362,7 @@ async fn store_pending_l2_blocks( .insert_transaction_l2(&tx, TransactionExecutionMetrics::default()) .await .unwrap(); - let mut new_l2_block = create_miniblock(l2_block_number); + let mut new_l2_block = create_l2_block(l2_block_number); new_l2_block.base_system_contracts_hashes = contract_hashes; storage .blocks_dal() diff --git a/core/lib/zksync_core/src/state_keeper/io/mempool.rs b/core/lib/zksync_core/src/state_keeper/io/mempool.rs index 0af933918de2..0aa176a2d4c8 100644 --- a/core/lib/zksync_core/src/state_keeper/io/mempool.rs +++ b/core/lib/zksync_core/src/state_keeper/io/mempool.rs @@ -44,7 +44,7 @@ pub struct MempoolIO { mempool: MempoolGuard, pool: ConnectionPool, timeout_sealer: TimeoutSealer, - miniblock_max_payload_size_sealer: L2BlockMaxPayloadSizeSealer, + l2_block_max_payload_size_sealer: L2BlockMaxPayloadSizeSealer, filter: L2TxFilter, l1_batch_params_provider: L1BatchParamsProvider, fee_account: Address, @@ -66,7 +66,7 @@ impl IoSealCriteria for MempoolIO { if self.timeout_sealer.should_seal_l2_block(manager) { return true; } - self.miniblock_max_payload_size_sealer + self.l2_block_max_payload_size_sealer .should_seal_l2_block(manager) } } @@ -417,7 +417,7 @@ impl MempoolIO { mempool, pool, timeout_sealer: TimeoutSealer::new(config), - miniblock_max_payload_size_sealer: L2BlockMaxPayloadSizeSealer::new(config), + l2_block_max_payload_size_sealer: L2BlockMaxPayloadSizeSealer::new(config), filter: L2TxFilter::default(), // ^ Will be initialized properly on the first newly opened batch l1_batch_params_provider, diff --git a/core/lib/zksync_core/src/state_keeper/io/tests/tester.rs b/core/lib/zksync_core/src/state_keeper/io/tests/tester.rs index 528f29b2cd00..b7ad72478828 100644 --- a/core/lib/zksync_core/src/state_keeper/io/tests/tester.rs +++ b/core/lib/zksync_core/src/state_keeper/io/tests/tester.rs @@ -27,7 +27,7 @@ use crate::{ l1_gas_price::{GasAdjuster, PubdataPricing, RollupPubdataPricing, ValidiumPubdataPricing}, state_keeper::{MempoolGuard, MempoolIO}, utils::testonly::{ - create_l1_batch, create_l2_transaction, create_miniblock, execute_l2_transaction, + create_l1_batch, create_l2_block, create_l2_transaction, execute_l2_transaction, DeploymentMode, }, }; @@ -173,7 +173,7 @@ impl Tester { base_fee_per_gas, batch_fee_input: fee_input, base_system_contracts_hashes: self.base_system_contracts.hashes(), - ..create_miniblock(number) + ..create_l2_block(number) }) .await .unwrap(); diff --git 
a/core/lib/zksync_core/src/sync_layer/batch_status_updater/mod.rs b/core/lib/zksync_core/src/sync_layer/batch_status_updater/mod.rs index 64cf8d857ea7..34a970e41217 100644 --- a/core/lib/zksync_core/src/sync_layer/batch_status_updater/mod.rs +++ b/core/lib/zksync_core/src/sync_layer/batch_status_updater/mod.rs @@ -74,8 +74,8 @@ impl From for UpdaterError { #[async_trait] trait MainNodeClient: fmt::Debug + Send + Sync { - /// Returns any miniblock in the specified L1 batch. - async fn resolve_l1_batch_to_miniblock( + /// Returns any L2 block in the specified L1 batch. + async fn resolve_l1_batch_to_l2_block( &self, number: L1BatchNumber, ) -> EnrichedClientResult>; @@ -88,14 +88,14 @@ trait MainNodeClient: fmt::Debug + Send + Sync { #[async_trait] impl MainNodeClient for BoxedL2Client { - async fn resolve_l1_batch_to_miniblock( + async fn resolve_l1_batch_to_l2_block( &self, number: L1BatchNumber, ) -> EnrichedClientResult> { - let request_latency = FETCHER_METRICS.requests[&FetchStage::GetMiniblockRange].start(); + let request_latency = FETCHER_METRICS.requests[&FetchStage::GetL2BlockRange].start(); let number = self - .get_miniblock_range(number) - .rpc_context("resolve_l1_batch_to_miniblock") + .get_l2_block_range(number) + .rpc_context("resolve_l1_batch_to_l2_block") .with_arg("number", &number) .await? .map(|(start, _)| L2BlockNumber(start.as_u32())); @@ -351,16 +351,16 @@ impl BatchStatusUpdater { while batch <= last_sealed_batch { // While we may receive `None` for the `self.current_l1_batch`, it's OK: open batch is guaranteed to not // be sent to L1. - let miniblock_number = self.client.resolve_l1_batch_to_miniblock(batch).await?; - let Some(miniblock_number) = miniblock_number else { + let l2_block_number = self.client.resolve_l1_batch_to_l2_block(batch).await?; + let Some(l2_block_number) = l2_block_number else { return Ok(()); }; - let Some(batch_info) = self.client.block_details(miniblock_number).await? else { + let Some(batch_info) = self.client.block_details(l2_block_number).await? else { // We cannot recover from an external API inconsistency. let err = anyhow::anyhow!( - "Node API is inconsistent: miniblock {miniblock_number} was reported to be a part of {batch} L1 batch, \ - but API has no information about this miniblock", + "Node API is inconsistent: L2 block {l2_block_number} was reported to be part of L1 batch {batch}, \ + but API has no information about this L2 block", ); return Err(err.into()); }; diff --git a/core/lib/zksync_core/src/sync_layer/batch_status_updater/tests.rs b/core/lib/zksync_core/src/sync_layer/batch_status_updater/tests.rs index 308fc7163d7b..8d9a06b31e50 100644 --- a/core/lib/zksync_core/src/sync_layer/batch_status_updater/tests.rs +++ b/core/lib/zksync_core/src/sync_layer/batch_status_updater/tests.rs @@ -12,16 +12,16 @@ use super::*; use crate::{ genesis::{insert_genesis_batch, GenesisParams}, sync_layer::metrics::L1BatchStage, - utils::testonly::{create_l1_batch, create_miniblock, prepare_recovery_snapshot}, + utils::testonly::{create_l1_batch, create_l2_block, prepare_recovery_snapshot}, }; async fn seal_l1_batch(storage: &mut Connection<'_, Core>, number: L1BatchNumber) { let mut storage = storage.start_transaction().await.unwrap(); - // Insert a mock miniblock so that `get_block_details()` will return values. - let miniblock = create_miniblock(number.0); + // Insert a mock L2 block so that `get_block_details()` will return values.
+ let l2_block = create_l2_block(number.0); storage .blocks_dal() - .insert_l2_block(&miniblock) + .insert_l2_block(&l2_block) .await .unwrap(); @@ -180,7 +180,7 @@ impl From for MockMainNodeClient { #[async_trait] impl MainNodeClient for MockMainNodeClient { - async fn resolve_l1_batch_to_miniblock( + async fn resolve_l1_batch_to_l2_block( &self, number: L1BatchNumber, ) -> EnrichedClientResult> { diff --git a/core/lib/zksync_core/src/sync_layer/external_io.rs b/core/lib/zksync_core/src/sync_layer/external_io.rs index d9744947a7cc..482c0fbe2de2 100644 --- a/core/lib/zksync_core/src/sync_layer/external_io.rs +++ b/core/lib/zksync_core/src/sync_layer/external_io.rs @@ -65,7 +65,7 @@ impl ExternalIO { async fn get_base_system_contract( &self, hash: H256, - current_miniblock_number: L2BlockNumber, + current_l2_block_number: L2BlockNumber, ) -> anyhow::Result { let bytecode = self .pool @@ -88,19 +88,17 @@ impl ExternalIO { let contract_bytecode = self .main_node_client .fetch_system_contract_by_hash(hash) - .await - .context("failed to fetch base system contract bytecode from the main node")? + .await? .context("base system contract is missing on the main node")?; self.pool .connection_tagged("sync_layer") .await? .factory_deps_dal() .insert_factory_deps( - current_miniblock_number, + current_l2_block_number, &HashMap::from([(hash, contract_bytecode.clone())]), ) - .await - .context("failed persisting system contract")?; + .await?; SystemContractCode { code: bytes_to_be_words(contract_bytecode), hash, @@ -120,7 +118,7 @@ impl IoSealCriteria for ExternalIO { } fn should_seal_l2_block(&mut self, _manager: &UpdatesManager) -> bool { - if !matches!(self.actions.peek_action(), Some(SyncAction::SealMiniblock)) { + if !matches!(self.actions.peek_action(), Some(SyncAction::SealL2Block)) { return false; } self.actions.pop_action(); @@ -138,35 +136,35 @@ impl StateKeeperIO for ExternalIO { let mut storage = self.pool.connection_tagged("sync_layer").await?; let cursor = IoCursor::new(&mut storage).await?; tracing::info!( - "Initialized the ExternalIO: current L1 batch number {}, current miniblock number {}", + "Initialized the ExternalIO: current L1 batch number {}, current L2 block number {}", cursor.l1_batch, cursor.next_l2_block, ); - let pending_miniblock_header = self + let pending_l2_block_header = self .l1_batch_params_provider .load_first_l2_block_in_batch(&mut storage, cursor.l1_batch) .await .with_context(|| { format!( - "failed loading first miniblock for L1 batch #{}", + "failed loading first L2 block for L1 batch #{}", cursor.l1_batch ) })?; - let Some(mut pending_miniblock_header) = pending_miniblock_header else { + let Some(mut pending_l2_block_header) = pending_l2_block_header else { return Ok((cursor, None)); }; - if !pending_miniblock_header.has_protocol_version() { - let pending_miniblock_number = pending_miniblock_header.number(); - // Fetch protocol version ID for pending miniblocks to know which VM to use to re-execute them. + if !pending_l2_block_header.has_protocol_version() { + let pending_l2_block_number = pending_l2_block_header.number(); + // Fetch protocol version ID for pending L2 blocks to know which VM to use to re-execute them. let sync_block = self .main_node_client - .fetch_l2_block(pending_miniblock_number, false) + .fetch_l2_block(pending_l2_block_number, false) .await .context("failed to fetch block from the main node")? 
.with_context(|| { - format!("pending miniblock #{pending_miniblock_number} is missing on main node") + format!("pending L2 block #{pending_l2_block_number} is missing on main node") })?; // Loading base system contracts will insert protocol version in the database if it's not present there. let protocol_version = sync_block.protocol_version; @@ -181,15 +179,15 @@ impl StateKeeperIO for ExternalIO { .blocks_dal() .set_protocol_version_for_pending_l2_blocks(protocol_version) .await - .context("failed setting protocol version for pending miniblocks")?; - pending_miniblock_header.set_protocol_version(protocol_version); + .context("failed setting protocol version for pending L2 blocks")?; + pending_l2_block_header.set_protocol_version(protocol_version); } let (system_env, l1_batch_env) = self .l1_batch_params_provider .load_l1_batch_params( &mut storage, - &pending_miniblock_header, + &pending_l2_block_header, super::VALIDATION_COMPUTATIONAL_GAS_LIMIT, self.chain_id, ) @@ -224,7 +222,7 @@ impl StateKeeperIO for ExternalIO { SyncAction::OpenBatch { params, number, - first_miniblock_number, + first_l2_block_number, } => { anyhow::ensure!( number == cursor.l1_batch, @@ -232,8 +230,8 @@ impl StateKeeperIO for ExternalIO { cursor.l1_batch ); anyhow::ensure!( - first_miniblock_number == cursor.next_l2_block, - "Miniblock number mismatch: expected {}, got {first_miniblock_number}", + first_l2_block_number == cursor.next_l2_block, + "L2 block number mismatch: expected {}, got {first_l2_block_number}", cursor.next_l2_block ); return Ok(Some(params)); @@ -249,22 +247,22 @@ impl StateKeeperIO for ExternalIO { cursor: &IoCursor, max_wait: Duration, ) -> anyhow::Result> { - // Wait for the next miniblock to appear in the queue. + // Wait for the next L2 block to appear in the queue. let Some(action) = self.actions.recv_action(max_wait).await else { return Ok(None); }; match action { - SyncAction::Miniblock { params, number } => { + SyncAction::L2Block { params, number } => { anyhow::ensure!( number == cursor.next_l2_block, - "Miniblock number mismatch: expected {}, got {number}", + "L2 block number mismatch: expected {}, got {number}", cursor.next_l2_block ); return Ok(Some(params)); } other => { anyhow::bail!( - "Unexpected action in the queue while waiting for the next miniblock: {other:?}" + "Unexpected action in the queue while waiting for the next L2 block: {other:?}" ); } } @@ -286,8 +284,8 @@ impl StateKeeperIO for ExternalIO { self.actions.pop_action().unwrap(); return Ok(Some(Transaction::from(*tx))); } - SyncAction::SealMiniblock | SyncAction::SealBatch => { - // No more transactions in the current miniblock; the state keeper should seal it. + SyncAction::SealL2Block | SyncAction::SealBatch => { + // No more transactions in the current L2 block; the state keeper should seal it. 
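+ // L2 block sealing itself is triggered through `IoSealCriteria::should_seal_l2_block()`, which peeks at this same action queue.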
return Ok(None); } other => { diff --git a/core/lib/zksync_core/src/sync_layer/fetcher.rs b/core/lib/zksync_core/src/sync_layer/fetcher.rs index 138f39ed8cee..8f5922afa3af 100644 --- a/core/lib/zksync_core/src/sync_layer/fetcher.rs +++ b/core/lib/zksync_core/src/sync_layer/fetcher.rs @@ -54,8 +54,8 @@ pub(crate) struct FetchedBlock { } impl FetchedBlock { - fn compute_hash(&self, prev_miniblock_hash: H256) -> H256 { - let mut hasher = L2BlockHasher::new(self.number, self.timestamp, prev_miniblock_hash); + fn compute_hash(&self, prev_l2_block_hash: H256) -> H256 { + let mut hasher = L2BlockHasher::new(self.number, self.timestamp, prev_l2_block_hash); for tx in &self.transactions { hasher.push_tx_hash(tx.hash()); } @@ -73,7 +73,7 @@ impl TryFrom for FetchedBlock { if transactions.is_empty() && !block.last_in_batch { return Err(anyhow::anyhow!( - "Only last miniblock of the batch can be empty" + "Only the last L2 block of the batch can be empty" )); } @@ -116,10 +116,10 @@ impl IoCursor { if let Some(reference_hash) = block.reference_hash { if local_block_hash != reference_hash { // This is a warning, not an assertion because hash mismatch may occur after a reorg. - // Indeed, `self.prev_miniblock_hash` may differ from the hash of the updated previous miniblock. + // Indeed, `self.prev_l2_block_hash` may differ from the hash of the updated previous L2 block. tracing::warn!( - "Mismatch between the locally computed and received miniblock hash for {block:?}; \ - local_block_hash = {local_block_hash:?}, prev_miniblock_hash = {:?}", + "Mismatch between the locally computed and received L2 block hash for {block:?}; \ + local_block_hash = {local_block_hash:?}, prev_l2_block_hash = {:?}", self.prev_l2_block_hash ); } @@ -130,7 +130,7 @@ impl IoCursor { assert_eq!( block.l1_batch_number, self.l1_batch.next(), - "Unexpected batch number in the next received miniblock" + "Unexpected batch number in the next received L2 block" ); tracing::info!( @@ -156,14 +156,14 @@ impl IoCursor { }, }, number: block.l1_batch_number, - first_miniblock_number: block.number, + first_l2_block_number: block.number, }); FETCHER_METRICS.l1_batch[&L1BatchStage::Open].set(block.l1_batch_number.0.into()); self.l1_batch += 1; } else { - // New batch implicitly means a new miniblock, so we only need to push the miniblock action + // New batch implicitly means a new L2 block, so we only need to push the L2 block action // if it's not a new batch. - new_actions.push(SyncAction::Miniblock { + new_actions.push(SyncAction::L2Block { params: L2BlockParams { timestamp: block.timestamp, virtual_blocks: block.virtual_blocks, @@ -177,12 +177,12 @@ impl IoCursor { .inc_by(block.transactions.len() as u64); new_actions.extend(block.transactions.into_iter().map(Into::into)); - // Last miniblock of the batch is a "fictive" miniblock and would be replicated locally. - // We don't need to seal it explicitly, so we only put the seal miniblock command if it's not the last miniblock. + // The last L2 block of the batch is a "fictive" L2 block and would be replicated locally. + // We don't need to seal it explicitly, so we only push the seal L2 block command if it's not the last L2 block.
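+ // (A "fictive" L2 block is the empty last block that the state keeper appends on its own when sealing a batch.)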
if block.last_in_batch { new_actions.push(SyncAction::SealBatch); } else { - new_actions.push(SyncAction::SealMiniblock); + new_actions.push(SyncAction::SealL2Block); } self.next_l2_block += 1; self.prev_l2_block_hash = local_block_hash; diff --git a/core/lib/zksync_core/src/sync_layer/metrics.rs b/core/lib/zksync_core/src/sync_layer/metrics.rs index aa214719bfd0..805c6f913df2 100644 --- a/core/lib/zksync_core/src/sync_layer/metrics.rs +++ b/core/lib/zksync_core/src/sync_layer/metrics.rs @@ -8,7 +8,9 @@ use zksync_types::aggregated_operations::AggregatedActionType; #[derive(Debug, Clone, Copy, PartialEq, Eq, Hash, EncodeLabelValue, EncodeLabelSet)] #[metrics(label = "stage", rename_all = "snake_case")] pub(super) enum FetchStage { - GetMiniblockRange, + // Uses legacy naming for L2 blocks for compatibility reasons. + #[metrics(name = "get_miniblock_range")] + GetL2BlockRange, GetBlockDetails, } @@ -40,6 +42,7 @@ pub(super) struct FetcherMetrics { #[metrics(buckets = Buckets::LATENCIES)] pub requests: Family>, pub l1_batch: Family>, + // Uses legacy naming for L2 blocks for compatibility reasons. pub miniblock: Gauge, } diff --git a/core/lib/zksync_core/src/sync_layer/sync_action.rs b/core/lib/zksync_core/src/sync_layer/sync_action.rs index 4c329b67ec1f..820f045445d5 100644 --- a/core/lib/zksync_core/src/sync_layer/sync_action.rs +++ b/core/lib/zksync_core/src/sync_layer/sync_action.rs @@ -10,8 +10,8 @@ pub struct ActionQueueSender(mpsc::Sender); impl ActionQueueSender { /// Pushes a set of actions to the queue. /// - /// Requires that the actions are in the correct order: starts with a new open batch/miniblock, - /// followed by 0 or more transactions, have mandatory `SealMiniblock` and optional `SealBatch` at the end. + /// Requires that the actions are in the correct order: starts with a new open L1 batch / L2 block, + /// followed by zero or more transactions, and ends with exactly one `SealL2Block` or `SealBatch`. /// Would panic if the order is incorrect. pub(crate) async fn push_actions(&self, actions: Vec) { Self::check_action_sequence(&actions).unwrap(); @@ -28,36 +28,36 @@ impl ActionQueueSender { /// error. This function itself does not panic for the ease of testing. fn check_action_sequence(actions: &[SyncAction]) -> Result<(), String> { // Rules for the sequence: - // 1. Must start with either `OpenBatch` or `Miniblock`, both of which may be met only once. + // 1. Must start with either `OpenBatch` or `L2Block`, both of which may be met only once. // 2. Followed by a sequence of `Tx` actions which consists of 0 or more elements. - // 3. Must have either `SealMiniblock` or `SealBatch` at the end. + // 3. Must have either `SealL2Block` or `SealBatch` at the end. let mut opened = false; - let mut miniblock_sealed = false; + let mut l2_block_sealed = false; for action in actions { match action { - SyncAction::OpenBatch { .. } | SyncAction::Miniblock { .. } => { + SyncAction::OpenBatch { .. } | SyncAction::L2Block { ..
} => { if opened { - return Err(format!("Unexpected OpenBatch/Miniblock: {:?}", actions)); + return Err(format!("Unexpected OpenBatch / L2Block: {actions:?}")); } opened = true; } SyncAction::Tx(_) => { - if !opened || miniblock_sealed { - return Err(format!("Unexpected Tx: {:?}", actions)); + if !opened || l2_block_sealed { + return Err(format!("Unexpected Tx: {actions:?}")); } } - SyncAction::SealMiniblock | SyncAction::SealBatch => { - if !opened || miniblock_sealed { - return Err(format!("Unexpected SealMiniblock/SealBatch: {:?}", actions)); + SyncAction::SealL2Block | SyncAction::SealBatch => { + if !opened || l2_block_sealed { + return Err(format!("Unexpected SealL2Block / SealBatch: {actions:?}")); } - miniblock_sealed = true; + l2_block_sealed = true; } } } - if !miniblock_sealed { - return Err(format!("Incomplete sequence: {:?}", actions)); + if !l2_block_sealed { + return Err(format!("Incomplete sequence: {actions:?}")); } Ok(()) } @@ -141,20 +141,20 @@ pub(crate) enum SyncAction { params: L1BatchParams, // Additional parameters used only for sanity checks number: L1BatchNumber, - first_miniblock_number: L2BlockNumber, + first_l2_block_number: L2BlockNumber, }, - Miniblock { + L2Block { params: L2BlockParams, // Additional parameters used only for sanity checks number: L2BlockNumber, }, Tx(Box), - /// We need an explicit action for the miniblock sealing, since we fetch the whole miniblocks and already know - /// that they are sealed, but at the same time the next miniblock may not exist yet. - /// By having a dedicated action for that we prevent a situation where the miniblock is kept open on the EN until + /// We need an explicit action for the L2 block sealing, since we fetch the whole L2 blocks and already know + /// that they are sealed, but at the same time the next L2 block may not exist yet. + /// By having a dedicated action for that we prevent a situation where the L2 block is kept open on the EN until /// the next one is sealed on the main node. - SealMiniblock, - /// Similarly to `SealMiniblock` we must be able to seal the batch even if there is no next miniblock yet. + SealL2Block, + /// Similarly to `SealL2Block` we must be able to seal the batch even if there is no next L2 block yet. 
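    /// (Editor's note) `SealBatch` is terminal on its own: it seals the current (fictive) L2 block
    /// *and* closes the L1 batch, which is why `check_action_sequence()` accepts a sequence ending
    /// in either `SealL2Block` or `SealBatch`.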
SealBatch, } @@ -183,12 +183,12 @@ mod tests { }, }, number: L1BatchNumber(1), - first_miniblock_number: L2BlockNumber(1), + first_l2_block_number: L2BlockNumber(1), } } - fn miniblock() -> SyncAction { - SyncAction::Miniblock { + fn l2_block() -> SyncAction { + SyncAction::L2Block { params: L2BlockParams { timestamp: 1, virtual_blocks: 1, @@ -213,8 +213,8 @@ mod tests { FetchedTransaction::new(tx.into()).into() } - fn seal_miniblock() -> SyncAction { - SyncAction::SealMiniblock + fn seal_l2_block() -> SyncAction { + SyncAction::SealL2Block } fn seal_batch() -> SyncAction { @@ -224,19 +224,19 @@ mod tests { #[test] fn correct_sequence() { let test_vector = vec![ - vec![open_batch(), seal_miniblock()], + vec![open_batch(), seal_l2_block()], vec![open_batch(), seal_batch()], - vec![open_batch(), tx(), seal_miniblock()], - vec![open_batch(), tx(), tx(), tx(), seal_miniblock()], + vec![open_batch(), tx(), seal_l2_block()], + vec![open_batch(), tx(), tx(), tx(), seal_l2_block()], vec![open_batch(), tx(), seal_batch()], - vec![miniblock(), seal_miniblock()], - vec![miniblock(), seal_batch()], - vec![miniblock(), tx(), seal_miniblock()], - vec![miniblock(), tx(), seal_batch()], + vec![l2_block(), seal_l2_block()], + vec![l2_block(), seal_batch()], + vec![l2_block(), tx(), seal_l2_block()], + vec![l2_block(), tx(), seal_batch()], ]; for (idx, sequence) in test_vector.into_iter().enumerate() { ActionQueueSender::check_action_sequence(&sequence) - .unwrap_or_else(|_| panic!("Valid sequence #{} failed", idx)); + .unwrap_or_else(|_| panic!("Valid sequence #{idx} failed")); } } @@ -248,53 +248,47 @@ mod tests { // Incomplete sequences. (vec![open_batch()], "Incomplete sequence"), (vec![open_batch(), tx()], "Incomplete sequence"), - (vec![miniblock()], "Incomplete sequence"), - (vec![miniblock(), tx()], "Incomplete sequence"), + (vec![l2_block()], "Incomplete sequence"), + (vec![l2_block(), tx()], "Incomplete sequence"), // Unexpected tx (vec![tx()], "Unexpected Tx"), - (vec![open_batch(), seal_miniblock(), tx()], "Unexpected Tx"), - // Unexpected `OpenBatch/Miniblock` + (vec![open_batch(), seal_l2_block(), tx()], "Unexpected Tx"), + // Unexpected `OpenBatch / L2Block` ( - vec![miniblock(), miniblock()], - "Unexpected OpenBatch/Miniblock", + vec![l2_block(), l2_block()], + "Unexpected OpenBatch / L2Block", ), ( - vec![miniblock(), open_batch()], - "Unexpected OpenBatch/Miniblock", + vec![l2_block(), open_batch()], + "Unexpected OpenBatch / L2Block", ), ( - vec![open_batch(), miniblock()], - "Unexpected OpenBatch/Miniblock", + vec![open_batch(), l2_block()], + "Unexpected OpenBatch / L2Block", ), - // Unexpected `SealMiniblock` - (vec![seal_miniblock()], "Unexpected SealMiniblock"), + // Unexpected `SealL2Block` + (vec![seal_l2_block()], "Unexpected SealL2Block"), ( - vec![miniblock(), seal_miniblock(), seal_miniblock()], - "Unexpected SealMiniblock", + vec![l2_block(), seal_l2_block(), seal_l2_block()], + "Unexpected SealL2Block", ), ( - vec![open_batch(), seal_miniblock(), seal_batch(), seal_batch()], - "Unexpected SealMiniblock/SealBatch", + vec![open_batch(), seal_l2_block(), seal_batch(), seal_batch()], + "Unexpected SealL2Block / SealBatch", ), ( - vec![miniblock(), seal_miniblock(), seal_batch(), seal_batch()], - "Unexpected SealMiniblock/SealBatch", + vec![l2_block(), seal_l2_block(), seal_batch(), seal_batch()], + "Unexpected SealL2Block / SealBatch", ), - (vec![seal_batch()], "Unexpected SealMiniblock/SealBatch"), + (vec![seal_batch()], "Unexpected SealL2Block / SealBatch"), ]; for (idx, 
(sequence, expected_err)) in test_vector.into_iter().enumerate() { let Err(err) = ActionQueueSender::check_action_sequence(&sequence) else { - panic!( - "Invalid sequence passed the test. Sequence #{}, expected error: {}", - idx, expected_err - ); + panic!("Invalid sequence passed the test. Sequence #{idx}, expected error: {expected_err}"); }; assert!( err.starts_with(expected_err), - "Sequence #{} failed. Expected error: {}, got: {}", - idx, - expected_err, - err + "Sequence #{idx} failed. Expected error: {expected_err}, got: {err}" ); } } diff --git a/core/lib/zksync_core/src/sync_layer/sync_state.rs b/core/lib/zksync_core/src/sync_layer/sync_state.rs index e5d55573351c..68d5e4be51cd 100644 --- a/core/lib/zksync_core/src/sync_layer/sync_state.rs +++ b/core/lib/zksync_core/src/sync_layer/sync_state.rs @@ -1,6 +1,5 @@ use std::{sync::Arc, time::Duration}; -use anyhow::Context; use async_trait::async_trait; use serde::Serialize; use tokio::sync::watch; @@ -16,21 +15,21 @@ use crate::state_keeper::{io::IoCursor, updates::UpdatesManager, StateKeeperOutp /// `SyncState` is a structure that holds the state of the syncing process. /// The intended use case is to signalize to Web3 API whether the node is fully synced. /// Data inside is expected to be updated by both `MainNodeFetcher` (on last block available on the main node) -/// and `ExternalIO` (on latest sealed miniblock). +/// and `ExternalIO` (on latest sealed L2 block). /// -/// This structure operates on miniblocks rather than L1 batches, since this is the default unit used in the web3 API. +/// This structure operates on L2 blocks rather than L1 batches, since this is the default unit used in the web3 API. #[derive(Debug, Clone)] -pub struct SyncState(Arc>); +pub struct SyncState(Arc>); impl Default for SyncState { fn default() -> Self { - Self(Arc::new(sync::watch::channel(SyncStateInner::default()).0)) + Self(Arc::new(watch::channel(SyncStateInner::default()).0)) } } /// A threshold constant intended to keep the sync status less flaky. -/// This gives the external node some room to fetch new miniblocks without losing the sync status. -const SYNC_MINIBLOCK_DELTA: u32 = 10; +/// This gives the external node some room to fetch new L2 blocks without losing the sync status. +const SYNC_L2_BLOCK_DELTA: u32 = 10; impl SyncState { pub(crate) fn get_main_node_block(&self) -> L2BlockNumber { @@ -87,17 +86,12 @@ impl SyncState { while !*stop_receiver.borrow_and_update() { let local_block = connection_pool .connection() - .await - .context("Failed to get a connection from the pool in sync state updater")? + .await? .blocks_dal() .get_sealed_l2_block_number() - .await - .context("Failed to get the miniblock number from DB")?; + .await?; - let main_node_block = main_node_client - .get_block_number() - .await - .context("Failed to request last miniblock number from main node")?; + let main_node_block = main_node_client.get_block_number().await?; if let Some(local_block) = local_block { self.set_local_block(local_block); @@ -183,7 +177,7 @@ impl SyncStateInner { // We're ahead of the main node, this situation is handled by the re-org detector. return (true, Some(0)); }; - (block_diff <= SYNC_MINIBLOCK_DELTA, Some(block_diff)) + (block_diff <= SYNC_L2_BLOCK_DELTA, Some(block_diff)) } else { (false, None) } @@ -243,7 +237,7 @@ mod tests { // The gap is too big, still not synced. 
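        // (Editor's note) With `SYNC_L2_BLOCK_DELTA == 10`, the main node below is at block 11
        // while the local node is at block 0; `block_diff == 11 > 10`, so the node reports itself
        // as not synced.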
sync_state.set_local_block(L2BlockNumber(0)); - sync_state.set_main_node_block(L2BlockNumber(SYNC_MINIBLOCK_DELTA + 1)); + sync_state.set_main_node_block(L2BlockNumber(SYNC_L2_BLOCK_DELTA + 1)); assert!(!sync_state.is_synced()); let health = sync_state.check_health().await; @@ -257,11 +251,11 @@ mod tests { assert_matches!(health.status(), HealthStatus::Ready); // Can reach the main node last block. - sync_state.set_local_block(L2BlockNumber(SYNC_MINIBLOCK_DELTA + 1)); + sync_state.set_local_block(L2BlockNumber(SYNC_L2_BLOCK_DELTA + 1)); assert!(sync_state.is_synced()); // Main node can again move forward. - sync_state.set_main_node_block(L2BlockNumber(2 * SYNC_MINIBLOCK_DELTA + 2)); + sync_state.set_main_node_block(L2BlockNumber(2 * SYNC_L2_BLOCK_DELTA + 2)); assert!(!sync_state.is_synced()); } diff --git a/core/lib/zksync_core/src/sync_layer/tests.rs b/core/lib/zksync_core/src/sync_layer/tests.rs index 57d97f9b5202..cb0813700431 100644 --- a/core/lib/zksync_core/src/sync_layer/tests.rs +++ b/core/lib/zksync_core/src/sync_layer/tests.rs @@ -31,7 +31,7 @@ const TEST_TIMEOUT: Duration = Duration::from_secs(10); const POLL_INTERVAL: Duration = Duration::from_millis(50); pub(crate) const OPERATOR_ADDRESS: Address = Address::repeat_byte(1); -fn open_l1_batch(number: u32, timestamp: u64, first_miniblock_number: u32) -> SyncAction { +fn open_l1_batch(number: u32, timestamp: u64, first_l2_block_number: u32) -> SyncAction { SyncAction::OpenBatch { params: L1BatchParams { protocol_version: ProtocolVersionId::latest(), @@ -44,7 +44,7 @@ fn open_l1_batch(number: u32, timestamp: u64, first_miniblock_number: u32) -> Sy }, }, number: L1BatchNumber(number), - first_miniblock_number: L2BlockNumber(first_miniblock_number), + first_l2_block_number: L2BlockNumber(first_l2_block_number), } } @@ -67,12 +67,12 @@ impl StateKeeperHandles { assert!(tx_hashes.iter().all(|tx_hashes| !tx_hashes.is_empty())); let sync_state = SyncState::default(); - let (persistence, miniblock_sealer) = + let (persistence, l2_block_sealer) = StateKeeperPersistence::new(pool.clone(), Address::repeat_byte(1), 5); let output_handler = OutputHandler::new(Box::new(persistence.with_tx_insertion())) .with_handler(Box::new(sync_state.clone())); - tokio::spawn(miniblock_sealer.run()); + tokio::spawn(l2_block_sealer.run()); let io = ExternalIO::new( pool, actions, @@ -114,7 +114,7 @@ impl StateKeeperHandles { } } () = tokio::time::sleep(TEST_TIMEOUT) => { - panic!("Timed out waiting for miniblock to be sealed"); + panic!("Timed out waiting for L2 block to be sealed"); } () = self.sync_state.wait_for_local_block(want) => { self.stop_sender.send_replace(true); @@ -179,7 +179,7 @@ async fn external_io_basics(snapshot_recovery: bool) { let tx = create_l2_transaction(10, 100); let tx_hash = tx.hash(); let tx = FetchedTransaction::new(tx.into()); - let actions = vec![open_l1_batch, tx.into(), SyncAction::SealMiniblock]; + let actions = vec![open_l1_batch, tx.into(), SyncAction::SealL2Block]; let (actions_sender, action_queue) = ActionQueue::new(); let client = MockMainNodeClient::default(); @@ -191,19 +191,19 @@ async fn external_io_basics(snapshot_recovery: bool) { ) .await; actions_sender.push_actions(actions).await; - // Wait until the miniblock is sealed. + // Wait until the L2 block is sealed. state_keeper .wait_for_local_block(snapshot.l2_block_number + 1) .await; - // Check that the miniblock is persisted. - let miniblock = storage + // Check that the L2 block is persisted. 
+ let l2_block = storage .blocks_dal() .get_l2_block_header(snapshot.l2_block_number + 1) .await .unwrap() - .expect("New miniblock is not persisted"); - assert_eq!(miniblock.timestamp, snapshot.l2_block_timestamp + 1); + .expect("New L2 block is not persisted"); + assert_eq!(l2_block.timestamp, snapshot.l2_block_timestamp + 1); let expected_fee_input = BatchFeeInput::PubdataIndependent(PubdataIndependentBatchFeeModelInput { @@ -212,9 +212,9 @@ async fn external_io_basics(snapshot_recovery: bool) { l1_gas_price: 2, }); - assert_eq!(miniblock.batch_fee_input, expected_fee_input); - assert_eq!(miniblock.l1_tx_count, 0); - assert_eq!(miniblock.l2_tx_count, 1); + assert_eq!(l2_block.batch_fee_input, expected_fee_input); + assert_eq!(l2_block.l1_tx_count, 0); + assert_eq!(l2_block.l2_tx_count, 1); let tx_receipt = storage .transactions_web3_dal() @@ -256,7 +256,7 @@ async fn external_io_works_without_local_protocol_version(snapshot_recovery: boo let tx = create_l2_transaction(10, 100); let tx = FetchedTransaction::new(tx.into()); - let actions = vec![open_l1_batch, tx.into(), SyncAction::SealMiniblock]; + let actions = vec![open_l1_batch, tx.into(), SyncAction::SealL2Block]; let (actions_sender, action_queue) = ActionQueue::new(); let mut client = MockMainNodeClient::default(); @@ -279,12 +279,12 @@ async fn external_io_works_without_local_protocol_version(snapshot_recovery: boo ) .await; actions_sender.push_actions(actions).await; - // Wait until the miniblock is sealed. + // Wait until the L2 block is sealed. state_keeper .wait_for_local_block(snapshot.l2_block_number + 1) .await; - // Check that the miniblock and the protocol version for it are persisted. + // Check that the L2 block and the protocol version for it are persisted. let persisted_protocol_version = storage .protocol_versions_dal() .get_protocol_version(ProtocolVersionId::next()) @@ -300,17 +300,17 @@ async fn external_io_works_without_local_protocol_version(snapshot_recovery: boo next_protocol_version.base_system_contracts ); - let miniblock = storage + let l2_block = storage .blocks_dal() .get_l2_block_header(snapshot.l2_block_number + 1) .await .unwrap() - .expect("New miniblock is not persisted"); - assert_eq!(miniblock.timestamp, snapshot.l2_block_timestamp + 1); - assert_eq!(miniblock.protocol_version, Some(ProtocolVersionId::next())); + .expect("New L2 block is not persisted"); + assert_eq!(l2_block.timestamp, snapshot.l2_block_timestamp + 1); + assert_eq!(l2_block.protocol_version, Some(ProtocolVersionId::next())); } -pub(super) async fn run_state_keeper_with_multiple_miniblocks( +pub(super) async fn run_state_keeper_with_multiple_l2_blocks( pool: ConnectionPool, snapshot_recovery: bool, ) -> (SnapshotRecoveryStatus, Vec) { @@ -332,12 +332,12 @@ pub(super) async fn run_state_keeper_with_multiple_miniblocks( let tx = create_l2_transaction(10, 100); FetchedTransaction::new(tx.into()).into() }); - let first_miniblock_actions: Vec<_> = iter::once(open_l1_batch) + let first_l2_block_actions: Vec<_> = iter::once(open_l1_batch) .chain(txs) - .chain([SyncAction::SealMiniblock]) + .chain([SyncAction::SealL2Block]) .collect(); - let open_miniblock = SyncAction::Miniblock { + let open_l2_block = SyncAction::L2Block { params: L2BlockParams { timestamp: snapshot.l2_block_timestamp + 2, virtual_blocks: 1, @@ -348,22 +348,22 @@ pub(super) async fn run_state_keeper_with_multiple_miniblocks( let tx = create_l2_transaction(10, 100); FetchedTransaction::new(tx.into()).into() }); - let second_miniblock_actions: Vec<_> = 
iter::once(open_miniblock) + let second_l2_block_actions: Vec<_> = iter::once(open_l2_block) .chain(more_txs) - .chain([SyncAction::SealMiniblock]) + .chain([SyncAction::SealL2Block]) .collect(); let tx_hashes = extract_tx_hashes( - first_miniblock_actions + first_l2_block_actions .iter() - .chain(&second_miniblock_actions), + .chain(&second_l2_block_actions), ); let (actions_sender, action_queue) = ActionQueue::new(); let client = MockMainNodeClient::default(); let state_keeper = StateKeeperHandles::new(pool, client, action_queue, &[&tx_hashes]).await; - actions_sender.push_actions(first_miniblock_actions).await; - actions_sender.push_actions(second_miniblock_actions).await; - // Wait until both miniblocks are sealed. + actions_sender.push_actions(first_l2_block_actions).await; + actions_sender.push_actions(second_l2_block_actions).await; + // Wait until both L2 blocks are sealed. state_keeper .wait_for_local_block(snapshot.l2_block_number + 2) .await; @@ -372,27 +372,27 @@ pub(super) async fn run_state_keeper_with_multiple_miniblocks( #[test_casing(2, [false, true])] #[tokio::test] -async fn external_io_with_multiple_miniblocks(snapshot_recovery: bool) { +async fn external_io_with_multiple_l2_blocks(snapshot_recovery: bool) { let pool = ConnectionPool::::test_pool().await; let (snapshot, tx_hashes) = - run_state_keeper_with_multiple_miniblocks(pool.clone(), snapshot_recovery).await; + run_state_keeper_with_multiple_l2_blocks(pool.clone(), snapshot_recovery).await; assert_eq!(tx_hashes.len(), 8); - // Check that both miniblocks are persisted. - let tx_hashes_by_miniblock = [ + // Check that both L2 blocks are persisted. + let tx_hashes_by_l2_block = [ (snapshot.l2_block_number + 1, &tx_hashes[..5]), (snapshot.l2_block_number + 2, &tx_hashes[5..]), ]; let mut storage = pool.connection().await.unwrap(); - for (number, expected_tx_hashes) in tx_hashes_by_miniblock { - let miniblock = storage + for (number, expected_tx_hashes) in tx_hashes_by_l2_block { + let l2_block = storage .blocks_dal() .get_l2_block_header(number) .await .unwrap() - .unwrap_or_else(|| panic!("Miniblock #{number} is not persisted")); - assert_eq!(miniblock.l2_tx_count, expected_tx_hashes.len() as u16); - assert_eq!(miniblock.timestamp, u64::from(number.0)); + .unwrap_or_else(|| panic!("L2 block #{number} is not persisted")); + assert_eq!(l2_block.l2_tx_count, expected_tx_hashes.len() as u16); + assert_eq!(l2_block.timestamp, u64::from(number.0)); let sync_block = storage .sync_dal() @@ -435,29 +435,29 @@ async fn test_external_io_recovery( .wait_for_local_block(snapshot.l2_block_number + 2) .await; - // Send new actions and wait until the new miniblock is sealed. - let open_miniblock = SyncAction::Miniblock { + // Send new actions and wait until the new L2 block is sealed. 
+ let open_l2_block = SyncAction::L2Block { params: L2BlockParams { timestamp: snapshot.l2_block_timestamp + 3, virtual_blocks: 1, }, number: snapshot.l2_block_number + 3, }; - let actions = vec![open_miniblock, new_tx.into(), SyncAction::SealMiniblock]; + let actions = vec![open_l2_block, new_tx.into(), SyncAction::SealL2Block]; actions_sender.push_actions(actions).await; state_keeper .wait_for_local_block(snapshot.l2_block_number + 3) .await; let mut storage = pool.connection().await.unwrap(); - let miniblock = storage + let l2_block = storage .blocks_dal() .get_l2_block_header(snapshot.l2_block_number + 3) .await .unwrap() - .expect("New miniblock is not persisted"); - assert_eq!(miniblock.l2_tx_count, 1); - assert_eq!(miniblock.timestamp, snapshot.l2_block_timestamp + 3); + .expect("New L2 block is not persisted"); + assert_eq!(l2_block.l2_tx_count, 1); + assert_eq!(l2_block.timestamp, snapshot.l2_block_timestamp + 3); } pub(super) async fn mock_l1_batch_hash_computation(pool: ConnectionPool, number: u32) { @@ -505,16 +505,16 @@ pub(super) async fn run_state_keeper_with_multiple_l1_batches( let first_tx = create_l2_transaction(10, 100); let first_tx_hash = first_tx.hash(); let first_tx = FetchedTransaction::new(first_tx.into()); - let first_l1_batch_actions = vec![l1_batch, first_tx.into(), SyncAction::SealMiniblock]; + let first_l1_batch_actions = vec![l1_batch, first_tx.into(), SyncAction::SealL2Block]; - let fictive_miniblock = SyncAction::Miniblock { + let fictive_l2_block = SyncAction::L2Block { params: L2BlockParams { timestamp: snapshot.l2_block_timestamp + 2, virtual_blocks: 0, }, number: snapshot.l2_block_number + 2, }; - let fictive_miniblock_actions = vec![fictive_miniblock, SyncAction::SealBatch]; + let fictive_l2_block_actions = vec![fictive_l2_block, SyncAction::SealBatch]; let l1_batch = open_l1_batch( snapshot.l1_batch_number.0 + 2, @@ -524,7 +524,7 @@ pub(super) async fn run_state_keeper_with_multiple_l1_batches( let second_tx = create_l2_transaction(10, 100); let second_tx_hash = second_tx.hash(); let second_tx = FetchedTransaction::new(second_tx.into()); - let second_l1_batch_actions = vec![l1_batch, second_tx.into(), SyncAction::SealMiniblock]; + let second_l1_batch_actions = vec![l1_batch, second_tx.into(), SyncAction::SealL2Block]; let (actions_sender, action_queue) = ActionQueue::new(); let state_keeper = StateKeeperHandles::new( @@ -535,14 +535,14 @@ pub(super) async fn run_state_keeper_with_multiple_l1_batches( ) .await; actions_sender.push_actions(first_l1_batch_actions).await; - actions_sender.push_actions(fictive_miniblock_actions).await; + actions_sender.push_actions(fictive_l2_block_actions).await; actions_sender.push_actions(second_l1_batch_actions).await; let hash_task = tokio::spawn(mock_l1_batch_hash_computation( pool.clone(), snapshot.l1_batch_number.0 + 1, )); - // Wait until the miniblocks are sealed. + // Wait until the L2 blocks are sealed. 
state_keeper .wait_for_local_block(snapshot.l2_block_number + 3) .await; @@ -566,21 +566,21 @@ async fn external_io_with_multiple_l1_batches() { assert_eq!(l1_batch_header.timestamp, 1); assert_eq!(l1_batch_header.l2_tx_count, 1); - let (first_miniblock, last_miniblock) = storage + let (first_l2_block, last_l2_block) = storage .blocks_dal() .get_l2_block_range_of_l1_batch(L1BatchNumber(1)) .await .unwrap() - .expect("Miniblock range for L1 batch #1 is not persisted"); - assert_eq!(first_miniblock, L2BlockNumber(1)); - assert_eq!(last_miniblock, L2BlockNumber(2)); + .expect("L2 block range for L1 batch #1 is not persisted"); + assert_eq!(first_l2_block, L2BlockNumber(1)); + assert_eq!(last_l2_block, L2BlockNumber(2)); - let fictive_miniblock = storage + let fictive_l2_block = storage .blocks_dal() .get_l2_block_header(L2BlockNumber(2)) .await .unwrap() - .expect("Fictive miniblock #2 is not persisted"); - assert_eq!(fictive_miniblock.timestamp, 2); - assert_eq!(fictive_miniblock.l2_tx_count, 0); + .expect("Fictive L2 block #2 is not persisted"); + assert_eq!(fictive_l2_block.timestamp, 2); + assert_eq!(fictive_l2_block.l2_tx_count, 0); } diff --git a/core/lib/zksync_core/src/utils/mod.rs b/core/lib/zksync_core/src/utils/mod.rs index 29f175f4684e..5bb67647ec8e 100644 --- a/core/lib/zksync_core/src/utils/mod.rs +++ b/core/lib/zksync_core/src/utils/mod.rs @@ -135,34 +135,34 @@ pub(crate) async fn projected_first_l1_batch( Ok(snapshot_recovery.map_or(L1BatchNumber(0), |recovery| recovery.l1_batch_number + 1)) } -/// Obtains a protocol version projected to be applied for the next miniblock. This is either the version used by the last -/// sealed miniblock, or (if there are no miniblocks), one referenced in the snapshot recovery record. +/// Obtains a protocol version projected to be applied for the next L2 block. This is either the version used by the last +/// sealed L2 block, or (if there are no L2 blocks) the one referenced in the snapshot recovery record. pub(crate) async fn pending_protocol_version( storage: &mut Connection<'_, Core>, ) -> anyhow::Result<ProtocolVersionId> { static WARNED_ABOUT_NO_VERSION: AtomicBool = AtomicBool::new(false); - let last_miniblock = storage + let last_l2_block = storage .blocks_dal() .get_last_sealed_l2_block_header() .await?; - if let Some(last_miniblock) = last_miniblock { - return Ok(last_miniblock.protocol_version.unwrap_or_else(|| { - // Protocol version should be set for the most recent miniblock even in cases it's not filled - // for old miniblocks, hence the warning. We don't want to rely on this assumption, so we treat + if let Some(last_l2_block) = last_l2_block { + return Ok(last_l2_block.protocol_version.unwrap_or_else(|| { + // Protocol version should be set for the most recent L2 block even in cases where it's not filled + // for old L2 blocks, hence the warning. We don't want to rely on this assumption, so we treat // the lack of it as in other similar places, replacing with the default value. if !WARNED_ABOUT_NO_VERSION.fetch_or(true, Ordering::Relaxed) { - tracing::warn!("Protocol version not set for recent miniblock: {last_miniblock:?}"); + tracing::warn!("Protocol version not set for recent L2 block: {last_l2_block:?}"); } ProtocolVersionId::last_potentially_undefined() })); } - // No miniblocks in the storage; use snapshot recovery information. + // No L2 blocks in the storage; use snapshot recovery information. let snapshot_recovery = storage .snapshot_recovery_dal() .get_applied_snapshot_status() .await?
- .context("storage contains neither miniblocks, nor snapshot recovery info")?; + .context("storage contains neither L2 blocks, nor snapshot recovery info")?; Ok(snapshot_recovery.protocol_version) } diff --git a/core/lib/zksync_core/src/utils/testonly.rs b/core/lib/zksync_core/src/utils/testonly.rs index a35fa6544a51..171c78e0cd99 100644 --- a/core/lib/zksync_core/src/utils/testonly.rs +++ b/core/lib/zksync_core/src/utils/testonly.rs @@ -24,9 +24,8 @@ use zksync_types::{ use crate::{fee_model::BatchFeeModelInputProvider, genesis::GenesisParams}; -// FIXME: rename -/// Creates a miniblock header with the specified number and deterministic contents. -pub(crate) fn create_miniblock(number: u32) -> L2BlockHeader { +/// Creates an L2 block header with the specified number and deterministic contents. +pub(crate) fn create_l2_block(number: u32) -> L2BlockHeader { L2BlockHeader { number: L2BlockNumber(number), timestamp: number.into(), @@ -153,7 +152,7 @@ pub(crate) fn execute_l2_transaction(transaction: L2Tx) -> TransactionExecutionR #[derive(Debug)] pub(crate) struct Snapshot { pub l1_batch: L1BatchHeader, - pub miniblock: L2BlockHeader, + pub l2_block: L2BlockHeader, pub storage_logs: Vec, pub factory_deps: HashMap>, } @@ -162,7 +161,7 @@ impl Snapshot { // Constructs a dummy Snapshot based on the provided values. pub fn make( l1_batch: L1BatchNumber, - miniblock: L2BlockNumber, + l2_block: L2BlockNumber, storage_logs: &[StorageLog], ) -> Self { let genesis_params = GenesisParams::mock(); @@ -173,10 +172,10 @@ impl Snapshot { contracts.hashes(), genesis_params.protocol_version(), ); - let miniblock = L2BlockHeader { - number: miniblock, - timestamp: miniblock.0.into(), - hash: H256::from_low_u64_be(miniblock.0.into()), + let l2_block = L2BlockHeader { + number: l2_block, + timestamp: l2_block.0.into(), + hash: H256::from_low_u64_be(l2_block.0.into()), l1_tx_count: 0, l2_tx_count: 0, base_fee_per_gas: 100, @@ -192,7 +191,7 @@ impl Snapshot { }; Snapshot { l1_batch, - miniblock, + l2_block, factory_deps: [&contracts.bootloader, &contracts.default_aa] .into_iter() .map(|c| (c.hash, zksync_utils::be_words_to_bytes(&c.code))) @@ -206,10 +205,10 @@ impl Snapshot { pub(crate) async fn prepare_recovery_snapshot( storage: &mut Connection<'_, Core>, l1_batch: L1BatchNumber, - miniblock: L2BlockNumber, + l2_block: L2BlockNumber, storage_logs: &[StorageLog], ) -> SnapshotRecoveryStatus { - recover(storage, Snapshot::make(l1_batch, miniblock, storage_logs)).await + recover(storage, Snapshot::make(l1_batch, l2_block, storage_logs)).await } /// Takes a storage snapshot at the last sealed L1 batch. 
@@ -226,7 +225,7 @@ pub(crate) async fn snapshot(storage: &mut Connection<'_, Core>) -> Snapshot { .await .unwrap() .unwrap(); - let (_, miniblock) = storage + let (_, l2_block) = storage .blocks_dal() .get_l2_block_range_of_l1_batch(l1_batch.number) .await @@ -234,15 +233,15 @@ pub(crate) async fn snapshot(storage: &mut Connection<'_, Core>) -> Snapshot { .unwrap(); let all_hashes = H256::zero()..=H256::repeat_byte(0xff); Snapshot { - miniblock: storage + l2_block: storage .blocks_dal() - .get_l2_block_header(miniblock) + .get_l2_block_header(l2_block) .await .unwrap() .unwrap(), storage_logs: storage .snapshots_creator_dal() - .get_storage_logs_chunk(miniblock, l1_batch.number, all_hashes) + .get_storage_logs_chunk(l2_block, l1_batch.number, all_hashes) .await .unwrap() .into_iter() @@ -250,7 +249,7 @@ pub(crate) async fn snapshot(storage: &mut Connection<'_, Core>) -> Snapshot { .collect(), factory_deps: storage .snapshots_creator_dal() - .get_all_factory_deps(miniblock) + .get_all_factory_deps(l2_block) .await .unwrap() .into_iter() @@ -260,7 +259,7 @@ pub(crate) async fn snapshot(storage: &mut Connection<'_, Core>) -> Snapshot { } /// Recovers storage from a snapshot. -/// Miniblock and L1 batch are intentionally **not** inserted into the storage. +/// L2 block and L1 batch are intentionally **not** inserted into the storage. pub(crate) async fn recover( storage: &mut Connection<'_, Core>, snapshot: Snapshot, @@ -298,7 +297,7 @@ pub(crate) async fn recover( } storage .factory_deps_dal() - .insert_factory_deps(snapshot.miniblock.number, &snapshot.factory_deps) + .insert_factory_deps(snapshot.l2_block.number, &snapshot.factory_deps) .await .unwrap(); @@ -310,7 +309,7 @@ pub(crate) async fn recover( storage .storage_logs_dal() .insert_storage_logs( - snapshot.miniblock.number, + snapshot.l2_block.number, &[(H256::zero(), snapshot.storage_logs)], ) .await @@ -320,9 +319,9 @@ pub(crate) async fn recover( l1_batch_number: snapshot.l1_batch.number, l1_batch_timestamp: snapshot.l1_batch.timestamp, l1_batch_root_hash, - l2_block_number: snapshot.miniblock.number, - l2_block_timestamp: snapshot.miniblock.timestamp, - l2_block_hash: snapshot.miniblock.hash, + l2_block_number: snapshot.l2_block.number, + l2_block_timestamp: snapshot.l2_block.timestamp, + l2_block_hash: snapshot.l2_block.hash, protocol_version: snapshot.l1_batch.protocol_version.unwrap(), storage_logs_chunks_processed: vec![true; 100], }; @@ -334,13 +333,13 @@ pub(crate) async fn recover( storage .pruning_dal() - .soft_prune_batches_range(snapshot.l1_batch.number, snapshot.miniblock.number) + .soft_prune_batches_range(snapshot.l1_batch.number, snapshot.l2_block.number) .await .unwrap(); storage .pruning_dal() - .hard_prune_batches_range(snapshot.l1_batch.number, snapshot.miniblock.number) + .hard_prune_batches_range(snapshot.l1_batch.number, snapshot.l2_block.number) .await .unwrap(); diff --git a/core/lib/zksync_core/src/vm_runner/tests/mod.rs b/core/lib/zksync_core/src/vm_runner/tests/mod.rs index 48f4d5073069..bc56ead5d9da 100644 --- a/core/lib/zksync_core/src/vm_runner/tests/mod.rs +++ b/core/lib/zksync_core/src/vm_runner/tests/mod.rs @@ -23,7 +23,7 @@ use super::{BatchExecuteData, VmRunnerStorage, VmRunnerStorageLoader}; use crate::{ genesis::{insert_genesis_batch, GenesisParams}, utils::testonly::{ - create_l1_batch_metadata, create_l2_transaction, create_miniblock, execute_l2_transaction, + create_l1_batch_metadata, create_l2_block, create_l2_transaction, execute_l2_transaction, l1_batch_metadata_to_commitment_artifacts, }, 
}; @@ -194,7 +194,7 @@ async fn store_l2_blocks( conn.factory_deps_dal() .insert_factory_deps(l2_block_number, &factory_deps) .await?; - let mut new_l2_block = create_miniblock(l2_block_number.0); + let mut new_l2_block = create_l2_block(l2_block_number.0); l2_block_number += 1; new_l2_block.base_system_contracts_hashes = contract_hashes; new_l2_block.l2_tx_count = 1; @@ -205,7 +205,7 @@ async fn store_l2_blocks( .await?; // Insert a fictive L2 block at the end of the batch - let fictive_l2_block = create_miniblock(l2_block_number.0); + let fictive_l2_block = create_l2_block(l2_block_number.0); l2_block_number += 1; conn.blocks_dal().insert_l2_block(&fictive_l2_block).await?; diff --git a/core/node/shared_metrics/src/lib.rs b/core/node/shared_metrics/src/lib.rs index c1b4d147396b..75f9c520565f 100644 --- a/core/node/shared_metrics/src/lib.rs +++ b/core/node/shared_metrics/src/lib.rs @@ -188,8 +188,8 @@ pub struct ExternalNodeMetrics { pub sync_lag: Gauge, /// Number of the last L1 batch checked by the re-org detector or consistency checker. pub last_correct_batch: Family>, - /// Number of the last miniblock checked by the re-org detector or consistency checker. - pub last_correct_miniblock: Family>, + /// Number of the last L2 block checked by the re-org detector. + pub last_correct_l2_block: Family>, } #[vise::register] diff --git a/core/tests/snapshot-recovery-test/tests/snapshot-recovery.test.ts b/core/tests/snapshot-recovery-test/tests/snapshot-recovery.test.ts index dda7dd449a9a..80f586ec58d7 100644 --- a/core/tests/snapshot-recovery-test/tests/snapshot-recovery.test.ts +++ b/core/tests/snapshot-recovery-test/tests/snapshot-recovery.test.ts @@ -59,7 +59,7 @@ interface ConsistencyCheckerDetails { interface ReorgDetectorDetails { readonly last_correct_l1_batch?: number; - readonly last_correct_miniblock?: number; + readonly last_correct_l2_block?: number; } interface HealthCheckResponse { @@ -153,7 +153,7 @@ describe('snapshot recovery', () => { const l1BatchNumber = Math.max(...newBatchNumbers); snapshotMetadata = await getSnapshot(l1BatchNumber); console.log('Obtained latest snapshot', snapshotMetadata); - const miniblockNumber = snapshotMetadata.miniblockNumber; + const l2BlockNumber = snapshotMetadata.miniblockNumber; const protoPath = path.join(homeDir, 'core/lib/types/src/proto/mod.proto'); const root = await protobuf.load(protoPath); @@ -182,7 +182,7 @@ describe('snapshot recovery', () => { const valueOnBlockchain = await mainNode.getStorageAt( snapshotAccountAddress, snapshotKey, - miniblockNumber + l2BlockNumber ); expect(snapshotValue).to.equal(valueOnBlockchain); expect(snapshotL1BatchNumber).to.be.lessThanOrEqual(l1BatchNumber); @@ -285,9 +285,9 @@ describe('snapshot recovery', () => { const details = health.components.reorg_detector?.details; if (status === 'ready' && details !== undefined) { console.log('Received reorg detector health details', details); - if (details.last_correct_l1_batch !== undefined && details.last_correct_miniblock !== undefined) { + if (details.last_correct_l1_batch !== undefined && details.last_correct_l2_block !== undefined) { expect(details.last_correct_l1_batch).to.be.greaterThan(snapshotMetadata.l1BatchNumber); - expect(details.last_correct_miniblock).to.be.greaterThan(snapshotMetadata.miniblockNumber); + expect(details.last_correct_l2_block).to.be.greaterThan(snapshotMetadata.miniblockNumber); reorgDetectorSucceeded = true; } } From 31db44f31316d1e838f87e6f8cf9412ce0d6c9e4 Mon Sep 17 00:00:00 2001 From: koloz193 Date: Thu, 25 Apr 2024 08:36:54 
-0400 Subject: [PATCH 4/7] chore(eth_sender): refactor commit txn encoding (#1781) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit ## What ❔ ## Why ❔ ## Checklist - [ ] PR title corresponds to the body of PR (we generate changelog entries from PRs). - [ ] Tests for the changes have been added / updated. - [ ] Documentation comments have been added / updated. - [ ] Code has been formatted via `zk fmt` and `zk lint`. - [ ] Spellcheck has been run via `zk spellcheck`. - [ ] Linkcheck has been run via `zk linkcheck`. --- .../src/eth_sender/eth_tx_aggregator.rs | 133 ++++++++---------- 1 file changed, 57 insertions(+), 76 deletions(-) diff --git a/core/lib/zksync_core/src/eth_sender/eth_tx_aggregator.rs b/core/lib/zksync_core/src/eth_sender/eth_tx_aggregator.rs index 9e15c3702d08..1707469bddcb 100644 --- a/core/lib/zksync_core/src/eth_sender/eth_tx_aggregator.rs +++ b/core/lib/zksync_core/src/eth_sender/eth_tx_aggregator.rs @@ -13,9 +13,9 @@ use zksync_l1_contract_interface::{ use zksync_shared_metrics::BlockL1Stage; use zksync_types::{ aggregated_operations::AggregatedActionType, - commitment::SerializeCommitment, + commitment::{L1BatchWithMetadata, SerializeCommitment}, eth_sender::{EthTx, EthTxBlobSidecar, EthTxBlobSidecarV1, SidecarBlobV1}, - ethabi::Token, + ethabi::{Function, Token}, l2_to_l1_log::UserL2ToL1Log, protocol_version::{L1VerifierConfig, VerifierParams}, pubdata_da::PubdataDA, @@ -436,87 +436,31 @@ impl EthTxAggregator { let (calldata, sidecar) = match op.clone() { AggregatedOperation::Commit(last_committed_l1_batch, l1_batches, pubdata_da) => { - let commit_data = self.l1_commit_data_generator.l1_commit_batches( + let commit_data_base = self.l1_commit_data_generator.l1_commit_batches( &last_committed_l1_batch, &l1_batches, &pubdata_da, ); - if contracts_are_pre_shared_bridge { - if let PubdataDA::Blobs = self.aggregator.pubdata_da() { - let calldata = self - .functions - .pre_shared_bridge_commit - .encode_input(&commit_data) - .expect("Failed to encode commit transaction data"); - - let side_car = l1_batches[0] - .header - .pubdata_input - .clone() - .unwrap() - .chunks(ZK_SYNC_BYTES_PER_BLOB) - .map(|blob| { - let kzg_info = KzgInfo::new(blob); - SidecarBlobV1 { - blob: kzg_info.blob.to_vec(), - commitment: kzg_info.kzg_commitment.to_vec(), - proof: kzg_info.blob_proof.to_vec(), - versioned_hash: kzg_info.versioned_hash.to_vec(), - } - }) - .collect::>(); - - let eth_tx_sidecar = EthTxBlobSidecarV1 { blobs: side_car }; - (calldata, Some(eth_tx_sidecar.into())) - } else { - let calldata = self - .functions - .pre_shared_bridge_commit - .encode_input(&commit_data) - .expect("Failed to encode commit transaction data"); - (calldata, None) - } + let (encoding_fn, commit_data) = if contracts_are_pre_shared_bridge { + (&self.functions.pre_shared_bridge_commit, commit_data_base) } else { - args.extend(commit_data); - if let PubdataDA::Blobs = self.aggregator.pubdata_da() { - let calldata = self - .functions + args.extend(commit_data_base); + ( + self.functions .post_shared_bridge_commit .as_ref() - .expect("Missing ABI for commitBatchesSharedBridge") - .encode_input(&args) - .expect("Failed to encode commit transaction data"); - - let side_car = l1_batches[0] - .header - .pubdata_input - .clone() - .unwrap() - .chunks(ZK_SYNC_BYTES_PER_BLOB) - .map(|blob| { - let kzg_info = KzgInfo::new(blob); - SidecarBlobV1 { - blob: kzg_info.blob.to_vec(), - commitment: kzg_info.kzg_commitment.to_vec(), - proof: kzg_info.blob_proof.to_vec(), - 
versioned_hash: kzg_info.versioned_hash.to_vec(), - } - }) - .collect::>(); - - let eth_tx_sidecar = EthTxBlobSidecarV1 { blobs: side_car }; - (calldata, Some(eth_tx_sidecar.into())) - } else { - let calldata = self - .functions - .post_shared_bridge_commit - .as_ref() - .expect("Missing ABI for commitBatchesSharedBridge") - .encode_input(&args) - .expect("Failed to encode commit transaction data"); - (calldata, None) - } - } + .expect("Missing ABI for commitBatchesSharedBridge"), + args, + ) + }; + + let l1_batch_for_sidecar = if PubdataDA::Blobs == self.aggregator.pubdata_da() { + Some(l1_batches[0].clone()) + } else { + None + }; + + Self::encode_commit_data(encoding_fn, &commit_data, l1_batch_for_sidecar) } AggregatedOperation::PublishProofOnchain(op) => { let calldata = if contracts_are_pre_shared_bridge { @@ -556,6 +500,43 @@ impl EthTxAggregator { TxData { calldata, sidecar } } + fn encode_commit_data( + commit_fn: &Function, + commit_payload: &[Token], + l1_batch: Option, + ) -> (Vec, Option) { + let calldata = commit_fn + .encode_input(commit_payload) + .expect("Failed to encode commit transaction data"); + + let sidecar = match l1_batch { + None => None, + Some(l1_batch) => { + let sidecar = l1_batch + .header + .pubdata_input + .clone() + .unwrap() + .chunks(ZK_SYNC_BYTES_PER_BLOB) + .map(|blob| { + let kzg_info = KzgInfo::new(blob); + SidecarBlobV1 { + blob: kzg_info.blob.to_vec(), + commitment: kzg_info.kzg_commitment.to_vec(), + proof: kzg_info.blob_proof.to_vec(), + versioned_hash: kzg_info.versioned_hash.to_vec(), + } + }) + .collect::>(); + + let eth_tx_blob_sidecar = EthTxBlobSidecarV1 { blobs: sidecar }; + Some(eth_tx_blob_sidecar.into()) + } + }; + + (calldata, sidecar) + } + pub(super) async fn save_eth_tx( &self, storage: &mut Connection<'_, Core>, From 11898c2b58a0e04ce8bd8d4a587442b06d6c4ef3 Mon Sep 17 00:00:00 2001 From: Stanislav Bezkorovainyi Date: Thu, 25 Apr 2024 21:28:22 +0200 Subject: [PATCH 5/7] fix: Weaker assert for protocol version for operations (#1800) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit ## What ❔ ## Why ❔ ## Checklist - [ ] PR title corresponds to the body of PR (we generate changelog entries from PRs). - [ ] Tests for the changes have been added / updated. - [ ] Documentation comments have been added / updated. - [ ] Code has been formatted via `zk fmt` and `zk lint`. - [ ] Spellcheck has been run via `zk spellcheck`. - [ ] Linkcheck has been run via `zk linkcheck`. --- core/lib/zksync_core/src/eth_sender/eth_tx_aggregator.rs | 9 +++++---- 1 file changed, 5 insertions(+), 4 deletions(-) diff --git a/core/lib/zksync_core/src/eth_sender/eth_tx_aggregator.rs b/core/lib/zksync_core/src/eth_sender/eth_tx_aggregator.rs index 1707469bddcb..8667eceeadc1 100644 --- a/core/lib/zksync_core/src/eth_sender/eth_tx_aggregator.rs +++ b/core/lib/zksync_core/src/eth_sender/eth_tx_aggregator.rs @@ -427,10 +427,11 @@ impl EthTxAggregator { contracts_are_pre_shared_bridge: bool, ) -> TxData { let operation_is_pre_shared_bridge = op.protocol_version().is_pre_shared_bridge(); - assert_eq!( - contracts_are_pre_shared_bridge, - operation_is_pre_shared_bridge - ); + + // The post shared bridge contracts support pre-shared bridge operations, but vice versa is not true. 
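        // (Editor's note) Concretely, the only combination rejected below is pre-shared-bridge
        // *contracts* with a post-shared-bridge *operation*; upgraded contracts remain free to
        // settle operations produced under older protocol versions.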
+ if contracts_are_pre_shared_bridge { + assert!(operation_is_pre_shared_bridge); + } let mut args = vec![Token::Uint(self.rollup_chain_id.as_u64().into())]; From 9c6ed838ffcfe6bd8fe157c69fbfe8823826849b Mon Sep 17 00:00:00 2001 From: Alex Ostrovski Date: Fri, 26 Apr 2024 09:16:52 +0300 Subject: [PATCH 6/7] fix(api): Fix extra DB connection acquisition during tx submission (#1793) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit ## What ❔ Currently, `TxSender::submit_tx()` on the main node acquires 2 DB connections at the same time: one explicitly, and another inside the batch fee input provider. This PR eliminates the extra acquisition. It also propagates errors from the batch fee input provider so that they don't lead to panics. ## Why ❔ Holding multiple DB connections at the same time can lead to connection starvation or, in the worst case, (temporary) deadlocks. ## Checklist - [x] PR title corresponds to the body of PR (we generate changelog entries from PRs). - [x] Tests for the changes have been added / updated. - [x] Documentation comments have been added / updated. - [x] Code has been formatted via `zk fmt` and `zk lint`. - [x] Spellcheck has been run via `zk spellcheck`. - [x] Linkcheck has been run via `zk linkcheck`. --- .../src/api_server/tx_sender/mod.rs | 79 ++++++++++--------- .../src/api_server/tx_sender/tests.rs | 56 ++++++++++++- .../web3/backend_jsonrpsee/namespaces/zks.rs | 4 +- .../zksync_core/src/api_server/web3/mod.rs | 22 +++--- .../src/api_server/web3/namespaces/debug.rs | 25 +++--- .../src/api_server/web3/namespaces/zks.rs | 14 +--- core/lib/zksync_core/src/fee_model.rs | 34 ++++---- .../src/state_keeper/io/mempool.rs | 4 +- .../src/state_keeper/io/tests/mod.rs | 9 ++- .../src/state_keeper/mempool_actor.rs | 27 ++++--- core/lib/zksync_core/src/utils/testonly.rs | 4 +- 11 files changed, 173 insertions(+), 105 deletions(-) diff --git a/core/lib/zksync_core/src/api_server/tx_sender/mod.rs b/core/lib/zksync_core/src/api_server/tx_sender/mod.rs index 9c1c83cbc8cf..bea5f24d4967 100644 --- a/core/lib/zksync_core/src/api_server/tx_sender/mod.rs +++ b/core/lib/zksync_core/src/api_server/tx_sender/mod.rs @@ -309,14 +309,16 @@ impl TxSender { pub async fn submit_tx(&self, tx: L2Tx) -> Result<L2TxSubmissionResult, SubmitTxError> { let stage_latency = SANDBOX_METRICS.submit_tx[&SubmitTxStage::Validate].start(); let mut connection = self.acquire_replica_connection().await?; - let protocol_verison = pending_protocol_version(&mut connection).await?; - self.validate_tx(&tx, protocol_verison).await?; + let protocol_version = pending_protocol_version(&mut connection).await?; + drop(connection); + self.validate_tx(&tx, protocol_version).await?; stage_latency.observe(); let stage_latency = SANDBOX_METRICS.submit_tx[&SubmitTxStage::DryRun].start(); - let shared_args = self.shared_args().await; + let shared_args = self.shared_args().await?; let vm_permit = self.0.vm_concurrency_limiter.acquire().await; let vm_permit = vm_permit.ok_or(SubmitTxError::ServerShuttingDown)?; + let mut connection = self.acquire_replica_connection().await?; let block_args = BlockArgs::pending(&mut connection).await?; drop(connection); @@ -406,10 +408,18 @@ } } - async fn shared_args(&self) -> TxSharedArgs { - TxSharedArgs { + /// **Important.** For the main node, this method acquires a DB connection inside `get_batch_fee_input()`. + /// Thus, you shouldn't call it if you're holding a DB connection already.
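    /// (Editor's note) `submit_tx()` above follows this rule: it reads the pending protocol
    /// version, `drop`s its connection, and only then calls `shared_args()`, re-acquiring a
    /// connection afterwards for `BlockArgs::pending()`, so at most one pool connection is
    /// held at a time.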
+ async fn shared_args(&self) -> anyhow::Result { + let fee_input = self + .0 + .batch_fee_input_provider + .get_batch_fee_input() + .await + .context("cannot get batch fee input")?; + Ok(TxSharedArgs { operator_account: AccountTreeId::new(self.0.sender_config.fee_account_addr), - fee_input: self.0.batch_fee_input_provider.get_batch_fee_input().await, + fee_input, base_system_contracts: self.0.api_contracts.eth_call.clone(), caches: self.storage_caches(), validation_computational_gas_limit: self @@ -418,7 +428,7 @@ impl TxSender { .validation_computational_gas_limit, chain_id: self.0.sender_config.chain_id, whitelisted_tokens_for_aa: self.read_whitelisted_tokens_for_aa_cache().await, - } + }) } async fn validate_tx( @@ -439,7 +449,11 @@ impl TxSender { return Err(SubmitTxError::GasLimitIsTooBig); } - let fee_input = self.0.batch_fee_input_provider.get_batch_fee_input().await; + let fee_input = self + .0 + .batch_fee_input_provider + .get_batch_fee_input() + .await?; // TODO (SMA-1715): do not subsidize the overhead for the transaction @@ -676,25 +690,14 @@ impl TxSender { let max_gas_limit = get_max_batch_gas_limit(protocol_version.into()); drop(connection); - let fee_input = { - // For now, both L1 gas price and pubdata price are scaled with the same coefficient - let fee_input = self - .0 - .batch_fee_input_provider - .get_batch_fee_input_scaled( - self.0.sender_config.gas_price_scale_factor, - self.0.sender_config.gas_price_scale_factor, - ) - .await; - adjust_pubdata_price_for_tx( - fee_input, - tx.gas_per_pubdata_byte_limit(), - // We do not have to adjust the params to the `gasPrice` of the transaction, since - // its gas price will be amended later on to suit the `fee_input` - None, - protocol_version.into(), - ) - }; + let fee_input = adjust_pubdata_price_for_tx( + self.scaled_batch_fee_input().await?, + tx.gas_per_pubdata_byte_limit(), + // We do not have to adjust the params to the `gasPrice` of the transaction, since + // its gas price will be amended later on to suit the `fee_input` + None, + protocol_version.into(), + ); let (base_fee, gas_per_pubdata_byte) = derive_base_fee_and_gas_per_pubdata(fee_input, protocol_version.into()); @@ -910,6 +913,17 @@ impl TxSender { }) } + // For now, both L1 gas price and pubdata price are scaled with the same coefficient + async fn scaled_batch_fee_input(&self) -> anyhow::Result { + self.0 + .batch_fee_input_provider + .get_batch_fee_input_scaled( + self.0.sender_config.gas_price_scale_factor, + self.0.sender_config.gas_price_scale_factor, + ) + .await + } + pub(super) async fn eth_call( &self, block_args: BlockArgs, @@ -923,7 +937,7 @@ impl TxSender { .executor .execute_tx_eth_call( vm_permit, - self.shared_args().await, + self.shared_args().await?, self.0.replica_connection_pool.clone(), tx, block_args, @@ -942,14 +956,7 @@ impl TxSender { drop(connection); let (base_fee, _) = derive_base_fee_and_gas_per_pubdata( - // For now, both the L1 gas price and the L1 pubdata price are scaled with the same coefficient - self.0 - .batch_fee_input_provider - .get_batch_fee_input_scaled( - self.0.sender_config.gas_price_scale_factor, - self.0.sender_config.gas_price_scale_factor, - ) - .await, + self.scaled_batch_fee_input().await?, protocol_version.into(), ); Ok(base_fee) diff --git a/core/lib/zksync_core/src/api_server/tx_sender/tests.rs b/core/lib/zksync_core/src/api_server/tx_sender/tests.rs index 3cfd6593688d..b98addf62cd9 100644 --- a/core/lib/zksync_core/src/api_server/tx_sender/tests.rs +++ 
b/core/lib/zksync_core/src/api_server/tx_sender/tests.rs @@ -1,13 +1,19 @@ //! Tests for the transaction sender. +use assert_matches::assert_matches; +use multivm::interface::ExecutionResult; use zksync_config::configs::wallets::Wallets; use zksync_types::{get_nonce_key, L1BatchNumber, L2BlockNumber, StorageLog}; +use zksync_utils::u256_to_h256; use super::*; use crate::{ api_server::execution_sandbox::{testonly::MockTransactionExecutor, VmConcurrencyBarrier}, genesis::{insert_genesis_batch, GenesisParams}, - utils::testonly::{create_l2_block, prepare_recovery_snapshot, MockBatchFeeParamsProvider}, + utils::testonly::{ + create_l2_block, create_l2_transaction, prepare_recovery_snapshot, + MockBatchFeeParamsProvider, + }, }; pub(crate) async fn create_test_tx_sender( @@ -139,3 +145,51 @@ async fn getting_nonce_for_account_after_snapshot_recovery() { let nonce = tx_sender.get_expected_nonce(missing_address).await.unwrap(); assert_eq!(nonce, Nonce(0)); } + +#[tokio::test] +async fn submitting_tx_requires_one_connection() { + let pool = ConnectionPool::::constrained_test_pool(1).await; + let mut storage = pool.connection().await.unwrap(); + insert_genesis_batch(&mut storage, &GenesisParams::mock()) + .await + .unwrap(); + + let l2_chain_id = L2ChainId::default(); + let fee_input = MockBatchFeeParamsProvider::default() + .get_batch_fee_input_scaled(1.0, 1.0) + .await + .unwrap(); + let (base_fee, gas_per_pubdata) = + derive_base_fee_and_gas_per_pubdata(fee_input, ProtocolVersionId::latest().into()); + let tx = create_l2_transaction(base_fee, gas_per_pubdata); + let tx_hash = tx.hash(); + + // Manually set sufficient balance for the tx initiator. + let balance_key = storage_key_for_eth_balance(&tx.initiator_account()); + let storage_log = StorageLog::new_write_log(balance_key, u256_to_h256(U256::one() << 64)); + storage + .storage_logs_dal() + .append_storage_logs(L2BlockNumber(0), &[(H256::zero(), vec![storage_log])]) + .await + .unwrap(); + drop(storage); + + let mut tx_executor = MockTransactionExecutor::default(); + tx_executor.set_tx_responses(move |received_tx, _| { + assert_eq!(received_tx.hash(), tx_hash); + ExecutionResult::Success { output: vec![] } + }); + let tx_executor = tx_executor.into(); + let (tx_sender, _) = create_test_tx_sender(pool.clone(), l2_chain_id, tx_executor).await; + + let submission_result = tx_sender.submit_tx(tx).await.unwrap(); + assert_matches!(submission_result, L2TxSubmissionResult::Added); + + let mut storage = pool.connection().await.unwrap(); + storage + .transactions_web3_dal() + .get_transaction_by_hash(tx_hash, l2_chain_id) + .await + .unwrap() + .expect("transaction is not persisted"); +} diff --git a/core/lib/zksync_core/src/api_server/web3/backend_jsonrpsee/namespaces/zks.rs b/core/lib/zksync_core/src/api_server/web3/backend_jsonrpsee/namespaces/zks.rs index 087de5e20d88..d5f7c1d652f6 100644 --- a/core/lib/zksync_core/src/api_server/web3/backend_jsonrpsee/namespaces/zks.rs +++ b/core/lib/zksync_core/src/api_server/web3/backend_jsonrpsee/namespaces/zks.rs @@ -141,7 +141,9 @@ impl ZksNamespaceServer for ZksNamespace { } async fn get_l1_gas_price(&self) -> RpcResult { - Ok(self.get_l1_gas_price_impl().await) + self.get_l1_gas_price_impl() + .await + .map_err(|err| self.current_method().map_err(err)) } async fn get_fee_params(&self) -> RpcResult { diff --git a/core/lib/zksync_core/src/api_server/web3/mod.rs b/core/lib/zksync_core/src/api_server/web3/mod.rs index 9aa8cda6cade..12fd0d8e2727 100644 --- a/core/lib/zksync_core/src/api_server/web3/mod.rs +++ 
b/core/lib/zksync_core/src/api_server/web3/mod.rs @@ -367,36 +367,36 @@ impl ApiServer { let mut rpc = RpcModule::new(()); if let Some(pub_sub) = pub_sub { rpc.merge(pub_sub.into_rpc()) - .expect("Can't merge eth pubsub namespace"); + .context("cannot merge eth pubsub namespace")?; } + if namespaces.contains(&Namespace::Debug) { + rpc.merge(DebugNamespace::new(rpc_state.clone()).await?.into_rpc()) + .context("cannot merge debug namespace")?; + } if namespaces.contains(&Namespace::Eth) { rpc.merge(EthNamespace::new(rpc_state.clone()).into_rpc()) - .expect("Can't merge eth namespace"); + .context("cannot merge eth namespace")?; } if namespaces.contains(&Namespace::Net) { rpc.merge(NetNamespace::new(zksync_network_id).into_rpc()) - .expect("Can't merge net namespace"); + .context("cannot merge net namespace")?; } if namespaces.contains(&Namespace::Web3) { rpc.merge(Web3Namespace.into_rpc()) - .expect("Can't merge web3 namespace"); + .context("cannot merge web3 namespace")?; } if namespaces.contains(&Namespace::Zks) { rpc.merge(ZksNamespace::new(rpc_state.clone()).into_rpc()) - .expect("Can't merge zks namespace"); + .context("cannot merge zks namespace")?; } if namespaces.contains(&Namespace::En) { rpc.merge(EnNamespace::new(rpc_state.clone()).into_rpc()) - .expect("Can't merge en namespace"); - } - if namespaces.contains(&Namespace::Debug) { - rpc.merge(DebugNamespace::new(rpc_state.clone()).await.into_rpc()) - .expect("Can't merge debug namespace"); + .context("cannot merge en namespace")?; } if namespaces.contains(&Namespace::Snapshots) { rpc.merge(SnapshotsNamespace::new(rpc_state).into_rpc()) - .expect("Can't merge snapshots namespace"); + .context("cannot merge snapshots namespace")?; } Ok(rpc) } diff --git a/core/lib/zksync_core/src/api_server/web3/namespaces/debug.rs b/core/lib/zksync_core/src/api_server/web3/namespaces/debug.rs index b3b0e9ce8ab5..8bb5e06ddc05 100644 --- a/core/lib/zksync_core/src/api_server/web3/namespaces/debug.rs +++ b/core/lib/zksync_core/src/api_server/web3/namespaces/debug.rs @@ -30,22 +30,23 @@ pub(crate) struct DebugNamespace { } impl DebugNamespace { - pub async fn new(state: RpcState) -> Self { + pub async fn new(state: RpcState) -> anyhow::Result { let api_contracts = ApiContracts::load_from_disk(); - Self { + let fee_input_provider = &state.tx_sender.0.batch_fee_input_provider; + let batch_fee_input = fee_input_provider + .get_batch_fee_input_scaled( + state.api_config.estimate_gas_scale_factor, + state.api_config.estimate_gas_scale_factor, + ) + .await + .context("cannot get batch fee input")?; + + Ok(Self { // For now, the same scaling is used for both the L1 gas price and the pubdata price - batch_fee_input: state - .tx_sender - .0 - .batch_fee_input_provider - .get_batch_fee_input_scaled( - state.api_config.estimate_gas_scale_factor, - state.api_config.estimate_gas_scale_factor, - ) - .await, + batch_fee_input, state, api_contracts, - } + }) } fn sender_config(&self) -> &TxSenderConfig { diff --git a/core/lib/zksync_core/src/api_server/web3/namespaces/zks.rs b/core/lib/zksync_core/src/api_server/web3/namespaces/zks.rs index 6e3aa80cf806..970b89f30d75 100644 --- a/core/lib/zksync_core/src/api_server/web3/namespaces/zks.rs +++ b/core/lib/zksync_core/src/api_server/web3/namespaces/zks.rs @@ -465,16 +465,10 @@ impl ZksNamespace { } #[tracing::instrument(skip(self))] - pub async fn get_l1_gas_price_impl(&self) -> U64 { - let gas_price = self - .state - .tx_sender - .0 - .batch_fee_input_provider - .get_batch_fee_input() - .await - .l1_gas_price(); - 
gas_price.into() + pub async fn get_l1_gas_price_impl(&self) -> Result { + let fee_input_provider = &self.state.tx_sender.0.batch_fee_input_provider; + let fee_input = fee_input_provider.get_batch_fee_input().await?; + Ok(fee_input.l1_gas_price().into()) } #[tracing::instrument(skip(self))] diff --git a/core/lib/zksync_core/src/fee_model.rs b/core/lib/zksync_core/src/fee_model.rs index d905e22f3564..165f5f73b9f0 100644 --- a/core/lib/zksync_core/src/fee_model.rs +++ b/core/lib/zksync_core/src/fee_model.rs @@ -1,5 +1,6 @@ use std::{fmt, sync::Arc}; +use anyhow::Context as _; use zksync_dal::{ConnectionPool, Core, CoreDal}; use zksync_types::{ fee_model::{ @@ -21,10 +22,10 @@ pub trait BatchFeeModelInputProvider: fmt::Debug + 'static + Send + Sync { &self, l1_gas_price_scale_factor: f64, l1_pubdata_price_scale_factor: f64, - ) -> BatchFeeInput { + ) -> anyhow::Result { let params = self.get_fee_model_params(); - match params { + Ok(match params { FeeParams::V1(params) => BatchFeeInput::L1Pegged(compute_batch_fee_model_input_v1( params, l1_gas_price_scale_factor, @@ -36,16 +37,18 @@ pub trait BatchFeeModelInputProvider: fmt::Debug + 'static + Send + Sync { l1_pubdata_price_scale_factor, )) } - } + }) } + /// Returns the fee model parameters. + fn get_fee_model_params(&self) -> FeeParams; +} + +impl dyn BatchFeeModelInputProvider { /// Returns the batch fee input as-is, i.e. without any scaling for the L1 gas and pubdata prices. - async fn get_batch_fee_input(&self) -> BatchFeeInput { + pub async fn get_batch_fee_input(&self) -> anyhow::Result { self.get_batch_fee_input_scaled(1.0, 1.0).await } - - /// Returns the fee model parameters. - fn get_fee_model_params(&self) -> FeeParams; } /// The struct that represents the batch fee input provider to be used in the main node of the server, i.e. @@ -79,7 +82,7 @@ impl MainNodeFeeInputProvider { } } -/// The fee model provider to be used in the API. It returns the maximal batch fee input between the projected main node one and +/// The fee model provider to be used in the API. It returns the maximum batch fee input between the projected main node one and /// the one from the last sealed L2 block. #[derive(Debug)] pub(crate) struct ApiFeeInputProvider { @@ -105,24 +108,23 @@ impl BatchFeeModelInputProvider for ApiFeeInputProvider { &self, l1_gas_price_scale_factor: f64, l1_pubdata_price_scale_factor: f64, - ) -> BatchFeeInput { + ) -> anyhow::Result { let inner_input = self .inner .get_batch_fee_input_scaled(l1_gas_price_scale_factor, l1_pubdata_price_scale_factor) - .await; + .await + .context("cannot get batch fee input from base provider")?; let last_l2_block_params = self .connection_pool .connection_tagged("api_fee_input_provider") - .await - .unwrap() + .await? .blocks_dal() .get_last_sealed_l2_block_header() - .await - .unwrap(); + .await?; - last_l2_block_params + Ok(last_l2_block_params .map(|header| inner_input.stricter(header.batch_fee_input)) - .unwrap_or(inner_input) + .unwrap_or(inner_input)) } /// Returns the fee model parameters. 
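A design detail worth noting in the `fee_model.rs` diff above: the unscaled `get_batch_fee_input()` moves out of the trait into an `impl dyn BatchFeeModelInputProvider` block, so it stays callable on trait objects (e.g. `Arc<dyn ...>`) while no longer being overridable by implementors; only the scaled method remains a customization point. A minimal self-contained sketch of that pattern (editor's illustration with stand-in names, not the crate's actual types):

```rust
// Trait with one overridable method; the convenience alias lives on the trait object.
trait FeeInputProvider {
    fn fee_input_scaled(&self, gas_scale: f64, pubdata_scale: f64) -> (u64, u64);
}

impl dyn FeeInputProvider {
    /// Unscaled variant: callable on `&dyn FeeInputProvider`, but implementors
    /// cannot override it, so it is guaranteed to stay a thin alias.
    fn fee_input(&self) -> (u64, u64) {
        self.fee_input_scaled(1.0, 1.0)
    }
}

struct ConstProvider;

impl FeeInputProvider for ConstProvider {
    fn fee_input_scaled(&self, gas_scale: f64, pubdata_scale: f64) -> (u64, u64) {
        ((100.0 * gas_scale) as u64, (250.0 * pubdata_scale) as u64)
    }
}

fn main() {
    let provider: &dyn FeeInputProvider = &ConstProvider;
    assert_eq!(provider.fee_input(), (100, 250));
    assert_eq!(provider.fee_input_scaled(1.5, 1.0), (150, 250));
}
```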
diff --git a/core/lib/zksync_core/src/state_keeper/io/mempool.rs b/core/lib/zksync_core/src/state_keeper/io/mempool.rs
index 0aa176a2d4c8..89159482f316 100644
--- a/core/lib/zksync_core/src/state_keeper/io/mempool.rs
+++ b/core/lib/zksync_core/src/state_keeper/io/mempool.rs
@@ -179,7 +179,9 @@ impl StateKeeperIO for MempoolIO {
                 self.batch_fee_input_provider.as_ref(),
                 protocol_version.into(),
             )
-            .await;
+            .await
+            .context("failed creating L2 transaction filter")?;
+
             if !self.mempool.has_next(&self.filter) {
                 tokio::time::sleep(self.delay_interval).await;
                 continue;
diff --git a/core/lib/zksync_core/src/state_keeper/io/tests/mod.rs b/core/lib/zksync_core/src/state_keeper/io/tests/mod.rs
index f440d6ec7921..279977beaf9f 100644
--- a/core/lib/zksync_core/src/state_keeper/io/tests/mod.rs
+++ b/core/lib/zksync_core/src/state_keeper/io/tests/mod.rs
@@ -111,7 +111,8 @@ async fn test_filter_with_no_pending_batch(deployment_mode: DeploymentMode) {
         &tester.create_batch_fee_input_provider().await,
         ProtocolVersionId::latest().into(),
     )
-    .await;
+    .await
+    .unwrap();
 
     // Create a mempool without pending batch and ensure that filter is not initialized just yet.
     let (mut mempool, mut guard) = tester.create_test_mempool_io(connection_pool).await;
@@ -160,7 +161,8 @@ async fn test_timestamps_are_distinct(
         &tester.create_batch_fee_input_provider().await,
         ProtocolVersionId::latest().into(),
     )
-    .await;
+    .await
+    .unwrap();
     tester.insert_tx(&mut guard, tx_filter.fee_per_gas, tx_filter.gas_per_pubdata);
 
     let l1_batch_params = mempool
@@ -402,7 +404,8 @@ async fn l2_block_processing_after_snapshot_recovery(deployment_mode: Deployment
         &tester.create_batch_fee_input_provider().await,
         ProtocolVersionId::latest().into(),
     )
-    .await;
+    .await
+    .unwrap();
     let tx = tester.insert_tx(
         &mut mempool_guard,
         tx_filter.fee_per_gas,
diff --git a/core/lib/zksync_core/src/state_keeper/mempool_actor.rs b/core/lib/zksync_core/src/state_keeper/mempool_actor.rs
index a5af44fb9405..119c39d399fc 100644
--- a/core/lib/zksync_core/src/state_keeper/mempool_actor.rs
+++ b/core/lib/zksync_core/src/state_keeper/mempool_actor.rs
@@ -21,15 +21,14 @@ use crate::{fee_model::BatchFeeModelInputProvider, utils::pending_protocol_versi
 pub async fn l2_tx_filter(
     batch_fee_input_provider: &dyn BatchFeeModelInputProvider,
     vm_version: VmVersion,
-) -> L2TxFilter {
-    let fee_input = batch_fee_input_provider.get_batch_fee_input().await;
-
+) -> anyhow::Result<L2TxFilter> {
+    let fee_input = batch_fee_input_provider.get_batch_fee_input().await?;
     let (base_fee, gas_per_pubdata) = derive_base_fee_and_gas_per_pubdata(fee_input, vm_version);
-    L2TxFilter {
+    Ok(L2TxFilter {
         fee_input,
         fee_per_gas: base_fee,
         gas_per_pubdata: gas_per_pubdata as u32,
-    }
+    })
 }
 
 #[derive(Debug)]
@@ -92,7 +91,8 @@ impl MempoolFetcher {
                 self.batch_fee_input_provider.as_ref(),
                 protocol_version.into(),
             )
-            .await;
+            .await
+            .context("failed creating L2 transaction filter")?;
 
             let transactions = storage
                 .transactions_dal()
@@ -221,8 +221,9 @@ mod tests {
         drop(storage);
 
         let mempool = MempoolGuard::new(PriorityOpId(0), 100);
-        let fee_params_provider = Arc::new(MockBatchFeeParamsProvider::default());
-        let fee_input = fee_params_provider.get_batch_fee_input().await;
+        let fee_params_provider: Arc<dyn BatchFeeModelInputProvider> =
+            Arc::new(MockBatchFeeParamsProvider::default());
+        let fee_input = fee_params_provider.get_batch_fee_input().await.unwrap();
         let (base_fee, gas_per_pubdata) =
             derive_base_fee_and_gas_per_pubdata(fee_input, ProtocolVersionId::latest().into());
 
@@ -279,8 +280,9 @@ mod tests {
         drop(storage);
 
         let mempool = MempoolGuard::new(PriorityOpId(0), 100);
-        let fee_params_provider = Arc::new(MockBatchFeeParamsProvider::default());
-        let fee_input = fee_params_provider.get_batch_fee_input().await;
+        let fee_params_provider: Arc<dyn BatchFeeModelInputProvider> =
+            Arc::new(MockBatchFeeParamsProvider::default());
+        let fee_input = fee_params_provider.get_batch_fee_input().await.unwrap();
         let (base_fee, gas_per_pubdata) =
             derive_base_fee_and_gas_per_pubdata(fee_input, ProtocolVersionId::latest().into());
 
@@ -320,8 +322,9 @@ mod tests {
         drop(storage);
 
         let mempool = MempoolGuard::new(PriorityOpId(0), 100);
-        let fee_params_provider = Arc::new(MockBatchFeeParamsProvider::default());
-        let fee_input = fee_params_provider.get_batch_fee_input().await;
+        let fee_params_provider: Arc<dyn BatchFeeModelInputProvider> =
+            Arc::new(MockBatchFeeParamsProvider::default());
+        let fee_input = fee_params_provider.get_batch_fee_input().await.unwrap();
         let (base_fee, gas_per_pubdata) =
             derive_base_fee_and_gas_per_pubdata(fee_input, ProtocolVersionId::latest().into());
 
diff --git a/core/lib/zksync_core/src/utils/testonly.rs b/core/lib/zksync_core/src/utils/testonly.rs
index 171c78e0cd99..a7e89ed03727 100644
--- a/core/lib/zksync_core/src/utils/testonly.rs
+++ b/core/lib/zksync_core/src/utils/testonly.rs
@@ -5,7 +5,7 @@ use multivm::utils::get_max_gas_per_pubdata_byte;
 use zksync_contracts::BaseSystemContractsHashes;
 use zksync_dal::{Connection, Core, CoreDal};
 use zksync_merkle_tree::{domain::ZkSyncTree, TreeInstruction};
-use zksync_system_constants::ZKPORTER_IS_AVAILABLE;
+use zksync_system_constants::{get_intrinsic_constants, ZKPORTER_IS_AVAILABLE};
 use zksync_types::{
     block::{L1BatchHeader, L2BlockHeader},
     commitment::{
@@ -110,7 +110,7 @@ pub(crate) fn l1_batch_metadata_to_commitment_artifacts(
 /// Creates an L2 transaction with randomized parameters.
 pub(crate) fn create_l2_transaction(fee_per_gas: u64, gas_per_pubdata: u64) -> L2Tx {
     let fee = Fee {
-        gas_limit: 1000_u64.into(),
+        gas_limit: (get_intrinsic_constants().l2_tx_intrinsic_gas * 2).into(),
         max_fee_per_gas: fee_per_gas.into(),
         max_priority_fee_per_gas: 0_u64.into(),
         gas_per_pubdata_limit: gas_per_pubdata.into(),

From 356be4eb58fe1610535d3ca3c005b3d4a48c4d14 Mon Sep 17 00:00:00 2001
From: pompon0
Date: Fri, 26 Apr 2024 09:57:51 +0200
Subject: [PATCH 7/7] fix: updated protobuf ci (#1803)

It turns out that the current implementation of the check compares a
given PR against the head of the base branch. If some proto files have
been updated on the base branch (so the PR and the base branch have
diverged), the compatibility check will think that the PR wants to
revert the changes made on the base branch.

This PR fixes that by making the check compare the PR against the
lowest common ancestor of the PR and the base branch.
---
 .github/workflows/protobuf.yaml | 28 ++++++++++++++++++++++------
 1 file changed, 22 insertions(+), 6 deletions(-)

diff --git a/.github/workflows/protobuf.yaml b/.github/workflows/protobuf.yaml
index 9e2d909125bb..0b17cb74c008 100644
--- a/.github/workflows/protobuf.yaml
+++ b/.github/workflows/protobuf.yaml
@@ -11,7 +11,10 @@ on:
     # (unless we improve our github setup).
     # Therefore on post-merge we will execute the
     # compatibility check as well (TODO: alerting).
-    branches: ["main"]
+    branches: [ "main" ]
+
+permissions:
+  contents: read
 
 env:
   CARGO_TERM_COLOR: always
@@ -21,18 +24,27 @@ env:
   SCCACHE_GHA_ENABLED: "true"
   RUST_BACKTRACE: "1"
   SQLX_OFFLINE: true,
+  # github.base_ref -> github.head_ref for pull_request
+  BASE: ${{ github.event.pull_request.base.sha || github.event.before }}
+  # github.event.before -> github.event.after for push
+  HEAD: ${{ github.event.pull_request.head.sha || github.event.after }}
 
 jobs:
   compatibility:
     runs-on: [ubuntu-22.04-github-hosted-16core]
     steps:
-      # github.base_ref -> github.head_ref for pull_request
-      # github.event.before -> github.event.after for push
       - uses: mozilla-actions/sccache-action@v0.0.3
+
+      # before
       - uses: actions/checkout@v4
         with:
-          ref: ${{ github.base_ref || github.event.before }}
+          ref: ${{ env.BASE }}
           path: before
+          fetch-depth: 0 # fetches all branches and tags, which is needed to compute the LCA.
+      - name: checkout LCA
+        run:
+          git checkout $(git merge-base $BASE $HEAD)
+        working-directory: ./before
       - name: compile before
         run: cargo check --all-targets
         working-directory: ./before/
@@ -41,9 +53,11 @@ jobs:
           perl -ne 'print "$1\n" if /PROTOBUF_DESCRIPTOR="(.*)"/'
           `find ./before/target/debug/build/*/output`
           | xargs cat > ./before.binpb
+
+      # after
       - uses: actions/checkout@v4
         with:
-          ref: ${{ github.head_ref || github.event.after }}
+          ref: ${{ env.HEAD }}
           path: after
       - name: compile after
         run: cargo check --all-targets
@@ -53,8 +67,10 @@ jobs:
           perl -ne 'print "$1\n" if /PROTOBUF_DESCRIPTOR="(.*)"/'
           `find ./after/target/debug/build/*/output`
           | xargs cat > ./after.binpb
+
+      # compare
       - uses: bufbuild/buf-setup-action@v1
         with:
          github_token: ${{ github.token }}
       - name: buf breaking
-        run: buf breaking './after.binpb' --against './before.binpb' --config '{"version":"v1","breaking":{"use":["WIRE_JSON"]}}' --error-format 'github-actions'
+        run: buf breaking './after.binpb' --against './before.binpb' --config '{"version":"v1","breaking":{"use":["WIRE_JSON","WIRE"]}}' --error-format 'github-actions'
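Reviewer note on the workflow change above: the essential fix is that the "before" snapshot is no longer the tip of the base branch but the lowest common ancestor (LCA) of the PR and the base branch, computed with `git merge-base`, so changes that landed on the base branch after the PR diverged are not flagged as reverts. For illustration only, here is the same logic as a small Rust sketch; it is not part of the patch, assumes `git` is on `PATH`, and the `origin/main`/`HEAD` refs are placeholders:

```rust
use std::process::Command;

/// Checks out the lowest common ancestor of `base` and `head`, mirroring the
/// workflow step `git checkout $(git merge-base $BASE $HEAD)`.
fn checkout_lca(base: &str, head: &str) -> std::io::Result<()> {
    // `git merge-base` prints the LCA commit hash to stdout.
    let out = Command::new("git").args(["merge-base", base, head]).output()?;
    assert!(out.status.success(), "git merge-base failed");
    let lca = String::from_utf8_lossy(&out.stdout).trim().to_string();

    // Detach onto the LCA so the "before" build reflects the fork point,
    // not later commits on the base branch.
    let status = Command::new("git").args(["checkout", &lca]).status()?;
    assert!(status.success(), "git checkout failed");
    Ok(())
}

fn main() -> std::io::Result<()> {
    checkout_lca("origin/main", "HEAD")
}
```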