From ac61fedb5756ed700e35f231a364b9c933423ab8 Mon Sep 17 00:00:00 2001
From: Alex Ostrovski
Date: Wed, 5 Jun 2024 10:15:27 +0300
Subject: [PATCH 1/8] feat(en): Allow recovery from specific snapshot (#2137)
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

## What ❔

Allows recovering a node from a specific snapshot specified at the start of recovery.

## Why ❔

Useful at least for testing recovery and pruning end-to-end on the testnet. There, L1 batches are produced very slowly, so it makes sense to recover from an earlier snapshot in order to meaningfully test pruning. (A usage sketch follows the file summary below.)

## Checklist

- [x] PR title corresponds to the body of PR (we generate changelog entries from PRs).
- [x] Tests for the changes have been added / updated.
- [x] Documentation comments have been added / updated.
- [x] Code has been formatted via `zk fmt` and `zk lint`.
- [x] Spellcheck has been run via `zk spellcheck`.
---
 core/bin/external_node/src/config/mod.rs      |  27 ++--
 core/bin/external_node/src/init.rs            |  30 ++--
 core/bin/external_node/src/main.rs            |  11 +-
 core/lib/snapshots_applier/src/lib.rs         |  66 +++++++--
 core/lib/snapshots_applier/src/tests/mod.rs   | 138 +++++++++++++++++-
 core/lib/snapshots_applier/src/tests/utils.rs |  99 +++++++++++--
 6 files changed, 314 insertions(+), 57 deletions(-)
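The sketch below condenses how the pieces introduced in this patch fit together. It is illustrative only: the helper name and signature are not part of the PR, while `SnapshotsApplierTask`, `set_snapshot_l1_batch`, and the constructor arguments are taken from the diffs that follow.

```rust
use std::sync::Arc;

use zksync_dal::{ConnectionPool, Core};
use zksync_object_store::ObjectStore;
use zksync_snapshots_applier::{
    SnapshotsApplierConfig, SnapshotsApplierMainNodeClient, SnapshotsApplierTask,
};
use zksync_types::L1BatchNumber;

/// Hypothetical helper wiring an optional batch override into the applier task,
/// mirroring what `init.rs` does in this patch.
async fn recover_from_snapshot(
    pool: ConnectionPool<Core>,
    main_node_client: Box<dyn SnapshotsApplierMainNodeClient>,
    blob_store: Arc<dyn ObjectStore>,
    snapshot_l1_batch_override: Option<L1BatchNumber>,
) -> anyhow::Result<()> {
    let mut task = SnapshotsApplierTask::new(
        SnapshotsApplierConfig::default(),
        pool,
        main_node_client,
        blob_store,
    );
    if let Some(batch) = snapshot_l1_batch_override {
        // Ignored if recovery has already completed or resumes an earlier run.
        task.set_snapshot_l1_batch(batch);
    }
    task.run().await?;
    Ok(())
}
```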
diff --git a/core/bin/external_node/src/config/mod.rs b/core/bin/external_node/src/config/mod.rs
index 08fd955297ed..3d94e833217a 100644
--- a/core/bin/external_node/src/config/mod.rs
+++ b/core/bin/external_node/src/config/mod.rs
@@ -25,8 +25,8 @@ use zksync_node_api_server::{
 use zksync_protobuf_config::proto;
 use zksync_snapshots_applier::SnapshotsApplierConfig;
 use zksync_types::{
-    api::BridgeAddresses, commitment::L1BatchCommitmentMode, url::SensitiveUrl, Address, L1ChainId,
-    L2ChainId, ETHEREUM_ADDRESS,
+    api::BridgeAddresses, commitment::L1BatchCommitmentMode, url::SensitiveUrl, Address,
+    L1BatchNumber, L1ChainId, L2ChainId, ETHEREUM_ADDRESS,
 };
 use zksync_web3_decl::{
     client::{DynClient, L2},
@@ -746,6 +746,8 @@ pub(crate) struct ExperimentalENConfig {
     pub state_keeper_db_max_open_files: Option<NonZeroU32>,

     // Snapshot recovery
+    /// L1 batch number of the snapshot to use during recovery. Specifying this parameter is mostly useful for testing.
+    pub snapshots_recovery_l1_batch: Option<L1BatchNumber>,
     /// Approximate chunk size (measured in the number of entries) to recover in a single iteration.
     /// Reasonable values are order of 100,000 (meaning an iteration takes several seconds).
     ///
@@ -775,6 +777,7 @@ impl ExperimentalENConfig {
             state_keeper_db_block_cache_capacity_mb:
                 Self::default_state_keeper_db_block_cache_capacity_mb(),
             state_keeper_db_max_open_files: None,
+            snapshots_recovery_l1_batch: None,
             snapshots_recovery_tree_chunk_size: Self::default_snapshots_recovery_tree_chunk_size(),
             commitment_generator_max_parallelism: None,
         }
@@ -807,21 +810,11 @@ pub(crate) fn read_consensus_config() -> anyhow::Result<Option<ConsensusConfig>>
     ))
 }

-/// Configuration for snapshot recovery. Loaded optionally, only if snapshot recovery is enabled.
-#[derive(Debug)]
-pub(crate) struct SnapshotsRecoveryConfig {
-    pub snapshots_object_store: ObjectStoreConfig,
-}
-
-impl SnapshotsRecoveryConfig {
-    pub fn new() -> anyhow::Result<Self> {
-        let snapshots_object_store = envy::prefixed("EN_SNAPSHOTS_OBJECT_STORE_")
-            .from_env::<ObjectStoreConfig>()
-            .context("failed loading snapshot object store config from env variables")?;
-        Ok(Self {
-            snapshots_object_store,
-        })
-    }
+/// Configuration for snapshot recovery. Should be loaded optionally, only if snapshot recovery is enabled.
+pub(crate) fn snapshot_recovery_object_store_config() -> anyhow::Result<ObjectStoreConfig> {
+    envy::prefixed("EN_SNAPSHOTS_OBJECT_STORE_")
+        .from_env::<ObjectStoreConfig>()
+        .context("failed loading snapshot object store config from env variables")
 }

 #[derive(Debug, Deserialize)]
diff --git a/core/bin/external_node/src/init.rs b/core/bin/external_node/src/init.rs
index 0f4ae9a80362..fb30628e3890 100644
--- a/core/bin/external_node/src/init.rs
+++ b/core/bin/external_node/src/init.rs
@@ -12,7 +12,13 @@ use zksync_snapshots_applier::{SnapshotsApplierConfig, SnapshotsApplierTask};
 use zksync_types::{L1BatchNumber, L2ChainId};
 use zksync_web3_decl::client::{DynClient, L2};

-use crate::config::SnapshotsRecoveryConfig;
+use crate::config::snapshot_recovery_object_store_config;
+
+#[derive(Debug)]
+pub(crate) struct SnapshotRecoveryConfig {
+    /// If not specified, the latest snapshot will be used.
+    pub snapshot_l1_batch_override: Option<L1BatchNumber>,
+}

 #[derive(Debug)]
 enum InitDecision {
@@ -27,7 +33,7 @@ pub(crate) async fn ensure_storage_initialized(
     main_node_client: Box<DynClient<L2>>,
     app_health: &AppHealthCheck,
     l2_chain_id: L2ChainId,
-    consider_snapshot_recovery: bool,
+    recovery_config: Option<SnapshotRecoveryConfig>,
 ) -> anyhow::Result<()> {
     let mut storage = pool.connection_tagged("en").await?;
     let genesis_l1_batch = storage
@@ -57,7 +63,7 @@ pub(crate) async fn ensure_storage_initialized(
         }
         (None, None) => {
             tracing::info!("Node has neither genesis L1 batch, nor snapshot recovery info");
-            if consider_snapshot_recovery {
+            if recovery_config.is_some() {
                 InitDecision::SnapshotRecovery
             } else {
                 InitDecision::Genesis
@@ -78,25 +84,31 @@ pub(crate) async fn ensure_storage_initialized(
             .context("performing genesis failed")?;
         }
         InitDecision::SnapshotRecovery => {
-            anyhow::ensure!(
-                consider_snapshot_recovery,
+            let recovery_config = recovery_config.context(
                 "Snapshot recovery is required to proceed, but it is not enabled. Enable by setting \
                  `EN_SNAPSHOTS_RECOVERY_ENABLED=true` env variable to the node binary, or use a Postgres dump for recovery"
-            );
+            )?;

             tracing::warn!("Proceeding with snapshot recovery. This is an experimental feature; use at your own risk");
-            let recovery_config = SnapshotsRecoveryConfig::new()?;
-            let blob_store = ObjectStoreFactory::new(recovery_config.snapshots_object_store)
+            let object_store_config = snapshot_recovery_object_store_config()?;
+            let blob_store = ObjectStoreFactory::new(object_store_config)
                 .create_store()
                 .await;

             let config = SnapshotsApplierConfig::default();
-            let snapshots_applier_task = SnapshotsApplierTask::new(
+            let mut snapshots_applier_task = SnapshotsApplierTask::new(
                 config,
                 pool,
                 Box::new(main_node_client.for_component("snapshot_recovery")),
                 blob_store,
             );
+            if let Some(snapshot_l1_batch) = recovery_config.snapshot_l1_batch_override {
+                tracing::info!(
+                    "Using a specific snapshot with L1 batch #{snapshot_l1_batch}; this may not work \
+                     if the snapshot is too old (order of several weeks old) or non-existent"
+                );
+                snapshots_applier_task.set_snapshot_l1_batch(snapshot_l1_batch);
+            }
             app_health.insert_component(snapshots_applier_task.health_check())?;

             let recovery_started_at = Instant::now();
diff --git a/core/bin/external_node/src/main.rs b/core/bin/external_node/src/main.rs
index 584356e755bf..05f4b2ba9d43 100644
--- a/core/bin/external_node/src/main.rs
+++ b/core/bin/external_node/src/main.rs
@@ -55,7 +55,7 @@ use zksync_web3_decl::{
 use crate::{
     config::ExternalNodeConfig,
     helpers::{MainNodeHealthCheck, ValidateChainIdsTask},
-    init::ensure_storage_initialized,
+    init::{ensure_storage_initialized, SnapshotRecoveryConfig},
     metrics::RUST_METRICS,
 };

@@ -908,12 +908,19 @@ async fn run_node(
     task_handles.extend(prometheus_task);

     // Make sure that the node storage is initialized either via genesis or snapshot recovery.
+    let recovery_config = config
+        .optional
+        .snapshots_recovery_enabled
+        .then_some(SnapshotRecoveryConfig {
+            snapshot_l1_batch_override: config.experimental.snapshots_recovery_l1_batch,
+        });
     ensure_storage_initialized(
         connection_pool.clone(),
         main_node_client.clone(),
         &app_health,
         config.required.l2_chain_id,
-        config.optional.snapshots_recovery_enabled,
+        recovery_config,
     )
     .await?;
     let sigint_receiver = env.setup_sigint_handler();
diff --git a/core/lib/snapshots_applier/src/lib.rs b/core/lib/snapshots_applier/src/lib.rs
index bcf4b3c14329..b0024f78433f 100644
--- a/core/lib/snapshots_applier/src/lib.rs
+++ b/core/lib/snapshots_applier/src/lib.rs
@@ -123,7 +123,14 @@ pub trait SnapshotsApplierMainNodeClient: fmt::Debug + Send + Sync {
         number: L2BlockNumber,
     ) -> EnrichedClientResult<Option<api::BlockDetails>>;

-    async fn fetch_newest_snapshot(&self) -> EnrichedClientResult<Option<SnapshotHeader>>;
+    async fn fetch_newest_snapshot_l1_batch_number(
+        &self,
+    ) -> EnrichedClientResult<Option<L1BatchNumber>>;
+
+    async fn fetch_snapshot(
+        &self,
+        l1_batch_number: L1BatchNumber,
+    ) -> EnrichedClientResult<Option<SnapshotHeader>>;

     async fn fetch_tokens(
         &self,
@@ -153,17 +160,23 @@ impl SnapshotsApplierMainNodeClient for Box<DynClient<L2>> {
             .await
     }

-    async fn fetch_newest_snapshot(&self) -> EnrichedClientResult<Option<SnapshotHeader>> {
+    async fn fetch_newest_snapshot_l1_batch_number(
+        &self,
+    ) -> EnrichedClientResult<Option<L1BatchNumber>> {
         let snapshots = self
             .get_all_snapshots()
             .rpc_context("get_all_snapshots")
             .await?;
-        let Some(newest_snapshot) = snapshots.snapshots_l1_batch_numbers.first() else {
-            return Ok(None);
-        };
-        self.get_snapshot_by_l1_batch_number(*newest_snapshot)
+        Ok(snapshots.snapshots_l1_batch_numbers.first().copied())
+    }
+
+    async fn fetch_snapshot(
+        &self,
+        l1_batch_number: L1BatchNumber,
+    ) -> EnrichedClientResult<Option<SnapshotHeader>> {
+        self.get_snapshot_by_l1_batch_number(l1_batch_number)
             .rpc_context("get_snapshot_by_l1_batch_number")
-            .with_arg("number", newest_snapshot)
+            .with_arg("number", &l1_batch_number)
             .await
     }

@@ -179,7 +192,7 @@ impl SnapshotsApplierMainNodeClient for Box<DynClient<L2>> {
 }

 /// Snapshot applier configuration options.
-#[derive(Debug)]
+#[derive(Debug, Clone)]
 pub struct SnapshotsApplierConfig {
     /// Number of retries for transient errors before giving up on recovery (i.e., returning an error
     /// from [`Self::run()`]).
@@ -223,6 +236,7 @@ pub struct SnapshotApplierTaskStats {

 #[derive(Debug)]
 pub struct SnapshotsApplierTask {
+    snapshot_l1_batch: Option<L1BatchNumber>,
     config: SnapshotsApplierConfig,
     health_updater: HealthUpdater,
     connection_pool: ConnectionPool<Core>,
@@ -238,6 +252,7 @@ impl SnapshotsApplierTask {
         blob_store: Arc<dyn ObjectStore>,
     ) -> Self {
         Self {
+            snapshot_l1_batch: None,
             config,
             health_updater: ReactiveHealthCheck::new("snapshot_recovery").1,
             connection_pool,
@@ -246,6 +261,11 @@ impl SnapshotsApplierTask {
         }
     }

+    /// Specifies the L1 batch to recover from. This setting is ignored if recovery is complete or resumed.
+    pub fn set_snapshot_l1_batch(&mut self, number: L1BatchNumber) {
+        self.snapshot_l1_batch = Some(number);
+    }
+
     /// Returns the health check for snapshot recovery.
     pub fn health_check(&self) -> ReactiveHealthCheck {
         self.health_updater.subscribe()
@@ -270,6 +290,7 @@ impl SnapshotsApplierTask {
                 self.main_node_client.as_ref(),
                 &self.blob_store,
                 &self.health_updater,
+                self.snapshot_l1_batch,
                 self.config.max_concurrency.get(),
             )
             .await;
@@ -324,6 +345,7 @@ impl SnapshotRecoveryStrategy {
     async fn new(
         storage: &mut Connection<'_, Core>,
         main_node_client: &dyn SnapshotsApplierMainNodeClient,
+        snapshot_l1_batch: Option<L1BatchNumber>,
     ) -> Result<(Self, SnapshotRecoveryStatus), SnapshotsApplierError> {
         let latency =
             METRICS.initial_stage_duration[&InitialStage::FetchMetadataFromMainNode].start();
@@ -350,7 +372,8 @@ impl SnapshotRecoveryStrategy {
             return Err(SnapshotsApplierError::Fatal(err));
         }

-        let recovery_status = Self::create_fresh_recovery_status(main_node_client).await?;
+        let recovery_status =
+            Self::create_fresh_recovery_status(main_node_client, snapshot_l1_batch).await?;

         let storage_logs_count = storage
             .storage_logs_dal()
@@ -373,12 +396,20 @@ impl SnapshotRecoveryStrategy {

     async fn create_fresh_recovery_status(
         main_node_client: &dyn SnapshotsApplierMainNodeClient,
+        snapshot_l1_batch: Option<L1BatchNumber>,
     ) -> Result<SnapshotRecoveryStatus, SnapshotsApplierError> {
-        let snapshot_response = main_node_client.fetch_newest_snapshot().await?;
+        let l1_batch_number = match snapshot_l1_batch {
+            Some(num) => num,
+            None => main_node_client
+                .fetch_newest_snapshot_l1_batch_number()
+                .await?
+                .context("no snapshots on main node; snapshot recovery is impossible")?,
+        };
+        let snapshot_response = main_node_client.fetch_snapshot(l1_batch_number).await?;

-        let snapshot = snapshot_response
-            .context("no snapshots on main node; snapshot recovery is impossible")?;
-        let l1_batch_number = snapshot.l1_batch_number;
+        let snapshot = snapshot_response.with_context(|| {
+            format!("snapshot for L1 batch #{l1_batch_number} is not present on main node")
+        })?;
         let l2_block_number = snapshot.l2_block_number;
         tracing::info!(
             "Found snapshot with data up to L1 batch #{l1_batch_number}, L2 block #{l2_block_number}, \
@@ -461,6 +492,7 @@ impl<'a> SnapshotsApplier<'a> {
         main_node_client: &'a dyn SnapshotsApplierMainNodeClient,
         blob_store: &'a dyn ObjectStore,
         health_updater: &'a HealthUpdater,
+        snapshot_l1_batch: Option<L1BatchNumber>,
         max_concurrency: usize,
     ) -> Result<(SnapshotRecoveryStrategy, SnapshotRecoveryStatus), SnapshotsApplierError> {
         // While the recovery is in progress, the node is healthy (no error has occurred),
@@ -472,8 +504,12 @@ impl<'a> SnapshotsApplier<'a> {
             .await?;
         let mut storage_transaction = storage.start_transaction().await?;

-        let (strategy, applied_snapshot_status) =
-            SnapshotRecoveryStrategy::new(&mut storage_transaction, main_node_client).await?;
+        let (strategy, applied_snapshot_status) = SnapshotRecoveryStrategy::new(
+            &mut storage_transaction,
+            main_node_client,
+            snapshot_l1_batch,
+        )
+        .await?;
         tracing::info!("Chosen snapshot recovery strategy: {strategy:?} with status: {applied_snapshot_status:?}");
         let created_from_scratch = match strategy {
             SnapshotRecoveryStrategy::Completed => return Ok((strategy, applied_snapshot_status)),
diff --git a/core/lib/snapshots_applier/src/tests/mod.rs b/core/lib/snapshots_applier/src/tests/mod.rs
index 59a95792c1ca..e61f76455372 100644
--- a/core/lib/snapshots_applier/src/tests/mod.rs
+++ b/core/lib/snapshots_applier/src/tests/mod.rs
@@ -21,6 +21,7 @@ use self::utils::{
     random_storage_logs, MockMainNodeClient, ObjectStoreWithErrors,
 };
 use super::*;
+use crate::tests::utils::HangingObjectStore;

 mod utils;

@@ -142,6 +143,131 @@ async fn snapshots_creator_can_successfully_recover_db(
     assert!(!stats.done_work);
 }

+#[tokio::test]
+async fn applier_recovers_explicitly_specified_snapshot() {
+    let pool = ConnectionPool::<Core>::test_pool().await;
+    let expected_status = mock_recovery_status();
+    let storage_logs = random_storage_logs(expected_status.l1_batch_number, 200);
+    let (object_store, client) = prepare_clients(&expected_status, &storage_logs).await;
+
+    let mut task = SnapshotsApplierTask::new(
+        SnapshotsApplierConfig::for_tests(),
+        pool.clone(),
+        Box::new(client),
+        object_store,
+    );
+    task.set_snapshot_l1_batch(expected_status.l1_batch_number);
+    let stats = task.run().await.unwrap();
+    assert!(stats.done_work);
+
+    let mut storage = pool.connection().await.unwrap();
+    let all_storage_logs = storage
+        .storage_logs_dal()
+        .dump_all_storage_logs_for_tests()
+        .await;
+    assert_eq!(all_storage_logs.len(), storage_logs.len());
+}
+
+#[tokio::test]
+async fn applier_error_for_missing_explicitly_specified_snapshot() {
+    let pool = ConnectionPool::<Core>::test_pool().await;
+    let expected_status = mock_recovery_status();
+    let storage_logs = random_storage_logs(expected_status.l1_batch_number, 200);
+    let (object_store, client) = prepare_clients(&expected_status, &storage_logs).await;
+
+    let mut task = SnapshotsApplierTask::new(
+        SnapshotsApplierConfig::for_tests(),
+        pool,
+        Box::new(client),
+        object_store,
+    );
+    task.set_snapshot_l1_batch(expected_status.l1_batch_number + 1);
+
+    let err = task.run().await.unwrap_err();
+    assert!(
+        format!("{err:#}").contains("not present on main node"),
+        "{err:#}"
+    );
+}
+
+#[tokio::test]
+async fn snapshot_applier_recovers_after_stopping() {
+    let pool = ConnectionPool::<Core>::test_pool().await;
+    let mut expected_status = mock_recovery_status();
+    expected_status.storage_logs_chunks_processed = vec![true; 10];
+    let storage_logs = random_storage_logs(expected_status.l1_batch_number, 200);
+    let (object_store, client) = prepare_clients(&expected_status, &storage_logs).await;
+    let (stopping_object_store, mut stop_receiver) =
+        HangingObjectStore::new(object_store.clone(), 1);
+
+    let mut config = SnapshotsApplierConfig::for_tests();
+    config.max_concurrency = NonZeroUsize::new(1).unwrap();
+    let task = SnapshotsApplierTask::new(
+        config.clone(),
+        pool.clone(),
+        Box::new(client.clone()),
+        Arc::new(stopping_object_store),
+    );
+    let task_handle = tokio::spawn(task.run());
+
+    // Wait until the first storage logs chunk is requested (the object store hangs up at this point)
+    stop_receiver.wait_for(|&count| count > 1).await.unwrap();
+    assert!(!task_handle.is_finished());
+    task_handle.abort();
+
+    // Check that factory deps have been persisted, but no storage logs.
+    let mut storage = pool.connection().await.unwrap();
+    let all_factory_deps = storage
+        .factory_deps_dal()
+        .dump_all_factory_deps_for_tests()
+        .await;
+    assert!(!all_factory_deps.is_empty());
+    let all_storage_logs = storage
+        .storage_logs_dal()
+        .dump_all_storage_logs_for_tests()
+        .await;
+    assert!(all_storage_logs.is_empty(), "{all_storage_logs:?}");
+
+    // Recover 3 storage log chunks and stop again
+    let (stopping_object_store, mut stop_receiver) =
+        HangingObjectStore::new(object_store.clone(), 3);
+
+    let task = SnapshotsApplierTask::new(
+        config.clone(),
+        pool.clone(),
+        Box::new(client.clone()),
+        Arc::new(stopping_object_store),
+    );
+    let task_handle = tokio::spawn(task.run());
+
+    stop_receiver.wait_for(|&count| count > 3).await.unwrap();
+    assert!(!task_handle.is_finished());
+    task_handle.abort();
+
+    let all_storage_logs = storage
+        .storage_logs_dal()
+        .dump_all_storage_logs_for_tests()
+        .await;
+    assert!(all_storage_logs.len() < storage_logs.len());
+
+    // Recover remaining 7 (10 - 3) storage log chunks.
+    let (stopping_object_store, _) = HangingObjectStore::new(object_store.clone(), 7);
+    let mut task = SnapshotsApplierTask::new(
+        config,
+        pool.clone(),
+        Box::new(client),
+        Arc::new(stopping_object_store),
+    );
+    task.set_snapshot_l1_batch(expected_status.l1_batch_number); // check that this works fine
+    task.run().await.unwrap();
+
+    let all_storage_logs = storage
+        .storage_logs_dal()
+        .dump_all_storage_logs_for_tests()
+        .await;
+    assert_eq!(all_storage_logs.len(), storage_logs.len());
+}
+
 #[tokio::test]
 async fn health_status_immediately_after_task_start() {
     #[derive(Debug, Clone)]
@@ -165,7 +291,17 @@ async fn health_status_immediately_after_task_start() {
         future::pending().await
     }

-    async fn fetch_newest_snapshot(&self) -> EnrichedClientResult<Option<SnapshotHeader>> {
+    async fn fetch_newest_snapshot_l1_batch_number(
+        &self,
+    ) -> EnrichedClientResult<Option<L1BatchNumber>> {
+        self.0.wait().await;
+        future::pending().await
+    }
+
+    async fn fetch_snapshot(
+        &self,
+        _l1_batch_number: L1BatchNumber,
+    ) -> EnrichedClientResult<Option<SnapshotHeader>> {
         self.0.wait().await;
         future::pending().await
     }
diff --git a/core/lib/snapshots_applier/src/tests/utils.rs b/core/lib/snapshots_applier/src/tests/utils.rs
index 4629d8c0a2fb..c853481ab53b 100644
--- a/core/lib/snapshots_applier/src/tests/utils.rs
+++ b/core/lib/snapshots_applier/src/tests/utils.rs
@@ -1,8 +1,9 @@
 //! Test utils.

-use std::{collections::HashMap, fmt, sync::Arc};
+use std::{collections::HashMap, fmt, future, sync::Arc};

 use async_trait::async_trait;
+use tokio::sync::watch;
 use zksync_object_store::{Bucket, ObjectStore, ObjectStoreError, ObjectStoreFactory};
 use zksync_types::{
     api,
@@ -45,8 +46,23 @@ impl SnapshotsApplierMainNodeClient for MockMainNodeClient {
         Ok(self.fetch_l2_block_responses.get(&number).cloned())
     }

-    async fn fetch_newest_snapshot(&self) -> EnrichedClientResult<Option<SnapshotHeader>> {
-        Ok(self.fetch_newest_snapshot_response.clone())
+    async fn fetch_newest_snapshot_l1_batch_number(
+        &self,
+    ) -> EnrichedClientResult<Option<L1BatchNumber>> {
+        Ok(self
+            .fetch_newest_snapshot_response
+            .as_ref()
+            .map(|response| response.l1_batch_number))
+    }
+
+    async fn fetch_snapshot(
+        &self,
+        l1_batch_number: L1BatchNumber,
+    ) -> EnrichedClientResult<Option<SnapshotHeader>> {
+        Ok(self
+            .fetch_newest_snapshot_response
+            .clone()
+            .filter(|response| response.l1_batch_number == l1_batch_number))
+    }

     async fn fetch_tokens(
         &self,
@@ -223,16 +239,12 @@ pub(super) fn mock_snapshot_header(status: &SnapshotRecoveryStatus) -> SnapshotHeader {
         version: SnapshotVersion::Version0.into(),
         l1_batch_number: status.l1_batch_number,
         l2_block_number: status.l2_block_number,
-        storage_logs_chunks: vec![
-            SnapshotStorageLogsChunkMetadata {
-                chunk_id: 0,
-                filepath: "file0".to_string(),
-            },
-            SnapshotStorageLogsChunkMetadata {
-                chunk_id: 1,
-                filepath: "file1".to_string(),
-            },
-        ],
+        storage_logs_chunks: (0..status.storage_logs_chunks_processed.len() as u64)
+            .map(|chunk_id| SnapshotStorageLogsChunkMetadata {
+                chunk_id,
+                filepath: format!("file{chunk_id}"),
+            })
+            .collect(),
         factory_deps_filepath: "some_filepath".to_string(),
     }
 }
@@ -289,3 +301,64 @@ pub(super) async fn prepare_clients(
     );
     (object_store, client)
 }
+
+/// Object store wrapper that hangs up after processing the specified number of requests.
+/// Used to emulate the snapshot applier being restarted since, if it's configured to have concurrency 1,
+/// the applier will request an object from the store strictly after fully processing all previously requested objects.
+#[derive(Debug)]
+pub(super) struct HangingObjectStore {
+    inner: Arc<dyn ObjectStore>,
+    stop_after_count: usize,
+    count_sender: watch::Sender<usize>,
+}
+
+impl HangingObjectStore {
+    pub fn new(
+        inner: Arc<dyn ObjectStore>,
+        stop_after_count: usize,
+    ) -> (Self, watch::Receiver<usize>) {
+        let (count_sender, count_receiver) = watch::channel(0);
+        let this = Self {
+            inner,
+            stop_after_count,
+            count_sender,
+        };
+        (this, count_receiver)
+    }
+}
+
+#[async_trait]
+impl ObjectStore for HangingObjectStore {
+    async fn get_raw(&self, bucket: Bucket, key: &str) -> Result<Vec<u8>, ObjectStoreError> {
+        let mut should_proceed = true;
+        self.count_sender.send_modify(|count| {
+            *count += 1;
+            if dbg!(*count) > self.stop_after_count {
+                should_proceed = false;
+            }
+        });
+
+        if dbg!(should_proceed) {
+            self.inner.get_raw(bucket, key).await
+        } else {
+            future::pending().await // Hang up the snapshot applier task
+        }
+    }
+
+    async fn put_raw(
+        &self,
+        _bucket: Bucket,
+        _key: &str,
+        _value: Vec<u8>,
+    ) -> Result<(), ObjectStoreError> {
+        unreachable!("Should not be used in snapshot applier")
+    }
+
+    async fn remove_raw(&self, _bucket: Bucket, _key: &str) -> Result<(), ObjectStoreError> {
+        unreachable!("Should not be used in snapshot applier")
+    }
+
+    fn storage_prefix_raw(&self, bucket: Bucket) -> String {
+        self.inner.storage_prefix_raw(bucket)
+    }
+}
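The interruption tests above follow a reusable pattern: run the applier against a store that hangs after a set number of fetches, wait for the fetch counter to pass that limit, then abort the task to simulate a node restart. A condensed sketch of that driver logic follows; the helper name is illustrative, while `SnapshotsApplierTask` and the `watch`-based counter come from this patch.

```rust
use zksync_snapshots_applier::SnapshotsApplierTask;

/// Hypothetical test helper: drives `task` until the wrapped object store
/// reports more than `limit` fetches, then aborts it mid-recovery.
async fn run_recovery_until_hang(
    task: SnapshotsApplierTask,
    mut fetch_count: tokio::sync::watch::Receiver<usize>,
    limit: usize,
) {
    let handle = tokio::spawn(task.run());
    // `HangingObjectStore` bumps the counter on every `get_raw()` call and
    // hangs once the counter exceeds the configured limit.
    fetch_count.wait_for(|&count| count > limit).await.unwrap();
    assert!(!handle.is_finished());
    handle.abort(); // emulates the node stopping mid-recovery
}
```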
From 9e39f13c29788e66645ea57f623555c4b36b8aff Mon Sep 17 00:00:00 2001
From: Marcin M <128217157+mm-zk@users.noreply.github.com>
Date: Wed, 5 Jun 2024 10:54:59 +0200
Subject: [PATCH 2/8] feat!: updated boojum and nightly rust compiler (#2126)
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

## What ❔

* Updated the boojum version, which allows us to update the nightly Rust compiler.

## Why ❔

* Our Rust nightly is quite old (around 1 year).

---
 .dockerignore                                           |  1 +
 .../build-contract-verifier-template.yml                |  2 +-
 .github/workflows/build-core-template.yml               |  2 +-
 Cargo.lock                                              | 19 ++------
 core/bin/external_node/src/helpers.rs                   | 12 +++--
 core/lib/basic_types/src/web3/mod.rs                    | 44 +++++++++----------
 .../src/eip712_signature/struct_builder.rs              |  4 +-
 core/lib/dal/src/events_dal.rs                          |  4 +-
 core/lib/dal/src/models/mod.rs                          |  1 -
 .../lib/dal/src/models/storage_fee_monitor.rs           | 16 -------
 core/lib/dal/src/pruning_dal/tests.rs                   |  4 +-
 core/lib/eth_client/src/clients/mock.rs                 |  6 +--
 core/lib/mempool/src/mempool_store.rs                   |  2 +-
 core/lib/merkle_tree/src/storage/patch.rs               |  2 +-
 core/lib/mini_merkle_tree/src/lib.rs                    |  2 +-
 .../src/versions/vm_1_3_2/event_sink.rs                 |  3 +-
 .../versions/vm_1_3_2/oracles/tracer/utils.rs           |  2 +-
 core/lib/multivm/src/versions/vm_1_3_2/vm.rs            |  3 +-
 .../versions/vm_1_4_1/old_vm/event_sink.rs              |  3 +-
 .../src/versions/vm_1_4_1/old_vm/utils.rs               |  3 ++
 .../src/versions/vm_1_4_1/tracers/utils.rs              |  2 +-
 .../versions/vm_1_4_2/old_vm/event_sink.rs              |  3 +-
 .../src/versions/vm_1_4_2/old_vm/utils.rs               |  3 ++
 .../src/versions/vm_1_4_2/tracers/utils.rs              |  2 +-
 .../old_vm/event_sink.rs                                |  3 +-
 .../vm_boojum_integration/old_vm/utils.rs               |  3 ++
 .../vm_boojum_integration/tracers/utils.rs              |  2 +-
 .../versions/vm_latest/old_vm/event_sink.rs             |  3 +-
 .../src/versions/vm_latest/old_vm/utils.rs              |  3 ++
 .../vm_latest/tests/require_eip712.rs                   |  6 +--
 .../src/versions/vm_latest/tracers/utils.rs             |  2 +-
 .../multivm/src/versions/vm_m5/event_sink.rs            |  3 +-
 .../src/versions/vm_m5/oracles/tracer.rs                |  2 +-
 .../multivm/src/versions/vm_m6/event_sink.rs            |  3 +-
 .../versions/vm_m6/oracles/tracer/utils.rs              |  2 +-
 core/lib/multivm/src/versions/vm_m6/vm.rs               |  3 +-
 .../old_vm/event_sink.rs                                |  3 +-
 .../vm_refunds_enhancement/old_vm/utils.rs              |  3 ++
 .../vm_refunds_enhancement/tracers/utils.rs             |  2 +-
 .../vm_virtual_blocks/old_vm/event_sink.rs              |  3 +-
 .../vm_virtual_blocks/old_vm/utils.rs                   |  3 ++
 .../vm_virtual_blocks/tracers/traits.rs                 |  1 +
 .../vm_virtual_blocks/tracers/utils.rs                  |  2 +-
 core/lib/protobuf_config/src/secrets.rs                 |  2 +-
 core/lib/snapshots_applier/src/tests/mod.rs             |  3 +-
 core/lib/state/src/shadow_storage.rs                    |  1 +
 core/lib/types/src/l1/mod.rs                            |  2 +-
 core/lib/types/src/protocol_upgrade.rs                  |  2 +-
 .../types/src/storage_writes_deduplicator.rs            |  2 +-
 core/lib/types/src/transaction_request.rs               |  2 +-
 core/node/eth_sender/src/publish_criterion.rs           |  1 +
 core/node/eth_watch/src/lib.rs                          |  2 +-
 .../src/batch_status_updater/tests.rs                   |  4 +-
 core/node/node_sync/src/tests.rs                        |  2 +-
 .../src/request_processor.rs                            |  8 ++--
 .../io/seal_logic/l2_block_seal_subtasks.rs             |  2 +-
 core/tests/loadnext/src/sdk/mod.rs                      |  5 +--
 docker/build-base/Dockerfile                            |  4 +-
 docker/proof-fri-gpu-compressor/Dockerfile              |  4 +-
 docker/prover-gpu-fri/Dockerfile                        |  4 +-
 .../20.04_amd64_cuda_11_8.Dockerfile                    |  2 +-
 .../20.04_amd64_cuda_12_0.Dockerfile                    |  2 +-
 docker/zk-environment/Dockerfile                        |  4 +-
 prover/Cargo.lock                                       | 19 ++------
 prover/proof_fri_compressor/README.md                   |  2 +-
 prover/rust-toolchain                                   |  2 +-
 prover/witness_vector_generator/README.md               |  2 +-
 rust-toolchain                                          |  2 +-
 68 files changed, 135 insertions(+), 147 deletions(-)
 delete mode 100644 core/lib/dal/src/models/storage_fee_monitor.rs

diff --git a/.dockerignore b/.dockerignore
index 88f241c5275e..ee2e8af78dd3 100644
--- a/.dockerignore
+++ b/.dockerignore
@@ -46,3 +46,4 @@ contracts/.git
 !etc/env/dev.toml
 !etc/env/consensus_secrets.yaml
 !etc/env/consensus_config.yaml
+!rust-toolchain
diff --git a/.github/workflows/build-contract-verifier-template.yml b/.github/workflows/build-contract-verifier-template.yml
index f4f6939389bf..3c2e8377129f 100644
--- a/.github/workflows/build-contract-verifier-template.yml
+++ b/.github/workflows/build-contract-verifier-template.yml
@@ -138,7 +138,7 @@ jobs:
           COMPONENT: ${{ matrix.components }}
           PLATFORM: ${{ matrix.platforms }}
         run: |
-          ci_run rustup default nightly-2023-08-21
+          ci_run rustup default nightly-2024-05-07
           platform=$(echo $PLATFORM | tr '/' '-')
           ci_run zk docker $DOCKER_ACTION --custom-tag=${IMAGE_TAG_SUFFIX} --platform=${PLATFORM} $COMPONENT
       - name: Show sccache stats
diff --git a/.github/workflows/build-core-template.yml b/.github/workflows/build-core-template.yml
index de8ab1505d8b..1a8d4e610bb9 100644
--- a/.github/workflows/build-core-template.yml
+++ b/.github/workflows/build-core-template.yml
@@ -147,7 +147,7 @@ jobs:
          COMPONENT: ${{ matrix.components }}
           PLATFORM: ${{ matrix.platforms }}
         run: |
-          ci_run rustup default nightly-2023-08-21
+          ci_run rustup default nightly-2024-05-07
           platform=$(echo $PLATFORM | tr '/' '-')
           ci_run zk docker $DOCKER_ACTION --custom-tag=${IMAGE_TAG_SUFFIX} --platform=${PLATFORM} $COMPONENT
       - name: Show sccache stats
diff --git a/Cargo.lock b/Cargo.lock
index af0d4d352203..fd45d942b146 100644
--- a/Cargo.lock
+++ b/Cargo.lock
@@ -692,7 +692,7 @@ dependencies = [
 [[package]]
 name = "boojum"
 version = "0.2.0"
-source = "git+https://github.com/matter-labs/era-boojum.git?branch=main#cd631c9a1d61ec21d7bd22eb74949d43ecfad0fd"
+source = "git+https://github.com/matter-labs/era-boojum.git?branch=main#4bcb11f0610302110ae8109af01d5b652191b2f6"
 dependencies = [
  "arrayvec 0.7.4",
  "bincode",
@@ -709,7 +709,6 @@ dependencies = [
  "lazy_static",
  "num-modular",
  "num_cpus",
- "packed_simd",
 "pairing_ce 0.28.5 (git+https://github.com/matter-labs/pairing.git)",
 "rand 0.8.5",
 "rayon",
@@ -1533,7 +1532,7 @@ dependencies = [
 [[package]]
 name = "cs_derive"
 version = "0.1.0"
-source = "git+https://github.com/matter-labs/era-boojum.git?branch=main#cd631c9a1d61ec21d7bd22eb74949d43ecfad0fd"
+source = "git+https://github.com/matter-labs/era-boojum.git?branch=main#4bcb11f0610302110ae8109af01d5b652191b2f6"
 dependencies = [
  "proc-macro-error",
  "proc-macro2 1.0.69",
@@ -1562,9 +1561,9 @@ dependencies = [
 [[package]]
 name = "curve25519-dalek"
-version = "4.1.1"
+version = "4.1.2"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "e89b8c6a2e4b1f45971ad09761aafb85514a84744b67a95e32c3cc1352d1f65c"
+checksum = "0a677b8922c94e01bdbb12126b0bc852f00447528dee1782229af9c720c3f348"
 dependencies = [
  "cfg-if 1.0.0",
  "cpufeatures",
@@ -4208,16 +4207,6 @@ dependencies = [
  "sha2 0.10.8",
 ]

-[[package]]
-name = "packed_simd"
-version = "0.3.9"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "1f9f08af0c877571712e2e3e686ad79efad9657dbf0f7c3c8ba943ff6c38932d"
-dependencies = [
- "cfg-if 1.0.0",
- "num-traits",
-]
-
 [[package]]
 name = "pairing_ce"
 version = "0.28.5"
diff --git a/core/bin/external_node/src/helpers.rs b/core/bin/external_node/src/helpers.rs
index 3cac556e1d7b..1290428a231c 100644
--- a/core/bin/external_node/src/helpers.rs
+++ b/core/bin/external_node/src/helpers.rs
@@ -176,13 +176,11 @@ impl ValidateChainIdsTask {
             .fuse();
         let main_node_l2_check =
             Self::check_l2_chain_using_main_node(self.main_node_client, self.l2_chain_id).fuse();
-        loop {
-            tokio::select! {
-                Err(err) = eth_client_check => return Err(err),
-                Err(err) = main_node_l1_check => return Err(err),
-                Err(err) = main_node_l2_check => return Err(err),
-                _ = stop_receiver.changed() => return Ok(()),
-            }
+        tokio::select! {
+            Err(err) = eth_client_check => Err(err),
+            Err(err) = main_node_l1_check => Err(err),
+            Err(err) = main_node_l2_check => Err(err),
+            _ = stop_receiver.changed() => Ok(()),
         }
     }
 }
diff --git a/core/lib/basic_types/src/web3/mod.rs b/core/lib/basic_types/src/web3/mod.rs
index d684b9b6c7b2..af9cd1eea3fc 100644
--- a/core/lib/basic_types/src/web3/mod.rs
+++ b/core/lib/basic_types/src/web3/mod.rs
@@ -867,6 +867,28 @@ pub enum SyncState {
     NotSyncing,
 }

+// Sync info from subscription has a different key format
+#[derive(Debug, Clone, PartialEq, Serialize, Deserialize)]
+#[serde(rename_all = "PascalCase")]
+struct SubscriptionSyncInfo {
+    /// The block at which import began.
+    pub starting_block: U256,
+    /// The highest currently synced block.
+    pub current_block: U256,
+    /// The estimated highest block.
+    pub highest_block: U256,
+}
+
+impl From<SubscriptionSyncInfo> for SyncInfo {
+    fn from(s: SubscriptionSyncInfo) -> Self {
+        Self {
+            starting_block: s.starting_block,
+            current_block: s.current_block,
+            highest_block: s.highest_block,
+        }
+    }
+}
+
 // The `eth_syncing` method returns either `false` or an instance of the sync info object.
 // This doesn't play particularly well with the features exposed by `serde_derive`,
 // so we use the custom impls below to ensure proper behavior.
@@ -875,28 +897,6 @@ impl<'de> Deserialize<'de> for SyncState {
     where
         D: Deserializer<'de>,
     {
-        // Sync info from subscription has a different key format
-        #[derive(Debug, Clone, PartialEq, Serialize, Deserialize)]
-        #[serde(rename_all = "PascalCase")]
-        struct SubscriptionSyncInfo {
-            /// The block at which import began.
-            pub starting_block: U256,
-            /// The highest currently synced block.
-            pub current_block: U256,
-            /// The estimated highest block.
-            pub highest_block: U256,
-        }
-
-        impl From<SubscriptionSyncInfo> for SyncInfo {
-            fn from(s: SubscriptionSyncInfo) -> Self {
-                Self {
-                    starting_block: s.starting_block,
-                    current_block: s.current_block,
-                    highest_block: s.highest_block,
-                }
-            }
-        }
-
         #[derive(Debug, Clone, PartialEq, Serialize, Deserialize)]
         struct SubscriptionSyncState {
             pub syncing: bool,
diff --git a/core/lib/crypto_primitives/src/eip712_signature/struct_builder.rs b/core/lib/crypto_primitives/src/eip712_signature/struct_builder.rs
index 74bf13c91e88..655e2d2d9333 100644
--- a/core/lib/crypto_primitives/src/eip712_signature/struct_builder.rs
+++ b/core/lib/crypto_primitives/src/eip712_signature/struct_builder.rs
@@ -61,13 +61,13 @@ impl OuterTypeBuilder {
         let mut result = BTreeMap::new();

         while let Some(front_element) = self.inner_members_queue.pop_front() {
-            if result.get(&front_element.member_type).is_some() {
+            if result.contains_key(&front_element.member_type) {
                 continue;
             }

             result.insert(front_element.member_type.clone(), front_element.clone());
             for inner_member in front_element.inner_members {
-                if inner_member.is_reference_type && result.get(&inner_member.member_type).is_none()
+                if inner_member.is_reference_type && !result.contains_key(&inner_member.member_type)
                 {
                     self.inner_members_queue.push_back(inner_member);
                 }
diff --git a/core/lib/dal/src/events_dal.rs b/core/lib/dal/src/events_dal.rs
index 3a6b86afee97..ebe159577bb2 100644
--- a/core/lib/dal/src/events_dal.rs
+++ b/core/lib/dal/src/events_dal.rs
@@ -84,7 +84,7 @@ impl EventsDal<'_, '_> {
             write_str!(
                 &mut buffer,
                 r"\\x{topic0:x}|\\x{topic1:x}|\\x{topic2:x}|\\x{topic3:x}|",
-                topic0 = EventTopic(event.indexed_topics.get(0)),
+                topic0 = EventTopic(event.indexed_topics.first()),
                 topic1 = EventTopic(event.indexed_topics.get(1)),
                 topic2 = EventTopic(event.indexed_topics.get(2)),
                 topic3 = EventTopic(event.indexed_topics.get(3))
@@ -454,7 +454,7 @@ mod tests {
             tx_index_in_l2_block: 0,
             tx_initiator_address: Address::default(),
         };
-        let first_events = vec![create_vm_event(0, 0), create_vm_event(1, 4)];
+        let first_events = [create_vm_event(0, 0), create_vm_event(1, 4)];
         let second_location = IncludedTxLocation {
             tx_hash: H256([2; 32]),
             tx_index_in_l2_block: 1,
diff --git a/core/lib/dal/src/models/mod.rs b/core/lib/dal/src/models/mod.rs
index 66ab73040d68..bc0e2c657da5 100644
--- a/core/lib/dal/src/models/mod.rs
+++ b/core/lib/dal/src/models/mod.rs
@@ -5,7 +5,6 @@ use zksync_types::{ProtocolVersionId, H160, H256};

 pub mod storage_eth_tx;
 pub mod storage_event;
-pub mod storage_fee_monitor;
 pub mod storage_log;
 pub mod storage_oracle_info;
 pub mod storage_protocol_version;
diff --git a/core/lib/dal/src/models/storage_fee_monitor.rs b/core/lib/dal/src/models/storage_fee_monitor.rs
deleted file mode 100644
index 989308f79fea..000000000000
--- a/core/lib/dal/src/models/storage_fee_monitor.rs
+++ /dev/null
@@ -1,16 +0,0 @@
-#[derive(Debug, Clone, sqlx::FromRow)]
-pub struct StorageBlockGasData {
-    pub number: i64,
-
-    pub commit_gas: Option<i64>,
-    pub commit_base_gas_price: Option<i64>,
-    pub commit_priority_gas_price: Option<i64>,
-
-    pub prove_gas: Option<i64>,
-    pub prove_base_gas_price: Option<i64>,
-    pub prove_priority_gas_price: Option<i64>,
-
-    pub execute_gas: Option<i64>,
-    pub execute_base_gas_price: Option<i64>,
-    pub execute_priority_gas_price: Option<i64>,
-}
diff --git a/core/lib/dal/src/pruning_dal/tests.rs b/core/lib/dal/src/pruning_dal/tests.rs
index ab976f52d219..7583065a8ec4 100644
--- a/core/lib/dal/src/pruning_dal/tests.rs
+++ b/core/lib/dal/src/pruning_dal/tests.rs
@@ -44,7 +44,7 @@ async fn insert_l2_to_l1_logs(conn: &mut Connection<'_, Core>, l2_block_number: L2BlockNumber) {
         tx_index_in_l2_block: 0,
         tx_initiator_address: Address::default(),
     };
-    let first_logs = vec![mock_l2_to_l1_log(), mock_l2_to_l1_log()];
+    let first_logs = [mock_l2_to_l1_log(), mock_l2_to_l1_log()];
     let second_location = IncludedTxLocation {
         tx_hash: H256([2; 32]),
         tx_index_in_l2_block: 1,
@@ -71,7 +71,7 @@ async fn insert_events(conn: &mut Connection<'_, Core>, l2_block_number: L2BlockNumber) {
         tx_index_in_l2_block: 0,
         tx_initiator_address: Address::default(),
     };
-    let first_events = vec![mock_vm_event(0), mock_vm_event(1)];
+    let first_events = [mock_vm_event(0), mock_vm_event(1)];
     let second_location = IncludedTxLocation {
         tx_hash: H256([2; 32]),
         tx_index_in_l2_block: 1,
diff --git a/core/lib/eth_client/src/clients/mock.rs b/core/lib/eth_client/src/clients/mock.rs
index a3f9dde7c6ea..03162c2cfeb4 100644
--- a/core/lib/eth_client/src/clients/mock.rs
+++ b/core/lib/eth_client/src/clients/mock.rs
@@ -31,9 +31,9 @@ impl From<Vec<u8>> for MockTx {
     fn from(tx: Vec<u8>) -> Self {
         let len = tx.len();
         let recipient = Address::from_slice(&tx[len - 116..len - 96]);
-        let max_fee_per_gas = U256::try_from(&tx[len - 96..len - 64]).unwrap();
-        let max_priority_fee_per_gas = U256::try_from(&tx[len - 64..len - 32]).unwrap();
-        let nonce = U256::try_from(&tx[len - 32..]).unwrap().as_u64();
+        let max_fee_per_gas = U256::from(&tx[len - 96..len - 64]);
+        let max_priority_fee_per_gas = U256::from(&tx[len - 64..len - 32]);
+        let nonce = U256::from(&tx[len - 32..]).as_u64();
         let hash = {
             let mut buffer = [0_u8; 32];
             buffer.copy_from_slice(&tx[..32]);
diff --git a/core/lib/mempool/src/mempool_store.rs b/core/lib/mempool/src/mempool_store.rs
index 51a8d708a740..334a4783a76c 100644
--- a/core/lib/mempool/src/mempool_store.rs
+++ b/core/lib/mempool/src/mempool_store.rs
@@ -124,7 +124,7 @@ impl MempoolStore {

     /// Returns `true` if there is a transaction in the mempool satisfying the filter.
     pub fn has_next(&self, filter: &L2TxFilter) -> bool {
-        self.l1_transactions.get(&self.next_priority_id).is_some()
+        self.l1_transactions.contains_key(&self.next_priority_id)
             || self
                 .l2_priority_queue
                 .iter()
diff --git a/core/lib/merkle_tree/src/storage/patch.rs b/core/lib/merkle_tree/src/storage/patch.rs
index 21371dc51cac..329f748a8913 100644
--- a/core/lib/merkle_tree/src/storage/patch.rs
+++ b/core/lib/merkle_tree/src/storage/patch.rs
@@ -305,7 +305,7 @@ impl WorkingPatchSet {
             if nibble_count == 0 {
                 // Copy the root node to all parts.
                 for part in &mut parts {
-                    part.changes_by_nibble_count[0] = level.clone();
+                    part.changes_by_nibble_count[0].clone_from(&level);
                 }
             } else {
                 for (nibbles, node) in level {
diff --git a/core/lib/mini_merkle_tree/src/lib.rs b/core/lib/mini_merkle_tree/src/lib.rs
index f4f66d8fe61b..deb929518762 100644
--- a/core/lib/mini_merkle_tree/src/lib.rs
+++ b/core/lib/mini_merkle_tree/src/lib.rs
@@ -79,7 +79,7 @@ where
         assert!(
             tree_depth_by_size(binary_tree_size) <= MAX_TREE_DEPTH,
             "Tree contains more than {} items; this is not supported",
-            1 << MAX_TREE_DEPTH
+            1u64 << MAX_TREE_DEPTH
         );

         Self {
diff --git a/core/lib/multivm/src/versions/vm_1_3_2/event_sink.rs b/core/lib/multivm/src/versions/vm_1_3_2/event_sink.rs
index b9aea7e09afc..7f7b44071a1a 100644
--- a/core/lib/multivm/src/versions/vm_1_3_2/event_sink.rs
+++ b/core/lib/multivm/src/versions/vm_1_3_2/event_sink.rs
@@ -74,7 +74,8 @@ impl InMemoryEventSink {
         // since if rollbacks of parents were not appended anywhere we just still keep them
         for el in history {
             // we are time ordered here in terms of rollbacks
-            if tmp.get(&el.timestamp.0).is_some() {
+            #[allow(clippy::map_entry)]
+            if tmp.contains_key(&el.timestamp.0) {
                 assert!(el.rollback);
                 tmp.remove(&el.timestamp.0);
             } else {
diff --git a/core/lib/multivm/src/versions/vm_1_3_2/oracles/tracer/utils.rs b/core/lib/multivm/src/versions/vm_1_3_2/oracles/tracer/utils.rs
index 5ee8d8554b65..86ed02365a94 100644
--- a/core/lib/multivm/src/versions/vm_1_3_2/oracles/tracer/utils.rs
+++ b/core/lib/multivm/src/versions/vm_1_3_2/oracles/tracer/utils.rs
@@ -88,7 +88,7 @@ pub(crate) fn get_debug_log(
     let data = U256::from_big_endian(&data);

     // For long data, it is better to use hex-encoding for greater readability
-    let data_str = if data > U256::from(u64::max_value()) {
+    let data_str = if data > U256::from(u64::MAX) {
         let mut bytes = [0u8; 32];
         data.to_big_endian(&mut bytes);
         format!("0x{}", hex::encode(bytes))
diff --git a/core/lib/multivm/src/versions/vm_1_3_2/vm.rs b/core/lib/multivm/src/versions/vm_1_3_2/vm.rs
index a672811cefa8..d76704f892b8 100644
--- a/core/lib/multivm/src/versions/vm_1_3_2/vm.rs
+++ b/core/lib/multivm/src/versions/vm_1_3_2/vm.rs
@@ -213,7 +213,8 @@ impl VmInterface for Vm {
         });
         let compressed_bytecodes: Vec<_> = filtered_deps.collect();

-        self.last_tx_compressed_bytecodes = compressed_bytecodes.clone();
+        self.last_tx_compressed_bytecodes
+            .clone_from(&compressed_bytecodes);
         crate::vm_1_3_2::vm_with_bootloader::push_transaction_to_bootloader_memory(
             &mut self.vm,
             &tx,
diff --git a/core/lib/multivm/src/versions/vm_1_4_1/old_vm/event_sink.rs b/core/lib/multivm/src/versions/vm_1_4_1/old_vm/event_sink.rs
index 5886ea067760..0c9d1bb01cb2 100644
--- a/core/lib/multivm/src/versions/vm_1_4_1/old_vm/event_sink.rs
+++ b/core/lib/multivm/src/versions/vm_1_4_1/old_vm/event_sink.rs
@@ -164,7 +164,8 @@ impl InMemoryEventSink {
         // since if rollbacks of parents were not appended anywhere we just still keep them
         for el in history {
             // we are time ordered here in terms of rollbacks
-            if tmp.get(&el.timestamp.0).is_some() {
+            #[allow(clippy::map_entry)]
+            if tmp.contains_key(&el.timestamp.0) {
                 assert!(el.rollback);
                 tmp.remove(&el.timestamp.0);
             } else {
diff --git a/core/lib/multivm/src/versions/vm_1_4_1/old_vm/utils.rs b/core/lib/multivm/src/versions/vm_1_4_1/old_vm/utils.rs
index 3f63e9377c90..ef73f9a54c1d 100644
--- a/core/lib/multivm/src/versions/vm_1_4_1/old_vm/utils.rs
+++ b/core/lib/multivm/src/versions/vm_1_4_1/old_vm/utils.rs
@@ -16,6 +16,7 @@ pub(crate) enum VmExecutionResult {
     Ok(Vec<u8>),
     Revert(Vec<u8>),
     Panic,
+    #[allow(dead_code)]
     MostLikelyDidNotFinish(Address, u16),
 }

@@ -31,6 +32,7 @@ pub(crate) const fn aux_heap_page_from_base(base: MemoryPage) -> MemoryPage {
     MemoryPage(base.0 + 3)
 }

+#[allow(dead_code)]
 pub(crate) trait FixedLengthIterator<'a, I: 'a, const N: usize>: Iterator<Item = I>
 where
     Self: 'a,
@@ -42,6 +44,7 @@ where
 pub(crate) trait IntoFixedLengthByteIterator<const N: usize> {
     type IntoIter: FixedLengthIterator<'static, u8, N>;

+    #[allow(dead_code)]
     fn into_le_iter(self) -> Self::IntoIter;
     fn into_be_iter(self) -> Self::IntoIter;
 }
diff --git a/core/lib/multivm/src/versions/vm_1_4_1/tracers/utils.rs b/core/lib/multivm/src/versions/vm_1_4_1/tracers/utils.rs
index 86becfbbc96b..7b24e482b72d 100644
--- a/core/lib/multivm/src/versions/vm_1_4_1/tracers/utils.rs
+++ b/core/lib/multivm/src/versions/vm_1_4_1/tracers/utils.rs
@@ -99,7 +99,7 @@ pub(crate) fn get_debug_log(
     let data = U256::from_big_endian(&data);

     // For long data, it is better to use hex-encoding for greater readability
-    let data_str = if data > U256::from(u64::max_value()) {
+    let data_str = if data > U256::from(u64::MAX) {
         let mut bytes = [0u8; 32];
         data.to_big_endian(&mut bytes);
         format!("0x{}", hex::encode(bytes))
diff --git a/core/lib/multivm/src/versions/vm_1_4_2/old_vm/event_sink.rs b/core/lib/multivm/src/versions/vm_1_4_2/old_vm/event_sink.rs
index a85259bbc2ba..ce946ba77c8f 100644
--- a/core/lib/multivm/src/versions/vm_1_4_2/old_vm/event_sink.rs
+++ b/core/lib/multivm/src/versions/vm_1_4_2/old_vm/event_sink.rs
@@ -164,7 +164,8 @@ impl InMemoryEventSink {
         // since if rollbacks of parents were not appended anywhere we just still keep them
         for el in history {
             // we are time ordered here in terms of rollbacks
-            if tmp.get(&el.timestamp.0).is_some() {
+            #[allow(clippy::map_entry)]
+            if tmp.contains_key(&el.timestamp.0) {
                 assert!(el.rollback);
                 tmp.remove(&el.timestamp.0);
             } else {
diff --git a/core/lib/multivm/src/versions/vm_1_4_2/old_vm/utils.rs b/core/lib/multivm/src/versions/vm_1_4_2/old_vm/utils.rs
index a7d592c4853f..4ea0a526f6e8 100644
--- a/core/lib/multivm/src/versions/vm_1_4_2/old_vm/utils.rs
+++ b/core/lib/multivm/src/versions/vm_1_4_2/old_vm/utils.rs
@@ -16,6 +16,7 @@ pub(crate) enum VmExecutionResult {
     Ok(Vec<u8>),
     Revert(Vec<u8>),
     Panic,
+    #[allow(dead_code)]
     MostLikelyDidNotFinish(Address, u16),
 }

@@ -31,6 +32,7 @@ pub(crate) const fn aux_heap_page_from_base(base: MemoryPage) -> MemoryPage {
     MemoryPage(base.0 + 3)
 }

+#[allow(dead_code)]
 pub(crate) trait FixedLengthIterator<'a, I: 'a, const N: usize>: Iterator<Item = I>
 where
     Self: 'a,
@@ -42,6 +44,7 @@ where
 pub(crate) trait IntoFixedLengthByteIterator<const N: usize> {
     type IntoIter: FixedLengthIterator<'static, u8, N>;

+    #[allow(dead_code)]
     fn into_le_iter(self) -> Self::IntoIter;
     fn into_be_iter(self) -> Self::IntoIter;
 }
diff --git a/core/lib/multivm/src/versions/vm_1_4_2/tracers/utils.rs b/core/lib/multivm/src/versions/vm_1_4_2/tracers/utils.rs
index 35f916d3c456..5832241d262d 100644
--- a/core/lib/multivm/src/versions/vm_1_4_2/tracers/utils.rs
+++ b/core/lib/multivm/src/versions/vm_1_4_2/tracers/utils.rs
@@ -99,7 +99,7 @@ pub(crate) fn get_debug_log(
     let data = U256::from_big_endian(&data);

     // For long data, it is better to use hex-encoding for greater readability
-    let data_str = if data > U256::from(u64::max_value()) {
+    let data_str = if data > U256::from(u64::MAX) {
         let mut bytes = [0u8; 32];
         data.to_big_endian(&mut bytes);
         format!("0x{}", hex::encode(bytes))
diff --git a/core/lib/multivm/src/versions/vm_boojum_integration/old_vm/event_sink.rs b/core/lib/multivm/src/versions/vm_boojum_integration/old_vm/event_sink.rs
index 6638057643d4..2bd932d42b7f 100644
--- a/core/lib/multivm/src/versions/vm_boojum_integration/old_vm/event_sink.rs
+++ b/core/lib/multivm/src/versions/vm_boojum_integration/old_vm/event_sink.rs
@@ -164,7 +164,8 @@ impl InMemoryEventSink {
         // since if rollbacks of parents were not appended anywhere we just still keep them
         for el in history {
             // we are time ordered here in terms of rollbacks
-            if tmp.get(&el.timestamp.0).is_some() {
+            #[allow(clippy::map_entry)]
+            if tmp.contains_key(&el.timestamp.0) {
                 assert!(el.rollback);
                 tmp.remove(&el.timestamp.0);
             } else {
diff --git a/core/lib/multivm/src/versions/vm_boojum_integration/old_vm/utils.rs b/core/lib/multivm/src/versions/vm_boojum_integration/old_vm/utils.rs
index 342cc64ea2ac..130bad49e38a 100644
--- a/core/lib/multivm/src/versions/vm_boojum_integration/old_vm/utils.rs
+++ b/core/lib/multivm/src/versions/vm_boojum_integration/old_vm/utils.rs
@@ -19,6 +19,7 @@ pub(crate) enum VmExecutionResult {
     Ok(Vec<u8>),
     Revert(Vec<u8>),
     Panic,
+    #[allow(dead_code)]
     MostLikelyDidNotFinish(Address, u16),
 }

@@ -34,6 +35,7 @@ pub(crate) const fn aux_heap_page_from_base(base: MemoryPage) -> MemoryPage {
     MemoryPage(base.0 + 3)
 }

+#[allow(dead_code)]
 pub(crate) trait FixedLengthIterator<'a, I: 'a, const N: usize>: Iterator<Item = I>
 where
     Self: 'a,
@@ -45,6 +47,7 @@ where
 pub(crate) trait IntoFixedLengthByteIterator<const N: usize> {
     type IntoIter: FixedLengthIterator<'static, u8, N>;

+    #[allow(dead_code)]
     fn into_le_iter(self) -> Self::IntoIter;
     fn into_be_iter(self) -> Self::IntoIter;
 }
diff --git a/core/lib/multivm/src/versions/vm_boojum_integration/tracers/utils.rs b/core/lib/multivm/src/versions/vm_boojum_integration/tracers/utils.rs
index 58264d89c8ea..aafdab9ee428 100644
--- a/core/lib/multivm/src/versions/vm_boojum_integration/tracers/utils.rs
+++ b/core/lib/multivm/src/versions/vm_boojum_integration/tracers/utils.rs
@@ -99,7 +99,7 @@ pub(crate) fn get_debug_log(
     let data = U256::from_big_endian(&data);

     // For long data, it is better to use hex-encoding for greater readability
-    let data_str = if data > U256::from(u64::max_value()) {
+    let data_str = if data > U256::from(u64::MAX) {
         let mut bytes = [0u8; 32];
         data.to_big_endian(&mut bytes);
         format!("0x{}", hex::encode(bytes))
diff --git a/core/lib/multivm/src/versions/vm_latest/old_vm/event_sink.rs b/core/lib/multivm/src/versions/vm_latest/old_vm/event_sink.rs
index e0569f3586d4..58fad96dec86 100644
--- a/core/lib/multivm/src/versions/vm_latest/old_vm/event_sink.rs
+++ b/core/lib/multivm/src/versions/vm_latest/old_vm/event_sink.rs
@@ -164,7 +164,8 @@ impl InMemoryEventSink {
         // since if rollbacks of parents were not appended anywhere we just still keep them
         for el in history {
             // we are time ordered here in terms of rollbacks
-            if tmp.get(&el.timestamp.0).is_some() {
+            #[allow(clippy::map_entry)]
+            if tmp.contains_key(&el.timestamp.0) {
                 assert!(el.rollback);
                 tmp.remove(&el.timestamp.0);
             } else {
diff --git a/core/lib/multivm/src/versions/vm_latest/old_vm/utils.rs b/core/lib/multivm/src/versions/vm_latest/old_vm/utils.rs
index dd354e983d9e..f7933b4f603f 100644
--- a/core/lib/multivm/src/versions/vm_latest/old_vm/utils.rs
+++ b/core/lib/multivm/src/versions/vm_latest/old_vm/utils.rs
@@ -18,6 +18,7 @@ pub(crate) enum VmExecutionResult {
     Ok(Vec<u8>),
     Revert(Vec<u8>),
     Panic,
+    #[allow(dead_code)]
     MostLikelyDidNotFinish(Address, u16),
 }

@@ -33,6 +34,7 @@ pub(crate) const fn aux_heap_page_from_base(base: MemoryPage) -> MemoryPage {
     MemoryPage(base.0 + 3)
 }

+#[allow(dead_code)]
 pub(crate) trait FixedLengthIterator<'a, I: 'a, const N: usize>: Iterator<Item = I>
 where
     Self: 'a,
@@ -44,6 +46,7 @@ where
 pub(crate) trait IntoFixedLengthByteIterator<const N: usize> {
     type IntoIter: FixedLengthIterator<'static, u8, N>;

+    #[allow(dead_code)]
     fn into_le_iter(self) -> Self::IntoIter;
     fn into_be_iter(self) -> Self::IntoIter;
 }
diff --git a/core/lib/multivm/src/versions/vm_latest/tests/require_eip712.rs b/core/lib/multivm/src/versions/vm_latest/tests/require_eip712.rs
index e55aa4075072..719d2a393af3 100644
--- a/core/lib/multivm/src/versions/vm_latest/tests/require_eip712.rs
+++ b/core/lib/multivm/src/versions/vm_latest/tests/require_eip712.rs
@@ -1,5 +1,3 @@
-use std::convert::TryInto;
-
 use ethabi::Token;
 use zksync_eth_signer::{EthereumSigner, TransactionParameters};
 use zksync_system_constants::L2_BASE_TOKEN_ADDRESS;
@@ -104,7 +102,7 @@ async fn test_require_eip712() {
     l2_tx.set_input(aa_tx, hash);
     // Pretend that operator is malicious and sets the initiator to the AA account.
     l2_tx.common_data.initiator_address = account_abstraction.address;
-    let transaction: Transaction = l2_tx.try_into().unwrap();
+    let transaction: Transaction = l2_tx.into();

     vm.vm.push_transaction(transaction);
     let result = vm.vm.execute(VmExecutionMode::OneTx);
@@ -153,7 +151,7 @@ async fn test_require_eip712() {

     let mut l2_tx = L2Tx::from_request(aa_txn_request, 100000).unwrap();
     l2_tx.set_input(encoded_tx, aa_hash);
-    let transaction: Transaction = l2_tx.try_into().unwrap();
+    let transaction: Transaction = l2_tx.into();
     vm.vm.push_transaction(transaction);
     vm.vm.execute(VmExecutionMode::OneTx);
diff --git a/core/lib/multivm/src/versions/vm_latest/tracers/utils.rs b/core/lib/multivm/src/versions/vm_latest/tracers/utils.rs
index 2aa827b84637..bad09617b8f0 100644
--- a/core/lib/multivm/src/versions/vm_latest/tracers/utils.rs
+++ b/core/lib/multivm/src/versions/vm_latest/tracers/utils.rs
@@ -102,7 +102,7 @@ pub(crate) fn get_debug_log(
     let data = U256::from_big_endian(&data);

     // For long data, it is better to use hex-encoding for greater readability
-    let data_str = if data > U256::from(u64::max_value()) {
+    let data_str = if data > U256::from(u64::MAX) {
         let mut bytes = [0u8; 32];
         data.to_big_endian(&mut bytes);
         format!("0x{}", hex::encode(bytes))
diff --git a/core/lib/multivm/src/versions/vm_m5/event_sink.rs b/core/lib/multivm/src/versions/vm_m5/event_sink.rs
index 0bb1ee498f61..782aa1d662f7 100644
--- a/core/lib/multivm/src/versions/vm_m5/event_sink.rs
+++ b/core/lib/multivm/src/versions/vm_m5/event_sink.rs
@@ -75,7 +75,8 @@ impl InMemoryEventSink {
         // since if rollbacks of parents were not appended anywhere we just still keep them
         for el in history.into_iter() {
             // we are time ordered here in terms of rollbacks
-            if tmp.get(&el.timestamp.0).is_some() {
+            #[allow(clippy::map_entry)]
+            if tmp.contains_key(&el.timestamp.0) {
                 assert!(el.rollback);
                 tmp.remove(&el.timestamp.0);
             } else {
diff --git a/core/lib/multivm/src/versions/vm_m5/oracles/tracer.rs b/core/lib/multivm/src/versions/vm_m5/oracles/tracer.rs
index 7094fb6f068a..45f8ed88f834 100644
--- a/core/lib/multivm/src/versions/vm_m5/oracles/tracer.rs
+++ b/core/lib/multivm/src/versions/vm_m5/oracles/tracer.rs
@@ -799,7 +799,7 @@ fn get_debug_log(state: &VmLocalStateData<'_>, memory: &SimpleMemory) -> String {
     let data = U256::from_big_endian(&data);

     // For long data, it is better to use hex-encoding for greater readability
-    let data_str = if data > U256::from(u64::max_value()) {
+    let data_str = if data > U256::from(u64::MAX) {
         let mut bytes = [0u8; 32];
         data.to_big_endian(&mut bytes);
         format!("0x{}", hex::encode(bytes))
diff --git a/core/lib/multivm/src/versions/vm_m6/event_sink.rs b/core/lib/multivm/src/versions/vm_m6/event_sink.rs
index 2fb5d934e969..56fe8dcb11e2 100644
--- a/core/lib/multivm/src/versions/vm_m6/event_sink.rs
+++ b/core/lib/multivm/src/versions/vm_m6/event_sink.rs
@@ -67,7 +67,8 @@ impl InMemoryEventSink {
         // since if rollbacks of parents were not appended anywhere we just still keep them
         for el in history {
             // we are time ordered here in terms of rollbacks
-            if tmp.get(&el.timestamp.0).is_some() {
+            #[allow(clippy::map_entry)]
+            if tmp.contains_key(&el.timestamp.0) {
                 assert!(el.rollback);
                 tmp.remove(&el.timestamp.0);
             } else {
diff --git a/core/lib/multivm/src/versions/vm_m6/oracles/tracer/utils.rs b/core/lib/multivm/src/versions/vm_m6/oracles/tracer/utils.rs
index 2df22aa2d3f8..4d963d08952d 100644
--- a/core/lib/multivm/src/versions/vm_m6/oracles/tracer/utils.rs
+++ b/core/lib/multivm/src/versions/vm_m6/oracles/tracer/utils.rs
@@ -88,7 +88,7 @@ pub(crate) fn get_debug_log(
     let data = U256::from_big_endian(&data);

     // For long data, it is better to use hex-encoding for greater readability
-    let data_str = if data > U256::from(u64::max_value()) {
+    let data_str = if data > U256::from(u64::MAX) {
         let mut bytes = [0u8; 32];
         data.to_big_endian(&mut bytes);
         format!("0x{}", hex::encode(bytes))
diff --git a/core/lib/multivm/src/versions/vm_m6/vm.rs b/core/lib/multivm/src/versions/vm_m6/vm.rs
index 9f29fa995b69..36303c577448 100644
--- a/core/lib/multivm/src/versions/vm_m6/vm.rs
+++ b/core/lib/multivm/src/versions/vm_m6/vm.rs
@@ -241,7 +241,8 @@ impl VmInterface for Vm {
         });
         let compressed_bytecodes: Vec<_> = filtered_deps.collect();

-        self.last_tx_compressed_bytecodes = compressed_bytecodes.clone();
+        self.last_tx_compressed_bytecodes
+            .clone_from(&compressed_bytecodes);
         crate::vm_m6::vm_with_bootloader::push_transaction_to_bootloader_memory(
             &mut self.vm,
             &tx,
diff --git a/core/lib/multivm/src/versions/vm_refunds_enhancement/old_vm/event_sink.rs b/core/lib/multivm/src/versions/vm_refunds_enhancement/old_vm/event_sink.rs
index 74dca71d10f6..2af642d358da 100644
--- a/core/lib/multivm/src/versions/vm_refunds_enhancement/old_vm/event_sink.rs
+++ b/core/lib/multivm/src/versions/vm_refunds_enhancement/old_vm/event_sink.rs
@@ -74,7 +74,8 @@ impl InMemoryEventSink {
         // since if rollbacks of parents were not appended anywhere we just still keep them
         for el in history {
             // we are time ordered here in terms of rollbacks
-            if tmp.get(&el.timestamp.0).is_some() {
+            #[allow(clippy::map_entry)]
+            if tmp.contains_key(&el.timestamp.0) {
                 assert!(el.rollback);
                 tmp.remove(&el.timestamp.0);
             } else {
diff --git a/core/lib/multivm/src/versions/vm_refunds_enhancement/old_vm/utils.rs b/core/lib/multivm/src/versions/vm_refunds_enhancement/old_vm/utils.rs
index c2478edf7a89..6d7ab7e7a2d9 100644
--- a/core/lib/multivm/src/versions/vm_refunds_enhancement/old_vm/utils.rs
+++ b/core/lib/multivm/src/versions/vm_refunds_enhancement/old_vm/utils.rs
@@ -19,6 +19,7 @@ pub(crate) enum VmExecutionResult {
     Ok(Vec<u8>),
     Revert(Vec<u8>),
     Panic,
+    #[allow(dead_code)]
     MostLikelyDidNotFinish(Address, u16),
 }

@@ -34,6 +35,7 @@ pub(crate) const fn aux_heap_page_from_base(base: MemoryPage) -> MemoryPage {
     MemoryPage(base.0 + 3)
 }

+#[allow(dead_code)]
 pub(crate) trait FixedLengthIterator<'a, I: 'a, const N: usize>: Iterator<Item = I>
 where
     Self: 'a,
@@ -45,6 +47,7 @@ where
 pub(crate) trait IntoFixedLengthByteIterator<const N: usize> {
     type IntoIter: FixedLengthIterator<'static, u8, N>;

+    #[allow(dead_code)]
     fn into_le_iter(self) -> Self::IntoIter;
     fn into_be_iter(self) -> Self::IntoIter;
 }
diff --git a/core/lib/multivm/src/versions/vm_refunds_enhancement/tracers/utils.rs b/core/lib/multivm/src/versions/vm_refunds_enhancement/tracers/utils.rs
index ccacea0cd7e4..1d3e9a272764 100644
--- a/core/lib/multivm/src/versions/vm_refunds_enhancement/tracers/utils.rs
+++ b/core/lib/multivm/src/versions/vm_refunds_enhancement/tracers/utils.rs
@@ -96,7 +96,7 @@ pub(crate) fn get_debug_log(
     let data = U256::from_big_endian(&data);

     // For long data, it is better to use hex-encoding for greater readability
-    let data_str = if data > U256::from(u64::max_value()) {
+    let data_str = if data > U256::from(u64::MAX) {
         let mut bytes = [0u8; 32];
         data.to_big_endian(&mut bytes);
         format!("0x{}", hex::encode(bytes))
diff --git a/core/lib/multivm/src/versions/vm_virtual_blocks/old_vm/event_sink.rs b/core/lib/multivm/src/versions/vm_virtual_blocks/old_vm/event_sink.rs
index 00a03ca0adb8..eadfe70d0a7e 100644
--- a/core/lib/multivm/src/versions/vm_virtual_blocks/old_vm/event_sink.rs
+++ b/core/lib/multivm/src/versions/vm_virtual_blocks/old_vm/event_sink.rs
@@ -74,7 +74,8 @@ impl InMemoryEventSink {
         // since if rollbacks of parents were not appended anywhere we just still keep them
         for el in history {
             // we are time ordered here in terms of rollbacks
-            if tmp.get(&el.timestamp.0).is_some() {
+            #[allow(clippy::map_entry)]
+            if tmp.contains_key(&el.timestamp.0) {
                 assert!(el.rollback);
                 tmp.remove(&el.timestamp.0);
             } else {
diff --git a/core/lib/multivm/src/versions/vm_virtual_blocks/old_vm/utils.rs b/core/lib/multivm/src/versions/vm_virtual_blocks/old_vm/utils.rs
index 5be62e384378..834b9988f693 100644
--- a/core/lib/multivm/src/versions/vm_virtual_blocks/old_vm/utils.rs
+++ b/core/lib/multivm/src/versions/vm_virtual_blocks/old_vm/utils.rs
@@ -19,6 +19,7 @@ pub(crate) enum VmExecutionResult {
     Ok(Vec<u8>),
     Revert(Vec<u8>),
     Panic,
+    #[allow(dead_code)]
     MostLikelyDidNotFinish(Address, u16),
 }

@@ -34,6 +35,7 @@ pub(crate) const fn aux_heap_page_from_base(base: MemoryPage) -> MemoryPage {
     MemoryPage(base.0 + 3)
 }

+#[allow(dead_code)]
 pub(crate) trait FixedLengthIterator<'a, I: 'a, const N: usize>: Iterator<Item = I>
 where
     Self: 'a,
@@ -45,6 +47,7 @@ where
 pub(crate) trait IntoFixedLengthByteIterator<const N: usize> {
     type IntoIter: FixedLengthIterator<'static, u8, N>;

+    #[allow(dead_code)]
     fn into_le_iter(self) -> Self::IntoIter;
     fn into_be_iter(self) -> Self::IntoIter;
 }
diff --git a/core/lib/multivm/src/versions/vm_virtual_blocks/tracers/traits.rs b/core/lib/multivm/src/versions/vm_virtual_blocks/tracers/traits.rs
index 6d8fdab4e661..ed6ad67b5dcd 100644
--- a/core/lib/multivm/src/versions/vm_virtual_blocks/tracers/traits.rs
+++ b/core/lib/multivm/src/versions/vm_virtual_blocks/tracers/traits.rs
@@ -49,6 +49,7 @@ pub trait VmTracer<S: WriteStorage, H: HistoryMode>:
 }

 pub trait ToTracerPointer<S, H> {
+    #[allow(dead_code)]
     fn into_tracer_pointer(self) -> TracerPointer<S, H>;
 }

diff --git a/core/lib/multivm/src/versions/vm_virtual_blocks/tracers/utils.rs b/core/lib/multivm/src/versions/vm_virtual_blocks/tracers/utils.rs
index 1f3d27d9d20e..ef8219ec2b4d 100644
--- a/core/lib/multivm/src/versions/vm_virtual_blocks/tracers/utils.rs
+++ b/core/lib/multivm/src/versions/vm_virtual_blocks/tracers/utils.rs
@@ -96,7 +96,7 @@ pub(crate) fn get_debug_log(
     let data = U256::from_big_endian(&data);

     // For long data, it is better to use hex-encoding for greater readability
-    let data_str = if data > U256::from(u64::max_value()) {
+    let data_str = if data > U256::from(u64::MAX) {
         let mut bytes = [0u8; 32];
data.to_big_endian(&mut bytes); format!("0x{}", hex::encode(bytes)) diff --git a/core/lib/protobuf_config/src/secrets.rs b/core/lib/protobuf_config/src/secrets.rs index d67178534ec2..91a05b31f196 100644 --- a/core/lib/protobuf_config/src/secrets.rs +++ b/core/lib/protobuf_config/src/secrets.rs @@ -48,7 +48,7 @@ impl ProtoRepr for proto::DatabaseSecrets { .transpose() .context("replica_url")?; if server_replica_url.is_none() { - server_replica_url = server_url.clone(); + server_replica_url.clone_from(&server_url) } let prover_url = self .prover_url diff --git a/core/lib/snapshots_applier/src/tests/mod.rs b/core/lib/snapshots_applier/src/tests/mod.rs index e61f76455372..4dcc66841934 100644 --- a/core/lib/snapshots_applier/src/tests/mod.rs +++ b/core/lib/snapshots_applier/src/tests/mod.rs @@ -493,7 +493,8 @@ async fn recovering_tokens() { }); } let (object_store, mut client) = prepare_clients(&expected_status, &storage_logs).await; - client.tokens_response = tokens.clone(); + + client.tokens_response.clone_from(&tokens); let task = SnapshotsApplierTask::new( SnapshotsApplierConfig::for_tests(), diff --git a/core/lib/state/src/shadow_storage.rs b/core/lib/state/src/shadow_storage.rs index 0a2bd0fa43ee..9ef1aacca155 100644 --- a/core/lib/state/src/shadow_storage.rs +++ b/core/lib/state/src/shadow_storage.rs @@ -3,6 +3,7 @@ use zksync_types::{L1BatchNumber, StorageKey, StorageValue, H256}; use crate::ReadStorage; +#[allow(clippy::struct_field_names)] #[derive(Debug, Metrics)] #[metrics(prefix = "shadow_storage")] struct ShadowStorageMetrics { diff --git a/core/lib/types/src/l1/mod.rs b/core/lib/types/src/l1/mod.rs index 615574278d29..50d2bd9310ee 100644 --- a/core/lib/types/src/l1/mod.rs +++ b/core/lib/types/src/l1/mod.rs @@ -147,7 +147,7 @@ impl serde::Serialize for L1TxCommonData { to_mint: self.to_mint, refund_recipient: self.refund_recipient, - /// DEPRECATED. + // DEPRECATED. deadline_block: 0, eth_hash: H256::default(), eth_block: self.eth_block, diff --git a/core/lib/types/src/protocol_upgrade.rs b/core/lib/types/src/protocol_upgrade.rs index 804a4083a82a..2cd5953bd730 100644 --- a/core/lib/types/src/protocol_upgrade.rs +++ b/core/lib/types/src/protocol_upgrade.rs @@ -531,7 +531,7 @@ impl serde::Serialize for ProtocolUpgradeTxCommonData { to_mint: self.to_mint, refund_recipient: self.refund_recipient, - /// DEPRECATED. + // DEPRECATED. 
eth_hash: H256::default(), eth_block: self.eth_block, } diff --git a/core/lib/types/src/storage_writes_deduplicator.rs b/core/lib/types/src/storage_writes_deduplicator.rs index 19bf51b6eb03..a67686a7dc77 100644 --- a/core/lib/types/src/storage_writes_deduplicator.rs +++ b/core/lib/types/src/storage_writes_deduplicator.rs @@ -97,7 +97,7 @@ impl StorageWritesDeduplicator { .initial_values .entry(key) .or_insert(log.log_query.read_value); - let was_key_modified = self.modified_key_values.get(&key).is_some(); + let was_key_modified = self.modified_key_values.contains_key(&key); let modified_value = if log.log_query.rollback { (initial_value != log.log_query.read_value).then_some(log.log_query.read_value) } else { diff --git a/core/lib/types/src/transaction_request.rs b/core/lib/types/src/transaction_request.rs index c2526cc3ed6f..f64cbbaa9c0f 100644 --- a/core/lib/types/src/transaction_request.rs +++ b/core/lib/types/src/transaction_request.rs @@ -870,7 +870,7 @@ impl From for CallRequest { custom_signature: Some(tx.common_data.signature.clone()), paymaster_params: Some(tx.common_data.paymaster_params.clone()), }; - meta.factory_deps = tx.execute.factory_deps.clone(); + meta.factory_deps.clone_from(&tx.execute.factory_deps); let mut request = CallRequestBuilder::default() .from(tx.initiator_account()) .gas(tx.common_data.fee.gas_limit) diff --git a/core/node/eth_sender/src/publish_criterion.rs b/core/node/eth_sender/src/publish_criterion.rs index 6607c33eb903..52d861ce0af6 100644 --- a/core/node/eth_sender/src/publish_criterion.rs +++ b/core/node/eth_sender/src/publish_criterion.rs @@ -16,6 +16,7 @@ use super::{metrics::METRICS, utils::agg_l1_batch_base_cost}; #[async_trait] pub trait L1BatchPublishCriterion: fmt::Debug + Send + Sync { + #[allow(dead_code)] // Takes `&self` receiver for the trait to be object-safe fn name(&self) -> &'static str; diff --git a/core/node/eth_watch/src/lib.rs b/core/node/eth_watch/src/lib.rs index d91427dafcb1..7cb0064c3d73 100644 --- a/core/node/eth_watch/src/lib.rs +++ b/core/node/eth_watch/src/lib.rs @@ -184,7 +184,7 @@ impl EthWatch { let relevant_topic = processor.relevant_topic(); let processor_events = events .iter() - .filter(|event| event.topics.get(0) == Some(&relevant_topic)) + .filter(|event| event.topics.first() == Some(&relevant_topic)) .cloned() .collect(); processor diff --git a/core/node/node_sync/src/batch_status_updater/tests.rs b/core/node/node_sync/src/batch_status_updater/tests.rs index f3850ccfe362..e1386f985a09 100644 --- a/core/node/node_sync/src/batch_status_updater/tests.rs +++ b/core/node/node_sync/src/batch_status_updater/tests.rs @@ -64,9 +64,7 @@ impl L1BatchStagesMap { } fn get(&self, number: L1BatchNumber) -> Option { - let Some(index) = number.0.checked_sub(self.first_batch_number.0) else { - return None; - }; + let index = number.0.checked_sub(self.first_batch_number.0)?; self.stages.get(index as usize).copied() } diff --git a/core/node/node_sync/src/tests.rs b/core/node/node_sync/src/tests.rs index 2b15db9e24ca..1d278d1af385 100644 --- a/core/node/node_sync/src/tests.rs +++ b/core/node/node_sync/src/tests.rs @@ -260,7 +260,7 @@ async fn external_io_basics(snapshot_recovery: bool) { .get_transaction_receipts(&[tx_hash]) .await .unwrap() - .get(0) + .first() .cloned() .expect("Transaction not persisted"); assert_eq!( diff --git a/core/node/proof_data_handler/src/request_processor.rs b/core/node/proof_data_handler/src/request_processor.rs index 010b805a4727..582cb78f70c7 100644 --- 
a/core/node/proof_data_handler/src/request_processor.rs +++ b/core/node/proof_data_handler/src/request_processor.rs @@ -110,7 +110,7 @@ impl RequestProcessor { .get_l1_batch_header(l1_batch_number) .await .unwrap() - .expect(&format!("Missing header for {}", l1_batch_number)); + .unwrap_or_else(|| panic!("Missing header for {}", l1_batch_number)); let minor_version = header.protocol_version.unwrap(); let protocol_version = self @@ -122,9 +122,9 @@ impl RequestProcessor { .get_protocol_version_with_latest_patch(minor_version) .await .unwrap() - .expect(&format!( - "Missing l1 verifier info for protocol version {minor_version}", - )); + .unwrap_or_else(|| { + panic!("Missing l1 verifier info for protocol version {minor_version}") + }); let batch_header = self .pool diff --git a/core/node/state_keeper/src/io/seal_logic/l2_block_seal_subtasks.rs b/core/node/state_keeper/src/io/seal_logic/l2_block_seal_subtasks.rs index 48d4696c57ae..68fbd62bd973 100644 --- a/core/node/state_keeper/src/io/seal_logic/l2_block_seal_subtasks.rs +++ b/core/node/state_keeper/src/io/seal_logic/l2_block_seal_subtasks.rs @@ -517,7 +517,7 @@ mod tests { .get_transaction_receipts(&[tx_hash]) .await .unwrap() - .get(0) + .first() .cloned(); assert!(tx_receipt.is_none()); diff --git a/core/tests/loadnext/src/sdk/mod.rs b/core/tests/loadnext/src/sdk/mod.rs index b2abf133b5c2..26c11eb7a2a6 100644 --- a/core/tests/loadnext/src/sdk/mod.rs +++ b/core/tests/loadnext/src/sdk/mod.rs @@ -1,7 +1,6 @@ -pub use zksync_types::{self, ethabi, network::Network, web3}; +pub use zksync_types::{self, ethabi, web3}; pub use zksync_web3_decl::{ - jsonrpsee::http_client::*, - namespaces::{EthNamespaceClient, NetNamespaceClient, Web3NamespaceClient, ZksNamespaceClient}, + namespaces::{EthNamespaceClient, ZksNamespaceClient}, types, }; diff --git a/docker/build-base/Dockerfile b/docker/build-base/Dockerfile index 1fec4cca7e03..436843eed3de 100644 --- a/docker/build-base/Dockerfile +++ b/docker/build-base/Dockerfile @@ -9,7 +9,7 @@ ENV RUSTUP_HOME=/usr/local/rustup \ PATH=/usr/local/cargo/bin:$PATH RUN curl https://sh.rustup.rs -sSf | bash -s -- -y && \ - rustup install nightly-2023-08-21 && \ - rustup default nightly-2023-08-21 + rustup install nightly-2024-05-07 && \ + rustup default nightly-2024-05-07 RUN cargo install sqlx-cli --version 0.7.3 diff --git a/docker/proof-fri-gpu-compressor/Dockerfile b/docker/proof-fri-gpu-compressor/Dockerfile index ead48f6af6bb..8249f123081b 100644 --- a/docker/proof-fri-gpu-compressor/Dockerfile +++ b/docker/proof-fri-gpu-compressor/Dockerfile @@ -15,8 +15,8 @@ ENV RUSTUP_HOME=/usr/local/rustup \ PATH=/usr/local/cargo/bin:$PATH RUN curl https://sh.rustup.rs -sSf | bash -s -- -y && \ - rustup install nightly-2023-08-21 && \ - rustup default nightly-2023-08-21 + rustup install nightly-2024-05-07 && \ + rustup default nightly-2024-05-07 RUN curl -Lo cmake-3.24.2-linux-x86_64.sh https://github.com/Kitware/CMake/releases/download/v3.24.2/cmake-3.24.2-linux-x86_64.sh && \ chmod +x cmake-3.24.2-linux-x86_64.sh && \ diff --git a/docker/prover-gpu-fri/Dockerfile b/docker/prover-gpu-fri/Dockerfile index 152e768d298f..1093ed9e4ebf 100644 --- a/docker/prover-gpu-fri/Dockerfile +++ b/docker/prover-gpu-fri/Dockerfile @@ -14,8 +14,8 @@ ENV RUSTUP_HOME=/usr/local/rustup \ PATH=/usr/local/cargo/bin:$PATH RUN curl https://sh.rustup.rs -sSf | bash -s -- -y && \ - rustup install nightly-2023-08-21 && \ - rustup default nightly-2023-08-21 + rustup install nightly-2024-05-07 && \ + rustup default nightly-2024-05-07 RUN curl -Lo 
cmake-3.24.2-linux-x86_64.sh https://github.com/Kitware/CMake/releases/download/v3.24.2/cmake-3.24.2-linux-x86_64.sh && \ chmod +x cmake-3.24.2-linux-x86_64.sh && \ diff --git a/docker/zk-environment/20.04_amd64_cuda_11_8.Dockerfile b/docker/zk-environment/20.04_amd64_cuda_11_8.Dockerfile index bd77e680d5fc..a50587e9a83b 100644 --- a/docker/zk-environment/20.04_amd64_cuda_11_8.Dockerfile +++ b/docker/zk-environment/20.04_amd64_cuda_11_8.Dockerfile @@ -73,7 +73,7 @@ RUN echo "deb http://packages.cloud.google.com/apt cloud-sdk main" > /etc/apt/so gcloud config set metrics/environment github_docker_image RUN wget -c -O - https://sh.rustup.rs | bash -s -- -y -RUN rustup install nightly-2023-08-21 +RUN rustup install nightly-2024-05-07 RUN rustup default stable RUN cargo install --version=0.7.3 sqlx-cli RUN cargo install cargo-nextest diff --git a/docker/zk-environment/20.04_amd64_cuda_12_0.Dockerfile b/docker/zk-environment/20.04_amd64_cuda_12_0.Dockerfile index d0bb05fed16e..9e56613f9ead 100644 --- a/docker/zk-environment/20.04_amd64_cuda_12_0.Dockerfile +++ b/docker/zk-environment/20.04_amd64_cuda_12_0.Dockerfile @@ -71,7 +71,7 @@ RUN echo "deb http://packages.cloud.google.com/apt cloud-sdk main" > /etc/apt/so gcloud config set metrics/environment github_docker_image RUN wget -c -O - https://sh.rustup.rs | bash -s -- -y -RUN rustup install nightly-2023-08-21 +RUN rustup install nightly-2024-05-07 RUN rustup default stable RUN cargo install --version=0.7.3 sqlx-cli RUN cargo install cargo-nextest diff --git a/docker/zk-environment/Dockerfile b/docker/zk-environment/Dockerfile index 1ed60f4b95f1..9c9393ed5188 100644 --- a/docker/zk-environment/Dockerfile +++ b/docker/zk-environment/Dockerfile @@ -138,5 +138,5 @@ ENV RUSTC_WRAPPER=/usr/local/cargo/bin/sccache FROM rust-lightweight as rust-lightweight-nightly -RUN rustup install nightly-2023-08-21 && \ - rustup default nightly-2023-08-21 +RUN rustup install nightly-2024-05-07 && \ + rustup default nightly-2024-05-07 diff --git a/prover/Cargo.lock b/prover/Cargo.lock index 733fdab19265..f6f0425fa3e0 100644 --- a/prover/Cargo.lock +++ b/prover/Cargo.lock @@ -666,7 +666,7 @@ dependencies = [ [[package]] name = "boojum" version = "0.2.0" -source = "git+https://github.com/matter-labs/era-boojum?branch=main#cd631c9a1d61ec21d7bd22eb74949d43ecfad0fd" +source = "git+https://github.com/matter-labs/era-boojum?branch=main#4bcb11f0610302110ae8109af01d5b652191b2f6" dependencies = [ "arrayvec 0.7.4", "bincode", @@ -683,7 +683,6 @@ dependencies = [ "lazy_static", "num-modular", "num_cpus", - "packed_simd", "pairing_ce 0.28.5 (git+https://github.com/matter-labs/pairing.git)", "rand 0.8.5", "rayon", @@ -1435,7 +1434,7 @@ dependencies = [ [[package]] name = "cs_derive" version = "0.1.0" -source = "git+https://github.com/matter-labs/era-boojum?branch=main#cd631c9a1d61ec21d7bd22eb74949d43ecfad0fd" +source = "git+https://github.com/matter-labs/era-boojum?branch=main#4bcb11f0610302110ae8109af01d5b652191b2f6" dependencies = [ "proc-macro-error", "proc-macro2 1.0.78", @@ -1516,9 +1515,9 @@ dependencies = [ [[package]] name = "curve25519-dalek" -version = "4.1.1" +version = "4.1.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e89b8c6a2e4b1f45971ad09761aafb85514a84744b67a95e32c3cc1352d1f65c" +checksum = "0a677b8922c94e01bdbb12126b0bc852f00447528dee1782229af9c720c3f348" dependencies = [ "cfg-if 1.0.0", "cpufeatures", @@ -4103,16 +4102,6 @@ dependencies = [ "sha2 0.10.8", ] -[[package]] -name = "packed_simd" -version = "0.3.9" -source = 
"registry+https://github.com/rust-lang/crates.io-index" -checksum = "1f9f08af0c877571712e2e3e686ad79efad9657dbf0f7c3c8ba943ff6c38932d" -dependencies = [ - "cfg-if 1.0.0", - "num-traits", -] - [[package]] name = "pairing_ce" version = "0.28.5" diff --git a/prover/proof_fri_compressor/README.md b/prover/proof_fri_compressor/README.md index 3da29b08e7cd..097a59e5d09b 100644 --- a/prover/proof_fri_compressor/README.md +++ b/prover/proof_fri_compressor/README.md @@ -4,4 +4,4 @@ Used to compress FRI proof to Bellman proof that gets sent to L1. ## running -`zk f cargo +nightly-2023-08-21 run --release --bin zksync_proof_fri_compressor` +`zk f cargo +nightly-2024-05-07 run --release --bin zksync_proof_fri_compressor` diff --git a/prover/rust-toolchain b/prover/rust-toolchain index d7aace133ac0..5aaef38cd79d 100644 --- a/prover/rust-toolchain +++ b/prover/rust-toolchain @@ -1 +1 @@ -nightly-2024-02-01 +nightly-2024-05-07 diff --git a/prover/witness_vector_generator/README.md b/prover/witness_vector_generator/README.md index e287e4d53b27..dde192533db3 100644 --- a/prover/witness_vector_generator/README.md +++ b/prover/witness_vector_generator/README.md @@ -4,4 +4,4 @@ Used to generate witness vectors using circuit and sending them to prover over T ## running -`zk f cargo +nightly-2023-08-21 run --release --bin zksync_witness_vector_generator` +`zk f cargo +nightly-2024-05-07 run --release --bin zksync_witness_vector_generator` diff --git a/rust-toolchain b/rust-toolchain index 9a87fb21ccf9..5aaef38cd79d 100644 --- a/rust-toolchain +++ b/rust-toolchain @@ -1 +1 @@ -nightly-2023-08-21 +nightly-2024-05-07 From 38fdfe083f61f5aad11b5a0efb41215c674f3186 Mon Sep 17 00:00:00 2001 From: Alex Ostrovski Date: Wed, 5 Jun 2024 13:36:32 +0300 Subject: [PATCH 3/8] fix(en): Fix transient error detection in consistency checker (#2140) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit ## What ❔ Considers a subset of contract call errors as transient. ## Why ❔ Currently, consistency checker considers all contract call errors fatal, which leads to EN terminating when it shouldn't. ## Checklist - [x] PR title corresponds to the body of PR (we generate changelog entries from PRs). - [x] Code has been formatted via `zk fmt` and `zk lint`. - [x] Spellcheck has been run via `zk spellcheck`. 
--- core/node/consistency_checker/src/lib.rs | 20 ++++++++++++++------ 1 file changed, 14 insertions(+), 6 deletions(-) diff --git a/core/node/consistency_checker/src/lib.rs b/core/node/consistency_checker/src/lib.rs index ae092b2d1c11..79ce137560c6 100644 --- a/core/node/consistency_checker/src/lib.rs +++ b/core/node/consistency_checker/src/lib.rs @@ -42,10 +42,12 @@ enum CheckError { impl CheckError { fn is_transient(&self) -> bool { - matches!( - self, - Self::Web3(err) if err.is_transient() - ) + match self { + Self::Web3(err) | Self::ContractCall(ContractCallError::EthereumGateway(err)) => { + err.is_transient() + } + _ => false, + } } } @@ -532,7 +534,10 @@ impl ConsistencyChecker { while let Err(err) = self.sanity_check_diamond_proxy_addr().await { if err.is_transient() { - tracing::warn!("Transient error checking diamond proxy contract; will retry after a delay: {err}"); + tracing::warn!( + "Transient error checking diamond proxy contract; will retry after a delay: {:#}", + anyhow::Error::from(err) + ); if tokio::time::timeout(self.sleep_interval, stop_receiver.changed()) .await .is_ok() @@ -629,7 +634,10 @@ impl ConsistencyChecker { } } Err(err) if err.is_transient() => { - tracing::warn!("Transient error while verifying L1 batch #{batch_number}; will retry after a delay: {err}"); + tracing::warn!( + "Transient error while verifying L1 batch #{batch_number}; will retry after a delay: {:#}", + anyhow::Error::from(err) + ); if tokio::time::timeout(self.sleep_interval, stop_receiver.changed()) .await .is_ok() From 006ea16113b4ebf31873eb5b0b78c4d9da3dec98 Mon Sep 17 00:00:00 2001 From: Igor Borodin Date: Wed, 5 Jun 2024 12:54:13 +0200 Subject: [PATCH 4/8] chore(docs): Fix healthcheck command in EN docker-compose example (#2148) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit ## What ❔ - Tweaking postgres `healthcheck` command in EN Docker Compose example to work from shells other than bash - Minor formatting fixes to the aforementioned Docker Compose example files - EN bump to the latest stable in the aforementioned Docker Compose example files ## Why ❔ Fixes: **https://github.com/zkSync-Community-Hub/zksync-developers/discussions/526#discussioncomment-9552115** ## Checklist - [x] PR title corresponds to the body of PR (we generate changelog entries from PRs). - [ ] Tests for the changes have been added / updated. - [ ] Documentation comments have been added / updated. - [x] Code has been formatted via `zk fmt` and `zk lint`. - [ ] Spellcheck has been run via `zk spellcheck`. 
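The substance of the healthcheck change, as a minimal sketch (the query is shortened here; the real examples check whether a `pg_restore` session is still running against the configured database):

```yaml
healthcheck:
  interval: 1s
  timeout: 3s
  # The exec-list "CMD-SHELL" form makes the shell invocation explicit, and
  # `$$` is Compose's escape for a literal `$`, so grep receives ".f$".
  test:
    [
      "CMD-SHELL",
      'psql -U postgres -c "select exists (select * from pg_stat_activity where application_name = ''pg_restore'')" | grep -e ".f$$"',
    ]
```

The doubled single quotes inside the single-quoted YAML scalar are YAML's own escape for a literal quote, and matching a trailing `f` means the `exists` check returned false, i.e. no restore is in progress.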
--- .../mainnet-external-node-docker-compose.yml | 22 +++++++++++-------- .../testnet-external-node-docker-compose.yml | 20 ++++++++++------- 2 files changed, 25 insertions(+), 17 deletions(-) diff --git a/docs/guides/external-node/docker-compose-examples/mainnet-external-node-docker-compose.yml b/docs/guides/external-node/docker-compose-examples/mainnet-external-node-docker-compose.yml index f99a0b2e491c..8b48ff5ebca7 100644 --- a/docs/guides/external-node/docker-compose-examples/mainnet-external-node-docker-compose.yml +++ b/docs/guides/external-node/docker-compose-examples/mainnet-external-node-docker-compose.yml @@ -1,4 +1,4 @@ -version: '3.2' +version: "3.2" services: prometheus: image: prom/prometheus:v2.35.0 @@ -21,10 +21,10 @@ services: postgres: image: "postgres:14" command: > - postgres - -c max_connections=200 - -c log_error_verbosity=terse - -c shared_buffers=2GB + postgres + -c max_connections=200 + -c log_error_verbosity=terse + -c shared_buffers=2GB -c effective_cache_size=4GB -c maintenance_work_mem=1GB -c checkpoint_completion_target=0.9 @@ -41,12 +41,16 @@ services: healthcheck: interval: 1s timeout: 3s - test: psql -U postgres -c "select exists (select * from pg_stat_activity where datname = '{{ database_name }}' and application_name = 'pg_restore')" | grep -e ".f$" + test: + [ + "CMD-SHELL", + 'psql -U postgres -c "select exists (select * from pg_stat_activity where datname = ''{{ database_name }}'' and application_name = ''pg_restore'')" | grep -e ".f$$"', + ] environment: - POSTGRES_PASSWORD=notsecurepassword - PGPORT=5430 external-node: - image: "matterlabs/external-node:2.0-v24.2.0" + image: "matterlabs/external-node:2.0-v24.6.0" depends_on: postgres: condition: service_healthy @@ -81,5 +85,5 @@ services: volumes: mainnet-postgres: {} mainnet-rocksdb: {} - mainnet-prometheus-data: { } - mainnet-grafana-data: { } + mainnet-prometheus-data: {} + mainnet-grafana-data: {} diff --git a/docs/guides/external-node/docker-compose-examples/testnet-external-node-docker-compose.yml b/docs/guides/external-node/docker-compose-examples/testnet-external-node-docker-compose.yml index f0fc51be2796..f0402c290ebf 100644 --- a/docs/guides/external-node/docker-compose-examples/testnet-external-node-docker-compose.yml +++ b/docs/guides/external-node/docker-compose-examples/testnet-external-node-docker-compose.yml @@ -1,4 +1,4 @@ -version: '3.2' +version: "3.2" services: prometheus: image: prom/prometheus:v2.35.0 @@ -20,11 +20,11 @@ services: - "127.0.0.1:3000:3000" postgres: image: "postgres:14" - command: > - postgres - -c max_connections=200 - -c log_error_verbosity=terse - -c shared_buffers=2GB + command: > + postgres + -c max_connections=200 + -c log_error_verbosity=terse + -c shared_buffers=2GB -c effective_cache_size=4GB -c maintenance_work_mem=1GB -c checkpoint_completion_target=0.9 @@ -41,12 +41,16 @@ services: healthcheck: interval: 1s timeout: 3s - test: psql -U postgres -c "select exists (select * from pg_stat_activity where datname = '{{ database_name }}' and application_name = 'pg_restore')" | grep -e ".f$" + test: + [ + "CMD-SHELL", + 'psql -U postgres -c "select exists (select * from pg_stat_activity where datname = ''{{ database_name }}'' and application_name = ''pg_restore'')" | grep -e ".f$$"', + ] environment: - POSTGRES_PASSWORD=notsecurepassword - PGPORT=5430 external-node: - image: "matterlabs/external-node:2.0-v24.2.0" + image: "matterlabs/external-node:2.0-v24.6.0" depends_on: postgres: condition: service_healthy From b1ad01b50392a0ee241c2263ac22bb3258fae2d7 Mon 
Sep 17 00:00:00 2001 From: Marcin M <128217157+mm-zk@users.noreply.github.com> Date: Wed, 5 Jun 2024 12:58:26 +0200 Subject: [PATCH 5/8] feat: added debug_proof to prover_cli (#2052) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit ## What ❔ * Prover_cli has a new command - `debug-proof`, that prints a lot more detailed information about any basic proof file. ## Why ❔ * This will speed up debugging of the failed proofs. * Small caveat - the cli has to be compiled with `verbose_circuits` feature - which will include the whole zkevm_test_harness and all the other crypto libraries. How to use: * Simply download the .bin file with the proof, and run `./prover_cli debug-proof file.bin` Part of EVM-639 --- prover/Cargo.lock | 29 ++++++++++-------- prover/prover_cli/Cargo.toml | 5 ++++ prover/prover_cli/README.md | 30 +++++++++++++++++++ prover/prover_cli/src/cli.rs | 4 ++- prover/prover_cli/src/commands/debug_proof.rs | 19 ++++++++++++ prover/prover_cli/src/commands/mod.rs | 2 +- 6 files changed, 74 insertions(+), 15 deletions(-) create mode 100644 prover/prover_cli/src/commands/debug_proof.rs diff --git a/prover/Cargo.lock b/prover/Cargo.lock index f6f0425fa3e0..b4d25a191ff0 100644 --- a/prover/Cargo.lock +++ b/prover/Cargo.lock @@ -673,7 +673,7 @@ dependencies = [ "blake2 0.10.6 (registry+https://github.com/rust-lang/crates.io-index)", "const_format", "convert_case", - "crossbeam 0.7.3", + "crossbeam 0.8.4", "crypto-bigint 0.5.5", "cs_derive 0.1.0 (git+https://github.com/matter-labs/era-boojum?branch=main)", "derivative", @@ -894,7 +894,7 @@ dependencies = [ [[package]] name = "circuit_definitions" version = "1.5.0" -source = "git+https://github.com/matter-labs/era-zkevm_test_harness.git?branch=v1.5.0#ecb08797ced36fcc7d3696ffd2ec6a2d534b9395" +source = "git+https://github.com/matter-labs/era-zkevm_test_harness.git?branch=v1.5.0#e6fa3cbf2c9c898c3b93046162951d42d5454d5b" dependencies = [ "circuit_encodings 0.1.50", "crossbeam 0.8.4", @@ -902,8 +902,6 @@ dependencies = [ "seq-macro", "serde", "snark_wrapper", - "zk_evm 1.5.0", - "zkevm_circuits 1.5.0", ] [[package]] @@ -942,7 +940,7 @@ dependencies = [ [[package]] name = "circuit_encodings" version = "0.1.50" -source = "git+https://github.com/matter-labs/era-zkevm_test_harness.git?branch=v1.5.0#ecb08797ced36fcc7d3696ffd2ec6a2d534b9395" +source = "git+https://github.com/matter-labs/era-zkevm_test_harness.git?branch=v1.5.0#e6fa3cbf2c9c898c3b93046162951d42d5454d5b" dependencies = [ "derivative", "serde", @@ -1004,14 +1002,13 @@ dependencies = [ [[package]] name = "circuit_sequencer_api" version = "0.1.50" -source = "git+https://github.com/matter-labs/era-zkevm_test_harness.git?branch=v1.5.0#ecb08797ced36fcc7d3696ffd2ec6a2d534b9395" +source = "git+https://github.com/matter-labs/era-zkevm_test_harness.git?branch=v1.5.0#e6fa3cbf2c9c898c3b93046162951d42d5454d5b" dependencies = [ "bellman_ce 0.3.2 (git+https://github.com/matter-labs/bellman?branch=dev)", "circuit_encodings 0.1.50", "derivative", "rayon", "serde", - "zk_evm 1.5.0", ] [[package]] @@ -3190,13 +3187,16 @@ dependencies = [ [[package]] name = "kzg" version = "0.1.50" -source = "git+https://github.com/matter-labs/era-zkevm_test_harness.git?branch=v1.5.0#ecb08797ced36fcc7d3696ffd2ec6a2d534b9395" +source = "git+https://github.com/matter-labs/era-zkevm_test_harness.git?branch=v1.5.0#e6fa3cbf2c9c898c3b93046162951d42d5454d5b" dependencies = [ "boojum", "derivative", + "hex", + "once_cell", "rayon", "serde", "serde_json", + "serde_with", "zkevm_circuits 
1.5.0", ] @@ -4633,6 +4633,7 @@ dependencies = [ "tokio", "tracing", "tracing-subscriber", + "zkevm_test_harness 1.5.0", "zksync_basic_types", "zksync_config", "zksync_contracts", @@ -4870,9 +4871,9 @@ dependencies = [ [[package]] name = "regex" -version = "1.10.3" +version = "1.10.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b62dbe01f0b06f9d8dc7d49e05a0785f153b00b2c227856282f671e0318c9b15" +checksum = "c117dbdfde9c8308975b6a18d71f3f385c89461f7b3fb054288ecf2a2058ba4c" dependencies = [ "aho-corasick", "memchr", @@ -5611,6 +5612,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "678b5a069e50bf00ecd22d0cd8ddf7c236f68581b03db652061ed5eb13a312ff" dependencies = [ "base64 0.13.1", + "hex", "serde", "serde_with_macros", ] @@ -7654,7 +7656,7 @@ dependencies = [ [[package]] name = "zkevm-assembly" version = "1.5.0" -source = "git+https://github.com/matter-labs/era-zkEVM-assembly.git?branch=v1.5.0#2faea98303377cad71f4c7d8dacb9c6546874602" +source = "git+https://github.com/matter-labs/era-zkEVM-assembly.git?branch=v1.5.0#48303aa435810adb12e277494e5dae3764313330" dependencies = [ "env_logger 0.9.3", "hex", @@ -7845,7 +7847,7 @@ dependencies = [ "codegen", "crossbeam 0.8.4", "derivative", - "env_logger 0.9.3", + "env_logger 0.11.2", "hex", "rand 0.4.6", "rayon", @@ -7861,7 +7863,7 @@ dependencies = [ [[package]] name = "zkevm_test_harness" version = "1.5.0" -source = "git+https://github.com/matter-labs/era-zkevm_test_harness.git?branch=v1.5.0#ecb08797ced36fcc7d3696ffd2ec6a2d534b9395" +source = "git+https://github.com/matter-labs/era-zkevm_test_harness.git?branch=v1.5.0#e6fa3cbf2c9c898c3b93046162951d42d5454d5b" dependencies = [ "bincode", "circuit_definitions 1.5.0", @@ -7876,6 +7878,7 @@ dependencies = [ "lazy_static", "rand 0.4.6", "rayon", + "regex", "reqwest", "serde", "serde_json", diff --git a/prover/prover_cli/Cargo.toml b/prover/prover_cli/Cargo.toml index 272baaf9491a..ca6a4d2dd65d 100644 --- a/prover/prover_cli/Cargo.toml +++ b/prover/prover_cli/Cargo.toml @@ -33,3 +33,8 @@ strum.workspace = true colored.workspace = true sqlx.workspace = true circuit_definitions.workspace = true +zkevm_test_harness = { workspace = true, optional = true, features = ["verbose_circuits"] } + +[features] +# enable verbose circuits, if you want to use debug_circuit command (as it is quite heavy dependency). +verbose_circuits = ["zkevm_test_harness"] \ No newline at end of file diff --git a/prover/prover_cli/README.md b/prover/prover_cli/README.md index d4da450fccad..74f291c8d573 100644 --- a/prover/prover_cli/README.md +++ b/prover/prover_cli/README.md @@ -161,6 +161,36 @@ TODO TODO +### `prover_cli debug-proof` + +Debug proof is an advanced feature that can be used to debug failing circuit proofs. It will re-run the proving circuit +for a given proof file - and print detailed debug logs. + +**WARNING** - it does require compilation with `--release --features verbose_circuits` enabled (which includes all the +necessary dependencies). 
+ +Example output + +``` +cargo run --release --features verbose_circuits -- debug-proof --file ~/prover_jobs_23_05.bin + +[call_ret_impl/far_call.rs:1012:13] max_passable.witness_hook(&*cs)().unwrap() = 535437 +[call_ret_impl/far_call.rs:1024:13] leftover.witness_hook(&*cs)().unwrap() = 8518 +[call_ret_impl/far_call.rs:1025:13] ergs_to_pass.witness_hook(&*cs)().unwrap() = 544211 +[call_ret_impl/far_call.rs:1036:13] remaining_from_max_passable.witness_hook(&*cs)().unwrap() = 4294958522 +[call_ret_impl/far_call.rs:1037:13] leftover_and_remaining_if_no_uf.witness_hook(&*cs)().unwrap() = 4294967040 +[call_ret_impl/far_call.rs:1047:13] ergs_to_pass.witness_hook(&*cs)().unwrap() = 535437 +[call_ret_impl/far_call.rs:1048:13] remaining_for_this_context.witness_hook(&*cs)().unwrap() = 8518 +[call_ret_impl/far_call.rs:1049:13] extra_ergs_from_caller_to_callee.witness_hook(&*cs)().unwrap() = 0 +[call_ret_impl/far_call.rs:1050:13] callee_stipend.witness_hook(&*cs)().unwrap() = 0 +New frame as a result of FAR CALL: Some(ExecutionContextRecordWitness { this: 0x263eb3945d7cee723110c69da5fabc3c6d5a802f, caller: 0x973a7a18f29699b5b976a5026d795f5169cb3348, code_address: 0x0000000000000000000000000000000000000000, code_page: 2416, base_page: 3075968, heap_upper_bound: 4096, aux_heap_upper_bound: 4096, reverted_queue_head: [0x1d06f395ca74bd80, 0x3c3099adfd7d31cb, 0x119db3dd58b4aca6, 0xb8d2f7bd2c1b5e48], reverted_queue_tail: [0x1d06f395ca74bd80, 0x3c3099adfd7d31cb, 0x119db3dd58b4aca6, 0xb8d2f7bd2c1b5e48], reverted_queue_segment_len: 0, pc: 0, sp: 0, exception_handler_loc: 176, ergs_remaining: 535437, is_static_execution: false, is_kernel_mode: false, this_shard_id: 0, caller_shard_id: 0, code_shard_id: 0, context_u128_value_composite: [0, 0, 0, 0], is_local_call: false, total_pubdata_spent: 0, stipend: 0 }) +thread 'main' panicked at circuit_definitions/src/aux_definitions/witness_oracle.rs:506:13: +assertion `left == right` failed + left: 2097152 + right: 4096 +note: run with `RUST_BACKTRACE=1` environment variable to display a backtrace +``` + ## Development Status | **Command** | **Subcommand** | **Flags** | **Status** | diff --git a/prover/prover_cli/src/cli.rs b/prover/prover_cli/src/cli.rs index bbcf5ac8b98d..08025c904e7c 100644 --- a/prover/prover_cli/src/cli.rs +++ b/prover/prover_cli/src/cli.rs @@ -1,7 +1,7 @@ use clap::{command, Args, Parser, Subcommand}; use zksync_types::url::SensitiveUrl; -use crate::commands::{self, config, delete, get_file_info, requeue, restart}; +use crate::commands::{self, config, debug_proof, delete, get_file_info, requeue, restart}; pub const VERSION_STRING: &str = env!("CARGO_PKG_VERSION"); @@ -27,6 +27,7 @@ pub struct ProverCLIConfig { #[derive(Subcommand)] enum ProverCommand { + DebugProof(debug_proof::Args), FileInfo(get_file_info::Args), Config(ProverCLIConfig), Delete(delete::Args), @@ -45,6 +46,7 @@ pub async fn start() -> anyhow::Result<()> { ProverCommand::Status(cmd) => cmd.run(config).await?, ProverCommand::Requeue(args) => requeue::run(args, config).await?, ProverCommand::Restart(args) => restart::run(args).await?, + ProverCommand::DebugProof(args) => debug_proof::run(args).await?, }; Ok(()) diff --git a/prover/prover_cli/src/commands/debug_proof.rs b/prover/prover_cli/src/commands/debug_proof.rs new file mode 100644 index 000000000000..16abbfcc6e55 --- /dev/null +++ b/prover/prover_cli/src/commands/debug_proof.rs @@ -0,0 +1,19 @@ +use clap::Args as ClapArgs; + +#[derive(ClapArgs)] +pub(crate) struct Args { + /// File with the basic proof. 
+ #[clap(short, long)] + file: String, +} + +pub(crate) async fn run(_args: Args) -> anyhow::Result<()> { + #[cfg(not(feature = "verbose_circuits"))] + anyhow::bail!("Please compile with verbose_circuits feature"); + #[cfg(feature = "verbose_circuits")] + { + let buffer = std::fs::read(_args.file).unwrap(); + zkevm_test_harness::debug::debug_basic_circuit(&buffer); + Ok(()) + } +} diff --git a/prover/prover_cli/src/commands/mod.rs b/prover/prover_cli/src/commands/mod.rs index 34291d91ce60..ec58554da508 100644 --- a/prover/prover_cli/src/commands/mod.rs +++ b/prover/prover_cli/src/commands/mod.rs @@ -1,8 +1,8 @@ pub(crate) mod config; +pub(crate) mod debug_proof; pub(crate) mod delete; pub(crate) mod get_file_info; pub(crate) mod requeue; pub(crate) mod restart; pub(crate) mod status; - pub(crate) use status::StatusCommand; From 351e13d2a5de367e9be3dc2baf26498bc9483700 Mon Sep 17 00:00:00 2001 From: Marcin M <128217157+mm-zk@users.noreply.github.com> Date: Wed, 5 Jun 2024 13:10:11 +0200 Subject: [PATCH 6/8] =?UTF-8?q?feat:=20Added=20workflow=20dispatch=20to=20?= =?UTF-8?q?zk-environment,=20to=20allow=20building=20te=E2=80=A6=20(#2147)?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit …st images. ## What ❔ * Added workflow_dispatch to zk_environment ## Why ❔ * This way, you can build the 'experimental' docker images to test things before merging into the 'main' branch. * It will build only the basic zk_environment images (not the GPU-specific ones), and it will NOT mark them as latest. --- .github/workflows/zk-environment-publish.yml | 16 +++++++++++----- 1 file changed, 11 insertions(+), 5 deletions(-) diff --git a/.github/workflows/zk-environment-publish.yml b/.github/workflows/zk-environment-publish.yml index ea3371a094cd..46ceeb2cb5a9 100644 --- a/.github/workflows/zk-environment-publish.yml +++ b/.github/workflows/zk-environment-publish.yml @@ -1,6 +1,10 @@ name: Publish zk-environment Docker images on: + # Workflow dispatch, to allow building and pushing new environments. + # It will NOT mark them as latest. + workflow_dispatch: + push: branches: - main @@ -46,7 +50,7 @@ jobs: - .github/workflows/zk-environment-publish.yml get_short_sha: - if: needs.changed_files.outputs.zk_environment == 'true' + if: ${{ (needs.changed_files.outputs.zk_environment == 'true') || (github.event_name == 'workflow_dispatch') }} needs: [changed_files] runs-on: ubuntu-latest outputs: @@ -60,7 +64,8 @@ jobs: run: echo "short_sha=${GITHUB_SHA::7}" >> $GITHUB_OUTPUT zk_environment: - if: needs.changed_files.outputs.zk_environment == 'true' + # Build and push a new environment if workflow dispatch is requested.
+ if: ${{ (needs.changed_files.outputs.zk_environment == 'true') || (github.event_name == 'workflow_dispatch') }} needs: [changed_files, get_short_sha] name: Build and optionally push zk-environment Docker images to Docker Hub strategy: @@ -79,7 +84,7 @@ jobs: - name: Set up Docker Buildx uses: docker/setup-buildx-action@f03ac48505955848960e80bbb68046aa35c7b9e7 # v2 - name: Log in to Docker Hub - if: github.event_name == 'push' && github.ref == 'refs/heads/main' + if: ${{ (github.event_name == 'push' && github.ref == 'refs/heads/main') || (github.event_name == 'workflow_dispatch') }} uses: docker/login-action@f4ef78c080cd8ba55a85445d5b36e214a81df20a # v2.1.0 with: username: ${{ secrets.DOCKERHUB_USER }} @@ -91,7 +96,7 @@ jobs: target: rust-lightweight tags: "matterlabs/zk-environment:${{ needs.get_short_sha.outputs.short_sha }}-lightweight-${{ matrix.arch }}" build-args: ARCH=${{ matrix.arch }} - push: ${{ github.event_name == 'push' && github.ref == 'refs/heads/main' }} + push: ${{ (github.event_name == 'push' && github.ref == 'refs/heads/main') || (github.event_name == 'workflow_dispatch') }} - name: Build and optionally push zk-environment lightweight Rust nightly uses: docker/build-push-action@3b5e8027fcad23fda98b2e3ac259d8d67585f671 with: @@ -99,9 +104,10 @@ jobs: target: rust-lightweight-nightly tags: "matterlabs/zk-environment:${{ needs.get_short_sha.outputs.short_sha }}-lightweight-nightly-${{ matrix.arch }}" build-args: ARCH=${{ matrix.arch }} - push: ${{ github.event_name == 'push' && github.ref == 'refs/heads/main' }} + push: ${{ (github.event_name == 'push' && github.ref == 'refs/heads/main') || (github.event_name == 'workflow_dispatch') }} zk_environment_multiarch_manifest: + # We'll update the 'latest' tag only for environments generated from 'main'. if: needs.changed_files.outputs.zk_environment == 'true' && github.event_name == 'push' && github.ref == 'refs/heads/main' needs: [changed_files, get_short_sha, zk_environment] runs-on: ubuntu-latest From 5c0396441ea5383da322670a57ca0392d2a647fa Mon Sep 17 00:00:00 2001 From: Alex Ostrovski Date: Wed, 5 Jun 2024 14:56:22 +0300 Subject: [PATCH 7/8] refactor(object-store): Refactor object store to fit into node framework (#2138) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit ## What ❔ Refactors `ObjectStore` and `ObjectStoreFactory` to fit them into the node framework: - Consistently uses `Arc` in DI - Clearly documents `ObjectStoreFactory` as one possible `ObjectStore` producer - Exposes GCS, file-based and mock object stores directly and removes the ability to create mock object stores from `ObjectStoreFactory` - Refactors retries as `ObjectStore` "middleware" (see the sketch after the checklist) ## Why ❔ Currently, object store APIs don't fit into the node framework well, leading to suboptimal DevEx. ## Checklist - [x] PR title corresponds to the body of PR (we generate changelog entries from PRs). - [x] Tests for the changes have been added / updated. - [x] Documentation comments have been added / updated. - [x] Code has been formatted via `zk fmt` and `zk lint`. - [x] Spellcheck has been run via `zk spellcheck`.
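The "middleware" point deserves a sketch: retries used to be hard-coded into the GCS client, whereas now they are a generic wrapper applicable to any store. Roughly, with a simplified trait and error type, and assuming the `async-trait` and `tokio` crates (the real implementation lives in the new `retries.rs` and covers all `ObjectStore` methods):

```rust
use std::time::Duration;

use async_trait::async_trait;

// Simplified stand-ins for the real `ObjectStore` trait and `ObjectStoreError`.
#[derive(Debug)]
struct StoreError {
    is_transient: bool,
}

#[async_trait]
trait ObjectStore: Send + Sync {
    async fn get_raw(&self, key: &str) -> Result<Vec<u8>, StoreError>;
}

/// Retry "middleware": wraps any `ObjectStore` and retries transient failures
/// with exponential backoff, so individual backends (GCS, file-based, mock)
/// no longer implement retries themselves.
struct StoreWithRetries<S> {
    inner: S,
    max_retries: u16,
}

#[async_trait]
impl<S: ObjectStore> ObjectStore for StoreWithRetries<S> {
    async fn get_raw(&self, key: &str) -> Result<Vec<u8>, StoreError> {
        let mut backoff = Duration::from_secs(1);
        let mut attempt = 0;
        loop {
            match self.inner.get_raw(key).await {
                // Retry only transient errors, and only while budget remains.
                Err(err) if err.is_transient && attempt < self.max_retries => {
                    attempt += 1;
                    tokio::time::sleep(backoff).await;
                    backoff *= 2;
                }
                // Success, a fatal error, or exhausted retries: pass through.
                result => return result,
            }
        }
    }
}
```

Since the wrapper itself implements `ObjectStore`, callers keep holding a plain `Arc<dyn ObjectStore>` and cannot tell whether retries are layered on; the same property is what makes it easy to inject `MockObjectStore` in tests.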
--- core/bin/block_reverter/src/main.rs | 2 +- core/bin/external_node/src/init.rs | 6 +- core/bin/snapshots_creator/src/main.rs | 2 +- core/bin/snapshots_creator/src/tests.rs | 38 +-- core/lib/object_store/README.md | 10 +- core/lib/object_store/src/factory.rs | 110 ++++++++ core/lib/object_store/src/file.rs | 8 +- core/lib/object_store/src/gcs.rs | 241 +++++------------- core/lib/object_store/src/lib.rs | 18 +- core/lib/object_store/src/metrics.rs | 10 +- core/lib/object_store/src/mock.rs | 14 +- core/lib/object_store/src/objects.rs | 6 +- core/lib/object_store/src/raw.rs | 137 +--------- core/lib/object_store/src/retries.rs | 184 +++++++++++++ .../tests/job_serialization.rs | 4 +- core/lib/snapshots_applier/src/lib.rs | 2 +- core/lib/snapshots_applier/src/tests/mod.rs | 14 +- core/lib/snapshots_applier/src/tests/utils.rs | 5 +- core/lib/zksync_core_leftovers/src/lib.rs | 8 +- core/node/block_reverter/src/lib.rs | 2 +- core/node/block_reverter/src/tests.rs | 16 +- core/node/eth_sender/src/tests.rs | 5 +- core/node/metadata_calculator/src/tests.rs | 17 +- .../implementations/layers/object_store.rs | 2 +- prover/proof_fri_compressor/src/main.rs | 2 +- prover/prover_fri/src/main.rs | 6 +- prover/prover_fri/tests/basic_test.rs | 4 +- prover/prover_fri_gateway/src/main.rs | 4 +- .../witness_generator/src/basic_circuits.rs | 8 +- .../witness_generator/src/leaf_aggregation.rs | 8 +- prover/witness_generator/src/main.rs | 27 +- .../witness_generator/src/node_aggregation.rs | 8 +- prover/witness_generator/src/recursion_tip.rs | 8 +- prover/witness_generator/src/scheduler.rs | 8 +- prover/witness_generator/tests/basic_test.rs | 6 +- .../witness_vector_generator/src/generator.rs | 8 +- prover/witness_vector_generator/src/main.rs | 6 +- 37 files changed, 512 insertions(+), 452 deletions(-) create mode 100644 core/lib/object_store/src/factory.rs create mode 100644 core/lib/object_store/src/retries.rs diff --git a/core/bin/block_reverter/src/main.rs b/core/bin/block_reverter/src/main.rs index b5e5c4054a3a..8d1198627a83 100644 --- a/core/bin/block_reverter/src/main.rs +++ b/core/bin/block_reverter/src/main.rs @@ -229,7 +229,7 @@ async fn main() -> anyhow::Result<()> { block_reverter.enable_rolling_back_snapshot_objects( ObjectStoreFactory::new(object_store_config.0) .create_store() - .await, + .await?, ); } } diff --git a/core/bin/external_node/src/init.rs b/core/bin/external_node/src/init.rs index fb30628e3890..a9ee796194cc 100644 --- a/core/bin/external_node/src/init.rs +++ b/core/bin/external_node/src/init.rs @@ -91,16 +91,16 @@ pub(crate) async fn ensure_storage_initialized( tracing::warn!("Proceeding with snapshot recovery. 
This is an experimental feature; use at your own risk"); let object_store_config = snapshot_recovery_object_store_config()?; - let blob_store = ObjectStoreFactory::new(object_store_config) + let object_store = ObjectStoreFactory::new(object_store_config) .create_store() - .await; + .await?; let config = SnapshotsApplierConfig::default(); let mut snapshots_applier_task = SnapshotsApplierTask::new( config, pool, Box::new(main_node_client.for_component("snapshot_recovery")), - blob_store, + object_store, ); if let Some(snapshot_l1_batch) = recovery_config.snapshot_l1_batch_override { tracing::info!( diff --git a/core/bin/snapshots_creator/src/main.rs b/core/bin/snapshots_creator/src/main.rs index 8275c0044d3b..91751f6d2ddf 100644 --- a/core/bin/snapshots_creator/src/main.rs +++ b/core/bin/snapshots_creator/src/main.rs @@ -75,7 +75,7 @@ async fn main() -> anyhow::Result<()> { SnapshotsObjectStoreConfig::from_env().context("SnapshotsObjectStoreConfig::from_env()")?; let blob_store = ObjectStoreFactory::new(object_store_config.0) .create_store() - .await; + .await?; let database_secrets = DatabaseSecrets::from_env().context("DatabaseSecrets")?; let creator_config = diff --git a/core/bin/snapshots_creator/src/tests.rs b/core/bin/snapshots_creator/src/tests.rs index d359afd79bd7..59c0e853a621 100644 --- a/core/bin/snapshots_creator/src/tests.rs +++ b/core/bin/snapshots_creator/src/tests.rs @@ -11,7 +11,7 @@ use std::{ use rand::{thread_rng, Rng}; use zksync_dal::{Connection, CoreDal}; -use zksync_object_store::ObjectStore; +use zksync_object_store::{MockObjectStore, ObjectStore}; use zksync_types::{ block::{L1BatchHeader, L1BatchTreeData, L2BlockHeader}, snapshots::{ @@ -257,8 +257,7 @@ async fn prepare_postgres( async fn persisting_snapshot_metadata() { let pool = ConnectionPool::::test_pool().await; let mut rng = thread_rng(); - let object_store_factory = ObjectStoreFactory::mock(); - let object_store = object_store_factory.create_store().await; + let object_store = MockObjectStore::arc(); // Insert some data to Postgres. 
let mut conn = pool.connection().await.unwrap(); @@ -306,18 +305,16 @@ async fn persisting_snapshot_metadata() { async fn persisting_snapshot_factory_deps() { let pool = ConnectionPool::::test_pool().await; let mut rng = thread_rng(); - let object_store_factory = ObjectStoreFactory::mock(); - let object_store = object_store_factory.create_store().await; + let object_store = MockObjectStore::arc(); let mut conn = pool.connection().await.unwrap(); let expected_outputs = prepare_postgres(&mut rng, &mut conn, 10).await; - SnapshotCreator::for_tests(object_store, pool.clone()) + SnapshotCreator::for_tests(object_store.clone(), pool.clone()) .run(TEST_CONFIG, MIN_CHUNK_COUNT) .await .unwrap(); let snapshot_l1_batch_number = L1BatchNumber(8); - let object_store = object_store_factory.create_store().await; let SnapshotFactoryDependencies { factory_deps } = object_store.get(snapshot_l1_batch_number).await.unwrap(); let actual_deps: HashSet<_> = factory_deps.into_iter().collect(); @@ -328,18 +325,16 @@ async fn persisting_snapshot_factory_deps() { async fn persisting_snapshot_logs() { let pool = ConnectionPool::::test_pool().await; let mut rng = thread_rng(); - let object_store_factory = ObjectStoreFactory::mock(); - let object_store = object_store_factory.create_store().await; + let object_store = MockObjectStore::arc(); let mut conn = pool.connection().await.unwrap(); let expected_outputs = prepare_postgres(&mut rng, &mut conn, 10).await; - SnapshotCreator::for_tests(object_store, pool.clone()) + SnapshotCreator::for_tests(object_store.clone(), pool.clone()) .run(TEST_CONFIG, MIN_CHUNK_COUNT) .await .unwrap(); let snapshot_l1_batch_number = L1BatchNumber(8); - let object_store = object_store_factory.create_store().await; assert_storage_logs(&*object_store, snapshot_l1_batch_number, &expected_outputs).await; } @@ -364,12 +359,11 @@ async fn assert_storage_logs( async fn recovery_workflow() { let pool = ConnectionPool::::test_pool().await; let mut rng = thread_rng(); - let object_store_factory = ObjectStoreFactory::mock(); - let object_store = object_store_factory.create_store().await; + let object_store = MockObjectStore::arc(); let mut conn = pool.connection().await.unwrap(); let expected_outputs = prepare_postgres(&mut rng, &mut conn, 10).await; - SnapshotCreator::for_tests(object_store, pool.clone()) + SnapshotCreator::for_tests(object_store.clone(), pool.clone()) .stop_after_chunk_count(0) .run(SEQUENTIAL_TEST_CONFIG, MIN_CHUNK_COUNT) .await @@ -387,14 +381,13 @@ async fn recovery_workflow() { .iter() .all(Option::is_none)); - let object_store = object_store_factory.create_store().await; let SnapshotFactoryDependencies { factory_deps } = object_store.get(snapshot_l1_batch_number).await.unwrap(); let actual_deps: HashSet<_> = factory_deps.into_iter().collect(); assert_eq!(actual_deps, expected_outputs.deps); // Process 2 storage log chunks, then stop. - SnapshotCreator::for_tests(object_store, pool.clone()) + SnapshotCreator::for_tests(object_store.clone(), pool.clone()) .stop_after_chunk_count(2) .run(SEQUENTIAL_TEST_CONFIG, MIN_CHUNK_COUNT) .await @@ -416,13 +409,11 @@ async fn recovery_workflow() { ); // Process the remaining chunks. 
- let object_store = object_store_factory.create_store().await; - SnapshotCreator::for_tests(object_store, pool.clone()) + SnapshotCreator::for_tests(object_store.clone(), pool.clone()) .run(SEQUENTIAL_TEST_CONFIG, MIN_CHUNK_COUNT) .await .unwrap(); - let object_store = object_store_factory.create_store().await; assert_storage_logs(&*object_store, snapshot_l1_batch_number, &expected_outputs).await; } @@ -430,12 +421,11 @@ async fn recovery_workflow() { async fn recovery_workflow_with_varying_chunk_size() { let pool = ConnectionPool::::test_pool().await; let mut rng = thread_rng(); - let object_store_factory = ObjectStoreFactory::mock(); - let object_store = object_store_factory.create_store().await; + let object_store = MockObjectStore::arc(); let mut conn = pool.connection().await.unwrap(); let expected_outputs = prepare_postgres(&mut rng, &mut conn, 10).await; - SnapshotCreator::for_tests(object_store, pool.clone()) + SnapshotCreator::for_tests(object_store.clone(), pool.clone()) .stop_after_chunk_count(2) .run(SEQUENTIAL_TEST_CONFIG, MIN_CHUNK_COUNT) .await @@ -461,12 +451,10 @@ async fn recovery_workflow_with_varying_chunk_size() { storage_logs_chunk_size: 1, // << should be ignored ..SEQUENTIAL_TEST_CONFIG }; - let object_store = object_store_factory.create_store().await; - SnapshotCreator::for_tests(object_store, pool.clone()) + SnapshotCreator::for_tests(object_store.clone(), pool.clone()) .run(config_with_other_size, MIN_CHUNK_COUNT) .await .unwrap(); - let object_store = object_store_factory.create_store().await; assert_storage_logs(&*object_store, snapshot_l1_batch_number, &expected_outputs).await; } diff --git a/core/lib/object_store/README.md b/core/lib/object_store/README.md index f7d004e3d2c7..5ffa13e4a72b 100644 --- a/core/lib/object_store/README.md +++ b/core/lib/object_store/README.md @@ -3,11 +3,13 @@ This crate provides the object storage abstraction that allows to get, put and remove binary blobs. The following implementations are available: -- File-based storage saving blobs as separate files in the local filesystem -- GCS-based storage +- File-based store saving blobs as separate files in the local filesystem +- GCS-based store +- Mock in-memory store -These implementations are not exposed externally. Instead, a store trait object can be constructed based on the -[configuration], which can be provided explicitly or constructed from the environment. +Normally, these implementations are not used directly. Instead, a store trait object can be constructed based on the +[configuration], which can be provided explicitly or constructed from the environment. This trait object is what should +be used for dependency injection. Besides the lower-level storage abstraction, the crate provides high-level typesafe methods to store (de)serializable objects. Prefer using these methods whenever possible. diff --git a/core/lib/object_store/src/factory.rs b/core/lib/object_store/src/factory.rs new file mode 100644 index 000000000000..4859b4c2860e --- /dev/null +++ b/core/lib/object_store/src/factory.rs @@ -0,0 +1,110 @@ +use std::sync::Arc; + +use anyhow::Context as _; +use tokio::sync::OnceCell; +use zksync_config::configs::object_store::{ObjectStoreConfig, ObjectStoreMode}; + +use crate::{ + file::FileBackedObjectStore, + gcs::{GoogleCloudStore, GoogleCloudStoreAuthMode}, + raw::{ObjectStore, ObjectStoreError}, + retries::StoreWithRetries, +}; + +/// Factory of [`ObjectStore`]s that caches the store instance once it's created. Used mainly for legacy reasons. 
+/// +/// Please do not use this factory in dependency injection; rely on `Arc` instead. This allows to +/// inject mock store implementations, decorate an object store with middleware etc. +#[derive(Debug)] +pub struct ObjectStoreFactory { + config: ObjectStoreConfig, + store: OnceCell>, +} + +impl ObjectStoreFactory { + /// Creates an object store factory based on the provided `config`. + pub fn new(config: ObjectStoreConfig) -> Self { + Self { + config, + store: OnceCell::new(), + } + } + + /// Creates an [`ObjectStore`] or returns a cached store if one was created previously. + /// + /// # Errors + /// + /// Returns an error if store initialization fails (e.g., because of incorrect configuration). + pub async fn create_store(&self) -> anyhow::Result> { + self.store + .get_or_try_init(|| async { + Self::create_from_config(&self.config) + .await + .with_context(|| { + format!( + "failed creating object store factory with configuration {:?}", + self.config + ) + }) + }) + .await + .cloned() + } + + async fn create_from_config( + config: &ObjectStoreConfig, + ) -> Result, ObjectStoreError> { + match &config.mode { + ObjectStoreMode::GCS { bucket_base_url } => { + tracing::trace!( + "Initialized GoogleCloudStorage Object store without credential file" + ); + let store = StoreWithRetries::try_new(config.max_retries, || { + GoogleCloudStore::new( + GoogleCloudStoreAuthMode::Authenticated, + bucket_base_url.clone(), + ) + }) + .await?; + Ok(Arc::new(store)) + } + ObjectStoreMode::GCSWithCredentialFile { + bucket_base_url, + gcs_credential_file_path, + } => { + tracing::trace!("Initialized GoogleCloudStorage Object store with credential file"); + let store = StoreWithRetries::try_new(config.max_retries, || { + GoogleCloudStore::new( + GoogleCloudStoreAuthMode::AuthenticatedWithCredentialFile( + gcs_credential_file_path.clone(), + ), + bucket_base_url.clone(), + ) + }) + .await?; + Ok(Arc::new(store)) + } + ObjectStoreMode::FileBacked { + file_backed_base_path, + } => { + tracing::trace!("Initialized FileBacked Object store"); + let store = StoreWithRetries::try_new(config.max_retries, || { + FileBackedObjectStore::new(file_backed_base_path.clone()) + }) + .await?; + Ok(Arc::new(store)) + } + ObjectStoreMode::GCSAnonymousReadOnly { bucket_base_url } => { + tracing::trace!("Initialized GoogleCloudStoragePublicReadOnly store"); + let store = StoreWithRetries::try_new(config.max_retries, || { + GoogleCloudStore::new( + GoogleCloudStoreAuthMode::Anonymous, + bucket_base_url.clone(), + ) + }) + .await?; + Ok(Arc::new(store)) + } + } + } +} diff --git a/core/lib/object_store/src/file.rs b/core/lib/object_store/src/file.rs index f641ab9c74a1..94689c78028f 100644 --- a/core/lib/object_store/src/file.rs +++ b/core/lib/object_store/src/file.rs @@ -17,12 +17,18 @@ impl From for ObjectStoreError { } } +/// [`ObjectStore`] implementation storing objects as files in a local filesystem. Mostly useful for local testing. #[derive(Debug)] -pub(crate) struct FileBackedObjectStore { +pub struct FileBackedObjectStore { base_dir: String, } impl FileBackedObjectStore { + /// Creates a new file-backed store with its root at the specified path. + /// + /// # Errors + /// + /// Propagates I/O errors. pub async fn new(base_dir: String) -> Result { for bucket in &[ Bucket::ProverJobs, diff --git a/core/lib/object_store/src/gcs.rs b/core/lib/object_store/src/gcs.rs index 8cd7b982a058..65d31bf53eaf 100644 --- a/core/lib/object_store/src/gcs.rs +++ b/core/lib/object_store/src/gcs.rs @@ -1,6 +1,6 @@ //! 
GCS-based [`ObjectStore`] implementation. -use std::{fmt, future::Future, time::Duration}; +use std::{error::Error as StdError, fmt, io}; use async_trait::async_trait; use google_cloud_auth::{credentials::CredentialsFile, error::Error as AuthError}; @@ -17,140 +17,78 @@ use google_cloud_storage::{ }, }; use http::StatusCode; -use rand::Rng; -use crate::{ - metrics::GCS_METRICS, - raw::{Bucket, ObjectStore, ObjectStoreError}, -}; +use crate::raw::{Bucket, ObjectStore, ObjectStoreError}; -async fn retry(max_retries: u16, mut f: F) -> Result -where - Fut: Future>, - F: FnMut() -> Fut, -{ - let mut retries = 1; - let mut backoff_secs = 1; - loop { - match f().await { - Ok(result) => return Ok(result), - Err(err) if err.is_transient() => { - if retries > max_retries { - tracing::warn!(%err, "Exhausted {max_retries} retries performing GCS request; returning last error"); - return Err(err); - } - tracing::info!(%err, "Failed GCS request {retries}/{max_retries}, retrying."); - retries += 1; - // Randomize sleep duration to prevent stampeding the server if multiple requests are initiated at the same time. - let sleep_duration = Duration::from_secs(backoff_secs) - .mul_f32(rand::thread_rng().gen_range(0.8..1.2)); - tokio::time::sleep(sleep_duration).await; - backoff_secs *= 2; - } - Err(err) => { - tracing::warn!(%err, "Failed GCS request with a fatal error"); - return Err(err); - } - } - } -} - -pub(crate) struct GoogleCloudStorage { +/// [`ObjectStore`] implementation based on GCS. +pub struct GoogleCloudStore { bucket_prefix: String, - max_retries: u16, client: Client, } -impl fmt::Debug for GoogleCloudStorage { +impl fmt::Debug for GoogleCloudStore { fn fmt(&self, formatter: &mut fmt::Formatter<'_>) -> fmt::Result { formatter - .debug_struct("GoogleCloudStorage") + .debug_struct("GoogleCloudStore") .field("bucket_prefix", &self.bucket_prefix) - .field("max_retries", &self.max_retries) + // Skip `client` as its representation may contain sensitive info .finish_non_exhaustive() } } +/// Authentication mode for [`GoogleCloudStore`]. #[derive(Debug, Clone)] -pub(crate) enum GoogleCloudStorageAuthMode { +#[non_exhaustive] +pub enum GoogleCloudStoreAuthMode { + /// Authentication via a credentials file at the specified path. AuthenticatedWithCredentialFile(String), + /// Ambient authentication (works if the binary runs on Google Cloud). Authenticated, + /// Anonymous access (only works for public GCS buckets for read operations). Anonymous, } -impl GoogleCloudStorage { +impl GoogleCloudStore { + /// Creates a new cloud store. + /// + /// # Errors + /// + /// Propagates GCS initialization errors. 
pub async fn new( - auth_mode: GoogleCloudStorageAuthMode, + auth_mode: GoogleCloudStoreAuthMode, bucket_prefix: String, - max_retries: u16, ) -> Result { - let client_config = retry(max_retries, || async { - Self::get_client_config(auth_mode.clone()) - .await - .map_err(Into::into) - }) - .await?; - + let client_config = Self::get_client_config(auth_mode.clone()).await?; Ok(Self { client: Client::new(client_config), bucket_prefix, - max_retries, }) } async fn get_client_config( - auth_mode: GoogleCloudStorageAuthMode, + auth_mode: GoogleCloudStoreAuthMode, ) -> Result { match auth_mode { - GoogleCloudStorageAuthMode::AuthenticatedWithCredentialFile(path) => { + GoogleCloudStoreAuthMode::AuthenticatedWithCredentialFile(path) => { let cred_file = CredentialsFile::new_from_file(path).await?; ClientConfig::default().with_credentials(cred_file).await } - GoogleCloudStorageAuthMode::Authenticated => ClientConfig::default().with_auth().await, - GoogleCloudStorageAuthMode::Anonymous => Ok(ClientConfig::default().anonymous()), + GoogleCloudStoreAuthMode::Authenticated => ClientConfig::default().with_auth().await, + GoogleCloudStoreAuthMode::Anonymous => Ok(ClientConfig::default().anonymous()), } } fn filename(bucket: &str, filename: &str) -> String { format!("{bucket}/{filename}") } - - // For some bizarre reason, `async fn` doesn't work here, failing with the following error: - // - // > hidden type for `impl std::future::Future>` - // > captures lifetime that does not appear in bounds - fn remove_inner( - &self, - bucket: &'static str, - key: &str, - ) -> impl Future> + '_ { - let filename = Self::filename(bucket, key); - tracing::trace!( - "Removing data from GCS for key {filename} from bucket {}", - self.bucket_prefix - ); - - let request = DeleteObjectRequest { - bucket: self.bucket_prefix.clone(), - object: filename, - ..DeleteObjectRequest::default() - }; - async move { - retry(self.max_retries, || async { - self.client - .delete_object(&request) - .await - .map_err(ObjectStoreError::from) - }) - .await - } - } } impl From for ObjectStoreError { fn from(err: AuthError) -> Self { - let is_transient = - matches!(&err, AuthError::HttpError(err) if err.is_timeout() || err.is_connect()); + let is_transient = matches!( + &err, + AuthError::HttpError(err) if err.is_timeout() || err.is_connect() || has_transient_io_source(err) + ); Self::Initialization { source: err.into(), is_transient, @@ -158,6 +96,21 @@ impl From for ObjectStoreError { } } +fn has_transient_io_source(mut err: &(dyn StdError + 'static)) -> bool { + loop { + if err.is::() { + // We treat any I/O errors as transient. This isn't always true, but frequently occurring I/O errors + // (e.g., "connection reset by peer") *are* transient, and treating an error as transient is a safer option, + // even if it can lead to unnecessary retries. 
+ return true; + } + err = match err.source() { + Some(source) => source, + None => return false, + }; + } +} + impl From for ObjectStoreError { fn from(err: HttpError) -> Self { let is_not_found = match &err { @@ -171,8 +124,10 @@ impl From for ObjectStoreError { if is_not_found { ObjectStoreError::KeyNotFound(err.into()) } else { - let is_transient = - matches!(&err, HttpError::HttpClient(err) if err.is_timeout() || err.is_connect()); + let is_transient = matches!( + &err, + HttpError::HttpClient(err) if err.is_timeout() || err.is_connect() || has_transient_io_source(err) + ); ObjectStoreError::Other { is_transient, source: err.into(), @@ -182,9 +137,8 @@ impl From for ObjectStoreError { } #[async_trait] -impl ObjectStore for GoogleCloudStorage { +impl ObjectStore for GoogleCloudStore { async fn get_raw(&self, bucket: Bucket, key: &str) -> Result, ObjectStoreError> { - let fetch_latency = GCS_METRICS.start_fetch(bucket); let filename = Self::filename(bucket.as_str(), key); tracing::trace!( "Fetching data from GCS for key {filename} from bucket {}", @@ -196,20 +150,10 @@ impl ObjectStore for GoogleCloudStorage { object: filename, ..GetObjectRequest::default() }; - let range = Range::default(); - let blob = retry(self.max_retries, || async { - self.client - .download_object(&request, &range) - .await - .map_err(Into::into) - }) - .await; - - let elapsed = fetch_latency.observe(); - tracing::trace!( - "Fetched data from GCS for key {key} from bucket {bucket} and it took: {elapsed:?}" - ); - blob + self.client + .download_object(&request, &Range::default()) + .await + .map_err(Into::into) } async fn put_raw( @@ -218,7 +162,6 @@ impl ObjectStore for GoogleCloudStorage { key: &str, value: Vec, ) -> Result<(), ObjectStoreError> { - let store_latency = GCS_METRICS.start_store(bucket); let filename = Self::filename(bucket.as_str(), key); tracing::trace!( "Storing data to GCS for key {filename} from bucket {}", @@ -230,23 +173,26 @@ impl ObjectStore for GoogleCloudStorage { bucket: self.bucket_prefix.clone(), ..Default::default() }; - let object = retry(self.max_retries, || async { - self.client - .upload_object(&request, value.clone(), &upload_type) - .await - .map_err(Into::into) - }) - .await; + self.client + .upload_object(&request, value.clone(), &upload_type) + .await?; + Ok(()) + } - let elapsed = store_latency.observe(); + async fn remove_raw(&self, bucket: Bucket, key: &str) -> Result<(), ObjectStoreError> { + let filename = Self::filename(bucket.as_str(), key); tracing::trace!( - "Stored data to GCS for key {key} from bucket {bucket} and it took: {elapsed:?}" + "Removing data from GCS for key {filename} from bucket {}", + self.bucket_prefix ); - object.map(drop) - } - async fn remove_raw(&self, bucket: Bucket, key: &str) -> Result<(), ObjectStoreError> { - self.remove_inner(bucket.as_str(), key).await + let request = DeleteObjectRequest { + bucket: self.bucket_prefix.clone(), + object: filename, + ..DeleteObjectRequest::default() + }; + self.client.delete_object(&request).await?; + Ok(()) } fn storage_prefix_raw(&self, bucket: Bucket) -> String { @@ -257,52 +203,3 @@ impl ObjectStore for GoogleCloudStorage { ) } } - -#[cfg(test)] -mod test { - use std::sync::atomic::{AtomicU16, Ordering}; - - use assert_matches::assert_matches; - - use super::*; - - fn transient_error() -> ObjectStoreError { - ObjectStoreError::Other { - is_transient: true, - source: "oops".into(), - } - } - - #[tokio::test] - async fn test_retry_success_immediate() { - let result = retry(2, || async { Ok(42) 
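// How the `source()` chain walk in `has_transient_io_source` plays out in practice:
// any error exposing an `io::Error` anywhere in its chain is treated as transient.
// A minimal test-style sketch; `Wrapper` is hypothetical, standing in for an HTTP
// client error that wraps the underlying I/O error:
#[cfg(test)]
mod transient_io_sketch {
    use std::{error::Error as StdError, fmt, io};

    use super::has_transient_io_source;

    #[derive(Debug)]
    struct Wrapper(io::Error);

    impl fmt::Display for Wrapper {
        fn fmt(&self, formatter: &mut fmt::Formatter<'_>) -> fmt::Result {
            write!(formatter, "request failed: {}", self.0)
        }
    }

    impl StdError for Wrapper {
        fn source(&self) -> Option<&(dyn StdError + 'static)> {
            Some(&self.0)
        }
    }

    #[test]
    fn io_error_is_found_one_level_down() {
        let err = Wrapper(io::Error::new(io::ErrorKind::ConnectionReset, "reset by peer"));
        assert!(has_transient_io_source(&err));
    }
}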
}).await.unwrap(); - assert_eq!(result, 42); - } - - #[tokio::test] - async fn test_retry_failure_exhausted() { - let err = retry(2, || async { Err::(transient_error()) }) - .await - .unwrap_err(); - assert_matches!(err, ObjectStoreError::Other { .. }); - } - - async fn retry_success_after_n_retries(n: u16) -> Result { - let retries = AtomicU16::new(0); - retry(n, || async { - let retries = retries.fetch_add(1, Ordering::Relaxed); - if retries + 1 == n { - Ok(42) - } else { - Err(transient_error()) - } - }) - .await - } - - #[tokio::test] - async fn test_retry_success_after_retry() { - let result = retry(2, || retry_success_after_n_retries(2)).await.unwrap(); - assert_eq!(result, 42); - } -} diff --git a/core/lib/object_store/src/lib.rs b/core/lib/object_store/src/lib.rs index 0eddf3a61d53..bccc139336b8 100644 --- a/core/lib/object_store/src/lib.rs +++ b/core/lib/object_store/src/lib.rs @@ -1,13 +1,13 @@ //! This crate provides the [object storage abstraction](ObjectStore) that allows to get, //! put and remove binary blobs. The following implementations are available: //! -//! - File-based storage saving blobs as separate files in the local filesystem -//! - GCS-based storage +//! - [File-backed store](FileBackedObjectStore) saving blobs as separate files in the local filesystem +//! - [GCS-based store](GoogleCloudStore) +//! - [Mock in-memory store](MockObjectStore) //! -//! These implementations are not exposed externally. Instead, a store trait object +//! Normally, these implementations are not used directly. Instead, a store trait object (`Arc`) //! can be constructed using an [`ObjectStoreFactory`] based on the configuration. -//! The configuration can be provided explicitly (see [`ObjectStoreFactory::new()`]) -//! or obtained from the environment (see [`ObjectStoreFactory::from_env()`]). +//! This trait object is what should be used for dependency injection. //! //! Besides the lower-level storage abstraction, the crate provides high-level //! typesafe `::get()` and `::put()` methods @@ -23,12 +23,14 @@ clippy::doc_markdown )] +mod factory; mod file; mod gcs; mod metrics; mod mock; mod objects; mod raw; +mod retries; // Re-export `bincode` crate so that client binaries can conveniently use it. pub use bincode; @@ -39,6 +41,10 @@ pub mod _reexports { } pub use self::{ + factory::ObjectStoreFactory, + file::FileBackedObjectStore, + gcs::{GoogleCloudStore, GoogleCloudStoreAuthMode}, + mock::MockObjectStore, objects::StoredObject, - raw::{Bucket, ObjectStore, ObjectStoreError, ObjectStoreFactory}, + raw::{Bucket, ObjectStore, ObjectStoreError}, }; diff --git a/core/lib/object_store/src/metrics.rs b/core/lib/object_store/src/metrics.rs index f372b5bac1cc..ed37d4790d55 100644 --- a/core/lib/object_store/src/metrics.rs +++ b/core/lib/object_store/src/metrics.rs @@ -8,16 +8,16 @@ use crate::Bucket; #[derive(Debug, Metrics)] #[metrics(prefix = "server_object_store")] -pub(crate) struct GcsMetrics { - /// Latency to fetch an object from GCS. +pub(crate) struct ObjectStoreMetrics { + /// Latency to fetch an object from the store (accounting for retries). #[metrics(buckets = Buckets::LATENCIES, labels = ["bucket"])] fetching_time: LabeledFamily<&'static str, Histogram>, - /// Latency to store an object in GCS. + /// Latency to store an object in the store (accounting for retries). 
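// Putting the re-exported API together: a round trip through the store trait
// object, test-style. A sketch assuming a Tokio test context; `Bucket::WitnessInput`
// is an existing bucket variant, picked arbitrarily here.
#[tokio::test]
async fn round_trip_through_trait_object() {
    use zksync_object_store::{Bucket, MockObjectStore, ObjectStore};

    // `MockObjectStore::arc()` yields the same `Arc`-wrapped trait-object shape
    // that `ObjectStoreFactory` produces for real stores.
    let store = MockObjectStore::arc();
    store
        .put_raw(Bucket::WitnessInput, "key", vec![1, 2, 3])
        .await
        .unwrap();
    let bytes = store.get_raw(Bucket::WitnessInput, "key").await.unwrap();
    assert_eq!(bytes, [1, 2, 3]);
}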
#[metrics(buckets = Buckets::LATENCIES, labels = ["bucket"])] storing_time: LabeledFamily<&'static str, Histogram>, } -impl GcsMetrics { +impl ObjectStoreMetrics { pub fn start_fetch(&self, bucket: Bucket) -> LatencyObserver<'_> { self.fetching_time[&bucket.as_str()].start() } @@ -28,4 +28,4 @@ impl GcsMetrics { } #[vise::register] -pub(crate) static GCS_METRICS: vise::Global = vise::Global::new(); +pub(crate) static OBJECT_STORE_METRICS: vise::Global = vise::Global::new(); diff --git a/core/lib/object_store/src/mock.rs b/core/lib/object_store/src/mock.rs index f7ee7119c7a3..68b8881c86b2 100644 --- a/core/lib/object_store/src/mock.rs +++ b/core/lib/object_store/src/mock.rs @@ -1,6 +1,6 @@ //! Mock implementation of [`ObjectStore`]. -use std::collections::HashMap; +use std::{collections::HashMap, sync::Arc}; use async_trait::async_trait; use tokio::sync::Mutex; @@ -9,13 +9,21 @@ use crate::raw::{Bucket, ObjectStore, ObjectStoreError}; type BucketMap = HashMap>; +/// Mock [`ObjectStore`] implementation. #[derive(Debug, Default)] -pub(crate) struct MockStore { +pub struct MockObjectStore { inner: Mutex>, } +impl MockObjectStore { + /// Convenience method creating a new mock object store and wrapping it in a trait object. + pub fn arc() -> Arc { + Arc::::default() + } +} + #[async_trait] -impl ObjectStore for MockStore { +impl ObjectStore for MockObjectStore { async fn get_raw(&self, bucket: Bucket, key: &str) -> Result, ObjectStoreError> { let lock = self.inner.lock().await; let maybe_bytes = lock.get(&bucket).and_then(|bucket_map| bucket_map.get(key)); diff --git a/core/lib/object_store/src/objects.rs b/core/lib/object_store/src/objects.rs index c503db2306b7..d67e4e5df137 100644 --- a/core/lib/object_store/src/objects.rs +++ b/core/lib/object_store/src/objects.rs @@ -185,7 +185,7 @@ mod tests { }; use super::*; - use crate::ObjectStoreFactory; + use crate::MockObjectStore; #[test] fn test_storage_logs_filesnames_generate_corretly() { @@ -217,7 +217,7 @@ mod tests { #[tokio::test] async fn test_storage_logs_can_be_serialized_and_deserialized() { - let store = ObjectStoreFactory::mock().create_store().await; + let store = MockObjectStore::arc(); let key = SnapshotStorageLogsStorageKey { l1_batch_number: L1BatchNumber(567), chunk_id: 5, @@ -245,7 +245,7 @@ mod tests { #[tokio::test] async fn test_factory_deps_can_be_serialized_and_deserialized() { - let store = ObjectStoreFactory::mock().create_store().await; + let store = MockObjectStore::arc(); let key = L1BatchNumber(123); let factory_deps = SnapshotFactoryDependencies { factory_deps: vec![ diff --git a/core/lib/object_store/src/raw.rs b/core/lib/object_store/src/raw.rs index d415ae431aaa..8b99f9769900 100644 --- a/core/lib/object_store/src/raw.rs +++ b/core/lib/object_store/src/raw.rs @@ -1,13 +1,6 @@ -use std::{error, fmt, sync::Arc}; +use std::{error, fmt}; use async_trait::async_trait; -use zksync_config::configs::object_store::{ObjectStoreConfig, ObjectStoreMode}; - -use crate::{ - file::FileBackedObjectStore, - gcs::{GoogleCloudStorage, GoogleCloudStorageAuthMode}, - mock::MockStore, -}; /// Bucket for [`ObjectStore`] in which objects can be placed. 
#[derive(Debug, Clone, Copy, PartialEq, Eq, Hash)] @@ -162,131 +155,3 @@ pub trait ObjectStore: 'static + fmt::Debug + Send + Sync { fn storage_prefix_raw(&self, bucket: Bucket) -> String; } - -#[async_trait] -impl ObjectStore for Arc { - async fn get_raw(&self, bucket: Bucket, key: &str) -> Result, ObjectStoreError> { - (**self).get_raw(bucket, key).await - } - - async fn put_raw( - &self, - bucket: Bucket, - key: &str, - value: Vec, - ) -> Result<(), ObjectStoreError> { - (**self).put_raw(bucket, key, value).await - } - - async fn remove_raw(&self, bucket: Bucket, key: &str) -> Result<(), ObjectStoreError> { - (**self).remove_raw(bucket, key).await - } - - fn storage_prefix_raw(&self, bucket: Bucket) -> String { - (**self).storage_prefix_raw(bucket) - } -} - -#[derive(Debug)] -enum ObjectStoreOrigin { - Config(ObjectStoreConfig), - Mock(Arc), -} - -/// Factory of [`ObjectStore`]s. -#[derive(Debug)] -pub struct ObjectStoreFactory { - origin: ObjectStoreOrigin, -} - -impl ObjectStoreFactory { - /// Creates an object store factory based on the provided `config`. - /// - /// # Panics - /// - /// If the GCS-backed implementation is configured, this constructor will panic if called - /// outside the Tokio runtime. - pub fn new(config: ObjectStoreConfig) -> Self { - Self { - origin: ObjectStoreOrigin::Config(config), - } - } - - /// Creates an object store factory with a mock in-memory store. - /// All calls to [`Self::create_store()`] will return the same store; thus, the testing code - /// can use [`ObjectStore`] methods for assertions. - pub fn mock() -> Self { - Self { - origin: ObjectStoreOrigin::Mock(Arc::new(MockStore::default())), - } - } - - /// Creates an [`ObjectStore`]. - /// - /// # Panics - /// - /// Panics if store initialization fails (e.g., because of incorrect configuration). 
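// For comparison, the replacement contract in the new `factory` module:
// `create_store()` now returns a `Result` instead of panicking on misconfiguration,
// as the updated call sites elsewhere in this patch show. A sketch, assuming the
// error type converts into `anyhow::Error`:
async fn create_store_sketch(
    config: zksync_config::configs::object_store::ObjectStoreConfig,
) -> anyhow::Result<std::sync::Arc<dyn ObjectStore>> {
    let store = ObjectStoreFactory::new(config).create_store().await?;
    Ok(store)
}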
- pub async fn create_store(&self) -> Arc { - match &self.origin { - ObjectStoreOrigin::Config(config) => Self::create_from_config(config) - .await - .unwrap_or_else(|err| { - panic!( - "failed creating object store factory with configuration {config:?}: {err}" - ) - }), - ObjectStoreOrigin::Mock(store) => Arc::new(Arc::clone(store)), - } - } - - async fn create_from_config( - config: &ObjectStoreConfig, - ) -> Result, ObjectStoreError> { - match &config.mode { - ObjectStoreMode::GCS { bucket_base_url } => { - tracing::trace!( - "Initialized GoogleCloudStorage Object store without credential file" - ); - let store = GoogleCloudStorage::new( - GoogleCloudStorageAuthMode::Authenticated, - bucket_base_url.clone(), - config.max_retries, - ) - .await?; - Ok(Arc::new(store)) - } - ObjectStoreMode::GCSWithCredentialFile { - bucket_base_url, - gcs_credential_file_path, - } => { - tracing::trace!("Initialized GoogleCloudStorage Object store with credential file"); - let store = GoogleCloudStorage::new( - GoogleCloudStorageAuthMode::AuthenticatedWithCredentialFile( - gcs_credential_file_path.clone(), - ), - bucket_base_url.clone(), - config.max_retries, - ) - .await?; - Ok(Arc::new(store)) - } - ObjectStoreMode::FileBacked { - file_backed_base_path, - } => { - tracing::trace!("Initialized FileBacked Object store"); - let store = FileBackedObjectStore::new(file_backed_base_path.clone()).await?; - Ok(Arc::new(store)) - } - ObjectStoreMode::GCSAnonymousReadOnly { bucket_base_url } => { - tracing::trace!("Initialized GoogleCloudStoragePublicReadOnly store"); - let store = GoogleCloudStorage::new( - GoogleCloudStorageAuthMode::Anonymous, - bucket_base_url.clone(), - config.max_retries, - ) - .await?; - Ok(Arc::new(store)) - } - } - } -} diff --git a/core/lib/object_store/src/retries.rs b/core/lib/object_store/src/retries.rs new file mode 100644 index 000000000000..fafde47a9e2d --- /dev/null +++ b/core/lib/object_store/src/retries.rs @@ -0,0 +1,184 @@ +use std::{any, fmt, future::Future, time::Duration}; + +use async_trait::async_trait; +use rand::Rng; + +use crate::{ + metrics::OBJECT_STORE_METRICS, + raw::{Bucket, ObjectStore, ObjectStoreError}, +}; + +/// Information about request added to logs. +#[derive(Debug, Clone, Copy)] +#[allow(dead_code)] // fields are used via `Debug` impl in logs +enum Request<'a> { + New, + Get(Bucket, &'a str), + Put(Bucket, &'a str), + Remove(Bucket, &'a str), +} + +impl Request<'_> { + #[tracing::instrument(skip(f))] // output request and store as a part of structured logs + async fn retry( + self, + store: &impl fmt::Debug, + max_retries: u16, + mut f: F, + ) -> Result + where + Fut: Future>, + F: FnMut() -> Fut, + { + let mut retries = 1; + let mut backoff_secs = 1; + loop { + match f().await { + Ok(result) => return Ok(result), + Err(err) if err.is_transient() => { + if retries > max_retries { + tracing::warn!(%err, "Exhausted {max_retries} retries performing request; returning last error"); + return Err(err); + } + tracing::info!(%err, "Failed request, retries: {retries}/{max_retries}"); + retries += 1; + // Randomize sleep duration to prevent stampeding the server if multiple requests are initiated at the same time. 
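// For reference, the backoff arithmetic below in isolation (a sketch; `attempt`
// is 1-based): attempt 1 waits ~1 s, attempt 2 ~2 s, attempt 3 ~4 s, each scaled
// by a random factor in 0.8..1.2 so concurrent clients don't retry in lockstep.
fn jittered_backoff(attempt: u32) -> Duration {
    let base_secs = 1u64 << (attempt - 1); // 1, 2, 4, ... doubling per attempt
    Duration::from_secs(base_secs).mul_f32(rand::thread_rng().gen_range(0.8..1.2))
}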
+ let sleep_duration = Duration::from_secs(backoff_secs) + .mul_f32(rand::thread_rng().gen_range(0.8..1.2)); + tokio::time::sleep(sleep_duration).await; + backoff_secs *= 2; + } + Err(err) => { + tracing::warn!(%err, "Failed request with a fatal error"); + return Err(err); + } + } + } + } +} + +/// [`ObjectStore`] wrapper that retries all operations according to a reasonable policy. +#[derive(Debug)] +pub(crate) struct StoreWithRetries { + inner: S, + max_retries: u16, +} + +impl StoreWithRetries { + /// Creates a store based on the provided async initialization closure. + pub async fn try_new( + max_retries: u16, + init_fn: impl FnMut() -> Fut, + ) -> Result + where + Fut: Future>, + { + Ok(Self { + inner: Request::New + .retry(&any::type_name::(), max_retries, init_fn) + .await?, + max_retries, + }) + } +} + +// Object store metrics are placed here because `ObjectStoreFactory` (which in practice produces "production" object stores) +// wraps stores in `StoreWithRetries`. If this is changed, metrics will need to be refactored correspondingly. +#[async_trait] +impl ObjectStore for StoreWithRetries { + async fn get_raw(&self, bucket: Bucket, key: &str) -> Result, ObjectStoreError> { + let latency = OBJECT_STORE_METRICS.start_fetch(bucket); + let result = Request::Get(bucket, key) + .retry(&self.inner, self.max_retries, || { + self.inner.get_raw(bucket, key) + }) + .await; + latency.observe(); + result + } + + async fn put_raw( + &self, + bucket: Bucket, + key: &str, + value: Vec, + ) -> Result<(), ObjectStoreError> { + let latency = OBJECT_STORE_METRICS.start_store(bucket); + let result = Request::Put(bucket, key) + .retry(&self.inner, self.max_retries, || { + self.inner.put_raw(bucket, key, value.clone()) + }) + .await; + latency.observe(); + result + } + + async fn remove_raw(&self, bucket: Bucket, key: &str) -> Result<(), ObjectStoreError> { + Request::Remove(bucket, key) + .retry(&self.inner, self.max_retries, || { + self.inner.remove_raw(bucket, key) + }) + .await + } + + fn storage_prefix_raw(&self, bucket: Bucket) -> String { + self.inner.storage_prefix_raw(bucket) + } +} + +#[cfg(test)] +mod test { + use std::sync::atomic::{AtomicU16, Ordering}; + + use assert_matches::assert_matches; + + use super::*; + + fn transient_error() -> ObjectStoreError { + ObjectStoreError::Other { + is_transient: true, + source: "oops".into(), + } + } + + #[tokio::test] + async fn test_retry_success_immediate() { + let result = Request::New + .retry(&"store", 2, || async { Ok(42) }) + .await + .unwrap(); + assert_eq!(result, 42); + } + + #[tokio::test] + async fn test_retry_failure_exhausted() { + let err = Request::New + .retry(&"store", 2, || async { Err::(transient_error()) }) + .await + .unwrap_err(); + assert_matches!(err, ObjectStoreError::Other { .. 
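// How the factory presumably wires retries around a concrete store (a sketch;
// the bucket prefix is hypothetical and `StoreWithRetries` stays crate-private):
async fn wrapped_gcs_sketch() -> Result<StoreWithRetries<GoogleCloudStore>, ObjectStoreError> {
    use crate::{GoogleCloudStore, GoogleCloudStoreAuthMode};

    StoreWithRetries::try_new(5, || {
        GoogleCloudStore::new(
            GoogleCloudStoreAuthMode::Anonymous,
            "example-bucket".to_owned(), // hypothetical bucket prefix
        )
    })
    .await
}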
}); + } + + async fn retry_success_after_n_retries(n: u16) -> Result { + let retries = AtomicU16::new(0); + Request::New + .retry(&"store", n, || async { + let retries = retries.fetch_add(1, Ordering::Relaxed); + if retries + 1 == n { + Ok(42) + } else { + Err(transient_error()) + } + }) + .await + } + + #[tokio::test] + async fn test_retry_success_after_retry() { + let result = Request::New + .retry(&"store", 2, || retry_success_after_n_retries(2)) + .await + .unwrap(); + assert_eq!(result, 42); + } +} diff --git a/core/lib/prover_interface/tests/job_serialization.rs b/core/lib/prover_interface/tests/job_serialization.rs index 0b37b6cf1283..ffa6d18ef451 100644 --- a/core/lib/prover_interface/tests/job_serialization.rs +++ b/core/lib/prover_interface/tests/job_serialization.rs @@ -1,7 +1,7 @@ //! Integration tests for object store serialization of job objects. use tokio::fs; -use zksync_object_store::{Bucket, ObjectStoreFactory}; +use zksync_object_store::{Bucket, MockObjectStore}; use zksync_prover_interface::{ inputs::{PrepareBasicCircuitsJob, StorageLogMetadata}, outputs::L1BatchProofForL1, @@ -17,7 +17,7 @@ async fn prepare_basic_circuits_job_serialization() { let snapshot = fs::read("./tests/snapshots/prepare-basic-circuits-job-full.bin") .await .unwrap(); - let store = ObjectStoreFactory::mock().create_store().await; + let store = MockObjectStore::arc(); store .put_raw( Bucket::WitnessInput, diff --git a/core/lib/snapshots_applier/src/lib.rs b/core/lib/snapshots_applier/src/lib.rs index b0024f78433f..ea1c11f40c2c 100644 --- a/core/lib/snapshots_applier/src/lib.rs +++ b/core/lib/snapshots_applier/src/lib.rs @@ -288,7 +288,7 @@ impl SnapshotsApplierTask { let result = SnapshotsApplier::load_snapshot( &self.connection_pool, self.main_node_client.as_ref(), - &self.blob_store, + self.blob_store.as_ref(), &self.health_updater, self.snapshot_l1_batch, self.config.max_concurrency.get(), diff --git a/core/lib/snapshots_applier/src/tests/mod.rs b/core/lib/snapshots_applier/src/tests/mod.rs index 4dcc66841934..b15f8bc657bf 100644 --- a/core/lib/snapshots_applier/src/tests/mod.rs +++ b/core/lib/snapshots_applier/src/tests/mod.rs @@ -9,7 +9,7 @@ use assert_matches::assert_matches; use test_casing::test_casing; use tokio::sync::Barrier; use zksync_health_check::CheckHealth; -use zksync_object_store::ObjectStoreFactory; +use zksync_object_store::MockObjectStore; use zksync_types::{ api::{BlockDetails, L1BatchDetails}, block::L1BatchHeader, @@ -315,8 +315,7 @@ async fn health_status_immediately_after_task_start() { } } - let object_store_factory = ObjectStoreFactory::mock(); - let object_store = object_store_factory.create_store().await; + let object_store = MockObjectStore::arc(); let client = HangingMainNodeClient(Arc::new(Barrier::new(2))); let task = SnapshotsApplierTask::new( SnapshotsApplierConfig::for_tests(), @@ -370,8 +369,7 @@ async fn applier_errors_after_genesis() { .unwrap(); drop(storage); - let object_store_factory = ObjectStoreFactory::mock(); - let object_store = object_store_factory.create_store().await; + let object_store = MockObjectStore::arc(); let client = MockMainNodeClient::default(); let task = SnapshotsApplierTask::new( @@ -386,8 +384,7 @@ async fn applier_errors_after_genesis() { #[tokio::test] async fn applier_errors_without_snapshots() { let pool = ConnectionPool::::test_pool().await; - let object_store_factory = ObjectStoreFactory::mock(); - let object_store = object_store_factory.create_store().await; + let object_store = MockObjectStore::arc(); let client = 
MockMainNodeClient::default(); let task = SnapshotsApplierTask::new( @@ -402,8 +399,7 @@ async fn applier_errors_without_snapshots() { #[tokio::test] async fn applier_errors_with_unrecognized_snapshot_version() { let pool = ConnectionPool::test_pool().await; - let object_store_factory = ObjectStoreFactory::mock(); - let object_store = object_store_factory.create_store().await; + let object_store = MockObjectStore::arc(); let expected_status = mock_recovery_status(); let client = MockMainNodeClient { fetch_newest_snapshot_response: Some(SnapshotHeader { diff --git a/core/lib/snapshots_applier/src/tests/utils.rs b/core/lib/snapshots_applier/src/tests/utils.rs index c853481ab53b..d3d1c3ae6e03 100644 --- a/core/lib/snapshots_applier/src/tests/utils.rs +++ b/core/lib/snapshots_applier/src/tests/utils.rs @@ -4,7 +4,7 @@ use std::{collections::HashMap, fmt, future, sync::Arc}; use async_trait::async_trait; use tokio::sync::watch; -use zksync_object_store::{Bucket, ObjectStore, ObjectStoreError, ObjectStoreFactory}; +use zksync_object_store::{Bucket, MockObjectStore, ObjectStore, ObjectStoreError}; use zksync_types::{ api, block::L2BlockHeader, @@ -253,8 +253,7 @@ pub(super) async fn prepare_clients( status: &SnapshotRecoveryStatus, logs: &[SnapshotStorageLog], ) -> (Arc, MockMainNodeClient) { - let object_store_factory = ObjectStoreFactory::mock(); - let object_store = object_store_factory.create_store().await; + let object_store = MockObjectStore::arc(); let mut client = MockMainNodeClient::default(); let factory_dep_bytes: Vec = (0..32).collect(); let factory_deps = SnapshotFactoryDependencies { diff --git a/core/lib/zksync_core_leftovers/src/lib.rs b/core/lib/zksync_core_leftovers/src/lib.rs index 4f8664ab74dc..d2012de83126 100644 --- a/core/lib/zksync_core_leftovers/src/lib.rs +++ b/core/lib/zksync_core_leftovers/src/lib.rs @@ -637,7 +637,7 @@ pub async fn initialize_components( sender_config.clone(), Aggregator::new( sender_config.clone(), - store_factory.create_store().await, + store_factory.create_store().await?, operator_blobs_address.is_some(), l1_batch_commit_data_generator_mode, ), @@ -761,7 +761,7 @@ pub async fn initialize_components( .proof_data_handler_config .clone() .context("proof_data_handler_config")?, - store_factory.create_store().await, + store_factory.create_store().await?, connection_pool.clone(), genesis_config.l1_batch_commit_data_generator_mode, stop_receiver.clone(), @@ -963,7 +963,7 @@ async fn add_trees_to_task_futures( let object_store = match db_config.merkle_tree.mode { MerkleTreeMode::Lightweight => None, - MerkleTreeMode::Full => Some(store_factory.create_store().await), + MerkleTreeMode::Full => Some(store_factory.create_store().await?), }; run_tree( @@ -1051,7 +1051,7 @@ async fn add_tee_verifier_input_producer_to_task_futures( tracing::info!("initializing TeeVerifierInputProducer"); let producer = TeeVerifierInputProducer::new( connection_pool.clone(), - store_factory.create_store().await, + store_factory.create_store().await?, l2_chain_id, ) .await?; diff --git a/core/node/block_reverter/src/lib.rs b/core/node/block_reverter/src/lib.rs index baba02a559f0..b0ee48563b7e 100644 --- a/core/node/block_reverter/src/lib.rs +++ b/core/node/block_reverter/src/lib.rs @@ -168,7 +168,7 @@ impl BlockReverter { vec![] }; - if let Some(object_store) = &self.snapshots_object_store { + if let Some(object_store) = self.snapshots_object_store.as_deref() { Self::delete_snapshot_files(object_store, &deleted_snapshots).await?; } else if !deleted_snapshots.is_empty() { 
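// Why `as_deref()` above: it borrows through both the `Option` and the `Arc`,
// yielding an `Option<&dyn ObjectStore>` without cloning the handle. Sketch:
fn borrow_store(store: &Option<Arc<dyn ObjectStore>>) -> Option<&dyn ObjectStore> {
    store.as_deref()
}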
tracing::info!( diff --git a/core/node/block_reverter/src/tests.rs b/core/node/block_reverter/src/tests.rs index 30ff24fa175b..0fb54bdb1f93 100644 --- a/core/node/block_reverter/src/tests.rs +++ b/core/node/block_reverter/src/tests.rs @@ -8,7 +8,7 @@ use test_casing::test_casing; use tokio::sync::watch; use zksync_dal::Connection; use zksync_merkle_tree::TreeInstruction; -use zksync_object_store::{Bucket, ObjectStoreFactory}; +use zksync_object_store::{Bucket, MockObjectStore}; use zksync_state::ReadStorage; use zksync_types::{ block::{L1BatchHeader, L2BlockHeader}, @@ -262,8 +262,8 @@ async fn reverting_snapshot(remove_objects: bool) { let mut storage = pool.connection().await.unwrap(); setup_storage(&mut storage, &storage_logs).await; - let object_store = ObjectStoreFactory::mock().create_store().await; - create_mock_snapshot(&mut storage, &object_store, L1BatchNumber(7), 0..5).await; + let object_store = MockObjectStore::arc(); + create_mock_snapshot(&mut storage, &*object_store, L1BatchNumber(7), 0..5).await; // Sanity check: snapshot should be visible. let all_snapshots = storage .snapshots_dal() @@ -320,8 +320,8 @@ async fn reverting_snapshot_ignores_not_found_object_store_errors() { let mut storage = pool.connection().await.unwrap(); setup_storage(&mut storage, &storage_logs).await; - let object_store = ObjectStoreFactory::mock().create_store().await; - create_mock_snapshot(&mut storage, &object_store, L1BatchNumber(7), 0..5).await; + let object_store = MockObjectStore::arc(); + create_mock_snapshot(&mut storage, &*object_store, L1BatchNumber(7), 0..5).await; // Manually remove some data from the store. object_store @@ -399,7 +399,7 @@ async fn reverting_snapshot_propagates_fatal_errors() { setup_storage(&mut storage, &storage_logs).await; let object_store = Arc::new(ErroneousStore::default()); - create_mock_snapshot(&mut storage, &object_store, L1BatchNumber(7), 0..5).await; + create_mock_snapshot(&mut storage, &*object_store, L1BatchNumber(7), 0..5).await; let mut block_reverter = BlockReverter::new(NodeRole::External, pool.clone()); block_reverter.enable_rolling_back_postgres(); @@ -436,11 +436,11 @@ async fn reverter_handles_incomplete_snapshot() { let mut storage = pool.connection().await.unwrap(); setup_storage(&mut storage, &storage_logs).await; - let object_store = ObjectStoreFactory::mock().create_store().await; + let object_store = MockObjectStore::arc(); let chunk_ids = [0, 1, 4].into_iter(); create_mock_snapshot( &mut storage, - &object_store, + &*object_store, L1BatchNumber(7), chunk_ids.clone(), ) diff --git a/core/node/eth_sender/src/tests.rs b/core/node/eth_sender/src/tests.rs index cd00f3af0883..a7f4a9f13a85 100644 --- a/core/node/eth_sender/src/tests.rs +++ b/core/node/eth_sender/src/tests.rs @@ -13,7 +13,7 @@ use zksync_eth_client::{clients::MockEthereum, EthInterface}; use zksync_l1_contract_interface::i_executor::methods::{ExecuteBatches, ProveBatches}; use zksync_node_fee_model::l1_gas_price::GasAdjuster; use zksync_node_test_utils::{create_l1_batch, l1_batch_metadata_to_commitment_artifacts}; -use zksync_object_store::ObjectStoreFactory; +use zksync_object_store::MockObjectStore; use zksync_types::{ block::L1BatchHeader, commitment::{ @@ -161,7 +161,6 @@ impl EthSenderTester { .await .unwrap(), ); - let store_factory = ObjectStoreFactory::mock(); let eth_sender = eth_sender_config.sender.clone().unwrap(); let aggregator = EthTxAggregator::new( @@ -174,7 +173,7 @@ impl EthSenderTester { // Aggregator - unused Aggregator::new( aggregator_config.clone(), - 
store_factory.create_store().await, + MockObjectStore::arc(), aggregator_operate_4844_mode, commitment_mode, ), diff --git a/core/node/metadata_calculator/src/tests.rs b/core/node/metadata_calculator/src/tests.rs index 1a1b4eb98298..0406544614d4 100644 --- a/core/node/metadata_calculator/src/tests.rs +++ b/core/node/metadata_calculator/src/tests.rs @@ -15,7 +15,7 @@ use zksync_health_check::{CheckHealth, HealthStatus}; use zksync_merkle_tree::domain::ZkSyncTree; use zksync_node_genesis::{insert_genesis_batch, GenesisParams}; use zksync_node_test_utils::{create_l1_batch, create_l2_block}; -use zksync_object_store::{ObjectStore, ObjectStoreFactory}; +use zksync_object_store::{MockObjectStore, ObjectStore}; use zksync_prover_interface::inputs::PrepareBasicCircuitsJob; use zksync_storage::RocksDB; use zksync_types::{ @@ -384,13 +384,16 @@ pub(crate) async fn setup_calculator( db_path: &Path, pool: ConnectionPool, ) -> (MetadataCalculator, Arc) { - let store_factory = ObjectStoreFactory::mock(); - let store = store_factory.create_store().await; + let store = MockObjectStore::arc(); let (merkle_tree_config, operation_manager) = create_config(db_path, MerkleTreeMode::Full); - let calculator = - setup_calculator_with_options(&merkle_tree_config, &operation_manager, pool, Some(store)) - .await; - (calculator, store_factory.create_store().await) + let calculator = setup_calculator_with_options( + &merkle_tree_config, + &operation_manager, + pool, + Some(store.clone()), + ) + .await; + (calculator, store) } async fn setup_lightweight_calculator( diff --git a/core/node/node_framework/src/implementations/layers/object_store.rs b/core/node/node_framework/src/implementations/layers/object_store.rs index c886758f97e2..e5a4b19c6b56 100644 --- a/core/node/node_framework/src/implementations/layers/object_store.rs +++ b/core/node/node_framework/src/implementations/layers/object_store.rs @@ -25,7 +25,7 @@ impl WiringLayer for ObjectStoreLayer { } async fn wire(self: Box, mut context: ServiceContext<'_>) -> Result<(), WiringError> { - let object_store = ObjectStoreFactory::new(self.config).create_store().await; + let object_store = ObjectStoreFactory::new(self.config).create_store().await?; context.insert_resource(ObjectStoreResource(object_store))?; Ok(()) } diff --git a/prover/proof_fri_compressor/src/main.rs b/prover/proof_fri_compressor/src/main.rs index 9786170874ec..61b72d790f0a 100644 --- a/prover/proof_fri_compressor/src/main.rs +++ b/prover/proof_fri_compressor/src/main.rs @@ -71,7 +71,7 @@ async fn main() -> anyhow::Result<()> { ProverObjectStoreConfig::from_env().context("ProverObjectStoreConfig::from_env()")?; let blob_store = ObjectStoreFactory::new(object_store_config.0) .create_store() - .await; + .await?; let protocol_version = PROVER_PROTOCOL_SEMANTIC_VERSION; diff --git a/prover/prover_fri/src/main.rs b/prover/prover_fri/src/main.rs index 7bd658868258..86fd114fa12e 100644 --- a/prover/prover_fri/src/main.rs +++ b/prover/prover_fri/src/main.rs @@ -114,7 +114,7 @@ async fn main() -> anyhow::Result<()> { true => Some( ObjectStoreFactory::new(public_object_store_config.0) .create_store() - .await, + .await?, ), }; let specialized_group_id = prover_config.specialized_group_id; @@ -205,7 +205,7 @@ async fn get_prover_tasks( let setup_load_mode = load_setup_data_cache(&prover_config).context("load_setup_data_cache()")?; let prover = Prover::new( - store_factory.create_store().await, + store_factory.create_store().await?, public_blob_store, prover_config, pool, @@ -250,7 +250,7 @@ async fn 
get_prover_tasks( let protocol_version = PROVER_PROTOCOL_SEMANTIC_VERSION; let prover = gpu_prover::Prover::new( - store_factory.create_store().await, + store_factory.create_store().await?, public_blob_store, prover_config.clone(), pool.clone(), diff --git a/prover/prover_fri/tests/basic_test.rs b/prover/prover_fri/tests/basic_test.rs index 625c55e0cb71..fa5e5ca9cc63 100644 --- a/prover/prover_fri/tests/basic_test.rs +++ b/prover/prover_fri/tests/basic_test.rs @@ -34,11 +34,11 @@ async fn prover_and_assert_base_layer( }; let object_store = ObjectStoreFactory::new(object_store_config) .create_store() - .await; + .await?; let expected_proof = object_store .get(expected_proof_id) .await - .expect("missing expected proof"); + .context("missing expected proof")?; let aggregation_round = AggregationRound::BasicCircuits; let blob_key = FriCircuitKey { diff --git a/prover/prover_fri_gateway/src/main.rs b/prover/prover_fri_gateway/src/main.rs index 6372e2c5b44f..de687f45e62c 100644 --- a/prover/prover_fri_gateway/src/main.rs +++ b/prover/prover_fri_gateway/src/main.rs @@ -54,14 +54,14 @@ async fn main() -> anyhow::Result<()> { let store_factory = ObjectStoreFactory::new(object_store_config.0); let proof_submitter = PeriodicApiStruct { - blob_store: store_factory.create_store().await, + blob_store: store_factory.create_store().await?, pool: pool.clone(), api_url: format!("{}{SUBMIT_PROOF_PATH}", config.api_url), poll_duration: config.api_poll_duration(), client: Client::new(), }; let proof_gen_data_fetcher = PeriodicApiStruct { - blob_store: store_factory.create_store().await, + blob_store: store_factory.create_store().await?, pool, api_url: format!("{}{PROOF_GENERATION_DATA_PATH}", config.api_url), poll_duration: config.api_poll_duration(), diff --git a/prover/witness_generator/src/basic_circuits.rs b/prover/witness_generator/src/basic_circuits.rs index 3c6f8a789965..65d3b976c086 100644 --- a/prover/witness_generator/src/basic_circuits.rs +++ b/prover/witness_generator/src/basic_circuits.rs @@ -20,7 +20,7 @@ use tracing::Instrument; use zkevm_test_harness::geometry_config::get_geometry_config; use zksync_config::configs::FriWitnessGeneratorConfig; use zksync_dal::{Core, CoreDal}; -use zksync_object_store::{ObjectStore, ObjectStoreFactory}; +use zksync_object_store::ObjectStore; use zksync_prover_fri_types::{ circuit_definitions::{ boojum::{ @@ -93,9 +93,9 @@ pub struct BasicWitnessGenerator { } impl BasicWitnessGenerator { - pub async fn new( + pub fn new( config: FriWitnessGeneratorConfig, - store_factory: &ObjectStoreFactory, + object_store: Arc, public_blob_store: Option>, connection_pool: ConnectionPool, prover_connection_pool: ConnectionPool, @@ -103,7 +103,7 @@ impl BasicWitnessGenerator { ) -> Self { Self { config: Arc::new(config), - object_store: store_factory.create_store().await, + object_store, public_blob_store, connection_pool, prover_connection_pool, diff --git a/prover/witness_generator/src/leaf_aggregation.rs b/prover/witness_generator/src/leaf_aggregation.rs index bf079dbb4ae2..2695ec198888 100644 --- a/prover/witness_generator/src/leaf_aggregation.rs +++ b/prover/witness_generator/src/leaf_aggregation.rs @@ -10,7 +10,7 @@ use zkevm_test_harness::{ }; use zksync_config::configs::FriWitnessGeneratorConfig; use zksync_dal::ConnectionPool; -use zksync_object_store::{ObjectStore, ObjectStoreFactory}; +use zksync_object_store::ObjectStore; use zksync_prover_fri_types::{ circuit_definitions::{ boojum::field::goldilocks::GoldilocksField, @@ -80,15 +80,15 @@ pub struct 
LeafAggregationWitnessGenerator { } impl LeafAggregationWitnessGenerator { - pub async fn new( + pub fn new( config: FriWitnessGeneratorConfig, - store_factory: &ObjectStoreFactory, + object_store: Arc, prover_connection_pool: ConnectionPool, protocol_version: ProtocolSemanticVersion, ) -> Self { Self { config, - object_store: store_factory.create_store().await, + object_store, prover_connection_pool, protocol_version, } diff --git a/prover/witness_generator/src/main.rs b/prover/witness_generator/src/main.rs index e0e39b442a83..941dd56c9f61 100644 --- a/prover/witness_generator/src/main.rs +++ b/prover/witness_generator/src/main.rs @@ -203,58 +203,53 @@ async fn main() -> anyhow::Result<()> { .context("ObjectStoreConfig::from_env()")?, ) .create_store() - .await, + .await?, ), }; let generator = BasicWitnessGenerator::new( config.clone(), - &store_factory, + store_factory.create_store().await?, public_blob_store, connection_pool.clone(), prover_connection_pool.clone(), protocol_version, - ) - .await; + ); generator.run(stop_receiver.clone(), opt.batch_size) } AggregationRound::LeafAggregation => { let generator = LeafAggregationWitnessGenerator::new( config.clone(), - &store_factory, + store_factory.create_store().await?, prover_connection_pool.clone(), protocol_version, - ) - .await; + ); generator.run(stop_receiver.clone(), opt.batch_size) } AggregationRound::NodeAggregation => { let generator = NodeAggregationWitnessGenerator::new( config.clone(), - &store_factory, + store_factory.create_store().await?, prover_connection_pool.clone(), protocol_version, - ) - .await; + ); generator.run(stop_receiver.clone(), opt.batch_size) } AggregationRound::RecursionTip => { let generator = RecursionTipWitnessGenerator::new( config.clone(), - &store_factory, + store_factory.create_store().await?, prover_connection_pool.clone(), protocol_version, - ) - .await; + ); generator.run(stop_receiver.clone(), opt.batch_size) } AggregationRound::Scheduler => { let generator = SchedulerWitnessGenerator::new( config.clone(), - &store_factory, + store_factory.create_store().await?, prover_connection_pool.clone(), protocol_version, - ) - .await; + ); generator.run(stop_receiver.clone(), opt.batch_size) } }; diff --git a/prover/witness_generator/src/node_aggregation.rs b/prover/witness_generator/src/node_aggregation.rs index f352f9fd9d2a..209ae5ef7749 100644 --- a/prover/witness_generator/src/node_aggregation.rs +++ b/prover/witness_generator/src/node_aggregation.rs @@ -8,7 +8,7 @@ use zkevm_test_harness::witness::recursive_aggregation::{ }; use zksync_config::configs::FriWitnessGeneratorConfig; use zksync_dal::ConnectionPool; -use zksync_object_store::{ObjectStore, ObjectStoreFactory}; +use zksync_object_store::ObjectStore; use zksync_prover_fri_types::{ circuit_definitions::{ boojum::field::goldilocks::GoldilocksField, @@ -80,15 +80,15 @@ pub struct NodeAggregationWitnessGenerator { } impl NodeAggregationWitnessGenerator { - pub async fn new( + pub fn new( config: FriWitnessGeneratorConfig, - store_factory: &ObjectStoreFactory, + object_store: Arc, prover_connection_pool: ConnectionPool, protocol_version: ProtocolSemanticVersion, ) -> Self { Self { config, - object_store: store_factory.create_store().await, + object_store, prover_connection_pool, protocol_version, } diff --git a/prover/witness_generator/src/recursion_tip.rs b/prover/witness_generator/src/recursion_tip.rs index 626b1a8ed09c..e9291b5b182f 100644 --- a/prover/witness_generator/src/recursion_tip.rs +++ b/prover/witness_generator/src/recursion_tip.rs 
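// The same constructor change applies to all five generators: `new()` now takes a
// ready `Arc<dyn ObjectStore>` and is synchronous, so the async store creation
// happens once, up front. A sketch (type parameters assumed as elsewhere in this
// patch):
fn build_generator(
    config: FriWitnessGeneratorConfig,
    object_store: Arc<dyn ObjectStore>,
    pool: ConnectionPool<Prover>,
    protocol_version: ProtocolSemanticVersion,
) -> LeafAggregationWitnessGenerator {
    // No `.await` needed anymore; the store handle is injected.
    LeafAggregationWitnessGenerator::new(config, object_store, pool, protocol_version)
}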
@@ -38,7 +38,7 @@ use zkevm_test_harness::{ }; use zksync_config::configs::FriWitnessGeneratorConfig; use zksync_dal::ConnectionPool; -use zksync_object_store::{ObjectStore, ObjectStoreFactory}; +use zksync_object_store::ObjectStore; use zksync_prover_fri_types::{ get_current_pod_name, keys::{ClosedFormInputKey, FriCircuitKey}, @@ -79,15 +79,15 @@ pub struct RecursionTipWitnessGenerator { } impl RecursionTipWitnessGenerator { - pub async fn new( + pub fn new( config: FriWitnessGeneratorConfig, - store_factory: &ObjectStoreFactory, + object_store: Arc, prover_connection_pool: ConnectionPool, protocol_version: ProtocolSemanticVersion, ) -> Self { Self { config, - object_store: store_factory.create_store().await, + object_store, prover_connection_pool, protocol_version, } diff --git a/prover/witness_generator/src/scheduler.rs b/prover/witness_generator/src/scheduler.rs index 832058e92670..8585c0c2f2b4 100644 --- a/prover/witness_generator/src/scheduler.rs +++ b/prover/witness_generator/src/scheduler.rs @@ -8,7 +8,7 @@ use zkevm_test_harness::zkevm_circuits::recursion::{ }; use zksync_config::configs::FriWitnessGeneratorConfig; use zksync_dal::ConnectionPool; -use zksync_object_store::{ObjectStore, ObjectStoreFactory}; +use zksync_object_store::ObjectStore; use zksync_prover_fri_types::{ circuit_definitions::{ boojum::{ @@ -61,15 +61,15 @@ pub struct SchedulerWitnessGenerator { } impl SchedulerWitnessGenerator { - pub async fn new( + pub fn new( config: FriWitnessGeneratorConfig, - store_factory: &ObjectStoreFactory, + object_store: Arc, prover_connection_pool: ConnectionPool, protocol_version: ProtocolSemanticVersion, ) -> Self { Self { config, - object_store: store_factory.create_store().await, + object_store, prover_connection_pool, protocol_version, } diff --git a/prover/witness_generator/tests/basic_test.rs b/prover/witness_generator/tests/basic_test.rs index 2cb19d34890d..8b94224f20c0 100644 --- a/prover/witness_generator/tests/basic_test.rs +++ b/prover/witness_generator/tests/basic_test.rs @@ -33,7 +33,8 @@ async fn test_leaf_witness_gen() { }; let object_store = ObjectStoreFactory::new(object_store_config) .create_store() - .await; + .await + .unwrap(); let circuit_id = 4; let block_number = L1BatchNumber(125010); @@ -73,7 +74,8 @@ async fn test_node_witness_gen() { }; let object_store = ObjectStoreFactory::new(object_store_config) .create_store() - .await; + .await + .unwrap(); let circuit_id = 8; let block_number = L1BatchNumber(127856); diff --git a/prover/witness_vector_generator/src/generator.rs b/prover/witness_vector_generator/src/generator.rs index baae215e8866..bc03593e0bfb 100644 --- a/prover/witness_vector_generator/src/generator.rs +++ b/prover/witness_vector_generator/src/generator.rs @@ -27,7 +27,7 @@ use zksync_vk_setup_data_server_fri::keystore::Keystore; use crate::metrics::METRICS; pub struct WitnessVectorGenerator { - blob_store: Arc, + object_store: Arc, pool: ConnectionPool, circuit_ids_for_round_to_be_proven: Vec, zone: String, @@ -38,7 +38,7 @@ pub struct WitnessVectorGenerator { impl WitnessVectorGenerator { pub fn new( - blob_store: Arc, + object_store: Arc, prover_connection_pool: ConnectionPool, circuit_ids_for_round_to_be_proven: Vec, zone: String, @@ -47,7 +47,7 @@ impl WitnessVectorGenerator { max_attempts: u32, ) -> Self { Self { - blob_store, + object_store, pool: prover_connection_pool, circuit_ids_for_round_to_be_proven, zone, @@ -89,7 +89,7 @@ impl JobProcessor for WitnessVectorGenerator { let mut storage = self.pool.connection().await.unwrap(); let 
Some(job) = fetch_next_circuit( &mut storage, - &*self.blob_store, + &*self.object_store, &self.circuit_ids_for_round_to_be_proven, &self.protocol_version, ) diff --git a/prover/witness_vector_generator/src/main.rs b/prover/witness_vector_generator/src/main.rs index 2b8134d09e58..b319c80e4810 100644 --- a/prover/witness_vector_generator/src/main.rs +++ b/prover/witness_vector_generator/src/main.rs @@ -74,9 +74,9 @@ async fn main() -> anyhow::Result<()> { .context("failed to build a connection pool")?; let object_store_config = ProverObjectStoreConfig::from_env().context("ProverObjectStoreConfig::from_env()")?; - let blob_store = ObjectStoreFactory::new(object_store_config.0) + let object_store = ObjectStoreFactory::new(object_store_config.0) .create_store() - .await; + .await?; let circuit_ids_for_round_to_be_proven = FriProverGroupConfig::from_env() .context("FriProverGroupConfig::from_env()")? .get_circuit_ids_for_group_id(specialized_group_id) @@ -90,7 +90,7 @@ async fn main() -> anyhow::Result<()> { let protocol_version = PROVER_PROTOCOL_SEMANTIC_VERSION; let witness_vector_generator = WitnessVectorGenerator::new( - blob_store, + object_store, pool, circuit_ids_for_round_to_be_proven.clone(), zone.clone(), From 800b8f456282685e81d3423ba3e27d017db2f183 Mon Sep 17 00:00:00 2001 From: perekopskiy <53865202+perekopskiy@users.noreply.github.com> Date: Wed, 5 Jun 2024 15:28:57 +0300 Subject: [PATCH 8/8] feat(api): Rework zks_getProtocolVersion (#2146) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit ## What ❔ - renames fields in the method's response - removes `verification_keys_hashes` completely ## Why ❔ - more meaningful field names and camelCase for consistency - `verification_keys_hashes` doesn't make sense in the context of a minor protocol version ## Checklist - [x] PR title corresponds to the body of PR (we generate changelog entries from PRs). - [x] Tests for the changes have been added / updated. - [x] Documentation comments have been added / updated. - [x] Code has been formatted via `zk fmt` and `zk lint`. - [x] Spellcheck has been run via `zk spellcheck`.
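For reference, a quick sketch of how the reworked fields look through the Rust helpers added in this PR (all values are placeholders):

```rust
// `ProtocolVersion::new()` populates both the new camelCase fields and the
// deprecated snake_case ones, so old and new clients keep working during
// the migration.
let version = api::ProtocolVersion::new(
    24,                   // minor protocol version
    1_717_570_000,        // upgrade timestamp (placeholder)
    H256::repeat_byte(1), // bootloader code hash (placeholder)
    H256::repeat_byte(2), // default account code hash (placeholder)
    None,                 // no L2 system upgrade transaction
);
assert_eq!(version.minor_version(), Some(24));
assert_eq!(version.bootloader_code_hash(), Some(H256::repeat_byte(1)));
```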
--- ...6d2fe2908a22e933b2f25ce6b4357e51ed9b.json} | 18 +-- ...3c2a3d0a09c5ee88bdd671462904d4d27a355.json | 46 ++++++++ .../src/models/storage_protocol_version.rs | 45 +++----- core/lib/dal/src/protocol_versions_dal.rs | 1 - .../lib/dal/src/protocol_versions_web3_dal.rs | 24 ++-- core/lib/types/src/api/mod.rs | 106 +++++++++++++++++- core/node/node_sync/src/external_io.rs | 30 +++-- core/node/node_sync/src/tests.rs | 27 +++-- .../ts-integration/tests/api/web3.test.ts | 5 +- 9 files changed, 219 insertions(+), 83 deletions(-) rename core/lib/dal/.sqlx/{query-67852a656494ec8381b253b71e1b3572757aba0580637c0ef0e7cc5cdd7396f3.json => query-33b1fbb1e80c3815d30da5854c866d2fe2908a22e933b2f25ce6b4357e51ed9b.json} (64%) create mode 100644 core/lib/dal/.sqlx/query-5556ebdb040428b42c04ea9121b3c2a3d0a09c5ee88bdd671462904d4d27a355.json diff --git a/core/lib/dal/.sqlx/query-67852a656494ec8381b253b71e1b3572757aba0580637c0ef0e7cc5cdd7396f3.json b/core/lib/dal/.sqlx/query-33b1fbb1e80c3815d30da5854c866d2fe2908a22e933b2f25ce6b4357e51ed9b.json similarity index 64% rename from core/lib/dal/.sqlx/query-67852a656494ec8381b253b71e1b3572757aba0580637c0ef0e7cc5cdd7396f3.json rename to core/lib/dal/.sqlx/query-33b1fbb1e80c3815d30da5854c866d2fe2908a22e933b2f25ce6b4357e51ed9b.json index 6defdf7afebc..bb38503cc353 100644 --- a/core/lib/dal/.sqlx/query-67852a656494ec8381b253b71e1b3572757aba0580637c0ef0e7cc5cdd7396f3.json +++ b/core/lib/dal/.sqlx/query-33b1fbb1e80c3815d30da5854c866d2fe2908a22e933b2f25ce6b4357e51ed9b.json @@ -1,6 +1,6 @@ { "db_name": "PostgreSQL", - "query": "\n SELECT\n protocol_versions.id AS \"minor!\",\n protocol_versions.timestamp,\n protocol_versions.bootloader_code_hash,\n protocol_versions.default_account_code_hash,\n protocol_versions.upgrade_tx_hash,\n protocol_patches.patch,\n protocol_patches.recursion_scheduler_level_vk_hash,\n protocol_patches.recursion_node_level_vk_hash,\n protocol_patches.recursion_leaf_level_vk_hash,\n protocol_patches.recursion_circuits_set_vks_hash\n FROM\n protocol_versions\n JOIN protocol_patches ON protocol_patches.minor = protocol_versions.id\n WHERE\n id = $1\n ORDER BY\n protocol_patches.patch DESC\n LIMIT\n 1\n ", + "query": "\n SELECT\n protocol_versions.id AS \"minor!\",\n protocol_versions.timestamp,\n protocol_versions.bootloader_code_hash,\n protocol_versions.default_account_code_hash,\n protocol_patches.patch,\n protocol_patches.recursion_scheduler_level_vk_hash,\n protocol_patches.recursion_node_level_vk_hash,\n protocol_patches.recursion_leaf_level_vk_hash,\n protocol_patches.recursion_circuits_set_vks_hash\n FROM\n protocol_versions\n JOIN protocol_patches ON protocol_patches.minor = protocol_versions.id\n WHERE\n id = $1\n ORDER BY\n protocol_patches.patch DESC\n LIMIT\n 1\n ", "describe": { "columns": [ { @@ -25,31 +25,26 @@ }, { "ordinal": 4, - "name": "upgrade_tx_hash", - "type_info": "Bytea" - }, - { - "ordinal": 5, "name": "patch", "type_info": "Int4" }, { - "ordinal": 6, + "ordinal": 5, "name": "recursion_scheduler_level_vk_hash", "type_info": "Bytea" }, { - "ordinal": 7, + "ordinal": 6, "name": "recursion_node_level_vk_hash", "type_info": "Bytea" }, { - "ordinal": 8, + "ordinal": 7, "name": "recursion_leaf_level_vk_hash", "type_info": "Bytea" }, { - "ordinal": 9, + "ordinal": 8, "name": "recursion_circuits_set_vks_hash", "type_info": "Bytea" } @@ -64,7 +59,6 @@ false, false, false, - true, false, false, false, @@ -72,5 +66,5 @@ false ] }, - "hash": "67852a656494ec8381b253b71e1b3572757aba0580637c0ef0e7cc5cdd7396f3" + "hash": 
"33b1fbb1e80c3815d30da5854c866d2fe2908a22e933b2f25ce6b4357e51ed9b" } diff --git a/core/lib/dal/.sqlx/query-5556ebdb040428b42c04ea9121b3c2a3d0a09c5ee88bdd671462904d4d27a355.json b/core/lib/dal/.sqlx/query-5556ebdb040428b42c04ea9121b3c2a3d0a09c5ee88bdd671462904d4d27a355.json new file mode 100644 index 000000000000..5e9051587bb9 --- /dev/null +++ b/core/lib/dal/.sqlx/query-5556ebdb040428b42c04ea9121b3c2a3d0a09c5ee88bdd671462904d4d27a355.json @@ -0,0 +1,46 @@ +{ + "db_name": "PostgreSQL", + "query": "\n SELECT\n id AS \"minor!\",\n timestamp,\n bootloader_code_hash,\n default_account_code_hash,\n upgrade_tx_hash\n FROM\n protocol_versions\n WHERE\n id = $1\n ", + "describe": { + "columns": [ + { + "ordinal": 0, + "name": "minor!", + "type_info": "Int4" + }, + { + "ordinal": 1, + "name": "timestamp", + "type_info": "Int8" + }, + { + "ordinal": 2, + "name": "bootloader_code_hash", + "type_info": "Bytea" + }, + { + "ordinal": 3, + "name": "default_account_code_hash", + "type_info": "Bytea" + }, + { + "ordinal": 4, + "name": "upgrade_tx_hash", + "type_info": "Bytea" + } + ], + "parameters": { + "Left": [ + "Int4" + ] + }, + "nullable": [ + false, + false, + false, + false, + true + ] + }, + "hash": "5556ebdb040428b42c04ea9121b3c2a3d0a09c5ee88bdd671462904d4d27a355" +} diff --git a/core/lib/dal/src/models/storage_protocol_version.rs b/core/lib/dal/src/models/storage_protocol_version.rs index f21fa594f665..7ac6d70f38c9 100644 --- a/core/lib/dal/src/models/storage_protocol_version.rs +++ b/core/lib/dal/src/models/storage_protocol_version.rs @@ -19,7 +19,6 @@ pub struct StorageProtocolVersion { pub recursion_circuits_set_vks_hash: Vec, pub bootloader_code_hash: Vec, pub default_account_code_hash: Vec, - pub upgrade_tx_hash: Option>, } pub(crate) fn protocol_version_from_storage( @@ -56,36 +55,28 @@ pub(crate) fn protocol_version_from_storage( } } -impl From for api::ProtocolVersion { - fn from(storage_protocol_version: StorageProtocolVersion) -> Self { +#[derive(sqlx::FromRow)] +pub struct StorageApiProtocolVersion { + pub minor: i32, + pub timestamp: i64, + pub bootloader_code_hash: Vec, + pub default_account_code_hash: Vec, + pub upgrade_tx_hash: Option>, +} + +impl From for api::ProtocolVersion { + #[allow(deprecated)] + fn from(storage_protocol_version: StorageApiProtocolVersion) -> Self { let l2_system_upgrade_tx_hash = storage_protocol_version .upgrade_tx_hash .as_ref() .map(|hash| H256::from_slice(hash)); - api::ProtocolVersion { - version_id: storage_protocol_version.minor as u16, - timestamp: storage_protocol_version.timestamp as u64, - verification_keys_hashes: L1VerifierConfig { - params: VerifierParams { - recursion_node_level_vk_hash: H256::from_slice( - &storage_protocol_version.recursion_node_level_vk_hash, - ), - recursion_leaf_level_vk_hash: H256::from_slice( - &storage_protocol_version.recursion_leaf_level_vk_hash, - ), - recursion_circuits_set_vks_hash: H256::from_slice( - &storage_protocol_version.recursion_circuits_set_vks_hash, - ), - }, - recursion_scheduler_level_vk_hash: H256::from_slice( - &storage_protocol_version.recursion_scheduler_level_vk_hash, - ), - }, - base_system_contracts: BaseSystemContractsHashes { - bootloader: H256::from_slice(&storage_protocol_version.bootloader_code_hash), - default_aa: H256::from_slice(&storage_protocol_version.default_account_code_hash), - }, + api::ProtocolVersion::new( + storage_protocol_version.minor as u16, + storage_protocol_version.timestamp as u64, + H256::from_slice(&storage_protocol_version.bootloader_code_hash), + 
H256::from_slice(&storage_protocol_version.default_account_code_hash), l2_system_upgrade_tx_hash, - } + ) } } diff --git a/core/lib/dal/src/protocol_versions_dal.rs b/core/lib/dal/src/protocol_versions_dal.rs index c395d8cba4ce..212be734f0b3 100644 --- a/core/lib/dal/src/protocol_versions_dal.rs +++ b/core/lib/dal/src/protocol_versions_dal.rs @@ -254,7 +254,6 @@ impl ProtocolVersionsDal<'_, '_> { protocol_versions.timestamp, protocol_versions.bootloader_code_hash, protocol_versions.default_account_code_hash, - protocol_versions.upgrade_tx_hash, protocol_patches.patch, protocol_patches.recursion_scheduler_level_vk_hash, protocol_patches.recursion_node_level_vk_hash, diff --git a/core/lib/dal/src/protocol_versions_web3_dal.rs b/core/lib/dal/src/protocol_versions_web3_dal.rs index 5b5e1e21dca7..a3a7a162c3dd 100644 --- a/core/lib/dal/src/protocol_versions_web3_dal.rs +++ b/core/lib/dal/src/protocol_versions_web3_dal.rs @@ -1,7 +1,7 @@ use zksync_db_connection::{connection::Connection, error::DalResult, instrument::InstrumentExt}; use zksync_types::api::ProtocolVersion; -use crate::{models::storage_protocol_version::StorageProtocolVersion, Core, CoreDal}; +use crate::{models::storage_protocol_version::StorageApiProtocolVersion, Core, CoreDal}; #[derive(Debug)] pub struct ProtocolVersionsWeb3Dal<'a, 'c> { @@ -14,28 +14,18 @@ impl ProtocolVersionsWeb3Dal<'_, '_> { version_id: u16, ) -> DalResult> { let storage_protocol_version = sqlx::query_as!( - StorageProtocolVersion, + StorageApiProtocolVersion, r#" SELECT - protocol_versions.id AS "minor!", - protocol_versions.timestamp, - protocol_versions.bootloader_code_hash, - protocol_versions.default_account_code_hash, - protocol_versions.upgrade_tx_hash, - protocol_patches.patch, - protocol_patches.recursion_scheduler_level_vk_hash, - protocol_patches.recursion_node_level_vk_hash, - protocol_patches.recursion_leaf_level_vk_hash, - protocol_patches.recursion_circuits_set_vks_hash + id AS "minor!", + timestamp, + bootloader_code_hash, + default_account_code_hash, + upgrade_tx_hash FROM protocol_versions - JOIN protocol_patches ON protocol_patches.minor = protocol_versions.id WHERE id = $1 - ORDER BY - protocol_patches.patch DESC - LIMIT - 1 "#, i32::from(version_id) ) diff --git a/core/lib/types/src/api/mod.rs b/core/lib/types/src/api/mod.rs index 6bebb2388801..5c0bfe2d848b 100644 --- a/core/lib/types/src/api/mod.rs +++ b/core/lib/types/src/api/mod.rs @@ -640,18 +640,81 @@ impl From for DebugCall { } } +// TODO (PLA-965): remove deprecated fields from the struct. It is currently in a "migration" phase +// to keep compatibility between old and new versions. 
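// While the migration lasts, a serialized value carries both spellings, e.g.
// (abbreviated; values hypothetical):
//
//     {"version_id": 24, "minorVersion": 24, "timestamp": 0, ...,
//      "l2_system_upgrade_tx_hash": null, "l2SystemUpgradeTxHash": null}
//
// Readers should prefer the camelCase fields; the accessor methods below fall
// back to the deprecated ones when the new ones are absent.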
#[derive(Default, Serialize, Deserialize, Clone, Debug)] pub struct ProtocolVersion { - /// Protocol version ID - pub version_id: u16, + /// Minor version of the protocol + #[deprecated] + pub version_id: Option, + /// Minor version of the protocol + #[serde(rename = "minorVersion")] + pub minor_version: Option, /// Timestamp at which upgrade should be performed pub timestamp: u64, /// Verifier configuration - pub verification_keys_hashes: L1VerifierConfig, + #[deprecated] + pub verification_keys_hashes: Option, /// Hashes of base system contracts (bootloader and default account) - pub base_system_contracts: BaseSystemContractsHashes, + #[deprecated] + pub base_system_contracts: Option, + /// Bootloader code hash + #[serde(rename = "bootloaderCodeHash")] + pub bootloader_code_hash: Option, + /// Default account code hash + #[serde(rename = "defaultAccountCodeHash")] + pub default_account_code_hash: Option, /// L2 Upgrade transaction hash + #[deprecated] pub l2_system_upgrade_tx_hash: Option, + /// L2 Upgrade transaction hash + #[serde(rename = "l2SystemUpgradeTxHash")] + pub l2_system_upgrade_tx_hash_new: Option, +} + +#[allow(deprecated)] +impl ProtocolVersion { + pub fn new( + minor_version: u16, + timestamp: u64, + bootloader_code_hash: H256, + default_account_code_hash: H256, + l2_system_upgrade_tx_hash: Option, + ) -> Self { + Self { + version_id: Some(minor_version), + minor_version: Some(minor_version), + timestamp, + verification_keys_hashes: Some(Default::default()), + base_system_contracts: Some(BaseSystemContractsHashes { + bootloader: bootloader_code_hash, + default_aa: default_account_code_hash, + }), + bootloader_code_hash: Some(bootloader_code_hash), + default_account_code_hash: Some(default_account_code_hash), + l2_system_upgrade_tx_hash, + l2_system_upgrade_tx_hash_new: l2_system_upgrade_tx_hash, + } + } + + pub fn bootloader_code_hash(&self) -> Option { + self.bootloader_code_hash + .or_else(|| self.base_system_contracts.map(|hashes| hashes.bootloader)) + } + + pub fn default_account_code_hash(&self) -> Option { + self.default_account_code_hash + .or_else(|| self.base_system_contracts.map(|hashes| hashes.default_aa)) + } + + pub fn minor_version(&self) -> Option { + self.minor_version.or(self.version_id) + } + + pub fn l2_system_upgrade_tx_hash(&self) -> Option { + self.l2_system_upgrade_tx_hash_new + .or(self.l2_system_upgrade_tx_hash) + } } #[derive(Debug, Serialize, Deserialize, Clone)] @@ -751,3 +814,38 @@ pub struct ApiStorageLog { pub key: U256, pub written_value: U256, } + +#[cfg(test)] +mod tests { + use super::*; + + // TODO (PLA-965): remove test after removing the deprecated fields.
+    #[allow(deprecated)]
+    #[test]
+    fn check_protocol_version_type_compatibility() {
+        let new_version = ProtocolVersion {
+            version_id: Some(24),
+            minor_version: Some(24),
+            timestamp: 0,
+            verification_keys_hashes: Some(Default::default()),
+            base_system_contracts: Some(Default::default()),
+            bootloader_code_hash: Some(Default::default()),
+            default_account_code_hash: Some(Default::default()),
+            l2_system_upgrade_tx_hash: Default::default(),
+            l2_system_upgrade_tx_hash_new: Default::default(),
+        };
+
+        #[derive(Deserialize)]
+        #[allow(dead_code)]
+        struct OldProtocolVersion {
+            pub version_id: u16,
+            pub timestamp: u64,
+            pub verification_keys_hashes: L1VerifierConfig,
+            pub base_system_contracts: BaseSystemContractsHashes,
+            pub l2_system_upgrade_tx_hash: Option<H256>,
+        }
+
+        serde_json::from_str::<OldProtocolVersion>(&serde_json::to_string(&new_version).unwrap())
+            .unwrap();
+    }
+}
diff --git a/core/node/node_sync/src/external_io.rs b/core/node/node_sync/src/external_io.rs
index 630ecc36c416..559c377e4537 100644
--- a/core/node/node_sync/src/external_io.rs
+++ b/core/node/node_sync/src/external_io.rs
@@ -337,35 +337,43 @@ impl StateKeeperIO for ExternalIO {
             .await
             .context("failed to fetch protocol version from the main node")?
             .context("protocol version is missing on the main node")?;
+        let minor = protocol_version
+            .minor_version()
+            .context("Missing minor protocol version")?;
+        let bootloader_code_hash = protocol_version
+            .bootloader_code_hash()
+            .context("Missing bootloader code hash")?;
+        let default_account_code_hash = protocol_version
+            .default_account_code_hash()
+            .context("Missing default account code hash")?;
+        let l2_system_upgrade_tx_hash = protocol_version.l2_system_upgrade_tx_hash();
         self.pool
             .connection_tagged("sync_layer")
             .await?
             .protocol_versions_dal()
             .save_protocol_version(
                 ProtocolSemanticVersion {
-                    minor: protocol_version
-                        .version_id
+                    minor: minor
                         .try_into()
                         .context("cannot convert protocol version")?,
                     patch: VersionPatch(0),
                 },
                 protocol_version.timestamp,
-                protocol_version.verification_keys_hashes,
-                protocol_version.base_system_contracts,
-                protocol_version.l2_system_upgrade_tx_hash,
+                Default::default(), // verification keys are unused for EN
+                BaseSystemContractsHashes {
+                    bootloader: bootloader_code_hash,
+                    default_aa: default_account_code_hash,
+                },
+                l2_system_upgrade_tx_hash,
             )
             .await?;
-        let BaseSystemContractsHashes {
-            bootloader,
-            default_aa,
-        } = protocol_version.base_system_contracts;
         let bootloader = self
-            .get_base_system_contract(bootloader, cursor.next_l2_block)
+            .get_base_system_contract(bootloader_code_hash, cursor.next_l2_block)
             .await
             .with_context(|| format!("cannot fetch bootloader code for {protocol_version:?}"))?;
         let default_aa = self
-            .get_base_system_contract(default_aa, cursor.next_l2_block)
+            .get_base_system_contract(default_account_code_hash, cursor.next_l2_block)
             .await
             .with_context(|| format!("cannot fetch default AA code for {protocol_version:?}"))?;
         Ok(BaseSystemContracts {
diff --git a/core/node/node_sync/src/tests.rs b/core/node/node_sync/src/tests.rs
index 1d278d1af385..9830641a9fa1 100644
--- a/core/node/node_sync/src/tests.rs
+++ b/core/node/node_sync/src/tests.rs
@@ -77,10 +77,11 @@ impl MockMainNodeClient {
     pub fn insert_protocol_version(&mut self, version: api::ProtocolVersion) {
         self.system_contracts
-            .insert(version.base_system_contracts.bootloader, vec![]);
+            .insert(version.bootloader_code_hash.unwrap(), vec![]);
         self.system_contracts
-            .insert(version.base_system_contracts.default_aa, vec![]);
-
-        self.protocol_versions.insert(version.version_id, version);
+            .insert(version.default_account_code_hash.unwrap(), vec![]);
+        self.protocol_versions
+            .insert(version.minor_version.unwrap(), version);
     }
 }
@@ -300,12 +301,10 @@ async fn external_io_works_without_local_protocol_version(snapshot_recovery: boo
     let (actions_sender, action_queue) = ActionQueue::new();
     let mut client = MockMainNodeClient::default();
     let next_protocol_version = api::ProtocolVersion {
-        version_id: ProtocolVersionId::next() as u16,
+        minor_version: Some(ProtocolVersionId::next() as u16),
         timestamp: snapshot.l2_block_timestamp + 1,
-        base_system_contracts: BaseSystemContractsHashes {
-            bootloader: H256::repeat_byte(1),
-            default_aa: H256::repeat_byte(2),
-        },
+        bootloader_code_hash: Some(H256::repeat_byte(1)),
+        default_account_code_hash: Some(H256::repeat_byte(2)),
         ..api::ProtocolVersion::default()
     };
     client.insert_protocol_version(next_protocol_version.clone());
@@ -335,8 +334,16 @@ async fn external_io_works_without_local_protocol_version(snapshot_recovery: boo
         next_protocol_version.timestamp
     );
     assert_eq!(
-        persisted_protocol_version.base_system_contracts_hashes,
-        next_protocol_version.base_system_contracts
+        persisted_protocol_version
+            .base_system_contracts_hashes
+            .bootloader,
+        next_protocol_version.bootloader_code_hash.unwrap()
+    );
+    assert_eq!(
+        persisted_protocol_version
+            .base_system_contracts_hashes
+            .default_aa,
+        next_protocol_version.default_account_code_hash.unwrap()
     );
 
     let l2_block = storage
diff --git a/core/tests/ts-integration/tests/api/web3.test.ts b/core/tests/ts-integration/tests/api/web3.test.ts
index ff590a24cf59..39f9eb610d30 100644
--- a/core/tests/ts-integration/tests/api/web3.test.ts
+++ b/core/tests/ts-integration/tests/api/web3.test.ts
@@ -833,7 +833,10 @@ describe('web3 API compatibility tests', () => {
         };
         let expectedProtocolVersion = {
             version_id: expect.any(Number),
+            minorVersion: expect.any(Number),
             base_system_contracts: expectedSysContractsHashes,
+            bootloaderCodeHash: expect.stringMatching(HEX_VALUE_REGEX),
+            defaultAccountCodeHash: expect.stringMatching(HEX_VALUE_REGEX),
             verification_keys_hashes: {
                 params: {
                     recursion_circuits_set_vks_hash: expect.stringMatching(HEX_VALUE_REGEX),
@@ -847,7 +850,7 @@ describe('web3 API compatibility tests', () => {
         expect(latestProtocolVersion).toMatchObject(expectedProtocolVersion);
 
         const exactProtocolVersion = await alice.provider.send('zks_getProtocolVersion', [
-            latestProtocolVersion.version_id
+            latestProtocolVersion.minorVersion
        ]);
         expect(exactProtocolVersion).toMatchObject(expectedProtocolVersion);
     });
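
Side note for reviewers: the deprecated snake_case fields and the new camelCase fields coexist through the fallback accessors added to `api::ProtocolVersion` above. Below is a minimal, self-contained sketch of that pattern, not the patched code itself: types are simplified so it compiles with only `serde`/`serde_json` (hashes are plain `String`s, and the struct is a stripped-down stand-in for the real one).

```rust
use serde::Deserialize;

// Stripped-down stand-in for `api::ProtocolVersion`.
#[derive(Debug, Deserialize)]
struct ProtocolVersion {
    // Deprecated field, still emitted by older servers.
    version_id: Option<u16>,
    // Deprecated container for base system contract hashes.
    base_system_contracts: Option<BaseSystemContractsHashes>,
    // New fields; camelCase on the wire, as in the patch.
    #[serde(rename = "minorVersion")]
    minor_version: Option<u16>,
    #[serde(rename = "bootloaderCodeHash")]
    bootloader_code_hash: Option<String>,
}

#[derive(Debug, Deserialize)]
struct BaseSystemContractsHashes {
    bootloader: String,
}

impl ProtocolVersion {
    // Prefer the new field and fall back to the deprecated one.
    fn minor_version(&self) -> Option<u16> {
        self.minor_version.or(self.version_id)
    }

    fn bootloader_code_hash(&self) -> Option<String> {
        self.bootloader_code_hash.clone().or_else(|| {
            self.base_system_contracts
                .as_ref()
                .map(|hashes| hashes.bootloader.clone())
        })
    }
}

fn main() {
    // Old-style response: only deprecated fields are present.
    let old = r#"{"version_id":24,"base_system_contracts":{"bootloader":"0x01"}}"#;
    let v: ProtocolVersion = serde_json::from_str(old).unwrap();
    assert_eq!(v.minor_version(), Some(24));
    assert_eq!(v.bootloader_code_hash().as_deref(), Some("0x01"));

    // New-style response: the camelCase fields win even when both are present.
    let new = r#"{"version_id":24,"minorVersion":25,"bootloaderCodeHash":"0x02"}"#;
    let v: ProtocolVersion = serde_json::from_str(new).unwrap();
    assert_eq!(v.minor_version(), Some(25));
    assert_eq!(v.bootloader_code_hash().as_deref(), Some("0x02"));
}
```

This is why `ExternalIO` above can call `minor_version()`/`bootloader_code_hash()` against main nodes serving either the old or the new shape of `zks_getProtocolVersion`.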