From 4ce3d7b53215552c660fbd8b874659a3bb112b21 Mon Sep 17 00:00:00 2001
From: Lech <88630083+Artemka374@users.noreply.github.com>
Date: Fri, 7 Jun 2024 15:20:01 +0300
Subject: [PATCH 01/56] initial commit

---
 Cargo.lock                                  |   1 +
 core/lib/prover_interface/src/inputs.rs     |  20 +-
 core/node/vm_runner/Cargo.toml              |   1 +
 core/node/vm_runner/src/impls/bwip.rs       | 243 ++++++++++++++++++
 core/node/vm_runner/src/impls/mod.rs        |   1 +
 .../witness_generator/src/basic_circuits.rs |  52 +---
 6 files changed, 277 insertions(+), 41 deletions(-)
 create mode 100644 core/node/vm_runner/src/impls/bwip.rs

diff --git a/Cargo.lock b/Cargo.lock
index 0bb1fd0fced5..a81745ccd919 100644
--- a/Cargo.lock
+++ b/Cargo.lock
@@ -9446,6 +9446,7 @@ dependencies = [
  "zksync_dal",
  "zksync_node_genesis",
  "zksync_node_test_utils",
+ "zksync_prover_interface",
  "zksync_state",
  "zksync_state_keeper",
  "zksync_storage",
diff --git a/core/lib/prover_interface/src/inputs.rs b/core/lib/prover_interface/src/inputs.rs
index e4c0a0d3846b..1be7316b2929 100644
--- a/core/lib/prover_interface/src/inputs.rs
+++ b/core/lib/prover_interface/src/inputs.rs
@@ -1,9 +1,9 @@
-use std::{convert::TryInto, fmt::Debug};
+use std::{collections::HashMap, convert::TryInto, fmt::Debug};

 use serde::{Deserialize, Serialize};
 use serde_with::{serde_as, Bytes};
 use zksync_object_store::{serialize_using_bincode, Bucket, StoredObject};
-use zksync_types::{L1BatchNumber, H256, U256};
+use zksync_types::{L1BatchNumber, StorageLog, H256, U256};

 const HASH_LEN: usize = H256::len_bytes();

@@ -144,6 +144,22 @@ pub struct BasicCircuitWitnessGeneratorInput {
     pub merkle_paths_input: PrepareBasicCircuitsJob,
 }

+pub struct WitnessGeneratorData {
+    pub block_number: L1BatchNumber,
+    pub previous_block_hash: H256,
+    pub previous_block_timestamp: u64,
+    pub block_timestamp: u64,
+    pub used_bytecodes: HashMap<U256, Vec<u8>>,
+    pub initial_heap_content: Vec<(usize, U256)>,
+
+    pub storage_logs: Vec<StorageLog>,
+    pub bootloader_code_hash: H256,
+    pub bootloader_code: Vec<[u8; 32]>,
+    pub default_account_code_hash: U256,
+    pub storage_refunds: Vec<u32>,
+    pub pubdata_costs: Option<Vec<i32>>,
+}
+
 #[cfg(test)]
 mod tests {
     use super::*;
diff --git a/core/node/vm_runner/Cargo.toml b/core/node/vm_runner/Cargo.toml
index b3ede5a796be..b6588c3471a2 100644
--- a/core/node/vm_runner/Cargo.toml
+++ b/core/node/vm_runner/Cargo.toml
@@ -18,6 +18,7 @@ zksync_state.workspace = true
 zksync_storage.workspace = true
 zksync_state_keeper.workspace = true
 zksync_utils.workspace = true
+zksync_prover_interface.workspace = true
 vm_utils.workspace = true

 tokio = { workspace = true, features = ["time"] }
diff --git a/core/node/vm_runner/src/impls/bwip.rs b/core/node/vm_runner/src/impls/bwip.rs
new file mode 100644
index 000000000000..db035b5bbced
--- /dev/null
+++ b/core/node/vm_runner/src/impls/bwip.rs
@@ -0,0 +1,243 @@
+use std::{collections::HashSet, sync::Arc};
+
+use anyhow::Context;
+use async_trait::async_trait;
+use tokio::sync::watch;
+use zksync_dal::{Connection, ConnectionPool, Core, CoreDal};
+use zksync_prover_interface::inputs::WitnessGeneratorData;
+use zksync_state_keeper::{MainBatchExecutor, StateKeeperOutputHandler, UpdatesManager};
+use zksync_types::{L1BatchNumber, L2ChainId, H256};
+use zksync_utils::{bytes_to_chunks, h256_to_u256, u256_to_h256};
+
+use crate::{
+    storage::StorageSyncTask, ConcurrentOutputHandlerFactory, ConcurrentOutputHandlerFactoryTask,
+    OutputHandlerFactory, VmRunner, VmRunnerIo, VmRunnerStorage,
+};
+
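For orientation, a hedged sketch of how this component is meant to be driven once the series lands. Only `BasicWitnessInputProducer::new` and `run` below come from this commit; the pool, RocksDB path, stop channel, and the `run(stop)` methods on the two returned tasks are assumptions modeled on the existing `ProtectiveReadsWriter` wiring.

```rust
// Illustrative wiring only; not part of the patch.
async fn spawn_bwip(
    pool: zksync_dal::ConnectionPool<zksync_dal::Core>,
    stop: tokio::sync::watch::Receiver<bool>,
) -> anyhow::Result<()> {
    let (producer, tasks) = BasicWitnessInputProducer::new(
        pool,
        "./db/bwip_cache".to_owned(),       // RocksDB cache path (assumed)
        zksync_types::L2ChainId::default(), // chain id (assumed)
        3,                                  // window size: up to 3 batches in flight
    )
    .await?;
    // Both auxiliary tasks must run alongside the producer itself.
    tokio::spawn(tasks.loader_task.run(stop.clone()));
    tokio::spawn(tasks.output_handler_factory_task.run(stop.clone()));
    producer.run(&stop).await
}
```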
+/// A standalone component that re-executes sealed L1 batches and produces the input data needed for basic witness generation.
+#[derive(Debug)]
+pub struct BasicWitnessInputProducer {
+    vm_runner: VmRunner,
+}
+
+impl BasicWitnessInputProducer {
+    /// Creates a new basic witness input producer from the provided DB parameters and window size which
+    /// regulates how many batches this component can handle at the same time.
+    pub async fn new(
+        pool: ConnectionPool<Core>,
+        rocksdb_path: String,
+        chain_id: L2ChainId,
+        window_size: u32,
+    ) -> anyhow::Result<(Self, BasicWitnessInputProducerTasks)> {
+        let io = BasicWitnessInputProducerIo { window_size };
+        let (loader, loader_task) =
+            VmRunnerStorage::new(pool.clone(), rocksdb_path, io.clone(), chain_id).await?;
+        let output_handler_factory =
+            BasicWitnessInputProducerOutputHandlerFactory { pool: pool.clone() };
+        let (output_handler_factory, output_handler_factory_task) =
+            ConcurrentOutputHandlerFactory::new(pool.clone(), io.clone(), output_handler_factory);
+        let batch_processor = MainBatchExecutor::new(false, false);
+        let vm_runner = VmRunner::new(
+            pool,
+            Box::new(io),
+            Arc::new(loader),
+            Box::new(output_handler_factory),
+            Box::new(batch_processor),
+        );
+        Ok((
+            Self { vm_runner },
+            BasicWitnessInputProducerTasks {
+                loader_task,
+                output_handler_factory_task,
+            },
+        ))
+    }
+
+    /// Continuously loads new available batches and saves the witness input data produced by
+    /// re-executing each of them.
+    ///
+    /// # Errors
+    ///
+    /// Propagates RocksDB and Postgres errors.
+    pub async fn run(self, stop_receiver: &watch::Receiver<bool>) -> anyhow::Result<()> {
+        self.vm_runner.run(stop_receiver).await
+    }
+}
+
+/// A collection of tasks that need to be run in order for the basic witness input producer to
+/// work as intended.
+#[derive(Debug)]
+pub struct BasicWitnessInputProducerTasks {
+    /// Task that synchronizes storage with new available batches.
+    pub loader_task: StorageSyncTask<BasicWitnessInputProducerIo>,
+    /// Task that handles output from processed batches.
+    pub output_handler_factory_task:
+        ConcurrentOutputHandlerFactoryTask<BasicWitnessInputProducerIo>,
+}
+
+#[derive(Debug, Clone)]
+pub struct BasicWitnessInputProducerIo {
+    window_size: u32,
+}
+
+#[async_trait]
+impl VmRunnerIo for BasicWitnessInputProducerIo {
+    fn name(&self) -> &'static str {
+        "protective_reads_writer"
+    }
+
+    async fn latest_processed_batch(
+        &self,
+        conn: &mut Connection<'_, Core>,
+    ) -> anyhow::Result<L1BatchNumber> {
+        Ok(conn
+            .vm_runner_dal()
+            .get_protective_reads_latest_processed_batch()
+            .await?)
+    }
+
+    async fn last_ready_to_be_loaded_batch(
+        &self,
+        conn: &mut Connection<'_, Core>,
+    ) -> anyhow::Result<L1BatchNumber> {
+        Ok(conn
+            .vm_runner_dal()
+            .get_protective_reads_last_ready_batch(self.window_size)
+            .await?)
+    }
+
+    async fn mark_l1_batch_as_completed(
+        &self,
+        conn: &mut Connection<'_, Core>,
+        l1_batch_number: L1BatchNumber,
+    ) -> anyhow::Result<()> {
+        Ok(conn
+            .vm_runner_dal()
+            .mark_protective_reads_batch_as_completed(l1_batch_number)
+            .await?)
+    }
+}
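The `window_size` knob above bounds how far past the last processed batch the loader may run ahead, clamped by the newest sealed batch. A self-contained sketch of that invariant (plain Rust with made-up numbers; the SQL that actually implements it for BWIP appears in a later commit of this series):

```rust
/// Illustrative only: mirrors the windowing rule that the `VmRunnerIo` impl
/// above delegates to the DAL.
fn last_ready_batch(latest_sealed: u32, latest_processed: u32, window_size: u32) -> u32 {
    latest_sealed.min(latest_processed + window_size)
}

fn main() {
    // With batch 100 sealed, batch 90 processed, and a window of 4,
    // the runner may load batches up to 94 concurrently.
    assert_eq!(last_ready_batch(100, 90, 4), 94);
    // The window never lets the runner get ahead of the newest sealed batch.
    assert_eq!(last_ready_batch(92, 90, 4), 92);
}
```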
+ } +} + +#[derive(Debug)] +struct BasicWitnessInputProducerOutputHandler { + pool: ConnectionPool, +} + +#[async_trait] +impl StateKeeperOutputHandler for BasicWitnessInputProducerOutputHandler { + async fn handle_l2_block(&mut self, _updates_manager: &UpdatesManager) -> anyhow::Result<()> { + Ok(()) + } + + async fn handle_l1_batch( + &mut self, + updates_manager: Arc, + ) -> anyhow::Result<()> { + let finished_batch = updates_manager + .l1_batch + .finished + .as_ref() + .context("L1 batch is not actually finished")?; + let l1_batch_number = updates_manager.l1_batch.number; + + let mut connection = self.pool.connection().await?; + + let block_header = connection + .blocks_dal() + .get_l1_batch_header(l1_batch_number) + .await + .unwrap() + .unwrap(); + let initial_heap_content = connection + .blocks_dal() + .get_initial_bootloader_heap(l1_batch_number) + .await + .unwrap() + .unwrap(); + let (_, previous_block_timestamp) = connection + .blocks_dal() + .get_l1_batch_state_root_and_timestamp(l1_batch_number - 1) + .await + .unwrap() + .unwrap(); + let previous_block_hash = connection + .blocks_dal() + .get_l1_batch_state_root(l1_batch_number - 1) + .await + .unwrap() + .expect("cannot generate witness before the root hash is computed"); + + let account_code_hash = h256_to_u256(block_header.base_system_contracts_hashes.default_aa); + let account_bytecode_bytes = connection + .factory_deps_dal() + .get_sealed_factory_dep(block_header.base_system_contracts_hashes.default_aa) + .await + .expect("Failed fetching default account bytecode from DB") + .expect("Default account bytecode should exist"); + let account_bytecode = bytes_to_chunks(&account_bytecode_bytes); + + let hashes: HashSet = block_header + .used_contract_hashes + .iter() + // SMA-1555: remove this hack once updated to the latest version of `zkevm_test_harness` + .filter(|&&hash| { + hash != h256_to_u256(block_header.base_system_contracts_hashes.bootloader) + }) + .map(|hash| u256_to_h256(*hash)) + .collect(); + let mut used_bytecodes = connection + .factory_deps_dal() + .get_factory_deps(&hashes) + .await; + if block_header + .used_contract_hashes + .contains(&account_code_hash) + { + used_bytecodes.insert(account_code_hash, account_bytecode); + } + + assert_eq!( + hashes.len(), + used_bytecodes.len(), + "{} factory deps are not found in DB", + hashes.len() - used_bytecodes.len() + ); + + let result = WitnessGeneratorData { + block_number: l1_batch_number, + previous_block_hash, + previous_block_timestamp, + block_timestamp: block_header.timestamp, + used_bytecodes, + initial_heap_content, + + storage_logs: vec![], + bootloader_code_hash: Default::default(), + bootloader_code: vec![], + default_account_code_hash: account_code_hash, + storage_refunds: vec![], + pubdata_costs: None, + }; + + Ok(()) + } +} + +#[derive(Debug)] +struct BasicWitnessInputProducerOutputHandlerFactory { + pool: ConnectionPool, +} + +#[async_trait] +impl OutputHandlerFactory for BasicWitnessInputProducerOutputHandlerFactory { + async fn create_handler( + &mut self, + _l1_batch_number: L1BatchNumber, + ) -> anyhow::Result> { + Ok(Box::new(BasicWitnessInputProducerOutputHandler { + pool: self.pool.clone(), + })) + } +} + +pub struct BasicCircuitWitnessGeneratorInput {} diff --git a/core/node/vm_runner/src/impls/mod.rs b/core/node/vm_runner/src/impls/mod.rs index 70d01f6932ef..af671639dce7 100644 --- a/core/node/vm_runner/src/impls/mod.rs +++ b/core/node/vm_runner/src/impls/mod.rs @@ -1,3 +1,4 @@ +mod bwip; mod protective_reads; pub use 
protective_reads::{ProtectiveReadsWriter, ProtectiveReadsWriterTasks}; diff --git a/prover/witness_generator/src/basic_circuits.rs b/prover/witness_generator/src/basic_circuits.rs index 65d3b976c086..c367087608b2 100644 --- a/prover/witness_generator/src/basic_circuits.rs +++ b/prover/witness_generator/src/basic_circuits.rs @@ -36,7 +36,9 @@ use zksync_prover_fri_types::{ AuxOutputWitnessWrapper, }; use zksync_prover_fri_utils::get_recursive_layer_circuit_id_for_base_layer; -use zksync_prover_interface::inputs::{BasicCircuitWitnessGeneratorInput, PrepareBasicCircuitsJob}; +use zksync_prover_interface::inputs::{ + BasicCircuitWitnessGeneratorInput, PrepareBasicCircuitsJob, WitnessGeneratorData, +}; use zksync_queued_job_processor::JobProcessor; use zksync_state::{PostgresStorage, StorageView}; use zksync_types::{ @@ -450,7 +452,7 @@ async fn generate_witness( block_number: L1BatchNumber, object_store: &dyn ObjectStore, connection_pool: ConnectionPool, - input: BasicCircuitWitnessGeneratorInput, + input: WitnessGeneratorData, eip_4844_blobs: Eip4844Blobs, ) -> ( Vec<(u8, String)>, @@ -490,24 +492,9 @@ async fn generate_witness( .expect("Failed fetching bootloader bytecode from DB") .expect("Bootloader bytecode should exist"); let bootloader_code = bytes_to_chunks(&bootloader_code_bytes); - let account_bytecode_bytes = connection - .factory_deps_dal() - .get_sealed_factory_dep(header.base_system_contracts_hashes.default_aa) - .await - .expect("Failed fetching default account bytecode from DB") - .expect("Default account bytecode should exist"); - let account_bytecode = bytes_to_chunks(&account_bytecode_bytes); + let bootloader_contents = expand_bootloader_contents(&input.initial_heap_content, protocol_version); - let account_code_hash = h256_to_u256(header.base_system_contracts_hashes.default_aa); - - let hashes: HashSet = input - .used_bytecodes_hashes - .iter() - // SMA-1555: remove this hack once updated to the latest version of `zkevm_test_harness` - .filter(|&&hash| hash != h256_to_u256(header.base_system_contracts_hashes.bootloader)) - .map(|hash| u256_to_h256(*hash)) - .collect(); let StorageOracleInfo { storage_refunds, @@ -519,21 +506,6 @@ async fn generate_witness( .unwrap() .unwrap(); - let mut used_bytecodes = connection - .factory_deps_dal() - .get_factory_deps(&hashes) - .await; - if input.used_bytecodes_hashes.contains(&account_code_hash) { - used_bytecodes.insert(account_code_hash, account_bytecode); - } - - assert_eq!( - hashes.len(), - used_bytecodes.len(), - "{} factory deps are not found in DB", - hashes.len() - used_bytecodes.len() - ); - // `DbStorageProvider` was designed to be used in API, so it accepts miniblock numbers. // Probably, we should make it work with L1 batch numbers too. 
let (_, last_miniblock_number) = connection @@ -573,8 +545,10 @@ async fn generate_witness( VmStorageOracle::new(storage_view.clone()); let storage_oracle = StorageOracle::new( vm_storage_oracle, - storage_refunds, - pubdata_costs.expect("pubdata costs should be present"), + input.storage_refunds, + input + .pubdata_costs + .expect("pubdata costs should be present"), ); let path = KZG_TRUSTED_SETUP_FILE @@ -585,13 +559,13 @@ async fn generate_witness( let (scheduler_witness, block_witness) = zkevm_test_harness::external_calls::run( Address::zero(), BOOTLOADER_ADDRESS, - bootloader_code, + input.bootloader_code, bootloader_contents, false, - account_code_hash, + input.default_account_code_hash, // NOTE: this will be evm_simulator_code_hash in future releases - account_code_hash, - used_bytecodes, + input.default_account_code_hash, + input.used_bytecodes, Vec::default(), MAX_CYCLES_FOR_TX as usize, geometry_config, From 69a282811800e1e3d3e868d86cd1426c03f77932 Mon Sep 17 00:00:00 2001 From: Lech <88630083+Artemka374@users.noreply.github.com> Date: Tue, 18 Jun 2024 15:09:53 +0300 Subject: [PATCH 02/56] few more tries to compose data --- core/lib/prover_interface/src/inputs.rs | 10 +++- core/lib/state/src/lib.rs | 25 +++++---- core/lib/state/src/witness.rs | 56 +++++++++++++++++++ core/lib/types/src/storage/mod.rs | 8 +-- core/node/vm_runner/src/impls/bwip.rs | 11 +++- .../witness_generator/src/basic_circuits.rs | 7 +-- 6 files changed, 94 insertions(+), 23 deletions(-) create mode 100644 core/lib/state/src/witness.rs diff --git a/core/lib/prover_interface/src/inputs.rs b/core/lib/prover_interface/src/inputs.rs index 1be7316b2929..33f598fa17ed 100644 --- a/core/lib/prover_interface/src/inputs.rs +++ b/core/lib/prover_interface/src/inputs.rs @@ -3,7 +3,10 @@ use std::{collections::HashMap, convert::TryInto, fmt::Debug}; use serde::{Deserialize, Serialize}; use serde_with::{serde_as, Bytes}; use zksync_object_store::{serialize_using_bincode, Bucket, StoredObject}; -use zksync_types::{L1BatchNumber, StorageLog, H256, U256}; +use zksync_types::{ + commitment::L1BatchWithMetadata, L1BatchNumber, L2BlockNumber, ProtocolVersionId, StorageLog, + H256, U256, +}; const HASH_LEN: usize = H256::len_bytes(); @@ -146,11 +149,14 @@ pub struct BasicCircuitWitnessGeneratorInput { pub struct WitnessGeneratorData { pub block_number: L1BatchNumber, + pub previous_batch_with_metadata: L1BatchWithMetadata, + pub last_miniblock_number: L2BlockNumber, pub previous_block_hash: H256, pub previous_block_timestamp: u64, pub block_timestamp: u64, pub used_bytecodes: HashMap>, pub initial_heap_content: Vec<(usize, U256)>, + pub protocol_version: ProtocolVersionId, pub storage_logs: Vec, pub bootloader_code_hash: H256, @@ -158,6 +164,8 @@ pub struct WitnessGeneratorData { pub default_account_code_hash: U256, pub storage_refunds: Vec, pub pubdata_costs: Option>, + pub witness_storage_memory: (), + pub merkle_paths_input: PrepareBasicCircuitsJob, } #[cfg(test)] diff --git a/core/lib/state/src/lib.rs b/core/lib/state/src/lib.rs index 1359e62824f5..48565077f8f3 100644 --- a/core/lib/state/src/lib.rs +++ b/core/lib/state/src/lib.rs @@ -17,22 +17,11 @@ use zksync_types::{ H256, }; -mod cache; -mod catchup; -mod in_memory; -mod postgres; -mod rocksdb; -mod shadow_storage; -mod storage_factory; -mod storage_view; -#[cfg(test)] -mod test_utils; - pub use self::{ cache::sequential_cache::SequentialCache, catchup::{AsyncCatchupTask, RocksdbCell}, - in_memory::InMemoryStorage, // Note, that `test_infra` of the bootloader tests relies 
on this value to be exposed
+    in_memory::InMemoryStorage,
+    in_memory::IN_MEMORY_STORAGE_DEFAULT_NETWORK_ID,
     postgres::{PostgresStorage, PostgresStorageCaches, PostgresStorageCachesTask},
     rocksdb::{
@@ -43,6 +32,18 @@ pub use self::{
     storage_view::{StorageView, StorageViewMetrics},
 };

+mod cache;
+mod catchup;
+mod in_memory;
+mod postgres;
+mod rocksdb;
+mod shadow_storage;
+mod storage_factory;
+mod storage_view;
+#[cfg(test)]
+mod test_utils;
+mod witness;
+
 /// Functionality to read from the VM storage.
 pub trait ReadStorage: fmt::Debug {
     /// Read value of the key.
diff --git a/core/lib/state/src/witness.rs b/core/lib/state/src/witness.rs
new file mode 100644
index 000000000000..50e2d9b54076
--- /dev/null
+++ b/core/lib/state/src/witness.rs
@@ -0,0 +1,56 @@
+use vise::{Counter, Metrics};
+use zksync_types::{witness_block_state::WitnessBlockState, StorageKey, StorageValue, H256};
+
+use crate::ReadStorage;
+
+#[derive(Debug, Metrics)]
+#[metrics(prefix = "witness_storage")]
+struct WitnessStorageMetrics {
+    /// Number of unexpected calls when calling `get_enumeration_index` on a witness storage.
+    get_enumeration_index_unexpected_call: Counter,
+}
+
+#[vise::register]
+static METRICS: vise::Global<WitnessStorageMetrics> = vise::Global::new();
+
+/// [`ReadStorage`] implementation backed by a binary-serialized [`WitnessBlockState`].
+/// Note that `load_factory_dep` is not used here:
+/// factory deps data is used directly inside the witness generator, loaded with the blob.
+#[derive(Debug)]
+pub struct WitnessStorage<'a> {
+    block_state: WitnessBlockState,
+    metrics: &'a WitnessStorageMetrics,
+}
+
+impl WitnessStorage<'_> {
+    /// Creates a new storage with the provided witness block state.
+    pub fn new(block_state: WitnessBlockState) -> Self {
+        Self {
+            block_state,
+            metrics: &METRICS,
+        }
+    }
+}
+
+impl ReadStorage for WitnessStorage<'_> {
+    fn read_value(&mut self, key: &StorageKey) -> StorageValue {
+        *self
+            .block_state
+            .read_storage_key
+            .get(key)
+            .unwrap_or(&H256::default())
+    }
+
+    fn is_write_initial(&mut self, key: &StorageKey) -> bool {
+        *self.block_state.is_write_initial.get(key).unwrap_or(&false)
+    }
+
+    fn load_factory_dep(&mut self, _hash: H256) -> Option<Vec<u8>> {
+        None
+    }
+
+    fn get_enumeration_index(&mut self, _key: &StorageKey) -> Option<u64> {
+        self.metrics.get_enumeration_index_unexpected_call.inc();
+        None
+    }
+}
diff --git a/core/lib/types/src/storage/mod.rs b/core/lib/types/src/storage/mod.rs
index 510ec5b19d12..a30a57bffa51 100644
--- a/core/lib/types/src/storage/mod.rs
+++ b/core/lib/types/src/storage/mod.rs
@@ -1,18 +1,18 @@
 use core::fmt::Debug;

 use blake2::{Blake2s256, Digest};
+pub use log::*;
 use serde::{Deserialize, Serialize};
 use zksync_basic_types::{web3::keccak256, L2ChainId};
+pub use zksync_system_constants::*;
+use zksync_utils::address_to_h256;

 use crate::{AccountTreeId, Address, H160, H256, U256};

 pub mod log;
+pub mod witness_block_state;
 pub mod writes;

-pub use log::*;
-pub use zksync_system_constants::*;
-use zksync_utils::address_to_h256;
-
 /// Typed fully qualified key of the storage slot in global state tree.
#[derive(Debug, Copy, Clone, Hash, Eq, PartialEq, Ord, PartialOrd)] #[derive(Serialize, Deserialize)] diff --git a/core/node/vm_runner/src/impls/bwip.rs b/core/node/vm_runner/src/impls/bwip.rs index db035b5bbced..e72f0ed08983 100644 --- a/core/node/vm_runner/src/impls/bwip.rs +++ b/core/node/vm_runner/src/impls/bwip.rs @@ -6,7 +6,7 @@ use tokio::sync::watch; use zksync_dal::{Connection, ConnectionPool, Core, CoreDal}; use zksync_prover_interface::inputs::WitnessGeneratorData; use zksync_state_keeper::{MainBatchExecutor, StateKeeperOutputHandler, UpdatesManager}; -use zksync_types::{L1BatchNumber, L2ChainId, H256}; +use zksync_types::{L1BatchNumber, L2ChainId, ProtocolVersionId, H256}; use zksync_utils::{bytes_to_chunks, h256_to_u256, u256_to_h256}; use crate::{ @@ -148,12 +148,14 @@ impl StateKeeperOutputHandler for BasicWitnessInputProducerOutputHandler { .await .unwrap() .unwrap(); + let initial_heap_content = connection .blocks_dal() .get_initial_bootloader_heap(l1_batch_number) .await .unwrap() .unwrap(); + let (_, previous_block_timestamp) = connection .blocks_dal() .get_l1_batch_state_root_and_timestamp(l1_batch_number - 1) @@ -205,18 +207,25 @@ impl StateKeeperOutputHandler for BasicWitnessInputProducerOutputHandler { let result = WitnessGeneratorData { block_number: l1_batch_number, + previous_batch_with_metadata: L1BatchWithMetadata {}, + last_miniblock_number: Default::default(), previous_block_hash, previous_block_timestamp, block_timestamp: block_header.timestamp, used_bytecodes, initial_heap_content, + protocol_version: block_header + .protocol_version + .unwrap_or(ProtocolVersionId::last_potentially_undefined()), storage_logs: vec![], bootloader_code_hash: Default::default(), bootloader_code: vec![], default_account_code_hash: account_code_hash, storage_refunds: vec![], pubdata_costs: None, + witness_storage_memory: (), + merkle_paths_input: (), }; Ok(()) diff --git a/prover/witness_generator/src/basic_circuits.rs b/prover/witness_generator/src/basic_circuits.rs index c367087608b2..a0056743d688 100644 --- a/prover/witness_generator/src/basic_circuits.rs +++ b/prover/witness_generator/src/basic_circuits.rs @@ -472,9 +472,7 @@ async fn generate_witness( .unwrap() .unwrap(); - let protocol_version = header - .protocol_version - .unwrap_or(ProtocolVersionId::last_potentially_undefined()); + let protocol_version = input.protocol_version; let previous_batch_with_metadata = connection .blocks_dal() @@ -538,8 +536,7 @@ async fn generate_witness( let make_circuits = tokio::task::spawn_blocking(move || { let connection = rt_handle.block_on(connection_pool.connection()).unwrap(); - let storage = PostgresStorage::new(rt_handle, connection, last_miniblock_number, true); - let storage_view = StorageView::new(storage).to_rc_ptr(); + let storage_view = StorageView::new(input.witness_storage_memory).to_rc_ptr(); let vm_storage_oracle: VmStorageOracle>, HistoryDisabled> = VmStorageOracle::new(storage_view.clone()); From 205df46f677ca37034a4c45200b1a6762f4fbd01 Mon Sep 17 00:00:00 2001 From: Lech <88630083+Artemka374@users.noreply.github.com> Date: Tue, 18 Jun 2024 15:55:19 +0300 Subject: [PATCH 03/56] more or less finalize needed data --- core/lib/prover_interface/src/inputs.rs | 12 +- core/node/vm_runner/src/impls/bwip.rs | 53 ++++---- .../witness_generator/src/basic_circuits.rs | 113 ++---------------- 3 files changed, 37 insertions(+), 141 deletions(-) diff --git a/core/lib/prover_interface/src/inputs.rs b/core/lib/prover_interface/src/inputs.rs index 33f598fa17ed..b0241e2da13d 100644 --- 
a/core/lib/prover_interface/src/inputs.rs +++ b/core/lib/prover_interface/src/inputs.rs @@ -4,8 +4,8 @@ use serde::{Deserialize, Serialize}; use serde_with::{serde_as, Bytes}; use zksync_object_store::{serialize_using_bincode, Bucket, StoredObject}; use zksync_types::{ - commitment::L1BatchWithMetadata, L1BatchNumber, L2BlockNumber, ProtocolVersionId, StorageLog, - H256, U256, + block::L1BatchHeader, commitment::L1BatchWithMetadata, L1BatchNumber, ProtocolVersionId, H256, + U256, }; const HASH_LEN: usize = H256::len_bytes(); @@ -148,18 +148,12 @@ pub struct BasicCircuitWitnessGeneratorInput { } pub struct WitnessGeneratorData { - pub block_number: L1BatchNumber, + pub l1_batch_header: L1BatchHeader, pub previous_batch_with_metadata: L1BatchWithMetadata, - pub last_miniblock_number: L2BlockNumber, - pub previous_block_hash: H256, - pub previous_block_timestamp: u64, - pub block_timestamp: u64, pub used_bytecodes: HashMap>, pub initial_heap_content: Vec<(usize, U256)>, pub protocol_version: ProtocolVersionId, - pub storage_logs: Vec, - pub bootloader_code_hash: H256, pub bootloader_code: Vec<[u8; 32]>, pub default_account_code_hash: U256, pub storage_refunds: Vec, diff --git a/core/node/vm_runner/src/impls/bwip.rs b/core/node/vm_runner/src/impls/bwip.rs index e72f0ed08983..39c9ed79127d 100644 --- a/core/node/vm_runner/src/impls/bwip.rs +++ b/core/node/vm_runner/src/impls/bwip.rs @@ -1,12 +1,11 @@ use std::{collections::HashSet, sync::Arc}; -use anyhow::Context; use async_trait::async_trait; use tokio::sync::watch; use zksync_dal::{Connection, ConnectionPool, Core, CoreDal}; use zksync_prover_interface::inputs::WitnessGeneratorData; use zksync_state_keeper::{MainBatchExecutor, StateKeeperOutputHandler, UpdatesManager}; -use zksync_types::{L1BatchNumber, L2ChainId, ProtocolVersionId, H256}; +use zksync_types::{block::StorageOracleInfo, L1BatchNumber, L2ChainId, ProtocolVersionId, H256}; use zksync_utils::{bytes_to_chunks, h256_to_u256, u256_to_h256}; use crate::{ @@ -133,11 +132,6 @@ impl StateKeeperOutputHandler for BasicWitnessInputProducerOutputHandler { &mut self, updates_manager: Arc, ) -> anyhow::Result<()> { - let finished_batch = updates_manager - .l1_batch - .finished - .as_ref() - .context("L1 batch is not actually finished")?; let l1_batch_number = updates_manager.l1_batch.number; let mut connection = self.pool.connection().await?; @@ -156,19 +150,6 @@ impl StateKeeperOutputHandler for BasicWitnessInputProducerOutputHandler { .unwrap() .unwrap(); - let (_, previous_block_timestamp) = connection - .blocks_dal() - .get_l1_batch_state_root_and_timestamp(l1_batch_number - 1) - .await - .unwrap() - .unwrap(); - let previous_block_hash = connection - .blocks_dal() - .get_l1_batch_state_root(l1_batch_number - 1) - .await - .unwrap() - .expect("cannot generate witness before the root hash is computed"); - let account_code_hash = h256_to_u256(block_header.base_system_contracts_hashes.default_aa); let account_bytecode_bytes = connection .factory_deps_dal() @@ -205,25 +186,37 @@ impl StateKeeperOutputHandler for BasicWitnessInputProducerOutputHandler { hashes.len() - used_bytecodes.len() ); + let previous_batch_with_metadata = connection + .blocks_dal() + .get_l1_batch_metadata(L1BatchNumber(block_header.number.checked_sub(1).unwrap())) + .await + .unwrap() + .unwrap(); + + let StorageOracleInfo { + storage_refunds, + pubdata_costs, + } = connection + .blocks_dal() + .get_storage_oracle_info(input.block_number) + .await + .unwrap() + .unwrap(); + let result = WitnessGeneratorData { - 
block_number: l1_batch_number, - previous_batch_with_metadata: L1BatchWithMetadata {}, - last_miniblock_number: Default::default(), - previous_block_hash, - previous_block_timestamp, - block_timestamp: block_header.timestamp, + l1_batch_header: block_header.clone(), + previous_batch_with_metadata, used_bytecodes, initial_heap_content, protocol_version: block_header .protocol_version .unwrap_or(ProtocolVersionId::last_potentially_undefined()), - storage_logs: vec![], - bootloader_code_hash: Default::default(), + bootloader_code: vec![], default_account_code_hash: account_code_hash, - storage_refunds: vec![], - pubdata_costs: None, + storage_refunds, + pubdata_costs, witness_storage_memory: (), merkle_paths_input: (), }; diff --git a/prover/witness_generator/src/basic_circuits.rs b/prover/witness_generator/src/basic_circuits.rs index a0056743d688..f0c80f9ec4ea 100644 --- a/prover/witness_generator/src/basic_circuits.rs +++ b/prover/witness_generator/src/basic_circuits.rs @@ -277,16 +277,14 @@ async fn process_basic_circuits_job( connection_pool: ConnectionPool, started_at: Instant, block_number: L1BatchNumber, - job: PrepareBasicCircuitsJob, + job: WitnessGeneratorData, eip_4844_blobs: Eip4844Blobs, ) -> BasicCircuitArtifacts { - let witness_gen_input = - build_basic_circuits_witness_generator_input(&connection_pool, job, block_number).await; let (circuit_urls, queue_urls, scheduler_witness, aux_output_witness) = generate_witness( block_number, object_store, connection_pool, - witness_gen_input, + job, eip_4844_blobs, ) .await; @@ -405,49 +403,6 @@ async fn save_recursion_queue( (circuit_id, blob_url, basic_circuit_count) } -// If making changes to this method, consider moving this logic to the DAL layer and make -// `PrepareBasicCircuitsJob` have all fields of `BasicCircuitWitnessGeneratorInput`. 
-async fn build_basic_circuits_witness_generator_input( - connection_pool: &ConnectionPool, - witness_merkle_input: PrepareBasicCircuitsJob, - l1_batch_number: L1BatchNumber, -) -> BasicCircuitWitnessGeneratorInput { - let mut connection = connection_pool.connection().await.unwrap(); - let block_header = connection - .blocks_dal() - .get_l1_batch_header(l1_batch_number) - .await - .unwrap() - .unwrap(); - let initial_heap_content = connection - .blocks_dal() - .get_initial_bootloader_heap(l1_batch_number) - .await - .unwrap() - .unwrap(); - let (_, previous_block_timestamp) = connection - .blocks_dal() - .get_l1_batch_state_root_and_timestamp(l1_batch_number - 1) - .await - .unwrap() - .unwrap(); - let previous_block_hash = connection - .blocks_dal() - .get_l1_batch_state_root(l1_batch_number - 1) - .await - .unwrap() - .expect("cannot generate witness before the root hash is computed"); - BasicCircuitWitnessGeneratorInput { - block_number: l1_batch_number, - previous_block_timestamp, - previous_block_hash, - block_timestamp: block_header.timestamp, - used_bytecodes_hashes: block_header.used_contract_hashes, - initial_heap_content, - merkle_paths_input: witness_merkle_input, - } -} - async fn generate_witness( block_number: L1BatchNumber, object_store: &dyn ObjectStore, @@ -464,66 +419,19 @@ async fn generate_witness( >, BlockAuxilaryOutputWitness, ) { - let mut connection = connection_pool.connection().await.unwrap(); - let header = connection - .blocks_dal() - .get_l1_batch_header(input.block_number) - .await - .unwrap() - .unwrap(); - - let protocol_version = input.protocol_version; - - let previous_batch_with_metadata = connection - .blocks_dal() - .get_l1_batch_metadata(zksync_types::L1BatchNumber( - input.block_number.checked_sub(1).unwrap(), - )) - .await - .unwrap() - .unwrap(); - - let bootloader_code_bytes = connection - .factory_deps_dal() - .get_sealed_factory_dep(header.base_system_contracts_hashes.bootloader) - .await - .expect("Failed fetching bootloader bytecode from DB") - .expect("Bootloader bytecode should exist"); - let bootloader_code = bytes_to_chunks(&bootloader_code_bytes); - let bootloader_contents = - expand_bootloader_contents(&input.initial_heap_content, protocol_version); - - let StorageOracleInfo { - storage_refunds, - pubdata_costs, - } = connection - .blocks_dal() - .get_storage_oracle_info(input.block_number) - .await - .unwrap() - .unwrap(); - - // `DbStorageProvider` was designed to be used in API, so it accepts miniblock numbers. - // Probably, we should make it work with L1 batch numbers too. 
- let (_, last_miniblock_number) = connection - .blocks_dal() - .get_l2_block_range_of_l1_batch(input.block_number - 1) - .await - .unwrap() - .expect("L1 batch should contain at least one miniblock"); - drop(connection); + expand_bootloader_contents(&input.initial_heap_content, input.protocol_version); let mut tree = PrecalculatedMerklePathsProvider::new( input.merkle_paths_input, - input.previous_block_hash.0, + input.previous_batch_with_metadata.metadata.root_hash.0, ); let geometry_config = get_geometry_config(); let mut hasher = DefaultHasher::new(); geometry_config.hash(&mut hasher); tracing::info!( "generating witness for block {} using geometry config hash: {}", - input.block_number.0, + input.l1_batch_header.number.0, hasher.finish() ); @@ -534,8 +442,6 @@ async fn generate_witness( let (queue_sender, mut queue_receiver) = tokio::sync::mpsc::channel(1); let make_circuits = tokio::task::spawn_blocking(move || { - let connection = rt_handle.block_on(connection_pool.connection()).unwrap(); - let storage_view = StorageView::new(input.witness_storage_memory).to_rc_ptr(); let vm_storage_oracle: VmStorageOracle>, HistoryDisabled> = @@ -606,10 +512,13 @@ async fn generate_witness( recursion_urls.retain(|(circuit_id, _, _)| circuits_present.contains(circuit_id)); - scheduler_witness.previous_block_meta_hash = - previous_batch_with_metadata.metadata.meta_parameters_hash.0; + scheduler_witness.previous_block_meta_hash = input + .previous_batch_with_metadata + .metadata + .meta_parameters_hash + .0; scheduler_witness.previous_block_aux_hash = - previous_batch_with_metadata.metadata.aux_data_hash.0; + input.previous_batch_with_metadata.metadata.aux_data_hash.0; ( circuit_urls, From 25696714a1922dcedaa93513c39e82d620b90ad1 Mon Sep 17 00:00:00 2001 From: Lech <88630083+Artemka374@users.noreply.github.com> Date: Thu, 20 Jun 2024 11:01:12 +0300 Subject: [PATCH 04/56] add storage cache and migrations --- Cargo.lock | 1 + ...8_add_vm_run_data_blob_url_column.down.sql | 1 + ...458_add_vm_run_data_blob_url_column.up.sql | 2 + core/lib/object_store/src/objects.rs | 1 - core/lib/prover_interface/Cargo.toml | 1 + core/lib/prover_interface/src/inputs.rs | 39 +++++++++-------- core/lib/state/src/lib.rs | 3 +- core/lib/state/src/storage_view.rs | 43 ++++++++++++++----- .../src/batch_executor/main_executor.rs | 9 +++- .../state_keeper/src/batch_executor/mod.rs | 26 ++++++++++- core/node/state_keeper/src/metrics.rs | 1 + core/node/state_keeper/src/updates/mod.rs | 6 +++ core/node/vm_runner/src/impls/bwip.rs | 19 +++++--- core/node/vm_runner/src/process.rs | 7 +++ prover/Cargo.lock | 1 + .../witness_generator/src/basic_circuits.rs | 38 +++++++++------- 16 files changed, 143 insertions(+), 55 deletions(-) create mode 100644 core/lib/dal/migrations/20240619143458_add_vm_run_data_blob_url_column.down.sql create mode 100644 core/lib/dal/migrations/20240619143458_add_vm_run_data_blob_url_column.up.sql diff --git a/Cargo.lock b/Cargo.lock index 81acbdcf780d..b6e9129539c9 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -9136,6 +9136,7 @@ dependencies = [ "strum", "tokio", "zksync_object_store", + "zksync_state", "zksync_types", ] diff --git a/core/lib/dal/migrations/20240619143458_add_vm_run_data_blob_url_column.down.sql b/core/lib/dal/migrations/20240619143458_add_vm_run_data_blob_url_column.down.sql new file mode 100644 index 000000000000..4e5767a4a7fb --- /dev/null +++ b/core/lib/dal/migrations/20240619143458_add_vm_run_data_blob_url_column.down.sql @@ -0,0 +1 @@ +ALTER TABLE proof_generation_details DROP COLUMN IF 
EXISTS vm_run_data_blob_url; \ No newline at end of file diff --git a/core/lib/dal/migrations/20240619143458_add_vm_run_data_blob_url_column.up.sql b/core/lib/dal/migrations/20240619143458_add_vm_run_data_blob_url_column.up.sql new file mode 100644 index 000000000000..4c4a28f2d076 --- /dev/null +++ b/core/lib/dal/migrations/20240619143458_add_vm_run_data_blob_url_column.up.sql @@ -0,0 +1,2 @@ +ALTER TABLE proof_generation_details + ADD COLUMN IF NOT EXISTS vm_run_data_blob_url DEFAULT NULL; \ No newline at end of file diff --git a/core/lib/object_store/src/objects.rs b/core/lib/object_store/src/objects.rs index d67e4e5df137..dbfd9caa25d2 100644 --- a/core/lib/object_store/src/objects.rs +++ b/core/lib/object_store/src/objects.rs @@ -86,7 +86,6 @@ impl StoredObject for SnapshotFactoryDependencies { .map_err(From::from) } } - impl StoredObject for SnapshotStorageLogsChunk { const BUCKET: Bucket = Bucket::StorageSnapshot; type Key<'a> = SnapshotStorageLogsStorageKey; diff --git a/core/lib/prover_interface/Cargo.toml b/core/lib/prover_interface/Cargo.toml index 869338a8830d..6dbf65b3362b 100644 --- a/core/lib/prover_interface/Cargo.toml +++ b/core/lib/prover_interface/Cargo.toml @@ -12,6 +12,7 @@ categories.workspace = true [dependencies] zksync_object_store.workspace = true zksync_types.workspace = true +zksync_state.workspace = true # We can use the newest api to send proofs to L1. circuit_sequencer_api_1_5_0.workspace = true diff --git a/core/lib/prover_interface/src/inputs.rs b/core/lib/prover_interface/src/inputs.rs index b0241e2da13d..c2cc5556a13b 100644 --- a/core/lib/prover_interface/src/inputs.rs +++ b/core/lib/prover_interface/src/inputs.rs @@ -3,9 +3,10 @@ use std::{collections::HashMap, convert::TryInto, fmt::Debug}; use serde::{Deserialize, Serialize}; use serde_with::{serde_as, Bytes}; use zksync_object_store::{serialize_using_bincode, Bucket, StoredObject}; +pub use zksync_state::WitnessStorage; use zksync_types::{ - block::L1BatchHeader, commitment::L1BatchWithMetadata, L1BatchNumber, ProtocolVersionId, H256, - U256, + block::L1BatchHeader, commitment::L1BatchWithMetadata, witness_block_state::WitnessBlockState, + L1BatchNumber, ProtocolVersionId, H256, U256, }; const HASH_LEN: usize = H256::len_bytes(); @@ -135,19 +136,7 @@ impl PrepareBasicCircuitsJob { } } -/// Enriched `PrepareBasicCircuitsJob`. All the other fields are taken from the `l1_batches` table. 
-#[derive(Debug, Clone)]
-pub struct BasicCircuitWitnessGeneratorInput {
-    pub block_number: L1BatchNumber,
-    pub previous_block_hash: H256,
-    pub previous_block_timestamp: u64,
-    pub block_timestamp: u64,
-    pub used_bytecodes_hashes: Vec<U256>,
-    pub initial_heap_content: Vec<(usize, U256)>,
-    pub merkle_paths_input: PrepareBasicCircuitsJob,
-}
-
-pub struct WitnessGeneratorData {
+pub struct VMRunWitnessInputData {
     pub l1_batch_header: L1BatchHeader,
     pub previous_batch_with_metadata: L1BatchWithMetadata,
     pub used_bytecodes: HashMap<U256, Vec<u8>>,
@@ -158,8 +147,24 @@ pub struct WitnessGeneratorData {
     pub default_account_code_hash: U256,
     pub storage_refunds: Vec<u32>,
     pub pubdata_costs: Option<Vec<i32>>,
-    pub witness_storage_memory: (),
-    pub merkle_paths_input: PrepareBasicCircuitsJob,
+    pub witness_block_state: WitnessBlockState,
+}
+
+pub struct WitnessInputData {
+    pub vm_run_data: VMRunWitnessInputData,
+    pub merkle_paths: PrepareBasicCircuitsJob,
+}
+
+impl StoredObject for WitnessInputData {
+    const BUCKET: Bucket = Bucket::WitnessInput;
+
+    type Key<'a> = L1BatchNumber;
+
+    fn encode_key(key: Self::Key<'_>) -> String {
+        format!("witness_inputs_{key}.bin")
+    }
+
+    serialize_using_bincode!();
 }

 #[cfg(test)]
diff --git a/core/lib/state/src/lib.rs b/core/lib/state/src/lib.rs
index 48565077f8f3..f93898c56f3a 100644
--- a/core/lib/state/src/lib.rs
+++ b/core/lib/state/src/lib.rs
@@ -29,7 +29,8 @@ pub use self::{
     },
     shadow_storage::ShadowStorage,
     storage_factory::{BatchDiff, PgOrRocksdbStorage, ReadStorageFactory, RocksdbWithMemory},
-    storage_view::{StorageView, StorageViewMetrics},
+    storage_view::{StorageView, StorageViewCache, StorageViewMetrics},
+    witness::WitnessStorage,
 };

 mod cache;
diff --git a/core/lib/state/src/storage_view.rs b/core/lib/state/src/storage_view.rs
index 7756f6007eec..31853de0a853 100644
--- a/core/lib/state/src/storage_view.rs
+++ b/core/lib/state/src/storage_view.rs
@@ -45,14 +45,33 @@ pub struct StorageView<S> {
     storage_handle: S,
     // Used for caching and to get the list/count of modified keys
     modified_storage_keys: HashMap<StorageKey, StorageValue>,
+    cache: StorageViewCache,
+    metrics: StorageViewMetrics,
+}
+
+#[derive(Debug, Clone)]
+pub struct StorageViewCache {
     // Used purely for caching
     read_storage_keys: HashMap<StorageKey, StorageValue>,
     // Cache for `contains_key()` checks. The cache is only valid within one L1 batch execution.
-    initial_writes_cache: HashMap<StorageKey, bool>,
-    metrics: StorageViewMetrics,
+    initial_writes: HashMap<StorageKey, bool>,
+}
+
+impl StorageViewCache {
+    pub fn read_storage_keys(&self) -> HashMap<StorageKey, StorageValue> {
+        self.read_storage_keys.clone()
+    }
+
+    pub fn initial_writes(&self) -> HashMap<StorageKey, bool> {
+        self.initial_writes.clone()
+    }
+}

 impl<S> StorageView<S> {
+    pub fn cache(&self) -> StorageViewCache {
+        self.cache.clone()
+    }
+
     /// Returns the modified storage keys
     pub fn modified_storage_keys(&self) -> &HashMap<StorageKey, StorageValue> {
         &self.modified_storage_keys
     }
@@ -90,8 +109,10 @@ impl<S: ReadStorage + fmt::Debug> StorageView<S> {
         Self {
             storage_handle,
             modified_storage_keys: HashMap::new(),
-            read_storage_keys: HashMap::new(),
-            initial_writes_cache: HashMap::new(),
+            cache: StorageViewCache {
+                read_storage_keys: HashMap::new(),
+                initial_writes: HashMap::new(),
+            },
             metrics: StorageViewMetrics::default(),
         }
     }
@@ -102,10 +123,10 @@ impl<S: ReadStorage + fmt::Debug> StorageView<S> {
         let cached_value = self
            .modified_storage_keys
            .get(key)
-            .or_else(|| self.read_storage_keys.get(key));
+            .or_else(|| self.cache.read_storage_keys.get(key));
         cached_value.copied().unwrap_or_else(|| {
             let value = self.storage_handle.read_value(key);
-            self.read_storage_keys.insert(*key, value);
+            self.cache.read_storage_keys.insert(*key, value);
             self.metrics.time_spent_on_storage_missed += started_at.elapsed();
             self.metrics.storage_invocations_missed += 1;
             value
@@ -114,8 +135,8 @@ impl<S: ReadStorage + fmt::Debug> StorageView<S> {
     fn cache_size(&self) -> usize {
         self.modified_storage_keys.len() * mem::size_of::<(StorageKey, StorageValue)>()
-            + self.initial_writes_cache.len() * mem::size_of::<(StorageKey, bool)>()
-            + self.read_storage_keys.len() * mem::size_of::<(StorageKey, StorageValue)>()
+            + self.cache.initial_writes.len() * mem::size_of::<(StorageKey, bool)>()
+            + self.cache.read_storage_keys.len() * mem::size_of::<(StorageKey, StorageValue)>()
     }

     /// Returns the current metrics.
@@ -153,11 +174,11 @@ impl<S: ReadStorage + fmt::Debug> ReadStorage for StorageView<S> {
     /// Only keys contained in the underlying storage will return `false`. If a key was
     /// inserted using [`Self::set_value()`], it will still return `true`.
     fn is_write_initial(&mut self, key: &StorageKey) -> bool {
-        if let Some(&is_write_initial) = self.initial_writes_cache.get(key) {
+        if let Some(&is_write_initial) = self.cache.initial_writes.get(key) {
             is_write_initial
         } else {
             let is_write_initial = self.storage_handle.is_write_initial(key);
-            self.initial_writes_cache.insert(*key, is_write_initial);
+            self.cache.initial_writes.insert(*key, is_write_initial);
             is_write_initial
         }
     }
@@ -173,7 +194,7 @@ impl<S: ReadStorage + fmt::Debug> ReadStorage for StorageView<S> {

 impl<S: ReadStorage + fmt::Debug> WriteStorage for StorageView<S> {
     fn read_storage_keys(&self) -> &HashMap<StorageKey, StorageValue> {
-        &self.read_storage_keys
+        &self.cache.read_storage_keys
     }

     fn set_value(&mut self, key: StorageKey, value: StorageValue) -> StorageValue {
diff --git a/core/node/state_keeper/src/batch_executor/main_executor.rs b/core/node/state_keeper/src/batch_executor/main_executor.rs
index a16b9920dd6e..70cd8774208e 100644
--- a/core/node/state_keeper/src/batch_executor/main_executor.rs
+++ b/core/node/state_keeper/src/batch_executor/main_executor.rs
@@ -1,4 +1,4 @@
-use std::sync::Arc;
+use std::{collections::HashMap, sync::Arc};

 use anyhow::Context as _;
 use async_trait::async_trait;
@@ -18,7 +18,7 @@ use tokio::{
 };
 use zksync_shared_metrics::{InteractionType, TxStage, APP_METRICS};
 use zksync_state::{ReadStorage, ReadStorageFactory, StorageView, WriteStorage};
-use zksync_types::{vm_trace::Call, Transaction};
+use zksync_types::{vm_trace::Call, StorageKey, StorageValue, Transaction};
 use zksync_utils::bytecode::CompressedBytecodeInfo;

 use super::{BatchExecutor, BatchExecutorHandle, Command, TxExecutionResult};
@@ -141,6 +141,11 @@ impl CommandReceiver {
                         .observe(metrics.time_spent_on_set_value);
                     return;
                 }
+                Command::StorageViewCache(resp) => {
+                    if resp.send((**storage_view).cache()).is_err() {
+                        break;
+                    }
+                }
             }
         }
         // State keeper can exit because of stop signal, so it's OK to exit mid-batch.
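Together with the `WitnessStorage` introduced in patch 02, the cache handed out above is what lets the witness generator later replay a batch's storage accesses without Postgres. A hedged sketch of the intended round trip (the struct and field names match the patch; obtaining `cache` from the executor handle is shown in the next file):

```rust
use zksync_state::{StorageView, StorageViewCache, WitnessStorage};
use zksync_types::witness_block_state::WitnessBlockState;

// `cache` would come from `BatchExecutorHandle::storage_view_cache()` (added below).
fn replayable_storage(cache: StorageViewCache) -> StorageView<WitnessStorage<'static>> {
    // Freeze the recorded reads and initial-write checks into a serializable snapshot...
    let block_state = WitnessBlockState {
        read_storage_key: cache.read_storage_keys(),
        is_write_initial: cache.initial_writes(),
    };
    // ...and serve it back through the same `ReadStorage` interface, no DB required.
    StorageView::new(WitnessStorage::new(block_state))
}
```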
diff --git a/core/node/state_keeper/src/batch_executor/mod.rs b/core/node/state_keeper/src/batch_executor/mod.rs
index 8703831f3952..08a668ef827c 100644
--- a/core/node/state_keeper/src/batch_executor/mod.rs
+++ b/core/node/state_keeper/src/batch_executor/mod.rs
@@ -9,7 +9,7 @@ use tokio::{
     sync::{mpsc, oneshot, watch},
     task::JoinHandle,
 };
-use zksync_state::ReadStorageFactory;
+use zksync_state::{ReadStorageFactory, StorageViewCache};
 use zksync_types::{vm_trace::Call, Transaction};
 use zksync_utils::bytecode::CompressedBytecodeInfo;

@@ -229,6 +229,29 @@ impl BatchExecutorHandle {
         latency.observe();
         Ok(finished_batch)
     }
+
+    pub async fn storage_view_cache(mut self) -> anyhow::Result<StorageViewCache> {
+        let (response_sender, response_receiver) = oneshot::channel();
+        let send_failed = self
+            .commands
+            .send(Command::StorageViewCache(response_sender))
+            .await
+            .is_err();
+        if send_failed {
+            return Err(self.handle.wait_for_error().await);
+        }
+
+        let latency = EXECUTOR_METRICS.batch_executor_command_response_time
+            [&ExecutorCommand::StorageViewCache]
+            .start();
+        let storage_view_cache = match response_receiver.await {
+            Ok(cache) => cache,
+            Err(_) => return Err(self.handle.wait_for_error().await),
+        };
+        self.handle.wait().await?;
+        latency.observe();
+        Ok(storage_view_cache)
+    }
 }

 #[derive(Debug)]
@@ -237,4 +260,5 @@ pub(super) enum Command {
     StartNextL2Block(L2BlockEnv, oneshot::Sender<()>),
     RollbackLastTx(oneshot::Sender<()>),
     FinishBatch(oneshot::Sender<FinishedL1Batch>),
+    StorageViewCache(oneshot::Sender<StorageViewCache>),
 }
diff --git a/core/node/state_keeper/src/metrics.rs b/core/node/state_keeper/src/metrics.rs
index 66c6e7933e8e..88750e7420d2 100644
--- a/core/node/state_keeper/src/metrics.rs
+++ b/core/node/state_keeper/src/metrics.rs
@@ -444,6 +444,7 @@ pub(super) enum ExecutorCommand {
     StartNextL2Block,
     RollbackLastTx,
     FinishBatch,
+    StorageViewCache,
 }

 const GAS_PER_NANOSECOND_BUCKETS: Buckets = Buckets::values(&[
diff --git a/core/node/state_keeper/src/updates/mod.rs b/core/node/state_keeper/src/updates/mod.rs
index 6f920464cc06..f04584c9166d 100644
--- a/core/node/state_keeper/src/updates/mod.rs
+++ b/core/node/state_keeper/src/updates/mod.rs
@@ -3,6 +3,7 @@ use multivm::{
     utils::get_batch_base_fee,
 };
 use zksync_contracts::BaseSystemContractsHashes;
+use zksync_state::StorageViewCache;
 use zksync_types::{
     block::BlockGasCount, fee_model::BatchFeeInput,
     storage_writes_deduplicator::StorageWritesDeduplicator,
@@ -35,6 +36,7 @@ pub struct UpdatesManager {
     base_fee_per_gas: u64,
     base_system_contract_hashes: BaseSystemContractsHashes,
     protocol_version: ProtocolVersionId,
+    pub storage_view_cache: StorageViewCache,
     pub l1_batch: L1BatchUpdates,
     pub l2_block: L2BlockUpdates,
     pub storage_writes_deduplicator: StorageWritesDeduplicator,
@@ -153,6 +155,10 @@ impl UpdatesManager {
         latency.observe();
     }

+    pub fn update_storage_view_cache(&mut self, storage_view_cache: StorageViewCache) {
+        self.storage_view_cache = storage_view_cache;
+    }
+
     /// Pushes a new L2 block with the specified timestamp into this manager. The previously
     /// held L2 block is considered sealed and is used to extend the L1 batch data.
pub fn push_l2_block(&mut self, l2_block_params: L2BlockParams) { diff --git a/core/node/vm_runner/src/impls/bwip.rs b/core/node/vm_runner/src/impls/bwip.rs index 39c9ed79127d..8f95721b97f1 100644 --- a/core/node/vm_runner/src/impls/bwip.rs +++ b/core/node/vm_runner/src/impls/bwip.rs @@ -3,9 +3,12 @@ use std::{collections::HashSet, sync::Arc}; use async_trait::async_trait; use tokio::sync::watch; use zksync_dal::{Connection, ConnectionPool, Core, CoreDal}; -use zksync_prover_interface::inputs::WitnessGeneratorData; +use zksync_prover_interface::inputs::VMRunWitnessInputData; use zksync_state_keeper::{MainBatchExecutor, StateKeeperOutputHandler, UpdatesManager}; -use zksync_types::{block::StorageOracleInfo, L1BatchNumber, L2ChainId, ProtocolVersionId, H256}; +use zksync_types::{ + block::StorageOracleInfo, witness_block_state::WitnessBlockState, L1BatchNumber, L2ChainId, + ProtocolVersionId, H256, +}; use zksync_utils::{bytes_to_chunks, h256_to_u256, u256_to_h256}; use crate::{ @@ -198,12 +201,17 @@ impl StateKeeperOutputHandler for BasicWitnessInputProducerOutputHandler { pubdata_costs, } = connection .blocks_dal() - .get_storage_oracle_info(input.block_number) + .get_storage_oracle_info(block_header.number) .await .unwrap() .unwrap(); - let result = WitnessGeneratorData { + let block_state = WitnessBlockState { + read_storage_key: updates_manager.storage_view_cache.read_storage_keys(), + is_write_initial: updates_manager.storage_view_cache.initial_writes(), + }; + + let result = VMRunWitnessInputData { l1_batch_header: block_header.clone(), previous_batch_with_metadata, used_bytecodes, @@ -217,8 +225,7 @@ impl StateKeeperOutputHandler for BasicWitnessInputProducerOutputHandler { default_account_code_hash: account_code_hash, storage_refunds, pubdata_costs, - witness_storage_memory: (), - merkle_paths_input: (), + witness_block_state: block_state, }; Ok(()) diff --git a/core/node/vm_runner/src/process.rs b/core/node/vm_runner/src/process.rs index 945d35477ce6..d64b24dce184 100644 --- a/core/node/vm_runner/src/process.rs +++ b/core/node/vm_runner/src/process.rs @@ -115,6 +115,13 @@ impl VmRunner { .await .context("failed finishing L1 batch in executor")?; updates_manager.finish_batch(finished_batch); + + let storage_view_cache = batch_executor + .storage_view_cache() + .await + .context("Failed getting storage view cache")?; + updates_manager.update_storage_view_cache(storage_view_cache); + latency.observe(); output_handler .handle_l1_batch(Arc::new(updates_manager)) diff --git a/prover/Cargo.lock b/prover/Cargo.lock index 44c2a8b8395f..4fe307f41478 100644 --- a/prover/Cargo.lock +++ b/prover/Cargo.lock @@ -9388,6 +9388,7 @@ dependencies = [ "serde_with", "strum", "zksync_object_store", + "zksync_state", "zksync_types", ] diff --git a/prover/witness_generator/src/basic_circuits.rs b/prover/witness_generator/src/basic_circuits.rs index f0c80f9ec4ea..d49d01b65a4a 100644 --- a/prover/witness_generator/src/basic_circuits.rs +++ b/prover/witness_generator/src/basic_circuits.rs @@ -36,9 +36,7 @@ use zksync_prover_fri_types::{ AuxOutputWitnessWrapper, }; use zksync_prover_fri_utils::get_recursive_layer_circuit_id_for_base_layer; -use zksync_prover_interface::inputs::{ - BasicCircuitWitnessGeneratorInput, PrepareBasicCircuitsJob, WitnessGeneratorData, -}; +use zksync_prover_interface::inputs::{PrepareBasicCircuitsJob, WitnessInputData}; use zksync_queued_job_processor::JobProcessor; use zksync_state::{PostgresStorage, StorageView}; use zksync_types::{ @@ -277,7 +275,7 @@ async fn 
process_basic_circuits_job( connection_pool: ConnectionPool, started_at: Instant, block_number: L1BatchNumber, - job: WitnessGeneratorData, + job: WitnessInputData, eip_4844_blobs: Eip4844Blobs, ) -> BasicCircuitArtifacts { let (circuit_urls, queue_urls, scheduler_witness, aux_output_witness) = generate_witness( @@ -407,7 +405,7 @@ async fn generate_witness( block_number: L1BatchNumber, object_store: &dyn ObjectStore, connection_pool: ConnectionPool, - input: WitnessGeneratorData, + input: WitnessInputData, eip_4844_blobs: Eip4844Blobs, ) -> ( Vec<(u8, String)>, @@ -419,19 +417,26 @@ async fn generate_witness( >, BlockAuxilaryOutputWitness, ) { - let bootloader_contents = - expand_bootloader_contents(&input.initial_heap_content, input.protocol_version); + let bootloader_contents = expand_bootloader_contents( + &input.vm_run_data.initial_heap_content, + input.vm_run_data.protocol_version, + ); let mut tree = PrecalculatedMerklePathsProvider::new( - input.merkle_paths_input, - input.previous_batch_with_metadata.metadata.root_hash.0, + input.vm_run_data.merkle_paths_input, + input + .vm_run_data + .previous_batch_with_metadata + .metadata + .root_hash + .0, ); let geometry_config = get_geometry_config(); let mut hasher = DefaultHasher::new(); geometry_config.hash(&mut hasher); tracing::info!( "generating witness for block {} using geometry config hash: {}", - input.l1_batch_header.number.0, + input.vm_run_data.l1_batch_header.number.0, hasher.finish() ); @@ -442,14 +447,15 @@ async fn generate_witness( let (queue_sender, mut queue_receiver) = tokio::sync::mpsc::channel(1); let make_circuits = tokio::task::spawn_blocking(move || { - let storage_view = StorageView::new(input.witness_storage_memory).to_rc_ptr(); + let storage_view = StorageView::new(input.vm_run_data.witness_block_state).to_rc_ptr(); let vm_storage_oracle: VmStorageOracle>, HistoryDisabled> = VmStorageOracle::new(storage_view.clone()); let storage_oracle = StorageOracle::new( vm_storage_oracle, - input.storage_refunds, + input.vm_run_data.storage_refunds, input + .vm_run_data .pubdata_costs .expect("pubdata costs should be present"), ); @@ -462,13 +468,13 @@ async fn generate_witness( let (scheduler_witness, block_witness) = zkevm_test_harness::external_calls::run( Address::zero(), BOOTLOADER_ADDRESS, - input.bootloader_code, + input.vm_run_data.bootloader_code, bootloader_contents, false, - input.default_account_code_hash, + input.vm_run_data.default_account_code_hash, // NOTE: this will be evm_simulator_code_hash in future releases - input.default_account_code_hash, - input.used_bytecodes, + input.vm_run_data.default_account_code_hash, + input.vm_run_data.used_bytecodes, Vec::default(), MAX_CYCLES_FOR_TX as usize, geometry_config, From 5da26bfe6e1a9d0ae01022c93c4a07d7e963f43f Mon Sep 17 00:00:00 2001 From: Lech <88630083+Artemka374@users.noreply.github.com> Date: Thu, 20 Jun 2024 15:56:29 +0300 Subject: [PATCH 05/56] update some db stuff --- Cargo.lock | 1 + ...8_add_vm_run_data_blob_url_column.down.sql | 3 +- ...458_add_vm_run_data_blob_url_column.up.sql | 10 ++- core/lib/dal/src/vm_runner_dal.rs | 76 +++++++++++++++++++ core/lib/prover_interface/src/inputs.rs | 12 +++ core/node/vm_runner/Cargo.toml | 1 + core/node/vm_runner/src/impls/bwip.rs | 24 ++++-- .../witness_generator/src/basic_circuits.rs | 5 +- 8 files changed, 120 insertions(+), 12 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 4f05fe3836aa..9e60e20642d7 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -9312,6 +9312,7 @@ dependencies = [ "zksync_dal", 
"zksync_node_genesis", "zksync_node_test_utils", + "zksync_object_store", "zksync_prover_interface", "zksync_state", "zksync_state_keeper", diff --git a/core/lib/dal/migrations/20240619143458_add_vm_run_data_blob_url_column.down.sql b/core/lib/dal/migrations/20240619143458_add_vm_run_data_blob_url_column.down.sql index 4e5767a4a7fb..ea3f2ae3131d 100644 --- a/core/lib/dal/migrations/20240619143458_add_vm_run_data_blob_url_column.down.sql +++ b/core/lib/dal/migrations/20240619143458_add_vm_run_data_blob_url_column.down.sql @@ -1 +1,2 @@ -ALTER TABLE proof_generation_details DROP COLUMN IF EXISTS vm_run_data_blob_url; \ No newline at end of file +ALTER TABLE proof_generation_details DROP COLUMN IF EXISTS vm_run_data_blob_url; +DROP TABLE IF EXISTS vm_runner_protective_reads; diff --git a/core/lib/dal/migrations/20240619143458_add_vm_run_data_blob_url_column.up.sql b/core/lib/dal/migrations/20240619143458_add_vm_run_data_blob_url_column.up.sql index 4c4a28f2d076..b93de38d728f 100644 --- a/core/lib/dal/migrations/20240619143458_add_vm_run_data_blob_url_column.up.sql +++ b/core/lib/dal/migrations/20240619143458_add_vm_run_data_blob_url_column.up.sql @@ -1,2 +1,10 @@ ALTER TABLE proof_generation_details - ADD COLUMN IF NOT EXISTS vm_run_data_blob_url DEFAULT NULL; \ No newline at end of file + ADD COLUMN IF NOT EXISTS vm_run_data_blob_url DEFAULT NULL; + +CREATE TABLE IF NOT EXISTS vm_runner_bwip +( + l1_batch_number BIGINT NOT NULL PRIMARY KEY, + created_at TIMESTAMP NOT NULL, + updated_at TIMESTAMP NOT NULL, + time_taken TIME +); diff --git a/core/lib/dal/src/vm_runner_dal.rs b/core/lib/dal/src/vm_runner_dal.rs index 2d17ff3f9fca..e4d08fd85cf6 100644 --- a/core/lib/dal/src/vm_runner_dal.rs +++ b/core/lib/dal/src/vm_runner_dal.rs @@ -84,4 +84,80 @@ impl VmRunnerDal<'_, '_> { .await?; Ok(()) } + + pub async fn get_bwip_latest_processed_batch( + &mut self, + default_batch: L1BatchNumber, + ) -> DalResult { + let row = sqlx::query!( + r#" + SELECT + COALESCE(MAX(l1_batch_number), $1) AS "last_processed_l1_batch!" + FROM + vm_runner_bwip + "#, + default_batch.0 as i32 + ) + .instrument("get_bwip_latest_processed_batch") + .report_latency() + .fetch_one(self.storage) + .await?; + Ok(L1BatchNumber(row.last_processed_l1_batch as u32)) + } + + pub async fn get_bwip_last_ready_batch( + &mut self, + default_batch: L1BatchNumber, + window_size: u32, + ) -> DalResult { + let row = sqlx::query!( + r#" + WITH + available_batches AS ( + SELECT + MAX(number) AS "last_batch" + FROM + l1_batches + ), + processed_batches AS ( + SELECT + COALESCE(MAX(l1_batch_number), $1) + $2 AS "last_ready_batch" + FROM + vm_runner_bwip + ) + SELECT + LEAST(last_batch, last_ready_batch) AS "last_ready_batch!" 
+ FROM + available_batches + FULL JOIN processed_batches ON TRUE + "#, + default_batch.0 as i32, + window_size as i32 + ) + .instrument("get_bwip_last_ready_batch") + .report_latency() + .fetch_one(self.storage) + .await?; + Ok(L1BatchNumber(row.last_ready_batch as u32)) + } + + pub async fn mark_bwip_batch_as_completed( + &mut self, + l1_batch_number: L1BatchNumber, + ) -> DalResult<()> { + sqlx::query!( + r#" + INSERT INTO + vm_runner_bwip (l1_batch_number, created_at, updated_at) + VALUES + ($1, NOW(), NOW()) + "#, + i64::from(l1_batch_number.0), + ) + .instrument("mark_bwip_batch_as_completed") + .report_latency() + .execute(self.storage) + .await?; + Ok(()) + } } diff --git a/core/lib/prover_interface/src/inputs.rs b/core/lib/prover_interface/src/inputs.rs index c2cc5556a13b..6e2d8a3571fa 100644 --- a/core/lib/prover_interface/src/inputs.rs +++ b/core/lib/prover_interface/src/inputs.rs @@ -150,6 +150,18 @@ pub struct VMRunWitnessInputData { pub witness_block_state: WitnessBlockState, } +impl StoredObject for VMRunWitnessInputData { + const BUCKET: Bucket = Bucket::WitnessInput; + + type Key<'a> = L1BatchNumber; + + fn encode_key(key: Self::Key<'_>) -> String { + format!("vm_run_data_{key}.bin") + } + + serialize_using_bincode!(); +} + pub struct WitnessInputData { pub vm_run_data: VMRunWitnessInputData, pub merkle_paths: PrepareBasicCircuitsJob, diff --git a/core/node/vm_runner/Cargo.toml b/core/node/vm_runner/Cargo.toml index 59db7168cc73..fdaa76acbcdc 100644 --- a/core/node/vm_runner/Cargo.toml +++ b/core/node/vm_runner/Cargo.toml @@ -19,6 +19,7 @@ zksync_storage.workspace = true zksync_state_keeper.workspace = true zksync_utils.workspace = true zksync_prover_interface.workspace = true +zksync_object_store.workspace = true vm_utils.workspace = true tokio = { workspace = true, features = ["time"] } diff --git a/core/node/vm_runner/src/impls/bwip.rs b/core/node/vm_runner/src/impls/bwip.rs index 8f95721b97f1..12c4514bfb68 100644 --- a/core/node/vm_runner/src/impls/bwip.rs +++ b/core/node/vm_runner/src/impls/bwip.rs @@ -3,6 +3,7 @@ use std::{collections::HashSet, sync::Arc}; use async_trait::async_trait; use tokio::sync::watch; use zksync_dal::{Connection, ConnectionPool, Core, CoreDal}; +use zksync_object_store::ObjectStore; use zksync_prover_interface::inputs::VMRunWitnessInputData; use zksync_state_keeper::{MainBatchExecutor, StateKeeperOutputHandler, UpdatesManager}; use zksync_types::{ @@ -27,6 +28,7 @@ impl BasicWitnessInputProducer { /// regulates how many batches this component can handle at the same time. 
pub async fn new( pool: ConnectionPool, + object_store: Arc, rocksdb_path: String, chain_id: L2ChainId, window_size: u32, @@ -34,8 +36,10 @@ impl BasicWitnessInputProducer { let io = BasicWitnessInputProducerIo { window_size }; let (loader, loader_task) = VmRunnerStorage::new(pool.clone(), rocksdb_path, io.clone(), chain_id).await?; - let output_handler_factory = - BasicWitnessInputProducerOutputHandlerFactory { pool: pool.clone() }; + let output_handler_factory = BasicWitnessInputProducerOutputHandlerFactory { + pool: pool.clone(), + object_store, + }; let (output_handler_factory, output_handler_factory_task) = ConcurrentOutputHandlerFactory::new(pool.clone(), io.clone(), output_handler_factory); let batch_processor = MainBatchExecutor::new(false, false); @@ -79,13 +83,14 @@ pub struct BasicWitnessInputProducerTasks { #[derive(Debug, Clone)] pub struct BasicWitnessInputProducerIo { + first_processed_batch: L1BatchNumber, window_size: u32, } #[async_trait] impl VmRunnerIo for BasicWitnessInputProducerIo { fn name(&self) -> &'static str { - "protective_reads_writer" + "basic_witness_input_producer" } async fn latest_processed_batch( @@ -94,7 +99,7 @@ impl VmRunnerIo for BasicWitnessInputProducerIo { ) -> anyhow::Result { Ok(conn .vm_runner_dal() - .get_protective_reads_latest_processed_batch() + .get_bwip_latest_processed_batch(self.first_processed_batch) .await?) } @@ -104,7 +109,7 @@ impl VmRunnerIo for BasicWitnessInputProducerIo { ) -> anyhow::Result { Ok(conn .vm_runner_dal() - .get_protective_reads_last_ready_batch(self.window_size) + .get_bwip_last_ready_batch(self.first_processed_batch, self.window_size) .await?) } @@ -115,7 +120,7 @@ impl VmRunnerIo for BasicWitnessInputProducerIo { ) -> anyhow::Result<()> { Ok(conn .vm_runner_dal() - .mark_protective_reads_batch_as_completed(l1_batch_number) + .mark_bwip_batch_as_completed(l1_batch_number) .await?) 
} } @@ -123,6 +128,7 @@ impl VmRunnerIo for BasicWitnessInputProducerIo { #[derive(Debug)] struct BasicWitnessInputProducerOutputHandler { pool: ConnectionPool, + object_store: Arc, } #[async_trait] @@ -228,6 +234,8 @@ impl StateKeeperOutputHandler for BasicWitnessInputProducerOutputHandler { witness_block_state: block_state, }; + self.object_store.put(l1_batch_number, &result).await?; + Ok(()) } } @@ -235,6 +243,7 @@ impl StateKeeperOutputHandler for BasicWitnessInputProducerOutputHandler { #[derive(Debug)] struct BasicWitnessInputProducerOutputHandlerFactory { pool: ConnectionPool, + object_store: Arc, } #[async_trait] @@ -245,8 +254,7 @@ impl OutputHandlerFactory for BasicWitnessInputProducerOutputHandlerFactory { ) -> anyhow::Result> { Ok(Box::new(BasicWitnessInputProducerOutputHandler { pool: self.pool.clone(), + object_store: self.object_store.clone(), })) } } - -pub struct BasicCircuitWitnessGeneratorInput {} diff --git a/prover/witness_generator/src/basic_circuits.rs b/prover/witness_generator/src/basic_circuits.rs index d49d01b65a4a..20b4dc5029be 100644 --- a/prover/witness_generator/src/basic_circuits.rs +++ b/prover/witness_generator/src/basic_circuits.rs @@ -38,7 +38,7 @@ use zksync_prover_fri_types::{ use zksync_prover_fri_utils::get_recursive_layer_circuit_id_for_base_layer; use zksync_prover_interface::inputs::{PrepareBasicCircuitsJob, WitnessInputData}; use zksync_queued_job_processor::JobProcessor; -use zksync_state::{PostgresStorage, StorageView}; +use zksync_state::{PostgresStorage, StorageView, WitnessStorage}; use zksync_types::{ basic_fri_types::{AggregationRound, Eip4844Blobs}, block::StorageOracleInfo, @@ -447,7 +447,8 @@ async fn generate_witness( let (queue_sender, mut queue_receiver) = tokio::sync::mpsc::channel(1); let make_circuits = tokio::task::spawn_blocking(move || { - let storage_view = StorageView::new(input.vm_run_data.witness_block_state).to_rc_ptr(); + let witness_storage = WitnessStorage::new(input.vm_run_data.witness_block_state); + let storage_view = StorageView::new(witness_storage).to_rc_ptr(); let vm_storage_oracle: VmStorageOracle>, HistoryDisabled> = VmStorageOracle::new(storage_view.clone()); From da3fb9848672d3ddf41956447e9e3acb5dff7606 Mon Sep 17 00:00:00 2001 From: Lech <88630083+Artemka374@users.noreply.github.com> Date: Fri, 21 Jun 2024 17:50:56 +0300 Subject: [PATCH 06/56] fix some errors --- ...1ad3349e4d932d3de64b6dade97481cd171a4.json | 23 +++++++++++++++++++ ...373c57d2dc6ec03d84f91a221ab8097e587cc.json | 14 +++++++++++ ...a55c6fa93f854a5a9777778acb66275cc7be7.json | 22 ++++++++++++++++++ ...458_add_vm_run_data_blob_url_column.up.sql | 2 +- core/lib/prover_interface/src/inputs.rs | 3 +++ core/lib/state/src/storage_view.rs | 2 +- .../types/src/storage/witness_block_state.rs | 2 +- .../src/batch_executor/main_executor.rs | 2 +- .../state_keeper/src/batch_executor/mod.rs | 2 +- core/node/state_keeper/src/testonly/mod.rs | 2 ++ .../src/testonly/test_batch_executor.rs | 4 ++++ core/node/state_keeper/src/updates/mod.rs | 1 + core/node/vm_runner/src/impls/bwip.rs | 6 ++++- core/node/vm_runner/src/process.rs | 11 +++++---- 14 files changed, 85 insertions(+), 11 deletions(-) create mode 100644 core/lib/dal/.sqlx/query-2482716de397893c52840eb39391ad3349e4d932d3de64b6dade97481cd171a4.json create mode 100644 core/lib/dal/.sqlx/query-a3f24c7f2298398517db009f7e5373c57d2dc6ec03d84f91a221ab8097e587cc.json create mode 100644 core/lib/dal/.sqlx/query-a4658abdec913690849378a85b2a55c6fa93f854a5a9777778acb66275cc7be7.json diff --git 
a/core/lib/dal/.sqlx/query-2482716de397893c52840eb39391ad3349e4d932d3de64b6dade97481cd171a4.json b/core/lib/dal/.sqlx/query-2482716de397893c52840eb39391ad3349e4d932d3de64b6dade97481cd171a4.json new file mode 100644 index 000000000000..b5c9869d1467 --- /dev/null +++ b/core/lib/dal/.sqlx/query-2482716de397893c52840eb39391ad3349e4d932d3de64b6dade97481cd171a4.json @@ -0,0 +1,23 @@ +{ + "db_name": "PostgreSQL", + "query": "\n WITH\n available_batches AS (\n SELECT\n MAX(number) AS \"last_batch\"\n FROM\n l1_batches\n ),\n processed_batches AS (\n SELECT\n COALESCE(MAX(l1_batch_number), $1) + $2 AS \"last_ready_batch\"\n FROM\n vm_runner_bwip\n )\n SELECT\n LEAST(last_batch, last_ready_batch) AS \"last_ready_batch!\"\n FROM\n available_batches\n FULL JOIN processed_batches ON TRUE\n ", + "describe": { + "columns": [ + { + "ordinal": 0, + "name": "last_ready_batch!", + "type_info": "Int8" + } + ], + "parameters": { + "Left": [ + "Int8", + "Int8" + ] + }, + "nullable": [ + true + ] + }, + "hash": "2482716de397893c52840eb39391ad3349e4d932d3de64b6dade97481cd171a4" +} diff --git a/core/lib/dal/.sqlx/query-a3f24c7f2298398517db009f7e5373c57d2dc6ec03d84f91a221ab8097e587cc.json b/core/lib/dal/.sqlx/query-a3f24c7f2298398517db009f7e5373c57d2dc6ec03d84f91a221ab8097e587cc.json new file mode 100644 index 000000000000..617fd4e81ea1 --- /dev/null +++ b/core/lib/dal/.sqlx/query-a3f24c7f2298398517db009f7e5373c57d2dc6ec03d84f91a221ab8097e587cc.json @@ -0,0 +1,14 @@ +{ + "db_name": "PostgreSQL", + "query": "\n INSERT INTO\n vm_runner_bwip (l1_batch_number, created_at, updated_at)\n VALUES\n ($1, NOW(), NOW())\n ", + "describe": { + "columns": [], + "parameters": { + "Left": [ + "Int8" + ] + }, + "nullable": [] + }, + "hash": "a3f24c7f2298398517db009f7e5373c57d2dc6ec03d84f91a221ab8097e587cc" +} diff --git a/core/lib/dal/.sqlx/query-a4658abdec913690849378a85b2a55c6fa93f854a5a9777778acb66275cc7be7.json b/core/lib/dal/.sqlx/query-a4658abdec913690849378a85b2a55c6fa93f854a5a9777778acb66275cc7be7.json new file mode 100644 index 000000000000..4fb3640a169e --- /dev/null +++ b/core/lib/dal/.sqlx/query-a4658abdec913690849378a85b2a55c6fa93f854a5a9777778acb66275cc7be7.json @@ -0,0 +1,22 @@ +{ + "db_name": "PostgreSQL", + "query": "\n SELECT\n COALESCE(MAX(l1_batch_number), $1) AS \"last_processed_l1_batch!\"\n FROM\n vm_runner_bwip\n ", + "describe": { + "columns": [ + { + "ordinal": 0, + "name": "last_processed_l1_batch!", + "type_info": "Int8" + } + ], + "parameters": { + "Left": [ + "Int8" + ] + }, + "nullable": [ + null + ] + }, + "hash": "a4658abdec913690849378a85b2a55c6fa93f854a5a9777778acb66275cc7be7" +} diff --git a/core/lib/dal/migrations/20240619143458_add_vm_run_data_blob_url_column.up.sql b/core/lib/dal/migrations/20240619143458_add_vm_run_data_blob_url_column.up.sql index b93de38d728f..1fe90c191411 100644 --- a/core/lib/dal/migrations/20240619143458_add_vm_run_data_blob_url_column.up.sql +++ b/core/lib/dal/migrations/20240619143458_add_vm_run_data_blob_url_column.up.sql @@ -1,5 +1,5 @@ ALTER TABLE proof_generation_details - ADD COLUMN IF NOT EXISTS vm_run_data_blob_url DEFAULT NULL; + ADD COLUMN IF NOT EXISTS vm_run_data_blob_url TEXT DEFAULT NULL; CREATE TABLE IF NOT EXISTS vm_runner_bwip ( diff --git a/core/lib/prover_interface/src/inputs.rs b/core/lib/prover_interface/src/inputs.rs index 6e2d8a3571fa..8cfe0e6762bb 100644 --- a/core/lib/prover_interface/src/inputs.rs +++ b/core/lib/prover_interface/src/inputs.rs @@ -2,6 +2,7 @@ use std::{collections::HashMap, convert::TryInto, fmt::Debug}; use 
serde::{Deserialize, Serialize}; use serde_with::{serde_as, Bytes}; + use zksync_object_store::{serialize_using_bincode, Bucket, StoredObject}; pub use zksync_state::WitnessStorage; use zksync_types::{ @@ -136,6 +137,7 @@ impl PrepareBasicCircuitsJob { } } +#[derive(Debug, Clone, Serialize, Deserialize)] pub struct VMRunWitnessInputData { pub l1_batch_header: L1BatchHeader, pub previous_batch_with_metadata: L1BatchWithMetadata, @@ -162,6 +164,7 @@ impl StoredObject for VMRunWitnessInputData { serialize_using_bincode!(); } +#[derive(Debug, Clone, Serialize, Deserialize)] pub struct WitnessInputData { pub vm_run_data: VMRunWitnessInputData, pub merkle_paths: PrepareBasicCircuitsJob, diff --git a/core/lib/state/src/storage_view.rs b/core/lib/state/src/storage_view.rs index 31853de0a853..1eb5488129e3 100644 --- a/core/lib/state/src/storage_view.rs +++ b/core/lib/state/src/storage_view.rs @@ -49,7 +49,7 @@ pub struct StorageView { metrics: StorageViewMetrics, } -#[derive(Debug, Clone)] +#[derive(Debug, Default, Clone)] pub struct StorageViewCache { // Used purely for caching read_storage_keys: HashMap, diff --git a/core/lib/types/src/storage/witness_block_state.rs b/core/lib/types/src/storage/witness_block_state.rs index 63ee1ba1c566..cae3eb892c6e 100644 --- a/core/lib/types/src/storage/witness_block_state.rs +++ b/core/lib/types/src/storage/witness_block_state.rs @@ -5,7 +5,7 @@ use serde::{Deserialize, Serialize}; use crate::{StorageKey, StorageValue}; /// Storage data used during Witness Generation. -#[derive(Debug, Default, Serialize, Deserialize)] +#[derive(Debug, Default, Clone, Serialize, Deserialize)] pub struct WitnessBlockState { pub read_storage_key: HashMap, pub is_write_initial: HashMap, diff --git a/core/node/state_keeper/src/batch_executor/main_executor.rs b/core/node/state_keeper/src/batch_executor/main_executor.rs index d08a4a581fac..9a4f87294268 100644 --- a/core/node/state_keeper/src/batch_executor/main_executor.rs +++ b/core/node/state_keeper/src/batch_executor/main_executor.rs @@ -148,7 +148,7 @@ impl CommandReceiver { return; } Command::StorageViewCache(resp) => { - if resp.send((**storage_view).cache()).is_err() { + if resp.send((*storage_view).borrow().cache()).is_err() { break; } } diff --git a/core/node/state_keeper/src/batch_executor/mod.rs b/core/node/state_keeper/src/batch_executor/mod.rs index 08a668ef827c..a5b8f52360e6 100644 --- a/core/node/state_keeper/src/batch_executor/mod.rs +++ b/core/node/state_keeper/src/batch_executor/mod.rs @@ -230,7 +230,7 @@ impl BatchExecutorHandle { Ok(finished_batch) } - pub async fn storage_view_cache(mut self) -> anyhow::Result { + pub async fn storage_view_cache(&mut self) -> anyhow::Result { let (response_sender, response_receiver) = oneshot::channel(); let send_failed = self .commands diff --git a/core/node/state_keeper/src/testonly/mod.rs b/core/node/state_keeper/src/testonly/mod.rs index 3f7244a2fb75..6f0ce1a809d5 100644 --- a/core/node/state_keeper/src/testonly/mod.rs +++ b/core/node/state_keeper/src/testonly/mod.rs @@ -105,6 +105,8 @@ impl BatchExecutor for MockBatchExecutor { resp.send(default_vm_batch_result()).unwrap(); break; } + // todo: add test + Command::StorageViewCache(_) => (), } } anyhow::Ok(()) diff --git a/core/node/state_keeper/src/testonly/test_batch_executor.rs b/core/node/state_keeper/src/testonly/test_batch_executor.rs index 4539633174a8..6d7a5e416a83 100644 --- a/core/node/state_keeper/src/testonly/test_batch_executor.rs +++ b/core/node/state_keeper/src/testonly/test_batch_executor.rs @@ -499,6 +499,8 @@ 
impl TestBatchExecutor { resp.send(default_vm_batch_result()).unwrap(); return; } + //todo: add test + Command::StorageViewCache(_) => (), } } } @@ -827,6 +829,8 @@ impl BatchExecutor for MockBatchExecutor { resp.send(default_vm_batch_result()).unwrap(); break; } + // todo: add test + Command::StorageViewCache(_) => (), } } anyhow::Ok(()) diff --git a/core/node/state_keeper/src/updates/mod.rs b/core/node/state_keeper/src/updates/mod.rs index 5aef0ef7be7d..4c31a1f9adfb 100644 --- a/core/node/state_keeper/src/updates/mod.rs +++ b/core/node/state_keeper/src/updates/mod.rs @@ -61,6 +61,7 @@ impl UpdatesManager { protocol_version, ), storage_writes_deduplicator: StorageWritesDeduplicator::new(), + storage_view_cache: StorageViewCache::default(), } } diff --git a/core/node/vm_runner/src/impls/bwip.rs b/core/node/vm_runner/src/impls/bwip.rs index 12c4514bfb68..7c54c39d9fd6 100644 --- a/core/node/vm_runner/src/impls/bwip.rs +++ b/core/node/vm_runner/src/impls/bwip.rs @@ -31,9 +31,13 @@ impl BasicWitnessInputProducer { object_store: Arc, rocksdb_path: String, chain_id: L2ChainId, + first_processed_batch: L1BatchNumber, window_size: u32, ) -> anyhow::Result<(Self, BasicWitnessInputProducerTasks)> { - let io = BasicWitnessInputProducerIo { window_size }; + let io = BasicWitnessInputProducerIo { + first_processed_batch, + window_size, + }; let (loader, loader_task) = VmRunnerStorage::new(pool.clone(), rocksdb_path, io.clone(), chain_id).await?; let output_handler_factory = BasicWitnessInputProducerOutputHandlerFactory { diff --git a/core/node/vm_runner/src/process.rs b/core/node/vm_runner/src/process.rs index d64b24dce184..c15900ae2974 100644 --- a/core/node/vm_runner/src/process.rs +++ b/core/node/vm_runner/src/process.rs @@ -110,11 +110,6 @@ impl VmRunner { .await .context("VM runner failed to handle L2 block")?; } - let finished_batch = batch_executor - .finish_batch() - .await - .context("failed finishing L1 batch in executor")?; - updates_manager.finish_batch(finished_batch); let storage_view_cache = batch_executor .storage_view_cache() @@ -122,6 +117,12 @@ impl VmRunner { .context("Failed getting storage view cache")?; updates_manager.update_storage_view_cache(storage_view_cache); + let finished_batch = batch_executor + .finish_batch() + .await + .context("failed finishing L1 batch in executor")?; + updates_manager.finish_batch(finished_batch); + latency.observe(); output_handler .handle_l1_batch(Arc::new(updates_manager)) From 0e68fa5001efe462f7f99eae734d431489c40881 Mon Sep 17 00:00:00 2001 From: Lech <88630083+Artemka374@users.noreply.github.com> Date: Mon, 24 Jun 2024 14:51:19 +0300 Subject: [PATCH 07/56] fix core build --- core/node/state_keeper/src/batch_executor/mod.rs | 1 - 1 file changed, 1 deletion(-) diff --git a/core/node/state_keeper/src/batch_executor/mod.rs b/core/node/state_keeper/src/batch_executor/mod.rs index a5b8f52360e6..8f67b66bd97a 100644 --- a/core/node/state_keeper/src/batch_executor/mod.rs +++ b/core/node/state_keeper/src/batch_executor/mod.rs @@ -248,7 +248,6 @@ impl BatchExecutorHandle { Ok(cache) => cache, Err(_) => return Err(self.handle.wait_for_error().await), }; - self.handle.wait().await?; latency.observe(); Ok(storage_view_cache) } From 2b1a7ae2ac0926d571430793743d0e89c6b176ea Mon Sep 17 00:00:00 2001 From: Lech <88630083+Artemka374@users.noreply.github.com> Date: Mon, 24 Jun 2024 14:51:58 +0300 Subject: [PATCH 08/56] fmt --- core/lib/prover_interface/src/inputs.rs | 1 - 1 file changed, 1 deletion(-) diff --git a/core/lib/prover_interface/src/inputs.rs 
b/core/lib/prover_interface/src/inputs.rs index 8cfe0e6762bb..2278dbfec185 100644 --- a/core/lib/prover_interface/src/inputs.rs +++ b/core/lib/prover_interface/src/inputs.rs @@ -2,7 +2,6 @@ use std::{collections::HashMap, convert::TryInto, fmt::Debug}; use serde::{Deserialize, Serialize}; use serde_with::{serde_as, Bytes}; - use zksync_object_store::{serialize_using_bincode, Bucket, StoredObject}; pub use zksync_state::WitnessStorage; use zksync_types::{ From 714f8e39f2b27b41712bc4d24fbb4caf4962997c Mon Sep 17 00:00:00 2001 From: Lech <88630083+Artemka374@users.noreply.github.com> Date: Mon, 24 Jun 2024 15:57:25 +0300 Subject: [PATCH 09/56] update data handler and gateway --- ...a2418ca2223c8f37ec9856dcf6ccfa8b8dabb.json | 15 +++++++++++++ ...1e4ee6682a89fb86f3b715a240805d44e6d87.json | 15 ------------- ...700a95e4c37a7a18531b3cdf120394cb055b9.json | 22 ------------------- ...365251942e98cc2283320b1bf4851dfdd020.json} | 4 ++-- ...41c4a7db0ea88c2e4caece1e7c170c991baa2.json | 14 ++++++++++++ ...b608d21dc70397b64ce500881a8b55953c59c.json | 14 ------------ ...2f30e4f0c95458311a08e70302e98387406fd.json | 22 +++++++++++++++++++ ...047c6531289ebf5d82ff9a987f4c62eb2010.json} | 4 ++-- core/lib/dal/src/proof_generation_dal.rs | 17 +++++++++----- core/lib/dal/src/tee_proof_generation_dal.rs | 9 ++++---- core/lib/prover_interface/src/api.rs | 4 ++-- .../src/request_processor.rs | 21 ++++++++++++++---- .../witness_generator/src/basic_circuits.rs | 4 ++-- 13 files changed, 91 insertions(+), 74 deletions(-) create mode 100644 core/lib/dal/.sqlx/query-087be742721df75c9b197030204a2418ca2223c8f37ec9856dcf6ccfa8b8dabb.json delete mode 100644 core/lib/dal/.sqlx/query-08e59ed8e2fd1a74e19d8bf0d131e4ee6682a89fb86f3b715a240805d44e6d87.json delete mode 100644 core/lib/dal/.sqlx/query-11af69fc254e54449b64c086667700a95e4c37a7a18531b3cdf120394cb055b9.json rename core/lib/dal/.sqlx/{query-58aed39245c72d231b268ce83105bb2036d21f60d4c6934f9145730ac35c04de.json => query-22a3a95c6a32614bbca153f6a006365251942e98cc2283320b1bf4851dfdd020.json} (58%) create mode 100644 core/lib/dal/.sqlx/query-4175d40e342052cc4b63c9c346541c4a7db0ea88c2e4caece1e7c170c991baa2.json delete mode 100644 core/lib/dal/.sqlx/query-9533a672ae82db344ae1070ae11b608d21dc70397b64ce500881a8b55953c59c.json create mode 100644 core/lib/dal/.sqlx/query-ace73ef49d84b19060c80f921e92f30e4f0c95458311a08e70302e98387406fd.json rename core/lib/dal/.sqlx/{query-640d37aa1d6dc722b1651c74b7ea2fca14965083b0589c3b3efad02e37d55f0c.json => query-bcd4376476a6f794d0ff8c83a61d047c6531289ebf5d82ff9a987f4c62eb2010.json} (67%) diff --git a/core/lib/dal/.sqlx/query-087be742721df75c9b197030204a2418ca2223c8f37ec9856dcf6ccfa8b8dabb.json b/core/lib/dal/.sqlx/query-087be742721df75c9b197030204a2418ca2223c8f37ec9856dcf6ccfa8b8dabb.json new file mode 100644 index 000000000000..87759bd5d68c --- /dev/null +++ b/core/lib/dal/.sqlx/query-087be742721df75c9b197030204a2418ca2223c8f37ec9856dcf6ccfa8b8dabb.json @@ -0,0 +1,15 @@ +{ + "db_name": "PostgreSQL", + "query": "\n INSERT INTO\n proof_generation_details (l1_batch_number, proof_gen_data_blob_url, created_at, updated_at)\n VALUES\n ($1, $2, NOW(), NOW())\n ON CONFLICT (l1_batch_number) DO NOTHING\n ", + "describe": { + "columns": [], + "parameters": { + "Left": [ + "Int8", + "Text" + ] + }, + "nullable": [] + }, + "hash": "087be742721df75c9b197030204a2418ca2223c8f37ec9856dcf6ccfa8b8dabb" +} diff --git a/core/lib/dal/.sqlx/query-08e59ed8e2fd1a74e19d8bf0d131e4ee6682a89fb86f3b715a240805d44e6d87.json 
b/core/lib/dal/.sqlx/query-08e59ed8e2fd1a74e19d8bf0d131e4ee6682a89fb86f3b715a240805d44e6d87.json deleted file mode 100644 index 0c3ca92c10c5..000000000000 --- a/core/lib/dal/.sqlx/query-08e59ed8e2fd1a74e19d8bf0d131e4ee6682a89fb86f3b715a240805d44e6d87.json +++ /dev/null @@ -1,15 +0,0 @@ -{ - "db_name": "PostgreSQL", - "query": "\n INSERT INTO\n proof_generation_details (l1_batch_number, status, proof_gen_data_blob_url, created_at, updated_at)\n VALUES\n ($1, 'ready_to_be_proven', $2, NOW(), NOW())\n ON CONFLICT (l1_batch_number) DO NOTHING\n ", - "describe": { - "columns": [], - "parameters": { - "Left": [ - "Int8", - "Text" - ] - }, - "nullable": [] - }, - "hash": "08e59ed8e2fd1a74e19d8bf0d131e4ee6682a89fb86f3b715a240805d44e6d87" -} diff --git a/core/lib/dal/.sqlx/query-11af69fc254e54449b64c086667700a95e4c37a7a18531b3cdf120394cb055b9.json b/core/lib/dal/.sqlx/query-11af69fc254e54449b64c086667700a95e4c37a7a18531b3cdf120394cb055b9.json deleted file mode 100644 index ed211d7dc9d8..000000000000 --- a/core/lib/dal/.sqlx/query-11af69fc254e54449b64c086667700a95e4c37a7a18531b3cdf120394cb055b9.json +++ /dev/null @@ -1,22 +0,0 @@ -{ - "db_name": "PostgreSQL", - "query": "\n UPDATE proof_generation_details\n SET\n status = 'picked_by_prover',\n updated_at = NOW(),\n prover_taken_at = NOW()\n WHERE\n l1_batch_number = (\n SELECT\n l1_batch_number\n FROM\n proof_generation_details\n WHERE\n status = 'ready_to_be_proven'\n OR (\n status = 'picked_by_prover'\n AND prover_taken_at < NOW() - $1::INTERVAL\n )\n ORDER BY\n l1_batch_number ASC\n LIMIT\n 1\n FOR UPDATE\n SKIP LOCKED\n )\n RETURNING\n proof_generation_details.l1_batch_number\n ", - "describe": { - "columns": [ - { - "ordinal": 0, - "name": "l1_batch_number", - "type_info": "Int8" - } - ], - "parameters": { - "Left": [ - "Interval" - ] - }, - "nullable": [ - false - ] - }, - "hash": "11af69fc254e54449b64c086667700a95e4c37a7a18531b3cdf120394cb055b9" -} diff --git a/core/lib/dal/.sqlx/query-58aed39245c72d231b268ce83105bb2036d21f60d4c6934f9145730ac35c04de.json b/core/lib/dal/.sqlx/query-22a3a95c6a32614bbca153f6a006365251942e98cc2283320b1bf4851dfdd020.json similarity index 58% rename from core/lib/dal/.sqlx/query-58aed39245c72d231b268ce83105bb2036d21f60d4c6934f9145730ac35c04de.json rename to core/lib/dal/.sqlx/query-22a3a95c6a32614bbca153f6a006365251942e98cc2283320b1bf4851dfdd020.json index 502d14e05ea5..5e6784728232 100644 --- a/core/lib/dal/.sqlx/query-58aed39245c72d231b268ce83105bb2036d21f60d4c6934f9145730ac35c04de.json +++ b/core/lib/dal/.sqlx/query-22a3a95c6a32614bbca153f6a006365251942e98cc2283320b1bf4851dfdd020.json @@ -1,6 +1,6 @@ { "db_name": "PostgreSQL", - "query": "\n SELECT\n l1_batch_number\n FROM\n proof_generation_details\n WHERE\n status = 'ready_to_be_proven'\n ORDER BY\n l1_batch_number ASC\n LIMIT\n 1\n ", + "query": "\n SELECT\n l1_batch_number\n FROM\n proof_generation_details\n WHERE\n status NOT IN ('picked_by_prover', 'generated')\n ORDER BY\n l1_batch_number ASC\n LIMIT\n 1\n ", "describe": { "columns": [ { @@ -16,5 +16,5 @@ false ] }, - "hash": "58aed39245c72d231b268ce83105bb2036d21f60d4c6934f9145730ac35c04de" + "hash": "22a3a95c6a32614bbca153f6a006365251942e98cc2283320b1bf4851dfdd020" } diff --git a/core/lib/dal/.sqlx/query-4175d40e342052cc4b63c9c346541c4a7db0ea88c2e4caece1e7c170c991baa2.json b/core/lib/dal/.sqlx/query-4175d40e342052cc4b63c9c346541c4a7db0ea88c2e4caece1e7c170c991baa2.json new file mode 100644 index 000000000000..66a889915c35 --- /dev/null +++ 
b/core/lib/dal/.sqlx/query-4175d40e342052cc4b63c9c346541c4a7db0ea88c2e4caece1e7c170c991baa2.json @@ -0,0 +1,14 @@ +{ + "db_name": "PostgreSQL", + "query": "\n INSERT INTO\n tee_proof_generation_details (l1_batch_number, created_at, updated_at)\n VALUES\n ($1, NOW(), NOW())\n ON CONFLICT (l1_batch_number) DO NOTHING\n ", + "describe": { + "columns": [], + "parameters": { + "Left": [ + "Int8" + ] + }, + "nullable": [] + }, + "hash": "4175d40e342052cc4b63c9c346541c4a7db0ea88c2e4caece1e7c170c991baa2" +} diff --git a/core/lib/dal/.sqlx/query-9533a672ae82db344ae1070ae11b608d21dc70397b64ce500881a8b55953c59c.json b/core/lib/dal/.sqlx/query-9533a672ae82db344ae1070ae11b608d21dc70397b64ce500881a8b55953c59c.json deleted file mode 100644 index 994bfcfbb5a2..000000000000 --- a/core/lib/dal/.sqlx/query-9533a672ae82db344ae1070ae11b608d21dc70397b64ce500881a8b55953c59c.json +++ /dev/null @@ -1,14 +0,0 @@ -{ - "db_name": "PostgreSQL", - "query": "\n INSERT INTO\n tee_proof_generation_details (l1_batch_number, status, created_at, updated_at)\n VALUES\n ($1, 'ready_to_be_proven', NOW(), NOW())\n ON CONFLICT (l1_batch_number) DO NOTHING\n ", - "describe": { - "columns": [], - "parameters": { - "Left": [ - "Int8" - ] - }, - "nullable": [] - }, - "hash": "9533a672ae82db344ae1070ae11b608d21dc70397b64ce500881a8b55953c59c" -} diff --git a/core/lib/dal/.sqlx/query-ace73ef49d84b19060c80f921e92f30e4f0c95458311a08e70302e98387406fd.json b/core/lib/dal/.sqlx/query-ace73ef49d84b19060c80f921e92f30e4f0c95458311a08e70302e98387406fd.json new file mode 100644 index 000000000000..88a526f7a950 --- /dev/null +++ b/core/lib/dal/.sqlx/query-ace73ef49d84b19060c80f921e92f30e4f0c95458311a08e70302e98387406fd.json @@ -0,0 +1,22 @@ +{ + "db_name": "PostgreSQL", + "query": "\n UPDATE proof_generation_details\n SET\n status = 'picked_by_prover',\n updated_at = NOW(),\n prover_taken_at = NOW()\n WHERE\n l1_batch_number = (\n SELECT l1_batch_number\n FROM proof_generation_details\n LEFT JOIN l1_batches ON l1_batch_number = l1_batches.number\n WHERE (vm_run_data_blob_url IS NOT NULL\n AND proof_gen_data_blob_url IS NOT NULL\n AND l1_batches.merkle_root_hash IS NOT NULL\n AND l1_batches.aux_data_hash IS NOT NULL\n AND l1_batches.meta_parameters_hash IS NOT NULL)\n OR (\n status = 'picked_by_prover'\n AND prover_taken_at < NOW() - $1::INTERVAL\n )\n ORDER BY\n l1_batch_number ASC\n LIMIT\n 1\n FOR UPDATE\n SKIP LOCKED\n )\n RETURNING\n proof_generation_details.l1_batch_number\n ", + "describe": { + "columns": [ + { + "ordinal": 0, + "name": "l1_batch_number", + "type_info": "Int8" + } + ], + "parameters": { + "Left": [ + "Interval" + ] + }, + "nullable": [ + false + ] + }, + "hash": "ace73ef49d84b19060c80f921e92f30e4f0c95458311a08e70302e98387406fd" +} diff --git a/core/lib/dal/.sqlx/query-640d37aa1d6dc722b1651c74b7ea2fca14965083b0589c3b3efad02e37d55f0c.json b/core/lib/dal/.sqlx/query-bcd4376476a6f794d0ff8c83a61d047c6531289ebf5d82ff9a987f4c62eb2010.json similarity index 67% rename from core/lib/dal/.sqlx/query-640d37aa1d6dc722b1651c74b7ea2fca14965083b0589c3b3efad02e37d55f0c.json rename to core/lib/dal/.sqlx/query-bcd4376476a6f794d0ff8c83a61d047c6531289ebf5d82ff9a987f4c62eb2010.json index f0603488f1e8..9d9e32ee70fa 100644 --- a/core/lib/dal/.sqlx/query-640d37aa1d6dc722b1651c74b7ea2fca14965083b0589c3b3efad02e37d55f0c.json +++ b/core/lib/dal/.sqlx/query-bcd4376476a6f794d0ff8c83a61d047c6531289ebf5d82ff9a987f4c62eb2010.json @@ -1,6 +1,6 @@ { "db_name": "PostgreSQL", - "query": "\n SELECT\n proofs.l1_batch_number\n FROM\n tee_proof_generation_details 
AS proofs\n JOIN tee_verifier_input_producer_jobs AS inputs ON proofs.l1_batch_number = inputs.l1_batch_number\n WHERE\n inputs.status = 'Successful'\n AND proofs.status = 'ready_to_be_proven'\n ORDER BY\n proofs.l1_batch_number ASC\n LIMIT\n 1\n ", + "query": "\n SELECT\n proofs.l1_batch_number\n FROM\n tee_proof_generation_details AS proofs\n JOIN tee_verifier_input_producer_jobs AS inputs ON proofs.l1_batch_number = inputs.l1_batch_number\n WHERE\n inputs.status = 'Successful'\n AND proofs.status NOT IN ('picked_by_prover', 'generated')\n ORDER BY\n proofs.l1_batch_number ASC\n LIMIT\n 1\n ", "describe": { "columns": [ { @@ -16,5 +16,5 @@ false ] }, - "hash": "640d37aa1d6dc722b1651c74b7ea2fca14965083b0589c3b3efad02e37d55f0c" + "hash": "bcd4376476a6f794d0ff8c83a61d047c6531289ebf5d82ff9a987f4c62eb2010" } diff --git a/core/lib/dal/src/proof_generation_dal.rs b/core/lib/dal/src/proof_generation_dal.rs index 040b4246604f..ce2ca81e8819 100644 --- a/core/lib/dal/src/proof_generation_dal.rs +++ b/core/lib/dal/src/proof_generation_dal.rs @@ -17,8 +17,6 @@ pub struct ProofGenerationDal<'a, 'c> { #[derive(Debug, EnumString, Display)] enum ProofGenerationJobStatus { - #[strum(serialize = "ready_to_be_proven")] - ReadyToBeProven, #[strum(serialize = "picked_by_prover")] PickedByProver, #[strum(serialize = "generated")] @@ -46,8 +44,15 @@ impl ProofGenerationDal<'_, '_> { l1_batch_number FROM proof_generation_details + LEFT JOIN l1_batches ON l1_batch_number = l1_batches.number WHERE - status = 'ready_to_be_proven' + ( + vm_run_data_blob_url IS NOT NULL + AND proof_gen_data_blob_url IS NOT NULL + AND l1_batches.merkle_root_hash IS NOT NULL + AND l1_batches.aux_data_hash IS NOT NULL + AND l1_batches.meta_parameters_hash IS NOT NULL + ) OR ( status = 'picked_by_prover' AND prover_taken_at < NOW() - $1::INTERVAL @@ -119,9 +124,9 @@ impl ProofGenerationDal<'_, '_> { let query = sqlx::query!( r#" INSERT INTO - proof_generation_details (l1_batch_number, status, proof_gen_data_blob_url, created_at, updated_at) + proof_generation_details (l1_batch_number, proof_gen_data_blob_url, created_at, updated_at) VALUES - ($1, 'ready_to_be_proven', $2, NOW(), NOW()) + ($1, $2, NOW(), NOW()) ON CONFLICT (l1_batch_number) DO NOTHING "#, l1_batch_number, @@ -191,7 +196,7 @@ impl ProofGenerationDal<'_, '_> { FROM proof_generation_details WHERE - status = 'ready_to_be_proven' + status NOT IN ('picked_by_prover', 'generated') ORDER BY l1_batch_number ASC LIMIT diff --git a/core/lib/dal/src/tee_proof_generation_dal.rs b/core/lib/dal/src/tee_proof_generation_dal.rs index d5625935fa1b..001393cb316f 100644 --- a/core/lib/dal/src/tee_proof_generation_dal.rs +++ b/core/lib/dal/src/tee_proof_generation_dal.rs @@ -18,8 +18,6 @@ pub struct TeeProofGenerationDal<'a, 'c> { #[derive(Debug, EnumString, Display)] enum TeeProofGenerationJobStatus { - #[strum(serialize = "ready_to_be_proven")] - ReadyToBeProven, #[strum(serialize = "picked_by_prover")] PickedByProver, #[strum(serialize = "generated")] @@ -40,6 +38,7 @@ impl TeeProofGenerationDal<'_, '_> { processing_timeout: Duration, ) -> DalResult> { let processing_timeout = pg_interval_from_duration(processing_timeout); + // todo: deprecate ready to be proven let result: Option = sqlx::query!( r#" UPDATE tee_proof_generation_details @@ -138,9 +137,9 @@ impl TeeProofGenerationDal<'_, '_> { sqlx::query!( r#" INSERT INTO - tee_proof_generation_details (l1_batch_number, status, created_at, updated_at) + tee_proof_generation_details (l1_batch_number, created_at, updated_at) VALUES - ($1, 
'ready_to_be_proven', NOW(), NOW()) + ($1, NOW(), NOW()) ON CONFLICT (l1_batch_number) DO NOTHING "#, block_number, @@ -164,7 +163,7 @@ impl TeeProofGenerationDal<'_, '_> { JOIN tee_verifier_input_producer_jobs AS inputs ON proofs.l1_batch_number = inputs.l1_batch_number WHERE inputs.status = 'Successful' - AND proofs.status = 'ready_to_be_proven' + AND proofs.status NOT IN ('picked_by_prover', 'generated') ORDER BY proofs.l1_batch_number ASC LIMIT diff --git a/core/lib/prover_interface/src/api.rs b/core/lib/prover_interface/src/api.rs index fb96c62d38c7..2ef5cabea731 100644 --- a/core/lib/prover_interface/src/api.rs +++ b/core/lib/prover_interface/src/api.rs @@ -9,7 +9,7 @@ use zksync_types::{ }; use crate::{ - inputs::PrepareBasicCircuitsJob, + inputs::{PrepareBasicCircuitsJob, WitnessInputData}, outputs::{L1BatchProofForL1, L1BatchTeeProofForL1}, }; @@ -18,7 +18,7 @@ use crate::{ #[derive(Debug, Serialize, Deserialize)] pub struct ProofGenerationData { pub l1_batch_number: L1BatchNumber, - pub data: PrepareBasicCircuitsJob, + pub data: WitnessInputData, pub protocol_version: ProtocolSemanticVersion, pub l1_verifier_config: L1VerifierConfig, pub eip_4844_blobs: Eip4844Blobs, diff --git a/core/node/proof_data_handler/src/request_processor.rs b/core/node/proof_data_handler/src/request_processor.rs index 170b27bb971f..9231af372b1a 100644 --- a/core/node/proof_data_handler/src/request_processor.rs +++ b/core/node/proof_data_handler/src/request_processor.rs @@ -4,9 +4,12 @@ use axum::{extract::Path, Json}; use zksync_config::configs::ProofDataHandlerConfig; use zksync_dal::{ConnectionPool, Core, CoreDal}; use zksync_object_store::ObjectStore; -use zksync_prover_interface::api::{ - ProofGenerationData, ProofGenerationDataRequest, ProofGenerationDataResponse, - SubmitProofRequest, SubmitProofResponse, +use zksync_prover_interface::{ + api::{ + ProofGenerationData, ProofGenerationDataRequest, ProofGenerationDataResponse, + SubmitProofRequest, SubmitProofResponse, + }, + inputs::{PrepareBasicCircuitsJob, VMRunWitnessInputData, WitnessInputData}, }; use zksync_types::{ basic_fri_types::Eip4844Blobs, @@ -61,11 +64,21 @@ impl RequestProcessor { None => return Ok(Json(ProofGenerationDataResponse::Success(None))), // no batches pending to be proven }; - let blob = self + let vm_run_data = self .blob_store .get(l1_batch_number) .await .map_err(RequestProcessorError::ObjectStore)?; + let merkle_paths = self + .blob_store + .get(l1_batch_number) + .await + .map_err(RequestProcessorError::ObjectStore)?; + + let blob = WitnessInputData { + vm_run_data, + merkle_paths, + }; let header = self .pool diff --git a/prover/witness_generator/src/basic_circuits.rs b/prover/witness_generator/src/basic_circuits.rs index 20b4dc5029be..3446fb94f051 100644 --- a/prover/witness_generator/src/basic_circuits.rs +++ b/prover/witness_generator/src/basic_circuits.rs @@ -78,7 +78,7 @@ struct BlobUrls { #[derive(Clone)] pub struct BasicWitnessGeneratorJob { block_number: L1BatchNumber, - job: PrepareBasicCircuitsJob, + job: WitnessInputData, eip_4844_blobs: Eip4844Blobs, } @@ -423,7 +423,7 @@ async fn generate_witness( ); let mut tree = PrecalculatedMerklePathsProvider::new( - input.vm_run_data.merkle_paths_input, + input.merkle_paths, input .vm_run_data .previous_batch_with_metadata From d54e5ae2ec79365189ff18cf331600b4ea05f610 Mon Sep 17 00:00:00 2001 From: Lech <88630083+Artemka374@users.noreply.github.com> Date: Mon, 24 Jun 2024 17:32:59 +0300 Subject: [PATCH 10/56] add bwip to node --- core/bin/zksync_server/src/main.rs | 
9 +- core/bin/zksync_server/src/node_builder.rs | 18 +++- core/lib/config/src/configs/general.rs | 3 +- core/lib/config/src/configs/mod.rs | 2 +- core/lib/config/src/configs/vm_runner.rs | 17 ++++ ...ef7419598a0113c85ff215e13728c0a15b310.json | 15 ++++ ...65c24fdabe748979306e50eb7ecb47c71ec18.json | 22 +++++ ...2f30e4f0c95458311a08e70302e98387406fd.json | 22 ----- core/lib/dal/src/proof_generation_dal.rs | 40 ++++++++- core/lib/env_config/src/vm_runner.rs | 8 +- core/lib/protobuf_config/src/general.rs | 8 ++ .../src/proto/config/general.proto | 45 +++++----- .../src/proto/config/vm_runner.proto | 12 ++- core/lib/protobuf_config/src/vm_runner.rs | 22 +++++ core/lib/zksync_core_leftovers/src/lib.rs | 3 + .../src/temp_config_store/mod.rs | 3 + .../implementations/layers/vm_runner/bwip.rs | 90 +++++++++++++++++++ .../implementations/layers/vm_runner/mod.rs | 1 + .../src/request_processor.rs | 4 +- .../src/batch_executor/main_executor.rs | 4 +- core/node/vm_runner/src/impls/bwip.rs | 26 ++++-- core/node/vm_runner/src/impls/mod.rs | 1 + core/node/vm_runner/src/lib.rs | 5 +- 23 files changed, 314 insertions(+), 66 deletions(-) create mode 100644 core/lib/dal/.sqlx/query-2b02ece0964c71d501c99885b73ef7419598a0113c85ff215e13728c0a15b310.json create mode 100644 core/lib/dal/.sqlx/query-7727ef45e01aef2ffa2edc8227d65c24fdabe748979306e50eb7ecb47c71ec18.json delete mode 100644 core/lib/dal/.sqlx/query-ace73ef49d84b19060c80f921e92f30e4f0c95458311a08e70302e98387406fd.json create mode 100644 core/node/node_framework/src/implementations/layers/vm_runner/bwip.rs diff --git a/core/bin/zksync_server/src/main.rs b/core/bin/zksync_server/src/main.rs index dfb11b55da92..39211a794cc7 100644 --- a/core/bin/zksync_server/src/main.rs +++ b/core/bin/zksync_server/src/main.rs @@ -11,10 +11,10 @@ use zksync_config::{ }, fri_prover_group::FriProverGroupConfig, house_keeper::HouseKeeperConfig, - ContractsConfig, DatabaseSecrets, FriProofCompressorConfig, FriProverConfig, - FriProverGatewayConfig, FriWitnessGeneratorConfig, FriWitnessVectorGeneratorConfig, - L1Secrets, ObservabilityConfig, PrometheusConfig, ProofDataHandlerConfig, - ProtectiveReadsWriterConfig, Secrets, + BasicWitnessInputProducerConfig, ContractsConfig, DatabaseSecrets, + FriProofCompressorConfig, FriProverConfig, FriProverGatewayConfig, + FriWitnessGeneratorConfig, FriWitnessVectorGeneratorConfig, L1Secrets, ObservabilityConfig, + PrometheusConfig, ProofDataHandlerConfig, ProtectiveReadsWriterConfig, Secrets, }, ApiConfig, ContractVerifierConfig, DBConfig, EthConfig, EthWatchConfig, GasAdjusterConfig, GenesisConfig, ObjectStoreConfig, PostgresConfig, SnapshotsCreatorConfig, @@ -269,6 +269,7 @@ fn load_env_config() -> anyhow::Result { observability: ObservabilityConfig::from_env().ok(), snapshot_creator: SnapshotsCreatorConfig::from_env().ok(), protective_reads_writer_config: ProtectiveReadsWriterConfig::from_env().ok(), + basic_witness_input_producer_config: BasicWitnessInputProducerConfig::from_env().ok(), core_object_store: ObjectStoreConfig::from_env().ok(), }) } diff --git a/core/bin/zksync_server/src/node_builder.rs b/core/bin/zksync_server/src/node_builder.rs index 096d5e783551..9a6b59523a15 100644 --- a/core/bin/zksync_server/src/node_builder.rs +++ b/core/bin/zksync_server/src/node_builder.rs @@ -39,7 +39,9 @@ use zksync_node_framework::{ output_handler::OutputHandlerLayer, RocksdbStorageOptions, StateKeeperLayer, }, tee_verifier_input_producer::TeeVerifierInputProducerLayer, - vm_runner::protective_reads::ProtectiveReadsWriterLayer, + 
vm_runner::{ + bwip::BasicWitnessInputProducerLayer, protective_reads::ProtectiveReadsWriterLayer, + }, web3_api::{ caches::MempoolCacheLayer, server::{Web3ServerLayer, Web3ServerOptionalConfig}, @@ -444,6 +446,17 @@ impl MainNodeBuilder { Ok(self) } + fn add_vm_runner_bwip_layer(mut self) -> anyhow::Result { + let basic_witness_input_producer_config = + try_load_config!(self.configs.basic_witness_input_producer_config); + self.node.add_layer(BasicWitnessInputProducerLayer::new( + basic_witness_input_producer_config, + self.genesis_config.l2_chain_id, + )); + + Ok(self) + } + pub fn build(mut self, mut components: Vec) -> anyhow::Result { // Add "base" layers (resources and helper tasks). self = self @@ -531,6 +544,9 @@ impl MainNodeBuilder { Component::VmRunnerProtectiveReads => { self = self.add_vm_runner_protective_reads_layer()?; } + Component::VmRunnerBwip => { + self = self.add_vm_runner_bwip_layer()?; + } } } Ok(self.node.build()?) diff --git a/core/lib/config/src/configs/general.rs b/core/lib/config/src/configs/general.rs index 9f249d655f57..3a843093d2b6 100644 --- a/core/lib/config/src/configs/general.rs +++ b/core/lib/config/src/configs/general.rs @@ -3,7 +3,7 @@ use crate::{ chain::{CircuitBreakerConfig, MempoolConfig, OperationsManagerConfig, StateKeeperConfig}, fri_prover_group::FriProverGroupConfig, house_keeper::HouseKeeperConfig, - vm_runner::ProtectiveReadsWriterConfig, + vm_runner::{BasicWitnessInputProducerConfig, ProtectiveReadsWriterConfig}, FriProofCompressorConfig, FriProverConfig, FriProverGatewayConfig, FriWitnessGeneratorConfig, FriWitnessVectorGeneratorConfig, ObservabilityConfig, PrometheusConfig, ProofDataHandlerConfig, @@ -35,5 +35,6 @@ pub struct GeneralConfig { pub snapshot_creator: Option, pub observability: Option, pub protective_reads_writer_config: Option, + pub basic_witness_input_producer_config: Option, pub core_object_store: Option, } diff --git a/core/lib/config/src/configs/mod.rs b/core/lib/config/src/configs/mod.rs index b2d9571ad292..808875bcddad 100644 --- a/core/lib/config/src/configs/mod.rs +++ b/core/lib/config/src/configs/mod.rs @@ -20,7 +20,7 @@ pub use self::{ secrets::{DatabaseSecrets, L1Secrets, Secrets}, snapshots_creator::SnapshotsCreatorConfig, utils::PrometheusConfig, - vm_runner::ProtectiveReadsWriterConfig, + vm_runner::{BasicWitnessInputProducerConfig, ProtectiveReadsWriterConfig}, }; pub mod api; diff --git a/core/lib/config/src/configs/vm_runner.rs b/core/lib/config/src/configs/vm_runner.rs index eb3d4a9d4b24..b4e5d42291b3 100644 --- a/core/lib/config/src/configs/vm_runner.rs +++ b/core/lib/config/src/configs/vm_runner.rs @@ -17,3 +17,20 @@ impl ProtectiveReadsWriterConfig { "./db/protective_reads_writer".to_owned() } } + +#[derive(Debug, Deserialize, Clone, PartialEq, Default)] +pub struct BasicWitnessInputProducerConfig { + /// Path to the RocksDB data directory that serves state cache. + #[serde(default = "ProtectiveReadsWriterConfig::default_db_path")] + pub db_path: String, + /// How many max batches should be processed at the same time. + pub window_size: u32, + /// All batches before this one (inclusive) are always considered to be processed. 
+    pub first_processed_batch: L1BatchNumber,
+}
+
+impl BasicWitnessInputProducerConfig {
+    fn default_db_path() -> String {
+        "./db/basic_witness_input_producer".to_owned()
+    }
+}
diff --git a/core/lib/dal/.sqlx/query-2b02ece0964c71d501c99885b73ef7419598a0113c85ff215e13728c0a15b310.json b/core/lib/dal/.sqlx/query-2b02ece0964c71d501c99885b73ef7419598a0113c85ff215e13728c0a15b310.json
new file mode 100644
index 000000000000..d389eb41431d
--- /dev/null
+++ b/core/lib/dal/.sqlx/query-2b02ece0964c71d501c99885b73ef7419598a0113c85ff215e13728c0a15b310.json
@@ -0,0 +1,15 @@
+{
+  "db_name": "PostgreSQL",
+  "query": "\n            UPDATE proof_generation_details\n            SET\n                status = 'generated',\n                vm_run_data_blob_url = $1,\n                updated_at = NOW()\n            WHERE\n                l1_batch_number = $2\n            ",
+  "describe": {
+    "columns": [],
+    "parameters": {
+      "Left": [
+        "Text",
+        "Int8"
+      ]
+    },
+    "nullable": []
+  },
+  "hash": "2b02ece0964c71d501c99885b73ef7419598a0113c85ff215e13728c0a15b310"
+}
diff --git a/core/lib/dal/.sqlx/query-7727ef45e01aef2ffa2edc8227d65c24fdabe748979306e50eb7ecb47c71ec18.json b/core/lib/dal/.sqlx/query-7727ef45e01aef2ffa2edc8227d65c24fdabe748979306e50eb7ecb47c71ec18.json
new file mode 100644
index 000000000000..b47d0af9f7bd
--- /dev/null
+++ b/core/lib/dal/.sqlx/query-7727ef45e01aef2ffa2edc8227d65c24fdabe748979306e50eb7ecb47c71ec18.json
@@ -0,0 +1,22 @@
+{
+  "db_name": "PostgreSQL",
+  "query": "\n            UPDATE proof_generation_details\n            SET\n                status = 'picked_by_prover',\n                updated_at = NOW(),\n                prover_taken_at = NOW()\n            WHERE\n                l1_batch_number = (\n                    SELECT\n                        l1_batch_number\n                    FROM\n                        proof_generation_details\n                        LEFT JOIN l1_batches ON l1_batch_number = l1_batches.number\n                    WHERE\n                        (\n                            vm_run_data_blob_url IS NOT NULL\n                            AND proof_gen_data_blob_url IS NOT NULL\n                            AND l1_batches.merkle_root_hash IS NOT NULL\n                            AND l1_batches.aux_data_hash IS NOT NULL\n                            AND l1_batches.meta_parameters_hash IS NOT NULL\n                        )\n                        OR (\n                            status = 'picked_by_prover'\n                            AND prover_taken_at < NOW() - $1::INTERVAL\n                        )\n                    ORDER BY\n                        l1_batch_number ASC\n                    LIMIT\n                        1\n                    FOR UPDATE\n                        SKIP LOCKED\n                )\n            RETURNING\n                proof_generation_details.l1_batch_number\n            ",
+  "describe": {
+    "columns": [
+      {
+        "ordinal": 0,
+        "name": "l1_batch_number",
+        "type_info": "Int8"
+      }
+    ],
+    "parameters": {
+      "Left": [
+        "Interval"
+      ]
+    },
+    "nullable": [
+      false
+    ]
+  },
+  "hash": "7727ef45e01aef2ffa2edc8227d65c24fdabe748979306e50eb7ecb47c71ec18"
+}
diff --git a/core/lib/dal/.sqlx/query-ace73ef49d84b19060c80f921e92f30e4f0c95458311a08e70302e98387406fd.json b/core/lib/dal/.sqlx/query-ace73ef49d84b19060c80f921e92f30e4f0c95458311a08e70302e98387406fd.json
deleted file mode 100644
index 88a526f7a950..000000000000
--- a/core/lib/dal/.sqlx/query-ace73ef49d84b19060c80f921e92f30e4f0c95458311a08e70302e98387406fd.json
+++ /dev/null
@@ -1,22 +0,0 @@
-{
-  "db_name": "PostgreSQL",
-  "query": "\n        UPDATE proof_generation_details\n        SET\n            status = 'picked_by_prover',\n            updated_at = NOW(),\n            prover_taken_at = NOW()\n        WHERE\n            l1_batch_number = (\n                SELECT l1_batch_number\n                FROM proof_generation_details\n                LEFT JOIN l1_batches ON l1_batch_number = l1_batches.number\n                WHERE (vm_run_data_blob_url IS NOT NULL\n                    AND proof_gen_data_blob_url IS NOT NULL\n                    AND l1_batches.merkle_root_hash IS NOT NULL\n                    AND l1_batches.aux_data_hash IS NOT NULL\n                    AND l1_batches.meta_parameters_hash IS NOT NULL)\n                OR (\n                    status = 'picked_by_prover'\n                    AND prover_taken_at < NOW() - $1::INTERVAL\n                )\n                ORDER BY\n                    l1_batch_number ASC\n                LIMIT\n                    1\n                FOR UPDATE\n                    SKIP LOCKED\n            )\n        RETURNING\n            proof_generation_details.l1_batch_number\n        ",
-  "describe": {
-    "columns": [
-      {
-        "ordinal": 0,
-        "name": "l1_batch_number",
-        "type_info": "Int8"
-      }
-    ],
-    "parameters": {
-      "Left": [
-        "Interval"
-      ]
-    },
-    "nullable": [
-      false
-    ]
-  },
-  "hash": "ace73ef49d84b19060c80f921e92f30e4f0c95458311a08e70302e98387406fd"
-}
diff --git a/core/lib/dal/src/proof_generation_dal.rs b/core/lib/dal/src/proof_generation_dal.rs
index ce2ca81e8819..d8f4c3ab3634 100644
--- a/core/lib/dal/src/proof_generation_dal.rs
+++ b/core/lib/dal/src/proof_generation_dal.rs
@@ -77,7 +77,7 @@ impl ProofGenerationDal<'_, '_> {
         Ok(result)
     }
 
-    pub async fn save_proof_artifacts_metadata(
+    pub async fn save_proof_merkle_paths_artifacts_metadata(
        &mut self,
         batch_number: L1BatchNumber,
         proof_blob_url: &str,
@@ -115,6 +115,44 @@ impl ProofGenerationDal<'_, '_> {
         Ok(())
     }
 
+    pub async fn save_vm_runner_artifacts_metadata(
+        &mut self,
+        batch_number: L1BatchNumber,
+        vm_run_data_blob_url: &str,
+    ) -> DalResult<()> {
+        let batch_number = i64::from(batch_number.0);
+        let query = sqlx::query!(
+            r#"
+            UPDATE proof_generation_details
+            SET
+                status = 'generated',
+                vm_run_data_blob_url = $1,
+                updated_at = NOW()
+            WHERE
+                l1_batch_number = $2
+            "#,
+            vm_run_data_blob_url,
+            batch_number
+        );
+        let instrumentation = Instrumented::new("save_vm_runner_artifacts_metadata")
+            .with_arg("vm_run_data_blob_url", &vm_run_data_blob_url)
+            .with_arg("l1_batch_number", &batch_number);
+        let result = instrumentation
+            .clone()
+            .with(query)
+            .execute(self.storage)
+            .await?;
+        if result.rows_affected() == 0 {
+            let err = instrumentation.constraint_error(anyhow::anyhow!(
+                "Cannot save vm_run_data_blob_url for a batch number {} that does not exist",
+                batch_number
+            ));
+            return Err(err);
+        }
+
+        Ok(())
+    }
+
     pub async fn insert_proof_generation_details(
         &mut self,
         block_number: L1BatchNumber,
diff --git a/core/lib/env_config/src/vm_runner.rs b/core/lib/env_config/src/vm_runner.rs
index 8a99ea2dc8e2..9973d760a236 100644
--- a/core/lib/env_config/src/vm_runner.rs
+++ b/core/lib/env_config/src/vm_runner.rs
@@ -1,4 +1,4 @@
-use zksync_config::configs::ProtectiveReadsWriterConfig;
+use zksync_config::configs::{BasicWitnessInputProducerConfig, ProtectiveReadsWriterConfig};
 
 use crate::{envy_load, FromEnv};
 
@@ -7,3 +7,9 @@ impl FromEnv for ProtectiveReadsWriterConfig {
         envy_load("vm_runner.protective_reads", "VM_RUNNER_PROTECTIVE_READS_")
     }
 }
+
+impl FromEnv for BasicWitnessInputProducerConfig {
+    fn from_env() -> anyhow::Result<Self> {
+        envy_load("vm_runner.bwip", "VM_RUNNER_BWIP_")
+    }
+}
diff --git a/core/lib/protobuf_config/src/general.rs b/core/lib/protobuf_config/src/general.rs
index 834977759ae2..e30ad56ae7f3 100644
--- a/core/lib/protobuf_config/src/general.rs
+++ b/core/lib/protobuf_config/src/general.rs
@@ -39,6 +39,10 @@ impl ProtoRepr for proto::GeneralConfig {
             observability: read_optional_repr(&self.observability).context("observability")?,
             protective_reads_writer_config: read_optional_repr(&self.protective_reads_writer)
                 .context("protective_reads_writer")?,
+            basic_witness_input_producer_config: read_optional_repr(
+                &self.basic_witness_input_producer,
+            )
+            .context("basic_witness_input_producer")?,
             core_object_store: read_optional_repr(&self.core_object_store)
                 .context("core_object_store")?,
         })
@@ -76,6 +80,10 @@ impl ProtoRepr for proto::GeneralConfig {
                 .protective_reads_writer_config
                 .as_ref()
                 .map(ProtoRepr::build),
+            basic_witness_input_producer: this
+                .basic_witness_input_producer_config
+                .as_ref()
+                .map(ProtoRepr::build),
             core_object_store: this.core_object_store.as_ref().map(ProtoRepr::build),
         }
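Given the `VM_RUNNER_BWIP_` prefix registered in the `FromEnv` impl above, configuring the component through the environment would look roughly like the sketch below. The values are illustrative, and the exact variable-to-field mapping is assumed to follow `envy`'s usual prefix rules:

```rust
use zksync_config::configs::BasicWitnessInputProducerConfig;
use zksync_env_config::FromEnv;

fn load_bwip_config() -> anyhow::Result<BasicWitnessInputProducerConfig> {
    // Illustrative values; `envy` strips the prefix and lower-cases the rest,
    // so `VM_RUNNER_BWIP_WINDOW_SIZE` maps to the `window_size` field.
    std::env::set_var("VM_RUNNER_BWIP_DB_PATH", "./db/basic_witness_input_producer");
    std::env::set_var("VM_RUNNER_BWIP_WINDOW_SIZE", "3");
    std::env::set_var("VM_RUNNER_BWIP_FIRST_PROCESSED_BATCH", "0");
    BasicWitnessInputProducerConfig::from_env()
}
```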
} diff --git a/core/lib/protobuf_config/src/proto/config/general.proto b/core/lib/protobuf_config/src/proto/config/general.proto index fdfe257aecf1..bc00c235b7f1 100644 --- a/core/lib/protobuf_config/src/proto/config/general.proto +++ b/core/lib/protobuf_config/src/proto/config/general.proto @@ -17,26 +17,27 @@ import "zksync/config/vm_runner.proto"; import "zksync/config/object_store.proto"; message GeneralConfig { - optional config.database.Postgres postgres = 1; - optional config.api.Api api = 2; - optional config.contract_verifier.ContractVerifier contract_verifier = 3; - optional config.circuit_breaker.CircuitBreaker circuit_breaker = 5; - optional config.chain.Mempool mempool = 6; - optional config.chain.OperationsManager operations_manager = 8; - optional config.chain.StateKeeper state_keeper = 9; - optional config.house_keeper.HouseKeeper house_keeper = 10; - optional config.prover.Prover prover = 12; - optional config.utils.Prometheus prometheus = 15; - optional config.database.DB db = 20; - optional config.eth.ETH eth = 22; - optional config.prover.WitnessGenerator witness_generator = 24; - optional config.prover.WitnessVectorGenerator witness_vector_generator = 25; - optional config.prover.ProofCompressor proof_compressor = 27; - optional config.prover.ProofDataHandler data_handler = 28; - optional config.prover.ProverGroup prover_group = 29; - optional config.prover.ProverGateway prover_gateway = 30; - optional config.snapshot_creator.SnapshotsCreator snapshot_creator = 31; - optional config.observability.Observability observability = 32; - optional config.vm_runner.ProtectiveReadsWriter protective_reads_writer = 33; - optional config.object_store.ObjectStore core_object_store = 34; + optional config.database.Postgres postgres = 1; + optional config.api.Api api = 2; + optional config.contract_verifier.ContractVerifier contract_verifier = 3; + optional config.circuit_breaker.CircuitBreaker circuit_breaker = 5; + optional config.chain.Mempool mempool = 6; + optional config.chain.OperationsManager operations_manager = 8; + optional config.chain.StateKeeper state_keeper = 9; + optional config.house_keeper.HouseKeeper house_keeper = 10; + optional config.prover.Prover prover = 12; + optional config.utils.Prometheus prometheus = 15; + optional config.database.DB db = 20; + optional config.eth.ETH eth = 22; + optional config.prover.WitnessGenerator witness_generator = 24; + optional config.prover.WitnessVectorGenerator witness_vector_generator = 25; + optional config.prover.ProofCompressor proof_compressor = 27; + optional config.prover.ProofDataHandler data_handler = 28; + optional config.prover.ProverGroup prover_group = 29; + optional config.prover.ProverGateway prover_gateway = 30; + optional config.snapshot_creator.SnapshotsCreator snapshot_creator = 31; + optional config.observability.Observability observability = 32; + optional config.vm_runner.ProtectiveReadsWriter protective_reads_writer = 33; + optional config.vm_runner.BasicWitnessInputProducer basic_witness_input_producer = 35; + optional config.object_store.ObjectStore core_object_store = 34; } diff --git a/core/lib/protobuf_config/src/proto/config/vm_runner.proto b/core/lib/protobuf_config/src/proto/config/vm_runner.proto index c0c82d4d415f..ba443174e68e 100644 --- a/core/lib/protobuf_config/src/proto/config/vm_runner.proto +++ b/core/lib/protobuf_config/src/proto/config/vm_runner.proto @@ -3,7 +3,13 @@ syntax = "proto3"; package zksync.config.vm_runner; message ProtectiveReadsWriter { - optional string db_path = 1; // 
required; fs path - optional uint64 window_size = 2; // required - optional uint64 first_processed_batch = 3; // required + optional string db_path = 1; // required; fs path + optional uint64 window_size = 2; // required + optional uint64 first_processed_batch = 3; // required +} + +message BasicWitnessInputProducer { + optional string db_path = 1; // required; fs path + optional uint64 window_size = 2; // required + optional uint64 first_processed_batch = 3; // required } diff --git a/core/lib/protobuf_config/src/vm_runner.rs b/core/lib/protobuf_config/src/vm_runner.rs index 78bfee750521..cc0d53ad519e 100644 --- a/core/lib/protobuf_config/src/vm_runner.rs +++ b/core/lib/protobuf_config/src/vm_runner.rs @@ -26,3 +26,25 @@ impl ProtoRepr for proto::ProtectiveReadsWriter { } } } + +impl ProtoRepr for proto::BasicWitnessInputProducer { + type Type = configs::BasicWitnessInputProducerConfig; + + fn read(&self) -> anyhow::Result { + Ok(Self::Type { + db_path: required(&self.db_path).context("db_path")?.clone(), + window_size: *required(&self.window_size).context("window_size")? as u32, + first_processed_batch: L1BatchNumber( + *required(&self.first_processed_batch).context("first_batch")? as u32, + ), + }) + } + + fn build(this: &Self::Type) -> Self { + Self { + db_path: Some(this.db_path.clone()), + window_size: Some(this.window_size as u64), + first_processed_batch: Some(this.first_processed_batch.0 as u64), + } + } +} diff --git a/core/lib/zksync_core_leftovers/src/lib.rs b/core/lib/zksync_core_leftovers/src/lib.rs index 8e85bad9cc33..0e0404947bd2 100644 --- a/core/lib/zksync_core_leftovers/src/lib.rs +++ b/core/lib/zksync_core_leftovers/src/lib.rs @@ -88,6 +88,8 @@ pub enum Component { CommitmentGenerator, /// VM runner-based component that saves protective reads to Postgres. VmRunnerProtectiveReads, + /// VM runner-based component that saves VM execution data for basic witness generation. 
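Before the `Component` variant and its `FromStr` arm that follow below, a hedged usage sketch: once wired in, the component is selected by name on the server command line (e.g. `--components vm_runner_bwip`):

```rust
fn parse_bwip_component() -> Components {
    // Matches the `"vm_runner_bwip"` arm added to `Components::from_str` below.
    "vm_runner_bwip".parse().expect("valid component name")
}
```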
+ VmRunnerBwip, } #[derive(Debug)] @@ -127,6 +129,7 @@ impl FromStr for Components { "vm_runner_protective_reads" => { Ok(Components(vec![Component::VmRunnerProtectiveReads])) } + "vm_runner_bwip" => Ok(Components(vec![Component::VmRunnerBwip])), other => Err(format!("{} is not a valid component name", other)), } } diff --git a/core/lib/zksync_core_leftovers/src/temp_config_store/mod.rs b/core/lib/zksync_core_leftovers/src/temp_config_store/mod.rs index cb3e0d08794d..d810f4b7261e 100644 --- a/core/lib/zksync_core_leftovers/src/temp_config_store/mod.rs +++ b/core/lib/zksync_core_leftovers/src/temp_config_store/mod.rs @@ -7,6 +7,7 @@ use zksync_config::{ }, fri_prover_group::FriProverGroupConfig, house_keeper::HouseKeeperConfig, + vm_runner::BasicWitnessInputProducerConfig, wallets::{AddressWallet, EthSender, StateKeeper, Wallet, Wallets}, FriProofCompressorConfig, FriProverConfig, FriProverGatewayConfig, FriWitnessGeneratorConfig, FriWitnessVectorGeneratorConfig, GeneralConfig, @@ -55,6 +56,7 @@ pub struct TempConfigStore { pub observability: Option, pub snapshot_creator: Option, pub protective_reads_writer_config: Option, + pub basic_witness_input_producer_config: Option, pub core_object_store: Option, } @@ -82,6 +84,7 @@ impl TempConfigStore { snapshot_creator: self.snapshot_creator.clone(), observability: self.observability.clone(), protective_reads_writer_config: self.protective_reads_writer_config.clone(), + basic_witness_input_producer_config: self.basic_witness_input_producer_config.clone(), core_object_store: self.core_object_store.clone(), } } diff --git a/core/node/node_framework/src/implementations/layers/vm_runner/bwip.rs b/core/node/node_framework/src/implementations/layers/vm_runner/bwip.rs new file mode 100644 index 000000000000..c2f784040e93 --- /dev/null +++ b/core/node/node_framework/src/implementations/layers/vm_runner/bwip.rs @@ -0,0 +1,90 @@ +use zksync_config::configs::vm_runner::BasicWitnessInputProducerConfig; +use zksync_types::L2ChainId; +use zksync_vm_runner::BasicWitnessInputProducer; + +use crate::{ + implementations::resources::{ + object_store::ObjectStoreResource, + pools::{MasterPool, PoolResource}, + }, + service::{ServiceContext, StopReceiver}, + task::{Task, TaskId}, + wiring_layer::{WiringError, WiringLayer}, +}; + +#[derive(Debug)] +pub struct BasicWitnessInputProducerLayer { + basic_witness_input_producer_config: BasicWitnessInputProducerConfig, + zksync_network_id: L2ChainId, +} + +impl BasicWitnessInputProducerLayer { + pub fn new( + basic_witness_input_producer_config: BasicWitnessInputProducerConfig, + zksync_network_id: L2ChainId, + ) -> Self { + Self { + basic_witness_input_producer_config, + zksync_network_id, + } + } +} + +#[async_trait::async_trait] +impl WiringLayer for BasicWitnessInputProducerLayer { + fn layer_name(&self) -> &'static str { + "vm_runner_bwip" + } + + async fn wire(self: Box, mut context: ServiceContext<'_>) -> Result<(), WiringError> { + let master_pool = context.get_resource::>().await?; + let object_store = context.get_resource::().await?; + + let (basic_witness_input_producer, tasks) = BasicWitnessInputProducer::new( + // One for `StorageSyncTask` which can hold a long-term connection in case it needs to + // catch up cache. + // + // One for `ConcurrentOutputHandlerFactoryTask`/`VmRunner` as they need occasional access + // to DB for querying last processed batch and last ready to be loaded batch. 
+            //
+            // `window_size` connections for `BasicWitnessInputProducerOutputHandlerFactory`
+            // as there can be multiple output handlers holding multi-second connections to write
+            // large amounts of witness input data.
+            master_pool
+                .get_custom(self.basic_witness_input_producer_config.window_size + 2)
+                .await?,
+            object_store.0,
+            self.basic_witness_input_producer_config.db_path,
+            self.zksync_network_id,
+            self.basic_witness_input_producer_config
+                .first_processed_batch,
+            self.basic_witness_input_producer_config.window_size,
+        )
+        .await?;
+
+        context.add_task(Box::new(tasks.loader_task));
+        context.add_task(Box::new(tasks.output_handler_factory_task));
+        context.add_task(Box::new(BasicWitnessInputProducerTask {
+            basic_witness_input_producer,
+        }));
+        Ok(())
+    }
+}
+
+#[derive(Debug)]
+struct BasicWitnessInputProducerTask {
+    basic_witness_input_producer: BasicWitnessInputProducer,
+}
+
+#[async_trait::async_trait]
+impl Task for BasicWitnessInputProducerTask {
+    fn id(&self) -> TaskId {
+        "vm_runner/bwip".into()
+    }
+
+    async fn run(self: Box<Self>, stop_receiver: StopReceiver) -> anyhow::Result<()> {
+        self.basic_witness_input_producer
+            .run(&stop_receiver.0)
+            .await
+    }
+}
diff --git a/core/node/node_framework/src/implementations/layers/vm_runner/mod.rs b/core/node/node_framework/src/implementations/layers/vm_runner/mod.rs
index a105ad81ee60..0b3f611038b2 100644
--- a/core/node/node_framework/src/implementations/layers/vm_runner/mod.rs
+++ b/core/node/node_framework/src/implementations/layers/vm_runner/mod.rs
@@ -5,6 +5,7 @@ use crate::{
     task::{Task, TaskId},
 };
 
+pub mod bwip;
 pub mod protective_reads;
 
 #[async_trait::async_trait]
diff --git a/core/node/proof_data_handler/src/request_processor.rs b/core/node/proof_data_handler/src/request_processor.rs
index 9231af372b1a..54524458a63f 100644
--- a/core/node/proof_data_handler/src/request_processor.rs
+++ b/core/node/proof_data_handler/src/request_processor.rs
@@ -9,7 +9,7 @@ use zksync_prover_interface::{
         ProofGenerationData, ProofGenerationDataRequest, ProofGenerationDataResponse,
         SubmitProofRequest, SubmitProofResponse,
     },
-    inputs::{PrepareBasicCircuitsJob, VMRunWitnessInputData, WitnessInputData},
+    inputs::WitnessInputData,
 };
 use zksync_types::{
     basic_fri_types::Eip4844Blobs,
@@ -227,7 +227,7 @@ impl RequestProcessor {
         }
         storage
             .proof_generation_dal()
-            .save_proof_artifacts_metadata(l1_batch_number, &blob_url)
+            .save_proof_merkle_paths_artifacts_metadata(l1_batch_number, &blob_url)
             .await
             .map_err(RequestProcessorError::Dal)?;
     }
diff --git a/core/node/state_keeper/src/batch_executor/main_executor.rs b/core/node/state_keeper/src/batch_executor/main_executor.rs
index 9a4f87294268..8a421e1066f9 100644
--- a/core/node/state_keeper/src/batch_executor/main_executor.rs
+++ b/core/node/state_keeper/src/batch_executor/main_executor.rs
@@ -1,4 +1,4 @@
-use std::{collections::HashMap, sync::Arc};
+use std::sync::Arc;
 
 use anyhow::Context as _;
 use async_trait::async_trait;
@@ -18,7 +18,7 @@ use tokio::{
 };
 use zksync_shared_metrics::{InteractionType, TxStage, APP_METRICS};
 use zksync_state::{ReadStorage, ReadStorageFactory, StorageView, WriteStorage};
-use zksync_types::{vm_trace::Call, StorageKey, StorageValue, Transaction};
+use zksync_types::{vm_trace::Call, Transaction};
 use zksync_utils::bytecode::CompressedBytecodeInfo;
 
 use super::{BatchExecutor, BatchExecutorHandle, Command, TxExecutionResult};
diff --git a/core/node/vm_runner/src/impls/bwip.rs b/core/node/vm_runner/src/impls/bwip.rs
index 12c4514bfb68..b504308fd85e 100644
--- a/core/node/vm_runner/src/impls/bwip.rs
diff --git a/core/node/vm_runner/src/impls/bwip.rs b/core/node/vm_runner/src/impls/bwip.rs
index 7c54c39d9fd6..b504308fd85e 100644
--- a/core/node/vm_runner/src/impls/bwip.rs
+++ b/core/node/vm_runner/src/impls/bwip.rs
@@ -17,14 +17,14 @@ use crate::{
     OutputHandlerFactory, VmRunner, VmRunnerIo, VmRunnerStorage,
 };
 
-/// A standalone component that writes protective reads asynchronously to state keeper.
+/// A standalone component that writes witness input data asynchronously to the state keeper.
 #[derive(Debug)]
 pub struct BasicWitnessInputProducer {
     vm_runner: VmRunner,
 }
 
 impl BasicWitnessInputProducer {
-    /// Create a new protective reads writer from the provided DB parameters and window size which
+    /// Create a new BWIP from the provided DB parameters and window size which
     /// regulates how many batches this component can handle at the same time.
     pub async fn new(
         pool: ConnectionPool<Core>,
@@ -63,7 +63,7 @@ impl BasicWitnessInputProducer {
         ))
     }
 
-    /// Continuously loads new available batches and writes the corresponding protective reads
+    /// Continuously loads new available batches and writes the corresponding data
    /// produced by that batch.
     ///
     /// # Errors
@@ -74,7 +74,7 @@ impl BasicWitnessInputProducer {
     }
 }
 
-/// A collections of tasks that need to be run in order for protective reads writer to work as
+/// A collection of tasks that need to be run in order for BWIP to work as
 /// intended.
 #[derive(Debug)]
 pub struct BasicWitnessInputProducerTasks {
@@ -238,7 +238,23 @@ impl StateKeeperOutputHandler for BasicWitnessInputProducerOutputHandler {
             witness_block_state: block_state,
         };
 
-        self.object_store.put(l1_batch_number, &result).await?;
+        let blob_url = self.object_store.put(l1_batch_number, &result).await?;
+        self.pool
+            .connection()
+            .await
+            .unwrap()
+            .vm_runner_dal()
+            .mark_bwip_batch_as_completed(l1_batch_number)
+            .await
+            .unwrap();
+        self.pool
+            .connection()
+            .await
+            .unwrap()
+            .proof_generation_dal()
+            .save_vm_runner_artifacts_metadata(l1_batch_number, &blob_url)
+            .await
+            .unwrap();
 
         Ok(())
     }
diff --git a/core/node/vm_runner/src/impls/mod.rs b/core/node/vm_runner/src/impls/mod.rs
index af671639dce7..5bae7e03f568 100644
--- a/core/node/vm_runner/src/impls/mod.rs
+++ b/core/node/vm_runner/src/impls/mod.rs
@@ -1,4 +1,5 @@
 mod bwip;
 mod protective_reads;
 
+pub use bwip::{BasicWitnessInputProducer, BasicWitnessInputProducerTasks};
 pub use protective_reads::{ProtectiveReadsWriter, ProtectiveReadsWriterTasks};
diff --git a/core/node/vm_runner/src/lib.rs b/core/node/vm_runner/src/lib.rs
index 50cf2a4433c1..d6c9a88185ee 100644
--- a/core/node/vm_runner/src/lib.rs
+++ b/core/node/vm_runner/src/lib.rs
@@ -13,7 +13,10 @@ mod metrics;
 #[cfg(test)]
 mod tests;
 
-pub use impls::{ProtectiveReadsWriter, ProtectiveReadsWriterTasks};
+pub use impls::{
+    BasicWitnessInputProducer, BasicWitnessInputProducerTasks, ProtectiveReadsWriter,
+    ProtectiveReadsWriterTasks,
+};
 pub use io::VmRunnerIo;
 pub use output_handler::{
     ConcurrentOutputHandlerFactory, ConcurrentOutputHandlerFactoryTask, OutputHandlerFactory,

From 46cd1ff2ce9a138db96665116382cdb09fcf0057 Mon Sep 17 00:00:00 2001
From: Lech <88630083+Artemka374@users.noreply.github.com>
Date: Mon, 24 Jun 2024 17:52:15 +0300
Subject: [PATCH 11/56] fix some builds

---
 core/node/vm_runner/src/impls/bwip.rs         | 257 ++++++++++++------
 prover/config/src/lib.rs                      |   9 +-
 .../witness_generator/src/basic_circuits.rs   |  11 +-
 3 files changed, 188 insertions(+), 89 deletions(-)

diff --git a/core/node/vm_runner/src/impls/bwip.rs b/core/node/vm_runner/src/impls/bwip.rs
index b504308fd85e..69ec5a0fa3fe 100644
--- a/core/node/vm_runner/src/impls/bwip.rs
+++ b/core/node/vm_runner/src/impls/bwip.rs
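The `handle_l1_batch` bookkeeping above takes two separate pool connections and `unwrap()`s every fallible step. A minimal sketch of the same flow that reuses one connection and propagates errors instead; `anyhow::Context` is already imported in this module, though the exact message wording here is an assumption:

    let blob_url = self.object_store.put(l1_batch_number, &result).await?;
    let mut connection = self
        .pool
        .connection()
        .await
        .context("failed to get connection for BWIP bookkeeping")?;
    connection
        .vm_runner_dal()
        .mark_bwip_batch_as_completed(l1_batch_number)
        .await
        .context("failed to mark BWIP batch as completed")?;
    connection
        .proof_generation_dal()
        .save_vm_runner_artifacts_metadata(l1_batch_number, &blob_url)
        .await
        .context("failed to save VM runner artifacts metadata")?;
    Ok(())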
@@ -149,94 +149,14 @@ impl StateKeeperOutputHandler for BasicWitnessInputProducerOutputHandler { let mut connection = self.pool.connection().await?; - let block_header = connection - .blocks_dal() - .get_l1_batch_header(l1_batch_number) - .await - .unwrap() - .unwrap(); - - let initial_heap_content = connection - .blocks_dal() - .get_initial_bootloader_heap(l1_batch_number) - .await - .unwrap() - .unwrap(); - - let account_code_hash = h256_to_u256(block_header.base_system_contracts_hashes.default_aa); - let account_bytecode_bytes = connection - .factory_deps_dal() - .get_sealed_factory_dep(block_header.base_system_contracts_hashes.default_aa) - .await - .expect("Failed fetching default account bytecode from DB") - .expect("Default account bytecode should exist"); - let account_bytecode = bytes_to_chunks(&account_bytecode_bytes); - - let hashes: HashSet = block_header - .used_contract_hashes - .iter() - // SMA-1555: remove this hack once updated to the latest version of `zkevm_test_harness` - .filter(|&&hash| { - hash != h256_to_u256(block_header.base_system_contracts_hashes.bootloader) - }) - .map(|hash| u256_to_h256(*hash)) - .collect(); - let mut used_bytecodes = connection - .factory_deps_dal() - .get_factory_deps(&hashes) - .await; - if block_header - .used_contract_hashes - .contains(&account_code_hash) - { - used_bytecodes.insert(account_code_hash, account_bytecode); - } - - assert_eq!( - hashes.len(), - used_bytecodes.len(), - "{} factory deps are not found in DB", - hashes.len() - used_bytecodes.len() - ); - - let previous_batch_with_metadata = connection - .blocks_dal() - .get_l1_batch_metadata(L1BatchNumber(block_header.number.checked_sub(1).unwrap())) - .await - .unwrap() - .unwrap(); - - let StorageOracleInfo { - storage_refunds, - pubdata_costs, - } = connection - .blocks_dal() - .get_storage_oracle_info(block_header.number) - .await - .unwrap() - .unwrap(); + let mut result = get_database_witness_input_data(&mut connection, l1_batch_number).await; let block_state = WitnessBlockState { read_storage_key: updates_manager.storage_view_cache.read_storage_keys(), is_write_initial: updates_manager.storage_view_cache.initial_writes(), }; - let result = VMRunWitnessInputData { - l1_batch_header: block_header.clone(), - previous_batch_with_metadata, - used_bytecodes, - initial_heap_content, - - protocol_version: block_header - .protocol_version - .unwrap_or(ProtocolVersionId::last_potentially_undefined()), - - bootloader_code: vec![], - default_account_code_hash: account_code_hash, - storage_refunds, - pubdata_costs, - witness_block_state: block_state, - }; + result.witness_block_state = block_state; let blob_url = self.object_store.put(l1_batch_number, &result).await?; self.pool @@ -260,6 +180,179 @@ impl StateKeeperOutputHandler for BasicWitnessInputProducerOutputHandler { } } +// async fn get_updates_manager_witness_input_data( +// updates_manager: Arc, +// ) -> VMRunWitnessInputData { +// let block_header = connection +// .blocks_dal() +// .get_l1_batch_header(l1_batch_number) +// .await +// .unwrap() +// .unwrap(); +// +// let initial_heap_content = connection +// .blocks_dal() +// .get_initial_bootloader_heap(l1_batch_number) +// .await +// .unwrap() +// .unwrap(); +// +// let account_code_hash = h256_to_u256(block_header.base_system_contracts_hashes.default_aa); +// let account_bytecode_bytes = connection +// .factory_deps_dal() +// .get_sealed_factory_dep(block_header.base_system_contracts_hashes.default_aa) +// .await +// .expect("Failed fetching default account bytecode from 
DB") +// .expect("Default account bytecode should exist"); +// let account_bytecode = bytes_to_chunks(&account_bytecode_bytes); +// +// let hashes: HashSet = block_header +// .used_contract_hashes +// .iter() +// // SMA-1555: remove this hack once updated to the latest version of `zkevm_test_harness` +// .filter(|&&hash| hash != h256_to_u256(block_header.base_system_contracts_hashes.bootloader)) +// .map(|hash| u256_to_h256(*hash)) +// .collect(); +// let mut used_bytecodes = connection +// .factory_deps_dal() +// .get_factory_deps(&hashes) +// .await; +// if block_header +// .used_contract_hashes +// .contains(&account_code_hash) +// { +// used_bytecodes.insert(account_code_hash, account_bytecode); +// } +// +// assert_eq!( +// hashes.len(), +// used_bytecodes.len(), +// "{} factory deps are not found in DB", +// hashes.len() - used_bytecodes.len() +// ); +// +// let previous_batch_with_metadata = connection +// .blocks_dal() +// .get_l1_batch_metadata(L1BatchNumber(block_header.number.checked_sub(1).unwrap())) +// .await +// .unwrap() +// .unwrap(); +// +// let StorageOracleInfo { +// storage_refunds, +// pubdata_costs, +// } = connection +// .blocks_dal() +// .get_storage_oracle_info(block_header.number) +// .await +// .unwrap() +// .unwrap(); +// +// VMRunWitnessInputData { +// l1_batch_header: block_header.clone(), +// previous_batch_with_metadata, +// used_bytecodes, +// initial_heap_content, +// +// protocol_version: block_header +// .protocol_version +// .unwrap_or(ProtocolVersionId::last_potentially_undefined()), +// +// bootloader_code: vec![], +// default_account_code_hash: account_code_hash, +// storage_refunds, +// pubdata_costs, +// witness_block_state: WitnessBlockState::default(), +// } +// } + +async fn get_database_witness_input_data( + connection: &mut Connection, + l1_batch_number: L1BatchNumber, +) -> VMRunWitnessInputData { + let block_header = connection + .blocks_dal() + .get_l1_batch_header(l1_batch_number) + .await + .unwrap() + .unwrap(); + + let initial_heap_content = connection + .blocks_dal() + .get_initial_bootloader_heap(l1_batch_number) + .await + .unwrap() + .unwrap(); + + let account_code_hash = h256_to_u256(block_header.base_system_contracts_hashes.default_aa); + let account_bytecode_bytes = connection + .factory_deps_dal() + .get_sealed_factory_dep(block_header.base_system_contracts_hashes.default_aa) + .await + .expect("Failed fetching default account bytecode from DB") + .expect("Default account bytecode should exist"); + let account_bytecode = bytes_to_chunks(&account_bytecode_bytes); + + let hashes: HashSet = block_header + .used_contract_hashes + .iter() + // SMA-1555: remove this hack once updated to the latest version of `zkevm_test_harness` + .filter(|&&hash| hash != h256_to_u256(block_header.base_system_contracts_hashes.bootloader)) + .map(|hash| u256_to_h256(*hash)) + .collect(); + let mut used_bytecodes = connection + .factory_deps_dal() + .get_factory_deps(&hashes) + .await; + if block_header + .used_contract_hashes + .contains(&account_code_hash) + { + used_bytecodes.insert(account_code_hash, account_bytecode); + } + + assert_eq!( + hashes.len(), + used_bytecodes.len(), + "{} factory deps are not found in DB", + hashes.len() - used_bytecodes.len() + ); + + let previous_batch_with_metadata = connection + .blocks_dal() + .get_l1_batch_metadata(L1BatchNumber(block_header.number.checked_sub(1).unwrap())) + .await + .unwrap() + .unwrap(); + + let StorageOracleInfo { + storage_refunds, + pubdata_costs, + } = connection + .blocks_dal() + 
.get_storage_oracle_info(block_header.number) + .await + .unwrap() + .unwrap(); + + VMRunWitnessInputData { + l1_batch_header: block_header.clone(), + previous_batch_with_metadata, + used_bytecodes, + initial_heap_content, + + protocol_version: block_header + .protocol_version + .unwrap_or(ProtocolVersionId::last_potentially_undefined()), + + bootloader_code: vec![], + default_account_code_hash: account_code_hash, + storage_refunds, + pubdata_costs, + witness_block_state: WitnessBlockState::default(), + } +} + #[derive(Debug)] struct BasicWitnessInputProducerOutputHandlerFactory { pool: ConnectionPool, diff --git a/prover/config/src/lib.rs b/prover/config/src/lib.rs index f501dd2d6e06..3cbfcb78ff49 100644 --- a/prover/config/src/lib.rs +++ b/prover/config/src/lib.rs @@ -8,10 +8,10 @@ use zksync_config::{ }, fri_prover_group::FriProverGroupConfig, house_keeper::HouseKeeperConfig, - DatabaseSecrets, FriProofCompressorConfig, FriProverConfig, FriProverGatewayConfig, - FriWitnessGeneratorConfig, FriWitnessVectorGeneratorConfig, GeneralConfig, - ObjectStoreConfig, ObservabilityConfig, PrometheusConfig, ProofDataHandlerConfig, - ProtectiveReadsWriterConfig, + BasicWitnessInputProducerConfig, DatabaseSecrets, FriProofCompressorConfig, + FriProverConfig, FriProverGatewayConfig, FriWitnessGeneratorConfig, + FriWitnessVectorGeneratorConfig, GeneralConfig, ObjectStoreConfig, ObservabilityConfig, + PrometheusConfig, ProofDataHandlerConfig, ProtectiveReadsWriterConfig, }, ApiConfig, ContractVerifierConfig, DBConfig, EthConfig, EthWatchConfig, GasAdjusterConfig, PostgresConfig, SnapshotsCreatorConfig, @@ -49,6 +49,7 @@ fn load_env_config() -> anyhow::Result { observability: ObservabilityConfig::from_env().ok(), snapshot_creator: SnapshotsCreatorConfig::from_env().ok(), protective_reads_writer_config: ProtectiveReadsWriterConfig::from_env().ok(), + basic_witness_input_producer_config: BasicWitnessInputProducerConfig::from_env().ok(), core_object_store: ObjectStoreConfig::from_env().ok(), }) } diff --git a/prover/witness_generator/src/basic_circuits.rs b/prover/witness_generator/src/basic_circuits.rs index 3446fb94f051..1e0e77c38941 100644 --- a/prover/witness_generator/src/basic_circuits.rs +++ b/prover/witness_generator/src/basic_circuits.rs @@ -450,7 +450,7 @@ async fn generate_witness( let witness_storage = WitnessStorage::new(input.vm_run_data.witness_block_state); let storage_view = StorageView::new(witness_storage).to_rc_ptr(); - let vm_storage_oracle: VmStorageOracle>, HistoryDisabled> = + let vm_storage_oracle: VmStorageOracle>, HistoryDisabled> = VmStorageOracle::new(storage_view.clone()); let storage_oracle = StorageOracle::new( vm_storage_oracle, @@ -520,12 +520,17 @@ async fn generate_witness( recursion_urls.retain(|(circuit_id, _, _)| circuits_present.contains(circuit_id)); scheduler_witness.previous_block_meta_hash = input + .vm_run_data .previous_batch_with_metadata .metadata .meta_parameters_hash .0; - scheduler_witness.previous_block_aux_hash = - input.previous_batch_with_metadata.metadata.aux_data_hash.0; + scheduler_witness.previous_block_aux_hash = input + .vm_run_data + .previous_batch_with_metadata + .metadata + .aux_data_hash + .0; ( circuit_urls, From f9fe7aa7950bed0805a41ea812aa54fd292c7dac Mon Sep 17 00:00:00 2001 From: Lech <88630083+Artemka374@users.noreply.github.com> Date: Tue, 25 Jun 2024 14:01:38 +0300 Subject: [PATCH 12/56] get some data from updates manager --- core/lib/prover_interface/src/inputs.rs | 6 +- core/node/state_keeper/src/updates/mod.rs | 2 +- 
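If the `assert_eq!` in `get_database_witness_input_data` above ever fires, it reports only how many factory deps are missing. A sketch that names the missing hashes instead, reusing the `hashes` and `used_bytecodes` bindings from that function and the `h256_to_u256` helper already imported by this module:

    // `hashes` is a HashSet<H256>; `used_bytecodes` is keyed by U256.
    let missing: Vec<_> = hashes
        .iter()
        .filter(|&&hash| !used_bytecodes.contains_key(&h256_to_u256(hash)))
        .collect();
    assert!(
        missing.is_empty(),
        "{} factory deps are not found in DB: {:?}",
        missing.len(),
        missing
    );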
core/node/vm_runner/src/impls/bwip.rs | 201 +++++++++--------- .../witness_generator/src/basic_circuits.rs | 23 +- 4 files changed, 114 insertions(+), 118 deletions(-) diff --git a/core/lib/prover_interface/src/inputs.rs b/core/lib/prover_interface/src/inputs.rs index 2278dbfec185..311c9df5a277 100644 --- a/core/lib/prover_interface/src/inputs.rs +++ b/core/lib/prover_interface/src/inputs.rs @@ -138,8 +138,10 @@ impl PrepareBasicCircuitsJob { #[derive(Debug, Clone, Serialize, Deserialize)] pub struct VMRunWitnessInputData { - pub l1_batch_header: L1BatchHeader, - pub previous_batch_with_metadata: L1BatchWithMetadata, + pub l1_batch_number: L1BatchNumber, + pub previous_root_hash: H256, + pub previous_meta_hash: H256, + pub previous_aux_hash: H256, pub used_bytecodes: HashMap>, pub initial_heap_content: Vec<(usize, U256)>, pub protocol_version: ProtocolVersionId, diff --git a/core/node/state_keeper/src/updates/mod.rs b/core/node/state_keeper/src/updates/mod.rs index 4c31a1f9adfb..3db8deca9797 100644 --- a/core/node/state_keeper/src/updates/mod.rs +++ b/core/node/state_keeper/src/updates/mod.rs @@ -69,7 +69,7 @@ impl UpdatesManager { self.batch_timestamp } - pub(crate) fn base_system_contract_hashes(&self) -> BaseSystemContractsHashes { + pub fn base_system_contract_hashes(&self) -> BaseSystemContractsHashes { self.base_system_contract_hashes } diff --git a/core/node/vm_runner/src/impls/bwip.rs b/core/node/vm_runner/src/impls/bwip.rs index 69ec5a0fa3fe..499a4be11e3d 100644 --- a/core/node/vm_runner/src/impls/bwip.rs +++ b/core/node/vm_runner/src/impls/bwip.rs @@ -149,7 +149,19 @@ impl StateKeeperOutputHandler for BasicWitnessInputProducerOutputHandler { let mut connection = self.pool.connection().await?; - let mut result = get_database_witness_input_data(&mut connection, l1_batch_number).await; + let db_result = get_database_witness_input_data(&mut connection, l1_batch_number).await; + let mut result = + get_updates_manager_witness_input_data(updates_manager.clone(), &mut connection).await; + + // todo: update this assert + assert_eq!(db_result, result); + + let previous_batch_with_metadata = connection + .blocks_dal() + .get_l1_batch_metadata(L1BatchNumber(l1_batch_number.checked_sub(1).unwrap())) + .await + .unwrap() + .unwrap(); let block_state = WitnessBlockState { read_storage_key: updates_manager.storage_view_cache.read_storage_keys(), @@ -157,6 +169,9 @@ impl StateKeeperOutputHandler for BasicWitnessInputProducerOutputHandler { }; result.witness_block_state = block_state; + result.previous_aux_hash = previous_batch_with_metadata.metadata.aux_data_hash; + result.previous_meta_hash = previous_batch_with_metadata.metadata.meta_parameters_hash; + result.previous_root_hash = previous_batch_with_metadata.metadata.root_hash; let blob_url = self.object_store.put(l1_batch_number, &result).await?; self.pool @@ -180,91 +195,85 @@ impl StateKeeperOutputHandler for BasicWitnessInputProducerOutputHandler { } } -// async fn get_updates_manager_witness_input_data( -// updates_manager: Arc, -// ) -> VMRunWitnessInputData { -// let block_header = connection -// .blocks_dal() -// .get_l1_batch_header(l1_batch_number) -// .await -// .unwrap() -// .unwrap(); -// -// let initial_heap_content = connection -// .blocks_dal() -// .get_initial_bootloader_heap(l1_batch_number) -// .await -// .unwrap() -// .unwrap(); -// -// let account_code_hash = h256_to_u256(block_header.base_system_contracts_hashes.default_aa); -// let account_bytecode_bytes = connection -// .factory_deps_dal() -// 
.get_sealed_factory_dep(block_header.base_system_contracts_hashes.default_aa) -// .await -// .expect("Failed fetching default account bytecode from DB") -// .expect("Default account bytecode should exist"); -// let account_bytecode = bytes_to_chunks(&account_bytecode_bytes); -// -// let hashes: HashSet = block_header -// .used_contract_hashes -// .iter() -// // SMA-1555: remove this hack once updated to the latest version of `zkevm_test_harness` -// .filter(|&&hash| hash != h256_to_u256(block_header.base_system_contracts_hashes.bootloader)) -// .map(|hash| u256_to_h256(*hash)) -// .collect(); -// let mut used_bytecodes = connection -// .factory_deps_dal() -// .get_factory_deps(&hashes) -// .await; -// if block_header -// .used_contract_hashes -// .contains(&account_code_hash) -// { -// used_bytecodes.insert(account_code_hash, account_bytecode); -// } -// -// assert_eq!( -// hashes.len(), -// used_bytecodes.len(), -// "{} factory deps are not found in DB", -// hashes.len() - used_bytecodes.len() -// ); -// -// let previous_batch_with_metadata = connection -// .blocks_dal() -// .get_l1_batch_metadata(L1BatchNumber(block_header.number.checked_sub(1).unwrap())) -// .await -// .unwrap() -// .unwrap(); -// -// let StorageOracleInfo { -// storage_refunds, -// pubdata_costs, -// } = connection -// .blocks_dal() -// .get_storage_oracle_info(block_header.number) -// .await -// .unwrap() -// .unwrap(); -// -// VMRunWitnessInputData { -// l1_batch_header: block_header.clone(), -// previous_batch_with_metadata, -// used_bytecodes, -// initial_heap_content, -// -// protocol_version: block_header -// .protocol_version -// .unwrap_or(ProtocolVersionId::last_potentially_undefined()), -// -// bootloader_code: vec![], -// default_account_code_hash: account_code_hash, -// storage_refunds, -// pubdata_costs, -// witness_block_state: WitnessBlockState::default(), -// } -// } +async fn get_updates_manager_witness_input_data( + updates_manager: Arc, + connection: &mut Connection, +) -> VMRunWitnessInputData { + let l1_batch_number = updates_manager.l1_batch.number.clone(); + let finished_batch = updates_manager + .l1_batch + .finished + .clone() + .expect(format!("L1 batch {l1_batch_number:?} is not finished").as_str()); + + let block_header = connection + .blocks_dal() + .get_l1_batch_header(l1_batch_number) + .await + .unwrap() + .unwrap(); + + let initial_heap_content = finished_batch.final_bootloader_memory.unwrap(); // might be just empty + let default_aa = updates_manager + .base_system_contract_hashes() + .default_aa + .clone(); + let bootloader = updates_manager + .base_system_contract_hashes() + .bootloader + .clone(); + + let account_code_hash = h256_to_u256(default_aa); + let account_bytecode_bytes = connection + .factory_deps_dal() + .get_sealed_factory_dep(default_aa) + .await + .expect("Failed fetching default account bytecode from DB") + .expect("Default account bytecode should exist"); + let account_bytecode = bytes_to_chunks(&account_bytecode_bytes); + + let hashes: HashSet = finished_batch + .final_execution_state + .used_contract_hashes + .iter() + // SMA-1555: remove this hack once updated to the latest version of `zkevm_test_harness` + .filter(|&&hash| hash != h256_to_u256(bootloader)) + .map(|hash| u256_to_h256(*hash)) + .collect(); + let mut used_bytecodes = connection + .factory_deps_dal() + .get_factory_deps(&hashes) + .await; + if finished_batch + .final_execution_state + .used_contract_hashes + .contains(&account_code_hash) + { + used_bytecodes.insert(account_code_hash, 
account_bytecode); + } + + let storage_refunds = finished_batch.final_execution_state.storage_refunds; + let pubdata_costs = Some(finished_batch.final_execution_state.pubdata_costs); + + VMRunWitnessInputData { + l1_batch_number, + previous_aux_hash: H256::zero(), + previous_meta_hash: H256::zero(), + previous_root_hash: H256::zero(), + used_bytecodes, + initial_heap_content, + + protocol_version: block_header + .protocol_version + .unwrap_or(ProtocolVersionId::last_potentially_undefined()), + + bootloader_code: bytes_to_chunks(bootloader.as_bytes()), + default_account_code_hash: account_code_hash, + storage_refunds, + pubdata_costs, + witness_block_state: WitnessBlockState::default(), + } +} async fn get_database_witness_input_data( connection: &mut Connection, @@ -318,13 +327,6 @@ async fn get_database_witness_input_data( hashes.len() - used_bytecodes.len() ); - let previous_batch_with_metadata = connection - .blocks_dal() - .get_l1_batch_metadata(L1BatchNumber(block_header.number.checked_sub(1).unwrap())) - .await - .unwrap() - .unwrap(); - let StorageOracleInfo { storage_refunds, pubdata_costs, @@ -336,8 +338,10 @@ async fn get_database_witness_input_data( .unwrap(); VMRunWitnessInputData { - l1_batch_header: block_header.clone(), - previous_batch_with_metadata, + l1_batch_number: block_header.number, + previous_root_hash: H256::zero(), + previous_meta_hash: H256::zero(), + previous_aux_hash: H256::zero(), used_bytecodes, initial_heap_content, @@ -345,7 +349,12 @@ async fn get_database_witness_input_data( .protocol_version .unwrap_or(ProtocolVersionId::last_potentially_undefined()), - bootloader_code: vec![], + bootloader_code: bytes_to_chunks( + block_header + .base_system_contracts_hashes + .bootloader + .as_bytes(), + ), default_account_code_hash: account_code_hash, storage_refunds, pubdata_costs, diff --git a/prover/witness_generator/src/basic_circuits.rs b/prover/witness_generator/src/basic_circuits.rs index 1e0e77c38941..b9607629fc8a 100644 --- a/prover/witness_generator/src/basic_circuits.rs +++ b/prover/witness_generator/src/basic_circuits.rs @@ -424,19 +424,14 @@ async fn generate_witness( let mut tree = PrecalculatedMerklePathsProvider::new( input.merkle_paths, - input - .vm_run_data - .previous_batch_with_metadata - .metadata - .root_hash - .0, + input.vm_run_data.previous_root_hash.0, ); let geometry_config = get_geometry_config(); let mut hasher = DefaultHasher::new(); geometry_config.hash(&mut hasher); tracing::info!( "generating witness for block {} using geometry config hash: {}", - input.vm_run_data.l1_batch_header.number.0, + input.vm_run_data.l1_batch_number.0 hasher.finish() ); @@ -519,18 +514,8 @@ async fn generate_witness( recursion_urls.retain(|(circuit_id, _, _)| circuits_present.contains(circuit_id)); - scheduler_witness.previous_block_meta_hash = input - .vm_run_data - .previous_batch_with_metadata - .metadata - .meta_parameters_hash - .0; - scheduler_witness.previous_block_aux_hash = input - .vm_run_data - .previous_batch_with_metadata - .metadata - .aux_data_hash - .0; + scheduler_witness.previous_block_meta_hash = input.vm_run_data.previous_meta_hash.0; + scheduler_witness.previous_block_aux_hash = input.vm_run_data.previous_aux_hash.0; ( circuit_urls, From 85dcf5be72b4d3d690efc0fb2ff56ef93f0d5a5d Mon Sep 17 00:00:00 2001 From: Lech <88630083+Artemka374@users.noreply.github.com> Date: Tue, 25 Jun 2024 14:12:03 +0300 Subject: [PATCH 13/56] use updates manager data everywhere possible --- core/node/state_keeper/src/updates/mod.rs | 2 +- 
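One nit on `get_updates_manager_witness_input_data` above: `.expect(format!(..).as_str())` builds the panic message even on the success path. A sketch of the lazy equivalent under the same bindings:

    let finished_batch = updates_manager
        .l1_batch
        .finished
        .clone()
        .unwrap_or_else(|| panic!("L1 batch {l1_batch_number:?} is not finished"));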
core/node/vm_runner/src/impls/bwip.rs | 73 +++++++++++++++++++---- 2 files changed, 62 insertions(+), 13 deletions(-) diff --git a/core/node/state_keeper/src/updates/mod.rs b/core/node/state_keeper/src/updates/mod.rs index 3db8deca9797..7f8e46ffc623 100644 --- a/core/node/state_keeper/src/updates/mod.rs +++ b/core/node/state_keeper/src/updates/mod.rs @@ -101,7 +101,7 @@ impl UpdatesManager { } } - pub(crate) fn protocol_version(&self) -> ProtocolVersionId { + pub fn protocol_version(&self) -> ProtocolVersionId { self.protocol_version } diff --git a/core/node/vm_runner/src/impls/bwip.rs b/core/node/vm_runner/src/impls/bwip.rs index 499a4be11e3d..f19bef318516 100644 --- a/core/node/vm_runner/src/impls/bwip.rs +++ b/core/node/vm_runner/src/impls/bwip.rs @@ -153,8 +153,7 @@ impl StateKeeperOutputHandler for BasicWitnessInputProducerOutputHandler { let mut result = get_updates_manager_witness_input_data(updates_manager.clone(), &mut connection).await; - // todo: update this assert - assert_eq!(db_result, result); + compare_witness_input_data(&db_result, &result); let previous_batch_with_metadata = connection .blocks_dal() @@ -206,13 +205,6 @@ async fn get_updates_manager_witness_input_data( .clone() .expect(format!("L1 batch {l1_batch_number:?} is not finished").as_str()); - let block_header = connection - .blocks_dal() - .get_l1_batch_header(l1_batch_number) - .await - .unwrap() - .unwrap(); - let initial_heap_content = finished_batch.final_bootloader_memory.unwrap(); // might be just empty let default_aa = updates_manager .base_system_contract_hashes() @@ -263,9 +255,7 @@ async fn get_updates_manager_witness_input_data( used_bytecodes, initial_heap_content, - protocol_version: block_header - .protocol_version - .unwrap_or(ProtocolVersionId::last_potentially_undefined()), + protocol_version: updates_manager.protocol_version(), bootloader_code: bytes_to_chunks(bootloader.as_bytes()), default_account_code_hash: account_code_hash, @@ -362,6 +352,65 @@ async fn get_database_witness_input_data( } } +fn compare_witness_input_data(db_result: &VMRunWitnessInputData, result: &VMRunWitnessInputData) { + if db_result.protocol_version != result.protocol_version { + tracing::error!( + "Protocol version mismatch in basic witness input producer: DB: {:?}, UpdatesManager: {:?}", + db_result.protocol_version, + result.protocol_version + ); + } + if db_result.l1_batch_number != result.l1_batch_number { + tracing::error!( + "L1 batch number mismatch in basic witness input producer: DB: {:?}, UpdatesManager: {:?}", + db_result.l1_batch_number, + result.l1_batch_number + ); + } + if db_result.used_bytecodes.len() != result.used_bytecodes.len() { + tracing::error!( + "Used bytecodes length mismatch in basic witness input producer: DB: {:?}, UpdatesManager: {:?}", + db_result.used_bytecodes.len(), + result.used_bytecodes.len() + ); + } + if db_result.storage_refunds != result.storage_refunds { + tracing::error!( + "Storage refunds mismatch in basic witness input producer: DB: {:?}, UpdatesManager: {:?}", + db_result.storage_refunds, + result.storage_refunds + ); + } + if db_result.pubdata_costs != result.pubdata_costs { + tracing::error!( + "Pubdata costs mismatch in basic witness input producer: DB: {:?}, UpdatesManager: {:?}", + db_result.pubdata_costs, + result.pubdata_costs + ); + } + if db_result.initial_heap_content != result.initial_heap_content { + tracing::error!( + "Initial heap content mismatch in basic witness input producer: DB: {:?}, UpdatesManager: {:?}", + db_result.initial_heap_content, + 
result.initial_heap_content + ); + } + if db_result.bootloader_code != result.bootloader_code { + tracing::error!( + "Bootloader code mismatch in basic witness input producer: DB: {:?}, UpdatesManager: {:?}", + db_result.bootloader_code, + result.bootloader_code + ); + } + if db_result.default_account_code_hash != result.default_account_code_hash { + tracing::error!( + "Default account code hash mismatch in basic witness input producer: DB: {:?}, UpdatesManager: {:?}", + db_result.default_account_code_hash, + result.default_account_code_hash + ); + } +} + #[derive(Debug)] struct BasicWitnessInputProducerOutputHandlerFactory { pool: ConnectionPool, From ecac5973100a496860711038886c5b3b7fe269a6 Mon Sep 17 00:00:00 2001 From: Lech <88630083+Artemka374@users.noreply.github.com> Date: Wed, 26 Jun 2024 12:38:00 +0300 Subject: [PATCH 14/56] fix build --- core/lib/merkle_tree/src/domain.rs | 6 ++--- core/lib/prover_interface/src/api.rs | 2 +- core/lib/prover_interface/src/inputs.rs | 13 +++++------ .../tests/job_serialization.rs | 6 ++--- core/lib/tee_verifier/src/lib.rs | 10 ++++----- core/node/metadata_calculator/src/helpers.rs | 4 ++-- core/node/metadata_calculator/src/tests.rs | 6 ++--- core/node/proof_data_handler/src/tests.rs | 4 ++-- .../tee_verifier_input_producer/src/lib.rs | 4 ++-- core/node/vm_runner/src/impls/bwip.rs | 22 ++++++++++++------- .../witness_generator/src/basic_circuits.rs | 2 +- .../precalculated_merkle_paths_provider.rs | 4 ++-- prover/witness_generator/src/tests.rs | 4 ++-- 13 files changed, 46 insertions(+), 41 deletions(-) diff --git a/core/lib/merkle_tree/src/domain.rs b/core/lib/merkle_tree/src/domain.rs index ffc4b0b84106..4e61951e3fe1 100644 --- a/core/lib/merkle_tree/src/domain.rs +++ b/core/lib/merkle_tree/src/domain.rs @@ -2,7 +2,7 @@ use rayon::{ThreadPool, ThreadPoolBuilder}; use zksync_crypto::hasher::blake2::Blake2Hasher; -use zksync_prover_interface::inputs::{PrepareBasicCircuitsJob, StorageLogMetadata}; +use zksync_prover_interface::inputs::{StorageLogMetadata, WitnessInputMerklePaths}; use zksync_types::{L1BatchNumber, StorageKey}; use crate::{ @@ -23,7 +23,7 @@ pub struct TreeMetadata { /// 1-based index of the next leaf to be inserted in the tree. pub rollup_last_leaf_index: u64, /// Witness information. As with `repeated_writes`, no-op updates will be omitted from Merkle paths. 
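Each branch of `compare_witness_input_data` above repeats the same log pattern; a sketch of a generic helper that would collapse the eight blocks, assuming nothing beyond `Debug + PartialEq` on the compared fields:

    fn log_mismatch<T: std::fmt::Debug + PartialEq>(field: &str, db: &T, updates: &T) {
        if db != updates {
            tracing::error!(
                "{} mismatch in basic witness input producer: DB: {:?}, UpdatesManager: {:?}",
                field,
                db,
                updates
            );
        }
    }

    // e.g.:
    // log_mismatch("Protocol version", &db_result.protocol_version, &result.protocol_version);
    // log_mismatch("L1 batch number", &db_result.l1_batch_number, &result.l1_batch_number);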
- pub witness: Option, + pub witness: Option, } #[derive(Debug, PartialEq, Eq)] @@ -244,7 +244,7 @@ impl ZkSyncTree { self.tree.extend_with_proofs(instructions_with_hashed_keys) }?; - let mut witness = PrepareBasicCircuitsJob::new(starting_leaf_count + 1); + let mut witness = WitnessInputMerklePaths::new(starting_leaf_count + 1); witness.reserve(output.logs.len()); for (log, instruction) in output.logs.iter().zip(instructions) { let empty_levels_end = TREE_DEPTH - log.merkle_path.len(); diff --git a/core/lib/prover_interface/src/api.rs b/core/lib/prover_interface/src/api.rs index 2ef5cabea731..09f44fca1b77 100644 --- a/core/lib/prover_interface/src/api.rs +++ b/core/lib/prover_interface/src/api.rs @@ -9,7 +9,7 @@ use zksync_types::{ }; use crate::{ - inputs::{PrepareBasicCircuitsJob, WitnessInputData}, + inputs::WitnessInputData, outputs::{L1BatchProofForL1, L1BatchTeeProofForL1}, }; diff --git a/core/lib/prover_interface/src/inputs.rs b/core/lib/prover_interface/src/inputs.rs index 311c9df5a277..f2fbf54b4031 100644 --- a/core/lib/prover_interface/src/inputs.rs +++ b/core/lib/prover_interface/src/inputs.rs @@ -5,8 +5,7 @@ use serde_with::{serde_as, Bytes}; use zksync_object_store::{serialize_using_bincode, Bucket, StoredObject}; pub use zksync_state::WitnessStorage; use zksync_types::{ - block::L1BatchHeader, commitment::L1BatchWithMetadata, witness_block_state::WitnessBlockState, - L1BatchNumber, ProtocolVersionId, H256, U256, + witness_block_state::WitnessBlockState, L1BatchNumber, ProtocolVersionId, H256, U256, }; const HASH_LEN: usize = H256::len_bytes(); @@ -63,13 +62,13 @@ impl StorageLogMetadata { /// Merkle paths; if this is the case, the starting hashes are skipped and are the same /// as in the first path. #[derive(Debug, Clone, PartialEq, Serialize, Deserialize)] -pub struct PrepareBasicCircuitsJob { +pub struct WitnessInputMerklePaths { // Merkle paths and some auxiliary information for each read / write operation in a block. merkle_paths: Vec, next_enumeration_index: u64, } -impl StoredObject for PrepareBasicCircuitsJob { +impl StoredObject for WitnessInputMerklePaths { const BUCKET: Bucket = Bucket::WitnessInput; type Key<'a> = L1BatchNumber; @@ -80,7 +79,7 @@ impl StoredObject for PrepareBasicCircuitsJob { serialize_using_bincode!(); } -impl PrepareBasicCircuitsJob { +impl WitnessInputMerklePaths { /// Creates a new job with the specified leaf index and no included paths. 
pub fn new(next_enumeration_index: u64) -> Self { Self { @@ -168,7 +167,7 @@ impl StoredObject for VMRunWitnessInputData { #[derive(Debug, Clone, Serialize, Deserialize)] pub struct WitnessInputData { pub vm_run_data: VMRunWitnessInputData, - pub merkle_paths: PrepareBasicCircuitsJob, + pub merkle_paths: WitnessInputMerklePaths, } impl StoredObject for WitnessInputData { @@ -206,7 +205,7 @@ mod tests { }); let logs: Vec<_> = logs.collect(); - let mut job = PrepareBasicCircuitsJob::new(4); + let mut job = WitnessInputMerklePaths::new(4); job.reserve(logs.len()); for log in &logs { job.push_merkle_path(log.clone()); diff --git a/core/lib/prover_interface/tests/job_serialization.rs b/core/lib/prover_interface/tests/job_serialization.rs index 60a80f91ed8d..1ad87141922b 100644 --- a/core/lib/prover_interface/tests/job_serialization.rs +++ b/core/lib/prover_interface/tests/job_serialization.rs @@ -5,7 +5,7 @@ use tokio::fs; use zksync_object_store::{Bucket, MockObjectStore}; use zksync_prover_interface::{ api::{SubmitProofRequest, SubmitTeeProofRequest}, - inputs::{PrepareBasicCircuitsJob, StorageLogMetadata}, + inputs::{StorageLogMetadata, WitnessInputMerklePaths}, outputs::{L1BatchProofForL1, L1BatchTeeProofForL1}, }; use zksync_types::{protocol_version::ProtocolSemanticVersion, L1BatchNumber, ProtocolVersionId}; @@ -29,7 +29,7 @@ async fn prepare_basic_circuits_job_serialization() { .await .unwrap(); - let job: PrepareBasicCircuitsJob = store.get(L1BatchNumber(1)).await.unwrap(); + let job: WitnessInputMerklePaths = store.get(L1BatchNumber(1)).await.unwrap(); let key = store.put(L1BatchNumber(2), &job).await.unwrap(); let serialized_job = store.get_raw(Bucket::WitnessInput, &key).await.unwrap(); @@ -60,7 +60,7 @@ async fn prepare_basic_circuits_job_compatibility() { let serialized = bincode::serialize(&job_tuple).unwrap(); assert_eq!(serialized, snapshot); - let job: PrepareBasicCircuitsJob = bincode::deserialize(&snapshot).unwrap(); + let job: WitnessInputMerklePaths = bincode::deserialize(&snapshot).unwrap(); assert_eq!(job.next_enumeration_index(), job_tuple.1); let job_merkle_paths: Vec<_> = job.into_merkle_paths().collect(); assert_eq!(job_merkle_paths, job_tuple.0); diff --git a/core/lib/tee_verifier/src/lib.rs b/core/lib/tee_verifier/src/lib.rs index 19e9c4655f40..38a2b69832c3 100644 --- a/core/lib/tee_verifier/src/lib.rs +++ b/core/lib/tee_verifier/src/lib.rs @@ -19,7 +19,7 @@ use zksync_merkle_tree::{ BlockOutputWithProofs, TreeInstruction, TreeLogEntry, TreeLogEntryWithProof, }; use zksync_object_store::{serialize_using_bincode, Bucket, StoredObject}; -use zksync_prover_interface::inputs::{PrepareBasicCircuitsJob, StorageLogMetadata}; +use zksync_prover_interface::inputs::{StorageLogMetadata, WitnessInputMerklePaths}; use zksync_state::{InMemoryStorage, StorageView, WriteStorage}; use zksync_types::{block::L2BlockExecutionData, L1BatchNumber, StorageLog, H256}; use zksync_utils::bytecode::hash_bytecode; @@ -27,7 +27,7 @@ use zksync_utils::bytecode::hash_bytecode; /// Version 1 of the data used as input for the TEE verifier. 
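For orientation, a short sketch of the renamed `WitnessInputMerklePaths` in use; it mirrors the serialization test updated above, so the `logs` vector and the mock `store` are assumed from that test:

    let mut job = WitnessInputMerklePaths::new(4); // next enumeration index starts at 4
    job.reserve(logs.len());
    for log in &logs {
        job.push_merkle_path(log.clone());
    }
    // `StoredObject` is implemented for the renamed type, so it still
    // round-trips through the object store under `Bucket::WitnessInput`.
    store.put(L1BatchNumber(1), &job).await.unwrap();
    let restored: WitnessInputMerklePaths = store.get(L1BatchNumber(1)).await.unwrap();
    assert_eq!(restored.next_enumeration_index(), job.next_enumeration_index());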
#[derive(Debug, Clone, Serialize, Deserialize, PartialEq)] pub struct V1TeeVerifierInput { - prepare_basic_circuits_job: PrepareBasicCircuitsJob, + prepare_basic_circuits_job: WitnessInputMerklePaths, l2_blocks_execution_data: Vec, l1_batch_env: L1BatchEnv, system_env: SystemEnv, @@ -46,7 +46,7 @@ pub enum TeeVerifierInput { impl TeeVerifierInput { pub fn new( - prepare_basic_circuits_job: PrepareBasicCircuitsJob, + prepare_basic_circuits_job: WitnessInputMerklePaths, l2_blocks_execution_data: Vec, l1_batch_env: L1BatchEnv, system_env: SystemEnv, @@ -118,7 +118,7 @@ impl TeeVerifierInput { /// Sets the initial storage values and returns `BlockOutputWithProofs` fn get_bowp_and_set_initial_values( - prepare_basic_circuits_job: PrepareBasicCircuitsJob, + prepare_basic_circuits_job: WitnessInputMerklePaths, raw_storage: &mut InMemoryStorage, ) -> BlockOutputWithProofs { let logs = prepare_basic_circuits_job @@ -291,7 +291,7 @@ mod tests { #[test] fn test_v1_serialization() { let tvi = TeeVerifierInput::new( - PrepareBasicCircuitsJob::new(0), + WitnessInputMerklePaths::new(0), vec![], L1BatchEnv { previous_batch_hash: Some(H256([1; 32])), diff --git a/core/node/metadata_calculator/src/helpers.rs b/core/node/metadata_calculator/src/helpers.rs index d6918b7a5e87..5ccd741ea2a4 100644 --- a/core/node/metadata_calculator/src/helpers.rs +++ b/core/node/metadata_calculator/src/helpers.rs @@ -790,7 +790,7 @@ mod tests { use tempfile::TempDir; use zksync_dal::{ConnectionPool, Core}; use zksync_node_genesis::{insert_genesis_batch, GenesisParams}; - use zksync_prover_interface::inputs::PrepareBasicCircuitsJob; + use zksync_prover_interface::inputs::WitnessInputMerklePaths; use zksync_types::{writes::TreeWrite, StorageKey, StorageLog}; use super::*; @@ -1019,7 +1019,7 @@ mod tests { ); } - fn assert_equivalent_witnesses(lhs: PrepareBasicCircuitsJob, rhs: PrepareBasicCircuitsJob) { + fn assert_equivalent_witnesses(lhs: WitnessInputMerklePaths, rhs: WitnessInputMerklePaths) { assert_eq!(lhs.next_enumeration_index(), rhs.next_enumeration_index()); let lhs_paths = lhs.into_merkle_paths(); let rhs_paths = rhs.into_merkle_paths(); diff --git a/core/node/metadata_calculator/src/tests.rs b/core/node/metadata_calculator/src/tests.rs index 38e1a09d1091..927fdccaf301 100644 --- a/core/node/metadata_calculator/src/tests.rs +++ b/core/node/metadata_calculator/src/tests.rs @@ -17,7 +17,7 @@ use zksync_merkle_tree::domain::ZkSyncTree; use zksync_node_genesis::{insert_genesis_batch, GenesisParams}; use zksync_node_test_utils::{create_l1_batch, create_l2_block}; use zksync_object_store::{MockObjectStore, ObjectStore}; -use zksync_prover_interface::inputs::PrepareBasicCircuitsJob; +use zksync_prover_interface::inputs::WitnessInputMerklePaths; use zksync_storage::RocksDB; use zksync_types::{ block::{L1BatchHeader, L1BatchTreeData}, @@ -235,7 +235,7 @@ async fn basic_workflow() { let expected_tree_hash = expected_tree_hash(&pool).await; assert_eq!(merkle_tree_hash, expected_tree_hash); - let job: PrepareBasicCircuitsJob = object_store.get(L1BatchNumber(1)).await.unwrap(); + let job: WitnessInputMerklePaths = object_store.get(L1BatchNumber(1)).await.unwrap(); assert!(job.next_enumeration_index() > 0); let merkle_paths: Vec<_> = job.clone().into_merkle_paths().collect(); assert!(!merkle_paths.is_empty() && merkle_paths.len() <= 100); @@ -344,7 +344,7 @@ async fn multi_l1_batch_workflow() { let mut prev_index = None; for l1_batch_number in 1..=10 { let l1_batch_number = L1BatchNumber(l1_batch_number); - let job: 
PrepareBasicCircuitsJob = object_store.get(l1_batch_number).await.unwrap(); + let job: WitnessInputMerklePaths = object_store.get(l1_batch_number).await.unwrap(); let next_enumeration_index = job.next_enumeration_index(); let merkle_paths: Vec<_> = job.into_merkle_paths().collect(); assert!(!merkle_paths.is_empty() && merkle_paths.len() <= 10); diff --git a/core/node/proof_data_handler/src/tests.rs b/core/node/proof_data_handler/src/tests.rs index 7047bd154c9a..f13ea3dec09b 100644 --- a/core/node/proof_data_handler/src/tests.rs +++ b/core/node/proof_data_handler/src/tests.rs @@ -15,7 +15,7 @@ use zksync_config::configs::ProofDataHandlerConfig; use zksync_contracts::{BaseSystemContracts, SystemContractCode}; use zksync_dal::{ConnectionPool, CoreDal}; use zksync_object_store::MockObjectStore; -use zksync_prover_interface::{api::SubmitTeeProofRequest, inputs::PrepareBasicCircuitsJob}; +use zksync_prover_interface::{api::SubmitTeeProofRequest, inputs::WitnessInputMerklePaths}; use zksync_tee_verifier::TeeVerifierInput; use zksync_types::{commitment::L1BatchCommitmentMode, L1BatchNumber, H256}; @@ -33,7 +33,7 @@ async fn request_tee_proof_inputs() { let batch_number = L1BatchNumber::from(1); let tvi = TeeVerifierInput::new( - PrepareBasicCircuitsJob::new(0), + WitnessInputMerklePaths::new(0), vec![], L1BatchEnv { previous_batch_hash: Some(H256([1; 32])), diff --git a/core/node/tee_verifier_input_producer/src/lib.rs b/core/node/tee_verifier_input_producer/src/lib.rs index 9104b62fa5e5..33d63305df2a 100644 --- a/core/node/tee_verifier_input_producer/src/lib.rs +++ b/core/node/tee_verifier_input_producer/src/lib.rs @@ -15,7 +15,7 @@ use tokio::task::JoinHandle; use vm_utils::storage::L1BatchParamsProvider; use zksync_dal::{tee_verifier_input_producer_dal::JOB_MAX_ATTEMPT, ConnectionPool, Core, CoreDal}; use zksync_object_store::ObjectStore; -use zksync_prover_interface::inputs::PrepareBasicCircuitsJob; +use zksync_prover_interface::inputs::WitnessInputMerklePaths; use zksync_queued_job_processor::JobProcessor; use zksync_tee_verifier::TeeVerifierInput; use zksync_types::{L1BatchNumber, L2ChainId}; @@ -53,7 +53,7 @@ impl TeeVerifierInputProducer { object_store: Arc, l2_chain_id: L2ChainId, ) -> anyhow::Result { - let prepare_basic_circuits_job: PrepareBasicCircuitsJob = object_store + let prepare_basic_circuits_job: WitnessInputMerklePaths = object_store .get(l1_batch_number) .await .context("failed to get PrepareBasicCircuitsJob from object store")?; diff --git a/core/node/vm_runner/src/impls/bwip.rs b/core/node/vm_runner/src/impls/bwip.rs index f19bef318516..b9cda91dc921 100644 --- a/core/node/vm_runner/src/impls/bwip.rs +++ b/core/node/vm_runner/src/impls/bwip.rs @@ -147,15 +147,21 @@ impl StateKeeperOutputHandler for BasicWitnessInputProducerOutputHandler { ) -> anyhow::Result<()> { let l1_batch_number = updates_manager.l1_batch.number; - let mut connection = self.pool.connection().await?; - - let db_result = get_database_witness_input_data(&mut connection, l1_batch_number).await; - let mut result = - get_updates_manager_witness_input_data(updates_manager.clone(), &mut connection).await; + let db_result = + get_database_witness_input_data(&mut self.pool.connection().await?, l1_batch_number) + .await; + let mut result = get_updates_manager_witness_input_data( + &mut self.pool.connection().await?, + updates_manager.clone(), + ) + .await; compare_witness_input_data(&db_result, &result); - let previous_batch_with_metadata = connection + let previous_batch_with_metadata = self + .pool + 
.connection() + .await? .blocks_dal() .get_l1_batch_metadata(L1BatchNumber(l1_batch_number.checked_sub(1).unwrap())) .await @@ -195,8 +201,8 @@ impl StateKeeperOutputHandler for BasicWitnessInputProducerOutputHandler { } async fn get_updates_manager_witness_input_data( + connection: &mut Connection<'_, Core>, updates_manager: Arc, - connection: &mut Connection, ) -> VMRunWitnessInputData { let l1_batch_number = updates_manager.l1_batch.number.clone(); let finished_batch = updates_manager @@ -266,7 +272,7 @@ async fn get_updates_manager_witness_input_data( } async fn get_database_witness_input_data( - connection: &mut Connection, + connection: &mut Connection<'_, Core>, l1_batch_number: L1BatchNumber, ) -> VMRunWitnessInputData { let block_header = connection diff --git a/prover/witness_generator/src/basic_circuits.rs b/prover/witness_generator/src/basic_circuits.rs index b9607629fc8a..693cf69e4c3d 100644 --- a/prover/witness_generator/src/basic_circuits.rs +++ b/prover/witness_generator/src/basic_circuits.rs @@ -36,7 +36,7 @@ use zksync_prover_fri_types::{ AuxOutputWitnessWrapper, }; use zksync_prover_fri_utils::get_recursive_layer_circuit_id_for_base_layer; -use zksync_prover_interface::inputs::{PrepareBasicCircuitsJob, WitnessInputData}; +use zksync_prover_interface::inputs::{WitnessInputData, WitnessInputMerklePaths}; use zksync_queued_job_processor::JobProcessor; use zksync_state::{PostgresStorage, StorageView, WitnessStorage}; use zksync_types::{ diff --git a/prover/witness_generator/src/precalculated_merkle_paths_provider.rs b/prover/witness_generator/src/precalculated_merkle_paths_provider.rs index 2cfadc93fc6a..52c8688cfb42 100644 --- a/prover/witness_generator/src/precalculated_merkle_paths_provider.rs +++ b/prover/witness_generator/src/precalculated_merkle_paths_provider.rs @@ -3,7 +3,7 @@ use zk_evm::blake2::Blake2s256; use zkevm_test_harness::witness::tree::{ BinaryHasher, BinarySparseStorageTree, EnumeratedBinaryLeaf, LeafQuery, ZkSyncStorageLeaf, }; -use zksync_prover_interface::inputs::{PrepareBasicCircuitsJob, StorageLogMetadata}; +use zksync_prover_interface::inputs::{StorageLogMetadata, WitnessInputMerklePaths}; #[derive(Debug, Clone, PartialEq, Deserialize, Serialize)] pub struct PrecalculatedMerklePathsProvider { @@ -19,7 +19,7 @@ pub struct PrecalculatedMerklePathsProvider { } impl PrecalculatedMerklePathsProvider { - pub fn new(input: PrepareBasicCircuitsJob, root_hash: [u8; 32]) -> Self { + pub fn new(input: WitnessInputMerklePaths, root_hash: [u8; 32]) -> Self { let next_enumeration_index = input.next_enumeration_index(); tracing::debug!("Initializing PrecalculatedMerklePathsProvider. 
Initial root_hash: {:?}, initial next_enumeration_index: {:?}", root_hash, next_enumeration_index); Self { diff --git a/prover/witness_generator/src/tests.rs b/prover/witness_generator/src/tests.rs index 5163368d66d2..d6b00d2ccb4b 100644 --- a/prover/witness_generator/src/tests.rs +++ b/prover/witness_generator/src/tests.rs @@ -5,7 +5,7 @@ use zkevm_test_harness::{ kzg::KzgSettings, witness::tree::{BinarySparseStorageTree, ZkSyncStorageLeaf}, }; -use zksync_prover_interface::inputs::{PrepareBasicCircuitsJob, StorageLogMetadata}; +use zksync_prover_interface::inputs::{StorageLogMetadata, WitnessInputMerklePaths}; use zksync_types::U256; use super::precalculated_merkle_paths_provider::PrecalculatedMerklePathsProvider; @@ -81,7 +81,7 @@ const fn generate_storage_log_metadata( } fn create_provider() -> PrecalculatedMerklePathsProvider { - let mut job = PrepareBasicCircuitsJob::new(4); + let mut job = WitnessInputMerklePaths::new(4); for (mut log, merkle_path) in LOGS_AND_PATHS { log.merkle_paths = vec![merkle_path]; job.push_merkle_path(log); From 6ea08dc024f83114d9d1ed92a0f742841b357cbe Mon Sep 17 00:00:00 2001 From: Lech <88630083+Artemka374@users.noreply.github.com> Date: Wed, 26 Jun 2024 13:44:19 +0300 Subject: [PATCH 15/56] fix build --- core/lib/dal/src/proof_generation_dal.rs | 2 +- prover/Cargo.lock | 1 - prover/witness_generator/Cargo.toml | 3 +-- 3 files changed, 2 insertions(+), 4 deletions(-) diff --git a/core/lib/dal/src/proof_generation_dal.rs b/core/lib/dal/src/proof_generation_dal.rs index 91d320b5991a..7e998be5b195 100644 --- a/core/lib/dal/src/proof_generation_dal.rs +++ b/core/lib/dal/src/proof_generation_dal.rs @@ -347,7 +347,7 @@ mod tests { assert_eq!(picked_l1_batch, Some(L1BatchNumber(1))); conn.proof_generation_dal() - .save_proof_artifacts_metadata(L1BatchNumber(1), "proof") + .save_proof_merkle_paths_artifacts_metadata(L1BatchNumber(1), "proof") .await .unwrap(); diff --git a/prover/Cargo.lock b/prover/Cargo.lock index 0bcf1276bec1..da1e7fe13623 100644 --- a/prover/Cargo.lock +++ b/prover/Cargo.lock @@ -8457,7 +8457,6 @@ dependencies = [ "zkevm_test_harness 1.5.0", "zksync_config", "zksync_core_leftovers", - "zksync_dal", "zksync_env_config", "zksync_multivm", "zksync_object_store", diff --git a/prover/witness_generator/Cargo.toml b/prover/witness_generator/Cargo.toml index 5c42343f60b9..b637ac7f20dd 100644 --- a/prover/witness_generator/Cargo.toml +++ b/prover/witness_generator/Cargo.toml @@ -12,7 +12,6 @@ categories.workspace = true [dependencies] vise.workspace = true zksync_prover_dal.workspace = true -zksync_dal.workspace = true zksync_config.workspace = true zksync_prover_interface.workspace = true zksync_prover_config.workspace = true @@ -32,7 +31,7 @@ zksync_core_leftovers.workspace = true zksync_protobuf_config.workspace = true zkevm_test_harness = { workspace = true } -circuit_definitions = { workspace = true, features = [ "log_tracing" ] } +circuit_definitions = { workspace = true, features = ["log_tracing"] } zk_evm.workspace = true anyhow.workspace = true From ba40d4ac1cc1399f3a610813e53d78d0645a65ba Mon Sep 17 00:00:00 2001 From: Lech <88630083+Artemka374@users.noreply.github.com> Date: Wed, 26 Jun 2024 14:38:42 +0300 Subject: [PATCH 16/56] fix prover build --- .../witness_generator/src/basic_circuits.rs | 40 +++++-------------- .../witness_generator/src/leaf_aggregation.rs | 3 +- prover/witness_generator/src/main.rs | 10 ----- .../witness_generator/src/node_aggregation.rs | 3 +- prover/witness_generator/src/recursion_tip.rs | 3 +- 
prover/witness_generator/src/scheduler.rs | 3 +- 6 files changed, 13 insertions(+), 49 deletions(-) diff --git a/prover/witness_generator/src/basic_circuits.rs b/prover/witness_generator/src/basic_circuits.rs index 4f30e6af7e34..eb186a068a53 100644 --- a/prover/witness_generator/src/basic_circuits.rs +++ b/prover/witness_generator/src/basic_circuits.rs @@ -15,7 +15,6 @@ use circuit_definitions::{ use tracing::Instrument; use zkevm_test_harness::geometry_config::get_geometry_config; use zksync_config::configs::FriWitnessGeneratorConfig; -use zksync_dal::{Core, CoreDal}; use zksync_multivm::vm_latest::{ constants::MAX_CYCLES_FOR_TX, HistoryDisabled, StorageOracle as VmStorageOracle, }; @@ -36,16 +35,14 @@ use zksync_prover_fri_types::{ AuxOutputWitnessWrapper, }; use zksync_prover_fri_utils::get_recursive_layer_circuit_id_for_base_layer; -use zksync_prover_interface::inputs::{WitnessInputData, WitnessInputMerklePaths}; +use zksync_prover_interface::inputs::WitnessInputData; use zksync_queued_job_processor::JobProcessor; -use zksync_state::{PostgresStorage, StorageView, WitnessStorage}; +use zksync_state::{StorageView, WitnessStorage}; use zksync_types::{ basic_fri_types::{AggregationRound, Eip4844Blobs}, - block::StorageOracleInfo, protocol_version::ProtocolSemanticVersion, - Address, L1BatchNumber, ProtocolVersionId, BOOTLOADER_ADDRESS, H256, + Address, L1BatchNumber, BOOTLOADER_ADDRESS, }; -use zksync_utils::{bytes_to_chunks, h256_to_u256, u256_to_h256}; use crate::{ metrics::WITNESS_GENERATOR_METRICS, @@ -87,7 +84,6 @@ pub struct BasicWitnessGenerator { config: Arc, object_store: Arc, public_blob_store: Option>, - connection_pool: ConnectionPool, prover_connection_pool: ConnectionPool, protocol_version: ProtocolSemanticVersion, } @@ -97,7 +93,6 @@ impl BasicWitnessGenerator { config: FriWitnessGeneratorConfig, object_store: Arc, public_blob_store: Option>, - connection_pool: ConnectionPool, prover_connection_pool: ConnectionPool, protocol_version: ProtocolSemanticVersion, ) -> Self { @@ -105,7 +100,6 @@ impl BasicWitnessGenerator { config: Arc::new(config), object_store, public_blob_store, - connection_pool, prover_connection_pool, protocol_version, } @@ -113,7 +107,6 @@ impl BasicWitnessGenerator { async fn process_job_impl( object_store: Arc, - connection_pool: ConnectionPool, basic_job: BasicWitnessGeneratorJob, started_at: Instant, ) -> Option { @@ -132,7 +125,6 @@ impl BasicWitnessGenerator { Some( process_basic_circuits_job( &*object_store, - connection_pool, started_at, block_number, job, @@ -200,14 +192,11 @@ impl JobProcessor for BasicWitnessGenerator { started_at: Instant, ) -> tokio::task::JoinHandle>> { let object_store = Arc::clone(&self.object_store); - let connection_pool = self.connection_pool.clone(); tokio::spawn(async move { let block_number = job.block_number; - Ok( - Self::process_job_impl(object_store, connection_pool, job, started_at) - .instrument(tracing::info_span!("basic_circuit", %block_number)) - .await, - ) + Ok(Self::process_job_impl(object_store, job, started_at) + .instrument(tracing::info_span!("basic_circuit", %block_number)) + .await) }) } @@ -272,20 +261,13 @@ impl JobProcessor for BasicWitnessGenerator { #[allow(clippy::too_many_arguments)] async fn process_basic_circuits_job( object_store: &dyn ObjectStore, - connection_pool: ConnectionPool, started_at: Instant, block_number: L1BatchNumber, job: WitnessInputData, eip_4844_blobs: Eip4844Blobs, ) -> BasicCircuitArtifacts { - let (circuit_urls, queue_urls, scheduler_witness, aux_output_witness) = 
generate_witness( - block_number, - object_store, - connection_pool, - job, - eip_4844_blobs, - ) - .await; + let (circuit_urls, queue_urls, scheduler_witness, aux_output_witness) = + generate_witness(block_number, object_store, job, eip_4844_blobs).await; WITNESS_GENERATOR_METRICS.witness_generation_time[&AggregationRound::BasicCircuits.into()] .observe(started_at.elapsed()); tracing::info!( @@ -404,7 +386,6 @@ async fn save_recursion_queue( async fn generate_witness( block_number: L1BatchNumber, object_store: &dyn ObjectStore, - connection_pool: ConnectionPool, input: WitnessInputData, eip_4844_blobs: Eip4844Blobs, ) -> ( @@ -431,13 +412,10 @@ async fn generate_witness( geometry_config.hash(&mut hasher); tracing::info!( "generating witness for block {} using geometry config hash: {}", - input.vm_run_data.l1_batch_number.0 + input.vm_run_data.l1_batch_number.0, hasher.finish() ); - // The following part is CPU-heavy, so we move it to a separate thread. - let rt_handle = tokio::runtime::Handle::current(); - let (circuit_sender, mut circuit_receiver) = tokio::sync::mpsc::channel(1); let (queue_sender, mut queue_receiver) = tokio::sync::mpsc::channel(1); diff --git a/prover/witness_generator/src/leaf_aggregation.rs b/prover/witness_generator/src/leaf_aggregation.rs index 112d07498837..76703d0d874d 100644 --- a/prover/witness_generator/src/leaf_aggregation.rs +++ b/prover/witness_generator/src/leaf_aggregation.rs @@ -8,9 +8,8 @@ use zkevm_test_harness::{ zkevm_circuits::scheduler::aux::BaseLayerCircuitType, }; use zksync_config::configs::FriWitnessGeneratorConfig; -use zksync_dal::ConnectionPool; use zksync_object_store::ObjectStore; -use zksync_prover_dal::{Prover, ProverDal}; +use zksync_prover_dal::{ConnectionPool, Prover, ProverDal}; use zksync_prover_fri_types::{ circuit_definitions::{ boojum::field::goldilocks::GoldilocksField, diff --git a/prover/witness_generator/src/main.rs b/prover/witness_generator/src/main.rs index 8208c62c6277..d51f1dd3af22 100644 --- a/prover/witness_generator/src/main.rs +++ b/prover/witness_generator/src/main.rs @@ -35,7 +35,6 @@ mod utils; #[cfg(not(target_env = "msvc"))] use jemallocator::Jemalloc; -use zksync_dal::Core; use zksync_prover_fri_types::PROVER_PROTOCOL_SEMANTIC_VERSION; #[cfg(not(target_env = "msvc"))] @@ -125,14 +124,6 @@ async fn main() -> anyhow::Result<()> { let prometheus_config = general_config .prometheus_config .context("prometheus config")?; - let postgres_config = general_config.postgres_config.context("postgres config")?; - let connection_pool = ConnectionPool::::builder( - database_secrets.master_url()?, - postgres_config.max_connections()?, - ) - .build() - .await - .context("failed to build a connection_pool")?; let prover_connection_pool = ConnectionPool::::singleton(database_secrets.prover_url()?) 
.build() @@ -224,7 +215,6 @@ async fn main() -> anyhow::Result<()> { config.clone(), store_factory.create_store().await?, public_blob_store, - connection_pool.clone(), prover_connection_pool.clone(), protocol_version, ); diff --git a/prover/witness_generator/src/node_aggregation.rs b/prover/witness_generator/src/node_aggregation.rs index 0af59890504d..36b13d4357a9 100644 --- a/prover/witness_generator/src/node_aggregation.rs +++ b/prover/witness_generator/src/node_aggregation.rs @@ -6,9 +6,8 @@ use zkevm_test_harness::witness::recursive_aggregation::{ compute_node_vk_commitment, create_node_witnesses, }; use zksync_config::configs::FriWitnessGeneratorConfig; -use zksync_dal::ConnectionPool; use zksync_object_store::ObjectStore; -use zksync_prover_dal::{Prover, ProverDal}; +use zksync_prover_dal::{ConnectionPool, Prover, ProverDal}; use zksync_prover_fri_types::{ circuit_definitions::{ boojum::field::goldilocks::GoldilocksField, diff --git a/prover/witness_generator/src/recursion_tip.rs b/prover/witness_generator/src/recursion_tip.rs index b6c9cd7173dd..2f55621fecaf 100644 --- a/prover/witness_generator/src/recursion_tip.rs +++ b/prover/witness_generator/src/recursion_tip.rs @@ -36,9 +36,8 @@ use zkevm_test_harness::{ }, }; use zksync_config::configs::FriWitnessGeneratorConfig; -use zksync_dal::ConnectionPool; use zksync_object_store::ObjectStore; -use zksync_prover_dal::{Prover, ProverDal}; +use zksync_prover_dal::{ConnectionPool, Prover, ProverDal}; use zksync_prover_fri_types::{ get_current_pod_name, keys::{ClosedFormInputKey, FriCircuitKey}, diff --git a/prover/witness_generator/src/scheduler.rs b/prover/witness_generator/src/scheduler.rs index a6173c813586..80c4322e644e 100644 --- a/prover/witness_generator/src/scheduler.rs +++ b/prover/witness_generator/src/scheduler.rs @@ -6,9 +6,8 @@ use zkevm_test_harness::zkevm_circuits::recursion::{ leaf_layer::input::RecursionLeafParametersWitness, NUM_BASE_LAYER_CIRCUITS, }; use zksync_config::configs::FriWitnessGeneratorConfig; -use zksync_dal::ConnectionPool; use zksync_object_store::ObjectStore; -use zksync_prover_dal::{Prover, ProverDal}; +use zksync_prover_dal::{ConnectionPool, Prover, ProverDal}; use zksync_prover_fri_types::{ circuit_definitions::{ boojum::{ From f270ccfeb5dcd854572de6ea51164d86c38420ce Mon Sep 17 00:00:00 2001 From: Lech <88630083+Artemka374@users.noreply.github.com> Date: Wed, 26 Jun 2024 14:42:00 +0300 Subject: [PATCH 17/56] checkout contracts --- contracts | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/contracts b/contracts index 8a70bbbc4812..db9387690502 160000 --- a/contracts +++ b/contracts @@ -1 +1 @@ -Subproject commit 8a70bbbc48125f5bde6189b4e3c6a3ee79631678 +Subproject commit db9387690502937de081a959b164db5a5262ce0a From f5b593a5bc6cfc4a6c24a7387e01ab5c1527a8dd Mon Sep 17 00:00:00 2001 From: Lech <88630083+Artemka374@users.noreply.github.com> Date: Wed, 26 Jun 2024 16:53:47 +0300 Subject: [PATCH 18/56] fix some bugs --- ...a2418ca2223c8f37ec9856dcf6ccfa8b8dabb.json | 15 ------- ...ef7419598a0113c85ff215e13728c0a15b310.json | 15 ------- ...41c4a7db0ea88c2e4caece1e7c170c991baa2.json | 14 ------- ...2fca14965083b0589c3b3efad02e37d55f0c.json} | 4 +- ...ac5e5fccb3ec55aa1227f5f1ea5a11f9b36a9.json | 15 +++++++ ...5661bb3018be6d43164c1457edf50b5957429.json | 20 ++++++++++ ...b608d21dc70397b64ce500881a8b55953c59c.json | 14 +++++++ ...7fe0da8905c344755f264292cb436393069e.json} | 4 +- ...fd1fa6facc419ce16b8d628a0f5f78e28a0ee.json | 15 +++++++ core/lib/dal/src/proof_generation_dal.rs | 39 
++++++++++++++----- core/lib/dal/src/tee_proof_generation_dal.rs | 25 ++++++------ core/lib/types/src/commitment/mod.rs | 4 +- .../src/request_processor.rs | 2 +- 13 files changed, 114 insertions(+), 72 deletions(-) delete mode 100644 core/lib/dal/.sqlx/query-087be742721df75c9b197030204a2418ca2223c8f37ec9856dcf6ccfa8b8dabb.json delete mode 100644 core/lib/dal/.sqlx/query-2b02ece0964c71d501c99885b73ef7419598a0113c85ff215e13728c0a15b310.json delete mode 100644 core/lib/dal/.sqlx/query-4175d40e342052cc4b63c9c346541c4a7db0ea88c2e4caece1e7c170c991baa2.json rename core/lib/dal/.sqlx/{query-bcd4376476a6f794d0ff8c83a61d047c6531289ebf5d82ff9a987f4c62eb2010.json => query-640d37aa1d6dc722b1651c74b7ea2fca14965083b0589c3b3efad02e37d55f0c.json} (67%) create mode 100644 core/lib/dal/.sqlx/query-703836a3f065b0aedf71ad0474cac5e5fccb3ec55aa1227f5f1ea5a11f9b36a9.json create mode 100644 core/lib/dal/.sqlx/query-722e1b3a358c0627008e631505a5661bb3018be6d43164c1457edf50b5957429.json create mode 100644 core/lib/dal/.sqlx/query-9533a672ae82db344ae1070ae11b608d21dc70397b64ce500881a8b55953c59c.json rename core/lib/dal/.sqlx/{query-7727ef45e01aef2ffa2edc8227d65c24fdabe748979306e50eb7ecb47c71ec18.json => query-e59fb40d833837707e8d1e3c78e87fe0da8905c344755f264292cb436393069e.json} (59%) create mode 100644 core/lib/dal/.sqlx/query-ff510d87494636482d1abd05c9afd1fa6facc419ce16b8d628a0f5f78e28a0ee.json diff --git a/core/lib/dal/.sqlx/query-087be742721df75c9b197030204a2418ca2223c8f37ec9856dcf6ccfa8b8dabb.json b/core/lib/dal/.sqlx/query-087be742721df75c9b197030204a2418ca2223c8f37ec9856dcf6ccfa8b8dabb.json deleted file mode 100644 index 87759bd5d68c..000000000000 --- a/core/lib/dal/.sqlx/query-087be742721df75c9b197030204a2418ca2223c8f37ec9856dcf6ccfa8b8dabb.json +++ /dev/null @@ -1,15 +0,0 @@ -{ - "db_name": "PostgreSQL", - "query": "\n INSERT INTO\n proof_generation_details (l1_batch_number, proof_gen_data_blob_url, created_at, updated_at)\n VALUES\n ($1, $2, NOW(), NOW())\n ON CONFLICT (l1_batch_number) DO NOTHING\n ", - "describe": { - "columns": [], - "parameters": { - "Left": [ - "Int8", - "Text" - ] - }, - "nullable": [] - }, - "hash": "087be742721df75c9b197030204a2418ca2223c8f37ec9856dcf6ccfa8b8dabb" -} diff --git a/core/lib/dal/.sqlx/query-2b02ece0964c71d501c99885b73ef7419598a0113c85ff215e13728c0a15b310.json b/core/lib/dal/.sqlx/query-2b02ece0964c71d501c99885b73ef7419598a0113c85ff215e13728c0a15b310.json deleted file mode 100644 index d389eb41431d..000000000000 --- a/core/lib/dal/.sqlx/query-2b02ece0964c71d501c99885b73ef7419598a0113c85ff215e13728c0a15b310.json +++ /dev/null @@ -1,15 +0,0 @@ -{ - "db_name": "PostgreSQL", - "query": "\n UPDATE proof_generation_details\n SET\n status = 'generated',\n vm_run_data_blob_url = $1,\n updated_at = NOW()\n WHERE\n l1_batch_number = $2\n ", - "describe": { - "columns": [], - "parameters": { - "Left": [ - "Text", - "Int8" - ] - }, - "nullable": [] - }, - "hash": "2b02ece0964c71d501c99885b73ef7419598a0113c85ff215e13728c0a15b310" -} diff --git a/core/lib/dal/.sqlx/query-4175d40e342052cc4b63c9c346541c4a7db0ea88c2e4caece1e7c170c991baa2.json b/core/lib/dal/.sqlx/query-4175d40e342052cc4b63c9c346541c4a7db0ea88c2e4caece1e7c170c991baa2.json deleted file mode 100644 index 66a889915c35..000000000000 --- a/core/lib/dal/.sqlx/query-4175d40e342052cc4b63c9c346541c4a7db0ea88c2e4caece1e7c170c991baa2.json +++ /dev/null @@ -1,14 +0,0 @@ -{ - "db_name": "PostgreSQL", - "query": "\n INSERT INTO\n tee_proof_generation_details (l1_batch_number, created_at, updated_at)\n VALUES\n ($1, NOW(), NOW())\n ON 
CONFLICT (l1_batch_number) DO NOTHING\n ", - "describe": { - "columns": [], - "parameters": { - "Left": [ - "Int8" - ] - }, - "nullable": [] - }, - "hash": "4175d40e342052cc4b63c9c346541c4a7db0ea88c2e4caece1e7c170c991baa2" -} diff --git a/core/lib/dal/.sqlx/query-bcd4376476a6f794d0ff8c83a61d047c6531289ebf5d82ff9a987f4c62eb2010.json b/core/lib/dal/.sqlx/query-640d37aa1d6dc722b1651c74b7ea2fca14965083b0589c3b3efad02e37d55f0c.json similarity index 67% rename from core/lib/dal/.sqlx/query-bcd4376476a6f794d0ff8c83a61d047c6531289ebf5d82ff9a987f4c62eb2010.json rename to core/lib/dal/.sqlx/query-640d37aa1d6dc722b1651c74b7ea2fca14965083b0589c3b3efad02e37d55f0c.json index 9d9e32ee70fa..f0603488f1e8 100644 --- a/core/lib/dal/.sqlx/query-bcd4376476a6f794d0ff8c83a61d047c6531289ebf5d82ff9a987f4c62eb2010.json +++ b/core/lib/dal/.sqlx/query-640d37aa1d6dc722b1651c74b7ea2fca14965083b0589c3b3efad02e37d55f0c.json @@ -1,6 +1,6 @@ { "db_name": "PostgreSQL", - "query": "\n SELECT\n proofs.l1_batch_number\n FROM\n tee_proof_generation_details AS proofs\n JOIN tee_verifier_input_producer_jobs AS inputs ON proofs.l1_batch_number = inputs.l1_batch_number\n WHERE\n inputs.status = 'Successful'\n AND proofs.status NOT IN ('picked_by_prover', 'generated')\n ORDER BY\n proofs.l1_batch_number ASC\n LIMIT\n 1\n ", + "query": "\n SELECT\n proofs.l1_batch_number\n FROM\n tee_proof_generation_details AS proofs\n JOIN tee_verifier_input_producer_jobs AS inputs ON proofs.l1_batch_number = inputs.l1_batch_number\n WHERE\n inputs.status = 'Successful'\n AND proofs.status = 'ready_to_be_proven'\n ORDER BY\n proofs.l1_batch_number ASC\n LIMIT\n 1\n ", "describe": { "columns": [ { @@ -16,5 +16,5 @@ false ] }, - "hash": "bcd4376476a6f794d0ff8c83a61d047c6531289ebf5d82ff9a987f4c62eb2010" + "hash": "640d37aa1d6dc722b1651c74b7ea2fca14965083b0589c3b3efad02e37d55f0c" } diff --git a/core/lib/dal/.sqlx/query-703836a3f065b0aedf71ad0474cac5e5fccb3ec55aa1227f5f1ea5a11f9b36a9.json b/core/lib/dal/.sqlx/query-703836a3f065b0aedf71ad0474cac5e5fccb3ec55aa1227f5f1ea5a11f9b36a9.json new file mode 100644 index 000000000000..be9d5219665a --- /dev/null +++ b/core/lib/dal/.sqlx/query-703836a3f065b0aedf71ad0474cac5e5fccb3ec55aa1227f5f1ea5a11f9b36a9.json @@ -0,0 +1,15 @@ +{ + "db_name": "PostgreSQL", + "query": "\n UPDATE proof_generation_details\n SET\n vm_run_data_blob_url = $1,\n updated_at = NOW()\n WHERE\n l1_batch_number = $2\n ", + "describe": { + "columns": [], + "parameters": { + "Left": [ + "Text", + "Int8" + ] + }, + "nullable": [] + }, + "hash": "703836a3f065b0aedf71ad0474cac5e5fccb3ec55aa1227f5f1ea5a11f9b36a9" +} diff --git a/core/lib/dal/.sqlx/query-722e1b3a358c0627008e631505a5661bb3018be6d43164c1457edf50b5957429.json b/core/lib/dal/.sqlx/query-722e1b3a358c0627008e631505a5661bb3018be6d43164c1457edf50b5957429.json new file mode 100644 index 000000000000..9247df10c910 --- /dev/null +++ b/core/lib/dal/.sqlx/query-722e1b3a358c0627008e631505a5661bb3018be6d43164c1457edf50b5957429.json @@ -0,0 +1,20 @@ +{ + "db_name": "PostgreSQL", + "query": "\n SELECT\n l1_batch_number\n FROM\n proof_generation_details\n LEFT JOIN l1_batches ON l1_batch_number = l1_batches.number\n WHERE\n (\n vm_run_data_blob_url IS NOT NULL\n AND proof_gen_data_blob_url IS NOT NULL\n AND status NOT IN ('picked_by_prover', 'generated')\n )\n ORDER BY\n l1_batch_number ASC\n LIMIT\n 1\n\n ", + "describe": { + "columns": [ + { + "ordinal": 0, + "name": "l1_batch_number", + "type_info": "Int8" + } + ], + "parameters": { + "Left": [] + }, + "nullable": [ + false + ] + }, + "hash": 
"722e1b3a358c0627008e631505a5661bb3018be6d43164c1457edf50b5957429" +} diff --git a/core/lib/dal/.sqlx/query-9533a672ae82db344ae1070ae11b608d21dc70397b64ce500881a8b55953c59c.json b/core/lib/dal/.sqlx/query-9533a672ae82db344ae1070ae11b608d21dc70397b64ce500881a8b55953c59c.json new file mode 100644 index 000000000000..994bfcfbb5a2 --- /dev/null +++ b/core/lib/dal/.sqlx/query-9533a672ae82db344ae1070ae11b608d21dc70397b64ce500881a8b55953c59c.json @@ -0,0 +1,14 @@ +{ + "db_name": "PostgreSQL", + "query": "\n INSERT INTO\n tee_proof_generation_details (l1_batch_number, status, created_at, updated_at)\n VALUES\n ($1, 'ready_to_be_proven', NOW(), NOW())\n ON CONFLICT (l1_batch_number) DO NOTHING\n ", + "describe": { + "columns": [], + "parameters": { + "Left": [ + "Int8" + ] + }, + "nullable": [] + }, + "hash": "9533a672ae82db344ae1070ae11b608d21dc70397b64ce500881a8b55953c59c" +} diff --git a/core/lib/dal/.sqlx/query-7727ef45e01aef2ffa2edc8227d65c24fdabe748979306e50eb7ecb47c71ec18.json b/core/lib/dal/.sqlx/query-e59fb40d833837707e8d1e3c78e87fe0da8905c344755f264292cb436393069e.json similarity index 59% rename from core/lib/dal/.sqlx/query-7727ef45e01aef2ffa2edc8227d65c24fdabe748979306e50eb7ecb47c71ec18.json rename to core/lib/dal/.sqlx/query-e59fb40d833837707e8d1e3c78e87fe0da8905c344755f264292cb436393069e.json index b47d0af9f7bd..689dd4626939 100644 --- a/core/lib/dal/.sqlx/query-7727ef45e01aef2ffa2edc8227d65c24fdabe748979306e50eb7ecb47c71ec18.json +++ b/core/lib/dal/.sqlx/query-e59fb40d833837707e8d1e3c78e87fe0da8905c344755f264292cb436393069e.json @@ -1,6 +1,6 @@ { "db_name": "PostgreSQL", - "query": "\n UPDATE proof_generation_details\n SET\n status = 'picked_by_prover',\n updated_at = NOW(),\n prover_taken_at = NOW()\n WHERE\n l1_batch_number = (\n SELECT\n l1_batch_number\n FROM\n proof_generation_details\n LEFT JOIN l1_batches ON l1_batch_number = l1_batches.number\n WHERE\n (\n vm_run_data_blob_url IS NOT NULL\n AND proof_gen_data_blob_url IS NOT NULL\n AND l1_batches.merkle_root_hash IS NOT NULL\n AND l1_batches.aux_data_hash IS NOT NULL\n AND l1_batches.meta_parameters_hash IS NOT NULL\n )\n OR (\n status = 'picked_by_prover'\n AND prover_taken_at < NOW() - $1::INTERVAL\n )\n ORDER BY\n l1_batch_number ASC\n LIMIT\n 1\n FOR UPDATE\n SKIP LOCKED\n )\n RETURNING\n proof_generation_details.l1_batch_number\n ", + "query": "\n UPDATE proof_generation_details\n SET\n status = 'picked_by_prover',\n updated_at = NOW(),\n prover_taken_at = NOW()\n WHERE\n l1_batch_number = (\n SELECT\n l1_batch_number\n FROM\n proof_generation_details\n LEFT JOIN l1_batches ON l1_batch_number = l1_batches.number\n WHERE\n (\n vm_run_data_blob_url IS NOT NULL\n AND proof_gen_data_blob_url IS NOT NULL\n AND l1_batches.hash IS NOT NULL\n AND l1_batches.aux_data_hash IS NOT NULL\n AND l1_batches.meta_parameters_hash IS NOT NULL\n AND status NOT IN ('picked_by_prover', 'generated')\n )\n OR (\n status = 'picked_by_prover'\n AND prover_taken_at < NOW() - $1::INTERVAL\n )\n ORDER BY\n l1_batch_number ASC\n LIMIT\n 1\n )\n RETURNING\n proof_generation_details.l1_batch_number\n ", "describe": { "columns": [ { @@ -18,5 +18,5 @@ false ] }, - "hash": "7727ef45e01aef2ffa2edc8227d65c24fdabe748979306e50eb7ecb47c71ec18" + "hash": "e59fb40d833837707e8d1e3c78e87fe0da8905c344755f264292cb436393069e" } diff --git a/core/lib/dal/.sqlx/query-ff510d87494636482d1abd05c9afd1fa6facc419ce16b8d628a0f5f78e28a0ee.json b/core/lib/dal/.sqlx/query-ff510d87494636482d1abd05c9afd1fa6facc419ce16b8d628a0f5f78e28a0ee.json new file mode 100644 index 
000000000000..59828610a817 --- /dev/null +++ b/core/lib/dal/.sqlx/query-ff510d87494636482d1abd05c9afd1fa6facc419ce16b8d628a0f5f78e28a0ee.json @@ -0,0 +1,15 @@ +{ + "db_name": "PostgreSQL", + "query": "\n INSERT INTO\n proof_generation_details (l1_batch_number, status, proof_gen_data_blob_url, created_at, updated_at)\n VALUES\n ($1, 'waiting_for_data', $2, NOW(), NOW())\n ON CONFLICT (l1_batch_number) DO NOTHING\n ", + "describe": { + "columns": [], + "parameters": { + "Left": [ + "Int8", + "Text" + ] + }, + "nullable": [] + }, + "hash": "ff510d87494636482d1abd05c9afd1fa6facc419ce16b8d628a0f5f78e28a0ee" +} diff --git a/core/lib/dal/src/proof_generation_dal.rs b/core/lib/dal/src/proof_generation_dal.rs index 7e998be5b195..00670639ce54 100644 --- a/core/lib/dal/src/proof_generation_dal.rs +++ b/core/lib/dal/src/proof_generation_dal.rs @@ -51,9 +51,10 @@ impl ProofGenerationDal<'_, '_> { ( vm_run_data_blob_url IS NOT NULL AND proof_gen_data_blob_url IS NOT NULL - AND l1_batches.merkle_root_hash IS NOT NULL + AND l1_batches.hash IS NOT NULL AND l1_batches.aux_data_hash IS NOT NULL AND l1_batches.meta_parameters_hash IS NOT NULL + AND status NOT IN ('picked_by_prover', 'generated') ) OR ( status = 'picked_by_prover' @@ -63,8 +64,6 @@ impl ProofGenerationDal<'_, '_> { l1_batch_number ASC LIMIT 1 - FOR UPDATE - SKIP LOCKED ) RETURNING proof_generation_details.l1_batch_number @@ -79,7 +78,7 @@ impl ProofGenerationDal<'_, '_> { Ok(result) } - pub async fn save_proof_merkle_paths_artifacts_metadata( + pub async fn save_proof_artifacts_metadata( &mut self, batch_number: L1BatchNumber, proof_blob_url: &str, @@ -127,7 +126,6 @@ impl ProofGenerationDal<'_, '_> { r#" UPDATE proof_generation_details SET - status = 'generated', vm_run_data_blob_url = $1, updated_at = NOW() WHERE @@ -164,9 +162,9 @@ impl ProofGenerationDal<'_, '_> { let result = sqlx::query!( r#" INSERT INTO - proof_generation_details (l1_batch_number, proof_gen_data_blob_url, created_at, updated_at) + proof_generation_details (l1_batch_number, status, proof_gen_data_blob_url, created_at, updated_at) VALUES - ($1, $2, NOW(), NOW()) + ($1, 'waiting_for_data', $2, NOW(), NOW()) ON CONFLICT (l1_batch_number) DO NOTHING "#, i64::from(l1_batch_number.0), @@ -274,7 +272,9 @@ impl ProofGenerationDal<'_, '_> { #[cfg(test)] mod tests { - use zksync_types::ProtocolVersion; + use zksync_types::{ + block::L1BatchTreeData, commitment::L1BatchCommitmentArtifacts, ProtocolVersion, H256, + }; use super::*; use crate::{tests::create_l1_batch_header, ConnectionPool, CoreDal}; @@ -317,6 +317,27 @@ mod tests { .insert_proof_generation_details(L1BatchNumber(1), "generation_data") .await .unwrap(); + conn.proof_generation_dal() + .save_vm_runner_artifacts_metadata(L1BatchNumber(1), "vm_run") + .await + .unwrap(); + conn.blocks_dal() + .save_l1_batch_tree_data( + L1BatchNumber(1), + &L1BatchTreeData { + hash: H256::zero(), + rollup_last_leaf_index: 123, + }, + ) + .await + .unwrap(); + conn.blocks_dal() + .save_l1_batch_commitment_artifacts( + L1BatchNumber(1), + &L1BatchCommitmentArtifacts::default(), + ) + .await + .unwrap(); let unpicked_l1_batch = conn .proof_generation_dal() @@ -347,7 +368,7 @@ mod tests { assert_eq!(picked_l1_batch, Some(L1BatchNumber(1))); conn.proof_generation_dal() - .save_proof_merkle_paths_artifacts_metadata(L1BatchNumber(1), "proof") + .save_proof_artifacts_metadata(L1BatchNumber(1), "proof") .await .unwrap(); diff --git a/core/lib/dal/src/tee_proof_generation_dal.rs b/core/lib/dal/src/tee_proof_generation_dal.rs index 
001393cb316f..1415dca1df7a 100644 --- a/core/lib/dal/src/tee_proof_generation_dal.rs +++ b/core/lib/dal/src/tee_proof_generation_dal.rs @@ -18,6 +18,8 @@ pub struct TeeProofGenerationDal<'a, 'c> { #[derive(Debug, EnumString, Display)] enum TeeProofGenerationJobStatus { + #[strum(serialize = "ready_to_be_proven")] + ReadyToBeProven, #[strum(serialize = "picked_by_prover")] PickedByProver, #[strum(serialize = "generated")] @@ -38,7 +40,6 @@ impl TeeProofGenerationDal<'_, '_> { processing_timeout: Duration, ) -> DalResult> { let processing_timeout = pg_interval_from_duration(processing_timeout); - // todo: deprecate ready to be proven let result: Option = sqlx::query!( r#" UPDATE tee_proof_generation_details @@ -74,10 +75,10 @@ impl TeeProofGenerationDal<'_, '_> { "#, &processing_timeout, ) - .fetch_optional(self.storage.conn()) - .await - .unwrap() - .map(|row| L1BatchNumber(row.l1_batch_number as u32)); + .fetch_optional(self.storage.conn()) + .await + .unwrap() + .map(|row| L1BatchNumber(row.l1_batch_number as u32)); Ok(result) } @@ -137,9 +138,9 @@ impl TeeProofGenerationDal<'_, '_> { sqlx::query!( r#" INSERT INTO - tee_proof_generation_details (l1_batch_number, created_at, updated_at) + tee_proof_generation_details (l1_batch_number, status, created_at, updated_at) VALUES - ($1, NOW(), NOW()) + ($1, 'ready_to_be_proven', NOW(), NOW()) ON CONFLICT (l1_batch_number) DO NOTHING "#, block_number, @@ -163,17 +164,17 @@ impl TeeProofGenerationDal<'_, '_> { JOIN tee_verifier_input_producer_jobs AS inputs ON proofs.l1_batch_number = inputs.l1_batch_number WHERE inputs.status = 'Successful' - AND proofs.status NOT IN ('picked_by_prover', 'generated') + AND proofs.status = 'ready_to_be_proven' ORDER BY proofs.l1_batch_number ASC LIMIT 1 "#, ) - .fetch_optional(self.storage.conn()) - .await - .unwrap() - .map(|row| L1BatchNumber(row.l1_batch_number as u32)); + .fetch_optional(self.storage.conn()) + .await + .unwrap() + .map(|row| L1BatchNumber(row.l1_batch_number as u32)); Ok(result) } diff --git a/core/lib/types/src/commitment/mod.rs b/core/lib/types/src/commitment/mod.rs index 61c2d7b5ea27..63d1bad486f3 100644 --- a/core/lib/types/src/commitment/mod.rs +++ b/core/lib/types/src/commitment/mod.rs @@ -536,7 +536,7 @@ pub struct L1BatchCommitment { pub meta_parameters: L1BatchMetaParameters, } -#[derive(Debug, Clone, PartialEq, Eq)] +#[derive(Default, Debug, Clone, PartialEq, Eq)] #[cfg_attr(test, derive(Serialize, Deserialize))] pub struct L1BatchCommitmentHash { pub pass_through_data: H256, @@ -720,7 +720,7 @@ impl CommitmentInput { } } -#[derive(Debug, Clone, PartialEq, Eq)] +#[derive(Default, Debug, Clone, PartialEq, Eq)] pub struct L1BatchCommitmentArtifacts { pub commitment_hash: L1BatchCommitmentHash, pub l2_l1_merkle_root: H256, diff --git a/core/node/proof_data_handler/src/request_processor.rs b/core/node/proof_data_handler/src/request_processor.rs index 54524458a63f..37434a01f269 100644 --- a/core/node/proof_data_handler/src/request_processor.rs +++ b/core/node/proof_data_handler/src/request_processor.rs @@ -227,7 +227,7 @@ impl RequestProcessor { } storage .proof_generation_dal() - .save_proof_merkle_paths_artifacts_metadata(l1_batch_number, &blob_url) + .save_proof_artifacts_metadata(l1_batch_number, &blob_url) .await .map_err(RequestProcessorError::Dal)?; } From b0fa3b47e554be6bf8076e165eb4e56a85c33cbe Mon Sep 17 00:00:00 2001 From: Lech <88630083+Artemka374@users.noreply.github.com> Date: Thu, 27 Jun 2024 14:52:50 +0300 Subject: [PATCH 19/56] some cleanups --- 
core/lib/config/src/configs/vm_runner.rs | 4 ++-- core/lib/object_store/src/objects.rs | 1 + core/lib/prover_interface/src/inputs.rs | 1 - core/lib/tee_verifier/src/lib.rs | 12 ++++++------ .../src/implementations/layers/vm_runner/bwip.rs | 2 +- core/node/state_keeper/src/testonly/mod.rs | 9 ++++++--- .../state_keeper/src/testonly/test_batch_executor.rs | 10 +++++----- core/node/vm_runner/src/impls/bwip.rs | 2 +- etc/env/base/vm_runner.toml | 8 ++++++++ 9 files changed, 30 insertions(+), 19 deletions(-) diff --git a/core/lib/config/src/configs/vm_runner.rs b/core/lib/config/src/configs/vm_runner.rs index b4e5d42291b3..fa7c7c1a90a3 100644 --- a/core/lib/config/src/configs/vm_runner.rs +++ b/core/lib/config/src/configs/vm_runner.rs @@ -21,7 +21,7 @@ impl ProtectiveReadsWriterConfig { #[derive(Debug, Deserialize, Clone, PartialEq, Default)] pub struct BasicWitnessInputProducerConfig { /// Path to the RocksDB data directory that serves state cache. - #[serde(default = "ProtectiveReadsWriterConfig::default_db_path")] + #[serde(default = "BasicWitnessInputProducerConfig::default_db_path")] pub db_path: String, /// How many max batches should be processed at the same time. pub window_size: u32, @@ -31,6 +31,6 @@ pub struct BasicWitnessInputProducerConfig { impl BasicWitnessInputProducerConfig { fn default_db_path() -> String { - "./db/protective_reads_writer".to_owned() + "./db/basic_witness_input_producer".to_owned() } } diff --git a/core/lib/object_store/src/objects.rs b/core/lib/object_store/src/objects.rs index dbfd9caa25d2..d67e4e5df137 100644 --- a/core/lib/object_store/src/objects.rs +++ b/core/lib/object_store/src/objects.rs @@ -86,6 +86,7 @@ impl StoredObject for SnapshotFactoryDependencies { .map_err(From::from) } } + impl StoredObject for SnapshotStorageLogsChunk { const BUCKET: Bucket = Bucket::StorageSnapshot; type Key<'a> = SnapshotStorageLogsStorageKey; diff --git a/core/lib/prover_interface/src/inputs.rs b/core/lib/prover_interface/src/inputs.rs index f2fbf54b4031..f276349dabc5 100644 --- a/core/lib/prover_interface/src/inputs.rs +++ b/core/lib/prover_interface/src/inputs.rs @@ -144,7 +144,6 @@ pub struct VMRunWitnessInputData { pub used_bytecodes: HashMap>, pub initial_heap_content: Vec<(usize, U256)>, pub protocol_version: ProtocolVersionId, - pub bootloader_code: Vec<[u8; 32]>, pub default_account_code_hash: U256, pub storage_refunds: Vec, diff --git a/core/lib/tee_verifier/src/lib.rs b/core/lib/tee_verifier/src/lib.rs index 758858321477..19759cb10208 100644 --- a/core/lib/tee_verifier/src/lib.rs +++ b/core/lib/tee_verifier/src/lib.rs @@ -27,7 +27,7 @@ use zksync_vm_utils::execute_tx; /// Version 1 of the data used as input for the TEE verifier. #[derive(Debug, Clone, Serialize, Deserialize, PartialEq)] pub struct V1TeeVerifierInput { - prepare_basic_circuits_job: WitnessInputMerklePaths, + merkle_paths: WitnessInputMerklePaths, l2_blocks_execution_data: Vec, l1_batch_env: L1BatchEnv, system_env: SystemEnv, @@ -46,14 +46,14 @@ pub enum TeeVerifierInput { impl TeeVerifierInput { pub fn new( - prepare_basic_circuits_job: WitnessInputMerklePaths, + merkle_paths: WitnessInputMerklePaths, l2_blocks_execution_data: Vec, l1_batch_env: L1BatchEnv, system_env: SystemEnv, used_contracts: Vec<(H256, Vec)>, ) -> Self { TeeVerifierInput::V1(V1TeeVerifierInput { - prepare_basic_circuits_job, + merkle_paths, l2_blocks_execution_data, l1_batch_env, system_env, @@ -71,7 +71,7 @@ impl TeeVerifierInput { /// not actionable. 
pub fn verify(self) -> anyhow::Result<()> { let TeeVerifierInput::V1(V1TeeVerifierInput { - prepare_basic_circuits_job, + merkle_paths: prepare_basic_circuits_job, l2_blocks_execution_data, l1_batch_env, system_env, @@ -118,10 +118,10 @@ impl TeeVerifierInput { /// Sets the initial storage values and returns `BlockOutputWithProofs` fn get_bowp_and_set_initial_values( - prepare_basic_circuits_job: WitnessInputMerklePaths, + merkle_paths: WitnessInputMerklePaths, raw_storage: &mut InMemoryStorage, ) -> BlockOutputWithProofs { - let logs = prepare_basic_circuits_job + let logs = merkle_paths .into_merkle_paths() .map( |StorageLogMetadata { diff --git a/core/node/node_framework/src/implementations/layers/vm_runner/bwip.rs b/core/node/node_framework/src/implementations/layers/vm_runner/bwip.rs index c2f784040e93..ba433ffa3899 100644 --- a/core/node/node_framework/src/implementations/layers/vm_runner/bwip.rs +++ b/core/node/node_framework/src/implementations/layers/vm_runner/bwip.rs @@ -47,7 +47,7 @@ impl WiringLayer for BasicWitnessInputProducerLayer { // One for `ConcurrentOutputHandlerFactoryTask`/`VmRunner` as they need occasional access // to DB for querying last processed batch and last ready to be loaded batch. // - // `window_size` connections for `ProtectiveReadsOutputHandlerFactory` + // `window_size` connections for `BasicWitnessInputProducer` // as there can be multiple output handlers holding multi-second connections to write // large amount of protective reads. master_pool diff --git a/core/node/state_keeper/src/testonly/mod.rs b/core/node/state_keeper/src/testonly/mod.rs index b284fc3e9212..15eaee689310 100644 --- a/core/node/state_keeper/src/testonly/mod.rs +++ b/core/node/state_keeper/src/testonly/mod.rs @@ -15,7 +15,7 @@ use zksync_multivm::{ }, vm_latest::VmExecutionLogs, }; -use zksync_state::ReadStorageFactory; +use zksync_state::{ReadStorageFactory, StorageViewCache}; use zksync_test_account::Account; use zksync_types::{ fee::Fee, utils::storage_key_for_standard_token_balance, AccountTreeId, Address, Execute, @@ -79,6 +79,10 @@ pub(crate) fn successful_exec() -> TxExecutionResult { } } +pub(crate) fn storage_view_cache() -> StorageViewCache { + StorageViewCache::default() +} + /// `BatchExecutor` which doesn't check anything at all. Accepts all transactions. 
#[derive(Debug)] pub struct MockBatchExecutor; @@ -105,8 +109,7 @@ impl BatchExecutor for MockBatchExecutor { resp.send(default_vm_batch_result()).unwrap(); break; } - // todo: add test - Command::StorageViewCache(_) => (), + Command::StorageViewCache(resp) => resp.send(storage_view_cache()).unwrap(), } } anyhow::Ok(()) diff --git a/core/node/state_keeper/src/testonly/test_batch_executor.rs b/core/node/state_keeper/src/testonly/test_batch_executor.rs index 6839b01b052c..8a250fb8dab2 100644 --- a/core/node/state_keeper/src/testonly/test_batch_executor.rs +++ b/core/node/state_keeper/src/testonly/test_batch_executor.rs @@ -30,7 +30,9 @@ use crate::{ batch_executor::{BatchExecutor, BatchExecutorHandle, Command, TxExecutionResult}, io::{IoCursor, L1BatchParams, L2BlockParams, PendingBatchData, StateKeeperIO}, seal_criteria::{IoSealCriteria, SequencerSealer, UnexecutableReason}, - testonly::{default_vm_batch_result, successful_exec, BASE_SYSTEM_CONTRACTS}, + testonly::{ + default_vm_batch_result, storage_view_cache, successful_exec, BASE_SYSTEM_CONTRACTS, + }, types::ExecutionMetricsForCriteria, updates::UpdatesManager, OutputHandler, StateKeeperOutputHandler, ZkSyncStateKeeper, @@ -499,8 +501,7 @@ impl TestBatchExecutor { resp.send(default_vm_batch_result()).unwrap(); return; } - //todo: add test - Command::StorageViewCache(_) => (), + Command::StorageViewCache(resp) => resp.send(storage_view_cache()).unwrap(), } } } @@ -829,8 +830,7 @@ impl BatchExecutor for MockBatchExecutor { resp.send(default_vm_batch_result()).unwrap(); break; } - // todo: add test - Command::StorageViewCache(_) => (), + Command::StorageViewCache(resp) => resp.send(storage_view_cache()).unwrap(), } } anyhow::Ok(()) diff --git a/core/node/vm_runner/src/impls/bwip.rs b/core/node/vm_runner/src/impls/bwip.rs index b9cda91dc921..cd51405ca4c0 100644 --- a/core/node/vm_runner/src/impls/bwip.rs +++ b/core/node/vm_runner/src/impls/bwip.rs @@ -17,7 +17,7 @@ use crate::{ OutputHandlerFactory, VmRunner, VmRunnerIo, VmRunnerStorage, }; -/// A standalone component that writes witness input data asynchronously to state keeper. +/// A standalone component that retrieves all needed data for basic witness generation and saves it to the bucket #[derive(Debug)] pub struct BasicWitnessInputProducer { vm_runner: VmRunner, diff --git a/etc/env/base/vm_runner.toml b/etc/env/base/vm_runner.toml index c8f259efc3b7..ca47eb47799a 100644 --- a/etc/env/base/vm_runner.toml +++ b/etc/env/base/vm_runner.toml @@ -9,3 +9,11 @@ db_path = "./db/main/protective_reads" window_size = 3 # All batches before this one (inclusive) are always considered to be processed. first_processed_batch = 0 + +[vm_runner.bwip] +# Path to the directory that contains RocksDB with bwip writer cache. +db_path = "./db/main/basic_witness_input_producer" +# Amount of batches that can be processed in parallel. +window_size = 3 +# All batches before this one (inclusive) are always considered to be processed. 
+first_processed_batch = 0 \ No newline at end of file From fcc4a341910085480928abb800ab6387cce8a6b3 Mon Sep 17 00:00:00 2001 From: Lech <88630083+Artemka374@users.noreply.github.com> Date: Thu, 27 Jun 2024 14:55:31 +0300 Subject: [PATCH 20/56] allow dead code --- core/lib/config/src/configs/vm_runner.rs | 1 + 1 file changed, 1 insertion(+) diff --git a/core/lib/config/src/configs/vm_runner.rs b/core/lib/config/src/configs/vm_runner.rs index fa7c7c1a90a3..477d8acfcb0f 100644 --- a/core/lib/config/src/configs/vm_runner.rs +++ b/core/lib/config/src/configs/vm_runner.rs @@ -30,6 +30,7 @@ pub struct BasicWitnessInputProducerConfig { } impl BasicWitnessInputProducerConfig { + #[allow(dead_code)] fn default_db_path() -> String { "./db/basic_witness_input_producer".to_owned() } From 314557ad2e288d3cd886c31d0dbc1db7593ab820 Mon Sep 17 00:00:00 2001 From: Lech <88630083+Artemka374@users.noreply.github.com> Date: Thu, 27 Jun 2024 15:07:08 +0300 Subject: [PATCH 21/56] add docs --- core/lib/state/src/storage_view.rs | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/core/lib/state/src/storage_view.rs b/core/lib/state/src/storage_view.rs index 1eb5488129e3..0529196a9e93 100644 --- a/core/lib/state/src/storage_view.rs +++ b/core/lib/state/src/storage_view.rs @@ -49,6 +49,7 @@ pub struct StorageView { metrics: StorageViewMetrics, } +/// `StorageViewCache` is a struct for caching storage reads and `contains_key()` checks. #[derive(Debug, Default, Clone)] pub struct StorageViewCache { // Used purely for caching @@ -58,16 +59,19 @@ pub struct StorageViewCache { } impl StorageViewCache { + /// Returns the read storage keys. pub fn read_storage_keys(&self) -> HashMap { self.read_storage_keys.clone() } + /// Returns the initial writes. pub fn initial_writes(&self) -> HashMap { self.initial_writes.clone() } } impl StorageView { + /// Returns the underlying storage cache. 
pub fn cache(&self) -> StorageViewCache { self.cache.clone() } From dc82471fc416ad5339d1392e54dd5cbce9c1cf78 Mon Sep 17 00:00:00 2001 From: Lech <88630083+Artemka374@users.noreply.github.com> Date: Thu, 27 Jun 2024 16:37:48 +0300 Subject: [PATCH 22/56] remove redundant query --- core/node/vm_runner/src/impls/bwip.rs | 9 +-------- 1 file changed, 1 insertion(+), 8 deletions(-) diff --git a/core/node/vm_runner/src/impls/bwip.rs b/core/node/vm_runner/src/impls/bwip.rs index cd51405ca4c0..cd2ad9947ed2 100644 --- a/core/node/vm_runner/src/impls/bwip.rs +++ b/core/node/vm_runner/src/impls/bwip.rs @@ -179,14 +179,7 @@ impl StateKeeperOutputHandler for BasicWitnessInputProducerOutputHandler { result.previous_root_hash = previous_batch_with_metadata.metadata.root_hash; let blob_url = self.object_store.put(l1_batch_number, &result).await?; - self.pool - .connection() - .await - .unwrap() - .vm_runner_dal() - .mark_bwip_batch_as_completed(l1_batch_number) - .await - .unwrap(); + self.pool .connection() .await From da2729ab7d27840beb1f97101f65b79d168d40fe Mon Sep 17 00:00:00 2001 From: Lech <88630083+Artemka374@users.noreply.github.com> Date: Thu, 27 Jun 2024 16:40:52 +0300 Subject: [PATCH 23/56] add tracing --- core/node/vm_runner/src/impls/bwip.rs | 2 ++ 1 file changed, 2 insertions(+) diff --git a/core/node/vm_runner/src/impls/bwip.rs b/core/node/vm_runner/src/impls/bwip.rs index cd2ad9947ed2..61041cba1928 100644 --- a/core/node/vm_runner/src/impls/bwip.rs +++ b/core/node/vm_runner/src/impls/bwip.rs @@ -180,6 +180,8 @@ impl StateKeeperOutputHandler for BasicWitnessInputProducerOutputHandler { let blob_url = self.object_store.put(l1_batch_number, &result).await?; + tracing::info!("Saved VM run data for L1 batch {}", l1_batch_number.0); + self.pool .connection() .await From 5ba82aea0cdf840cb9492d3deea78448eae817fe Mon Sep 17 00:00:00 2001 From: Lech <88630083+Artemka374@users.noreply.github.com> Date: Thu, 27 Jun 2024 20:43:56 +0300 Subject: [PATCH 24/56] fix serializing problem (should be fixed better in the future) --- core/lib/prover_interface/src/inputs.rs | 5 ++-- .../types/src/storage/witness_block_state.rs | 24 +++++++++++++++++++ core/node/vm_runner/src/impls/bwip.rs | 11 +++++---- .../witness_generator/src/basic_circuits.rs | 2 +- 4 files changed, 34 insertions(+), 8 deletions(-) diff --git a/core/lib/prover_interface/src/inputs.rs b/core/lib/prover_interface/src/inputs.rs index f276349dabc5..a5436038296b 100644 --- a/core/lib/prover_interface/src/inputs.rs +++ b/core/lib/prover_interface/src/inputs.rs @@ -5,7 +5,8 @@ use serde_with::{serde_as, Bytes}; use zksync_object_store::{serialize_using_bincode, Bucket, StoredObject}; pub use zksync_state::WitnessStorage; use zksync_types::{ - witness_block_state::WitnessBlockState, L1BatchNumber, ProtocolVersionId, H256, U256, + witness_block_state::WitnessBlockStateSerializable, L1BatchNumber, ProtocolVersionId, H256, + U256, }; const HASH_LEN: usize = H256::len_bytes(); @@ -148,7 +149,7 @@ pub struct VMRunWitnessInputData { pub default_account_code_hash: U256, pub storage_refunds: Vec, pub pubdata_costs: Option>, - pub witness_block_state: WitnessBlockState, + pub witness_block_state: WitnessBlockStateSerializable, } impl StoredObject for VMRunWitnessInputData { diff --git a/core/lib/types/src/storage/witness_block_state.rs b/core/lib/types/src/storage/witness_block_state.rs index cae3eb892c6e..6ca2afbae126 100644 --- a/core/lib/types/src/storage/witness_block_state.rs +++ b/core/lib/types/src/storage/witness_block_state.rs @@ -10,3 +10,27 @@ pub
struct WitnessBlockState { pub read_storage_key: HashMap, pub is_write_initial: HashMap, } + +#[derive(Debug, Default, Clone, Serialize, Deserialize)] +pub struct WitnessBlockStateSerializable { + pub read_storage_key: Vec<(StorageKey, StorageValue)>, + pub is_write_initial: Vec<(StorageKey, bool)>, +} + +impl From for WitnessBlockStateSerializable { + fn from(state: WitnessBlockState) -> Self { + Self { + read_storage_key: state.read_storage_key.into_iter().collect(), + is_write_initial: state.is_write_initial.into_iter().collect(), + } + } +} + +impl From for WitnessBlockState { + fn from(state: WitnessBlockStateSerializable) -> Self { + Self { + read_storage_key: state.read_storage_key.into_iter().collect(), + is_write_initial: state.is_write_initial.into_iter().collect(), + } + } +} diff --git a/core/node/vm_runner/src/impls/bwip.rs b/core/node/vm_runner/src/impls/bwip.rs index 61041cba1928..d19b56ec5f00 100644 --- a/core/node/vm_runner/src/impls/bwip.rs +++ b/core/node/vm_runner/src/impls/bwip.rs @@ -7,8 +7,9 @@ use zksync_object_store::ObjectStore; use zksync_prover_interface::inputs::VMRunWitnessInputData; use zksync_state_keeper::{MainBatchExecutor, StateKeeperOutputHandler, UpdatesManager}; use zksync_types::{ - block::StorageOracleInfo, witness_block_state::WitnessBlockState, L1BatchNumber, L2ChainId, - ProtocolVersionId, H256, + block::StorageOracleInfo, + witness_block_state::{WitnessBlockState, WitnessBlockStateSerializable}, + L1BatchNumber, L2ChainId, ProtocolVersionId, H256, }; use zksync_utils::{bytes_to_chunks, h256_to_u256, u256_to_h256}; @@ -173,7 +174,7 @@ impl StateKeeperOutputHandler for BasicWitnessInputProducerOutputHandler { is_write_initial: updates_manager.storage_view_cache.initial_writes(), }; - result.witness_block_state = block_state; + result.witness_block_state = block_state.into(); result.previous_aux_hash = previous_batch_with_metadata.metadata.aux_data_hash; result.previous_meta_hash = previous_batch_with_metadata.metadata.meta_parameters_hash; result.previous_root_hash = previous_batch_with_metadata.metadata.root_hash; @@ -262,7 +263,7 @@ async fn get_updates_manager_witness_input_data( default_account_code_hash: account_code_hash, storage_refunds, pubdata_costs, - witness_block_state: WitnessBlockState::default(), + witness_block_state: WitnessBlockStateSerializable::default(), } } @@ -349,7 +350,7 @@ async fn get_database_witness_input_data( default_account_code_hash: account_code_hash, storage_refunds, pubdata_costs, - witness_block_state: WitnessBlockState::default(), + witness_block_state: WitnessBlockStateSerializable::default(), } } diff --git a/prover/witness_generator/src/basic_circuits.rs b/prover/witness_generator/src/basic_circuits.rs index eb186a068a53..b078738a4247 100644 --- a/prover/witness_generator/src/basic_circuits.rs +++ b/prover/witness_generator/src/basic_circuits.rs @@ -420,7 +420,7 @@ async fn generate_witness( let (queue_sender, mut queue_receiver) = tokio::sync::mpsc::channel(1); let make_circuits = tokio::task::spawn_blocking(move || { - let witness_storage = WitnessStorage::new(input.vm_run_data.witness_block_state); + let witness_storage = WitnessStorage::new(input.vm_run_data.witness_block_state.into()); let storage_view = StorageView::new(witness_storage).to_rc_ptr(); let vm_storage_oracle: VmStorageOracle>, HistoryDisabled> = From 84607e56265fa2e239d36df1991cd1c1d7be0cb8 Mon Sep 17 00:00:00 2001 From: Lech <88630083+Artemka374@users.noreply.github.com> Date: Fri, 28 Jun 2024 13:30:59 +0300 Subject: [PATCH 25/56] fmt --- 
core/lib/state/src/storage_view.rs | 1 - 1 file changed, 1 deletion(-) diff --git a/core/lib/state/src/storage_view.rs b/core/lib/state/src/storage_view.rs index 75293bcafd25..0529196a9e93 100644 --- a/core/lib/state/src/storage_view.rs +++ b/core/lib/state/src/storage_view.rs @@ -70,7 +70,6 @@ impl StorageViewCache { } } - impl StorageView { /// Returns the underlying storage cache. pub fn cache(&self) -> StorageViewCache { From 4c0e2708a67120f89d8c2f8bb6837e1377dd4f9f Mon Sep 17 00:00:00 2001 From: Lech <88630083+Artemka374@users.noreply.github.com> Date: Fri, 28 Jun 2024 15:23:11 +0300 Subject: [PATCH 26/56] rework serialization --- core/lib/prover_interface/src/inputs.rs | 5 +-- .../types/src/storage/witness_block_state.rs | 37 ++++++++++++------- core/node/vm_runner/src/impls/bwip.rs | 11 +++--- .../witness_generator/src/basic_circuits.rs | 2 +- 4 files changed, 32 insertions(+), 23 deletions(-) diff --git a/core/lib/prover_interface/src/inputs.rs b/core/lib/prover_interface/src/inputs.rs index a5436038296b..f276349dabc5 100644 --- a/core/lib/prover_interface/src/inputs.rs +++ b/core/lib/prover_interface/src/inputs.rs @@ -5,8 +5,7 @@ use serde_with::{serde_as, Bytes}; use zksync_object_store::{serialize_using_bincode, Bucket, StoredObject}; pub use zksync_state::WitnessStorage; use zksync_types::{ - witness_block_state::WitnessBlockStateSerializable, L1BatchNumber, ProtocolVersionId, H256, - U256, + witness_block_state::WitnessBlockState, L1BatchNumber, ProtocolVersionId, H256, U256, }; const HASH_LEN: usize = H256::len_bytes(); @@ -149,7 +148,7 @@ pub struct VMRunWitnessInputData { pub default_account_code_hash: U256, pub storage_refunds: Vec, pub pubdata_costs: Option>, - pub witness_block_state: WitnessBlockStateSerializable, + pub witness_block_state: WitnessBlockState, } impl StoredObject for VMRunWitnessInputData { diff --git a/core/lib/types/src/storage/witness_block_state.rs b/core/lib/types/src/storage/witness_block_state.rs index 6ca2afbae126..0ae020fd823c 100644 --- a/core/lib/types/src/storage/witness_block_state.rs +++ b/core/lib/types/src/storage/witness_block_state.rs @@ -5,32 +5,43 @@ use serde::{Deserialize, Serialize}; use crate::{StorageKey, StorageValue}; /// Storage data used during Witness Generation. 
-#[derive(Debug, Default, Clone, Serialize, Deserialize)] +#[derive(Debug, Default, Clone)] pub struct WitnessBlockState { pub read_storage_key: HashMap, pub is_write_initial: HashMap, } +/// A serde schema for serializing/deserializing `WitnessBlockState` #[derive(Debug, Default, Clone, Serialize, Deserialize)] -pub struct WitnessBlockStateSerializable { +struct WitnessBlockStateSerde { pub read_storage_key: Vec<(StorageKey, StorageValue)>, pub is_write_initial: Vec<(StorageKey, bool)>, } -impl From for WitnessBlockStateSerializable { - fn from(state: WitnessBlockState) -> Self { - Self { - read_storage_key: state.read_storage_key.into_iter().collect(), - is_write_initial: state.is_write_initial.into_iter().collect(), +impl Serialize for WitnessBlockState { + fn serialize(&self, s: S) -> Result { + WitnessBlockStateSerde { + read_storage_key: self + .read_storage_key + .iter() + .map(|(k, v)| (*k, v.clone())) + .collect(), + is_write_initial: self + .is_write_initial + .iter() + .map(|(k, v)| (*k, *v)) + .collect(), } + .serialize(s) } } -impl From for WitnessBlockState { - fn from(state: WitnessBlockStateSerializable) -> Self { - Self { - read_storage_key: state.read_storage_key.into_iter().collect(), - is_write_initial: state.is_write_initial.into_iter().collect(), - } +impl<'de> serde::Deserialize<'de> for WitnessBlockState { + fn deserialize>(d: D) -> Result { + let x = WitnessBlockStateSerde::deserialize(d)?; + Ok(Self { + read_storage_key: x.read_storage_key.into_iter().collect(), + is_write_initial: x.is_write_initial.into_iter().collect(), + }) } } diff --git a/core/node/vm_runner/src/impls/bwip.rs b/core/node/vm_runner/src/impls/bwip.rs index d19b56ec5f00..61041cba1928 100644 --- a/core/node/vm_runner/src/impls/bwip.rs +++ b/core/node/vm_runner/src/impls/bwip.rs @@ -7,9 +7,8 @@ use zksync_object_store::ObjectStore; use zksync_prover_interface::inputs::VMRunWitnessInputData; use zksync_state_keeper::{MainBatchExecutor, StateKeeperOutputHandler, UpdatesManager}; use zksync_types::{ - block::StorageOracleInfo, - witness_block_state::{WitnessBlockState, WitnessBlockStateSerializable}, - L1BatchNumber, L2ChainId, ProtocolVersionId, H256, + block::StorageOracleInfo, witness_block_state::WitnessBlockState, L1BatchNumber, L2ChainId, + ProtocolVersionId, H256, }; use zksync_utils::{bytes_to_chunks, h256_to_u256, u256_to_h256}; @@ -174,7 +173,7 @@ impl StateKeeperOutputHandler for BasicWitnessInputProducerOutputHandler { is_write_initial: updates_manager.storage_view_cache.initial_writes(), }; - result.witness_block_state = block_state.into(); + result.witness_block_state = block_state; result.previous_aux_hash = previous_batch_with_metadata.metadata.aux_data_hash; result.previous_meta_hash = previous_batch_with_metadata.metadata.meta_parameters_hash; result.previous_root_hash = previous_batch_with_metadata.metadata.root_hash; @@ -263,7 +262,7 @@ async fn get_updates_manager_witness_input_data( default_account_code_hash: account_code_hash, storage_refunds, pubdata_costs, - witness_block_state: WitnessBlockStateSerializable::default(), + witness_block_state: WitnessBlockState::default(), } } @@ -350,7 +349,7 @@ async fn get_database_witness_input_data( default_account_code_hash: account_code_hash, storage_refunds, pubdata_costs, - witness_block_state: WitnessBlockStateSerializable::default(), + witness_block_state: WitnessBlockState::default(), } } diff --git a/prover/witness_generator/src/basic_circuits.rs b/prover/witness_generator/src/basic_circuits.rs index b078738a4247..eb186a068a53 
100644 --- a/prover/witness_generator/src/basic_circuits.rs +++ b/prover/witness_generator/src/basic_circuits.rs @@ -420,7 +420,7 @@ async fn generate_witness( let (queue_sender, mut queue_receiver) = tokio::sync::mpsc::channel(1); let make_circuits = tokio::task::spawn_blocking(move || { - let witness_storage = WitnessStorage::new(input.vm_run_data.witness_block_state.into()); + let witness_storage = WitnessStorage::new(input.vm_run_data.witness_block_state); let storage_view = StorageView::new(witness_storage).to_rc_ptr(); let vm_storage_oracle: VmStorageOracle>, HistoryDisabled> = From f3465e6ab542929ae2e5e02d0998a9a422bbe265 Mon Sep 17 00:00:00 2001 From: Lech <88630083+Artemka374@users.noreply.github.com> Date: Fri, 28 Jun 2024 17:51:05 +0300 Subject: [PATCH 27/56] fix bootloader code problem --- core/node/vm_runner/src/impls/bwip.rs | 29 ++++++++++++++++++++------- 1 file changed, 22 insertions(+), 7 deletions(-) diff --git a/core/node/vm_runner/src/impls/bwip.rs b/core/node/vm_runner/src/impls/bwip.rs index 61041cba1928..3b8f239792f3 100644 --- a/core/node/vm_runner/src/impls/bwip.rs +++ b/core/node/vm_runner/src/impls/bwip.rs @@ -147,6 +147,11 @@ impl StateKeeperOutputHandler for BasicWitnessInputProducerOutputHandler { ) -> anyhow::Result<()> { let l1_batch_number = updates_manager.l1_batch.number; + tracing::info!( + "Started saving VM run data for L1 batch {}", + l1_batch_number + ); + let db_result = get_database_witness_input_data(&mut self.pool.connection().await?, l1_batch_number) .await; @@ -215,6 +220,13 @@ async fn get_updates_manager_witness_input_data( .base_system_contract_hashes() .bootloader .clone(); + let bootloader_code_bytes = connection + .factory_deps_dal() + .get_sealed_factory_dep(bootloader) + .await + .expect("Failed fetching bootloader bytecode from DB") + .expect("Bootloader bytecode should exist"); + let bootloader_code = bytes_to_chunks(&bootloader_code_bytes); let account_code_hash = h256_to_u256(default_aa); let account_bytecode_bytes = connection @@ -258,7 +270,7 @@ async fn get_updates_manager_witness_input_data( protocol_version: updates_manager.protocol_version(), - bootloader_code: bytes_to_chunks(bootloader.as_bytes()), + bootloader_code, default_account_code_hash: account_code_hash, storage_refunds, pubdata_costs, @@ -328,6 +340,14 @@ async fn get_database_witness_input_data( .unwrap() .unwrap(); + let bootloader_code_bytes = connection + .factory_deps_dal() + .get_sealed_factory_dep(block_header.base_system_contracts_hashes.bootloader) + .await + .expect("Failed fetching bootloader bytecode from DB") + .expect("Bootloader bytecode should exist"); + let bootloader_code = bytes_to_chunks(&bootloader_code_bytes); + VMRunWitnessInputData { l1_batch_number: block_header.number, previous_root_hash: H256::zero(), @@ -340,12 +360,7 @@ async fn get_database_witness_input_data( .protocol_version .unwrap_or(ProtocolVersionId::last_potentially_undefined()), - bootloader_code: bytes_to_chunks( - block_header - .base_system_contracts_hashes - .bootloader - .as_bytes(), - ), + bootloader_code, default_account_code_hash: account_code_hash, storage_refunds, pubdata_costs, From c6e3e6836b2a676bcc5a164efb4afa37b51d4949 Mon Sep 17 00:00:00 2001 From: Lech <88630083+Artemka374@users.noreply.github.com> Date: Mon, 1 Jul 2024 21:25:57 +0300 Subject: [PATCH 28/56] fix cached state --- .../src/batch_executor/main_executor.rs | 8 ++++++-- .../state_keeper/src/batch_executor/mod.rs | 19 ++++++++++++------- core/node/state_keeper/src/metrics.rs | 2 +- 
core/node/state_keeper/src/testonly/mod.rs | 4 +++- .../src/testonly/test_batch_executor.rs | 8 ++++++-- core/node/vm_runner/src/process.rs | 11 +++-------- 6 files changed, 31 insertions(+), 21 deletions(-) diff --git a/core/node/state_keeper/src/batch_executor/main_executor.rs b/core/node/state_keeper/src/batch_executor/main_executor.rs index df613deb8855..2434e92e812f 100644 --- a/core/node/state_keeper/src/batch_executor/main_executor.rs +++ b/core/node/state_keeper/src/batch_executor/main_executor.rs @@ -147,10 +147,14 @@ impl CommandReceiver { .observe(metrics.time_spent_on_set_value); return; } - Command::StorageViewCache(resp) => { - if resp.send((*storage_view).borrow().cache()).is_err() { + Command::FinishBatchWithCache(resp) => { + let vm_block_result = self.finish_batch(&mut vm); + let cache = (*storage_view).borrow().cache(); + if resp.send((vm_block_result, cache)).is_err() { break; } + + return; } } } diff --git a/core/node/state_keeper/src/batch_executor/mod.rs b/core/node/state_keeper/src/batch_executor/mod.rs index 6066275f2875..4577ab1b360a 100644 --- a/core/node/state_keeper/src/batch_executor/mod.rs +++ b/core/node/state_keeper/src/batch_executor/mod.rs @@ -230,11 +230,13 @@ impl BatchExecutorHandle { Ok(finished_batch) } - pub async fn storage_view_cache(&mut self) -> anyhow::Result { + pub async fn finish_batch_with_cache( + mut self, + ) -> anyhow::Result<(FinishedL1Batch, StorageViewCache)> { let (response_sender, response_receiver) = oneshot::channel(); let send_failed = self .commands - .send(Command::StorageViewCache(response_sender)) + .send(Command::FinishBatchWithCache(response_sender)) .await .is_err(); if send_failed { @@ -242,14 +244,17 @@ impl BatchExecutorHandle { } let latency = EXECUTOR_METRICS.batch_executor_command_response_time - [&ExecutorCommand::StorageViewCache] + [&ExecutorCommand::FinishBatchWithCache] .start(); - let storage_view_cache = match response_receiver.await { - Ok(cache) => cache, + let batch_with_cache = match response_receiver.await { + Ok(batch_with_cache) => batch_with_cache, Err(_) => return Err(self.handle.wait_for_error().await), }; + + self.handle.wait().await?; + latency.observe(); - Ok(storage_view_cache) + Ok(batch_with_cache) } } @@ -259,5 +264,5 @@ pub(super) enum Command { StartNextL2Block(L2BlockEnv, oneshot::Sender<()>), RollbackLastTx(oneshot::Sender<()>), FinishBatch(oneshot::Sender), - StorageViewCache(oneshot::Sender), + FinishBatchWithCache(oneshot::Sender<(FinishedL1Batch, StorageViewCache)>), } diff --git a/core/node/state_keeper/src/metrics.rs b/core/node/state_keeper/src/metrics.rs index 978ab9c2e852..c154719e3900 100644 --- a/core/node/state_keeper/src/metrics.rs +++ b/core/node/state_keeper/src/metrics.rs @@ -444,7 +444,7 @@ pub(super) enum ExecutorCommand { StartNextL2Block, RollbackLastTx, FinishBatch, - StorageViewCache, + FinishBatchWithCache, } const GAS_PER_NANOSECOND_BUCKETS: Buckets = Buckets::values(&[ diff --git a/core/node/state_keeper/src/testonly/mod.rs b/core/node/state_keeper/src/testonly/mod.rs index 1c00b4191a1c..c287bc97407f 100644 --- a/core/node/state_keeper/src/testonly/mod.rs +++ b/core/node/state_keeper/src/testonly/mod.rs @@ -109,7 +109,9 @@ impl BatchExecutor for MockBatchExecutor { resp.send(default_vm_batch_result()).unwrap(); break; } - Command::StorageViewCache(resp) => resp.send(storage_view_cache()).unwrap(), + Command::FinishBatchWithCache(resp) => resp + .send((default_vm_batch_result(), storage_view_cache())) + .unwrap(), } } anyhow::Ok(()) diff --git 
a/core/node/state_keeper/src/testonly/test_batch_executor.rs b/core/node/state_keeper/src/testonly/test_batch_executor.rs index 8a250fb8dab2..1be84cfbf54e 100644 --- a/core/node/state_keeper/src/testonly/test_batch_executor.rs +++ b/core/node/state_keeper/src/testonly/test_batch_executor.rs @@ -501,7 +501,9 @@ impl TestBatchExecutor { resp.send(default_vm_batch_result()).unwrap(); return; } - Command::StorageViewCache(resp) => resp.send(storage_view_cache()).unwrap(), + Command::FinishBatchWithCache(resp) => resp + .send((default_vm_batch_result(), storage_view_cache())) + .unwrap(), } } } @@ -830,7 +832,9 @@ impl BatchExecutor for MockBatchExecutor { resp.send(default_vm_batch_result()).unwrap(); break; } - Command::StorageViewCache(resp) => resp.send(storage_view_cache()).unwrap(), + Command::FinishBatchWithCache(resp) => resp + .send((default_vm_batch_result(), storage_view_cache())) + .unwrap(), } } anyhow::Ok(()) diff --git a/core/node/vm_runner/src/process.rs b/core/node/vm_runner/src/process.rs index 4c5b8ac111df..f44d26c1b4e0 100644 --- a/core/node/vm_runner/src/process.rs +++ b/core/node/vm_runner/src/process.rs @@ -111,17 +111,12 @@ impl VmRunner { .context("VM runner failed to handle L2 block")?; } - let storage_view_cache = batch_executor - .storage_view_cache() + let (finished_batch, storage_view_cache) = batch_executor + .finish_batch_with_cache() .await .context("Failed getting storage view cache")?; - updates_manager.update_storage_view_cache(storage_view_cache); - - let finished_batch = batch_executor - .finish_batch() - .await - .context("failed finishing L1 batch in executor")?; + updates_manager.finish_batch(finished_batch); + updates_manager.update_storage_view_cache(storage_view_cache); latency.observe(); output_handler From b0fa3b47e554be6bf8076e165eb4e56a85c33cbe Mon Sep 17 00:00:00 2001 From: Lech <88630083+Artemka374@users.noreply.github.com> Date: Tue, 2 Jul 2024 15:08:07 +0300 Subject: [PATCH 29/56] fix previous batch metadata --- core/lib/prover_interface/src/inputs.rs | 6 ++--- .../src/request_processor.rs | 18 ++++++++++++- core/node/vm_runner/src/impls/bwip.rs | 25 +++++-------------- 3 files changed, 26 insertions(+), 23 deletions(-) diff --git a/core/lib/prover_interface/src/inputs.rs b/core/lib/prover_interface/src/inputs.rs index f276349dabc5..7c6ea793956a 100644 --- a/core/lib/prover_interface/src/inputs.rs +++ b/core/lib/prover_interface/src/inputs.rs @@ -138,9 +138,9 @@ impl WitnessInputMerklePaths { #[derive(Debug, Clone, Serialize, Deserialize)] pub struct VMRunWitnessInputData { pub l1_batch_number: L1BatchNumber, - pub previous_root_hash: H256, - pub previous_meta_hash: H256, - pub previous_aux_hash: H256, + pub previous_root_hash: Option, + pub previous_meta_hash: Option, + pub previous_aux_hash: Option, pub used_bytecodes: HashMap>, pub initial_heap_content: Vec<(usize, U256)>, pub protocol_version: ProtocolVersionId, diff --git a/core/node/proof_data_handler/src/request_processor.rs b/core/node/proof_data_handler/src/request_processor.rs index 37434a01f269..3f09f6f7f922 100644 --- a/core/node/proof_data_handler/src/request_processor.rs +++ b/core/node/proof_data_handler/src/request_processor.rs @@ -64,7 +64,7 @@ impl RequestProcessor { None => return Ok(Json(ProofGenerationDataResponse::Success(None))), // no batches pending to be proven }; - let vm_run_data = self + let mut vm_run_data = self .blob_store .get(l1_batch_number) .await @@ -75,6 +75,22 @@ impl RequestProcessor { .await .map_err(RequestProcessorError::ObjectStore)?; + let
previous_batch_metadata = self + .pool + .connection() + .await + .unwrap() + .blocks_dal() + .get_l1_batch_metadata(L1BatchNumber(l1_batch_number.checked_sub(1).unwrap())) + .await + .unwrap() + .expect("No metadata for previous batch"); + + vm_run_data.previous_root_hash = Some(previous_batch_metadata.metadata.root_hash); + vm_run_data.previous_meta_hash = + Some(previous_batch_metadata.metadata.meta_parameters_hash); + vm_run_data.previous_aux_hash = Some(previous_batch_metadata.metadata.aux_data_hash); + let blob = WitnessInputData { vm_run_data, merkle_paths, diff --git a/core/node/vm_runner/src/impls/bwip.rs b/core/node/vm_runner/src/impls/bwip.rs index 3b8f239792f3..2d030a267552 100644 --- a/core/node/vm_runner/src/impls/bwip.rs +++ b/core/node/vm_runner/src/impls/bwip.rs @@ -163,25 +163,12 @@ impl StateKeeperOutputHandler for BasicWitnessInputProducerOutputHandler { compare_witness_input_data(&db_result, &result); - let previous_batch_with_metadata = self - .pool - .connection() - .await? - .blocks_dal() - .get_l1_batch_metadata(L1BatchNumber(l1_batch_number.checked_sub(1).unwrap())) - .await - .unwrap() - .unwrap(); - let block_state = WitnessBlockState { read_storage_key: updates_manager.storage_view_cache.read_storage_keys(), is_write_initial: updates_manager.storage_view_cache.initial_writes(), }; result.witness_block_state = block_state; - result.previous_aux_hash = previous_batch_with_metadata.metadata.aux_data_hash; - result.previous_meta_hash = previous_batch_with_metadata.metadata.meta_parameters_hash; - result.previous_root_hash = previous_batch_with_metadata.metadata.root_hash; let blob_url = self.object_store.put(l1_batch_number, &result).await?; @@ -262,9 +249,9 @@ async fn get_updates_manager_witness_input_data( VMRunWitnessInputData { l1_batch_number, - previous_aux_hash: H256::zero(), - previous_meta_hash: H256::zero(), - previous_root_hash: H256::zero(), + previous_aux_hash: None, + previous_meta_hash: None, + previous_root_hash: None, used_bytecodes, initial_heap_content, @@ -350,9 +337,9 @@ async fn get_database_witness_input_data( VMRunWitnessInputData { l1_batch_number: block_header.number, - previous_root_hash: H256::zero(), - previous_meta_hash: H256::zero(), - previous_aux_hash: H256::zero(), + previous_root_hash: None, + previous_meta_hash: None, + previous_aux_hash: None, used_bytecodes, initial_heap_content, From 7e6f2361007b2548d3bbc82be37af19b5f4f615f Mon Sep 17 00:00:00 2001 From: Lech <88630083+Artemka374@users.noreply.github.com> Date: Tue, 2 Jul 2024 15:13:15 +0300 Subject: [PATCH 30/56] fix type annotation --- core/node/proof_data_handler/src/request_processor.rs | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) diff --git a/core/node/proof_data_handler/src/request_processor.rs b/core/node/proof_data_handler/src/request_processor.rs index 3f09f6f7f922..d61cd58c62a8 100644 --- a/core/node/proof_data_handler/src/request_processor.rs +++ b/core/node/proof_data_handler/src/request_processor.rs @@ -4,6 +4,7 @@ use axum::{extract::Path, Json}; use zksync_config::configs::ProofDataHandlerConfig; use zksync_dal::{ConnectionPool, Core, CoreDal}; use zksync_object_store::ObjectStore; +use zksync_prover_interface::inputs::{VMRunWitnessInputData, WitnessInputMerklePaths}; use zksync_prover_interface::{ api::{ ProofGenerationData, ProofGenerationDataRequest, ProofGenerationDataResponse, @@ -64,12 +65,12 @@ impl RequestProcessor { None => return Ok(Json(ProofGenerationDataResponse::Success(None))), // no batches pending to be proven }; - let mut 
vm_run_data = self + let mut vm_run_data: VMRunWitnessInputData = self .blob_store .get(l1_batch_number) .await .map_err(RequestProcessorError::ObjectStore)?; - let merkle_paths = self + let merkle_paths: WitnessInputMerklePaths = self .blob_store .get(l1_batch_number) .await From e4e91aea335aa7656f32f1db3f216e3d49e05461 Mon Sep 17 00:00:00 2001 From: Lech <88630083+Artemka374@users.noreply.github.com> Date: Tue, 2 Jul 2024 15:13:44 +0300 Subject: [PATCH 31/56] fmt --- core/node/proof_data_handler/src/request_processor.rs | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/core/node/proof_data_handler/src/request_processor.rs b/core/node/proof_data_handler/src/request_processor.rs index d61cd58c62a8..c48922bfc16e 100644 --- a/core/node/proof_data_handler/src/request_processor.rs +++ b/core/node/proof_data_handler/src/request_processor.rs @@ -4,13 +4,12 @@ use axum::{extract::Path, Json}; use zksync_config::configs::ProofDataHandlerConfig; use zksync_dal::{ConnectionPool, Core, CoreDal}; use zksync_object_store::ObjectStore; -use zksync_prover_interface::inputs::{VMRunWitnessInputData, WitnessInputMerklePaths}; use zksync_prover_interface::{ api::{ ProofGenerationData, ProofGenerationDataRequest, ProofGenerationDataResponse, SubmitProofRequest, SubmitProofResponse, }, - inputs::WitnessInputData, + inputs::{VMRunWitnessInputData, WitnessInputData, WitnessInputMerklePaths}, }; use zksync_types::{ basic_fri_types::Eip4844Blobs, From db579094665f953c94d50cd08ac88b2727d33604 Mon Sep 17 00:00:00 2001 From: Lech <88630083+Artemka374@users.noreply.github.com> Date: Tue, 2 Jul 2024 15:19:28 +0300 Subject: [PATCH 32/56] fix type for BWG --- prover/witness_generator/src/basic_circuits.rs | 18 +++++++++++++++--- 1 file changed, 15 insertions(+), 3 deletions(-) diff --git a/prover/witness_generator/src/basic_circuits.rs b/prover/witness_generator/src/basic_circuits.rs index eb186a068a53..ee0043ac67dc 100644 --- a/prover/witness_generator/src/basic_circuits.rs +++ b/prover/witness_generator/src/basic_circuits.rs @@ -405,7 +405,11 @@ async fn generate_witness( let mut tree = PrecalculatedMerklePathsProvider::new( input.merkle_paths, - input.vm_run_data.previous_root_hash.0, + input + .vm_run_data + .previous_root_hash + .expect("Previous root hash should exist") + .0, ); let geometry_config = get_geometry_config(); let mut hasher = DefaultHasher::new(); @@ -492,8 +496,16 @@ async fn generate_witness( recursion_urls.retain(|(circuit_id, _, _)| circuits_present.contains(circuit_id)); - scheduler_witness.previous_block_meta_hash = input.vm_run_data.previous_meta_hash.0; - scheduler_witness.previous_block_aux_hash = input.vm_run_data.previous_aux_hash.0; + scheduler_witness.previous_block_meta_hash = input + .vm_run_data + .previous_meta_hash + .expect("Previous metadata hash should exist") + .0; + scheduler_witness.previous_block_aux_hash = input + .vm_run_data + .previous_aux_hash + .expect("Previous aux data hash should exist") + .0; ( circuit_urls, From aad26bae70151f313d15562561caecbe32544429 Mon Sep 17 00:00:00 2001 From: Lech <88630083+Artemka374@users.noreply.github.com> Date: Tue, 2 Jul 2024 17:22:34 +0300 Subject: [PATCH 33/56] address some comments --- ...8_add_vm_run_data_blob_url_column.down.sql | 2 +- core/lib/prover_interface/src/inputs.rs | 6 +-- core/lib/state/src/witness.rs | 46 +++++++------------ .../types/src/storage/witness_block_state.rs | 12 ++--- core/node/vm_runner/src/impls/bwip.rs | 16 +++---- .../witness_generator/src/basic_circuits.rs | 5 +- 6 files changed, 36 
insertions(+), 51 deletions(-) diff --git a/core/lib/dal/migrations/20240619143458_add_vm_run_data_blob_url_column.down.sql b/core/lib/dal/migrations/20240619143458_add_vm_run_data_blob_url_column.down.sql index ea3f2ae3131d..1f86ba3bb696 100644 --- a/core/lib/dal/migrations/20240619143458_add_vm_run_data_blob_url_column.down.sql +++ b/core/lib/dal/migrations/20240619143458_add_vm_run_data_blob_url_column.down.sql @@ -1,2 +1,2 @@ ALTER TABLE proof_generation_details DROP COLUMN IF EXISTS vm_run_data_blob_url; -DROP TABLE IF EXISTS vm_runner_protective_reads; +DROP TABLE IF EXISTS vm_runner_bwip; diff --git a/core/lib/prover_interface/src/inputs.rs b/core/lib/prover_interface/src/inputs.rs index 7c6ea793956a..929cc616fa58 100644 --- a/core/lib/prover_interface/src/inputs.rs +++ b/core/lib/prover_interface/src/inputs.rs @@ -5,7 +5,7 @@ use serde_with::{serde_as, Bytes}; use zksync_object_store::{serialize_using_bincode, Bucket, StoredObject}; pub use zksync_state::WitnessStorage; use zksync_types::{ - witness_block_state::WitnessBlockState, L1BatchNumber, ProtocolVersionId, H256, U256, + witness_block_state::WitnessStorageState, L1BatchNumber, ProtocolVersionId, H256, U256, }; const HASH_LEN: usize = H256::len_bytes(); @@ -147,8 +147,8 @@ pub struct VMRunWitnessInputData { pub bootloader_code: Vec<[u8; 32]>, pub default_account_code_hash: U256, pub storage_refunds: Vec, - pub pubdata_costs: Option>, - pub witness_block_state: WitnessBlockState, + pub pubdata_costs: Vec, + pub witness_block_state: WitnessStorageState, } impl StoredObject for VMRunWitnessInputData { diff --git a/core/lib/state/src/witness.rs b/core/lib/state/src/witness.rs index 50e2d9b54076..5965f3c11884 100644 --- a/core/lib/state/src/witness.rs +++ b/core/lib/state/src/witness.rs @@ -1,56 +1,44 @@ -use vise::{Counter, Metrics}; -use zksync_types::{witness_block_state::WitnessBlockState, StorageKey, StorageValue, H256}; +use zksync_types::{witness_block_state::WitnessStorageState, StorageKey, StorageValue, H256}; use crate::ReadStorage; -#[derive(Debug, Metrics)] -#[metrics(prefix = "witness_storage")] -struct WitnessStorageMetrics { - /// Number of unexpected calls when calling `get_enumeration_index` on a witness storage. - get_enumeration_index_unexpected_call: Counter, -} - -#[vise::register] -static METRICS: vise::Global = vise::Global::new(); - /// [`ReadStorage`] implementation backed by binary serialized [`WitnessHashBlockState`]. /// Note that `load_factory_deps` is not used. /// FactoryDeps data is used straight inside witness generator, loaded with the blob. #[derive(Debug)] -pub struct WitnessStorage<'a> { - block_state: WitnessBlockState, - metrics: &'a WitnessStorageMetrics, +pub struct WitnessStorage { + storage_state: WitnessStorageState, } -impl WitnessStorage<'_> { +impl WitnessStorage { /// Creates a new storage with the provided witness's block state. 
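/// A minimal usage sketch (illustrative only; the state is normally captured
/// by the BWIP component during a batch re-run rather than built by hand):
///
/// ```
/// use zksync_state::WitnessStorage;
/// use zksync_types::witness_block_state::WitnessStorageState;
///
/// let state = WitnessStorageState::default();
/// let mut storage = WitnessStorage::new(state);
/// // Reads are answered purely from the recorded maps; keys the witness never
/// // touched fall back to zero-like defaults (`H256::zero()` / `false`).
/// ```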
- pub fn new(block_state: WitnessBlockState) -> Self { - Self { - block_state, - metrics: &METRICS, - } + pub fn new(storage_state: WitnessStorageState) -> Self { + Self { storage_state } } } -impl ReadStorage for WitnessStorage<'_> { +impl ReadStorage for WitnessStorage { fn read_value(&mut self, key: &StorageKey) -> StorageValue { - *self - .block_state + self.storage_state .read_storage_key .get(key) - .unwrap_or(&H256::default()) + .copied() + .unwrap_or_default() } fn is_write_initial(&mut self, key: &StorageKey) -> bool { - *self.block_state.is_write_initial.get(key).unwrap_or(&false) + self.storage_state + .is_write_initial + .get(key) + .copied() + .unwrap_or_default() } fn load_factory_dep(&mut self, _hash: H256) -> Option> { - None + unreachable!("Factory deps should not be used in the witness storage") } fn get_enumeration_index(&mut self, _key: &StorageKey) -> Option { - self.metrics.get_enumeration_index_unexpected_call.inc(); - None + unreachable!("Enumeration index should not be used in the witness storage") } } diff --git a/core/lib/types/src/storage/witness_block_state.rs b/core/lib/types/src/storage/witness_block_state.rs index 0ae020fd823c..7ab89144f22c 100644 --- a/core/lib/types/src/storage/witness_block_state.rs +++ b/core/lib/types/src/storage/witness_block_state.rs @@ -6,21 +6,21 @@ use crate::{StorageKey, StorageValue}; /// Storage data used during Witness Generation. #[derive(Debug, Default, Clone)] -pub struct WitnessBlockState { +pub struct WitnessStorageState { pub read_storage_key: HashMap, pub is_write_initial: HashMap, } /// A serde schema for serializing/deserializing `WitnessBlockState` #[derive(Debug, Default, Clone, Serialize, Deserialize)] -struct WitnessBlockStateSerde { +struct WitnessStorageStateSerde { pub read_storage_key: Vec<(StorageKey, StorageValue)>, pub is_write_initial: Vec<(StorageKey, bool)>, } -impl Serialize for WitnessBlockState { +impl Serialize for WitnessStorageState { fn serialize(&self, s: S) -> Result { - WitnessBlockStateSerde { + WitnessStorageStateSerde { read_storage_key: self .read_storage_key .iter() @@ -36,9 +36,9 @@ impl Serialize for WitnessBlockState { } } -impl<'de> serde::Deserialize<'de> for WitnessBlockState { +impl<'de> serde::Deserialize<'de> for WitnessStorageState { fn deserialize>(d: D) -> Result { - let x = WitnessBlockStateSerde::deserialize(d)?; + let x = WitnessStorageStateSerde::deserialize(d)?; Ok(Self { read_storage_key: x.read_storage_key.into_iter().collect(), is_write_initial: x.is_write_initial.into_iter().collect(), diff --git a/core/node/vm_runner/src/impls/bwip.rs b/core/node/vm_runner/src/impls/bwip.rs index 2d030a267552..f924f94cb4a1 100644 --- a/core/node/vm_runner/src/impls/bwip.rs +++ b/core/node/vm_runner/src/impls/bwip.rs @@ -7,7 +7,7 @@ use zksync_object_store::ObjectStore; use zksync_prover_interface::inputs::VMRunWitnessInputData; use zksync_state_keeper::{MainBatchExecutor, StateKeeperOutputHandler, UpdatesManager}; use zksync_types::{ - block::StorageOracleInfo, witness_block_state::WitnessBlockState, L1BatchNumber, L2ChainId, + block::StorageOracleInfo, witness_block_state::WitnessStorageState, L1BatchNumber, L2ChainId, ProtocolVersionId, H256, }; use zksync_utils::{bytes_to_chunks, h256_to_u256, u256_to_h256}; @@ -148,7 +148,7 @@ impl StateKeeperOutputHandler for BasicWitnessInputProducerOutputHandler { let l1_batch_number = updates_manager.l1_batch.number; tracing::info!( - "Started saving VM run data for L1 batch {}", + "Started saving VM run data for L1 batch {:?}", l1_batch_number 
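// `L1BatchNumber` is a tuple newtype, so the `{:?}` form above renders it as
// e.g. `L1BatchNumber(42)`, which keeps the log line unambiguous about its type.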
);
 
@@ -163,7 +163,7 @@ impl StateKeeperOutputHandler for BasicWitnessInputProducerOutputHandler {
 
 compare_witness_input_data(&db_result, &result);
 
- let block_state = WitnessBlockState {
+ let block_state = WitnessStorageState {
 read_storage_key: updates_manager.storage_view_cache.read_storage_keys(),
 is_write_initial: updates_manager.storage_view_cache.initial_writes(),
 };
@@ -172,7 +172,7 @@ impl StateKeeperOutputHandler for BasicWitnessInputProducerOutputHandler {
 
 let blob_url = self.object_store.put(l1_batch_number, &result).await?;
 
- tracing::info!("Saved VM run data for L1 batch {}", l1_batch_number.0);
+ tracing::info!("Saved VM run data for L1 batch {:?}", l1_batch_number);
 
 self.pool
 .connection()
@@ -260,8 +260,8 @@ async fn get_updates_manager_witness_input_data(
 bootloader_code,
 default_account_code_hash: account_code_hash,
 storage_refunds,
- pubdata_costs,
- witness_block_state: WitnessBlockState::default(),
+ pubdata_costs: pubdata_costs.unwrap(),
+ witness_block_state: WitnessStorageState::default(),
 }
 }
 
@@ -350,8 +350,8 @@ async fn get_database_witness_input_data(
 bootloader_code,
 default_account_code_hash: account_code_hash,
 storage_refunds,
- pubdata_costs,
- witness_block_state: WitnessBlockState::default(),
+ pubdata_costs: pubdata_costs.unwrap(),
+ witness_block_state: WitnessStorageState::default(),
 }
 }
 
diff --git a/prover/witness_generator/src/basic_circuits.rs b/prover/witness_generator/src/basic_circuits.rs
index ee0043ac67dc..5920fcef7191 100644
--- a/prover/witness_generator/src/basic_circuits.rs
+++ b/prover/witness_generator/src/basic_circuits.rs
@@ -432,10 +432,7 @@ async fn generate_witness(
 let storage_oracle = StorageOracle::new(
 vm_storage_oracle,
 input.vm_run_data.storage_refunds,
- input
- .vm_run_data
- .pubdata_costs
- .expect("pubdata costs should be present"),
+ input.vm_run_data.pubdata_costs,
 );
 
 let path = KZG_TRUSTED_SETUP_FILE

From 2c47a86a06f682881b8e471f5c170e6c213b3df4 Mon Sep 17 00:00:00 2001
From: Lech <88630083+Artemka374@users.noreply.github.com>
Date: Wed, 3 Jul 2024 11:56:22 +0300
Subject: [PATCH 34/56] update contracts

---
 contracts | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/contracts b/contracts
index db9387690502..8172969672cc 160000
--- a/contracts
+++ b/contracts
@@ -1 +1 @@
-Subproject commit db9387690502937de081a959b164db5a5262ce0a
+Subproject commit 8172969672cc6a38542cd8f5578c74b7e30cd3b4

From 8cc7ca60687018d19fe2ee1e7ff4d903dc959ad1 Mon Sep 17 00:00:00 2001
From: Lech <88630083+Artemka374@users.noreply.github.com>
Date: Wed, 3 Jul 2024 13:16:57 +0300
Subject: [PATCH 35/56] update indentation

---
 .../lib/protobuf_config/src/proto/config/general.proto | 10 +++++-----
 1 file changed, 5 insertions(+), 5 deletions(-)

diff --git a/core/lib/protobuf_config/src/proto/config/general.proto b/core/lib/protobuf_config/src/proto/config/general.proto
index 40f5339288b3..71dc6f75fed0 100644
--- a/core/lib/protobuf_config/src/proto/config/general.proto
+++ b/core/lib/protobuf_config/src/proto/config/general.proto
@@ -42,10 +42,10 @@ message GeneralConfig {
 optional config.snapshot_creator.SnapshotsCreator snapshot_creator = 31;
 optional config.observability.Observability observability = 32;
 optional config.vm_runner.ProtectiveReadsWriter protective_reads_writer = 33;
- optional config.vm_runner.BasicWitnessInputProducer basic_witness_input_producer = 38;
 optional config.object_store.ObjectStore core_object_store = 34;
- optional config.snapshot_recovery.SnapshotRecovery snapshot_recovery = 35;
- optional config.pruning.Pruning pruning = 36;
- optional config.commitment_generator.CommitmentGenerator commitment_generator = 37;
- optional config.da_dispatcher.DataAvailabilityDispatcher da_dispatcher = 38;
+ optional config.snapshot_recovery.SnapshotRecovery snapshot_recovery = 35;
+ optional config.pruning.Pruning pruning = 36;
+ optional config.commitment_generator.CommitmentGenerator commitment_generator = 37;
+ optional config.da_dispatcher.DataAvailabilityDispatcher da_dispatcher = 38;
+ optional config.vm_runner.BasicWitnessInputProducer basic_witness_input_producer = 39;
 }

From 0a7cdddd2ce6bdc429adf1a4e7a528a928470f77 Mon Sep 17 00:00:00 2001
From: Lech <88630083+Artemka374@users.noreply.github.com>
Date: Wed, 3 Jul 2024 13:20:00 +0300
Subject: [PATCH 36/56] update indentation

---
 .../src/proto/config/general.proto | 54 +++++++++----------
 1 file changed, 27 insertions(+), 27 deletions(-)

diff --git a/core/lib/protobuf_config/src/proto/config/general.proto b/core/lib/protobuf_config/src/proto/config/general.proto
index 71dc6f75fed0..cd89b993f587 100644
--- a/core/lib/protobuf_config/src/proto/config/general.proto
+++ b/core/lib/protobuf_config/src/proto/config/general.proto
@@ -21,31 +21,31 @@ import "zksync/config/pruning.proto";
 import "zksync/config/object_store.proto";
 
 message GeneralConfig {
- optional config.database.Postgres postgres = 1;
- optional config.api.Api api = 2;
- optional config.contract_verifier.ContractVerifier contract_verifier = 3;
- optional config.circuit_breaker.CircuitBreaker circuit_breaker = 5;
- optional config.chain.Mempool mempool = 6;
- optional config.chain.OperationsManager operations_manager = 8;
- optional config.chain.StateKeeper state_keeper = 9;
- optional config.house_keeper.HouseKeeper house_keeper = 10;
- optional config.prover.Prover prover = 12;
- optional config.utils.Prometheus prometheus = 15;
- optional config.database.DB db = 20;
- optional config.eth.ETH eth = 22;
- optional config.prover.WitnessGenerator witness_generator = 24;
- optional config.prover.WitnessVectorGenerator witness_vector_generator = 25;
- optional config.prover.ProofCompressor proof_compressor = 27;
- optional config.prover.ProofDataHandler data_handler = 28;
- optional config.prover.ProverGroup prover_group = 29;
- optional config.prover.ProverGateway prover_gateway = 30;
- optional config.snapshot_creator.SnapshotsCreator snapshot_creator = 31;
- optional config.observability.Observability observability = 32;
- optional config.vm_runner.ProtectiveReadsWriter protective_reads_writer = 33;
- optional config.object_store.ObjectStore core_object_store = 34;
- optional config.snapshot_recovery.SnapshotRecovery snapshot_recovery = 35;
- optional config.pruning.Pruning pruning = 36;
- optional config.commitment_generator.CommitmentGenerator commitment_generator = 37;
- optional config.da_dispatcher.DataAvailabilityDispatcher da_dispatcher = 38;
+ optional config.database.Postgres postgres = 1;
+ optional config.api.Api api = 2;
+ optional config.contract_verifier.ContractVerifier contract_verifier = 3;
+ optional config.circuit_breaker.CircuitBreaker circuit_breaker = 5;
+ optional config.chain.Mempool mempool = 6;
+ optional config.chain.OperationsManager operations_manager = 8;
+ optional config.chain.StateKeeper state_keeper = 9;
+ optional config.house_keeper.HouseKeeper house_keeper = 10;
+ optional config.prover.Prover prover = 12;
+ optional config.database.DB db = 20;
+ optional config.utils.Prometheus prometheus = 15;
+ optional config.eth.ETH eth = 22;
+ optional config.prover.WitnessGenerator witness_generator = 24;
+ optional config.prover.WitnessVectorGenerator witness_vector_generator = 25;
+ optional config.prover.ProofCompressor proof_compressor = 27;
+ optional config.prover.ProofDataHandler data_handler = 28;
+ optional config.prover.ProverGroup prover_group = 29;
+ optional config.prover.ProverGateway prover_gateway = 30;
+ optional config.snapshot_creator.SnapshotsCreator snapshot_creator = 31;
+ optional config.observability.Observability observability = 32;
+ optional config.vm_runner.ProtectiveReadsWriter protective_reads_writer = 33;
+ optional config.object_store.ObjectStore core_object_store = 34;
+ optional config.snapshot_recovery.SnapshotRecovery snapshot_recovery = 35;
+ optional config.pruning.Pruning pruning = 36;
+ optional config.commitment_generator.CommitmentGenerator commitment_generator = 37;
+ optional config.da_dispatcher.DataAvailabilityDispatcher da_dispatcher = 38;
+ optional config.vm_runner.BasicWitnessInputProducer basic_witness_input_producer = 39;
 }

From 8c1f1e763356accf3e987489d1e92cd3ce576c5e Mon Sep 17 00:00:00 2001
From: Lech <88630083+Artemka374@users.noreply.github.com>
Date: Wed, 3 Jul 2024 13:22:57 +0300
Subject: [PATCH 37/56] update indentation

---
 .../protobuf_config/src/proto/config/vm_runner.proto | 12 ++++++------
 1 file changed, 6 insertions(+), 6 deletions(-)

diff --git a/core/lib/protobuf_config/src/proto/config/vm_runner.proto b/core/lib/protobuf_config/src/proto/config/vm_runner.proto
index ba443174e68e..93521a5fd893 100644
--- a/core/lib/protobuf_config/src/proto/config/vm_runner.proto
+++ b/core/lib/protobuf_config/src/proto/config/vm_runner.proto
@@ -3,13 +3,13 @@ syntax = "proto3";
 package zksync.config.vm_runner;
 
 message ProtectiveReadsWriter {
- optional string db_path = 1; // required; fs path
- optional uint64 window_size = 2; // required
- optional uint64 first_processed_batch = 3; // required
+ optional string db_path = 1; // required; fs path
+ optional uint64 window_size = 2; // required
+ optional uint64 first_processed_batch = 3; // required
 }
 
 message BasicWitnessInputProducer {
- optional string db_path = 1; // required; fs path
- optional uint64 window_size = 2; // required
- optional uint64 first_processed_batch = 3; // required
+ optional string db_path = 1; // required; fs path
+ optional uint64 window_size = 2; // required
+ optional uint64 first_processed_batch = 3; // required
 }

From d61acf8f43735e8008077b22231f8303d97d04ca Mon Sep 17 00:00:00 2001
From: Lech <88630083+Artemka374@users.noreply.github.com>
Date: Wed, 3 Jul 2024 13:46:23 +0300
Subject: [PATCH 38/56] address some comments

---
 .../implementations/layers/vm_runner/bwip.rs | 4 +-
 core/node/state_keeper/src/updates/mod.rs | 10 ++-
 core/node/vm_runner/src/impls/bwip.rs | 79 ++++++++-----------
 etc/env/base/vm_runner.toml | 2 +-
 4 files changed, 45 insertions(+), 50 deletions(-)

diff --git a/core/node/node_framework/src/implementations/layers/vm_runner/bwip.rs b/core/node/node_framework/src/implementations/layers/vm_runner/bwip.rs
index ba433ffa3899..1f6435879ceb 100644
--- a/core/node/node_framework/src/implementations/layers/vm_runner/bwip.rs
+++ b/core/node/node_framework/src/implementations/layers/vm_runner/bwip.rs
@@ -48,8 +48,8 @@ impl WiringLayer for BasicWitnessInputProducerLayer {
 // to DB for querying last processed batch and last ready to be loaded batch.
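 // (Reading the `get_custom(window_size + 2)` call below together with these
 // comments: `window_size` connections for the output handlers, one for
 // `StorageSyncTask`, and one spare for the component's own queries — the
 // breakdown of the `+ 2` is an inference from the comments, not spelled out.)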
// // `window_size` connections for `BasicWitnessInputProducer` - // as there can be multiple output handlers holding multi-second connections to write - // large amount of protective reads. + // as there can be multiple output handlers holding multi-second connections to process + // BWIP data. master_pool .get_custom(self.basic_witness_input_producer_config.window_size + 2) .await?, diff --git a/core/node/state_keeper/src/updates/mod.rs b/core/node/state_keeper/src/updates/mod.rs index 892a51650336..e05432c57b21 100644 --- a/core/node/state_keeper/src/updates/mod.rs +++ b/core/node/state_keeper/src/updates/mod.rs @@ -36,7 +36,7 @@ pub struct UpdatesManager { base_fee_per_gas: u64, base_system_contract_hashes: BaseSystemContractsHashes, protocol_version: ProtocolVersionId, - pub storage_view_cache: StorageViewCache, + storage_view_cache: Option, pub l1_batch: L1BatchUpdates, pub l2_block: L2BlockUpdates, pub storage_writes_deduplicator: StorageWritesDeduplicator, @@ -61,7 +61,7 @@ impl UpdatesManager { protocol_version, ), storage_writes_deduplicator: StorageWritesDeduplicator::new(), - storage_view_cache: StorageViewCache::default(), + storage_view_cache: None, } } @@ -157,7 +157,11 @@ impl UpdatesManager { } pub fn update_storage_view_cache(&mut self, storage_view_cache: StorageViewCache) { - self.storage_view_cache = storage_view_cache; + self.storage_view_cache = Some(storage_view_cache); + } + + pub fn storage_view_cache(&self) -> Option { + self.storage_view_cache.clone() } /// Pushes a new L2 block with the specified timestamp into this manager. The previously diff --git a/core/node/vm_runner/src/impls/bwip.rs b/core/node/vm_runner/src/impls/bwip.rs index f924f94cb4a1..5072a15612d8 100644 --- a/core/node/vm_runner/src/impls/bwip.rs +++ b/core/node/vm_runner/src/impls/bwip.rs @@ -1,5 +1,6 @@ use std::{collections::HashSet, sync::Arc}; +use anyhow::anyhow; use async_trait::async_trait; use tokio::sync::watch; use zksync_dal::{Connection, ConnectionPool, Core, CoreDal}; @@ -146,26 +147,27 @@ impl StateKeeperOutputHandler for BasicWitnessInputProducerOutputHandler { updates_manager: Arc, ) -> anyhow::Result<()> { let l1_batch_number = updates_manager.l1_batch.number; + let mut connection = self.pool.connection().await?; tracing::info!( "Started saving VM run data for L1 batch {:?}", l1_batch_number ); - let db_result = - get_database_witness_input_data(&mut self.pool.connection().await?, l1_batch_number) - .await; - let mut result = get_updates_manager_witness_input_data( - &mut self.pool.connection().await?, - updates_manager.clone(), - ) - .await; + let db_result = get_database_witness_input_data(&mut connection, l1_batch_number).await?; + let mut result = + get_updates_manager_witness_input_data(&mut connection, updates_manager.clone()) + .await?; compare_witness_input_data(&db_result, &result); + let storage_view_cache = updates_manager + .storage_view_cache() + .expect("Storage view cache was not initialized"); + let block_state = WitnessStorageState { - read_storage_key: updates_manager.storage_view_cache.read_storage_keys(), - is_write_initial: updates_manager.storage_view_cache.initial_writes(), + read_storage_key: storage_view_cache.read_storage_keys(), + is_write_initial: storage_view_cache.initial_writes(), }; result.witness_block_state = block_state; @@ -174,14 +176,10 @@ impl StateKeeperOutputHandler for BasicWitnessInputProducerOutputHandler { tracing::info!("Saved VM run data for L1 batch {:?}", l1_batch_number); - self.pool - .connection() - .await - .unwrap() + 
connection .proof_generation_dal() .save_vm_runner_artifacts_metadata(l1_batch_number, &blob_url) - .await - .unwrap(); + .await?; Ok(()) } @@ -190,13 +188,13 @@ impl StateKeeperOutputHandler for BasicWitnessInputProducerOutputHandler { async fn get_updates_manager_witness_input_data( connection: &mut Connection<'_, Core>, updates_manager: Arc, -) -> VMRunWitnessInputData { +) -> anyhow::Result { let l1_batch_number = updates_manager.l1_batch.number.clone(); let finished_batch = updates_manager .l1_batch .finished .clone() - .expect(format!("L1 batch {l1_batch_number:?} is not finished").as_str()); + .ok_or(Err(anyhow!("L1 batch {l1_batch_number:?} is not finished")))?; let initial_heap_content = finished_batch.final_bootloader_memory.unwrap(); // might be just empty let default_aa = updates_manager @@ -210,18 +208,16 @@ async fn get_updates_manager_witness_input_data( let bootloader_code_bytes = connection .factory_deps_dal() .get_sealed_factory_dep(bootloader) - .await - .expect("Failed fetching bootloader bytecode from DB") - .expect("Bootloader bytecode should exist"); + .await? + .ok_or(Err(anyhow!("Failed fetching bootloader bytecode from DB")))?; let bootloader_code = bytes_to_chunks(&bootloader_code_bytes); let account_code_hash = h256_to_u256(default_aa); let account_bytecode_bytes = connection .factory_deps_dal() .get_sealed_factory_dep(default_aa) - .await - .expect("Failed fetching default account bytecode from DB") - .expect("Default account bytecode should exist"); + .await? + .ok_or(Err(anyhow!("Default account bytecode should exist")))?; let account_bytecode = bytes_to_chunks(&account_bytecode_bytes); let hashes: HashSet = finished_batch @@ -247,7 +243,7 @@ async fn get_updates_manager_witness_input_data( let storage_refunds = finished_batch.final_execution_state.storage_refunds; let pubdata_costs = Some(finished_batch.final_execution_state.pubdata_costs); - VMRunWitnessInputData { + Ok(VMRunWitnessInputData { l1_batch_number, previous_aux_hash: None, previous_meta_hash: None, @@ -262,34 +258,31 @@ async fn get_updates_manager_witness_input_data( storage_refunds, pubdata_costs: pubdata_costs.unwrap(), witness_block_state: WitnessStorageState::default(), - } + }) } async fn get_database_witness_input_data( connection: &mut Connection<'_, Core>, l1_batch_number: L1BatchNumber, -) -> VMRunWitnessInputData { +) -> anyhow::Result { let block_header = connection .blocks_dal() .get_l1_batch_header(l1_batch_number) - .await - .unwrap() - .unwrap(); + .await? + .ok_or(Err(anyhow!("L1 block header should exist")))?; let initial_heap_content = connection .blocks_dal() .get_initial_bootloader_heap(l1_batch_number) - .await - .unwrap() - .unwrap(); + .await? + .ok_or(Err(anyhow!("Initial bootloader heap should exist")))?; let account_code_hash = h256_to_u256(block_header.base_system_contracts_hashes.default_aa); let account_bytecode_bytes = connection .factory_deps_dal() .get_sealed_factory_dep(block_header.base_system_contracts_hashes.default_aa) - .await - .expect("Failed fetching default account bytecode from DB") - .expect("Default account bytecode should exist"); + .await? + .ok_or(Err(anyhow!("Default account bytecode should exist")))?; let account_bytecode = bytes_to_chunks(&account_bytecode_bytes); let hashes: HashSet = block_header @@ -323,19 +316,17 @@ async fn get_database_witness_input_data( } = connection .blocks_dal() .get_storage_oracle_info(block_header.number) - .await - .unwrap() - .unwrap(); + .await? 
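// NB: `Option::ok_or` takes the error value itself, so wrapping it in `Err(...)`
// nests a `Result` inside the error type and the surrounding `?` fails to
// compile; the "fix build" commit further below (e94ba4b) rewrites these call
// sites as `.ok_or_else(|| anyhow!(...))`.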
+ .ok_or(Err(anyhow!("Storage oracle info should exist")))?; let bootloader_code_bytes = connection .factory_deps_dal() .get_sealed_factory_dep(block_header.base_system_contracts_hashes.bootloader) - .await - .expect("Failed fetching bootloader bytecode from DB") - .expect("Bootloader bytecode should exist"); + .await? + .ok_or(Err(anyhow!("Bootloader bytecode should exist")))?; let bootloader_code = bytes_to_chunks(&bootloader_code_bytes); - VMRunWitnessInputData { + Ok(VMRunWitnessInputData { l1_batch_number: block_header.number, previous_root_hash: None, previous_meta_hash: None, @@ -352,7 +343,7 @@ async fn get_database_witness_input_data( storage_refunds, pubdata_costs: pubdata_costs.unwrap(), witness_block_state: WitnessStorageState::default(), - } + }) } fn compare_witness_input_data(db_result: &VMRunWitnessInputData, result: &VMRunWitnessInputData) { diff --git a/etc/env/base/vm_runner.toml b/etc/env/base/vm_runner.toml index ca47eb47799a..dd8e9915280b 100644 --- a/etc/env/base/vm_runner.toml +++ b/etc/env/base/vm_runner.toml @@ -16,4 +16,4 @@ db_path = "./db/main/basic_witness_input_producer" # Amount of batches that can be processed in parallel. window_size = 3 # All batches before this one (inclusive) are always considered to be processed. -first_processed_batch = 0 \ No newline at end of file +first_processed_batch = 0 From e94ba4bb2b3e82ca1d41da1fd74a28bd99535a99 Mon Sep 17 00:00:00 2001 From: Lech <88630083+Artemka374@users.noreply.github.com> Date: Wed, 3 Jul 2024 14:08:49 +0300 Subject: [PATCH 39/56] fix build --- .../src/implementations/layers/vm_runner/bwip.rs | 12 ++++++------ core/node/vm_runner/src/impls/bwip.rs | 16 ++++++++-------- prover/witness_generator/src/basic_circuits.rs | 2 +- 3 files changed, 15 insertions(+), 15 deletions(-) diff --git a/core/node/node_framework/src/implementations/layers/vm_runner/bwip.rs b/core/node/node_framework/src/implementations/layers/vm_runner/bwip.rs index 1f6435879ceb..36ad14b8db5a 100644 --- a/core/node/node_framework/src/implementations/layers/vm_runner/bwip.rs +++ b/core/node/node_framework/src/implementations/layers/vm_runner/bwip.rs @@ -37,8 +37,8 @@ impl WiringLayer for BasicWitnessInputProducerLayer { } async fn wire(self: Box, mut context: ServiceContext<'_>) -> Result<(), WiringError> { - let master_pool = context.get_resource::>().await?; - let object_store = context.get_resource::().await?; + let master_pool = context.get_resource::>()?; + let object_store = context.get_resource::()?; let (basic_witness_input_producer, tasks) = BasicWitnessInputProducer::new( // One for `StorageSyncTask` which can hold a long-term connection in case it needs to @@ -62,11 +62,11 @@ impl WiringLayer for BasicWitnessInputProducerLayer { ) .await?; - context.add_task(Box::new(tasks.loader_task)); - context.add_task(Box::new(tasks.output_handler_factory_task)); - context.add_task(Box::new(BasicWitnessInputProducerTask { + context.add_task(tasks.loader_task); + context.add_task(tasks.output_handler_factory_task); + context.add_task(BasicWitnessInputProducerTask { basic_witness_input_producer, - })); + }); Ok(()) } } diff --git a/core/node/vm_runner/src/impls/bwip.rs b/core/node/vm_runner/src/impls/bwip.rs index 5072a15612d8..a0d100b34647 100644 --- a/core/node/vm_runner/src/impls/bwip.rs +++ b/core/node/vm_runner/src/impls/bwip.rs @@ -194,7 +194,7 @@ async fn get_updates_manager_witness_input_data( .l1_batch .finished .clone() - .ok_or(Err(anyhow!("L1 batch {l1_batch_number:?} is not finished")))?; + .ok_or_else(|| anyhow!("L1 batch 
{l1_batch_number:?} is not finished"))?; let initial_heap_content = finished_batch.final_bootloader_memory.unwrap(); // might be just empty let default_aa = updates_manager @@ -209,7 +209,7 @@ async fn get_updates_manager_witness_input_data( .factory_deps_dal() .get_sealed_factory_dep(bootloader) .await? - .ok_or(Err(anyhow!("Failed fetching bootloader bytecode from DB")))?; + .ok_or_else(|| anyhow!("Failed fetching bootloader bytecode from DB"))?; let bootloader_code = bytes_to_chunks(&bootloader_code_bytes); let account_code_hash = h256_to_u256(default_aa); @@ -217,7 +217,7 @@ async fn get_updates_manager_witness_input_data( .factory_deps_dal() .get_sealed_factory_dep(default_aa) .await? - .ok_or(Err(anyhow!("Default account bytecode should exist")))?; + .ok_or_else(|| anyhow!("Default account bytecode should exist"))?; let account_bytecode = bytes_to_chunks(&account_bytecode_bytes); let hashes: HashSet = finished_batch @@ -269,20 +269,20 @@ async fn get_database_witness_input_data( .blocks_dal() .get_l1_batch_header(l1_batch_number) .await? - .ok_or(Err(anyhow!("L1 block header should exist")))?; + .ok_or_else(|| anyhow!("L1 block header should exist"))?; let initial_heap_content = connection .blocks_dal() .get_initial_bootloader_heap(l1_batch_number) .await? - .ok_or(Err(anyhow!("Initial bootloader heap should exist")))?; + .ok_or_else(|| anyhow!("Initial bootloader heap should exist"))?; let account_code_hash = h256_to_u256(block_header.base_system_contracts_hashes.default_aa); let account_bytecode_bytes = connection .factory_deps_dal() .get_sealed_factory_dep(block_header.base_system_contracts_hashes.default_aa) .await? - .ok_or(Err(anyhow!("Default account bytecode should exist")))?; + .ok_or_else(|| anyhow!("Default account bytecode should exist"))?; let account_bytecode = bytes_to_chunks(&account_bytecode_bytes); let hashes: HashSet = block_header @@ -317,13 +317,13 @@ async fn get_database_witness_input_data( .blocks_dal() .get_storage_oracle_info(block_header.number) .await? - .ok_or(Err(anyhow!("Storage oracle info should exist")))?; + .ok_or_else(|| anyhow!("Storage oracle info should exist"))?; let bootloader_code_bytes = connection .factory_deps_dal() .get_sealed_factory_dep(block_header.base_system_contracts_hashes.bootloader) .await? 
- .ok_or(Err(anyhow!("Bootloader bytecode should exist")))?; + .ok_or_else(|| anyhow!("Bootloader bytecode should exist"))?; let bootloader_code = bytes_to_chunks(&bootloader_code_bytes); Ok(VMRunWitnessInputData { diff --git a/prover/witness_generator/src/basic_circuits.rs b/prover/witness_generator/src/basic_circuits.rs index 5920fcef7191..7b6383bc96f6 100644 --- a/prover/witness_generator/src/basic_circuits.rs +++ b/prover/witness_generator/src/basic_circuits.rs @@ -427,7 +427,7 @@ async fn generate_witness( let witness_storage = WitnessStorage::new(input.vm_run_data.witness_block_state); let storage_view = StorageView::new(witness_storage).to_rc_ptr(); - let vm_storage_oracle: VmStorageOracle>, HistoryDisabled> = + let vm_storage_oracle: VmStorageOracle, HistoryDisabled> = VmStorageOracle::new(storage_view.clone()); let storage_oracle = StorageOracle::new( vm_storage_oracle, From 4079e62ce6a32ad4250990dd3b5e47e29f290841 Mon Sep 17 00:00:00 2001 From: Lech <88630083+Artemka374@users.noreply.github.com> Date: Wed, 3 Jul 2024 15:58:38 +0300 Subject: [PATCH 40/56] add migration for witness inputs --- core/lib/basic_types/src/prover_dal.rs | 1 + ...5661bb3018be6d43164c1457edf50b5957429.json | 20 ------------------- ...56f870f8bbd15666fec5cc9f398306eeb6136.json | 18 ----------------- ...01ab8ae3f32526d9b5eadcfe52d139f7d6e66.json | 19 ++++++++++++++++++ ...e118cabc67b6e507efefb7b69e102f1b43c58.json | 8 +++++++- ...1ed762158a27449f61d3b1bb80069ca446727.json | 8 +++++++- ...0703113903_add-vm_run_data-column.down.sql | 1 + ...240703113903_add-vm_run_data-column.up.sql | 1 + .../src/fri_witness_generator_dal.rs | 10 +++++++--- .../src/proof_gen_data_fetcher.rs | 11 ++++++++-- 10 files changed, 52 insertions(+), 45 deletions(-) delete mode 100644 core/lib/dal/.sqlx/query-722e1b3a358c0627008e631505a5661bb3018be6d43164c1457edf50b5957429.json delete mode 100644 prover/prover_dal/.sqlx/query-5354ed86960505fe6b159ce859656f870f8bbd15666fec5cc9f398306eeb6136.json create mode 100644 prover/prover_dal/.sqlx/query-adaa3126792aac4e3afb805068f01ab8ae3f32526d9b5eadcfe52d139f7d6e66.json create mode 100644 prover/prover_dal/migrations/20240703113903_add-vm_run_data-column.down.sql create mode 100644 prover/prover_dal/migrations/20240703113903_add-vm_run_data-column.up.sql diff --git a/core/lib/basic_types/src/prover_dal.rs b/core/lib/basic_types/src/prover_dal.rs index 3215e7095e60..29d36cc91f8f 100644 --- a/core/lib/basic_types/src/prover_dal.rs +++ b/core/lib/basic_types/src/prover_dal.rs @@ -267,6 +267,7 @@ pub struct ProverJobFriInfo { pub struct BasicWitnessGeneratorJobInfo { pub l1_batch_number: L1BatchNumber, pub merkle_tree_paths_blob_url: Option, + pub witness_inputs_blob_url: Option, pub attempts: u32, pub status: WitnessJobStatus, pub error: Option, diff --git a/core/lib/dal/.sqlx/query-722e1b3a358c0627008e631505a5661bb3018be6d43164c1457edf50b5957429.json b/core/lib/dal/.sqlx/query-722e1b3a358c0627008e631505a5661bb3018be6d43164c1457edf50b5957429.json deleted file mode 100644 index 9247df10c910..000000000000 --- a/core/lib/dal/.sqlx/query-722e1b3a358c0627008e631505a5661bb3018be6d43164c1457edf50b5957429.json +++ /dev/null @@ -1,20 +0,0 @@ -{ - "db_name": "PostgreSQL", - "query": "\n SELECT\n l1_batch_number\n FROM\n proof_generation_details\n LEFT JOIN l1_batches ON l1_batch_number = l1_batches.number\n WHERE\n (\n vm_run_data_blob_url IS NOT NULL\n AND proof_gen_data_blob_url IS NOT NULL\n AND status NOT IN ('picked_by_prover', 'generated')\n )\n ORDER BY\n l1_batch_number ASC\n LIMIT\n 1\n\n ", - 
"describe": { - "columns": [ - { - "ordinal": 0, - "name": "l1_batch_number", - "type_info": "Int8" - } - ], - "parameters": { - "Left": [] - }, - "nullable": [ - false - ] - }, - "hash": "722e1b3a358c0627008e631505a5661bb3018be6d43164c1457edf50b5957429" -} diff --git a/prover/prover_dal/.sqlx/query-5354ed86960505fe6b159ce859656f870f8bbd15666fec5cc9f398306eeb6136.json b/prover/prover_dal/.sqlx/query-5354ed86960505fe6b159ce859656f870f8bbd15666fec5cc9f398306eeb6136.json deleted file mode 100644 index 298f7bb30aa3..000000000000 --- a/prover/prover_dal/.sqlx/query-5354ed86960505fe6b159ce859656f870f8bbd15666fec5cc9f398306eeb6136.json +++ /dev/null @@ -1,18 +0,0 @@ -{ - "db_name": "PostgreSQL", - "query": "\n INSERT INTO\n witness_inputs_fri (\n l1_batch_number,\n merkle_tree_paths_blob_url,\n protocol_version,\n eip_4844_blobs,\n status,\n created_at,\n updated_at,\n protocol_version_patch\n )\n VALUES\n ($1, $2, $3, $4, 'queued', NOW(), NOW(), $5)\n ON CONFLICT (l1_batch_number) DO NOTHING\n ", - "describe": { - "columns": [], - "parameters": { - "Left": [ - "Int8", - "Text", - "Int4", - "Bytea", - "Int4" - ] - }, - "nullable": [] - }, - "hash": "5354ed86960505fe6b159ce859656f870f8bbd15666fec5cc9f398306eeb6136" -} diff --git a/prover/prover_dal/.sqlx/query-adaa3126792aac4e3afb805068f01ab8ae3f32526d9b5eadcfe52d139f7d6e66.json b/prover/prover_dal/.sqlx/query-adaa3126792aac4e3afb805068f01ab8ae3f32526d9b5eadcfe52d139f7d6e66.json new file mode 100644 index 000000000000..1af0943a3dd8 --- /dev/null +++ b/prover/prover_dal/.sqlx/query-adaa3126792aac4e3afb805068f01ab8ae3f32526d9b5eadcfe52d139f7d6e66.json @@ -0,0 +1,19 @@ +{ + "db_name": "PostgreSQL", + "query": "\n INSERT INTO\n witness_inputs_fri (\n l1_batch_number,\n merkle_tree_paths_blob_url,\n witness_inputs_blob_url,\n protocol_version,\n eip_4844_blobs,\n status,\n created_at,\n updated_at,\n protocol_version_patch\n )\n VALUES\n ($1, $2, $3, $4, $5, 'queued', NOW(), NOW(), $6)\n ON CONFLICT (l1_batch_number) DO NOTHING\n ", + "describe": { + "columns": [], + "parameters": { + "Left": [ + "Int8", + "Text", + "Text", + "Int4", + "Bytea", + "Int4" + ] + }, + "nullable": [] + }, + "hash": "adaa3126792aac4e3afb805068f01ab8ae3f32526d9b5eadcfe52d139f7d6e66" +} diff --git a/prover/prover_dal/.sqlx/query-e0a6cc885e437aa7ded9def71f3e118cabc67b6e507efefb7b69e102f1b43c58.json b/prover/prover_dal/.sqlx/query-e0a6cc885e437aa7ded9def71f3e118cabc67b6e507efefb7b69e102f1b43c58.json index 738a8b54a0b3..79f12689194f 100644 --- a/prover/prover_dal/.sqlx/query-e0a6cc885e437aa7ded9def71f3e118cabc67b6e507efefb7b69e102f1b43c58.json +++ b/prover/prover_dal/.sqlx/query-e0a6cc885e437aa7ded9def71f3e118cabc67b6e507efefb7b69e102f1b43c58.json @@ -72,6 +72,11 @@ "ordinal": 13, "name": "protocol_version_patch", "type_info": "Int4" + }, + { + "ordinal": 14, + "name": "witness_inputs_blob_url", + "type_info": "Text" } ], "parameters": { @@ -93,7 +98,8 @@ true, true, true, - false + false, + true ] }, "hash": "e0a6cc885e437aa7ded9def71f3e118cabc67b6e507efefb7b69e102f1b43c58" diff --git a/prover/prover_dal/.sqlx/query-e8412d5ad1b17269da02f9a5c201ed762158a27449f61d3b1bb80069ca446727.json b/prover/prover_dal/.sqlx/query-e8412d5ad1b17269da02f9a5c201ed762158a27449f61d3b1bb80069ca446727.json index 4ab8c324ff58..b8bad5ec76eb 100644 --- a/prover/prover_dal/.sqlx/query-e8412d5ad1b17269da02f9a5c201ed762158a27449f61d3b1bb80069ca446727.json +++ b/prover/prover_dal/.sqlx/query-e8412d5ad1b17269da02f9a5c201ed762158a27449f61d3b1bb80069ca446727.json @@ -72,6 +72,11 @@ "ordinal": 13, "name": 
"protocol_version_patch", "type_info": "Int4" + }, + { + "ordinal": 14, + "name": "witness_inputs_blob_url", + "type_info": "Text" } ], "parameters": { @@ -96,7 +101,8 @@ true, true, true, - false + false, + true ] }, "hash": "e8412d5ad1b17269da02f9a5c201ed762158a27449f61d3b1bb80069ca446727" diff --git a/prover/prover_dal/migrations/20240703113903_add-vm_run_data-column.down.sql b/prover/prover_dal/migrations/20240703113903_add-vm_run_data-column.down.sql new file mode 100644 index 000000000000..2d62a594cc73 --- /dev/null +++ b/prover/prover_dal/migrations/20240703113903_add-vm_run_data-column.down.sql @@ -0,0 +1 @@ +ALTER TABLE witness_inputs_fri DROP COLUMN IF EXISTS witness_inputs_blob_url; diff --git a/prover/prover_dal/migrations/20240703113903_add-vm_run_data-column.up.sql b/prover/prover_dal/migrations/20240703113903_add-vm_run_data-column.up.sql new file mode 100644 index 000000000000..311244337ca7 --- /dev/null +++ b/prover/prover_dal/migrations/20240703113903_add-vm_run_data-column.up.sql @@ -0,0 +1 @@ +ALTER TABLE witness_inputs_fri ADD COLUMN IF NOT EXISTS witness_inputs_blob_url TEXT DEFAULT NULL; diff --git a/prover/prover_dal/src/fri_witness_generator_dal.rs b/prover/prover_dal/src/fri_witness_generator_dal.rs index d884ce05aa16..27503f0ee9a7 100644 --- a/prover/prover_dal/src/fri_witness_generator_dal.rs +++ b/prover/prover_dal/src/fri_witness_generator_dal.rs @@ -43,7 +43,8 @@ impl FriWitnessGeneratorDal<'_, '_> { pub async fn save_witness_inputs( &mut self, block_number: L1BatchNumber, - object_key: &str, + merkle_paths_blob_url: &str, + witness_inputs_blob_url: &str, protocol_version: ProtocolSemanticVersion, eip_4844_blobs: Eip4844Blobs, ) { @@ -54,6 +55,7 @@ impl FriWitnessGeneratorDal<'_, '_> { witness_inputs_fri ( l1_batch_number, merkle_tree_paths_blob_url, + witness_inputs_blob_url, protocol_version, eip_4844_blobs, status, @@ -62,11 +64,12 @@ impl FriWitnessGeneratorDal<'_, '_> { protocol_version_patch ) VALUES - ($1, $2, $3, $4, 'queued', NOW(), NOW(), $5) + ($1, $2, $3, $4, $5, 'queued', NOW(), NOW(), $6) ON CONFLICT (l1_batch_number) DO NOTHING "#, i64::from(block_number.0), - object_key, + merkle_paths_blob_url, + witness_inputs_blob_url, protocol_version.minor as i32, blobs_raw, protocol_version.patch.0 as i32, @@ -1476,6 +1479,7 @@ impl FriWitnessGeneratorDal<'_, '_> { .map(|row| BasicWitnessGeneratorJobInfo { l1_batch_number, merkle_tree_paths_blob_url: row.merkle_tree_paths_blob_url, + witness_inputs_blob_url: row.witness_inputs_blob_url, attempts: row.attempts as u32, status: row.status.parse::().unwrap(), error: row.error, diff --git a/prover/prover_fri_gateway/src/proof_gen_data_fetcher.rs b/prover/prover_fri_gateway/src/proof_gen_data_fetcher.rs index a2e213a4e24a..ffcbcc92a04f 100644 --- a/prover/prover_fri_gateway/src/proof_gen_data_fetcher.rs +++ b/prover/prover_fri_gateway/src/proof_gen_data_fetcher.rs @@ -9,20 +9,27 @@ use crate::api_data_fetcher::{PeriodicApi, PeriodicApiStruct}; impl PeriodicApiStruct { async fn save_proof_gen_data(&self, data: ProofGenerationData) { let store = &*self.blob_store; - let blob_url = store + let merkle_paths = store + .put(data.l1_batch_number, &data.data.merkle_paths) + .await + .expect("Failed to save proof generation data to GCS"); + let witness_inputs = store .put(data.l1_batch_number, &data.data) .await .expect("Failed to save proof generation data to GCS"); let mut connection = self.pool.connection().await.unwrap(); + connection .fri_protocol_versions_dal() .save_prover_protocol_version(data.protocol_version, 
data.l1_verifier_config) .await; + connection .fri_witness_generator_dal() .save_witness_inputs( data.l1_batch_number, - &blob_url, + &merkle_paths, + &witness_inputs, data.protocol_version, data.eip_4844_blobs, ) From bd6213310be5888f2d30f39b4631247a56f3e0d3 Mon Sep 17 00:00:00 2001 From: Lech <88630083+Artemka374@users.noreply.github.com> Date: Wed, 3 Jul 2024 16:56:19 +0300 Subject: [PATCH 41/56] address comments --- core/lib/config/src/configs/vm_runner.rs | 1 - core/lib/dal/src/proof_generation_dal.rs | 2 + core/node/vm_runner/src/impls/bwip.rs | 174 +++++++++-------------- 3 files changed, 71 insertions(+), 106 deletions(-) diff --git a/core/lib/config/src/configs/vm_runner.rs b/core/lib/config/src/configs/vm_runner.rs index 477d8acfcb0f..fa7c7c1a90a3 100644 --- a/core/lib/config/src/configs/vm_runner.rs +++ b/core/lib/config/src/configs/vm_runner.rs @@ -30,7 +30,6 @@ pub struct BasicWitnessInputProducerConfig { } impl BasicWitnessInputProducerConfig { - #[allow(dead_code)] fn default_db_path() -> String { "./db/basic_witness_input_producer".to_owned() } diff --git a/core/lib/dal/src/proof_generation_dal.rs b/core/lib/dal/src/proof_generation_dal.rs index 00670639ce54..5960770cd577 100644 --- a/core/lib/dal/src/proof_generation_dal.rs +++ b/core/lib/dal/src/proof_generation_dal.rs @@ -19,6 +19,8 @@ pub struct ProofGenerationDal<'a, 'c> { #[derive(Debug, EnumString, Display)] enum ProofGenerationJobStatus { + #[strum(serialize = "waiting_for_data")] + WaitingForData, #[strum(serialize = "picked_by_prover")] PickedByProver, #[strum(serialize = "generated")] diff --git a/core/node/vm_runner/src/impls/bwip.rs b/core/node/vm_runner/src/impls/bwip.rs index a0d100b34647..4de92aa2c3ae 100644 --- a/core/node/vm_runner/src/impls/bwip.rs +++ b/core/node/vm_runner/src/impls/bwip.rs @@ -9,7 +9,7 @@ use zksync_prover_interface::inputs::VMRunWitnessInputData; use zksync_state_keeper::{MainBatchExecutor, StateKeeperOutputHandler, UpdatesManager}; use zksync_types::{ block::StorageOracleInfo, witness_block_state::WitnessStorageState, L1BatchNumber, L2ChainId, - ProtocolVersionId, H256, + H256, }; use zksync_utils::{bytes_to_chunks, h256_to_u256, u256_to_h256}; @@ -154,23 +154,11 @@ impl StateKeeperOutputHandler for BasicWitnessInputProducerOutputHandler { l1_batch_number ); - let db_result = get_database_witness_input_data(&mut connection, l1_batch_number).await?; - let mut result = + let result = get_updates_manager_witness_input_data(&mut connection, updates_manager.clone()) .await?; - compare_witness_input_data(&db_result, &result); - - let storage_view_cache = updates_manager - .storage_view_cache() - .expect("Storage view cache was not initialized"); - - let block_state = WitnessStorageState { - read_storage_key: storage_view_cache.read_storage_keys(), - is_write_initial: storage_view_cache.initial_writes(), - }; - - result.witness_block_state = block_state; + assert_database_witness_input_data(&mut connection, l1_batch_number, &result).await; let blob_url = self.object_store.put(l1_batch_number, &result).await?; @@ -243,6 +231,15 @@ async fn get_updates_manager_witness_input_data( let storage_refunds = finished_batch.final_execution_state.storage_refunds; let pubdata_costs = Some(finished_batch.final_execution_state.pubdata_costs); + let storage_view_cache = updates_manager + .storage_view_cache() + .expect("Storage view cache was not initialized"); + + let witness_block_state = WitnessStorageState { + read_storage_key: storage_view_cache.read_storage_keys(), + is_write_initial: 
storage_view_cache.initial_writes(), + }; + Ok(VMRunWitnessInputData { l1_batch_number, previous_aux_hash: None, @@ -257,32 +254,36 @@ async fn get_updates_manager_witness_input_data( default_account_code_hash: account_code_hash, storage_refunds, pubdata_costs: pubdata_costs.unwrap(), - witness_block_state: WitnessStorageState::default(), + witness_block_state, }) } -async fn get_database_witness_input_data( +async fn assert_database_witness_input_data( connection: &mut Connection<'_, Core>, l1_batch_number: L1BatchNumber, -) -> anyhow::Result { + result: &VMRunWitnessInputData, +) { let block_header = connection .blocks_dal() .get_l1_batch_header(l1_batch_number) - .await? - .ok_or_else(|| anyhow!("L1 block header should exist"))?; + .await + .expect("Failed fetching L1 block from DB") + .expect("L1 block header should exist"); let initial_heap_content = connection .blocks_dal() .get_initial_bootloader_heap(l1_batch_number) - .await? - .ok_or_else(|| anyhow!("Initial bootloader heap should exist"))?; + .await + .expect("Failed fetching initial heap content from DB") + .expect("Initial bootloader heap should exist"); let account_code_hash = h256_to_u256(block_header.base_system_contracts_hashes.default_aa); let account_bytecode_bytes = connection .factory_deps_dal() .get_sealed_factory_dep(block_header.base_system_contracts_hashes.default_aa) - .await? - .ok_or_else(|| anyhow!("Default account bytecode should exist"))?; + .await + .expect("Failed fetching default account bytecode from DB") + .expect("Default account bytecode should exist"); let account_bytecode = bytes_to_chunks(&account_bytecode_bytes); let hashes: HashSet = block_header @@ -316,93 +317,56 @@ async fn get_database_witness_input_data( } = connection .blocks_dal() .get_storage_oracle_info(block_header.number) - .await? - .ok_or_else(|| anyhow!("Storage oracle info should exist"))?; + .await + .expect("Failed fetching L1 block from DB") + .expect("Storage oracle info should exist"); + let pubdata_costs = pubdata_costs.unwrap(); let bootloader_code_bytes = connection .factory_deps_dal() .get_sealed_factory_dep(block_header.base_system_contracts_hashes.bootloader) - .await? 
- .ok_or_else(|| anyhow!("Bootloader bytecode should exist"))?; + .await + .expect("Failed fetching bootloader bytecode from DB") + .expect("Bootloader bytecode should exist"); let bootloader_code = bytes_to_chunks(&bootloader_code_bytes); - Ok(VMRunWitnessInputData { - l1_batch_number: block_header.number, - previous_root_hash: None, - previous_meta_hash: None, - previous_aux_hash: None, - used_bytecodes, - initial_heap_content, - - protocol_version: block_header - .protocol_version - .unwrap_or(ProtocolVersionId::last_potentially_undefined()), - - bootloader_code, - default_account_code_hash: account_code_hash, - storage_refunds, - pubdata_costs: pubdata_costs.unwrap(), - witness_block_state: WitnessStorageState::default(), - }) -} - -fn compare_witness_input_data(db_result: &VMRunWitnessInputData, result: &VMRunWitnessInputData) { - if db_result.protocol_version != result.protocol_version { - tracing::error!( - "Protocol version mismatch in basic witness input producer: DB: {:?}, UpdatesManager: {:?}", - db_result.protocol_version, - result.protocol_version - ); - } - if db_result.l1_batch_number != result.l1_batch_number { - tracing::error!( - "L1 batch number mismatch in basic witness input producer: DB: {:?}, UpdatesManager: {:?}", - db_result.l1_batch_number, - result.l1_batch_number - ); - } - if db_result.used_bytecodes.len() != result.used_bytecodes.len() { - tracing::error!( - "Used bytecodes length mismatch in basic witness input producer: DB: {:?}, UpdatesManager: {:?}", - db_result.used_bytecodes.len(), - result.used_bytecodes.len() - ); - } - if db_result.storage_refunds != result.storage_refunds { - tracing::error!( - "Storage refunds mismatch in basic witness input producer: DB: {:?}, UpdatesManager: {:?}", - db_result.storage_refunds, - result.storage_refunds - ); - } - if db_result.pubdata_costs != result.pubdata_costs { - tracing::error!( - "Pubdata costs mismatch in basic witness input producer: DB: {:?}, UpdatesManager: {:?}", - db_result.pubdata_costs, - result.pubdata_costs - ); - } - if db_result.initial_heap_content != result.initial_heap_content { - tracing::error!( - "Initial heap content mismatch in basic witness input producer: DB: {:?}, UpdatesManager: {:?}", - db_result.initial_heap_content, - result.initial_heap_content - ); - } - if db_result.bootloader_code != result.bootloader_code { - tracing::error!( - "Bootloader code mismatch in basic witness input producer: DB: {:?}, UpdatesManager: {:?}", - db_result.bootloader_code, - result.bootloader_code - ); - } - if db_result.default_account_code_hash != result.default_account_code_hash { - tracing::error!( - "Default account code hash mismatch in basic witness input producer: DB: {:?}, UpdatesManager: {:?}", - db_result.default_account_code_hash, - result.default_account_code_hash - ); - } + assert_eq!( + block_header.protocol_version.unwrap(), + result.protocol_version, + "Protocol version mismatch in basic witness input producer: DB: {:?}, UpdatesManager: {:?}", + block_header.protocol_version, + result.protocol_version + ); + assert_eq!( + used_bytecodes, result.used_bytecodes, + "Used bytecodes mismatch in basic witness input producer: DB: {:?}, UpdatesManager: {:?}", + used_bytecodes, result.used_bytecodes + ); + assert_eq!( + storage_refunds, result.storage_refunds, + "Storage refunds mismatch in basic witness input producer: DB: {:?}, UpdatesManager: {:?}", + storage_refunds, result.storage_refunds + ); + assert_eq!( + pubdata_costs, result.pubdata_costs, + "Pubdata costs mismatch in basic witness 
input producer: DB: {:?}, UpdatesManager: {:?}", + pubdata_costs, result.pubdata_costs + ); + assert_eq!( + initial_heap_content, result.initial_heap_content, + "Initial heap content mismatch in basic witness input producer: DB: {:?}, UpdatesManager: {:?}", + initial_heap_content, result.initial_heap_content + ); + assert_eq!( + bootloader_code, result.bootloader_code, + "Bootloader code mismatch in basic witness input producer: DB: {:?}, UpdatesManager: {:?}", + bootloader_code, result.bootloader_code + ); + assert_eq!( + account_code_hash, result.default_account_code_hash, + "Default account code hash mismatch in basic witness input producer: DB: {:?}, UpdatesManager: {:?}", + account_code_hash, result.default_account_code_hash + ); } #[derive(Debug)] From 3f04efe20a79a0d710ad8f12f833ac77d8861c8e Mon Sep 17 00:00:00 2001 From: Lech <88630083+Artemka374@users.noreply.github.com> Date: Wed, 3 Jul 2024 17:36:41 +0300 Subject: [PATCH 42/56] address comments --- core/lib/dal/src/proof_generation_dal.rs | 8 +- core/lib/prover_interface/src/api.rs | 3 +- core/lib/prover_interface/src/inputs.rs | 4 +- .../src/request_processor.rs | 15 +-- ...c6fadb8e12a9218399d189b4d95e2ca4fcc48.json | 25 ++++ ...1ed762158a27449f61d3b1bb80069ca446727.json | 109 ------------------ .../src/fri_witness_generator_dal.rs | 20 +--- .../src/proof_gen_data_fetcher.rs | 6 +- .../witness_generator/src/basic_circuits.rs | 40 ++----- 9 files changed, 56 insertions(+), 174 deletions(-) create mode 100644 prover/prover_dal/.sqlx/query-d91c931e2a14cf1183a608d041fc6fadb8e12a9218399d189b4d95e2ca4fcc48.json delete mode 100644 prover/prover_dal/.sqlx/query-e8412d5ad1b17269da02f9a5c201ed762158a27449f61d3b1bb80069ca446727.json diff --git a/core/lib/dal/src/proof_generation_dal.rs b/core/lib/dal/src/proof_generation_dal.rs index 5960770cd577..025234a784f4 100644 --- a/core/lib/dal/src/proof_generation_dal.rs +++ b/core/lib/dal/src/proof_generation_dal.rs @@ -19,8 +19,8 @@ pub struct ProofGenerationDal<'a, 'c> { #[derive(Debug, EnumString, Display)] enum ProofGenerationJobStatus { - #[strum(serialize = "waiting_for_data")] - WaitingForData, + #[strum(serialize = "unpicked")] + Unpicked, #[strum(serialize = "picked_by_prover")] PickedByProver, #[strum(serialize = "generated")] @@ -56,7 +56,7 @@ impl ProofGenerationDal<'_, '_> { AND l1_batches.hash IS NOT NULL AND l1_batches.aux_data_hash IS NOT NULL AND l1_batches.meta_parameters_hash IS NOT NULL - AND status NOT IN ('picked_by_prover', 'generated') + AND status = 'unpicked' ) OR ( status = 'picked_by_prover' @@ -166,7 +166,7 @@ impl ProofGenerationDal<'_, '_> { INSERT INTO proof_generation_details (l1_batch_number, status, proof_gen_data_blob_url, created_at, updated_at) VALUES - ($1, 'waiting_for_data', $2, NOW(), NOW()) + ($1, 'unpicked', $2, NOW(), NOW()) ON CONFLICT (l1_batch_number) DO NOTHING "#, i64::from(l1_batch_number.0), diff --git a/core/lib/prover_interface/src/api.rs b/core/lib/prover_interface/src/api.rs index 09f44fca1b77..7d33287a8452 100644 --- a/core/lib/prover_interface/src/api.rs +++ b/core/lib/prover_interface/src/api.rs @@ -18,10 +18,9 @@ use crate::{ #[derive(Debug, Serialize, Deserialize)] pub struct ProofGenerationData { pub l1_batch_number: L1BatchNumber, - pub data: WitnessInputData, + pub witness_input_data: WitnessInputData, pub protocol_version: ProtocolSemanticVersion, pub l1_verifier_config: L1VerifierConfig, - pub eip_4844_blobs: Eip4844Blobs, } #[derive(Debug, Serialize, Deserialize)] diff --git a/core/lib/prover_interface/src/inputs.rs 
b/core/lib/prover_interface/src/inputs.rs index 929cc616fa58..40233357c396 100644 --- a/core/lib/prover_interface/src/inputs.rs +++ b/core/lib/prover_interface/src/inputs.rs @@ -5,7 +5,8 @@ use serde_with::{serde_as, Bytes}; use zksync_object_store::{serialize_using_bincode, Bucket, StoredObject}; pub use zksync_state::WitnessStorage; use zksync_types::{ - witness_block_state::WitnessStorageState, L1BatchNumber, ProtocolVersionId, H256, U256, + basic_fri_types::Eip4844Blobs, witness_block_state::WitnessStorageState, L1BatchNumber, + ProtocolVersionId, H256, U256, }; const HASH_LEN: usize = H256::len_bytes(); @@ -167,6 +168,7 @@ impl StoredObject for VMRunWitnessInputData { pub struct WitnessInputData { pub vm_run_data: VMRunWitnessInputData, pub merkle_paths: WitnessInputMerklePaths, + pub eip_4844_blobs: Eip4844Blobs, } impl StoredObject for WitnessInputData { diff --git a/core/node/proof_data_handler/src/request_processor.rs b/core/node/proof_data_handler/src/request_processor.rs index c48922bfc16e..b20a9834ab2a 100644 --- a/core/node/proof_data_handler/src/request_processor.rs +++ b/core/node/proof_data_handler/src/request_processor.rs @@ -91,11 +91,6 @@ impl RequestProcessor { Some(previous_batch_metadata.metadata.meta_parameters_hash); vm_run_data.previous_aux_hash = Some(previous_batch_metadata.metadata.aux_data_hash); - let blob = WitnessInputData { - vm_run_data, - merkle_paths, - }; - let header = self .pool .connection() @@ -144,13 +139,19 @@ impl RequestProcessor { } }; + let blob = WitnessInputData { + vm_run_data, + merkle_paths, + eip_4844_blobs, + }; + let proof_gen_data = ProofGenerationData { l1_batch_number, - data: blob, + witness_input_data: blob, protocol_version: protocol_version.version, l1_verifier_config: protocol_version.l1_verifier_config, - eip_4844_blobs, }; + Ok(Json(ProofGenerationDataResponse::Success(Some(Box::new( proof_gen_data, ))))) diff --git a/prover/prover_dal/.sqlx/query-d91c931e2a14cf1183a608d041fc6fadb8e12a9218399d189b4d95e2ca4fcc48.json b/prover/prover_dal/.sqlx/query-d91c931e2a14cf1183a608d041fc6fadb8e12a9218399d189b4d95e2ca4fcc48.json new file mode 100644 index 000000000000..c353ecf1bad3 --- /dev/null +++ b/prover/prover_dal/.sqlx/query-d91c931e2a14cf1183a608d041fc6fadb8e12a9218399d189b4d95e2ca4fcc48.json @@ -0,0 +1,25 @@ +{ + "db_name": "PostgreSQL", + "query": "\n UPDATE witness_inputs_fri\n SET\n status = 'in_progress',\n attempts = attempts + 1,\n updated_at = NOW(),\n processing_started_at = NOW(),\n picked_by = $3\n WHERE\n l1_batch_number = (\n SELECT\n l1_batch_number\n FROM\n witness_inputs_fri\n WHERE\n l1_batch_number <= $1\n AND status = 'queued'\n AND protocol_version = $2\n AND protocol_version_patch = $4\n ORDER BY\n l1_batch_number ASC\n LIMIT\n 1\n FOR UPDATE\n SKIP LOCKED\n )\n RETURNING\n witness_inputs_fri.l1_batch_number\n ", + "describe": { + "columns": [ + { + "ordinal": 0, + "name": "l1_batch_number", + "type_info": "Int8" + } + ], + "parameters": { + "Left": [ + "Int8", + "Int4", + "Text", + "Int4" + ] + }, + "nullable": [ + false + ] + }, + "hash": "d91c931e2a14cf1183a608d041fc6fadb8e12a9218399d189b4d95e2ca4fcc48" +} diff --git a/prover/prover_dal/.sqlx/query-e8412d5ad1b17269da02f9a5c201ed762158a27449f61d3b1bb80069ca446727.json b/prover/prover_dal/.sqlx/query-e8412d5ad1b17269da02f9a5c201ed762158a27449f61d3b1bb80069ca446727.json deleted file mode 100644 index b8bad5ec76eb..000000000000 --- a/prover/prover_dal/.sqlx/query-e8412d5ad1b17269da02f9a5c201ed762158a27449f61d3b1bb80069ca446727.json +++ /dev/null @@ -1,109 +0,0 
@@ -{ - "db_name": "PostgreSQL", - "query": "\n UPDATE witness_inputs_fri\n SET\n status = 'in_progress',\n attempts = attempts + 1,\n updated_at = NOW(),\n processing_started_at = NOW(),\n picked_by = $3\n WHERE\n l1_batch_number = (\n SELECT\n l1_batch_number\n FROM\n witness_inputs_fri\n WHERE\n l1_batch_number <= $1\n AND status = 'queued'\n AND protocol_version = $2\n AND protocol_version_patch = $4\n ORDER BY\n l1_batch_number ASC\n LIMIT\n 1\n FOR UPDATE\n SKIP LOCKED\n )\n RETURNING\n witness_inputs_fri.*\n ", - "describe": { - "columns": [ - { - "ordinal": 0, - "name": "l1_batch_number", - "type_info": "Int8" - }, - { - "ordinal": 1, - "name": "merkle_tree_paths_blob_url", - "type_info": "Text" - }, - { - "ordinal": 2, - "name": "attempts", - "type_info": "Int2" - }, - { - "ordinal": 3, - "name": "status", - "type_info": "Text" - }, - { - "ordinal": 4, - "name": "error", - "type_info": "Text" - }, - { - "ordinal": 5, - "name": "created_at", - "type_info": "Timestamp" - }, - { - "ordinal": 6, - "name": "updated_at", - "type_info": "Timestamp" - }, - { - "ordinal": 7, - "name": "processing_started_at", - "type_info": "Timestamp" - }, - { - "ordinal": 8, - "name": "time_taken", - "type_info": "Time" - }, - { - "ordinal": 9, - "name": "is_blob_cleaned", - "type_info": "Bool" - }, - { - "ordinal": 10, - "name": "protocol_version", - "type_info": "Int4" - }, - { - "ordinal": 11, - "name": "picked_by", - "type_info": "Text" - }, - { - "ordinal": 12, - "name": "eip_4844_blobs", - "type_info": "Bytea" - }, - { - "ordinal": 13, - "name": "protocol_version_patch", - "type_info": "Int4" - }, - { - "ordinal": 14, - "name": "witness_inputs_blob_url", - "type_info": "Text" - } - ], - "parameters": { - "Left": [ - "Int8", - "Int4", - "Text", - "Int4" - ] - }, - "nullable": [ - false, - true, - false, - false, - true, - false, - false, - true, - true, - true, - true, - true, - true, - false, - true - ] - }, - "hash": "e8412d5ad1b17269da02f9a5c201ed762158a27449f61d3b1bb80069ca446727" -} diff --git a/prover/prover_dal/src/fri_witness_generator_dal.rs b/prover/prover_dal/src/fri_witness_generator_dal.rs index 27503f0ee9a7..d56d18550e50 100644 --- a/prover/prover_dal/src/fri_witness_generator_dal.rs +++ b/prover/prover_dal/src/fri_witness_generator_dal.rs @@ -86,7 +86,7 @@ impl FriWitnessGeneratorDal<'_, '_> { last_l1_batch_to_process: u32, protocol_version: ProtocolSemanticVersion, picked_by: &str, - ) -> Option<(L1BatchNumber, Eip4844Blobs)> { + ) -> Option { sqlx::query!( r#" UPDATE witness_inputs_fri @@ -115,7 +115,7 @@ impl FriWitnessGeneratorDal<'_, '_> { SKIP LOCKED ) RETURNING - witness_inputs_fri.* + witness_inputs_fri.l1_batch_number "#, i64::from(last_l1_batch_to_process), protocol_version.minor as i32, @@ -125,21 +125,7 @@ impl FriWitnessGeneratorDal<'_, '_> { .fetch_optional(self.storage.conn()) .await .unwrap() - .map(|row| { - // Blobs can be `None` if we are using an `off-chain DA` - let blobs = if row.eip_4844_blobs.is_none() { - Eip4844Blobs::empty() - } else { - Eip4844Blobs::decode(&row.eip_4844_blobs.unwrap_or_else(|| { - panic!( - "missing eip 4844 blobs from the database for batch {}", - row.l1_batch_number - ) - })) - .expect("failed to decode EIP4844 blobs") - }; - (L1BatchNumber(row.l1_batch_number as u32), blobs) - }) + .map(|row| L1BatchNumber(row.l1_batch_number as u32)) } pub async fn get_basic_circuit_witness_job_attempts( diff --git a/prover/prover_fri_gateway/src/proof_gen_data_fetcher.rs b/prover/prover_fri_gateway/src/proof_gen_data_fetcher.rs index 
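Both the deleted cached query and its replacement rely on the same claim pattern: an `UPDATE` whose inner `SELECT` uses `FOR UPDATE SKIP LOCKED`, so concurrent witness generators never pick the same batch. The new variant merely returns `l1_batch_number` alone, since the blobs now travel inside `WitnessInputData`. A hedged sketch of the pattern using sqlx's runtime-checked API (the real DAL uses the compile-time `sqlx::query!` macro; the function shape and pool wiring are assumptions):

```rust
use sqlx::{PgPool, Row};

/// Claims the next queued batch, or returns `None` if nothing is available.
async fn pick_next_batch(pool: &PgPool, last_batch: i64) -> sqlx::Result<Option<i64>> {
    let row = sqlx::query(
        r#"
        UPDATE witness_inputs_fri
        SET status = 'in_progress', processing_started_at = NOW()
        WHERE l1_batch_number = (
            SELECT l1_batch_number
            FROM witness_inputs_fri
            WHERE l1_batch_number <= $1 AND status = 'queued'
            ORDER BY l1_batch_number ASC
            LIMIT 1
            -- Rows locked by a concurrent picker are skipped, not waited on.
            FOR UPDATE SKIP LOCKED
        )
        RETURNING l1_batch_number
        "#,
    )
    .bind(last_batch)
    .fetch_optional(pool)
    .await?;
    Ok(row.map(|row| row.get("l1_batch_number")))
}
```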
ffcbcc92a04f..9dcc93a4be77 100644 --- a/prover/prover_fri_gateway/src/proof_gen_data_fetcher.rs +++ b/prover/prover_fri_gateway/src/proof_gen_data_fetcher.rs @@ -10,11 +10,11 @@ impl PeriodicApiStruct { async fn save_proof_gen_data(&self, data: ProofGenerationData) { let store = &*self.blob_store; let merkle_paths = store - .put(data.l1_batch_number, &data.data.merkle_paths) + .put(data.l1_batch_number, &data.witness_input_data.merkle_paths) .await .expect("Failed to save proof generation data to GCS"); let witness_inputs = store - .put(data.l1_batch_number, &data.data) + .put(data.l1_batch_number, &data.witness_input_data) .await .expect("Failed to save proof generation data to GCS"); let mut connection = self.pool.connection().await.unwrap(); @@ -31,7 +31,7 @@ impl PeriodicApiStruct { &merkle_paths, &witness_inputs, data.protocol_version, - data.eip_4844_blobs, + data.witness_input_data.eip_4844_blobs, ) .await; } diff --git a/prover/witness_generator/src/basic_circuits.rs b/prover/witness_generator/src/basic_circuits.rs index 7b6383bc96f6..02beff52938d 100644 --- a/prover/witness_generator/src/basic_circuits.rs +++ b/prover/witness_generator/src/basic_circuits.rs @@ -39,9 +39,8 @@ use zksync_prover_interface::inputs::WitnessInputData; use zksync_queued_job_processor::JobProcessor; use zksync_state::{StorageView, WitnessStorage}; use zksync_types::{ - basic_fri_types::{AggregationRound, Eip4844Blobs}, - protocol_version::ProtocolSemanticVersion, - Address, L1BatchNumber, BOOTLOADER_ADDRESS, + basic_fri_types::AggregationRound, protocol_version::ProtocolSemanticVersion, Address, + L1BatchNumber, BOOTLOADER_ADDRESS, }; use crate::{ @@ -76,7 +75,6 @@ struct BlobUrls { pub struct BasicWitnessGeneratorJob { block_number: L1BatchNumber, job: WitnessInputData, - eip_4844_blobs: Eip4844Blobs, } #[derive(Debug)] @@ -110,11 +108,7 @@ impl BasicWitnessGenerator { basic_job: BasicWitnessGeneratorJob, started_at: Instant, ) -> Option { - let BasicWitnessGeneratorJob { - block_number, - job, - eip_4844_blobs, - } = basic_job; + let BasicWitnessGeneratorJob { block_number, job } = basic_job; tracing::info!( "Starting witness generation of type {:?} for block {}", @@ -122,16 +116,7 @@ impl BasicWitnessGenerator { block_number.0 ); - Some( - process_basic_circuits_job( - &*object_store, - started_at, - block_number, - job, - eip_4844_blobs, - ) - .await, - ) + Some(process_basic_circuits_job(&*object_store, started_at, block_number, job).await) } } @@ -157,13 +142,13 @@ impl JobProcessor for BasicWitnessGenerator { ) .await { - Some((block_number, eip_4844_blobs)) => { + Some(block_number) => { tracing::info!( "Processing FRI basic witness-gen for block {}", block_number ); let started_at = Instant::now(); - let job = get_artifacts(block_number, &*self.object_store, eip_4844_blobs).await; + let job = get_artifacts(block_number, &*self.object_store).await; WITNESS_GENERATOR_METRICS.blob_fetch_time[&AggregationRound::BasicCircuits.into()] .observe(started_at.elapsed()); @@ -264,10 +249,9 @@ async fn process_basic_circuits_job( started_at: Instant, block_number: L1BatchNumber, job: WitnessInputData, - eip_4844_blobs: Eip4844Blobs, ) -> BasicCircuitArtifacts { let (circuit_urls, queue_urls, scheduler_witness, aux_output_witness) = - generate_witness(block_number, object_store, job, eip_4844_blobs).await; + generate_witness(block_number, object_store, job).await; WITNESS_GENERATOR_METRICS.witness_generation_time[&AggregationRound::BasicCircuits.into()] .observe(started_at.elapsed()); tracing::info!( @@ 
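In `save_proof_gen_data` above, `put` and `get` move whole typed structs through the object store keyed only by the batch number, which is why `get_artifacts` in the next hunks shrinks to a single `object_store.get(block_number)` call. A toy stand-in for that bincode-backed round-trip; `InMemoryStore` and `WitnessJob` are invented for illustration and are not the real `zksync_object_store` API:

```rust
use std::collections::HashMap;

use serde::{de::DeserializeOwned, Deserialize, Serialize};

/// Toy store: blobs are bincode bytes keyed by L1 batch number.
#[derive(Default)]
struct InMemoryStore {
    blobs: HashMap<u32, Vec<u8>>,
}

impl InMemoryStore {
    fn put<T: Serialize>(&mut self, batch: u32, value: &T) {
        self.blobs.insert(batch, bincode::serialize(value).unwrap());
    }

    fn get<T: DeserializeOwned>(&self, batch: u32) -> Option<T> {
        let bytes = self.blobs.get(&batch)?;
        Some(bincode::deserialize(bytes).unwrap())
    }
}

#[derive(Debug, PartialEq, Serialize, Deserialize)]
struct WitnessJob {
    block_number: u32,
    payload: Vec<u8>,
}

fn main() {
    let mut store = InMemoryStore::default();
    let job = WitnessJob { block_number: 1, payload: vec![0xde, 0xad] };
    store.put(job.block_number, &job);
    // The consumer fetches the whole typed struct back by key alone.
    let fetched: WitnessJob = store.get(1).unwrap();
    assert_eq!(fetched, job);
}
```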
-324,14 +308,9 @@ async fn update_database( async fn get_artifacts( block_number: L1BatchNumber, object_store: &dyn ObjectStore, - eip_4844_blobs: Eip4844Blobs, ) -> BasicWitnessGeneratorJob { let job = object_store.get(block_number).await.unwrap(); - BasicWitnessGeneratorJob { - block_number, - job, - eip_4844_blobs, - } + BasicWitnessGeneratorJob { block_number, job } } async fn save_scheduler_artifacts( @@ -387,7 +366,6 @@ async fn generate_witness( block_number: L1BatchNumber, object_store: &dyn ObjectStore, input: WitnessInputData, - eip_4844_blobs: Eip4844Blobs, ) -> ( Vec<(u8, String)>, Vec<(u8, String, usize)>, @@ -456,7 +434,7 @@ async fn generate_witness( storage_oracle, &mut tree, path, - eip_4844_blobs.blobs(), + input.eip_4844_blobs.blobs(), |circuit| { circuit_sender.blocking_send(circuit).unwrap(); }, From bb9865bd00f983697270de2e29823cf28f10218c Mon Sep 17 00:00:00 2001 From: Lech <88630083+Artemka374@users.noreply.github.com> Date: Wed, 3 Jul 2024 17:39:57 +0300 Subject: [PATCH 43/56] undo formatting --- core/lib/dal/src/tee_proof_generation_dal.rs | 16 ++++++++-------- 1 file changed, 8 insertions(+), 8 deletions(-) diff --git a/core/lib/dal/src/tee_proof_generation_dal.rs b/core/lib/dal/src/tee_proof_generation_dal.rs index 1415dca1df7a..d5625935fa1b 100644 --- a/core/lib/dal/src/tee_proof_generation_dal.rs +++ b/core/lib/dal/src/tee_proof_generation_dal.rs @@ -75,10 +75,10 @@ impl TeeProofGenerationDal<'_, '_> { "#, &processing_timeout, ) - .fetch_optional(self.storage.conn()) - .await - .unwrap() - .map(|row| L1BatchNumber(row.l1_batch_number as u32)); + .fetch_optional(self.storage.conn()) + .await + .unwrap() + .map(|row| L1BatchNumber(row.l1_batch_number as u32)); Ok(result) } @@ -171,10 +171,10 @@ impl TeeProofGenerationDal<'_, '_> { 1 "#, ) - .fetch_optional(self.storage.conn()) - .await - .unwrap() - .map(|row| L1BatchNumber(row.l1_batch_number as u32)); + .fetch_optional(self.storage.conn()) + .await + .unwrap() + .map(|row| L1BatchNumber(row.l1_batch_number as u32)); Ok(result) } From 807132641f2bfc556630c4e7ef724b8b5f8f5f01 Mon Sep 17 00:00:00 2001 From: Lech <88630083+Artemka374@users.noreply.github.com> Date: Wed, 3 Jul 2024 17:58:20 +0300 Subject: [PATCH 44/56] address comments --- ...0e42abbaf365a1b041d0e7a809796ef0fe63.json} | 4 ++-- ...5601ff39acd03e3c8a2265c9036b3dc54383.json} | 4 ++-- ...a55c6fa93f854a5a9777778acb66275cc7be7.json | 22 ------------------- ...afcf939e8352e21689baf861b61a666bdc1fd.json | 20 +++++++++++++++++ core/lib/dal/src/vm_runner_dal.rs | 10 +++------ core/node/vm_runner/src/impls/bwip.rs | 5 +++-- core/node/vm_runner/src/process.rs | 1 + 7 files changed, 31 insertions(+), 35 deletions(-) rename core/lib/dal/.sqlx/{query-e59fb40d833837707e8d1e3c78e87fe0da8905c344755f264292cb436393069e.json => query-05c2a77d9f65d435e2df63a300850e42abbaf365a1b041d0e7a809796ef0fe63.json} (66%) rename core/lib/dal/.sqlx/{query-ff510d87494636482d1abd05c9afd1fa6facc419ce16b8d628a0f5f78e28a0ee.json => query-41a2731a3fe6ae441902632dcce15601ff39acd03e3c8a2265c9036b3dc54383.json} (65%) delete mode 100644 core/lib/dal/.sqlx/query-a4658abdec913690849378a85b2a55c6fa93f854a5a9777778acb66275cc7be7.json create mode 100644 core/lib/dal/.sqlx/query-a85a15aa2e0be1c1f50d15a8354afcf939e8352e21689baf861b61a666bdc1fd.json diff --git a/core/lib/dal/.sqlx/query-e59fb40d833837707e8d1e3c78e87fe0da8905c344755f264292cb436393069e.json b/core/lib/dal/.sqlx/query-05c2a77d9f65d435e2df63a300850e42abbaf365a1b041d0e7a809796ef0fe63.json similarity index 66% rename from 
core/lib/dal/.sqlx/query-e59fb40d833837707e8d1e3c78e87fe0da8905c344755f264292cb436393069e.json rename to core/lib/dal/.sqlx/query-05c2a77d9f65d435e2df63a300850e42abbaf365a1b041d0e7a809796ef0fe63.json index 689dd4626939..f3c85b9b43dc 100644 --- a/core/lib/dal/.sqlx/query-e59fb40d833837707e8d1e3c78e87fe0da8905c344755f264292cb436393069e.json +++ b/core/lib/dal/.sqlx/query-05c2a77d9f65d435e2df63a300850e42abbaf365a1b041d0e7a809796ef0fe63.json @@ -1,6 +1,6 @@ { "db_name": "PostgreSQL", - "query": "\n UPDATE proof_generation_details\n SET\n status = 'picked_by_prover',\n updated_at = NOW(),\n prover_taken_at = NOW()\n WHERE\n l1_batch_number = (\n SELECT\n l1_batch_number\n FROM\n proof_generation_details\n LEFT JOIN l1_batches ON l1_batch_number = l1_batches.number\n WHERE\n (\n vm_run_data_blob_url IS NOT NULL\n AND proof_gen_data_blob_url IS NOT NULL\n AND l1_batches.hash IS NOT NULL\n AND l1_batches.aux_data_hash IS NOT NULL\n AND l1_batches.meta_parameters_hash IS NOT NULL\n AND status NOT IN ('picked_by_prover', 'generated')\n )\n OR (\n status = 'picked_by_prover'\n AND prover_taken_at < NOW() - $1::INTERVAL\n )\n ORDER BY\n l1_batch_number ASC\n LIMIT\n 1\n )\n RETURNING\n proof_generation_details.l1_batch_number\n ", + "query": "\n UPDATE proof_generation_details\n SET\n status = 'picked_by_prover',\n updated_at = NOW(),\n prover_taken_at = NOW()\n WHERE\n l1_batch_number = (\n SELECT\n l1_batch_number\n FROM\n proof_generation_details\n LEFT JOIN l1_batches ON l1_batch_number = l1_batches.number\n WHERE\n (\n vm_run_data_blob_url IS NOT NULL\n AND proof_gen_data_blob_url IS NOT NULL\n AND l1_batches.hash IS NOT NULL\n AND l1_batches.aux_data_hash IS NOT NULL\n AND l1_batches.meta_parameters_hash IS NOT NULL\n AND status = 'unpicked'\n )\n OR (\n status = 'picked_by_prover'\n AND prover_taken_at < NOW() - $1::INTERVAL\n )\n ORDER BY\n l1_batch_number ASC\n LIMIT\n 1\n )\n RETURNING\n proof_generation_details.l1_batch_number\n ", "describe": { "columns": [ { @@ -18,5 +18,5 @@ false ] }, - "hash": "e59fb40d833837707e8d1e3c78e87fe0da8905c344755f264292cb436393069e" + "hash": "05c2a77d9f65d435e2df63a300850e42abbaf365a1b041d0e7a809796ef0fe63" } diff --git a/core/lib/dal/.sqlx/query-ff510d87494636482d1abd05c9afd1fa6facc419ce16b8d628a0f5f78e28a0ee.json b/core/lib/dal/.sqlx/query-41a2731a3fe6ae441902632dcce15601ff39acd03e3c8a2265c9036b3dc54383.json similarity index 65% rename from core/lib/dal/.sqlx/query-ff510d87494636482d1abd05c9afd1fa6facc419ce16b8d628a0f5f78e28a0ee.json rename to core/lib/dal/.sqlx/query-41a2731a3fe6ae441902632dcce15601ff39acd03e3c8a2265c9036b3dc54383.json index 59828610a817..9ec433e52acb 100644 --- a/core/lib/dal/.sqlx/query-ff510d87494636482d1abd05c9afd1fa6facc419ce16b8d628a0f5f78e28a0ee.json +++ b/core/lib/dal/.sqlx/query-41a2731a3fe6ae441902632dcce15601ff39acd03e3c8a2265c9036b3dc54383.json @@ -1,6 +1,6 @@ { "db_name": "PostgreSQL", - "query": "\n INSERT INTO\n proof_generation_details (l1_batch_number, status, proof_gen_data_blob_url, created_at, updated_at)\n VALUES\n ($1, 'waiting_for_data', $2, NOW(), NOW())\n ON CONFLICT (l1_batch_number) DO NOTHING\n ", + "query": "\n INSERT INTO\n proof_generation_details (l1_batch_number, status, proof_gen_data_blob_url, created_at, updated_at)\n VALUES\n ($1, 'unpicked', $2, NOW(), NOW())\n ON CONFLICT (l1_batch_number) DO NOTHING\n ", "describe": { "columns": [], "parameters": { @@ -11,5 +11,5 @@ }, "nullable": [] }, - "hash": "ff510d87494636482d1abd05c9afd1fa6facc419ce16b8d628a0f5f78e28a0ee" + "hash": 
"41a2731a3fe6ae441902632dcce15601ff39acd03e3c8a2265c9036b3dc54383" } diff --git a/core/lib/dal/.sqlx/query-a4658abdec913690849378a85b2a55c6fa93f854a5a9777778acb66275cc7be7.json b/core/lib/dal/.sqlx/query-a4658abdec913690849378a85b2a55c6fa93f854a5a9777778acb66275cc7be7.json deleted file mode 100644 index 4fb3640a169e..000000000000 --- a/core/lib/dal/.sqlx/query-a4658abdec913690849378a85b2a55c6fa93f854a5a9777778acb66275cc7be7.json +++ /dev/null @@ -1,22 +0,0 @@ -{ - "db_name": "PostgreSQL", - "query": "\n SELECT\n COALESCE(MAX(l1_batch_number), $1) AS \"last_processed_l1_batch!\"\n FROM\n vm_runner_bwip\n ", - "describe": { - "columns": [ - { - "ordinal": 0, - "name": "last_processed_l1_batch!", - "type_info": "Int8" - } - ], - "parameters": { - "Left": [ - "Int8" - ] - }, - "nullable": [ - null - ] - }, - "hash": "a4658abdec913690849378a85b2a55c6fa93f854a5a9777778acb66275cc7be7" -} diff --git a/core/lib/dal/.sqlx/query-a85a15aa2e0be1c1f50d15a8354afcf939e8352e21689baf861b61a666bdc1fd.json b/core/lib/dal/.sqlx/query-a85a15aa2e0be1c1f50d15a8354afcf939e8352e21689baf861b61a666bdc1fd.json new file mode 100644 index 000000000000..cf1fad78a462 --- /dev/null +++ b/core/lib/dal/.sqlx/query-a85a15aa2e0be1c1f50d15a8354afcf939e8352e21689baf861b61a666bdc1fd.json @@ -0,0 +1,20 @@ +{ + "db_name": "PostgreSQL", + "query": "\n SELECT\n MAX(l1_batch_number) AS \"last_processed_l1_batch\"\n FROM\n vm_runner_bwip\n ", + "describe": { + "columns": [ + { + "ordinal": 0, + "name": "last_processed_l1_batch", + "type_info": "Int8" + } + ], + "parameters": { + "Left": [] + }, + "nullable": [ + null + ] + }, + "hash": "a85a15aa2e0be1c1f50d15a8354afcf939e8352e21689baf861b61a666bdc1fd" +} diff --git a/core/lib/dal/src/vm_runner_dal.rs b/core/lib/dal/src/vm_runner_dal.rs index 00b01410c0cf..d1c93824f199 100644 --- a/core/lib/dal/src/vm_runner_dal.rs +++ b/core/lib/dal/src/vm_runner_dal.rs @@ -113,24 +113,20 @@ impl VmRunnerDal<'_, '_> { Ok(()) } - pub async fn get_bwip_latest_processed_batch( - &mut self, - default_batch: L1BatchNumber, - ) -> DalResult { + pub async fn get_bwip_latest_processed_batch(&mut self) -> DalResult> { let row = sqlx::query!( r#" SELECT - COALESCE(MAX(l1_batch_number), $1) AS "last_processed_l1_batch!" + MAX(l1_batch_number) AS "last_processed_l1_batch" FROM vm_runner_bwip "#, - default_batch.0 as i32 ) .instrument("get_bwip_latest_processed_batch") .report_latency() .fetch_one(self.storage) .await?; - Ok(L1BatchNumber(row.last_processed_l1_batch as u32)) + Ok(row.last_processed_l1_batch.map(|n| L1BatchNumber(n as u32))) } pub async fn get_bwip_last_ready_batch( diff --git a/core/node/vm_runner/src/impls/bwip.rs b/core/node/vm_runner/src/impls/bwip.rs index 4de92aa2c3ae..40e047ff141d 100644 --- a/core/node/vm_runner/src/impls/bwip.rs +++ b/core/node/vm_runner/src/impls/bwip.rs @@ -104,8 +104,9 @@ impl VmRunnerIo for BasicWitnessInputProducerIo { ) -> anyhow::Result { Ok(conn .vm_runner_dal() - .get_bwip_latest_processed_batch(self.first_processed_batch) - .await?) + .get_bwip_latest_processed_batch() + .await? 
+ .unwrap_or(self.first_processed_batch)) } async fn last_ready_to_be_loaded_batch( diff --git a/core/node/vm_runner/src/process.rs b/core/node/vm_runner/src/process.rs index f44d26c1b4e0..b300915cef64 100644 --- a/core/node/vm_runner/src/process.rs +++ b/core/node/vm_runner/src/process.rs @@ -116,6 +116,7 @@ impl VmRunner { .await .context("Failed getting storage view cache")?; updates_manager.finish_batch(finished_batch); + // this is needed for Basic Witness Input Producer to use in memory reads, but not database queries updates_manager.update_storage_view_cache(storage_view_cache); latency.observe(); From ecd2d10966e33933ddff82189ff0d1fd5d631e5e Mon Sep 17 00:00:00 2001 From: Lech <88630083+Artemka374@users.noreply.github.com> Date: Wed, 3 Jul 2024 20:14:57 +0300 Subject: [PATCH 45/56] address comments --- core/lib/dal/src/proof_generation_dal.rs | 2 +- prover/witness_generator/Cargo.toml | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/core/lib/dal/src/proof_generation_dal.rs b/core/lib/dal/src/proof_generation_dal.rs index 025234a784f4..5af162955600 100644 --- a/core/lib/dal/src/proof_generation_dal.rs +++ b/core/lib/dal/src/proof_generation_dal.rs @@ -233,7 +233,7 @@ impl ProofGenerationDal<'_, '_> { FROM proof_generation_details WHERE - status NOT IN ('picked_by_prover', 'generated') + status = 'unpicked' ORDER BY l1_batch_number ASC LIMIT diff --git a/prover/witness_generator/Cargo.toml b/prover/witness_generator/Cargo.toml index b637ac7f20dd..c31e1662d733 100644 --- a/prover/witness_generator/Cargo.toml +++ b/prover/witness_generator/Cargo.toml @@ -31,7 +31,7 @@ zksync_core_leftovers.workspace = true zksync_protobuf_config.workspace = true zkevm_test_harness = { workspace = true } -circuit_definitions = { workspace = true, features = ["log_tracing"] } +circuit_definitions = { workspace = true, features = [ "log_tracing" ] } zk_evm.workspace = true anyhow.workspace = true From aec5bd9f4f7bb784ca7108c88f0e79e0e5ba3596 Mon Sep 17 00:00:00 2001 From: Lech <88630083+Artemka374@users.noreply.github.com> Date: Wed, 3 Jul 2024 20:38:26 +0300 Subject: [PATCH 46/56] updated query --- ...8a9777126abebaf648c00fdcc24beb9967010.json | 20 +++++++++++++++++++ 1 file changed, 20 insertions(+) create mode 100644 core/lib/dal/.sqlx/query-815a7037a11dfc32e9d084d57178a9777126abebaf648c00fdcc24beb9967010.json diff --git a/core/lib/dal/.sqlx/query-815a7037a11dfc32e9d084d57178a9777126abebaf648c00fdcc24beb9967010.json b/core/lib/dal/.sqlx/query-815a7037a11dfc32e9d084d57178a9777126abebaf648c00fdcc24beb9967010.json new file mode 100644 index 000000000000..a5419ff6706b --- /dev/null +++ b/core/lib/dal/.sqlx/query-815a7037a11dfc32e9d084d57178a9777126abebaf648c00fdcc24beb9967010.json @@ -0,0 +1,20 @@ +{ + "db_name": "PostgreSQL", + "query": "\n SELECT\n l1_batch_number\n FROM\n proof_generation_details\n WHERE\n status = 'unpicked'\n ORDER BY\n l1_batch_number ASC\n LIMIT\n 1\n ", + "describe": { + "columns": [ + { + "ordinal": 0, + "name": "l1_batch_number", + "type_info": "Int8" + } + ], + "parameters": { + "Left": [] + }, + "nullable": [ + false + ] + }, + "hash": "815a7037a11dfc32e9d084d57178a9777126abebaf648c00fdcc24beb9967010" +} From 8b982951296467dbff9a31b611088029e6449cef Mon Sep 17 00:00:00 2001 From: Lech <88630083+Artemka374@users.noreply.github.com> Date: Wed, 3 Jul 2024 20:46:39 +0300 Subject: [PATCH 47/56] fix build --- core/lib/tee_verifier/src/lib.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/core/lib/tee_verifier/src/lib.rs 
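The comment added to `process.rs` above captures the data flow: during batch execution the storage view records every read, and `update_storage_view_cache` passes that snapshot to the output handler so BWIP can replay reads from memory instead of querying Postgres. A simplified toy of the idea; `StorageView` and `StorageViewCache` here are stand-ins, not the real `zksync_state` types:

```rust
use std::collections::HashMap;

type Key = [u8; 32];
type Value = [u8; 32];

#[derive(Clone, Default)]
struct StorageViewCache {
    read_storage_keys: HashMap<Key, Value>,
}

struct StorageView<B> {
    backend: B, // e.g. a Postgres- or RocksDB-backed reader
    cache: StorageViewCache,
}

impl<B: FnMut(Key) -> Value> StorageView<B> {
    /// Every read performed during batch execution is recorded in the cache.
    fn read_value(&mut self, key: Key) -> Value {
        if let Some(value) = self.cache.read_storage_keys.get(&key) {
            return *value;
        }
        let value = (self.backend)(key);
        self.cache.read_storage_keys.insert(key, value);
        value
    }

    /// After the batch finishes, this snapshot is handed to the output
    /// handler; BWIP then serves its reads from the map, not the backend.
    fn into_cache(self) -> StorageViewCache {
        self.cache
    }
}

fn main() {
    let mut view = StorageView {
        backend: |key: Key| key, // trivial backend for the sketch
        cache: StorageViewCache::default(),
    };
    assert_eq!(view.read_value([7; 32]), [7; 32]);
    assert_eq!(view.into_cache().read_storage_keys.len(), 1);
}
```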
b/core/lib/tee_verifier/src/lib.rs index a53a43bc581a..e4adbd37f340 100644 --- a/core/lib/tee_verifier/src/lib.rs +++ b/core/lib/tee_verifier/src/lib.rs @@ -249,7 +249,7 @@ mod tests { #[test] fn test_v1_serialization() { let tvi = V1TeeVerifierInput::new( - PrepareBasicCircuitsJob::new(0), + WitnessInputMerklePaths::new(0), vec![], L1BatchEnv { previous_batch_hash: Some(H256([1; 32])), From 9c415ba40abcc4309188778ee02b01aa0eed44bd Mon Sep 17 00:00:00 2001 From: Lech <88630083+Artemka374@users.noreply.github.com> Date: Thu, 4 Jul 2024 09:36:00 +0300 Subject: [PATCH 48/56] fix lint --- core/lib/types/src/storage/witness_block_state.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/core/lib/types/src/storage/witness_block_state.rs b/core/lib/types/src/storage/witness_block_state.rs index 7ab89144f22c..bce9cc9034d7 100644 --- a/core/lib/types/src/storage/witness_block_state.rs +++ b/core/lib/types/src/storage/witness_block_state.rs @@ -24,7 +24,7 @@ impl Serialize for WitnessStorageState { read_storage_key: self .read_storage_key .iter() - .map(|(k, v)| (*k, v.clone())) + .map(|(k, v)| (*k, *v)) .collect(), is_write_initial: self .is_write_initial From c0cc2d2842a42548d87eb252141379e1bc68fece Mon Sep 17 00:00:00 2001 From: Lech <88630083+Artemka374@users.noreply.github.com> Date: Thu, 4 Jul 2024 11:31:49 +0300 Subject: [PATCH 49/56] fix lint, add vm_runner to CI --- .github/workflows/ci-core-reusable.yml | 6 +++--- core/node/vm_runner/src/impls/bwip.rs | 16 +++++----------- 2 files changed, 8 insertions(+), 14 deletions(-) diff --git a/.github/workflows/ci-core-reusable.yml b/.github/workflows/ci-core-reusable.yml index 288bed7f9671..42b39989c05d 100644 --- a/.github/workflows/ci-core-reusable.yml +++ b/.github/workflows/ci-core-reusable.yml @@ -105,7 +105,7 @@ jobs: # `sleep 60` because we need to wait until server added all the tokens - name: Run server run: | - ci_run zk server --uring --components api,tree,eth,state_keeper,housekeeper,commitment_generator,vm_runner_protective_reads &>server.log & + ci_run zk server --uring --components api,tree,eth,state_keeper,housekeeper,commitment_generator,vm_runner_protective_reads,vm_runner_bwip &>server.log & ci_run sleep 60 - name: Deploy legacy era contracts @@ -135,7 +135,7 @@ jobs: base_token: ["Eth", "Custom"] deployment_mode: ["Rollup", "Validium"] env: - SERVER_COMPONENTS: "api,tree,eth,state_keeper,housekeeper,commitment_generator,vm_runner_protective_reads,da_dispatcher,base_token_ratio_persister${{ matrix.consensus && ',consensus' || '' }}" + SERVER_COMPONENTS: "api,tree,eth,state_keeper,housekeeper,commitment_generator,vm_runner_protective_reads,vm_runner_bwip,da_dispatcher,base_token_ratio_persister${{ matrix.consensus && ',consensus' || '' }}" runs-on: [matterlabs-ci-runner] steps: @@ -309,7 +309,7 @@ jobs: runs-on: [matterlabs-ci-runner] env: - SERVER_COMPONENTS: "api,tree,eth,state_keeper,housekeeper,commitment_generator,vm_runner_protective_reads,da_dispatcher,base_token_ratio_persister${{ matrix.consensus && ',consensus' || '' }}" + SERVER_COMPONENTS: "api,tree,eth,state_keeper,housekeeper,commitment_generator,vm_runner_protective_reads,,vm_runner_bwip,da_dispatcher,base_token_ratio_persister${{ matrix.consensus && ',consensus' || '' }}" EXT_NODE_FLAGS: "${{ matrix.consensus && '-- --enable-consensus' || '' }}" steps: diff --git a/core/node/vm_runner/src/impls/bwip.rs b/core/node/vm_runner/src/impls/bwip.rs index 40e047ff141d..f76b6c0120ef 100644 --- a/core/node/vm_runner/src/impls/bwip.rs +++ 
b/core/node/vm_runner/src/impls/bwip.rs @@ -178,7 +178,7 @@ async fn get_updates_manager_witness_input_data( connection: &mut Connection<'_, Core>, updates_manager: Arc, ) -> anyhow::Result { - let l1_batch_number = updates_manager.l1_batch.number.clone(); + let l1_batch_number = updates_manager.l1_batch.number; let finished_batch = updates_manager .l1_batch .finished @@ -186,14 +186,8 @@ async fn get_updates_manager_witness_input_data( .ok_or_else(|| anyhow!("L1 batch {l1_batch_number:?} is not finished"))?; let initial_heap_content = finished_batch.final_bootloader_memory.unwrap(); // might be just empty - let default_aa = updates_manager - .base_system_contract_hashes() - .default_aa - .clone(); - let bootloader = updates_manager - .base_system_contract_hashes() - .bootloader - .clone(); + let default_aa = updates_manager.base_system_contract_hashes().default_aa; + let bootloader = updates_manager.base_system_contract_hashes().bootloader; let bootloader_code_bytes = connection .factory_deps_dal() .get_sealed_factory_dep(bootloader) @@ -230,7 +224,7 @@ async fn get_updates_manager_witness_input_data( } let storage_refunds = finished_batch.final_execution_state.storage_refunds; - let pubdata_costs = Some(finished_batch.final_execution_state.pubdata_costs); + let pubdata_costs = finished_batch.final_execution_state.pubdata_costs; let storage_view_cache = updates_manager .storage_view_cache() @@ -254,7 +248,7 @@ async fn get_updates_manager_witness_input_data( bootloader_code, default_account_code_hash: account_code_hash, storage_refunds, - pubdata_costs: pubdata_costs.unwrap(), + pubdata_costs, witness_block_state, }) } From d813bdcff5ce855dd082f5e1e5b41184175a899d Mon Sep 17 00:00:00 2001 From: Lech <88630083+Artemka374@users.noreply.github.com> Date: Thu, 4 Jul 2024 11:56:39 +0300 Subject: [PATCH 50/56] remove bwip from loadtest --- .github/workflows/ci-core-reusable.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.github/workflows/ci-core-reusable.yml b/.github/workflows/ci-core-reusable.yml index 42b39989c05d..e113168080b2 100644 --- a/.github/workflows/ci-core-reusable.yml +++ b/.github/workflows/ci-core-reusable.yml @@ -105,7 +105,7 @@ jobs: # `sleep 60` because we need to wait until server added all the tokens - name: Run server run: | - ci_run zk server --uring --components api,tree,eth,state_keeper,housekeeper,commitment_generator,vm_runner_protective_reads,vm_runner_bwip &>server.log & + ci_run zk server --uring --components api,tree,eth,state_keeper,housekeeper,commitment_generator,vm_runner_protective_reads &>server.log & ci_run sleep 60 - name: Deploy legacy era contracts From dc860cfae8da92a4c6af6d3d72110e28eea1e808 Mon Sep 17 00:00:00 2001 From: Lech <88630083+Artemka374@users.noreply.github.com> Date: Thu, 4 Jul 2024 13:48:55 +0300 Subject: [PATCH 51/56] address comments --- .github/workflows/ci-core-reusable.yml | 2 +- core/lib/dal/src/proof_generation_dal.rs | 2 +- core/lib/prover_interface/src/inputs.rs | 11 ++++++++--- core/lib/state/src/storage_view.rs | 5 ----- .../proof_data_handler/src/request_processor.rs | 14 ++++++++------ core/node/vm_runner/src/impls/bwip.rs | 6 +----- etc/env/file_based/general.yaml | 5 +++++ 7 files changed, 24 insertions(+), 21 deletions(-) diff --git a/.github/workflows/ci-core-reusable.yml b/.github/workflows/ci-core-reusable.yml index e113168080b2..504f7761bb8e 100644 --- a/.github/workflows/ci-core-reusable.yml +++ b/.github/workflows/ci-core-reusable.yml @@ -309,7 +309,7 @@ jobs: runs-on: [matterlabs-ci-runner] 
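The lint fixes in patches 48 and 49 (`v.clone()` to `*v`, and dropping `.clone()` on `default_aa`, `bootloader`, and the batch number) all address clippy's `clone_on_copy` warning: these are `Copy` types, so a plain copy is the idiomatic spelling. A tiny illustration with a stand-in type:

```rust
#[derive(Clone, Copy, Debug, PartialEq)]
struct BatchNumber(u32);

fn main() {
    let n = BatchNumber(42);
    let by_clone = n.clone(); // compiles, but clippy flags it: the type is `Copy`
    let by_copy = n; // idiomatic: a plain, implicit copy
    assert_eq!(by_clone, by_copy);
}
```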
env: - SERVER_COMPONENTS: "api,tree,eth,state_keeper,housekeeper,commitment_generator,vm_runner_protective_reads,,vm_runner_bwip,da_dispatcher,base_token_ratio_persister${{ matrix.consensus && ',consensus' || '' }}" + SERVER_COMPONENTS: "api,tree,eth,state_keeper,housekeeper,commitment_generator,vm_runner_protective_reads,vm_runner_bwip,da_dispatcher,base_token_ratio_persister${{ matrix.consensus && ',consensus' || '' }}" EXT_NODE_FLAGS: "${{ matrix.consensus && '-- --enable-consensus' || '' }}" steps: diff --git a/core/lib/dal/src/proof_generation_dal.rs b/core/lib/dal/src/proof_generation_dal.rs index 5af162955600..d64df3a752f8 100644 --- a/core/lib/dal/src/proof_generation_dal.rs +++ b/core/lib/dal/src/proof_generation_dal.rs @@ -146,7 +146,7 @@ impl ProofGenerationDal<'_, '_> { .await?; if result.rows_affected() == 0 { let err = instrumentation.constraint_error(anyhow::anyhow!( - "Cannot save proof_blob_url for a batch number {} that does not exist", + "Cannot save vm_run_data_blob_url for a batch number {} that does not exist", batch_number )); return Err(err); diff --git a/core/lib/prover_interface/src/inputs.rs b/core/lib/prover_interface/src/inputs.rs index d0263551e1d2..8f2403d3369a 100644 --- a/core/lib/prover_interface/src/inputs.rs +++ b/core/lib/prover_interface/src/inputs.rs @@ -140,9 +140,6 @@ impl WitnessInputMerklePaths { #[derive(Debug, Clone, Serialize, Deserialize)] pub struct VMRunWitnessInputData { pub l1_batch_number: L1BatchNumber, - pub previous_root_hash: Option, - pub previous_meta_hash: Option, - pub previous_aux_hash: Option, pub used_bytecodes: HashMap>, pub initial_heap_content: Vec<(usize, U256)>, pub protocol_version: ProtocolVersionId, @@ -169,6 +166,7 @@ impl StoredObject for VMRunWitnessInputData { pub struct WitnessInputData { pub vm_run_data: VMRunWitnessInputData, pub merkle_paths: WitnessInputMerklePaths, + pub previous_batch_metadata: L1BatchMetadataHashes, pub eip_4844_blobs: Eip4844Blobs, } @@ -184,6 +182,13 @@ impl StoredObject for WitnessInputData { serialize_using_bincode!(); } +#[derive(Debug, Clone, Serialize, Deserialize)] +pub struct L1BatchMetadataHashes { + pub root_hash: H256, + pub meta_hash: H256, + pub aux_hash: H256, +} + /// Version 1 of the data used as input for the TEE verifier. 
#[derive(Debug, Clone, Serialize, Deserialize, PartialEq)] pub struct V1TeeVerifierInput { diff --git a/core/lib/state/src/storage_view.rs b/core/lib/state/src/storage_view.rs index 0529196a9e93..7dcfda2ba406 100644 --- a/core/lib/state/src/storage_view.rs +++ b/core/lib/state/src/storage_view.rs @@ -75,11 +75,6 @@ impl StorageView { pub fn cache(&self) -> StorageViewCache { self.cache.clone() } - - /// Returns the modified storage keys - pub fn modified_storage_keys(&self) -> &HashMap { - &self.modified_storage_keys - } } impl ReadStorage for Box diff --git a/core/node/proof_data_handler/src/request_processor.rs b/core/node/proof_data_handler/src/request_processor.rs index b20a9834ab2a..3d506b1466a3 100644 --- a/core/node/proof_data_handler/src/request_processor.rs +++ b/core/node/proof_data_handler/src/request_processor.rs @@ -9,7 +9,9 @@ use zksync_prover_interface::{ ProofGenerationData, ProofGenerationDataRequest, ProofGenerationDataResponse, SubmitProofRequest, SubmitProofResponse, }, - inputs::{VMRunWitnessInputData, WitnessInputData, WitnessInputMerklePaths}, + inputs::{ + L1BatchMetadataHashes, VMRunWitnessInputData, WitnessInputData, WitnessInputMerklePaths, + }, }; use zksync_types::{ basic_fri_types::Eip4844Blobs, @@ -86,11 +88,6 @@ impl RequestProcessor { .unwrap() .expect("No metadata for previous batch"); - vm_run_data.previous_root_hash = Some(previous_batch_metadata.metadata.root_hash); - vm_run_data.previous_meta_hash = - Some(previous_batch_metadata.metadata.meta_parameters_hash); - vm_run_data.previous_aux_hash = Some(previous_batch_metadata.metadata.aux_data_hash); - let header = self .pool .connection() @@ -143,6 +140,11 @@ impl RequestProcessor { vm_run_data, merkle_paths, eip_4844_blobs, + previous_batch_metadata: L1BatchMetadataHashes { + root_hash: previous_batch_metadata.metadata.root_hash, + meta_hash: previous_batch_metadata.metadata.meta_parameters_hash, + aux_hash: previous_batch_metadata.metadata.aux_data_hash, + }, }; let proof_gen_data = ProofGenerationData { diff --git a/core/node/vm_runner/src/impls/bwip.rs b/core/node/vm_runner/src/impls/bwip.rs index f76b6c0120ef..b79bd751944d 100644 --- a/core/node/vm_runner/src/impls/bwip.rs +++ b/core/node/vm_runner/src/impls/bwip.rs @@ -156,8 +156,7 @@ impl StateKeeperOutputHandler for BasicWitnessInputProducerOutputHandler { ); let result = - get_updates_manager_witness_input_data(&mut connection, updates_manager.clone()) - .await?; + get_updates_manager_witness_input_data(&mut connection, updates_manager).await?; assert_database_witness_input_data(&mut connection, l1_batch_number, &result).await; @@ -237,9 +236,6 @@ async fn get_updates_manager_witness_input_data( Ok(VMRunWitnessInputData { l1_batch_number, - previous_aux_hash: None, - previous_meta_hash: None, - previous_root_hash: None, used_bytecodes, initial_heap_content, diff --git a/etc/env/file_based/general.yaml b/etc/env/file_based/general.yaml index 4a258a7cd99d..fbd7c816b1bb 100644 --- a/etc/env/file_based/general.yaml +++ b/etc/env/file_based/general.yaml @@ -336,6 +336,11 @@ protective_reads_writer: window_size: 3 first_processed_batch: 0 +basic_witness_input_producer: + db_path: "./db/main/basic_witness_input_producer" + window_size: 3 + first_processed_batch: 0 + snapshot_recovery: enabled: false postgres: From cdba7328a65424dc8da112d6a1c635c451a06c3a Mon Sep 17 00:00:00 2001 From: Lech <88630083+Artemka374@users.noreply.github.com> Date: Thu, 4 Jul 2024 13:57:09 +0300 Subject: [PATCH 52/56] fix build --- 
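Regrouping the three previous-batch hashes into `L1BatchMetadataHashes` (populated in `request_processor.rs` above) does more than tidy the struct: the hashes change from `Option<H256>` to mandatory values, which is what lets the follow-up build-fix patches delete the `.expect(...)` calls in `basic_circuits.rs`. A sketch of the before/after contract, with `H256` modeled as `[u8; 32]` to stay dependency-free:

```rust
type H256 = [u8; 32];

struct L1BatchMetadataHashes {
    root_hash: H256,
    meta_hash: H256,
    aux_hash: H256,
}

// Before: every consumer had to unwrap, with a panic path at each use site.
fn previous_root_before(previous_root_hash: Option<H256>) -> H256 {
    previous_root_hash.expect("Previous root hash should exist")
}

// After: presence is a type-level guarantee; no `.expect(...)` needed.
fn previous_root_after(metadata: &L1BatchMetadataHashes) -> H256 {
    metadata.root_hash
}

fn main() {
    let metadata = L1BatchMetadataHashes {
        root_hash: [1; 32],
        meta_hash: [2; 32],
        aux_hash: [3; 32],
    };
    assert_eq!(previous_root_before(Some([1; 32])), previous_root_after(&metadata));
}
```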
.../node/proof_data_handler/src/request_processor.rs | 2 +- prover/witness_generator/src/basic_circuits.rs | 12 ++++++------ 2 files changed, 7 insertions(+), 7 deletions(-) diff --git a/core/node/proof_data_handler/src/request_processor.rs b/core/node/proof_data_handler/src/request_processor.rs index 3d506b1466a3..bdb55237c4b6 100644 --- a/core/node/proof_data_handler/src/request_processor.rs +++ b/core/node/proof_data_handler/src/request_processor.rs @@ -66,7 +66,7 @@ impl RequestProcessor { None => return Ok(Json(ProofGenerationDataResponse::Success(None))), // no batches pending to be proven }; - let mut vm_run_data: VMRunWitnessInputData = self + let vm_run_data: VMRunWitnessInputData = self .blob_store .get(l1_batch_number) .await diff --git a/prover/witness_generator/src/basic_circuits.rs b/prover/witness_generator/src/basic_circuits.rs index 02beff52938d..0a9161366e9b 100644 --- a/prover/witness_generator/src/basic_circuits.rs +++ b/prover/witness_generator/src/basic_circuits.rs @@ -384,8 +384,8 @@ async fn generate_witness( let mut tree = PrecalculatedMerklePathsProvider::new( input.merkle_paths, input - .vm_run_data - .previous_root_hash + .previous_batch_metadata + .root_hash .expect("Previous root hash should exist") .0, ); @@ -472,13 +472,13 @@ async fn generate_witness( recursion_urls.retain(|(circuit_id, _, _)| circuits_present.contains(circuit_id)); scheduler_witness.previous_block_meta_hash = input - .vm_run_data - .previous_meta_hash + .previous_batch_metadata + .meta_hash .expect("Previous metadata hash should exist") .0; scheduler_witness.previous_block_aux_hash = input - .vm_run_data - .previous_aux_hash + .previous_batch_metadata + .aux_hash .expect("Previous aux data hash should exist") .0; From 3e8c4c84af23f9c3c5fed6bb4689b2adcc571830 Mon Sep 17 00:00:00 2001 From: Lech <88630083+Artemka374@users.noreply.github.com> Date: Thu, 4 Jul 2024 14:20:15 +0300 Subject: [PATCH 53/56] fix build --- prover/witness_generator/src/basic_circuits.rs | 12 ++---------- 1 file changed, 2 insertions(+), 10 deletions(-) diff --git a/prover/witness_generator/src/basic_circuits.rs b/prover/witness_generator/src/basic_circuits.rs index 0a9161366e9b..cf271b5b943e 100644 --- a/prover/witness_generator/src/basic_circuits.rs +++ b/prover/witness_generator/src/basic_circuits.rs @@ -471,16 +471,8 @@ async fn generate_witness( recursion_urls.retain(|(circuit_id, _, _)| circuits_present.contains(circuit_id)); - scheduler_witness.previous_block_meta_hash = input - .previous_batch_metadata - .meta_hash - .expect("Previous metadata hash should exist") - .0; - scheduler_witness.previous_block_aux_hash = input - .previous_batch_metadata - .aux_hash - .expect("Previous aux data hash should exist") - .0; + scheduler_witness.previous_block_meta_hash = input.previous_batch_metadata.meta_hash.0; + scheduler_witness.previous_block_aux_hash = input.previous_batch_metadata.aux_hash.0; ( circuit_urls, From ec68265b15fa0fd69c08c916fe19660173412ca3 Mon Sep 17 00:00:00 2001 From: Lech <88630083+Artemka374@users.noreply.github.com> Date: Thu, 4 Jul 2024 14:42:51 +0300 Subject: [PATCH 54/56] fix build --- prover/witness_generator/src/basic_circuits.rs | 6 +----- 1 file changed, 1 insertion(+), 5 deletions(-) diff --git a/prover/witness_generator/src/basic_circuits.rs b/prover/witness_generator/src/basic_circuits.rs index cf271b5b943e..c17458ab4338 100644 --- a/prover/witness_generator/src/basic_circuits.rs +++ b/prover/witness_generator/src/basic_circuits.rs @@ -383,11 +383,7 @@ async fn generate_witness( let mut 
tree = PrecalculatedMerklePathsProvider::new( input.merkle_paths, - input - .previous_batch_metadata - .root_hash - .expect("Previous root hash should exist") - .0, + input.previous_batch_metadata.root_hash.0, ); let geometry_config = get_geometry_config(); let mut hasher = DefaultHasher::new(); From 419a723a3e090a6629d51e82fdbec56075817c2d Mon Sep 17 00:00:00 2001 From: Artem Fomiuk <88630083+Artemka374@users.noreply.github.com> Date: Thu, 4 Jul 2024 22:44:23 +0300 Subject: [PATCH 55/56] Update core/node/vm_runner/src/impls/bwip.rs Co-authored-by: perekopskiy <53865202+perekopskiy@users.noreply.github.com> --- core/node/vm_runner/src/impls/bwip.rs | 5 +---- 1 file changed, 1 insertion(+), 4 deletions(-) diff --git a/core/node/vm_runner/src/impls/bwip.rs b/core/node/vm_runner/src/impls/bwip.rs index b79bd751944d..bea308ad8265 100644 --- a/core/node/vm_runner/src/impls/bwip.rs +++ b/core/node/vm_runner/src/impls/bwip.rs @@ -150,10 +150,7 @@ impl StateKeeperOutputHandler for BasicWitnessInputProducerOutputHandler { let l1_batch_number = updates_manager.l1_batch.number; let mut connection = self.pool.connection().await?; - tracing::info!( - "Started saving VM run data for L1 batch {:?}", - l1_batch_number - ); + tracing::info!(%l1_batch_number, "Started saving VM run data"); let result = get_updates_manager_witness_input_data(&mut connection, updates_manager).await?; From c39e7a1981b3c43ddd8c63094bc26d08a146c4ec Mon Sep 17 00:00:00 2001 From: Artem Fomiuk <88630083+Artemka374@users.noreply.github.com> Date: Thu, 4 Jul 2024 22:44:31 +0300 Subject: [PATCH 56/56] Update core/node/vm_runner/src/impls/bwip.rs Co-authored-by: perekopskiy <53865202+perekopskiy@users.noreply.github.com> --- core/node/vm_runner/src/impls/bwip.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/core/node/vm_runner/src/impls/bwip.rs b/core/node/vm_runner/src/impls/bwip.rs index bea308ad8265..f3bdf55400e6 100644 --- a/core/node/vm_runner/src/impls/bwip.rs +++ b/core/node/vm_runner/src/impls/bwip.rs @@ -159,7 +159,7 @@ impl StateKeeperOutputHandler for BasicWitnessInputProducerOutputHandler { let blob_url = self.object_store.put(l1_batch_number, &result).await?; - tracing::info!("Saved VM run data for L1 batch {:?}", l1_batch_number); + tracing::info!(%l1_batch_number, "Saved VM run data"); connection .proof_generation_dal()
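The final two patches switch the BWIP logs to tracing's structured-field form: `%l1_batch_number` records the value via its `Display` impl as a first-class field instead of splicing it into the message text. A runnable sketch, assuming the `tracing` and `tracing-subscriber` crates:

```rust
use tracing::info;

fn log_batch(l1_batch_number: u32) {
    // Before: the value is interpolated into the message string.
    info!("Started saving VM run data for L1 batch {:?}", l1_batch_number);
    // After: the value is a structured field (recorded via `Display` because
    // of the `%` sigil), which log tooling can filter and index on.
    info!(%l1_batch_number, "Started saving VM run data");
}

fn main() {
    tracing_subscriber::fmt().init();
    log_batch(42);
}
```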