From 87768755e8653e4be5f29945b56fd05a5246d5a8 Mon Sep 17 00:00:00 2001
From: Daniyar Itegulov
Date: Thu, 22 Aug 2024 21:22:53 +1000
Subject: [PATCH 01/39] fix(prover): fail when fri prover job is not found (#2711)
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

## What ❔

The compressor should fail when a corresponding fri prover job is not found. This is unlikely to occur in the real
world, but it helps when manually populating the DB for debugging.

## Why ❔

This behaviour makes more sense.

## Checklist

- [x] PR title corresponds to the body of PR (we generate changelog entries from PRs).
- [ ] Tests for the changes have been added / updated.
- [ ] Documentation comments have been added / updated.
- [ ] Code has been formatted via `zk fmt` and `zk lint`.

---------

Co-authored-by: EmilLuta

---
 prover/crates/bin/proof_fri_compressor/src/compressor.rs | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/prover/crates/bin/proof_fri_compressor/src/compressor.rs b/prover/crates/bin/proof_fri_compressor/src/compressor.rs
index dc5ca939d9b4..34a2c965a311 100644
--- a/prover/crates/bin/proof_fri_compressor/src/compressor.rs
+++ b/prover/crates/bin/proof_fri_compressor/src/compressor.rs
@@ -136,7 +136,7 @@ impl JobProcessor for ProofCompressor {
             .get_scheduler_proof_job_id(l1_batch_number)
             .await
         else {
-            return Ok(None);
+            anyhow::bail!("Scheduler proof is missing from database for batch {l1_batch_number}");
         };
         tracing::info!(
             "Started proof compression for L1 batch: {:?}",

From 30edda404193938fbd55815bed164b5321d7c642 Mon Sep 17 00:00:00 2001
From: Artem Fomiuk <88630083+Artemka374@users.noreply.github.com>
Date: Thu, 22 Aug 2024 17:47:12 +0300
Subject: [PATCH 02/39] feat: Provide easy prover setup (#2683)
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

## What ❔

Allow running `zk_inception prover init` without `chain init`. Add docs for running provers and proving a batch.

## Why ❔

To provide an easy way to spin up the prover subsystem locally.

## Checklist

- [ ] PR title corresponds to the body of PR (we generate changelog entries from PRs).
- [ ] Tests for the changes have been added / updated.
- [ ] Documentation comments have been added / updated.
- [ ] Code has been formatted via `zk fmt` and `zk lint`.
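For illustration, the end-to-end local flow this enables looks roughly like this (a sketch only; the flag values are
the defaults suggested in the docs added by this patch):

```shell
# Initialize the prover subsystem without a prior `chain init`,
# then start a prover component (see prover/docs/05_proving_batch.md added below).
zk_inception prover init --shall-save-to-public-bucket=false --setup-database=true --use-default=true --dont-drop=false
zk_inception prover run --component=prover
```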
--- etc/env/file_based/general.yaml | 2 +- .../proof_fri_compressor/src/compressor.rs | 9 +- .../bin/proof_fri_compressor/src/main.rs | 5 + prover/crates/bin/prover_cli/src/cli.rs | 7 +- .../prover_cli/src/commands/insert_batch.rs | 43 ++++++ .../prover_cli/src/commands/insert_version.rs | 52 +++++++ .../crates/bin/prover_cli/src/commands/mod.rs | 2 + .../src/gpu_prover_job_processor.rs | 5 +- .../prover_fri/src/prover_job_processor.rs | 5 +- prover/crates/bin/witness_generator/README.md | 73 --------- .../witness_generator/src/leaf_aggregation.rs | 16 +- .../crates/bin/witness_generator/src/main.rs | 14 +- .../witness_generator/src/node_aggregation.rs | 8 +- .../witness_generator/src/recursion_tip.rs | 7 +- .../bin/witness_generator/src/scheduler.rs | 17 +- .../bin/witness_generator/tests/basic_test.rs | 20 ++- prover/docs/05_proving_batch.md | 145 ++++++++++++++++++ zk_toolbox/Cargo.lock | 1 + zk_toolbox/crates/config/src/secrets.rs | 12 ++ zk_toolbox/crates/zk_inception/README.md | 9 +- .../src/commands/prover/args/init.rs | 101 +++++++++++- .../src/commands/prover/args/run.rs | 2 + .../src/commands/prover/generate_sk.rs | 6 +- .../zk_inception/src/commands/prover/init.rs | 78 ++++++++-- .../zk_inception/src/commands/prover/run.rs | 18 ++- .../crates/zk_inception/src/messages.rs | 1 + zk_toolbox/crates/zk_supervisor/Cargo.toml | 1 + zk_toolbox/crates/zk_supervisor/README.md | 7 + .../crates/zk_supervisor/src/commands/mod.rs | 1 + .../src/commands/prover_version.rs | 41 +++++ zk_toolbox/crates/zk_supervisor/src/main.rs | 9 +- .../crates/zk_supervisor/src/messages.rs | 1 + 32 files changed, 589 insertions(+), 129 deletions(-) create mode 100644 prover/crates/bin/prover_cli/src/commands/insert_batch.rs create mode 100644 prover/crates/bin/prover_cli/src/commands/insert_version.rs create mode 100644 prover/docs/05_proving_batch.md create mode 100644 zk_toolbox/crates/zk_supervisor/src/commands/prover_version.rs diff --git a/etc/env/file_based/general.yaml b/etc/env/file_based/general.yaml index 8e7e6eca4280..19921cf536c4 100644 --- a/etc/env/file_based/general.yaml +++ b/etc/env/file_based/general.yaml @@ -140,7 +140,7 @@ prover: file_backed: file_backed_base_path: artifacts max_retries: 10 - setup_data_path: vk_setup_data_generator_server_fri/data + setup_data_path: crates/bin/vk_setup_data_generator_server_fri/data prometheus_port: 3315 max_attempts: 10 generation_timeout_in_secs: 600 diff --git a/prover/crates/bin/proof_fri_compressor/src/compressor.rs b/prover/crates/bin/proof_fri_compressor/src/compressor.rs index 34a2c965a311..067114ca5a6c 100644 --- a/prover/crates/bin/proof_fri_compressor/src/compressor.rs +++ b/prover/crates/bin/proof_fri_compressor/src/compressor.rs @@ -35,6 +35,7 @@ pub struct ProofCompressor { compression_mode: u8, max_attempts: u32, protocol_version: ProtocolSemanticVersion, + setup_data_path: String, } impl ProofCompressor { @@ -44,6 +45,7 @@ impl ProofCompressor { compression_mode: u8, max_attempts: u32, protocol_version: ProtocolSemanticVersion, + setup_data_path: String, ) -> Self { Self { blob_store, @@ -51,6 +53,7 @@ impl ProofCompressor { compression_mode, max_attempts, protocol_version, + setup_data_path, } } @@ -59,8 +62,9 @@ impl ProofCompressor { l1_batch: L1BatchNumber, proof: ZkSyncRecursionLayerProof, _compression_mode: u8, + setup_data_path: String, ) -> anyhow::Result { - let keystore = Keystore::default(); + let keystore = Keystore::new_with_setup_data_path(setup_data_path); let scheduler_vk = keystore .load_recursive_layer_verification_key( 
ZkSyncRecursionLayerStorageType::SchedulerCircuit as u8, @@ -174,8 +178,9 @@ impl JobProcessor for ProofCompressor { ) -> JoinHandle> { let compression_mode = self.compression_mode; let block_number = *job_id; + let setup_data_path = self.setup_data_path.clone(); tokio::task::spawn_blocking(move || { - Self::compress_proof(block_number, job, compression_mode) + Self::compress_proof(block_number, job, compression_mode, setup_data_path) }) } diff --git a/prover/crates/bin/proof_fri_compressor/src/main.rs b/prover/crates/bin/proof_fri_compressor/src/main.rs index a1a8ac90253e..e2086b228b69 100644 --- a/prover/crates/bin/proof_fri_compressor/src/main.rs +++ b/prover/crates/bin/proof_fri_compressor/src/main.rs @@ -59,6 +59,7 @@ async fn main() -> anyhow::Result<()> { let object_store_config = ProverObjectStoreConfig( general_config .prover_config + .clone() .expect("ProverConfig") .prover_object_store .context("ProverObjectStoreConfig")?, @@ -75,6 +76,10 @@ async fn main() -> anyhow::Result<()> { config.compression_mode, config.max_attempts, protocol_version, + general_config + .prover_config + .expect("ProverConfig doesn't exist") + .setup_data_path, ); let (stop_sender, stop_receiver) = watch::channel(false); diff --git a/prover/crates/bin/prover_cli/src/cli.rs b/prover/crates/bin/prover_cli/src/cli.rs index 0c7022cae297..41ef94980056 100644 --- a/prover/crates/bin/prover_cli/src/cli.rs +++ b/prover/crates/bin/prover_cli/src/cli.rs @@ -2,7 +2,8 @@ use clap::{command, Args, Parser, Subcommand}; use zksync_types::url::SensitiveUrl; use crate::commands::{ - config, debug_proof, delete, get_file_info, requeue, restart, stats, status::StatusCommand, + config, debug_proof, delete, get_file_info, insert_batch, insert_version, requeue, restart, + stats, status::StatusCommand, }; pub const VERSION_STRING: &str = env!("CARGO_PKG_VERSION"); @@ -27,6 +28,8 @@ impl ProverCLI { ProverCommand::Restart(args) => restart::run(args).await?, ProverCommand::DebugProof(args) => debug_proof::run(args).await?, ProverCommand::Stats(args) => stats::run(args, self.config).await?, + ProverCommand::InsertVersion(args) => insert_version::run(args, self.config).await?, + ProverCommand::InsertBatch(args) => insert_batch::run(args, self.config).await?, }; Ok(()) } @@ -55,4 +58,6 @@ pub enum ProverCommand { Restart(restart::Args), #[command(about = "Displays L1 Batch proving stats for a given period")] Stats(stats::Options), + InsertVersion(insert_version::Args), + InsertBatch(insert_batch::Args), } diff --git a/prover/crates/bin/prover_cli/src/commands/insert_batch.rs b/prover/crates/bin/prover_cli/src/commands/insert_batch.rs new file mode 100644 index 000000000000..add1474633d7 --- /dev/null +++ b/prover/crates/bin/prover_cli/src/commands/insert_batch.rs @@ -0,0 +1,43 @@ +use anyhow::Context as _; +use clap::Args as ClapArgs; +use zksync_basic_types::{ + protocol_version::{ProtocolSemanticVersion, ProtocolVersionId, VersionPatch}, + L1BatchNumber, +}; +use zksync_db_connection::connection_pool::ConnectionPool; +use zksync_prover_dal::{Prover, ProverDal}; + +use crate::cli::ProverCLIConfig; + +#[derive(ClapArgs)] +pub struct Args { + #[clap(short, long)] + pub number: L1BatchNumber, + #[clap(short, long)] + pub version: u16, + #[clap(short, long)] + pub patch: u32, +} + +pub async fn run(args: Args, config: ProverCLIConfig) -> anyhow::Result<()> { + let connection = ConnectionPool::::singleton(config.db_url) + .build() + .await + .context("failed to build a prover_connection_pool")?; + let mut conn = 
connection.connection().await.unwrap(); + + let protocol_version = ProtocolVersionId::try_from(args.version) + .map_err(|_| anyhow::anyhow!("Invalid protocol version"))?; + + let protocol_version_patch = VersionPatch(args.patch); + + conn.fri_witness_generator_dal() + .save_witness_inputs( + args.number, + &format!("witness_inputs_{}", args.number.0), + ProtocolSemanticVersion::new(protocol_version, protocol_version_patch), + ) + .await; + + Ok(()) +} diff --git a/prover/crates/bin/prover_cli/src/commands/insert_version.rs b/prover/crates/bin/prover_cli/src/commands/insert_version.rs new file mode 100644 index 000000000000..7f30719a713b --- /dev/null +++ b/prover/crates/bin/prover_cli/src/commands/insert_version.rs @@ -0,0 +1,52 @@ +use std::str::FromStr; + +use anyhow::Context as _; +use clap::Args as ClapArgs; +use zksync_basic_types::{ + protocol_version::{ + L1VerifierConfig, ProtocolSemanticVersion, ProtocolVersionId, VersionPatch, + }, + H256, +}; +use zksync_db_connection::connection_pool::ConnectionPool; +use zksync_prover_dal::{Prover, ProverDal}; + +use crate::cli::ProverCLIConfig; + +#[derive(ClapArgs)] +pub struct Args { + #[clap(short, long)] + pub version: u16, + #[clap(short, long)] + pub patch: u32, + #[clap(short, long)] + pub snark_wrapper: String, +} + +pub async fn run(args: Args, config: ProverCLIConfig) -> anyhow::Result<()> { + let connection = ConnectionPool::::singleton(config.db_url) + .build() + .await + .context("failed to build a prover_connection_pool")?; + let mut conn = connection.connection().await.unwrap(); + + let protocol_version = ProtocolVersionId::try_from(args.version) + .map_err(|_| anyhow::anyhow!("Invalid protocol version"))?; + + let protocol_version_patch = VersionPatch(args.patch); + + let snark_wrapper = H256::from_str(&args.snark_wrapper).unwrap_or_else(|_| { + panic!("Invalid snark wrapper hash"); + }); + + conn.fri_protocol_versions_dal() + .save_prover_protocol_version( + ProtocolSemanticVersion::new(protocol_version, protocol_version_patch), + L1VerifierConfig { + recursion_scheduler_level_vk_hash: snark_wrapper, + }, + ) + .await; + + Ok(()) +} diff --git a/prover/crates/bin/prover_cli/src/commands/mod.rs b/prover/crates/bin/prover_cli/src/commands/mod.rs index d9dde52284b4..bafe229884b9 100644 --- a/prover/crates/bin/prover_cli/src/commands/mod.rs +++ b/prover/crates/bin/prover_cli/src/commands/mod.rs @@ -2,6 +2,8 @@ pub(crate) mod config; pub(crate) mod debug_proof; pub(crate) mod delete; pub(crate) mod get_file_info; +pub(crate) mod insert_batch; +pub(crate) mod insert_version; pub(crate) mod requeue; pub(crate) mod restart; pub(crate) mod stats; diff --git a/prover/crates/bin/prover_fri/src/gpu_prover_job_processor.rs b/prover/crates/bin/prover_fri/src/gpu_prover_job_processor.rs index 4407dbcd8523..dc8594cbdc1b 100644 --- a/prover/crates/bin/prover_fri/src/gpu_prover_job_processor.rs +++ b/prover/crates/bin/prover_fri/src/gpu_prover_job_processor.rs @@ -112,7 +112,8 @@ pub mod gpu_prover { .clone(), SetupLoadMode::FromDisk => { let started_at = Instant::now(); - let keystore = Keystore::default(); + let keystore = + Keystore::new_with_setup_data_path(self.config.setup_data_path.clone()); let artifact: GoldilocksGpuProverSetupData = keystore .load_gpu_setup_data_for_circuit_type(key.clone()) .context("load_gpu_setup_data_for_circuit_type()")?; @@ -347,7 +348,7 @@ pub mod gpu_prover { &config.specialized_group_id, prover_setup_metadata_list ); - let keystore = Keystore::default(); + let keystore = 
Keystore::new_with_setup_data_path(config.setup_data_path.clone()); for prover_setup_metadata in prover_setup_metadata_list { let key = setup_metadata_to_setup_data_key(&prover_setup_metadata); let setup_data = keystore diff --git a/prover/crates/bin/prover_fri/src/prover_job_processor.rs b/prover/crates/bin/prover_fri/src/prover_job_processor.rs index 09c9d38348ff..2df1b626497f 100644 --- a/prover/crates/bin/prover_fri/src/prover_job_processor.rs +++ b/prover/crates/bin/prover_fri/src/prover_job_processor.rs @@ -85,7 +85,8 @@ impl Prover { .clone(), SetupLoadMode::FromDisk => { let started_at = Instant::now(); - let keystore = Keystore::default(); + let keystore = + Keystore::new_with_setup_data_path(self.config.setup_data_path.clone()); let artifact: GoldilocksProverSetupData = keystore .load_cpu_setup_data_for_circuit_type(key.clone()) .context("get_cpu_setup_data_for_circuit_type()")?; @@ -298,7 +299,7 @@ pub fn load_setup_data_cache(config: &FriProverConfig) -> anyhow::Result.bin` generated by different core -components). - -This file is stored by prover gateway in GCS (or your choice of object storage -- check config). To access it from GCS -(assuming you have access to the bucket), run: - -```shell -gsutil cp gs://your_bucket/witness_inputs/witness_inputs_.bin -``` - -Note, that you need to have `gsutil` installed, and you need to have access to the bucket. - -Now, database needs to know about the batch and the protocol version it should use. Check the latest protocol version in -the codebase by checking const `PROVER_PROTOCOL_SEMANTIC_VERSION` or run the binary in `prover` workspace: - -```console -cargo run --bin prover_version -``` - -It will give you the latest prover protocol version in a semver format, like `0.24.2`, you need to know only minor and -patch versions. Now, go to the `prover/crates/bin/vk_setup_data_generator_server_fri/data/commitments.json` and get -`snark_wrapper` value from it. Then, you need to insert the info about protocol version into the database. First, -connect to the database, e.g. locally you can do it like that: - -```shell -psql postgres://postgres:notsecurepassword@localhost/prover_local -``` - -And run the following query: - -```shell -INSERT INTO -prover_fri_protocol_versions ( -id, -recursion_scheduler_level_vk_hash, -created_at, -protocol_version_patch -) -VALUES -(, ''::bytea, NOW(), ) -ON CONFLICT (id, protocol_version_patch) DO NOTHING - -``` - -Now, you need to insert the batch into the database. Run the following query: - -```shell -INSERT INTO -witness_inputs_fri ( -l1_batch_number, -witness_inputs_blob_url, -protocol_version, -status, -created_at, -updated_at, -protocol_version_patch -) -VALUES -(, 'witness_inputs_.bin', , 'queued', NOW(), NOW(), ) -ON CONFLICT (l1_batch_number) DO NOTHING -``` - -Finally, run the basic witness generator itself: - -```shell -API_PROMETHEUS_LISTENER_PORT=3116 zk f cargo run --release --bin zksync_witness_generator -- --round=basic_circuits -``` - -And you are good to go! 
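A recurring change in the prover components above is that the setup-data directory is now passed explicitly via
`Keystore::new_with_setup_data_path(...)` instead of the implicit `Keystore::default()`, matching the
`setup_data_path` of `crates/bin/vk_setup_data_generator_server_fri/data` set in `general.yaml`. As a sketch
(mirroring the `key_generator` invocation used by `zk_inception prover generate-sk` later in this patch; assumes you
run it from the `prover` workspace), keys for that directory can be generated with:

```shell
# Generate GPU setup keys into the setup-data path used throughout this patch.
cargo run --features gpu --release --bin key_generator -- \
    generate-sk-gpu all --recompute-if-missing \
    --setup-path=crates/bin/vk_setup_data_generator_server_fri/data
```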
diff --git a/prover/crates/bin/witness_generator/src/leaf_aggregation.rs b/prover/crates/bin/witness_generator/src/leaf_aggregation.rs index d8cad84e777d..2f4494187975 100644 --- a/prover/crates/bin/witness_generator/src/leaf_aggregation.rs +++ b/prover/crates/bin/witness_generator/src/leaf_aggregation.rs @@ -72,6 +72,7 @@ pub struct LeafAggregationWitnessGenerator { object_store: Arc, prover_connection_pool: ConnectionPool, protocol_version: ProtocolSemanticVersion, + setup_data_path: String, } impl LeafAggregationWitnessGenerator { @@ -80,12 +81,14 @@ impl LeafAggregationWitnessGenerator { object_store: Arc, prover_connection_pool: ConnectionPool, protocol_version: ProtocolSemanticVersion, + setup_data_path: String, ) -> Self { Self { config, object_store, prover_connection_pool, protocol_version, + setup_data_path, } } @@ -131,9 +134,13 @@ impl JobProcessor for LeafAggregationWitnessGenerator { tracing::info!("Processing leaf aggregation job {:?}", metadata.id); Ok(Some(( metadata.id, - prepare_leaf_aggregation_job(metadata, &*self.object_store) - .await - .context("prepare_leaf_aggregation_job()")?, + prepare_leaf_aggregation_job( + metadata, + &*self.object_store, + self.setup_data_path.clone(), + ) + .await + .context("prepare_leaf_aggregation_job()")?, ))) } @@ -219,6 +226,7 @@ impl JobProcessor for LeafAggregationWitnessGenerator { pub async fn prepare_leaf_aggregation_job( metadata: LeafAggregationJobMetadata, object_store: &dyn ObjectStore, + setup_data_path: String, ) -> anyhow::Result { let started_at = Instant::now(); let closed_form_input = get_artifacts(&metadata, object_store).await; @@ -227,7 +235,7 @@ pub async fn prepare_leaf_aggregation_job( .observe(started_at.elapsed()); let started_at = Instant::now(); - let keystore = Keystore::default(); + let keystore = Keystore::new_with_setup_data_path(setup_data_path); let base_vk = keystore .load_base_layer_verification_key(metadata.circuit_id) .context("get_base_layer_vk_for_circuit_type()")?; diff --git a/prover/crates/bin/witness_generator/src/main.rs b/prover/crates/bin/witness_generator/src/main.rs index a88dd8726d39..50c955168602 100644 --- a/prover/crates/bin/witness_generator/src/main.rs +++ b/prover/crates/bin/witness_generator/src/main.rs @@ -80,9 +80,10 @@ async fn main() -> anyhow::Result<()> { let store_factory = ObjectStoreFactory::new(object_store_config.0); let config = general_config .witness_generator_config - .context("witness generator config")?; + .context("witness generator config")? + .clone(); - let prometheus_config = general_config.prometheus_config; + let prometheus_config = general_config.prometheus_config.clone(); // If the prometheus listener port is not set in the witness generator config, use the one from the prometheus config. 
let prometheus_listener_port = if let Some(port) = config.prometheus_listener_port { @@ -158,6 +159,8 @@ async fn main() -> anyhow::Result<()> { let mut tasks = Vec::new(); tasks.push(tokio::spawn(prometheus_task)); + let setup_data_path = prover_config.setup_data_path.clone(); + for round in rounds { tracing::info!( "initializing the {:?} witness generator, batch size: {:?} with protocol_version: {:?}", @@ -168,8 +171,7 @@ async fn main() -> anyhow::Result<()> { let witness_generator_task = match round { AggregationRound::BasicCircuits => { - let setup_data_path = prover_config.setup_data_path.clone(); - let vk_commitments = get_cached_commitments(Some(setup_data_path)); + let vk_commitments = get_cached_commitments(Some(setup_data_path.clone())); assert_eq!( vk_commitments, vk_commitments_in_db, @@ -204,6 +206,7 @@ async fn main() -> anyhow::Result<()> { store_factory.create_store().await?, prover_connection_pool.clone(), protocol_version, + setup_data_path.clone(), ); generator.run(stop_receiver.clone(), opt.batch_size) } @@ -213,6 +216,7 @@ async fn main() -> anyhow::Result<()> { store_factory.create_store().await?, prover_connection_pool.clone(), protocol_version, + setup_data_path.clone(), ); generator.run(stop_receiver.clone(), opt.batch_size) } @@ -222,6 +226,7 @@ async fn main() -> anyhow::Result<()> { store_factory.create_store().await?, prover_connection_pool.clone(), protocol_version, + setup_data_path.clone(), ); generator.run(stop_receiver.clone(), opt.batch_size) } @@ -231,6 +236,7 @@ async fn main() -> anyhow::Result<()> { store_factory.create_store().await?, prover_connection_pool.clone(), protocol_version, + setup_data_path.clone(), ); generator.run(stop_receiver.clone(), opt.batch_size) } diff --git a/prover/crates/bin/witness_generator/src/node_aggregation.rs b/prover/crates/bin/witness_generator/src/node_aggregation.rs index a7dce2a513d8..b6fc6b8f7c65 100644 --- a/prover/crates/bin/witness_generator/src/node_aggregation.rs +++ b/prover/crates/bin/witness_generator/src/node_aggregation.rs @@ -70,6 +70,7 @@ pub struct NodeAggregationWitnessGenerator { object_store: Arc, prover_connection_pool: ConnectionPool, protocol_version: ProtocolSemanticVersion, + setup_data_path: String, } impl NodeAggregationWitnessGenerator { @@ -78,12 +79,14 @@ impl NodeAggregationWitnessGenerator { object_store: Arc, prover_connection_pool: ConnectionPool, protocol_version: ProtocolSemanticVersion, + setup_data_path: String, ) -> Self { Self { config, object_store, prover_connection_pool, protocol_version, + setup_data_path, } } @@ -241,7 +244,7 @@ impl JobProcessor for NodeAggregationWitnessGenerator { tracing::info!("Processing node aggregation job {:?}", metadata.id); Ok(Some(( metadata.id, - prepare_job(metadata, &*self.object_store) + prepare_job(metadata, &*self.object_store, self.setup_data_path.clone()) .await .context("prepare_job()")?, ))) @@ -326,6 +329,7 @@ impl JobProcessor for NodeAggregationWitnessGenerator { pub async fn prepare_job( metadata: NodeAggregationJobMetadata, object_store: &dyn ObjectStore, + setup_data_path: String, ) -> anyhow::Result { let started_at = Instant::now(); let artifacts = get_artifacts(&metadata, object_store).await; @@ -334,7 +338,7 @@ pub async fn prepare_job( .observe(started_at.elapsed()); let started_at = Instant::now(); - let keystore = Keystore::default(); + let keystore = Keystore::new_with_setup_data_path(setup_data_path); let leaf_vk = keystore .load_recursive_layer_verification_key(metadata.circuit_id) 
.context("get_recursive_layer_vk_for_circuit_type")?; diff --git a/prover/crates/bin/witness_generator/src/recursion_tip.rs b/prover/crates/bin/witness_generator/src/recursion_tip.rs index 2a57ffff85ff..e05a0cc38cf8 100644 --- a/prover/crates/bin/witness_generator/src/recursion_tip.rs +++ b/prover/crates/bin/witness_generator/src/recursion_tip.rs @@ -75,6 +75,7 @@ pub struct RecursionTipWitnessGenerator { object_store: Arc, prover_connection_pool: ConnectionPool, protocol_version: ProtocolSemanticVersion, + setup_data_path: String, } impl RecursionTipWitnessGenerator { @@ -83,12 +84,14 @@ impl RecursionTipWitnessGenerator { object_store: Arc, prover_connection_pool: ConnectionPool, protocol_version: ProtocolSemanticVersion, + setup_data_path: String, ) -> Self { Self { config, object_store, prover_connection_pool, protocol_version, + setup_data_path, } } @@ -172,6 +175,7 @@ impl JobProcessor for RecursionTipWitnessGenerator { l1_batch_number, final_node_proof_job_ids, &*self.object_store, + self.setup_data_path.clone(), ) .await .context("prepare_job()")?, @@ -284,6 +288,7 @@ pub async fn prepare_job( l1_batch_number: L1BatchNumber, final_node_proof_job_ids: Vec<(u8, u32)>, object_store: &dyn ObjectStore, + setup_data_path: String, ) -> anyhow::Result { let started_at = Instant::now(); let recursion_tip_proofs = @@ -291,7 +296,7 @@ pub async fn prepare_job( WITNESS_GENERATOR_METRICS.blob_fetch_time[&AggregationRound::RecursionTip.into()] .observe(started_at.elapsed()); - let keystore = Keystore::default(); + let keystore = Keystore::new_with_setup_data_path(setup_data_path); let node_vk = keystore .load_recursive_layer_verification_key( ZkSyncRecursionLayerStorageType::NodeLayerCircuit as u8, diff --git a/prover/crates/bin/witness_generator/src/scheduler.rs b/prover/crates/bin/witness_generator/src/scheduler.rs index f69d338061e2..c389e037ffa6 100644 --- a/prover/crates/bin/witness_generator/src/scheduler.rs +++ b/prover/crates/bin/witness_generator/src/scheduler.rs @@ -57,6 +57,7 @@ pub struct SchedulerWitnessGenerator { object_store: Arc, prover_connection_pool: ConnectionPool, protocol_version: ProtocolSemanticVersion, + setup_data_path: String, } impl SchedulerWitnessGenerator { @@ -65,12 +66,14 @@ impl SchedulerWitnessGenerator { object_store: Arc, prover_connection_pool: ConnectionPool, protocol_version: ProtocolSemanticVersion, + setup_data_path: String, ) -> Self { Self { config, object_store, prover_connection_pool, protocol_version, + setup_data_path, } } @@ -147,9 +150,14 @@ impl JobProcessor for SchedulerWitnessGenerator { Ok(Some(( l1_batch_number, - prepare_job(l1_batch_number, recursion_tip_job_id, &*self.object_store) - .await - .context("prepare_job()")?, + prepare_job( + l1_batch_number, + recursion_tip_job_id, + &*self.object_store, + self.setup_data_path.clone(), + ) + .await + .context("prepare_job()")?, ))) } @@ -258,6 +266,7 @@ pub async fn prepare_job( l1_batch_number: L1BatchNumber, recursion_tip_job_id: u32, object_store: &dyn ObjectStore, + setup_data_path: String, ) -> anyhow::Result { let started_at = Instant::now(); let wrapper = object_store.get(recursion_tip_job_id).await?; @@ -271,7 +280,7 @@ pub async fn prepare_job( .observe(started_at.elapsed()); let started_at = Instant::now(); - let keystore = Keystore::default(); + let keystore = Keystore::new_with_setup_data_path(setup_data_path); let node_vk = keystore .load_recursive_layer_verification_key( ZkSyncRecursionLayerStorageType::NodeLayerCircuit as u8, diff --git 
a/prover/crates/bin/witness_generator/tests/basic_test.rs b/prover/crates/bin/witness_generator/tests/basic_test.rs
index f8a21179adb7..b034ab57d82c 100644
--- a/prover/crates/bin/witness_generator/tests/basic_test.rs
+++ b/prover/crates/bin/witness_generator/tests/basic_test.rs
@@ -50,9 +50,13 @@ async fn test_leaf_witness_gen() {
         .await
         .unwrap();

-    let job = prepare_leaf_aggregation_job(leaf_aggregation_job_metadata, &*object_store)
-        .await
-        .unwrap();
+    let job = prepare_leaf_aggregation_job(
+        leaf_aggregation_job_metadata,
+        &*object_store,
+        "crates/bin/vk_setup_data_generator/data".to_string(),
+    )
+    .await
+    .unwrap();

     let artifacts = LeafAggregationWitnessGenerator::process_job_impl(
         job,
@@ -139,9 +143,13 @@ async fn test_node_witness_gen() {
         prover_job_ids_for_proofs: vec![5211320],
     };

-    let job = node_aggregation::prepare_job(node_aggregation_job_metadata, &*object_store)
-        .await
-        .unwrap();
+    let job = node_aggregation::prepare_job(
+        node_aggregation_job_metadata,
+        &*object_store,
+        "crates/bin/vk_setup_data_generator/data".to_string(),
+    )
+    .await
+    .unwrap();

     let artifacts = NodeAggregationWitnessGenerator::process_job_impl(
         job,

diff --git a/prover/docs/05_proving_batch.md b/prover/docs/05_proving_batch.md
new file mode 100644
index 000000000000..441a8225f866
--- /dev/null
+++ b/prover/docs/05_proving_batch.md
@@ -0,0 +1,145 @@
+# Proving a batch
+
+If you got to this section, then most likely you are wondering how to prove and verify a batch by yourself. Since
+releases `prover-v15.1.0` and `core-v24.9.0`, the prover subsystem no longer needs access to the core database, which
+means you can run only the prover subsystem and prove batches without running the whole core system. This guide will
+help you with that.
+
+## Requirements
+
+### Hardware
+
+The setup for running the whole process is the same as described [here](./01_gcp_vm.md), except that you need 48 GB of
+GPU memory, which requires an NVIDIA A100 80GB GPU.
+
+### Prerequisites
+
+First of all, you need to install the CUDA drivers; everything else will be handled by the `zk_inception` and
+`prover_cli` tools. For that, check the following [guide](./02_setup.md) (you can skip the bellman-cuda step).
+
+Install the prerequisites, which you can find
+[here](https://github.com/matter-labs/zksync-era/blob/main/docs/guides/setup-dev.md). Note that if you are not using a
+Google VM instance, you also need to install [gcloud](https://cloud.google.com/sdk/docs/install#deb).
+
+Now you can use the `zk_inception` and `prover_cli` tools to set up the environment and run the prover subsystem.
+
+```shell
+cargo +nightly-2024-08-01 install --git https://github.com/matter-labs/zksync-era/ --locked zk_inception zk_supervisor prover_cli --force
+```
+
+## Initializing system
+
+After you have installed the tools, you can create an ecosystem (you only need to run this if you are outside of
+`zksync-era`) by running:
+
+```shell
+zk_inception ecosystem create --l1-network=localhost --prover-mode=gpu --wallet-creation=localhost --l1-batch-commit-data-generator-mode=rollup --start-containers=true
+```
+
+The command will create the ecosystem and all the necessary components for the prover subsystem. You can leave the
+default values for all the prompts you will see. Now, initialize the prover subsystem by running:
+
+```shell
+zk_inception prover init --shall-save-to-public-bucket=false --setup-database=true --use-default=true --dont-drop=false
+```
+
+For the prompts you can leave the default values as well.
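+As a quick sanity check (a sketch; `era` is a hypothetical chain name), you can confirm that
+`prover init --setup-database` wrote the prover database URL into the chain's secrets file:
+
+```shell
+# prover_url is set by `prover init` when the database is configured.
+grep prover_url ./chains/era/configs/secrets.yaml
+```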
+
+## Proving the batch
+
+### Getting data needed for proving
+
+At this step, we need to get the witness input data for the batch you want to prove. The database information now
+lives in an input file called `witness_inputs_<batch_number>.bin`, generated by different core components.
+
+- If the batch was produced by your system, the file is stored by the prover gateway in GCS (or your object storage of
+  choice -- check the config). At the point of getting it, most likely there is no artifacts directory created yet. If
+  you have cloned the zksync-era repo, it is in the root of the ecosystem directory. Create the artifacts directory by
+  running:
+
+  ```shell
+  mkdir -p <path/to/artifacts>
+  ```
+
+  To access it from GCS (assuming you have access to the bucket), run:
+
+  ```shell
+  gsutil cp gs://your_bucket/witness_inputs/witness_inputs_<batch_number>.bin <path/to/artifacts>
+  ```
+
+- If you want to prove a batch produced by zkSync, you can get the data from the `ExternalProofIntegrationAPI` using
+  the `{address}/proof_generation_data` endpoint. You need to replace `{address}` with the address of the API and
+  provide the batch number as query data to get the data for a specific batch; otherwise, you will receive the latest
+  data for a batch that was already proven. Example:
+
+  ```shell
+  curl -H "Content-Type: application/json" -X POST {address}/proof_generation_data -d 'null'
+  ```
+
+  or
+
+  ```shell
+  curl -H "Content-Type: application/json" -X POST {address}/proof_generation_data -d '1000'
+  ```
+
+### Preparing database
+
+After you have the data, you need to prepare the system to run the batch: the database needs to know about the batch
+and the protocol version it should use. You can do that by running:
+
+```shell
+zk_supervisor prover-version
+```
+
+Example output:
+
+```shell
+Current protocol version found in zksync-era: 0.24.2, snark_wrapper: "0x14f97b81e54b35fe673d8708cc1a19e1ea5b5e348e12d31e39824ed4f42bbca2"
+```
+
+This command will provide you with information about the semantic protocol version (you only need to know the minor
+and patch versions) and the snark wrapper value. In the example, `MINOR_VERSION` is 24, `PATCH_VERSION` is 2, and
+`SNARK_WRAPPER` is `0x14f97b81e54b35fe673d8708cc1a19e1ea5b5e348e12d31e39824ed4f42bbca2`.
+
+Now, using the `prover_cli` tool, you can insert the data about the batch and protocol version into the database.
+
+First, get the database URL (you can find it in `<ecosystem_dir>/chains/<chain_name>/configs/secrets.yaml`; it is the
+`prover_url` value). Now, insert the information about the protocol version into the database:
+
+```shell
+prover_cli insert-version --version=<minor_version> --patch=<patch_version> --snark-wrapper=<snark_wrapper>
+```
+
+And finally, provide the data about the batch:
+
+```shell
+prover_cli insert-batch --number=<batch_number> --version=<minor_version> --patch=<patch_version>
+```
+
+Also, provers need to know which setup keys they should use. It may take some time, but you can generate them with:
+
+```shell
+zk_inception prover generate-sk
+```
+
+## Running prover subsystem
+
+At this step, all the data is prepared and you can run the prover subsystem. To do that, run the following commands:
+
+```shell
+zk_inception prover run --component=prover
+zk_inception prover run --component=witness-generator --round=all-rounds
+zk_inception prover run --component=witness-vector-generator --threads=10
+zk_inception prover run --component=compressor
+zk_inception prover run --component=prover-job-monitor
+```
+
+And you are good to go! The prover subsystem will prove the batch, and you can check the results in the database.
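+For example, a minimal sketch of checking batch progress (assuming the prover database's `witness_inputs_fri` table;
+use the `prover_url` from your `secrets.yaml` as the connection string):
+
+```shell
+# Shows the proving status of each batch known to the prover database.
+psql <prover_database_url> -c "SELECT l1_batch_number, status FROM witness_inputs_fri;"
+```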
+
+## Verifying zkSync batch
+
+Now, assuming the proof is already generated, you can verify it using the `ExternalProofIntegrationAPI`. Usually the
+proof is stored in a GCS bucket (to fetch it you can use the same steps as for getting the witness input data
+[here](#getting-data-needed-for-proving), but locally you can find it in the `<ecosystem_dir>/artifacts/proofs_fri`
+directory). Now, simply send the data to the endpoint `{address}/verify_batch/{batch_number}`. Note that you need to
+pass the generated proof as serialized JSON data when calling the endpoint. The API will respond with status 200 if
+the proof is valid and with an error message otherwise.

diff --git a/zk_toolbox/Cargo.lock b/zk_toolbox/Cargo.lock
index c76556272e82..7682b92a4f2d 100644
--- a/zk_toolbox/Cargo.lock
+++ b/zk_toolbox/Cargo.lock
@@ -6298,6 +6298,7 @@ dependencies = [
  "futures",
  "human-panic",
  "serde",
+ "serde_json",
  "strum",
  "tokio",
  "url",

diff --git a/zk_toolbox/crates/config/src/secrets.rs b/zk_toolbox/crates/config/src/secrets.rs
index 5bcad19ad339..f0a39148b034 100644
--- a/zk_toolbox/crates/config/src/secrets.rs
+++ b/zk_toolbox/crates/config/src/secrets.rs
@@ -26,6 +26,18 @@ pub fn set_databases(
     Ok(())
 }

+pub fn set_prover_database(
+    secrets: &mut SecretsConfig,
+    prover_db_config: &DatabaseConfig,
+) -> anyhow::Result<()> {
+    let database = secrets
+        .database
+        .as_mut()
+        .context("Databases must be presented")?;
+    database.prover_url = Some(SensitiveUrl::from(prover_db_config.full_url()));
+    Ok(())
+}
+
 pub fn set_l1_rpc_url(secrets: &mut SecretsConfig, l1_rpc_url: String) -> anyhow::Result<()> {
     secrets
         .l1

diff --git a/zk_toolbox/crates/zk_inception/README.md b/zk_toolbox/crates/zk_inception/README.md index 4cb6d213688e..8b6368ce8c24 100644 --- a/zk_toolbox/crates/zk_inception/README.md +++ b/zk_toolbox/crates/zk_inception/README.md @@ -428,7 +428,7 @@ Initialize prover - `--project-id ` - `--shall-save-to-public-bucket ` - Possible values: `true`, `false` +Possible values: `true`, `false` - `--public-store-dir ` - `--public-bucket-base-url ` @@ -438,8 +438,13 @@ Initialize prover - `--public-project-id ` - `--bellman-cuda-dir ` - `--download-key ` +- `--setup-database` +- `--use-default` - use default database +- `--dont-drop` - don't drop database +- `--prover-db-url` - URL of database to use +- `--prover-db-name` - Name of database to use - Possible values: `true`, `false` +Possible values: `true`, `false` - `--setup-key-path ` - `--cloud-type ` diff --git a/zk_toolbox/crates/zk_inception/src/commands/prover/args/init.rs b/zk_toolbox/crates/zk_inception/src/commands/prover/args/init.rs index cef435625716..e8c9cf1888d5 100644 --- a/zk_toolbox/crates/zk_inception/src/commands/prover/args/init.rs +++ b/zk_toolbox/crates/zk_inception/src/commands/prover/args/init.rs @@ -1,7 +1,10 @@ use clap::{Parser, ValueEnum}; -use common::{logger, Prompt, PromptConfirm, PromptSelect}; +use common::{db::DatabaseConfig, logger, Prompt, PromptConfirm, PromptSelect}; +use config::ChainConfig; use serde::{Deserialize, Serialize}; +use slugify_rs::slugify; use strum::{EnumIter, IntoEnumIterator}; +use url::Url; use xshell::Shell; use zksync_config::configs::fri_prover::CloudConnectionMode; @@ -9,15 +12,18 @@ use super::init_bellman_cuda::InitBellmanCudaArgs; use crate::{ commands::prover::gcs::get_project_ids, consts::{DEFAULT_CREDENTIALS_FILE, DEFAULT_PROOF_STORE_DIR}, + defaults::{generate_db_names, DBNames, DATABASE_PROVER_URL}, messages::{ - MSG_CLOUD_TYPE_PROMPT, MSG_CREATE_GCS_BUCKET_LOCATION_PROMPT, - MSG_CREATE_GCS_BUCKET_NAME_PROMTP,
MSG_CREATE_GCS_BUCKET_PROJECT_ID_NO_PROJECTS_PROMPT, + msg_prover_db_name_prompt, msg_prover_db_url_prompt, MSG_CLOUD_TYPE_PROMPT, + MSG_CREATE_GCS_BUCKET_LOCATION_PROMPT, MSG_CREATE_GCS_BUCKET_NAME_PROMTP, + MSG_CREATE_GCS_BUCKET_PROJECT_ID_NO_PROJECTS_PROMPT, MSG_CREATE_GCS_BUCKET_PROJECT_ID_PROMPT, MSG_CREATE_GCS_BUCKET_PROMPT, MSG_DOWNLOAD_SETUP_KEY_PROMPT, MSG_GETTING_PROOF_STORE_CONFIG, MSG_GETTING_PUBLIC_STORE_CONFIG, MSG_PROOF_STORE_CONFIG_PROMPT, MSG_PROOF_STORE_DIR_PROMPT, MSG_PROOF_STORE_GCS_BUCKET_BASE_URL_ERR, MSG_PROOF_STORE_GCS_BUCKET_BASE_URL_PROMPT, - MSG_PROOF_STORE_GCS_CREDENTIALS_FILE_PROMPT, MSG_SAVE_TO_PUBLIC_BUCKET_PROMPT, - MSG_SETUP_KEY_PATH_PROMPT, + MSG_PROOF_STORE_GCS_CREDENTIALS_FILE_PROMPT, MSG_PROVER_DB_NAME_HELP, + MSG_PROVER_DB_URL_HELP, MSG_SAVE_TO_PUBLIC_BUCKET_PROMPT, MSG_SETUP_KEY_PATH_PROMPT, + MSG_USE_DEFAULT_DATABASES_HELP, }, }; @@ -54,6 +60,17 @@ pub struct ProverInitArgs { #[serde(flatten)] pub setup_key_config: SetupKeyConfigTmp, + #[clap(long)] + pub setup_database: Option, + #[clap(long, help = MSG_PROVER_DB_URL_HELP)] + pub prover_db_url: Option, + #[clap(long, help = MSG_PROVER_DB_NAME_HELP)] + pub prover_db_name: Option, + #[clap(long, short, help = MSG_USE_DEFAULT_DATABASES_HELP)] + pub use_default: Option, + #[clap(long, short, action)] + pub dont_drop: Option, + #[clap(long)] cloud_type: Option, } @@ -160,6 +177,12 @@ pub struct SetupKeyConfig { pub setup_key_path: String, } +#[derive(Debug, Clone)] +pub struct ProverDatabaseConfig { + pub database_config: DatabaseConfig, + pub dont_drop: bool, +} + #[derive(Debug, Clone)] pub struct ProverInitArgsFinal { pub proof_store: ProofStorageConfig, @@ -167,6 +190,7 @@ pub struct ProverInitArgsFinal { pub setup_key_config: SetupKeyConfig, pub bellman_cuda_config: InitBellmanCudaArgs, pub cloud_type: CloudConnectionMode, + pub database_config: Option, } impl ProverInitArgs { @@ -174,12 +198,14 @@ impl ProverInitArgs { &self, shell: &Shell, setup_key_path: &str, + chain_config: &ChainConfig, ) -> anyhow::Result { let proof_store = self.fill_proof_storage_values_with_prompt(shell)?; let public_store = self.fill_public_storage_values_with_prompt(shell)?; let setup_key_config = self.fill_setup_key_values_with_prompt(setup_key_path); let bellman_cuda_config = self.fill_bellman_cuda_values_with_prompt()?; let cloud_type = self.get_cloud_type_with_prompt(); + let database_config = self.fill_database_values_with_prompt(chain_config); Ok(ProverInitArgsFinal { proof_store, @@ -187,6 +213,7 @@ impl ProverInitArgs { setup_key_config, bellman_cuda_config, cloud_type, + database_config, }) } @@ -314,7 +341,11 @@ impl ProverInitArgs { .clone() .setup_key_config .download_key - .unwrap_or_else(|| PromptConfirm::new(MSG_DOWNLOAD_SETUP_KEY_PROMPT).ask()); + .unwrap_or_else(|| { + PromptConfirm::new(MSG_DOWNLOAD_SETUP_KEY_PROMPT) + .default(true) + .ask() + }); let setup_key_path = self .clone() .setup_key_config @@ -435,9 +466,65 @@ impl ProverInitArgs { fn get_cloud_type_with_prompt(&self) -> CloudConnectionMode { let cloud_type = self.cloud_type.clone().unwrap_or_else(|| { - PromptSelect::new(MSG_CLOUD_TYPE_PROMPT, InternalCloudConnectionMode::iter()).ask() + PromptSelect::new( + MSG_CLOUD_TYPE_PROMPT, + InternalCloudConnectionMode::iter().rev(), + ) + .ask() }); cloud_type.into() } + + fn fill_database_values_with_prompt( + &self, + config: &ChainConfig, + ) -> Option { + let setup_database = self + .setup_database + .unwrap_or_else(|| PromptConfirm::new("Do you want to setup the database?").ask()); + + if 
setup_database { + let DBNames { prover_name, .. } = generate_db_names(config); + let chain_name = config.name.clone(); + + let dont_drop = self.dont_drop.unwrap_or_else(|| { + !PromptConfirm::new("Do you want to drop the database?") + .default(true) + .ask() + }); + + if self.use_default.unwrap_or_else(|| { + PromptConfirm::new(MSG_USE_DEFAULT_DATABASES_HELP) + .default(true) + .ask() + }) { + Some(ProverDatabaseConfig { + database_config: DatabaseConfig::new(DATABASE_PROVER_URL.clone(), prover_name), + dont_drop, + }) + } else { + let prover_db_url = self.prover_db_url.clone().unwrap_or_else(|| { + Prompt::new(&msg_prover_db_url_prompt(&chain_name)) + .default(DATABASE_PROVER_URL.as_str()) + .ask() + }); + + let prover_db_name: String = self.prover_db_name.clone().unwrap_or_else(|| { + Prompt::new(&msg_prover_db_name_prompt(&chain_name)) + .default(&prover_name) + .ask() + }); + + let prover_db_name = slugify!(&prover_db_name, separator = "_"); + + Some(ProverDatabaseConfig { + database_config: DatabaseConfig::new(prover_db_url, prover_db_name), + dont_drop, + }) + } + } else { + None + } + } } diff --git a/zk_toolbox/crates/zk_inception/src/commands/prover/args/run.rs b/zk_toolbox/crates/zk_inception/src/commands/prover/args/run.rs index c2d5cef26ad4..6bdd62c1d488 100644 --- a/zk_toolbox/crates/zk_inception/src/commands/prover/args/run.rs +++ b/zk_toolbox/crates/zk_inception/src/commands/prover/args/run.rs @@ -28,6 +28,8 @@ pub enum ProverComponent { Prover, #[strum(to_string = "Compressor")] Compressor, + #[strum(to_string = "ProverJobMonitor")] + ProverJobMonitor, } #[derive(Debug, Clone, Parser, Default)] diff --git a/zk_toolbox/crates/zk_inception/src/commands/prover/generate_sk.rs b/zk_toolbox/crates/zk_inception/src/commands/prover/generate_sk.rs index 1657ab2c99fb..7f678470d178 100644 --- a/zk_toolbox/crates/zk_inception/src/commands/prover/generate_sk.rs +++ b/zk_toolbox/crates/zk_inception/src/commands/prover/generate_sk.rs @@ -17,9 +17,9 @@ pub(crate) async fn run(shell: &Shell) -> anyhow::Result<()> { let cmd = Cmd::new(cmd!( shell, "cargo run --features gpu --release --bin key_generator -- - generate-sk all --recompute-if-missing - --setup-path=vk_setup_data_generator_server_fri/data - --path={link_to_prover}/vk_setup_data_generator_server_fri/data" + generate-sk-gpu all --recompute-if-missing + --setup-path=crates/bin/vk_setup_data_generator_server_fri/data + --path={link_to_prover}/crates/bin/vk_setup_data_generator_server_fri/data" )); cmd.run()?; spinner.finish(); diff --git a/zk_toolbox/crates/zk_inception/src/commands/prover/init.rs b/zk_toolbox/crates/zk_inception/src/commands/prover/init.rs index a27e5f1b0bec..803ef56df832 100644 --- a/zk_toolbox/crates/zk_inception/src/commands/prover/init.rs +++ b/zk_toolbox/crates/zk_inception/src/commands/prover/init.rs @@ -1,6 +1,15 @@ +use std::path::PathBuf; + use anyhow::Context; -use common::{check_prover_prequisites, cmd::Cmd, logger, spinner::Spinner}; -use config::EcosystemConfig; +use common::{ + check_prover_prequisites, + cmd::Cmd, + config::global_config, + db::{drop_db_if_exists, init_db, migrate_db, DatabaseConfig}, + logger, + spinner::Spinner, +}; +use config::{copy_configs, set_prover_database, traits::SaveConfigWithBasePath, EcosystemConfig}; use xshell::{cmd, Shell}; use zksync_config::{ configs::{object_store::ObjectStoreMode, GeneralConfig}, @@ -14,28 +23,36 @@ use super::{ utils::get_link_to_prover, }; use crate::{ - consts::PROVER_STORE_MAX_RETRIES, + consts::{PROVER_MIGRATIONS, PROVER_STORE_MAX_RETRIES}, 
messages::{ MSG_CHAIN_NOT_FOUND_ERR, MSG_DOWNLOADING_SETUP_KEY_SPINNER, - MSG_GENERAL_CONFIG_NOT_FOUND_ERR, MSG_PROOF_COMPRESSOR_CONFIG_NOT_FOUND_ERR, - MSG_PROVER_CONFIG_NOT_FOUND_ERR, MSG_PROVER_INITIALIZED, MSG_SETUP_KEY_PATH_ERROR, + MSG_FAILED_TO_DROP_PROVER_DATABASE_ERR, MSG_GENERAL_CONFIG_NOT_FOUND_ERR, + MSG_INITIALIZING_DATABASES_SPINNER, MSG_INITIALIZING_PROVER_DATABASE, + MSG_PROOF_COMPRESSOR_CONFIG_NOT_FOUND_ERR, MSG_PROVER_CONFIG_NOT_FOUND_ERR, + MSG_PROVER_INITIALIZED, MSG_SETUP_KEY_PATH_ERROR, }, }; pub(crate) async fn run(args: ProverInitArgs, shell: &Shell) -> anyhow::Result<()> { check_prover_prequisites(shell); + let ecosystem_config = EcosystemConfig::from_file(shell)?; + + let setup_key_path = get_default_setup_key_path(&ecosystem_config)?; + let chain_config = ecosystem_config .load_chain(Some(ecosystem_config.default_chain.clone())) .context(MSG_CHAIN_NOT_FOUND_ERR)?; + let args = args.fill_values_with_prompt(shell, &setup_key_path, &chain_config)?; + + if chain_config.get_general_config().is_err() || chain_config.get_secrets_config().is_err() { + copy_configs(shell, &ecosystem_config.link_to_code, &chain_config.configs)?; + } + let mut general_config = chain_config .get_general_config() .context(MSG_GENERAL_CONFIG_NOT_FOUND_ERR)?; - let setup_key_path = get_default_setup_key_path(&ecosystem_config)?; - - let args = args.fill_values_with_prompt(shell, &setup_key_path)?; - let proof_object_store_config = get_object_store_config(shell, Some(args.proof_store))?; let public_object_store_config = get_object_store_config(shell, args.public_store)?; @@ -72,6 +89,23 @@ pub(crate) async fn run(args: ProverInitArgs, shell: &Shell) -> anyhow::Result<( init_bellman_cuda(shell, args.bellman_cuda_config).await?; + if let Some(prover_db) = &args.database_config { + let spinner = Spinner::new(MSG_INITIALIZING_DATABASES_SPINNER); + + let mut secrets = chain_config.get_secrets_config()?; + set_prover_database(&mut secrets, &prover_db.database_config)?; + secrets.save_with_base_path(shell, &chain_config.configs)?; + initialize_prover_database( + shell, + &prover_db.database_config, + ecosystem_config.link_to_code.clone(), + prover_db.dont_drop, + ) + .await?; + + spinner.finish(); + } + logger::outro(MSG_PROVER_INITIALIZED); Ok(()) } @@ -138,3 +172,29 @@ fn get_object_store_config( Ok(object_store) } + +async fn initialize_prover_database( + shell: &Shell, + prover_db_config: &DatabaseConfig, + link_to_code: PathBuf, + dont_drop: bool, +) -> anyhow::Result<()> { + if global_config().verbose { + logger::debug(MSG_INITIALIZING_PROVER_DATABASE) + } + if !dont_drop { + drop_db_if_exists(prover_db_config) + .await + .context(MSG_FAILED_TO_DROP_PROVER_DATABASE_ERR)?; + init_db(prover_db_config).await?; + } + let path_to_prover_migration = link_to_code.join(PROVER_MIGRATIONS); + migrate_db( + shell, + path_to_prover_migration, + &prover_db_config.full_url(), + ) + .await?; + + Ok(()) +} diff --git a/zk_toolbox/crates/zk_inception/src/commands/prover/run.rs b/zk_toolbox/crates/zk_inception/src/commands/prover/run.rs index 5497db8a21e0..056723836662 100644 --- a/zk_toolbox/crates/zk_inception/src/commands/prover/run.rs +++ b/zk_toolbox/crates/zk_inception/src/commands/prover/run.rs @@ -13,9 +13,10 @@ use super::{ use crate::messages::{ MSG_BELLMAN_CUDA_DIR_ERR, MSG_CHAIN_NOT_FOUND_ERR, MSG_MISSING_COMPONENT_ERR, MSG_RUNNING_COMPRESSOR, MSG_RUNNING_COMPRESSOR_ERR, MSG_RUNNING_PROVER, MSG_RUNNING_PROVER_ERR, - MSG_RUNNING_PROVER_GATEWAY, MSG_RUNNING_PROVER_GATEWAY_ERR, 
MSG_RUNNING_WITNESS_GENERATOR, - MSG_RUNNING_WITNESS_GENERATOR_ERR, MSG_RUNNING_WITNESS_VECTOR_GENERATOR, - MSG_RUNNING_WITNESS_VECTOR_GENERATOR_ERR, MSG_WITNESS_GENERATOR_ROUND_ERR, + MSG_RUNNING_PROVER_GATEWAY, MSG_RUNNING_PROVER_GATEWAY_ERR, MSG_RUNNING_PROVER_JOB_MONITOR, + MSG_RUNNING_WITNESS_GENERATOR, MSG_RUNNING_WITNESS_GENERATOR_ERR, + MSG_RUNNING_WITNESS_VECTOR_GENERATOR, MSG_RUNNING_WITNESS_VECTOR_GENERATOR_ERR, + MSG_WITNESS_GENERATOR_ROUND_ERR, }; pub(crate) async fn run(args: ProverRunArgs, shell: &Shell) -> anyhow::Result<()> { @@ -39,6 +40,7 @@ pub(crate) async fn run(args: ProverRunArgs, shell: &Shell) -> anyhow::Result<() } Some(ProverComponent::Prover) => run_prover(shell, &chain)?, Some(ProverComponent::Compressor) => run_compressor(shell, &chain, &ecosystem_config)?, + Some(ProverComponent::ProverJobMonitor) => run_prover_job_monitor(shell, &chain)?, None => anyhow::bail!(MSG_MISSING_COMPONENT_ERR), } @@ -127,3 +129,13 @@ fn run_compressor( cmd = cmd.with_force_run(); cmd.run().context(MSG_RUNNING_COMPRESSOR_ERR) } + +fn run_prover_job_monitor(shell: &Shell, chain: &ChainConfig) -> anyhow::Result<()> { + logger::info(MSG_RUNNING_PROVER_JOB_MONITOR); + let config_path = chain.path_to_general_config(); + let secrets_path = chain.path_to_secrets_config(); + + let mut cmd = Cmd::new(cmd!(shell, "cargo run --release --bin zksync_prover_job_monitor -- --config-path={config_path} --secrets-path={secrets_path}")); + cmd = cmd.with_force_run(); + cmd.run().context(MSG_RUNNING_PROVER_JOB_MONITOR) +} diff --git a/zk_toolbox/crates/zk_inception/src/messages.rs b/zk_toolbox/crates/zk_inception/src/messages.rs index f0e46aaf4869..1ec2b006452f 100644 --- a/zk_toolbox/crates/zk_inception/src/messages.rs +++ b/zk_toolbox/crates/zk_inception/src/messages.rs @@ -259,6 +259,7 @@ pub(super) const MSG_GENERATING_SK_SPINNER: &str = "Generating setup keys..."; pub(super) const MSG_SK_GENERATED: &str = "Setup keys generated successfully"; pub(super) const MSG_MISSING_COMPONENT_ERR: &str = "Missing component"; pub(super) const MSG_RUNNING_PROVER_GATEWAY: &str = "Running gateway"; +pub(super) const MSG_RUNNING_PROVER_JOB_MONITOR: &str = "Running prover job monitor"; pub(super) const MSG_RUNNING_WITNESS_GENERATOR: &str = "Running witness generator"; pub(super) const MSG_RUNNING_WITNESS_VECTOR_GENERATOR: &str = "Running witness vector generator"; pub(super) const MSG_RUNNING_PROVER: &str = "Running prover"; diff --git a/zk_toolbox/crates/zk_supervisor/Cargo.toml b/zk_toolbox/crates/zk_supervisor/Cargo.toml index e1225de96d32..e24c88f3ec25 100644 --- a/zk_toolbox/crates/zk_supervisor/Cargo.toml +++ b/zk_toolbox/crates/zk_supervisor/Cargo.toml @@ -23,3 +23,4 @@ xshell.workspace = true serde.workspace = true clap-markdown.workspace = true futures.workspace = true +serde_json.workspace = true diff --git a/zk_toolbox/crates/zk_supervisor/README.md b/zk_toolbox/crates/zk_supervisor/README.md index 4648fe6cb366..1f880cdcb30a 100644 --- a/zk_toolbox/crates/zk_supervisor/README.md +++ b/zk_toolbox/crates/zk_supervisor/README.md @@ -5,6 +5,7 @@ This document contains the help content for the `zk_supervisor` command-line pro **Command Overview:** - [`zk_supervisor`↴](#zk_supervisor) +- [`zk_supervisor prover-version`↴](#zk_supervisor-prover-version) - [`zk_supervisor database`↴](#zk_supervisor-database) - [`zk_supervisor database check-sqlx-data`↴](#zk_supervisor-database-check-sqlx-data) - [`zk_supervisor database drop`↴](#zk_supervisor-database-drop) @@ -44,6 +45,12 @@ ZK Toolbox is a set of tools for 
working with zk stack. - `--chain ` — Chain to use - `--ignore-prerequisites` — Ignores prerequisites checks +## `zk_supervisor prover-version` + +Gets information about current protocol version of provers in `zksync-era` and snark wrapper hash. + +**Usage:** `zk_supervisor prover-version` + ## `zk_supervisor database` Database related commands diff --git a/zk_toolbox/crates/zk_supervisor/src/commands/mod.rs b/zk_toolbox/crates/zk_supervisor/src/commands/mod.rs index 99a8fa5e0a5f..181ce50c2134 100644 --- a/zk_toolbox/crates/zk_supervisor/src/commands/mod.rs +++ b/zk_toolbox/crates/zk_supervisor/src/commands/mod.rs @@ -3,5 +3,6 @@ pub mod database; pub mod fmt; pub mod lint; pub(crate) mod lint_utils; +pub mod prover_version; pub mod snapshot; pub mod test; diff --git a/zk_toolbox/crates/zk_supervisor/src/commands/prover_version.rs b/zk_toolbox/crates/zk_supervisor/src/commands/prover_version.rs new file mode 100644 index 000000000000..479f796294fa --- /dev/null +++ b/zk_toolbox/crates/zk_supervisor/src/commands/prover_version.rs @@ -0,0 +1,41 @@ +use std::{fs, path::Path}; + +use common::logger; +use config::EcosystemConfig; +use xshell::{cmd, Shell}; + +pub async fn run(shell: &Shell) -> anyhow::Result<()> { + let link_to_code = EcosystemConfig::from_file(shell)?.link_to_code; + let link_to_prover = link_to_code.join("prover"); + + let protocol_version = get_protocol_version(shell, &link_to_prover).await?; + let snark_wrapper = get_snark_wrapper(&link_to_prover).await?; + + logger::info(format!( + "Current protocol version found in zksync-era: {}, snark_wrapper: {}", + protocol_version, snark_wrapper + )); + + Ok(()) +} + +async fn get_protocol_version(shell: &Shell, link_to_prover: &Path) -> anyhow::Result { + shell.change_dir(link_to_prover); + let protocol_version = cmd!(shell, "cargo run --release --bin prover_version").read()?; + + Ok(protocol_version) +} + +async fn get_snark_wrapper(link_to_prover: &Path) -> anyhow::Result { + let path = + link_to_prover.join("crates/bin/vk_setup_data_generator_server_fri/data/commitments.json"); + let file = fs::File::open(path).expect("Could not find commitments file in zksync-era"); + let json: serde_json::Value = + serde_json::from_reader(file).expect("Could not parse commitments.json"); + + let snark_wrapper = json + .get("snark_wrapper") + .expect("Could not find snark_wrapper in commitments.json"); + + Ok(snark_wrapper.to_string()) +} diff --git a/zk_toolbox/crates/zk_supervisor/src/main.rs b/zk_toolbox/crates/zk_supervisor/src/main.rs index 965def9263aa..9a1c1ad74bcd 100644 --- a/zk_toolbox/crates/zk_supervisor/src/main.rs +++ b/zk_toolbox/crates/zk_supervisor/src/main.rs @@ -10,9 +10,9 @@ use common::{ }; use config::EcosystemConfig; use messages::{ - msg_global_chain_does_not_exist, MSG_SUBCOMMAND_CLEAN, MSG_SUBCOMMAND_DATABASE_ABOUT, - MSG_SUBCOMMAND_FMT_ABOUT, MSG_SUBCOMMAND_LINT_ABOUT, MSG_SUBCOMMAND_SNAPSHOTS_CREATOR_ABOUT, - MSG_SUBCOMMAND_TESTS_ABOUT, + msg_global_chain_does_not_exist, MSG_PROVER_VERSION_ABOUT, MSG_SUBCOMMAND_CLEAN, + MSG_SUBCOMMAND_DATABASE_ABOUT, MSG_SUBCOMMAND_FMT_ABOUT, MSG_SUBCOMMAND_LINT_ABOUT, + MSG_SUBCOMMAND_SNAPSHOTS_CREATOR_ABOUT, MSG_SUBCOMMAND_TESTS_ABOUT, }; use xshell::Shell; @@ -47,6 +47,8 @@ enum SupervisorSubcommands { Fmt(FmtArgs), #[command(hide = true)] Markdown, + #[command(about = MSG_PROVER_VERSION_ABOUT)] + ProverVersion, } #[derive(Parser, Debug)] @@ -103,6 +105,7 @@ async fn run_subcommand(args: Supervisor, shell: &Shell) -> anyhow::Result<()> { } SupervisorSubcommands::Lint(args) => 
commands::lint::run(shell, args)?, SupervisorSubcommands::Fmt(args) => commands::fmt::run(shell.clone(), args).await?, + SupervisorSubcommands::ProverVersion => commands::prover_version::run(shell).await?, } Ok(()) } diff --git a/zk_toolbox/crates/zk_supervisor/src/messages.rs b/zk_toolbox/crates/zk_supervisor/src/messages.rs index df0cf0c311df..de25be281328 100644 --- a/zk_toolbox/crates/zk_supervisor/src/messages.rs +++ b/zk_toolbox/crates/zk_supervisor/src/messages.rs @@ -8,6 +8,7 @@ pub(super) fn msg_global_chain_does_not_exist(chain: &str, available_chains: &st } // Subcommands help +pub(super) const MSG_PROVER_VERSION_ABOUT: &str = "Protocol version used by provers"; pub(super) const MSG_SUBCOMMAND_DATABASE_ABOUT: &str = "Database related commands"; pub(super) const MSG_SUBCOMMAND_TESTS_ABOUT: &str = "Run tests"; pub(super) const MSG_SUBCOMMAND_CLEAN: &str = "Clean artifacts"; From c83cca8fe7fa105ec6b1491e4efb9f9e4bd66d41 Mon Sep 17 00:00:00 2001 From: Artem Fomiuk <88630083+Artemka374@users.noreply.github.com> Date: Thu, 22 Aug 2024 19:46:22 +0300 Subject: [PATCH 03/39] feat: External prover API metrics, refactoring (#2630) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit ## What ❔ Added metrics for external proof integration API, refactored code a little bit ## Why ❔ ## Checklist - [ ] PR title corresponds to the body of PR (we generate changelog entries from PRs). - [ ] Tests for the changes have been added / updated. - [ ] Documentation comments have been added / updated. - [ ] Code has been formatted via `zk fmt` and `zk lint`. --- Cargo.lock | 1 + core/lib/dal/src/proof_generation_dal.rs | 2 +- .../external_proof_integration_api/Cargo.toml | 1 + .../external_proof_integration_api/src/lib.rs | 1 + .../src/metrics.rs | 55 +++++++++++++ .../src/processor.rs | 78 +++++++++++-------- .../layers/external_proof_integration_api.rs | 18 ++--- 7 files changed, 115 insertions(+), 41 deletions(-) create mode 100644 core/node/external_proof_integration_api/src/metrics.rs diff --git a/Cargo.lock b/Cargo.lock index 8fd242326638..8b8349bf3c21 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -8755,6 +8755,7 @@ dependencies = [ "bincode", "tokio", "tracing", + "vise", "zksync_basic_types", "zksync_config", "zksync_dal", diff --git a/core/lib/dal/src/proof_generation_dal.rs b/core/lib/dal/src/proof_generation_dal.rs index f83f026073e6..dada6c69ed34 100644 --- a/core/lib/dal/src/proof_generation_dal.rs +++ b/core/lib/dal/src/proof_generation_dal.rs @@ -88,7 +88,7 @@ impl ProofGenerationDal<'_, '_> { Ok(result) } - pub async fn get_available_batch(&mut self) -> DalResult { + pub async fn get_latest_proven_batch(&mut self) -> DalResult { let result = sqlx::query!( r#" SELECT diff --git a/core/node/external_proof_integration_api/Cargo.toml b/core/node/external_proof_integration_api/Cargo.toml index ae7cd4c4d031..2e8176cd8832 100644 --- a/core/node/external_proof_integration_api/Cargo.toml +++ b/core/node/external_proof_integration_api/Cargo.toml @@ -21,3 +21,4 @@ zksync_dal.workspace = true tokio.workspace = true bincode.workspace = true anyhow.workspace = true +vise.workspace = true diff --git a/core/node/external_proof_integration_api/src/lib.rs b/core/node/external_proof_integration_api/src/lib.rs index 51fecf8c23fc..b1ef33b44c10 100644 --- a/core/node/external_proof_integration_api/src/lib.rs +++ b/core/node/external_proof_integration_api/src/lib.rs @@ -1,4 +1,5 @@ mod error; +mod metrics; mod processor; use std::{net::SocketAddr, sync::Arc}; diff --git 
a/core/node/external_proof_integration_api/src/metrics.rs b/core/node/external_proof_integration_api/src/metrics.rs new file mode 100644 index 000000000000..70815f542a05 --- /dev/null +++ b/core/node/external_proof_integration_api/src/metrics.rs @@ -0,0 +1,55 @@ +use std::time::Duration; + +use tokio::time::Instant; +use vise::{EncodeLabelSet, EncodeLabelValue, Histogram, LabeledFamily, Metrics}; + +#[derive(Debug, Clone, Copy, PartialEq, Eq, Hash, EncodeLabelSet, EncodeLabelValue)] +#[metrics(label = "outcome", rename_all = "snake_case")] +pub(crate) enum CallOutcome { + Success, + Failure, +} + +#[derive(Debug, Clone, Copy, PartialEq, Eq, Hash, EncodeLabelSet, EncodeLabelValue)] +#[metrics(label = "type", rename_all = "snake_case")] +pub(crate) enum Method { + GetLatestProofGenerationData, + GetSpecificProofGenerationData, + VerifyProof, +} + +#[derive(Debug, Metrics)] +#[metrics(prefix = "external_proof_integration_api")] +pub(crate) struct ProofIntegrationApiMetrics { + #[metrics(labels = ["method", "outcome"], buckets = vise::Buckets::LATENCIES)] + pub call_latency: LabeledFamily<(Method, CallOutcome), Histogram<Duration>, 2>, +} + +pub(crate) struct MethodCallGuard { + method_type: Method, + outcome: CallOutcome, + started_at: Instant, +} + +impl MethodCallGuard { + pub(crate) fn new(method_type: Method) -> Self { + MethodCallGuard { + method_type, + outcome: CallOutcome::Failure, + started_at: Instant::now(), + } + } + + pub(crate) fn mark_successful(&mut self) { + self.outcome = CallOutcome::Success; + } +} + +impl Drop for MethodCallGuard { + fn drop(&mut self) { + METRICS.call_latency[&(self.method_type, self.outcome)].observe(self.started_at.elapsed()); + } +} + +#[vise::register] +pub(crate) static METRICS: vise::Global<ProofIntegrationApiMetrics> = vise::Global::new(); diff --git a/core/node/external_proof_integration_api/src/processor.rs b/core/node/external_proof_integration_api/src/processor.rs index a15e45e48037..e9e56df4a068 100644 --- a/core/node/external_proof_integration_api/src/processor.rs +++ b/core/node/external_proof_integration_api/src/processor.rs @@ -17,7 +17,10 @@ use zksync_prover_interface::{ outputs::L1BatchProofForL1, }; -use crate::error::ProcessorError; +use crate::{ + error::ProcessorError, + metrics::{Method, MethodCallGuard}, +}; #[derive(Clone)] pub(crate) struct Processor { @@ -39,6 +42,36 @@ impl Processor { } } + pub(crate) async fn verify_proof( + &self, + Path(l1_batch_number): Path<u32>, + Json(payload): Json<VerifyProofRequest>, + ) -> Result<(), ProcessorError> { + let mut guard = MethodCallGuard::new(Method::VerifyProof); + + let l1_batch_number = L1BatchNumber(l1_batch_number); + tracing::info!( + "Received request to verify proof for batch: {:?}", + l1_batch_number + ); + + let serialized_proof = bincode::serialize(&payload.0)?; + let expected_proof = bincode::serialize( + &self + .blob_store + .get::<L1BatchProofForL1>((l1_batch_number, payload.0.protocol_version)) + .await?, + )?; + + if serialized_proof != expected_proof { + return Err(ProcessorError::InvalidProof); + } + + guard.mark_successful(); + + Ok(()) + } + #[tracing::instrument(skip_all)] pub(crate) async fn get_proof_generation_data( &mut self, @@ -46,13 +79,18 @@ impl Processor { ) -> Result<Json<ProofGenerationDataResponse>, ProcessorError> { tracing::info!("Received request for proof generation data: {:?}", request); + let mut guard = match request.0 .0 { + Some(_) => MethodCallGuard::new(Method::GetSpecificProofGenerationData), + None => MethodCallGuard::new(Method::GetLatestProofGenerationData), + }; + let latest_available_batch = self .pool .connection() .await .unwrap()
.proof_generation_dal() - .get_available_batch() + .get_latest_proven_batch() .await?; let l1_batch_number = if let Some(l1_batch_number) = request.0 .0 { @@ -74,9 +112,13 @@ impl Processor { .await; match proof_generation_data { - Ok(data) => Ok(Json(ProofGenerationDataResponse::Success(Some(Box::new( - data, - ))))), + Ok(data) => { + guard.mark_successful(); + + Ok(Json(ProofGenerationDataResponse::Success(Some(Box::new( + data, + ))))) + } Err(err) => Err(err), } } @@ -161,30 +203,4 @@ impl Processor { l1_verifier_config: protocol_version.l1_verifier_config, }) } - - pub(crate) async fn verify_proof( - &self, - Path(l1_batch_number): Path<u32>, - Json(payload): Json<VerifyProofRequest>, - ) -> Result<(), ProcessorError> { - let l1_batch_number = L1BatchNumber(l1_batch_number); - tracing::info!( - "Received request to verify proof for batch: {:?}", - l1_batch_number - ); - - let serialized_proof = bincode::serialize(&payload.0)?; - let expected_proof = bincode::serialize( - &self - .blob_store - .get::<L1BatchProofForL1>((l1_batch_number, payload.0.protocol_version)) - .await?, - )?; - - if serialized_proof != expected_proof { - return Err(ProcessorError::InvalidProof); - } - - Ok(()) - } } diff --git a/core/node/node_framework/src/implementations/layers/external_proof_integration_api.rs b/core/node/node_framework/src/implementations/layers/external_proof_integration_api.rs index 6f8805bc5fa3..9678c0a97932 100644 --- a/core/node/node_framework/src/implementations/layers/external_proof_integration_api.rs +++ b/core/node/node_framework/src/implementations/layers/external_proof_integration_api.rs @@ -26,7 +26,7 @@ pub struct ExternalProofIntegrationApiLayer { #[derive(Debug, FromContext)] #[context(crate = crate)] pub struct Input { - pub master_pool: PoolResource<MasterPool>, + pub replica_pool: PoolResource<ReplicaPool>, pub object_store: ObjectStoreResource, } @@ -34,7 +34,7 @@ pub struct Input { #[context(crate = crate)] pub struct Output { #[context(task)] - pub task: ProverApiTask, + pub task: ExternalProofIntegrationApiTask, } impl ExternalProofIntegrationApiLayer { @@ -59,13 +59,13 @@ impl WiringLayer for ExternalProofIntegrationApiLayer { } async fn wire(self, input: Self::Input) -> Result<Self::Output, WiringError> { - let main_pool = input.master_pool.get().await?; + let replica_pool = input.replica_pool.get().await?; let blob_store = input.object_store.0; - let task = ProverApiTask { + let task = ExternalProofIntegrationApiTask { external_proof_integration_api_config: self.external_proof_integration_api_config, blob_store, - main_pool, + replica_pool, commitment_mode: self.commitment_mode, }; @@ -74,15 +74,15 @@ impl WiringLayer for ExternalProofIntegrationApiLayer { } #[derive(Debug)] -pub struct ProverApiTask { +pub struct ExternalProofIntegrationApiTask { external_proof_integration_api_config: ExternalProofIntegrationApiConfig, blob_store: Arc<dyn ObjectStore>, - main_pool: ConnectionPool<Core>, + replica_pool: ConnectionPool<Core>, commitment_mode: L1BatchCommitmentMode, } #[async_trait::async_trait] -impl Task for ProverApiTask { +impl Task for ExternalProofIntegrationApiTask { fn id(&self) -> TaskId { "external_proof_integration_api".into() } @@ -91,7 +91,7 @@ impl Task for ProverApiTask { zksync_external_proof_integration_api::run_server( self.external_proof_integration_api_config, self.blob_store, - self.main_pool, + self.replica_pool, self.commitment_mode, stop_receiver.0, ) From 835aec32f642b0d0d5fc3a746bd2cb156f0a9279 Mon Sep 17 00:00:00 2001 From: zksync-era-bot <147085853+zksync-era-bot@users.noreply.github.com> Date: Fri, 23 Aug 2024 10:10:13 +0300 Subject: [PATCH 04/39]
chore(main): release core 24.21.0 (#2714) :robot: I have created a release *beep* *boop* --- ## [24.21.0](https://github.com/matter-labs/zksync-era/compare/core-v24.20.0...core-v24.21.0) (2024-08-22) ### Features * External prover API metrics, refactoring ([#2630](https://github.com/matter-labs/zksync-era/issues/2630)) ([c83cca8](https://github.com/matter-labs/zksync-era/commit/c83cca8fe7fa105ec6b1491e4efb9f9e4bd66d41)) --- This PR was generated with [Release Please](https://github.com/googleapis/release-please). See [documentation](https://github.com/googleapis/release-please#release-please). --------- Co-authored-by: zksync-era-bot --- .github/release-please/manifest.json | 2 +- Cargo.lock | 2 +- core/CHANGELOG.md | 7 +++++++ core/bin/external_node/Cargo.toml | 2 +- 4 files changed, 10 insertions(+), 3 deletions(-) diff --git a/.github/release-please/manifest.json b/.github/release-please/manifest.json index ffd9838d6c31..232939b78334 100644 --- a/.github/release-please/manifest.json +++ b/.github/release-please/manifest.json @@ -1,5 +1,5 @@ { - "core": "24.20.0", + "core": "24.21.0", "prover": "16.4.0", "zk_toolbox": "0.1.2" } diff --git a/Cargo.lock b/Cargo.lock index 8b8349bf3c21..6c6a8d96123f 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -8673,7 +8673,7 @@ dependencies = [ [[package]] name = "zksync_external_node" -version = "24.20.0" +version = "24.21.0" dependencies = [ "anyhow", "assert_matches", diff --git a/core/CHANGELOG.md b/core/CHANGELOG.md index e727a8326603..cc0590a79d20 100644 --- a/core/CHANGELOG.md +++ b/core/CHANGELOG.md @@ -1,5 +1,12 @@ # Changelog +## [24.21.0](https://github.com/matter-labs/zksync-era/compare/core-v24.20.0...core-v24.21.0) (2024-08-22) + + +### Features + +* External prover API metrics, refactoring ([#2630](https://github.com/matter-labs/zksync-era/issues/2630)) ([c83cca8](https://github.com/matter-labs/zksync-era/commit/c83cca8fe7fa105ec6b1491e4efb9f9e4bd66d41)) + ## [24.20.0](https://github.com/matter-labs/zksync-era/compare/core-v24.19.0...core-v24.20.0) (2024-08-21) diff --git a/core/bin/external_node/Cargo.toml b/core/bin/external_node/Cargo.toml index 29b839c6a1fe..33a460daba50 100644 --- a/core/bin/external_node/Cargo.toml +++ b/core/bin/external_node/Cargo.toml @@ -1,7 +1,7 @@ [package] name = "zksync_external_node" description = "Non-validator ZKsync node" -version = "24.20.0" # x-release-please-version +version = "24.21.0" # x-release-please-version edition.workspace = true authors.workspace = true homepage.workspace = true From 8d0eee7ca8fe117b2ee286c6080bfa0057ee31ae Mon Sep 17 00:00:00 2001 From: perekopskiy <53865202+perekopskiy@users.noreply.github.com> Date: Fri, 23 Aug 2024 12:47:37 +0300 Subject: [PATCH 05/39] feat: Change default_protective_reads_persistence_enabled to false (#2716) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit ## What ❔ Changes default_protective_reads_persistence_enabled to false both for main and external node ## Why ❔ For EN: it was confirmed that it works well without protective reads For main node: it's expected that vm_runner_protective_reads is run by default ## Checklist - [ ] PR title corresponds to the body of PR (we generate changelog entries from PRs). - [ ] Tests for the changes have been added / updated. - [ ] Documentation comments have been added / updated. - [ ] Code has been formatted via `zk fmt` and `zk lint`. 
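For reference, the change relies entirely on plain `#[serde(default)]` semantics: when the flag is absent from the config, it now deserializes to `bool::default()`, i.e. `false`, which is why the `default_protective_reads_persistence_enabled()` helpers can be deleted. A minimal standalone sketch (struct trimmed to the single field; `serde_json` is used here just for brevity, the real configs come from YAML/env):

```rust
use serde::Deserialize;

#[derive(Debug, Deserialize)]
struct ExperimentalDbConfig {
    // Plain `#[serde(default)]` falls back to `bool::default()` (== `false`)
    // when the field is missing, replacing the old `default = "..."` helper fn.
    #[serde(default)]
    protective_reads_persistence_enabled: bool,
}

fn main() {
    // Field omitted entirely: the new default (`false`) applies.
    let cfg: ExperimentalDbConfig = serde_json::from_str("{}").unwrap();
    assert!(!cfg.protective_reads_persistence_enabled);

    // An explicit value still overrides the default.
    let cfg: ExperimentalDbConfig =
        serde_json::from_str(r#"{"protective_reads_persistence_enabled":true}"#).unwrap();
    assert!(cfg.protective_reads_persistence_enabled);
}
```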
--- core/bin/external_node/src/config/mod.rs | 9 ++------- core/lib/config/src/configs/chain.rs | 9 +++------ core/lib/config/src/configs/experimental.rs | 12 ++++-------- core/lib/protobuf_config/src/chain.rs | 7 +++---- .../layers/state_keeper/output_handler.rs | 9 +++------ 5 files changed, 15 insertions(+), 31 deletions(-) diff --git a/core/bin/external_node/src/config/mod.rs b/core/bin/external_node/src/config/mod.rs index 568d3195bbea..cd4e845b8f3e 100644 --- a/core/bin/external_node/src/config/mod.rs +++ b/core/bin/external_node/src/config/mod.rs @@ -391,8 +391,7 @@ pub(crate) struct OptionalENConfig { /// Configures whether to persist protective reads when persisting L1 batches in the state keeper. /// Protective reads are never required by full nodes so far, not until such a node runs a full Merkle tree /// (presumably, to participate in L1 batch proving). - /// By default, set to `true` as a temporary safety measure. - #[serde(default = "OptionalENConfig::default_protective_reads_persistence_enabled")] + #[serde(default)] pub protective_reads_persistence_enabled: bool, /// Address of the L1 diamond proxy contract used by the consistency checker to match with the origin of logs emitted /// by commit transactions. If not set, it will not be verified. @@ -645,7 +644,7 @@ impl OptionalENConfig { .db_config .as_ref() .map(|a| a.experimental.protective_reads_persistence_enabled) - .unwrap_or(true), + .unwrap_or_default(), merkle_tree_processing_delay_ms: load_config_or_default!( general_config.db_config, experimental.processing_delay_ms, @@ -769,10 +768,6 @@ impl OptionalENConfig { 10 } - const fn default_protective_reads_persistence_enabled() -> bool { - true - } - const fn default_mempool_cache_update_interval_ms() -> u64 { 50 } diff --git a/core/lib/config/src/configs/chain.rs b/core/lib/config/src/configs/chain.rs index 6ac70b27b84a..7e33f6964bb7 100644 --- a/core/lib/config/src/configs/chain.rs +++ b/core/lib/config/src/configs/chain.rs @@ -127,8 +127,9 @@ pub struct StateKeeperConfig { /// Configures whether to persist protective reads when persisting L1 batches in the state keeper. /// Protective reads can be written asynchronously in VM runner instead. - /// By default, set to `true` as a temporary safety measure. - #[serde(default = "StateKeeperConfig::default_protective_reads_persistence_enabled")] + /// By default, set to `false` as it is expected that a separate `vm_runner_protective_reads` component + /// which is capable of saving protective reads is run. + #[serde(default)] pub protective_reads_persistence_enabled: bool, // Base system contract hashes, required only for generating genesis config. @@ -143,10 +144,6 @@ pub struct StateKeeperConfig { } impl StateKeeperConfig { - fn default_protective_reads_persistence_enabled() -> bool { - true - } - /// Creates a config object suitable for use in unit tests. /// Values mostly repeat the values used in the localhost environment. pub fn for_tests() -> Self { diff --git a/core/lib/config/src/configs/experimental.rs b/core/lib/config/src/configs/experimental.rs index 8309b36e7f22..097f3c4112b3 100644 --- a/core/lib/config/src/configs/experimental.rs +++ b/core/lib/config/src/configs/experimental.rs @@ -16,8 +16,9 @@ pub struct ExperimentalDBConfig { /// Configures whether to persist protective reads when persisting L1 batches in the state keeper. /// Protective reads are never required by full nodes so far, not until such a node runs a full Merkle tree /// (presumably, to participate in L1 batch proving). 
- /// By default, set to `true` as a temporary safety measure. - #[serde(default = "ExperimentalDBConfig::default_protective_reads_persistence_enabled")] + /// By default, set to `false` as it is expected that a separate `vm_runner_protective_reads` component + /// which is capable of saving protective reads is run. + #[serde(default)] pub protective_reads_persistence_enabled: bool, // Merkle tree config /// Processing delay between processing L1 batches in the Merkle tree. @@ -36,8 +37,7 @@ impl Default for ExperimentalDBConfig { state_keeper_db_block_cache_capacity_mb: Self::default_state_keeper_db_block_cache_capacity_mb(), state_keeper_db_max_open_files: None, - protective_reads_persistence_enabled: - Self::default_protective_reads_persistence_enabled(), + protective_reads_persistence_enabled: false, processing_delay_ms: Self::default_merkle_tree_processing_delay_ms(), include_indices_and_filters_in_block_cache: false, } @@ -53,10 +53,6 @@ impl ExperimentalDBConfig { self.state_keeper_db_block_cache_capacity_mb * super::BYTES_IN_MEGABYTE } - const fn default_protective_reads_persistence_enabled() -> bool { - true - } - const fn default_merkle_tree_processing_delay_ms() -> u64 { 100 } diff --git a/core/lib/protobuf_config/src/chain.rs b/core/lib/protobuf_config/src/chain.rs index fafecc0131cd..f91bf07e43f8 100644 --- a/core/lib/protobuf_config/src/chain.rs +++ b/core/lib/protobuf_config/src/chain.rs @@ -78,10 +78,9 @@ impl ProtoRepr for proto::StateKeeper { max_circuits_per_batch: required(&self.max_circuits_per_batch) .and_then(|x| Ok((*x).try_into()?)) .context("max_circuits_per_batch")?, - protective_reads_persistence_enabled: *required( - &self.protective_reads_persistence_enabled, - ) - .context("protective_reads_persistence_enabled")?, + protective_reads_persistence_enabled: self + .protective_reads_persistence_enabled + .unwrap_or_default(), // We need these values only for instantiating configs from environmental variables, so it's not // needed during the initialization from files diff --git a/core/node/node_framework/src/implementations/layers/state_keeper/output_handler.rs b/core/node/node_framework/src/implementations/layers/state_keeper/output_handler.rs index f639d72fe40a..5f63e4e19475 100644 --- a/core/node/node_framework/src/implementations/layers/state_keeper/output_handler.rs +++ b/core/node/node_framework/src/implementations/layers/state_keeper/output_handler.rs @@ -42,8 +42,8 @@ pub struct OutputHandlerLayer { /// before they are included into L2 blocks. pre_insert_txs: bool, /// Whether protective reads persistence is enabled. - /// Must be `true` for any node that maintains a full Merkle Tree (e.g. any instance of main node). - /// May be set to `false` for nodes that do not participate in the sequencing process (e.g. external nodes). + /// May be set to `false` for nodes that do not participate in the sequencing process (e.g. external nodes) + /// or run `vm_runner_protective_reads` component. 
protective_reads_persistence_enabled: bool, } @@ -68,7 +68,7 @@ impl OutputHandlerLayer { l2_shared_bridge_addr, l2_block_seal_queue_capacity, pre_insert_txs: false, - protective_reads_persistence_enabled: true, + protective_reads_persistence_enabled: false, } } @@ -112,9 +112,6 @@ impl WiringLayer for OutputHandlerLayer { persistence = persistence.with_tx_insertion(); } if !self.protective_reads_persistence_enabled { - // **Important:** Disabling protective reads persistence is only sound if the node will never - // run a full Merkle tree OR an accompanying protective-reads-writer is being run. - tracing::warn!("Disabling persisting protective reads; this should be safe, but is considered an experimental option at the moment"); persistence = persistence.without_protective_reads(); } From 9080428ed427bb741317a807263329621c014a16 Mon Sep 17 00:00:00 2001 From: Yury Akudovich Date: Fri, 23 Aug 2024 12:21:18 +0200 Subject: [PATCH 06/39] feat: Add prover-job-monitor Dockerfile and build rules (#2719) --- .github/workflows/build-prover-template.yml | 4 ++-- docker/prover-job-monitor/Dockerfile | 15 +++++++++++++++ infrastructure/zk/src/docker.ts | 2 ++ 3 files changed, 19 insertions(+), 2 deletions(-) create mode 100644 docker/prover-job-monitor/Dockerfile diff --git a/.github/workflows/build-prover-template.yml b/.github/workflows/build-prover-template.yml index 25bf14728dd6..7591c45b49e4 100644 --- a/.github/workflows/build-prover-template.yml +++ b/.github/workflows/build-prover-template.yml @@ -53,6 +53,7 @@ jobs: - prover-gpu-fri - witness-vector-generator - prover-fri-gateway + - prover-job-monitor - proof-fri-gpu-compressor outputs: protocol_version: ${{ steps.protocolversion.outputs.protocol_version }} @@ -163,7 +164,7 @@ jobs: run: | ci_run sccache --show-stats || true ci_run cat /tmp/sccache_log.txt || true - + copy-images: name: Copy images between docker registries needs: build-images @@ -197,4 +198,3 @@ jobs: docker buildx imagetools create \ --tag europe-docker.pkg.dev/matterlabs-infra/matterlabs-docker/${{ matrix.component }}:2.0-${{ needs.build-images.outputs.protocol_version }}-${{ inputs.image_tag_suffix }} \ us-docker.pkg.dev/matterlabs-infra/matterlabs-docker/${{ matrix.component }}:2.0-${{ needs.build-images.outputs.protocol_version }}-${{ inputs.image_tag_suffix }} - diff --git a/docker/prover-job-monitor/Dockerfile b/docker/prover-job-monitor/Dockerfile new file mode 100644 index 000000000000..25d5dcd3af95 --- /dev/null +++ b/docker/prover-job-monitor/Dockerfile @@ -0,0 +1,15 @@ +FROM matterlabs/zksync-build-base:latest as builder + +ARG DEBIAN_FRONTEND=noninteractive + +WORKDIR /usr/src/zksync +COPY . . 
+ +RUN cd prover && cargo build --release --bin zksync_prover_job_monitor + +FROM debian:bookworm-slim +RUN apt-get update && apt-get install -y curl libpq5 ca-certificates && rm -rf /var/lib/apt/lists/* + +COPY --from=builder /usr/src/zksync/prover/target/release/zksync_prover_job_monitor /usr/bin/ + +ENTRYPOINT ["/usr/bin/zksync_prover_job_monitor"] diff --git a/infrastructure/zk/src/docker.ts b/infrastructure/zk/src/docker.ts index 76576fd243cb..27de68d1d98d 100644 --- a/infrastructure/zk/src/docker.ts +++ b/infrastructure/zk/src/docker.ts @@ -12,6 +12,7 @@ const IMAGES = [ 'prover-gpu-fri', 'witness-vector-generator', 'prover-fri-gateway', + 'prover-job-monitor', 'proof-fri-gpu-compressor', 'snapshots-creator', 'verified-sources-fetcher' ] @@ -73,6 +74,7 @@ function defaultTagList(image: string, imageTagSha: string, imageTagShaTS: strin 'external-node', 'contract-verifier', 'prover-fri-gateway', + 'prover-job-monitor', 'snapshots-creator' ].includes(image) ? ['latest', 'latest2.0', `2.0-${imageTagSha}`, `${imageTagSha}`, `2.0-${imageTagShaTS}`, `${imageTagShaTS}`] From d9266e5ef3910732666c00c1324256fb5b54452d Mon Sep 17 00:00:00 2001 From: fyInALT <97101459+fyInALT@users.noreply.github.com> Date: Fri, 23 Aug 2024 23:00:57 +0800 Subject: [PATCH 07/39] feat(zk_toolbox): Add holesky testnet as layer1 network (#2632) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit ## What ❔ Add `holesky` as a `--l1-network` option for the ecosystem commands, so that a testnet can use the Holesky testnet (chain id: 17000) as its L1 network. ## Why ❔ This makes it much easier to deploy a testnet on Holesky. In zk_inception, the L1 chain id is written into the config, and `ecosystem init` also runs chain init; if the configured chain id does not match the L1 endpoint, the command fails, so we could not simply point the L1 endpoint at Holesky before. Adding `holesky` as a `--l1-network` value writes 17000 as the chain id, which makes such deployments straightforward. ## Checklist - [x] PR title corresponds to the body of PR (we generate changelog entries from PRs). - [x] Tests for the changes have been added / updated. - [x] Documentation comments have been added / updated. - [x] Code has been formatted via `zk fmt` and `zk lint`. --------- Co-authored-by: Danil --- core/lib/basic_types/src/network.rs | 6 ++++ zk_toolbox/crates/types/src/l1_network.rs | 2 ++ zk_toolbox/crates/zk_inception/README.md | 2 +- .../src/commands/ecosystem/init.rs | 28 ++++++++++++++----- .../crates/zk_inception/src/messages.rs | 4 +++ 5 files changed, 34 insertions(+), 8 deletions(-) diff --git a/core/lib/basic_types/src/network.rs b/core/lib/basic_types/src/network.rs index 41a5c5c4d73f..3403ec404738 100644 --- a/core/lib/basic_types/src/network.rs +++ b/core/lib/basic_types/src/network.rs @@ -26,6 +26,8 @@ pub enum Network { Goerli, /// Ethereum Sepolia testnet. Sepolia, + /// Ethereum Holešky testnet. + Holesky, /// Self-hosted Ethereum network. Localhost, /// Self-hosted L2 network.
@@ -48,6 +50,7 @@ impl FromStr for Network { "localhost" => Self::Localhost, "localhostL2" => Self::LocalhostL2, "sepolia" => Self::Sepolia, + "holesky" => Self::Holesky, "test" => Self::Test, another => return Err(another.to_owned()), }) @@ -64,6 +67,7 @@ impl fmt::Display for Network { Self::Localhost => write!(f, "localhost"), Self::LocalhostL2 => write!(f, "localhostL2"), Self::Sepolia => write!(f, "sepolia"), + Self::Holesky => write!(f, "holesky"), Self::Unknown => write!(f, "unknown"), Self::Test => write!(f, "test"), } @@ -80,6 +84,7 @@ impl Network { 5 => Self::Goerli, 9 => Self::Localhost, 11155111 => Self::Sepolia, + 17000 => Self::Holesky, 270 => Self::LocalhostL2, _ => Self::Unknown, } @@ -94,6 +99,7 @@ impl Network { Self::Goerli => SLChainId(5), Self::Localhost => SLChainId(9), Self::Sepolia => SLChainId(11155111), + Self::Holesky => SLChainId(17000), Self::LocalhostL2 => SLChainId(270), Self::Unknown => panic!("Unknown chain ID"), Self::Test => panic!("Test chain ID"), diff --git a/zk_toolbox/crates/types/src/l1_network.rs b/zk_toolbox/crates/types/src/l1_network.rs index 822235611a33..cc7b47147548 100644 --- a/zk_toolbox/crates/types/src/l1_network.rs +++ b/zk_toolbox/crates/types/src/l1_network.rs @@ -21,6 +21,7 @@ pub enum L1Network { #[default] Localhost, Sepolia, + Holesky, Mainnet, } @@ -30,6 +31,7 @@ impl L1Network { match self { L1Network::Localhost => 9, L1Network::Sepolia => 11_155_111, + L1Network::Holesky => 17000, L1Network::Mainnet => 1, } } diff --git a/zk_toolbox/crates/zk_inception/README.md b/zk_toolbox/crates/zk_inception/README.md index 8b6368ce8c24..73bfb56cfd39 100644 --- a/zk_toolbox/crates/zk_inception/README.md +++ b/zk_toolbox/crates/zk_inception/README.md @@ -87,7 +87,7 @@ Create a new ecosystem and chain, setting necessary configurations for later ini - `--ecosystem-name ` - `--l1-network ` — L1 Network - Possible values: `localhost`, `sepolia`, `mainnet` + Possible values: `localhost`, `sepolia`, `holesky`, `mainnet` - `--link-to-code ` — Code link - `--chain-name ` diff --git a/zk_toolbox/crates/zk_inception/src/commands/ecosystem/init.rs b/zk_toolbox/crates/zk_inception/src/commands/ecosystem/init.rs index 101d272494a0..fc4a3c9b3201 100644 --- a/zk_toolbox/crates/zk_inception/src/commands/ecosystem/init.rs +++ b/zk_toolbox/crates/zk_inception/src/commands/ecosystem/init.rs @@ -44,7 +44,8 @@ use crate::{ }, }, messages::{ - msg_ecosystem_initialized, msg_initializing_chain, MSG_CHAIN_NOT_INITIALIZED, + msg_ecosystem_initialized, msg_ecosystem_no_found_preexisting_contract, + msg_initializing_chain, MSG_CHAIN_NOT_INITIALIZED, MSG_DEPLOYING_ECOSYSTEM_CONTRACTS_SPINNER, MSG_DEPLOYING_ERC20, MSG_DEPLOYING_ERC20_SPINNER, MSG_ECOSYSTEM_CONTRACTS_PATH_INVALID_ERR, MSG_ECOSYSTEM_CONTRACTS_PATH_PROMPT, MSG_INITIALIZING_ECOSYSTEM, @@ -242,17 +243,30 @@ async fn deploy_ecosystem( } }; + let ecosystem_preexisting_configs_path = + ecosystem_config + .get_preexisting_configs_path() + .join(format!( + "{}.yaml", + ecosystem_config.l1_network.to_string().to_lowercase() + )); + + // Preexisting ecosystem contracts are currently not available for all + // chains, so we need to check whether this file exists.
+ if ecosystem_contracts_path.is_none() && !ecosystem_preexisting_configs_path.exists() { + anyhow::bail!(msg_ecosystem_no_found_preexisting_contract( + &ecosystem_config.l1_network.to_string() + )) + } + let ecosystem_contracts_path = ecosystem_contracts_path.unwrap_or_else(|| match ecosystem_config.l1_network { L1Network::Localhost => { ContractsConfig::get_path_with_base_path(&ecosystem_config.config) } - L1Network::Sepolia | L1Network::Mainnet => ecosystem_config - .get_preexisting_configs_path() - .join(format!( - "{}.yaml", - ecosystem_config.l1_network.to_string().to_lowercase() - )), + L1Network::Sepolia | L1Network::Holesky | L1Network::Mainnet => { + ecosystem_preexisting_configs_path + } }); ContractsConfig::read(shell, ecosystem_contracts_path) diff --git a/zk_toolbox/crates/zk_inception/src/messages.rs b/zk_toolbox/crates/zk_inception/src/messages.rs index 1ec2b006452f..2eef0688b035 100644 --- a/zk_toolbox/crates/zk_inception/src/messages.rs +++ b/zk_toolbox/crates/zk_inception/src/messages.rs @@ -84,6 +84,10 @@ pub(super) const MSG_ERA_OBSERVABILITY_ALREADY_SETUP: &str = "Era observability pub(super) const MSG_DOWNLOADING_ERA_OBSERVABILITY_SPINNER: &str = "Downloading era observability..."; +pub(super) fn msg_ecosystem_no_found_preexisting_contract(chains: &str) -> String { + format!("No preexisting ecosystem contracts found for chain {chains}") +} + pub(super) fn msg_initializing_chain(chain_name: &str) -> String { format!("Initializing chain {chain_name}") } From 58438eb174c30edf62e2ff8abb74567de2a4bea8 Mon Sep 17 00:00:00 2001 From: Ivan Schasny <31857042+ischasny@users.noreply.github.com> Date: Fri, 23 Aug 2024 18:56:14 +0100 Subject: [PATCH 08/39] feat(Base token): add cbt metrics (#2720) * Add cbt-related metrics; * Move last hardcoded cbt-related properties to the config.
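For reference, both retry paths added to the persister below follow the same shape: a bounded number of attempts, per-attempt latency recorded under a success/failure label, and the last error surfaced once the attempts are exhausted. A dependency-free sketch of that shape (synchronous and with hypothetical names; the real code uses `vise` histograms and `tokio::time::sleep`):

```rust
use std::time::{Duration, Instant};

// `observe(success, latency)` is a hypothetical stand-in for the vise
// histogram lookup of the form `METRICS.family[&labels].observe(elapsed)`.
fn retry_with_metrics<T, E: std::fmt::Display>(
    max_attempts: u32,
    sleep: Duration,
    mut op: impl FnMut() -> Result<T, E>,
    mut observe: impl FnMut(bool, Duration),
) -> Result<T, E> {
    let mut last_error = None;
    for attempt in 0..max_attempts {
        let started_at = Instant::now();
        match op() {
            Ok(value) => {
                observe(true, started_at.elapsed());
                return Ok(value);
            }
            Err(err) => {
                observe(false, started_at.elapsed());
                eprintln!("attempt {}/{max_attempts} failed: {err}", attempt + 1);
                last_error = Some(err);
                std::thread::sleep(sleep);
            }
        }
    }
    // All attempts failed: surface the last error, as the patch does.
    Err(last_error.expect("max_attempts must be non-zero"))
}

fn main() {
    let mut calls = 0;
    let result = retry_with_metrics(
        3,
        Duration::from_millis(10),
        || {
            calls += 1;
            if calls < 3 {
                Err("transient")
            } else {
                Ok(calls)
            }
        },
        |success, latency| println!("success={success}, latency={latency:?}"),
    );
    assert_eq!(result, Ok(3));
}
```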
--- Cargo.lock | 2 + .../config/src/configs/base_token_adjuster.rs | 28 ++++ core/lib/config/src/testonly.rs | 2 + .../lib/env_config/src/base_token_adjuster.rs | 8 + .../src/base_token_adjuster.rs | 8 + .../proto/config/base_token_adjuster.proto | 2 + core/node/base_token_adjuster/Cargo.toml | 3 +- .../src/base_token_ratio_persister.rs | 150 +++++++++++------- core/node/base_token_adjuster/src/lib.rs | 1 + core/node/base_token_adjuster/src/metrics.rs | 28 ++++ 10 files changed, 174 insertions(+), 58 deletions(-) create mode 100644 core/node/base_token_adjuster/src/metrics.rs diff --git a/Cargo.lock b/Cargo.lock index 6c6a8d96123f..f60faf9fdf96 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -8050,6 +8050,7 @@ dependencies = [ "rand 0.8.5", "tokio", "tracing", + "vise", "zksync_config", "zksync_contracts", "zksync_dal", @@ -8057,6 +8058,7 @@ dependencies = [ "zksync_external_price_api", "zksync_node_fee_model", "zksync_types", + "zksync_utils", ] [[package]] diff --git a/core/lib/config/src/configs/base_token_adjuster.rs b/core/lib/config/src/configs/base_token_adjuster.rs index 0ae451a62d9c..c8a0fe6312e3 100644 --- a/core/lib/config/src/configs/base_token_adjuster.rs +++ b/core/lib/config/src/configs/base_token_adjuster.rs @@ -26,6 +26,12 @@ const DEFAULT_L1_TX_SENDING_MAX_ATTEMPTS: u32 = 3; /// Default number of milliseconds to sleep between receipt checking attempts const DEFAULT_L1_RECEIPT_CHECKING_SLEEP_MS: u64 = 30_000; +/// Default maximum number of attempts to fetch price from a remote API +const DEFAULT_PRICE_FETCHING_MAX_ATTEMPTS: u32 = 3; + +/// Default number of milliseconds to sleep between price fetching attempts +const DEFAULT_PRICE_FETCHING_SLEEP_MS: u64 = 5_000; + /// Default number of milliseconds to sleep between transaction sending attempts const DEFAULT_L1_TX_SENDING_SLEEP_MS: u64 = 30_000; @@ -73,6 +79,14 @@ pub struct BaseTokenAdjusterConfig { #[serde(default = "BaseTokenAdjusterConfig::default_l1_tx_sending_sleep_ms")] pub l1_tx_sending_sleep_ms: u64, + /// Maximum number of attempts to fetch quote from a remote API before failing over + #[serde(default = "BaseTokenAdjusterConfig::default_price_fetching_max_attempts")] + pub price_fetching_max_attempts: u32, + + /// Number of milliseconds to sleep between price fetching attempts + #[serde(default = "BaseTokenAdjusterConfig::default_price_fetching_sleep_ms")] + pub price_fetching_sleep_ms: u64, + /// Defines whether base_token_adjuster should halt the process if there was an error while /// fetching or persisting the quote. Generally, this should be set to false so as not to halt /// the server process if an external API is not available or if L1 is congested.
@@ -93,6 +107,8 @@ impl Default for BaseTokenAdjusterConfig { l1_receipt_checking_sleep_ms: Self::default_l1_receipt_checking_sleep_ms(), l1_tx_sending_max_attempts: Self::default_l1_tx_sending_max_attempts(), l1_tx_sending_sleep_ms: Self::default_l1_tx_sending_sleep_ms(), + price_fetching_sleep_ms: Self::default_price_fetching_sleep_ms(), + price_fetching_max_attempts: Self::default_price_fetching_max_attempts(), halt_on_error: Self::default_halt_on_error(), } } @@ -135,6 +151,10 @@ impl BaseTokenAdjusterConfig { Duration::from_millis(self.l1_tx_sending_sleep_ms) } + pub fn price_fetching_sleep_duration(&self) -> Duration { + Duration::from_millis(self.price_fetching_sleep_ms) + } + pub fn default_l1_receipt_checking_max_attempts() -> u32 { DEFAULT_L1_RECEIPT_CHECKING_MAX_ATTEMPTS } @@ -151,6 +171,14 @@ impl BaseTokenAdjusterConfig { DEFAULT_L1_TX_SENDING_SLEEP_MS } + pub fn default_price_fetching_sleep_ms() -> u64 { + DEFAULT_PRICE_FETCHING_SLEEP_MS + } + + pub fn default_price_fetching_max_attempts() -> u32 { + DEFAULT_PRICE_FETCHING_MAX_ATTEMPTS + } + pub fn default_max_tx_gas() -> u64 { DEFAULT_MAX_TX_GAS } diff --git a/core/lib/config/src/testonly.rs b/core/lib/config/src/testonly.rs index 1f4bfbc0265b..e028c3d3aec0 100644 --- a/core/lib/config/src/testonly.rs +++ b/core/lib/config/src/testonly.rs @@ -1045,6 +1045,8 @@ impl Distribution<configs::base_token_adjuster::BaseTokenAdjusterConfig> for Enc l1_receipt_checking_sleep_ms: self.sample(rng), l1_tx_sending_max_attempts: self.sample(rng), l1_tx_sending_sleep_ms: self.sample(rng), + price_fetching_max_attempts: self.sample(rng), + price_fetching_sleep_ms: self.sample(rng), halt_on_error: self.sample(rng), } } diff --git a/core/lib/env_config/src/base_token_adjuster.rs b/core/lib/env_config/src/base_token_adjuster.rs index 67cdef9425cd..f94e9c8f92a2 100644 --- a/core/lib/env_config/src/base_token_adjuster.rs +++ b/core/lib/env_config/src/base_token_adjuster.rs @@ -26,6 +26,8 @@ mod tests { l1_receipt_checking_sleep_ms: 20_000, l1_tx_sending_max_attempts: 10, l1_tx_sending_sleep_ms: 30_000, + price_fetching_max_attempts: 20, + price_fetching_sleep_ms: 10_000, halt_on_error: true, } } @@ -41,6 +43,8 @@ mod tests { l1_receipt_checking_sleep_ms: 30_000, l1_tx_sending_max_attempts: 3, l1_tx_sending_sleep_ms: 30_000, + price_fetching_max_attempts: 3, + price_fetching_sleep_ms: 5_000, halt_on_error: false, } } @@ -58,6 +62,8 @@ mod tests { BASE_TOKEN_ADJUSTER_L1_RECEIPT_CHECKING_SLEEP_MS=20000 BASE_TOKEN_ADJUSTER_L1_TX_SENDING_MAX_ATTEMPTS=10 BASE_TOKEN_ADJUSTER_L1_TX_SENDING_SLEEP_MS=30000 + BASE_TOKEN_ADJUSTER_PRICE_FETCHING_MAX_ATTEMPTS=20 + BASE_TOKEN_ADJUSTER_PRICE_FETCHING_SLEEP_MS=10000 BASE_TOKEN_ADJUSTER_HALT_ON_ERROR=true "#; lock.set_env(config); @@ -79,6 +85,8 @@ mod tests { "BASE_TOKEN_ADJUSTER_L1_RECEIPT_CHECKING_SLEEP_MS", "BASE_TOKEN_ADJUSTER_L1_TX_SENDING_MAX_ATTEMPTS", "BASE_TOKEN_ADJUSTER_L1_TX_SENDING_SLEEP_MS", + "BASE_TOKEN_ADJUSTER_PRICE_FETCHING_MAX_ATTEMPTS", + "BASE_TOKEN_ADJUSTER_PRICE_FETCHING_SLEEP_MS", "BASE_TOKEN_ADJUSTER_HALT_ON_ERROR", ]); diff --git a/core/lib/protobuf_config/src/base_token_adjuster.rs b/core/lib/protobuf_config/src/base_token_adjuster.rs index d68db5fd9796..951feac16533 100644 --- a/core/lib/protobuf_config/src/base_token_adjuster.rs +++ b/core/lib/protobuf_config/src/base_token_adjuster.rs @@ -30,6 +30,12 @@ impl ProtoRepr for proto::BaseTokenAdjuster { l1_receipt_checking_max_attempts: self .l1_receipt_checking_max_attempts .unwrap_or(Self::Type::default_l1_receipt_checking_max_attempts()), + price_fetching_sleep_ms: self +
.price_fetching_sleep_ms + .unwrap_or(Self::Type::default_price_fetching_sleep_ms()), + price_fetching_max_attempts: self + .price_fetching_max_attempts + .unwrap_or(Self::Type::default_price_fetching_max_attempts()), l1_tx_sending_max_attempts: self .l1_tx_sending_max_attempts .unwrap_or(Self::Type::default_l1_tx_sending_max_attempts()), @@ -47,6 +53,8 @@ impl ProtoRepr for proto::BaseTokenAdjuster { l1_receipt_checking_max_attempts: Some(this.l1_receipt_checking_max_attempts), l1_tx_sending_max_attempts: Some(this.l1_tx_sending_max_attempts), l1_tx_sending_sleep_ms: Some(this.l1_tx_sending_sleep_ms), + price_fetching_max_attempts: Some(this.price_fetching_max_attempts), + price_fetching_sleep_ms: Some(this.price_fetching_sleep_ms), max_tx_gas: Some(this.max_tx_gas), default_priority_fee_per_gas: Some(this.default_priority_fee_per_gas), max_acceptable_priority_fee_in_gwei: Some(this.max_acceptable_priority_fee_in_gwei), diff --git a/core/lib/protobuf_config/src/proto/config/base_token_adjuster.proto b/core/lib/protobuf_config/src/proto/config/base_token_adjuster.proto index 1132858bfa6f..396bd400c04b 100644 --- a/core/lib/protobuf_config/src/proto/config/base_token_adjuster.proto +++ b/core/lib/protobuf_config/src/proto/config/base_token_adjuster.proto @@ -13,4 +13,6 @@ message BaseTokenAdjuster { optional uint32 l1_tx_sending_max_attempts = 8; optional uint64 l1_tx_sending_sleep_ms = 9; optional bool halt_on_error = 10; + optional uint32 price_fetching_max_attempts = 11; + optional uint64 price_fetching_sleep_ms = 12; } diff --git a/core/node/base_token_adjuster/Cargo.toml b/core/node/base_token_adjuster/Cargo.toml index c21576e37327..3a0beb2ea137 100644 --- a/core/node/base_token_adjuster/Cargo.toml +++ b/core/node/base_token_adjuster/Cargo.toml @@ -19,7 +19,8 @@ zksync_external_price_api.workspace = true zksync_contracts.workspace = true zksync_eth_client.workspace = true zksync_node_fee_model.workspace = true - +zksync_utils.workspace = true +vise.workspace = true tokio = { workspace = true, features = ["time"] } anyhow.workspace = true diff --git a/core/node/base_token_adjuster/src/base_token_ratio_persister.rs b/core/node/base_token_adjuster/src/base_token_ratio_persister.rs index 41796cf2197a..12cd6233efbb 100644 --- a/core/node/base_token_adjuster/src/base_token_ratio_persister.rs +++ b/core/node/base_token_adjuster/src/base_token_ratio_persister.rs @@ -1,4 +1,4 @@ -use std::{cmp::max, fmt::Debug, sync::Arc, time::Duration}; +use std::{cmp::max, fmt::Debug, sync::Arc, time::Instant}; use anyhow::Context as _; use tokio::{sync::watch, time::sleep}; @@ -14,6 +14,8 @@ use zksync_types::{ Address, U256, }; +use crate::metrics::{OperationResult, OperationResultLabels, METRICS}; + #[derive(Debug, Clone)] pub struct BaseTokenRatioPersisterL1Params { pub eth_client: Box<DynClient<L1>>, @@ -82,47 +84,7 @@ impl BaseTokenRatioPersister { // TODO(PE-148): Consider shifting retry upon adding external API redundancy.
let new_ratio = self.retry_fetch_ratio().await?; self.persist_ratio(new_ratio).await?; - - let Some(l1_params) = &self.l1_params else { - return Ok(()); - }; - - let max_attempts = self.config.l1_tx_sending_max_attempts; - let sleep_duration = self.config.l1_tx_sending_sleep_duration(); - let mut result: anyhow::Result<()> = Ok(()); - let mut prev_base_fee_per_gas: Option<u64> = None; - let mut prev_priority_fee_per_gas: Option<u64> = None; - - for attempt in 0..max_attempts { - let (base_fee_per_gas, priority_fee_per_gas) = - self.get_eth_fees(l1_params, prev_base_fee_per_gas, prev_priority_fee_per_gas); - - result = self - .send_ratio_to_l1(l1_params, new_ratio, base_fee_per_gas, priority_fee_per_gas) - .await; - if let Some(err) = result.as_ref().err() { - tracing::info!( - "Failed to update base token multiplier on L1, attempt {}, base_fee_per_gas {}, priority_fee_per_gas {}: {}", - attempt + 1, - base_fee_per_gas, - priority_fee_per_gas, - err - ); - tokio::time::sleep(sleep_duration).await; - prev_base_fee_per_gas = Some(base_fee_per_gas); - prev_priority_fee_per_gas = Some(priority_fee_per_gas); - } else { - tracing::info!( - "Updated base token multiplier on L1: numerator {}, denominator {}, base_fee_per_gas {}, priority_fee_per_gas {}", - new_ratio.numerator.get(), - new_ratio.denominator.get(), - base_fee_per_gas, - priority_fee_per_gas - ); - return result; - } - } - result + self.retry_update_ratio_on_l1(new_ratio).await } fn get_eth_fees( @@ -157,36 +119,110 @@ impl BaseTokenRatioPersister { (base_fee_per_gas, priority_fee_per_gas) } + async fn retry_update_ratio_on_l1(&self, new_ratio: BaseTokenAPIRatio) -> anyhow::Result<()> { + let Some(l1_params) = &self.l1_params else { + return Ok(()); + }; + + let max_attempts = self.config.l1_tx_sending_max_attempts; + let sleep_duration = self.config.l1_tx_sending_sleep_duration(); + let mut prev_base_fee_per_gas: Option<u64> = None; + let mut prev_priority_fee_per_gas: Option<u64> = None; + let mut last_error = None; + for attempt in 0..max_attempts { + let (base_fee_per_gas, priority_fee_per_gas) = + self.get_eth_fees(l1_params, prev_base_fee_per_gas, prev_priority_fee_per_gas); + + let start_time = Instant::now(); + let result = self + .update_ratio_on_l1(l1_params, new_ratio, base_fee_per_gas, priority_fee_per_gas) + .await; + + match result { + Ok(x) => { + tracing::info!( + "Updated base token multiplier on L1: numerator {}, denominator {}, base_fee_per_gas {}, priority_fee_per_gas {}", + new_ratio.numerator.get(), + new_ratio.denominator.get(), + base_fee_per_gas, + priority_fee_per_gas + ); + METRICS + .l1_gas_used + .set(x.unwrap_or(U256::zero()).low_u128() as u64); + METRICS.l1_update_latency[&OperationResultLabels { + result: OperationResult::Success, + }] + .observe(start_time.elapsed()); + + return Ok(()); + } + Err(err) => { + tracing::info!( + "Failed to update base token multiplier on L1, attempt {}, base_fee_per_gas {}, priority_fee_per_gas {}: {}", + attempt, + base_fee_per_gas, + priority_fee_per_gas, + err + ); + METRICS.l1_update_latency[&OperationResultLabels { + result: OperationResult::Failure, + }] + .observe(start_time.elapsed()); + + tokio::time::sleep(sleep_duration).await; + prev_base_fee_per_gas = Some(base_fee_per_gas); + prev_priority_fee_per_gas = Some(priority_fee_per_gas); + last_error = Some(err) + } + } + } + + let error_message = "Failed to update base token multiplier on L1"; + Err(last_error + .map(|x| x.context(error_message)) + .unwrap_or_else(|| anyhow::anyhow!(error_message))) + } + + async fn
retry_fetch_ratio(&self) -> anyhow::Result<BaseTokenAPIRatio> { - let sleep_duration = Duration::from_secs(1); - let max_retries = 5; - let mut attempts = 0; + let sleep_duration = self.config.price_fetching_sleep_duration(); + let max_retries = self.config.price_fetching_max_attempts; + let mut last_error = None; - loop { + for attempt in 0..max_retries { + let start_time = Instant::now(); match self .price_api_client .fetch_ratio(self.base_token_address) .await { Ok(ratio) => { + METRICS.external_price_api_latency[&OperationResultLabels { + result: OperationResult::Success, + }] + .observe(start_time.elapsed()); return Ok(ratio); } - Err(err) if attempts < max_retries => { - attempts += 1; + Err(err) => { tracing::warn!( - "Attempt {}/{} to fetch ratio from coingecko failed with err: {}. Retrying...", - attempts, + "Attempt {}/{} to fetch ratio from external price api failed with err: {}. Retrying...", + attempt, max_retries, err ); + last_error = Some(err); + METRICS.external_price_api_latency[&OperationResultLabels { + result: OperationResult::Failure, + }] + .observe(start_time.elapsed()); sleep(sleep_duration).await; } - Err(err) => { - return Err(err) - .context("Failed to fetch base token ratio after multiple attempts"); - } } } + let error_message = "Failed to fetch base token ratio after multiple attempts"; + Err(last_error + .map(|x| x.context(error_message)) + .unwrap_or_else(|| anyhow::anyhow!(error_message))) } async fn persist_ratio(&self, api_ratio: BaseTokenAPIRatio) -> anyhow::Result<usize> { @@ -209,13 +245,13 @@ impl BaseTokenRatioPersister { Ok(id) } - async fn send_ratio_to_l1( + async fn update_ratio_on_l1( &self, l1_params: &BaseTokenRatioPersisterL1Params, api_ratio: BaseTokenAPIRatio, base_fee_per_gas: u64, priority_fee_per_gas: u64, - ) -> anyhow::Result<()> { + ) -> anyhow::Result<Option<U256>> { let fn_set_token_multiplier = l1_params .chain_admin_contract .function("setTokenMultiplier") @@ -276,7 +312,7 @@ impl BaseTokenRatioPersister { .context("failed getting receipt for `setTokenMultiplier` transaction")?; if let Some(receipt) = maybe_receipt { if receipt.status == Some(1.into()) { - return Ok(()); + return Ok(receipt.gas_used); } return Err(anyhow::Error::msg(format!( "`setTokenMultiplier` transaction {:?} failed with status {:?}", diff --git a/core/node/base_token_adjuster/src/lib.rs b/core/node/base_token_adjuster/src/lib.rs index 332fb5f47aab..d786b440f622 100644 --- a/core/node/base_token_adjuster/src/lib.rs +++ b/core/node/base_token_adjuster/src/lib.rs @@ -5,3 +5,4 @@ pub use self::{ mod base_token_ratio_persister; mod base_token_ratio_provider; +mod metrics; diff --git a/core/node/base_token_adjuster/src/metrics.rs b/core/node/base_token_adjuster/src/metrics.rs new file mode 100644 index 000000000000..e6f6571adc1d --- /dev/null +++ b/core/node/base_token_adjuster/src/metrics.rs @@ -0,0 +1,28 @@ +use std::time::Duration; + +use vise::{Buckets, EncodeLabelSet, EncodeLabelValue, Family, Gauge, Histogram, Metrics}; + +#[derive(Debug, Clone, Copy, PartialEq, Eq, Hash, EncodeLabelSet, EncodeLabelValue)] +#[metrics(label = "operation_result", rename_all = "snake_case")] +pub(super) enum OperationResult { + Success, + Failure, +} + +#[derive(Debug, Clone, Copy, PartialEq, Eq, Hash, EncodeLabelSet)] +pub(crate) struct OperationResultLabels { + pub result: OperationResult, +} + +#[derive(Debug, Metrics)] +#[metrics(prefix = "base_token_adjuster")] +pub(crate) struct BaseTokenAdjusterMetrics { + pub l1_gas_used: Gauge<u64>, + #[metrics(buckets = Buckets::LATENCIES)] + pub external_price_api_latency:
Family<OperationResultLabels, Histogram<Duration>>, + #[metrics(buckets = Buckets::LATENCIES)] + pub l1_update_latency: Family<OperationResultLabels, Histogram<Duration>>, +} + +#[vise::register] +pub(crate) static METRICS: vise::Global<BaseTokenAdjusterMetrics> = vise::Global::new(); From 62d7e193e9b5c1f78695983a9f79d1b7db635052 Mon Sep 17 00:00:00 2001 From: Vladislav Volosnikov Date: Mon, 26 Aug 2024 10:22:05 +0200 Subject: [PATCH 09/39] chore(prover): Add avx512 bwg build to stage release workflow (#2718) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit ## What ❔ Same changes as in https://github.com/matter-labs/zksync-era/pull/2687, but for stage release workflow ## Why ❔ ## Checklist - [ ] PR title corresponds to the body of PR (we generate changelog entries from PRs). - [ ] Tests for the changes have been added / updated. - [ ] Documentation comments have been added / updated. - [ ] Code has been formatted via `zk fmt` and `zk lint`. --- .github/workflows/release-test-stage.yml | 14 ++++++++++++++ 1 file changed, 14 insertions(+) diff --git a/.github/workflows/release-test-stage.yml b/.github/workflows/release-test-stage.yml index 3f83d208f66c..9f921be78292 100644 --- a/.github/workflows/release-test-stage.yml +++ b/.github/workflows/release-test-stage.yml @@ -106,6 +106,20 @@ jobs: DOCKERHUB_USER: ${{ secrets.DOCKERHUB_USER }} DOCKERHUB_TOKEN: ${{ secrets.DOCKERHUB_TOKEN }} + build-push-witness-generator-image-avx512: + name: Build and push prover images with avx512 instructions + needs: [setup, changed_files] + uses: ./.github/workflows/build-witness-generator-template.yml + if: needs.changed_files.outputs.prover == 'true' || needs.changed_files.outputs.all == 'true' + with: + image_tag_suffix: ${{ needs.setup.outputs.image_tag_suffix }}-avx512 + ERA_BELLMAN_CUDA_RELEASE: ${{ vars.ERA_BELLMAN_CUDA_RELEASE }} + CUDA_ARCH: "60;70;75;89" + WITNESS_GENERATOR_RUST_FLAGS: "-Ctarget_feature=+avx512bw,+avx512cd,+avx512dq,+avx512f,+avx512vl" + secrets: + DOCKERHUB_USER: ${{ secrets.DOCKERHUB_USER }} + DOCKERHUB_TOKEN: ${{ secrets.DOCKERHUB_TOKEN }} + build-gar-prover-fri-gpu: name: Build GAR prover FRI GPU needs: [setup, build-push-prover-images] From c162510598b45dc062c2c91085868f8aa966360e Mon Sep 17 00:00:00 2001 From: Patrick Date: Mon, 26 Aug 2024 10:53:32 +0200 Subject: [PATCH 10/39] fix(proof_data_handler): TEE blob fetching error handling (#2674) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit We ran into a problem in the staging environment where TEE blob fetching failed because of a 30-day retention policy on blobs in Google Cloud Storage. The TEE prover was failing for all old batches (`l1_batch_number < 58300`). This commit fixes the issue by adding better error handling when the blob for a given batch number isn't available. ## What ❔ Graceful error handling for the TEE proof data handler when there is no blob in Google Cloud Storage for the specified batch number. ## Why ❔ We need more robust error handling. ## Checklist - [x] PR title corresponds to the body of PR (we generate changelog entries from PRs). - [ ] Tests for the changes have been added / updated.
--- core/bin/zksync_tee_prover/src/tee_prover.rs | 2 +- ...fee3209a950943dc2b4da82c324e1c09132f.json} | 7 +- ...468765628fd2c3b7c2a408d18b5aba0df9a30.json | 15 +++ core/lib/dal/doc/TeeProofGenerationDal.md | 4 +- core/lib/dal/src/tee_proof_generation_dal.rs | 46 ++++++- core/node/proof_data_handler/src/errors.rs | 6 + .../src/tee_request_processor.rs | 120 +++++++++++------- 7 files changed, 146 insertions(+), 54 deletions(-) rename core/lib/dal/.sqlx/{query-286f27e32a152c293d07e7c22e893c6f5a43386d4183745a4668507cf672b3f6.json => query-47975cc0b5e4f3a6b5224cb452b8fee3209a950943dc2b4da82c324e1c09132f.json} (75%) create mode 100644 core/lib/dal/.sqlx/query-5c5bdb0e419049f9fb4d8b3bbec468765628fd2c3b7c2a408d18b5aba0df9a30.json diff --git a/core/bin/zksync_tee_prover/src/tee_prover.rs b/core/bin/zksync_tee_prover/src/tee_prover.rs index 64a3a9c5749d..7f874533b4b3 100644 --- a/core/bin/zksync_tee_prover/src/tee_prover.rs +++ b/core/bin/zksync_tee_prover/src/tee_prover.rs @@ -201,8 +201,8 @@ impl Task for TeeProver { if !err.is_retriable() || retries > self.config.max_retries { return Err(err.into()); } - retries += 1; tracing::warn!(%err, "Failed TEE prover step function {retries}/{}, retrying in {} milliseconds.", self.config.max_retries, backoff.as_millis()); + retries += 1; backoff = std::cmp::min( backoff.mul_f32(self.config.retry_backoff_multiplier), self.config.max_backoff, diff --git a/core/lib/dal/.sqlx/query-286f27e32a152c293d07e7c22e893c6f5a43386d4183745a4668507cf672b3f6.json b/core/lib/dal/.sqlx/query-47975cc0b5e4f3a6b5224cb452b8fee3209a950943dc2b4da82c324e1c09132f.json similarity index 75% rename from core/lib/dal/.sqlx/query-286f27e32a152c293d07e7c22e893c6f5a43386d4183745a4668507cf672b3f6.json rename to core/lib/dal/.sqlx/query-47975cc0b5e4f3a6b5224cb452b8fee3209a950943dc2b4da82c324e1c09132f.json index 540660bddf34..7e5f9e1713c4 100644 --- a/core/lib/dal/.sqlx/query-286f27e32a152c293d07e7c22e893c6f5a43386d4183745a4668507cf672b3f6.json +++ b/core/lib/dal/.sqlx/query-47975cc0b5e4f3a6b5224cb452b8fee3209a950943dc2b4da82c324e1c09132f.json @@ -1,6 +1,6 @@ { "db_name": "PostgreSQL", - "query": "\n UPDATE tee_proof_generation_details\n SET\n status = 'picked_by_prover',\n updated_at = NOW(),\n prover_taken_at = NOW()\n WHERE\n tee_type = $1\n AND l1_batch_number = (\n SELECT\n proofs.l1_batch_number\n FROM\n tee_proof_generation_details AS proofs\n JOIN tee_verifier_input_producer_jobs AS inputs ON proofs.l1_batch_number = inputs.l1_batch_number\n WHERE\n inputs.status = $2\n AND (\n proofs.status = 'ready_to_be_proven'\n OR (\n proofs.status = 'picked_by_prover'\n AND proofs.prover_taken_at < NOW() - $3::INTERVAL\n )\n )\n ORDER BY\n l1_batch_number ASC\n LIMIT\n 1\n FOR UPDATE\n SKIP LOCKED\n )\n RETURNING\n tee_proof_generation_details.l1_batch_number\n ", + "query": "\n UPDATE tee_proof_generation_details\n SET\n status = 'picked_by_prover',\n updated_at = NOW(),\n prover_taken_at = NOW()\n WHERE\n tee_type = $1\n AND l1_batch_number = (\n SELECT\n proofs.l1_batch_number\n FROM\n tee_proof_generation_details AS proofs\n JOIN tee_verifier_input_producer_jobs AS inputs ON proofs.l1_batch_number = inputs.l1_batch_number\n WHERE\n inputs.status = $2\n AND (\n proofs.status = 'ready_to_be_proven'\n OR (\n proofs.status = 'picked_by_prover'\n AND proofs.prover_taken_at < NOW() - $3::INTERVAL\n )\n )\n AND proofs.l1_batch_number >= $4\n ORDER BY\n l1_batch_number ASC\n LIMIT\n 1\n FOR UPDATE\n SKIP LOCKED\n )\n RETURNING\n tee_proof_generation_details.l1_batch_number\n ", "describe": { 
"columns": [ { @@ -26,12 +26,13 @@ } } }, - "Interval" + "Interval", + "Int8" ] }, "nullable": [ false ] }, - "hash": "286f27e32a152c293d07e7c22e893c6f5a43386d4183745a4668507cf672b3f6" + "hash": "47975cc0b5e4f3a6b5224cb452b8fee3209a950943dc2b4da82c324e1c09132f" } diff --git a/core/lib/dal/.sqlx/query-5c5bdb0e419049f9fb4d8b3bbec468765628fd2c3b7c2a408d18b5aba0df9a30.json b/core/lib/dal/.sqlx/query-5c5bdb0e419049f9fb4d8b3bbec468765628fd2c3b7c2a408d18b5aba0df9a30.json new file mode 100644 index 000000000000..2d9a24d6d79c --- /dev/null +++ b/core/lib/dal/.sqlx/query-5c5bdb0e419049f9fb4d8b3bbec468765628fd2c3b7c2a408d18b5aba0df9a30.json @@ -0,0 +1,15 @@ +{ + "db_name": "PostgreSQL", + "query": "\n UPDATE tee_proof_generation_details\n SET\n status = 'unpicked',\n updated_at = NOW()\n WHERE\n l1_batch_number = $1\n AND tee_type = $2\n ", + "describe": { + "columns": [], + "parameters": { + "Left": [ + "Int8", + "Text" + ] + }, + "nullable": [] + }, + "hash": "5c5bdb0e419049f9fb4d8b3bbec468765628fd2c3b7c2a408d18b5aba0df9a30" +} diff --git a/core/lib/dal/doc/TeeProofGenerationDal.md b/core/lib/dal/doc/TeeProofGenerationDal.md index 23474d5cb5c5..167e6b3c42ce 100644 --- a/core/lib/dal/doc/TeeProofGenerationDal.md +++ b/core/lib/dal/doc/TeeProofGenerationDal.md @@ -12,8 +12,10 @@ title: Status Diagram --- stateDiagram-v2 [*] --> ready_to_be_proven : insert_tee_proof_generation_job -ready_to_be_proven --> picked_by_prover : get_next_batch_to_be_proven +ready_to_be_proven --> picked_by_prover : lock_batch_for_proving picked_by_prover --> generated : save_proof_artifacts_metadata generated --> [*] +picked_by_prover --> unpicked : unlock_batch +unpicked --> [*] ``` diff --git a/core/lib/dal/src/tee_proof_generation_dal.rs b/core/lib/dal/src/tee_proof_generation_dal.rs index 2bd73323eb10..80e364273f69 100644 --- a/core/lib/dal/src/tee_proof_generation_dal.rs +++ b/core/lib/dal/src/tee_proof_generation_dal.rs @@ -2,7 +2,9 @@ use std::time::Duration; use zksync_db_connection::{ - connection::Connection, error::DalResult, instrument::Instrumented, + connection::Connection, + error::DalResult, + instrument::{InstrumentExt, Instrumented}, utils::pg_interval_from_duration, }; use zksync_types::{tee_types::TeeType, L1BatchNumber}; @@ -18,12 +20,14 @@ pub struct TeeProofGenerationDal<'a, 'c> { } impl TeeProofGenerationDal<'_, '_> { - pub async fn get_next_batch_to_be_proven( + pub async fn lock_batch_for_proving( &mut self, tee_type: TeeType, processing_timeout: Duration, + min_batch_number: Option, ) -> DalResult> { let processing_timeout = pg_interval_from_duration(processing_timeout); + let min_batch_number = min_batch_number.map_or(0, |num| i64::from(num.0)); let query = sqlx::query!( r#" UPDATE tee_proof_generation_details @@ -48,6 +52,7 @@ impl TeeProofGenerationDal<'_, '_> { AND proofs.prover_taken_at < NOW() - $3::INTERVAL ) ) + AND proofs.l1_batch_number >= $4 ORDER BY l1_batch_number ASC LIMIT @@ -58,13 +63,16 @@ impl TeeProofGenerationDal<'_, '_> { RETURNING tee_proof_generation_details.l1_batch_number "#, - &tee_type.to_string(), + tee_type.to_string(), TeeVerifierInputProducerJobStatus::Successful as TeeVerifierInputProducerJobStatus, - &processing_timeout, + processing_timeout, + min_batch_number ); - let batch_number = Instrumented::new("get_next_batch_to_be_proven") + + let batch_number = Instrumented::new("lock_batch_for_proving") .with_arg("tee_type", &tee_type) .with_arg("processing_timeout", &processing_timeout) + .with_arg("l1_batch_number", &min_batch_number) .with(query) 
.fetch_optional(self.storage) .await? @@ -73,6 +81,34 @@ impl TeeProofGenerationDal<'_, '_> { Ok(batch_number) } + pub async fn unlock_batch( + &mut self, + l1_batch_number: L1BatchNumber, + tee_type: TeeType, + ) -> DalResult<()> { + let batch_number = i64::from(l1_batch_number.0); + sqlx::query!( + r#" + UPDATE tee_proof_generation_details + SET + status = 'unpicked', + updated_at = NOW() + WHERE + l1_batch_number = $1 + AND tee_type = $2 + "#, + batch_number, + tee_type.to_string() + ) + .instrument("unlock_batch") + .with_arg("l1_batch_number", &batch_number) + .with_arg("tee_type", &tee_type) + .execute(self.storage) + .await?; + + Ok(()) + } + pub async fn save_proof_artifacts_metadata( &mut self, batch_number: L1BatchNumber, diff --git a/core/node/proof_data_handler/src/errors.rs b/core/node/proof_data_handler/src/errors.rs index f170b3b53e7c..15ef393294aa 100644 --- a/core/node/proof_data_handler/src/errors.rs +++ b/core/node/proof_data_handler/src/errors.rs @@ -10,6 +10,12 @@ pub(crate) enum RequestProcessorError { Dal(DalError), } +impl From<DalError> for RequestProcessorError { + fn from(err: DalError) -> Self { + RequestProcessorError::Dal(err) + } +} + impl IntoResponse for RequestProcessorError { fn into_response(self) -> Response { let (status_code, message) = match self { diff --git a/core/node/proof_data_handler/src/tee_request_processor.rs b/core/node/proof_data_handler/src/tee_request_processor.rs index d85591dd2c90..4ae1a5026f14 100644 --- a/core/node/proof_data_handler/src/tee_request_processor.rs +++ b/core/node/proof_data_handler/src/tee_request_processor.rs @@ -3,15 +3,12 @@ use std::sync::Arc; use axum::{extract::Path, Json}; use zksync_config::configs::ProofDataHandlerConfig; use zksync_dal::{ConnectionPool, Core, CoreDal}; -use zksync_object_store::ObjectStore; -use zksync_prover_interface::{ - api::{ - RegisterTeeAttestationRequest, RegisterTeeAttestationResponse, SubmitProofResponse, - SubmitTeeProofRequest, TeeProofGenerationDataRequest, TeeProofGenerationDataResponse, - }, - inputs::TeeVerifierInput, }; -use zksync_types::L1BatchNumber; +use zksync_object_store::{ObjectStore, ObjectStoreError}; +use zksync_prover_interface::api::{ RegisterTeeAttestationRequest, RegisterTeeAttestationResponse, SubmitProofResponse, SubmitTeeProofRequest, TeeProofGenerationDataRequest, TeeProofGenerationDataResponse, }; +use zksync_types::{tee_types::TeeType, L1BatchNumber}; use crate::errors::RequestProcessorError; @@ -41,32 +38,77 @@ impl TeeRequestProcessor { ) -> Result<Json<TeeProofGenerationDataResponse>, RequestProcessorError> { tracing::info!("Received request for proof generation data: {:?}", request); - let mut connection = self - .pool - .connection() - .await - .map_err(RequestProcessorError::Dal)?; - - let l1_batch_number_result = connection - .tee_proof_generation_dal() - .get_next_batch_to_be_proven(request.tee_type, self.config.proof_generation_timeout()) - .await - .map_err(RequestProcessorError::Dal)?; - - let l1_batch_number = match l1_batch_number_result { - Some(number) => number, - None => return Ok(Json(TeeProofGenerationDataResponse(None))), + let mut min_batch_number: Option<L1BatchNumber> = None; + let mut missing_range: Option<(L1BatchNumber, L1BatchNumber)> = None; + + let result = loop { + let l1_batch_number = match self + .lock_batch_for_proving(request.tee_type, min_batch_number) + .await?
+            {
+                Some(number) => number,
+                None => break Ok(Json(TeeProofGenerationDataResponse(None))),
+            };
+
+            match self.blob_store.get(l1_batch_number).await {
+                Ok(input) => break Ok(Json(TeeProofGenerationDataResponse(Some(Box::new(input))))),
+                Err(ObjectStoreError::KeyNotFound(_)) => {
+                    missing_range = match missing_range {
+                        Some((start, _)) => Some((start, l1_batch_number)),
+                        None => Some((l1_batch_number, l1_batch_number)),
+                    };
+                    self.unlock_batch(l1_batch_number, request.tee_type).await?;
+                    min_batch_number = Some(min_batch_number.unwrap_or(l1_batch_number) + 1);
+                }
+                Err(err) => {
+                    self.unlock_batch(l1_batch_number, request.tee_type).await?;
+                    break Err(RequestProcessorError::ObjectStore(err));
+                }
+            }
         };
 
-        let tee_verifier_input: TeeVerifierInput = self
-            .blob_store
-            .get(l1_batch_number)
-            .await
-            .map_err(RequestProcessorError::ObjectStore)?;
+        if let Some((start, end)) = missing_range {
+            tracing::warn!(
+                "Blobs for batch numbers {} to {} not found in the object store. Marked as unpicked.",
+                start,
+                end
+            );
+        }
+
+        result
+    }
 
-        let response = TeeProofGenerationDataResponse(Some(Box::new(tee_verifier_input)));
+    async fn lock_batch_for_proving(
+        &self,
+        tee_type: TeeType,
+        min_batch_number: Option<L1BatchNumber>,
+    ) -> Result<Option<L1BatchNumber>, RequestProcessorError> {
+        let result = self
+            .pool
+            .connection()
+            .await?
+            .tee_proof_generation_dal()
+            .lock_batch_for_proving(
+                tee_type,
+                self.config.proof_generation_timeout(),
+                min_batch_number,
+            )
+            .await?;
+        Ok(result)
+    }
 
-        Ok(Json(response))
+    async fn unlock_batch(
+        &self,
+        l1_batch_number: L1BatchNumber,
+        tee_type: TeeType,
+    ) -> Result<(), RequestProcessorError> {
+        self.pool
+            .connection()
+            .await?
+            .tee_proof_generation_dal()
+            .unlock_batch(l1_batch_number, tee_type)
+            .await?;
+        Ok(())
     }
 
     pub(crate) async fn submit_proof(
@@ -75,11 +117,7 @@ impl TeeRequestProcessor {
         Json(proof): Json<SubmitTeeProofRequest>,
     ) -> Result<Json<SubmitProofResponse>, RequestProcessorError> {
         let l1_batch_number = L1BatchNumber(l1_batch_number);
-        let mut connection = self
-            .pool
-            .connection()
-            .await
-            .map_err(RequestProcessorError::Dal)?;
+        let mut connection = self.pool.connection().await?;
         let mut dal = connection.tee_proof_generation_dal();
 
         tracing::info!(
@@ -94,8 +132,7 @@ impl TeeRequestProcessor {
             &proof.0.signature,
             &proof.0.proof,
         )
-        .await
-        .map_err(RequestProcessorError::Dal)?;
+        .await?;
 
         Ok(Json(SubmitProofResponse::Success))
     }
@@ -106,16 +143,11 @@ impl TeeRequestProcessor {
     ) -> Result<Json<RegisterTeeAttestationResponse>, RequestProcessorError> {
         tracing::info!("Received attestation: {:?}", payload);
 
-        let mut connection = self
-            .pool
-            .connection()
-            .await
-            .map_err(RequestProcessorError::Dal)?;
+        let mut connection = self.pool.connection().await?;
         let mut dal = connection.tee_proof_generation_dal();
 
         dal.save_attestation(&payload.pubkey, &payload.attestation)
-            .await
-            .map_err(RequestProcessorError::Dal)?;
+            .await?;
 
         Ok(Json(RegisterTeeAttestationResponse::Success))
     }

From 09ad544e1e979fa3d6b8ec2849fa2ad77046cf55 Mon Sep 17 00:00:00 2001
From: Igor Aleksanov
Date: Mon, 26 Aug 2024 14:12:42 +0400
Subject: [PATCH 11/39] docs(prover): Recommend standard provisioning over
 spot (#2729)
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

## What ❔

A few reports have shown that using spot instances is very luck-dependent, so it's not worth trying at the cost of flow disruption.

## Why ❔

## Checklist

- [ ] PR title corresponds to the body of PR (we generate changelog entries from PRs).
- [ ] Tests for the changes have been added / updated.
- [ ] Documentation comments have been added / updated. - [ ] Code has been formatted via `zk fmt` and `zk lint`. --- prover/docs/01_gcp_vm.md | 5 ++--- 1 file changed, 2 insertions(+), 3 deletions(-) diff --git a/prover/docs/01_gcp_vm.md b/prover/docs/01_gcp_vm.md index a541495e978a..8cc9f31de696 100644 --- a/prover/docs/01_gcp_vm.md +++ b/prover/docs/01_gcp_vm.md @@ -31,9 +31,8 @@ When you choose the region, set the following options: - GPU Type: NVIDIA L4 - Number of GPUs: 1 - Machine type: Preset, `g2-standard-16` -- Availability policies: Spot instances are much cheaper, but there is a chance that it will be preempted while you work - on it. If you're working on something that is not very important, spot instances are recommended. If any disruption - will be harmful, choose standard provisioning. +- Availability policies: Choose standard provisioning. Spot instances can be preempted while you work on them, which + will disrupt your flow. - Then click on "VM provisioning model advanced settings" and - Click on "Set a time limit for the VM" - Set the limit to 8 hours From 8e1e6db03cc0235fcecbe3eacc887e17486c2208 Mon Sep 17 00:00:00 2001 From: Vladislav Volosnikov Date: Mon, 26 Aug 2024 12:35:53 +0200 Subject: [PATCH 12/39] chore: Remove unneeded step from avx512-BWG build flow (#2727) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit ## What ❔ ## Why ❔ ## Checklist - [ ] PR title corresponds to the body of PR (we generate changelog entries from PRs). - [ ] Tests for the changes have been added / updated. - [ ] Documentation comments have been added / updated. - [ ] Code has been formatted via `zk fmt` and `zk lint`. --- .../build-witness-generator-template.yml | 36 +------------------ 1 file changed, 1 insertion(+), 35 deletions(-) diff --git a/.github/workflows/build-witness-generator-template.yml b/.github/workflows/build-witness-generator-template.yml index a7139e5e0a8c..9c29297460d9 100644 --- a/.github/workflows/build-witness-generator-template.yml +++ b/.github/workflows/build-witness-generator-template.yml @@ -169,38 +169,4 @@ jobs: if: always() run: | ci_run sccache --show-stats || true - ci_run cat /tmp/sccache_log.txt || true - - copy-images: - name: Copy images between docker registries - needs: build-images - env: - IMAGE_TAG_SUFFIX: ${{ inputs.image_tag_suffix }} - PROTOCOL_VERSION: ${{ needs.build-images.outputs.protocol_version }} - runs-on: matterlabs-ci-runner - if: ${{ inputs.action == 'push' }} - strategy: - matrix: - component: - - witness-vector-generator - steps: - - name: Set up Docker Buildx - uses: docker/setup-buildx-action@v3 - - - name: Login to us-central1 GAR - run: | - gcloud auth print-access-token --lifetime=7200 --impersonate-service-account=gha-ci-runners@matterlabs-infra.iam.gserviceaccount.com | docker login -u oauth2accesstoken --password-stdin https://us-docker.pkg.dev - - - name: Login and push to Asia GAR - run: | - gcloud auth print-access-token --lifetime=7200 --impersonate-service-account=gha-ci-runners@matterlabs-infra.iam.gserviceaccount.com | docker login -u oauth2accesstoken --password-stdin https://asia-docker.pkg.dev - docker buildx imagetools create \ - --tag asia-docker.pkg.dev/matterlabs-infra/matterlabs-docker/${{ matrix.component }}:2.0-${{ needs.build-images.outputs.protocol_version }}-${{ inputs.image_tag_suffix }} \ - us-docker.pkg.dev/matterlabs-infra/matterlabs-docker/${{ matrix.component }}:2.0-${{ needs.build-images.outputs.protocol_version }}-${{ inputs.image_tag_suffix }} - - - name: 
Login and push to Europe GAR - run: | - gcloud auth print-access-token --lifetime=7200 --impersonate-service-account=gha-ci-runners@matterlabs-infra.iam.gserviceaccount.com | docker login -u oauth2accesstoken --password-stdin https://europe-docker.pkg.dev - docker buildx imagetools create \ - --tag europe-docker.pkg.dev/matterlabs-infra/matterlabs-docker/${{ matrix.component }}:2.0-${{ needs.build-images.outputs.protocol_version }}-${{ inputs.image_tag_suffix }} \ - us-docker.pkg.dev/matterlabs-infra/matterlabs-docker/${{ matrix.component }}:2.0-${{ needs.build-images.outputs.protocol_version }}-${{ inputs.image_tag_suffix }} + ci_run cat /tmp/sccache_log.txt || true \ No newline at end of file From 30e072bd695615b0095c8bdcfd62b77c6b0ae5e6 Mon Sep 17 00:00:00 2001 From: Danil Date: Mon, 26 Aug 2024 13:14:56 +0200 Subject: [PATCH 13/39] feat(zk_toolbox): Update rust for zk_toolbox (#2730) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit ## What ❔ ## Why ❔ ## Checklist - [ ] PR title corresponds to the body of PR (we generate changelog entries from PRs). - [ ] Tests for the changes have been added / updated. - [ ] Documentation comments have been added / updated. - [ ] Code has been formatted via `zk fmt` and `zk lint`. Signed-off-by: Danil --- zk_toolbox/rust-toolchain | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/zk_toolbox/rust-toolchain b/zk_toolbox/rust-toolchain index 54227249d1ff..aaceec04e040 100644 --- a/zk_toolbox/rust-toolchain +++ b/zk_toolbox/rust-toolchain @@ -1 +1 @@ -1.78.0 +1.80.0 From 7b9e7bf249157272f2c437b86e88d382dd845618 Mon Sep 17 00:00:00 2001 From: Patrick Date: Mon, 26 Aug 2024 13:21:56 +0200 Subject: [PATCH 14/39] docs(dal): update ProofGenerationDal docs chart (#2722) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit It should have been updated as part of these 2 PRs: - https://github.com/matter-labs/zksync-era/pull/2258 - https://github.com/matter-labs/zksync-era/pull/2486 ## What ❔ Update ProofGenerationDal docs chart. ## Why ❔ We like up-to-date docs. ## Checklist - [x] PR title corresponds to the body of PR (we generate changelog entries from PRs). - [ ] Tests for the changes have been added / updated. - [x] Documentation comments have been added / updated. - [x] Code has been formatted via `zk fmt` and `zk lint`. 
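
To make the updated chart easier to follow, here is a minimal Rust sketch of the lifecycle it describes. The status names and DAL method names are taken from the diagram in this patch; the enum, the `transition` helper, and its string-keyed dispatch are purely illustrative and do not exist in the codebase:

```rust
// Hypothetical mirror of the statuses in `proof_generation_details`; the real DAL
// stores these as strings in Postgres rather than as a Rust enum.
#[derive(Debug, Clone, Copy, PartialEq, Eq)]
enum ProofGenerationStatus {
    Unpicked,       // via insert_proof_generation_details (or unlock_batch)
    PickedByProver, // via lock_batch_for_proving
    Generated,      // via save_proof_artifacts_metadata (terminal)
    Skipped,        // via mark_proof_generation_job_as_skipped (terminal)
}

// One match arm per edge in the status diagram: (current state, DAL method) -> next state.
fn transition(
    from: Option<ProofGenerationStatus>,
    method: &str,
) -> Option<ProofGenerationStatus> {
    use ProofGenerationStatus::*;
    match (from, method) {
        (None, "insert_proof_generation_details") => Some(Unpicked),
        (None, "mark_proof_generation_job_as_skipped") => Some(Skipped),
        (Some(Unpicked), "lock_batch_for_proving") => Some(PickedByProver),
        (Some(PickedByProver), "save_proof_artifacts_metadata") => Some(Generated),
        // The edge documented in this patch: a picked batch can be released again.
        (Some(PickedByProver), "unlock_batch") => Some(Unpicked),
        _ => None, // no other transition is valid
    }
}

fn main() {
    let locked = transition(Some(ProofGenerationStatus::Unpicked), "lock_batch_for_proving");
    assert_eq!(locked, Some(ProofGenerationStatus::PickedByProver));
    assert_eq!(transition(locked, "unlock_batch"), Some(ProofGenerationStatus::Unpicked));
}
```

Encoding each edge as a match arm makes it obvious that `unlock_batch` is the only way back out of `picked_by_prover`.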
--- core/lib/dal/doc/ProofGenerationDal.md | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) diff --git a/core/lib/dal/doc/ProofGenerationDal.md b/core/lib/dal/doc/ProofGenerationDal.md index 618fdfba13b0..40ee31a4b1a2 100644 --- a/core/lib/dal/doc/ProofGenerationDal.md +++ b/core/lib/dal/doc/ProofGenerationDal.md @@ -11,9 +11,10 @@ proof_generation_details title: Status Diagram --- stateDiagram-v2 -[*] --> ready_to_be_proven : insert_proof_generation_details -ready_to_be_proven --> picked_by_prover : get_next_block_to_be_proven +[*] --> unpicked : insert_proof_generation_details +unpicked --> picked_by_prover : lock_batch_for_proving picked_by_prover --> generated : save_proof_artifacts_metadata +picked_by_prover --> unpicked : unlock_batch generated --> [*] [*] --> skipped : mark_proof_generation_job_as_skipped From aea3726c88b4e881bcd0f4a60ff32a730f200938 Mon Sep 17 00:00:00 2001 From: perekopskiy <53865202+perekopskiy@users.noreply.github.com> Date: Mon, 26 Aug 2024 18:05:04 +0300 Subject: [PATCH 15/39] fix(api): `tx.gas_price` field (#2734) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit ## What ❔ Fixes `tx.gas_price` field for legacy and EIP-2930 transactions. ## Why ❔ Follow the [spec](https://ethereum.github.io/execution-apis/api-documentation/) ## Checklist - [ ] PR title corresponds to the body of PR (we generate changelog entries from PRs). - [ ] Tests for the changes have been added / updated. - [ ] Documentation comments have been added / updated. - [ ] Code has been formatted via `zk fmt` and `zk lint`. --- .../lib/dal/src/models/storage_transaction.rs | 19 ++++++++++++++----- .../ts-integration/tests/api/web3.test.ts | 6 ++++-- .../src/commands/database/reset.rs | 2 +- .../zk_supervisor/src/commands/test/prover.rs | 2 +- 4 files changed, 20 insertions(+), 9 deletions(-) diff --git a/core/lib/dal/src/models/storage_transaction.rs b/core/lib/dal/src/models/storage_transaction.rs index aca93ee8c5a9..9f67e9025e0c 100644 --- a/core/lib/dal/src/models/storage_transaction.rs +++ b/core/lib/dal/src/models/storage_transaction.rs @@ -508,6 +508,19 @@ impl StorageApiTransaction { .signature .and_then(|signature| PackedEthSignature::deserialize_packed(&signature).ok()); + // For legacy and EIP-2930 transactions it is gas price willing to be paid by the sender in wei. + // For other transactions it should be the effective gas price if transaction is included in block, + // otherwise this value should be set equal to the max fee per gas. 
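+        // As an illustration (hypothetical numbers, not from the spec): a legacy tx stored with
+        // `max_fee_per_gas` of 2 gwei reports `gas_price` = 2 gwei even if it was included at an
+        // effective price of 1.5 gwei, while an EIP-1559 tx included in the same block reports
+        // the 1.5 gwei it actually paid.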
+ let gas_price = match self.tx_format { + None | Some(0) | Some(1) => self + .max_fee_per_gas + .clone() + .unwrap_or_else(BigDecimal::zero), + _ => self + .effective_gas_price + .or_else(|| self.max_fee_per_gas.clone()) + .unwrap_or_else(BigDecimal::zero), + }; let mut tx = api::Transaction { hash: H256::from_slice(&self.tx_hash), nonce: U256::from(self.nonce.unwrap_or(0) as u64), @@ -517,11 +530,7 @@ impl StorageApiTransaction { from: Some(Address::from_slice(&self.initiator_address)), to: Some(serde_json::from_value(self.execute_contract_address).unwrap()), value: bigdecimal_to_u256(self.value), - gas_price: Some(bigdecimal_to_u256( - self.effective_gas_price - .or_else(|| self.max_fee_per_gas.clone()) - .unwrap_or_else(BigDecimal::zero), - )), + gas_price: Some(bigdecimal_to_u256(gas_price)), gas: bigdecimal_to_u256(self.gas_limit.unwrap_or_else(BigDecimal::zero)), input: serde_json::from_value(self.calldata).expect("incorrect calldata in Postgres"), v: signature.as_ref().map(|s| U64::from(s.v())), diff --git a/core/tests/ts-integration/tests/api/web3.test.ts b/core/tests/ts-integration/tests/api/web3.test.ts index c6d0ae40a43a..b20e9d1e37d3 100644 --- a/core/tests/ts-integration/tests/api/web3.test.ts +++ b/core/tests/ts-integration/tests/api/web3.test.ts @@ -249,14 +249,16 @@ describe('web3 API compatibility tests', () => { test('Should check transactions from API / Legacy tx', async () => { const LEGACY_TX_TYPE = 0; + const gasPrice = (await alice._providerL2().getGasPrice()) * 2n; const legacyTx = await alice.sendTransaction({ type: LEGACY_TX_TYPE, - to: alice.address + to: alice.address, + gasPrice }); await legacyTx.wait(); const legacyApiReceipt = await alice.provider.getTransaction(legacyTx.hash); - expect(legacyApiReceipt.gasPrice).toBeLessThanOrEqual(legacyTx.gasPrice!); + expect(legacyApiReceipt.gasPrice).toEqual(gasPrice); }); test('Should check transactions from API / EIP1559 tx', async () => { diff --git a/zk_toolbox/crates/zk_supervisor/src/commands/database/reset.rs b/zk_toolbox/crates/zk_supervisor/src/commands/database/reset.rs index d25f2a8cd54b..88f2069bf3ae 100644 --- a/zk_toolbox/crates/zk_supervisor/src/commands/database/reset.rs +++ b/zk_toolbox/crates/zk_supervisor/src/commands/database/reset.rs @@ -26,7 +26,7 @@ pub async fn run(shell: &Shell, args: DatabaseCommonArgs) -> anyhow::Result<()> let dals = get_dals(shell, &args.selected_dals)?; for dal in dals { - logger::info(&msg_database_loading(MSG_DATABASE_RESET_GERUND, &dal.path)); + logger::info(msg_database_loading(MSG_DATABASE_RESET_GERUND, &dal.path)); reset_database(shell, ecoseystem_config.link_to_code.clone(), dal).await?; } diff --git a/zk_toolbox/crates/zk_supervisor/src/commands/test/prover.rs b/zk_toolbox/crates/zk_supervisor/src/commands/test/prover.rs index 3d8131a180c3..4e9c4fc25283 100644 --- a/zk_toolbox/crates/zk_supervisor/src/commands/test/prover.rs +++ b/zk_toolbox/crates/zk_supervisor/src/commands/test/prover.rs @@ -6,7 +6,7 @@ use crate::messages::MSG_PROVER_TEST_SUCCESS; pub fn run(shell: &Shell) -> anyhow::Result<()> { let ecosystem = EcosystemConfig::from_file(shell)?; - let _dir_guard = shell.push_dir(&ecosystem.link_to_code.join("prover")); + let _dir_guard = shell.push_dir(ecosystem.link_to_code.join("prover")); Cmd::new(cmd!(shell, "cargo test --release --workspace --locked")) .with_force_run() From d8e43e77ed9bf91dde1cacdb1698afd366bb3c1a Mon Sep 17 00:00:00 2001 From: EmilLuta Date: Mon, 26 Aug 2024 18:14:01 +0200 Subject: [PATCH 16/39] chore: Fix SQLX vulnerability (#2736) 
SQLX 0.8.0 had a vulnerability, which didn't affect us. At the time of discovery, there was no fix. We silenced the warning to unlock development. This PR bumps SQLX to 0.8.1 which includes the vulnerability fix and removes the cargo deny allowlist. Co-authored-by: perekopskiy <53865202+perekopskiy@users.noreply.github.com> --- Cargo.lock | 41 +++++++++++++++++++------------------ Cargo.toml | 2 +- deny.toml | 4 +--- docs/guides/setup-dev.md | 4 ++-- prover/Cargo.lock | 38 +++++++++++++++++----------------- prover/Cargo.toml | 2 +- zk_toolbox/Cargo.lock | 44 +++++++++++++++++++++++----------------- zk_toolbox/Cargo.toml | 2 +- 8 files changed, 71 insertions(+), 66 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index f60faf9fdf96..0d4ba4c23834 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -889,12 +889,13 @@ checksum = "37b2a672a2cb129a2e41c10b1224bb368f9f37a2b16b612598138befd7b37eb5" [[package]] name = "cc" -version = "1.0.83" +version = "1.1.14" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f1174fb0b6ec23863f8b971027804a42614e347eafb0a95bf0b12cdae21fc4d0" +checksum = "50d2eb3cd3d1bf4529e31c215ee6f93ec5a3d536d9f578f93d9d33ee19562932" dependencies = [ "jobserver", "libc", + "shlex", ] [[package]] @@ -3184,9 +3185,9 @@ checksum = "8eaf4bc02d17cbdd7ff4c7438cafcdf7fb9a4613313ad11b4f8fefe7d3fa0130" [[package]] name = "jobserver" -version = "0.1.27" +version = "0.1.32" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8c37f63953c4c63420ed5fd3d6d398c719489b9f872b9fa683262f8edd363c7d" +checksum = "48d1dbcbbeb6a7fec7e059840aa538bd62aaccf972c7346c4d9d2059312853d0" dependencies = [ "libc", ] @@ -3486,9 +3487,9 @@ dependencies = [ [[package]] name = "libsqlite3-sys" -version = "0.28.0" +version = "0.30.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0c10584274047cb335c23d3e61bcef8e323adae7c5c8c760540f73610177fc3f" +checksum = "2e99fb7a497b1e3339bc746195567ed8d3e24945ecd636e3619d20b9de9e9149" dependencies = [ "cc", "pkg-config", @@ -6073,9 +6074,9 @@ dependencies = [ [[package]] name = "sqlx" -version = "0.8.0" +version = "0.8.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "27144619c6e5802f1380337a209d2ac1c431002dd74c6e60aebff3c506dc4f0c" +checksum = "fcfa89bea9500db4a0d038513d7a060566bfc51d46d1c014847049a45cce85e8" dependencies = [ "sqlx-core", "sqlx-macros", @@ -6086,9 +6087,9 @@ dependencies = [ [[package]] name = "sqlx-core" -version = "0.8.0" +version = "0.8.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a999083c1af5b5d6c071d34a708a19ba3e02106ad82ef7bbd69f5e48266b613b" +checksum = "d06e2f2bd861719b1f3f0c7dbe1d80c30bf59e76cf019f07d9014ed7eefb8e08" dependencies = [ "atoi", "bigdecimal", @@ -6130,9 +6131,9 @@ dependencies = [ [[package]] name = "sqlx-macros" -version = "0.8.0" +version = "0.8.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a23217eb7d86c584b8cbe0337b9eacf12ab76fe7673c513141ec42565698bb88" +checksum = "2f998a9defdbd48ed005a89362bd40dd2117502f15294f61c8d47034107dbbdc" dependencies = [ "proc-macro2 1.0.86", "quote 1.0.36", @@ -6143,9 +6144,9 @@ dependencies = [ [[package]] name = "sqlx-macros-core" -version = "0.8.0" +version = "0.8.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1a099220ae541c5db479c6424bdf1b200987934033c2584f79a0e1693601e776" +checksum = "3d100558134176a2629d46cec0c8891ba0be8910f7896abfdb75ef4ab6f4e7ce" dependencies = [ "dotenvy", "either", @@ 
-6169,9 +6170,9 @@ dependencies = [ [[package]] name = "sqlx-mysql" -version = "0.8.0" +version = "0.8.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5afe4c38a9b417b6a9a5eeffe7235d0a106716495536e7727d1c7f4b1ff3eba6" +checksum = "936cac0ab331b14cb3921c62156d913e4c15b74fb6ec0f3146bd4ef6e4fb3c12" dependencies = [ "atoi", "base64 0.22.1", @@ -6214,9 +6215,9 @@ dependencies = [ [[package]] name = "sqlx-postgres" -version = "0.8.0" +version = "0.8.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b1dbb157e65f10dbe01f729339c06d239120221c9ad9fa0ba8408c4cc18ecf21" +checksum = "9734dbce698c67ecf67c442f768a5e90a49b2a4d61a9f1d59f73874bd4cf0710" dependencies = [ "atoi", "base64 0.22.1", @@ -6257,9 +6258,9 @@ dependencies = [ [[package]] name = "sqlx-sqlite" -version = "0.8.0" +version = "0.8.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9b2cdd83c008a622d94499c0006d8ee5f821f36c89b7d625c900e5dc30b5c5ee" +checksum = "a75b419c3c1b1697833dd927bdc4c6545a620bc1bbafabd44e1efbe9afcd337e" dependencies = [ "atoi", "chrono", diff --git a/Cargo.toml b/Cargo.toml index d4855a34b9de..6ee6ce79e490 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -167,7 +167,7 @@ serde_with = "1" serde_yaml = "0.9" sha2 = "0.10.8" sha3 = "0.10.8" -sqlx = "0.8.0" +sqlx = "0.8.1" static_assertions = "1.1" structopt = "0.3.20" strum = "0.26" diff --git a/deny.toml b/deny.toml index 3ed6dcb74413..1e4a30ad6231 100644 --- a/deny.toml +++ b/deny.toml @@ -6,9 +6,7 @@ vulnerability = "deny" unmaintained = "warn" yanked = "warn" notice = "warn" -ignore = [ - "RUSTSEC-2024-0363", # allows sqlx@0.8.0 until fix is released, more here -- https://github.com/launchbadge/sqlx/issues/3440 -] +ignore = [] [licenses] unlicensed = "deny" diff --git a/docs/guides/setup-dev.md b/docs/guides/setup-dev.md index f656eab0fdc6..10eb329628c1 100644 --- a/docs/guides/setup-dev.md +++ b/docs/guides/setup-dev.md @@ -43,7 +43,7 @@ yarn set version 1.22.19 # For running unit tests cargo install cargo-nextest # SQL tools -cargo install sqlx-cli --version 0.8.0 +cargo install sqlx-cli --version 0.8.1 # Foundry curl -L https://foundry.paradigm.xyz | bash @@ -217,7 +217,7 @@ SQLx is a Rust library we use to interact with Postgres, and its CLI is used to features of the library. 
```bash -cargo install --locked sqlx-cli --version 0.8.0 +cargo install --locked sqlx-cli --version 0.8.1 ``` ## Easier method using `nix` diff --git a/prover/Cargo.lock b/prover/Cargo.lock index 8268b121847c..c510198ab65b 100644 --- a/prover/Cargo.lock +++ b/prover/Cargo.lock @@ -839,13 +839,13 @@ dependencies = [ [[package]] name = "cc" -version = "1.0.98" +version = "1.1.14" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "41c270e7540d725e65ac7f1b212ac8ce349719624d7bcff99f8e2e488e8cf03f" +checksum = "50d2eb3cd3d1bf4529e31c215ee6f93ec5a3d536d9f578f93d9d33ee19562932" dependencies = [ "jobserver", "libc", - "once_cell", + "shlex", ] [[package]] @@ -3276,9 +3276,9 @@ dependencies = [ [[package]] name = "libsqlite3-sys" -version = "0.28.0" +version = "0.30.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0c10584274047cb335c23d3e61bcef8e323adae7c5c8c760540f73610177fc3f" +checksum = "2e99fb7a497b1e3339bc746195567ed8d3e24945ecd636e3619d20b9de9e9149" dependencies = [ "cc", "pkg-config", @@ -5746,9 +5746,9 @@ dependencies = [ [[package]] name = "sqlx" -version = "0.8.0" +version = "0.8.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "27144619c6e5802f1380337a209d2ac1c431002dd74c6e60aebff3c506dc4f0c" +checksum = "fcfa89bea9500db4a0d038513d7a060566bfc51d46d1c014847049a45cce85e8" dependencies = [ "sqlx-core", "sqlx-macros", @@ -5759,9 +5759,9 @@ dependencies = [ [[package]] name = "sqlx-core" -version = "0.8.0" +version = "0.8.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a999083c1af5b5d6c071d34a708a19ba3e02106ad82ef7bbd69f5e48266b613b" +checksum = "d06e2f2bd861719b1f3f0c7dbe1d80c30bf59e76cf019f07d9014ed7eefb8e08" dependencies = [ "atoi", "bigdecimal", @@ -5803,9 +5803,9 @@ dependencies = [ [[package]] name = "sqlx-macros" -version = "0.8.0" +version = "0.8.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a23217eb7d86c584b8cbe0337b9eacf12ab76fe7673c513141ec42565698bb88" +checksum = "2f998a9defdbd48ed005a89362bd40dd2117502f15294f61c8d47034107dbbdc" dependencies = [ "proc-macro2 1.0.85", "quote 1.0.36", @@ -5816,9 +5816,9 @@ dependencies = [ [[package]] name = "sqlx-macros-core" -version = "0.8.0" +version = "0.8.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1a099220ae541c5db479c6424bdf1b200987934033c2584f79a0e1693601e776" +checksum = "3d100558134176a2629d46cec0c8891ba0be8910f7896abfdb75ef4ab6f4e7ce" dependencies = [ "dotenvy", "either", @@ -5842,9 +5842,9 @@ dependencies = [ [[package]] name = "sqlx-mysql" -version = "0.8.0" +version = "0.8.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5afe4c38a9b417b6a9a5eeffe7235d0a106716495536e7727d1c7f4b1ff3eba6" +checksum = "936cac0ab331b14cb3921c62156d913e4c15b74fb6ec0f3146bd4ef6e4fb3c12" dependencies = [ "atoi", "base64 0.22.1", @@ -5887,9 +5887,9 @@ dependencies = [ [[package]] name = "sqlx-postgres" -version = "0.8.0" +version = "0.8.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b1dbb157e65f10dbe01f729339c06d239120221c9ad9fa0ba8408c4cc18ecf21" +checksum = "9734dbce698c67ecf67c442f768a5e90a49b2a4d61a9f1d59f73874bd4cf0710" dependencies = [ "atoi", "base64 0.22.1", @@ -5930,9 +5930,9 @@ dependencies = [ [[package]] name = "sqlx-sqlite" -version = "0.8.0" +version = "0.8.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"9b2cdd83c008a622d94499c0006d8ee5f821f36c89b7d625c900e5dc30b5c5ee" +checksum = "a75b419c3c1b1697833dd927bdc4c6545a620bc1bbafabd44e1efbe9afcd337e" dependencies = [ "atoi", "chrono", diff --git a/prover/Cargo.toml b/prover/Cargo.toml index 9a1a50a2ddb5..88b5b626704b 100644 --- a/prover/Cargo.toml +++ b/prover/Cargo.toml @@ -45,7 +45,7 @@ serde = "1.0" serde_derive = "1.0" serde_json = "1.0" sha3 = "0.10.8" -sqlx = { version = "0.8.0", default-features = false } +sqlx = { version = "0.8.1", default-features = false } structopt = "0.3.26" strum = { version = "0.26" } tempfile = "3" diff --git a/zk_toolbox/Cargo.lock b/zk_toolbox/Cargo.lock index 7682b92a4f2d..efc0e56ac948 100644 --- a/zk_toolbox/Cargo.lock +++ b/zk_toolbox/Cargo.lock @@ -501,13 +501,13 @@ dependencies = [ [[package]] name = "cc" -version = "1.0.104" +version = "1.1.14" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "74b6a57f98764a267ff415d50a25e6e166f3831a5071af4995296ea97d210490" +checksum = "50d2eb3cd3d1bf4529e31c215ee6f93ec5a3d536d9f578f93d9d33ee19562932" dependencies = [ "jobserver", "libc", - "once_cell", + "shlex", ] [[package]] @@ -2573,9 +2573,9 @@ dependencies = [ [[package]] name = "libsqlite3-sys" -version = "0.28.0" +version = "0.30.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0c10584274047cb335c23d3e61bcef8e323adae7c5c8c760540f73610177fc3f" +checksum = "2e99fb7a497b1e3339bc746195567ed8d3e24945ecd636e3619d20b9de9e9149" dependencies = [ "cc", "pkg-config", @@ -4564,6 +4564,12 @@ dependencies = [ "lazy_static", ] +[[package]] +name = "shlex" +version = "1.3.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "0fda2ff0d084019ba4d7c6f371c95d8fd75ce3524c3cb8fb653a3023f6323e64" + [[package]] name = "signal-hook-registry" version = "1.4.2" @@ -4716,9 +4722,9 @@ dependencies = [ [[package]] name = "sqlx" -version = "0.8.0" +version = "0.8.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "27144619c6e5802f1380337a209d2ac1c431002dd74c6e60aebff3c506dc4f0c" +checksum = "fcfa89bea9500db4a0d038513d7a060566bfc51d46d1c014847049a45cce85e8" dependencies = [ "sqlx-core", "sqlx-macros", @@ -4729,9 +4735,9 @@ dependencies = [ [[package]] name = "sqlx-core" -version = "0.8.0" +version = "0.8.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a999083c1af5b5d6c071d34a708a19ba3e02106ad82ef7bbd69f5e48266b613b" +checksum = "d06e2f2bd861719b1f3f0c7dbe1d80c30bf59e76cf019f07d9014ed7eefb8e08" dependencies = [ "atoi", "byteorder", @@ -4768,9 +4774,9 @@ dependencies = [ [[package]] name = "sqlx-macros" -version = "0.8.0" +version = "0.8.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a23217eb7d86c584b8cbe0337b9eacf12ab76fe7673c513141ec42565698bb88" +checksum = "2f998a9defdbd48ed005a89362bd40dd2117502f15294f61c8d47034107dbbdc" dependencies = [ "proc-macro2", "quote", @@ -4781,9 +4787,9 @@ dependencies = [ [[package]] name = "sqlx-macros-core" -version = "0.8.0" +version = "0.8.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1a099220ae541c5db479c6424bdf1b200987934033c2584f79a0e1693601e776" +checksum = "3d100558134176a2629d46cec0c8891ba0be8910f7896abfdb75ef4ab6f4e7ce" dependencies = [ "dotenvy", "either", @@ -4807,9 +4813,9 @@ dependencies = [ [[package]] name = "sqlx-mysql" -version = "0.8.0" +version = "0.8.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"5afe4c38a9b417b6a9a5eeffe7235d0a106716495536e7727d1c7f4b1ff3eba6" +checksum = "936cac0ab331b14cb3921c62156d913e4c15b74fb6ec0f3146bd4ef6e4fb3c12" dependencies = [ "atoi", "base64 0.22.1", @@ -4849,9 +4855,9 @@ dependencies = [ [[package]] name = "sqlx-postgres" -version = "0.8.0" +version = "0.8.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b1dbb157e65f10dbe01f729339c06d239120221c9ad9fa0ba8408c4cc18ecf21" +checksum = "9734dbce698c67ecf67c442f768a5e90a49b2a4d61a9f1d59f73874bd4cf0710" dependencies = [ "atoi", "base64 0.22.1", @@ -4887,9 +4893,9 @@ dependencies = [ [[package]] name = "sqlx-sqlite" -version = "0.8.0" +version = "0.8.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9b2cdd83c008a622d94499c0006d8ee5f821f36c89b7d625c900e5dc30b5c5ee" +checksum = "a75b419c3c1b1697833dd927bdc4c6545a620bc1bbafabd44e1efbe9afcd337e" dependencies = [ "atoi", "flume", diff --git a/zk_toolbox/Cargo.toml b/zk_toolbox/Cargo.toml index ef2aed7c99c1..4a08776558ed 100644 --- a/zk_toolbox/Cargo.toml +++ b/zk_toolbox/Cargo.toml @@ -47,7 +47,7 @@ rand = "0.8.5" serde = { version = "1.0", features = ["derive"] } serde_json = "1.0" serde_yaml = "0.9" -sqlx = { version = "0.8.0", features = [ +sqlx = { version = "0.8.1", features = [ "runtime-tokio", "migrate", "postgres", From 650a63ae193d12b7cf921af73895dda3a00caa40 Mon Sep 17 00:00:00 2001 From: Zach Kolodny Date: Mon, 26 Aug 2024 12:33:38 -0400 Subject: [PATCH 17/39] feat(contracts: integrate protocol defense changes --- contracts | 2 +- .../versions/vm_fast/tests/l1_tx_execution.rs | 7 +- .../versions/vm_fast/tests/nonce_holder.rs | 11 +-- .../src/versions/vm_fast/tests/rollbacks.rs | 47 +++++++++--- .../tests/tester/transaction_test_info.rs | 73 ++++++++----------- .../vm_latest/tests/l1_tx_execution.rs | 7 +- .../versions/vm_latest/tests/nonce_holder.rs | 11 +-- .../src/versions/vm_latest/tests/rollbacks.rs | 47 +++++++++--- .../tests/tester/transaction_test_info.rs | 72 +++++++++--------- core/node/eth_sender/src/eth_tx_aggregator.rs | 5 ++ etc/env/base/chain.toml | 4 +- etc/env/base/contracts.toml | 4 +- 12 files changed, 169 insertions(+), 121 deletions(-) diff --git a/contracts b/contracts index 7ca5517510f2..75db4f372d9c 160000 --- a/contracts +++ b/contracts @@ -1 +1 @@ -Subproject commit 7ca5517510f2534a2fc25b16c429fdd4a439b89d +Subproject commit 75db4f372d9c5dc998626ba45451fad5af359ad7 diff --git a/core/lib/multivm/src/versions/vm_fast/tests/l1_tx_execution.rs b/core/lib/multivm/src/versions/vm_fast/tests/l1_tx_execution.rs index f1411497c24c..a4dbcfbe2ee8 100644 --- a/core/lib/multivm/src/versions/vm_fast/tests/l1_tx_execution.rs +++ b/core/lib/multivm/src/versions/vm_fast/tests/l1_tx_execution.rs @@ -117,9 +117,8 @@ fn test_l1_tx_execution() { let res = vm.vm.execute(VmExecutionMode::OneTx); let storage_logs = res.logs.storage_logs; let res = StorageWritesDeduplicator::apply_on_empty_state(&storage_logs); - // We changed one slot inside contract. However, the rewrite of the `basePubdataSpent` didn't happen, since it was the same - // as the start of the previous tx. Thus we have `+1` slot for the changed counter and `-1` slot for base pubdata spent - assert_eq!(res.initial_storage_writes, basic_initial_writes); + // We changed one slot inside contract. 
+    assert_eq!(res.initial_storage_writes - basic_initial_writes, 1);
 
     // No repeated writes
     let repeated_writes = res.repeated_storage_writes;
@@ -146,7 +145,7 @@ fn test_l1_tx_execution() {
     assert!(result.result.is_failed(), "The transaction should fail");
 
     let res = StorageWritesDeduplicator::apply_on_empty_state(&result.logs.storage_logs);
-    assert_eq!(res.initial_storage_writes, basic_initial_writes);
+    assert_eq!(res.initial_storage_writes, basic_initial_writes + 1);
     assert_eq!(res.repeated_storage_writes, 1);
 }
 
diff --git a/core/lib/multivm/src/versions/vm_fast/tests/nonce_holder.rs b/core/lib/multivm/src/versions/vm_fast/tests/nonce_holder.rs
index b18676cf2ba6..7b75d2b8bb72 100644
--- a/core/lib/multivm/src/versions/vm_fast/tests/nonce_holder.rs
+++ b/core/lib/multivm/src/versions/vm_fast/tests/nonce_holder.rs
@@ -36,6 +36,7 @@ impl From<NonceHolderTestMode> for u8 {
 #[test]
 fn test_nonce_holder() {
     let mut account = Account::random();
+    let hex_addr = hex::encode(account.address.to_fixed_bytes());
 
     let mut vm = VmTesterBuilder::new()
         .with_empty_in_memory_storage()
@@ -92,7 +93,7 @@ fn test_nonce_holder() {
     run_nonce_test(
         1u32,
         NonceHolderTestMode::SetValueUnderNonce,
-        Some("Previous nonce has not been used".to_string()),
+        Some("Error function_selector = 0x13595475, data = 0x13595475".to_string()),
         "Allowed to set value under non sequential value",
     );
 
@@ -133,7 +134,7 @@ fn test_nonce_holder() {
     run_nonce_test(
         10u32,
         NonceHolderTestMode::IncreaseMinNonceBy5,
-        Some("Reusing the same nonce twice".to_string()),
+        Some(format!("Error function_selector = 0xe90aded4, data = 0xe90aded4000000000000000000000000{hex_addr}000000000000000000000000000000000000000000000000000000000000000a")),
         "Allowed to reuse nonce below the minimal one",
     );
 
@@ -149,7 +150,7 @@ fn test_nonce_holder() {
     run_nonce_test(
         13u32,
         NonceHolderTestMode::IncreaseMinNonceBy5,
-        Some("Reusing the same nonce twice".to_string()),
+        Some(format!("Error function_selector = 0xe90aded4, data = 0xe90aded4000000000000000000000000{hex_addr}000000000000000000000000000000000000000000000000000000000000000d")),
         "Allowed to reuse the same nonce twice",
     );
 
@@ -165,7 +166,7 @@ fn test_nonce_holder() {
     run_nonce_test(
         16u32,
         NonceHolderTestMode::IncreaseMinNonceTooMuch,
-        Some("The value for incrementing the nonce is too high".to_string()),
+        Some("Error function_selector = 0x45ac24a6, data = 0x45ac24a600000000000000000000000000000000000000000000000000000001000000000000000000000000000000000000000000000000040000000000000000000000".to_string()),
         "Allowed for incrementing min nonce too much",
     );
 
@@ -173,7 +174,7 @@ fn test_nonce_holder() {
     run_nonce_test(
         16u32,
         NonceHolderTestMode::LeaveNonceUnused,
-        Some("The nonce was not set as used".to_string()),
+        Some(format!("Error function_selector = 0x1f2f8478, data = 0x1f2f8478000000000000000000000000{hex_addr}0000000000000000000000000000000000000000000000000000000000000010")),
         "Allowed to leave nonce as unused",
     );
 }
diff --git a/core/lib/multivm/src/versions/vm_fast/tests/rollbacks.rs b/core/lib/multivm/src/versions/vm_fast/tests/rollbacks.rs
index c530c5af18ea..fd5dc495435d 100644
--- a/core/lib/multivm/src/versions/vm_fast/tests/rollbacks.rs
+++ b/core/lib/multivm/src/versions/vm_fast/tests/rollbacks.rs
@@ -1,6 +1,6 @@
 use ethabi::Token;
 use zksync_contracts::{get_loadnext_contract, test_contracts::LoadnextContractExecutionParams};
-use zksync_types::{Execute, U256};
+use zksync_types::{Execute, Nonce, U256};
 
 use crate::{
     interface::TxExecutionMode,
@@ -38,22 +38,40 @@ fn test_vm_rollbacks() {
TransactionTestInfo::new_rejected(tx_0.clone(), TxModifier::WrongMagicValue.into()), TransactionTestInfo::new_rejected(tx_0.clone(), TxModifier::WrongSignature.into()), // The correct nonce is 0, this tx will fail - TransactionTestInfo::new_rejected(tx_2.clone(), TxModifier::WrongNonce.into()), + TransactionTestInfo::new_rejected( + tx_2.clone(), + TxModifier::WrongNonce(tx_2.nonce().unwrap(), Nonce(0)).into(), + ), // This tx will succeed TransactionTestInfo::new_processed(tx_0.clone(), false), // The correct nonce is 1, this tx will fail - TransactionTestInfo::new_rejected(tx_0.clone(), TxModifier::NonceReused.into()), + TransactionTestInfo::new_rejected( + tx_0.clone(), + TxModifier::NonceReused(tx_0.initiator_account(), tx_0.nonce().unwrap()).into(), + ), // The correct nonce is 1, this tx will fail - TransactionTestInfo::new_rejected(tx_2.clone(), TxModifier::WrongNonce.into()), + TransactionTestInfo::new_rejected( + tx_2.clone(), + TxModifier::WrongNonce(tx_2.nonce().unwrap(), Nonce(1)).into(), + ), // This tx will succeed TransactionTestInfo::new_processed(tx_1, false), // The correct nonce is 2, this tx will fail - TransactionTestInfo::new_rejected(tx_0.clone(), TxModifier::NonceReused.into()), + TransactionTestInfo::new_rejected( + tx_0.clone(), + TxModifier::NonceReused(tx_0.initiator_account(), tx_0.nonce().unwrap()).into(), + ), // This tx will succeed TransactionTestInfo::new_processed(tx_2.clone(), false), // This tx will fail - TransactionTestInfo::new_rejected(tx_2, TxModifier::NonceReused.into()), - TransactionTestInfo::new_rejected(tx_0, TxModifier::NonceReused.into()), + TransactionTestInfo::new_rejected( + tx_2.clone(), + TxModifier::NonceReused(tx_2.initiator_account(), tx_2.nonce().unwrap()).into(), + ), + TransactionTestInfo::new_rejected( + tx_0.clone(), + TxModifier::NonceReused(tx_0.initiator_account(), tx_0.nonce().unwrap()).into(), + ), ]); assert_eq!(result_without_rollbacks, result_with_rollbacks); @@ -131,12 +149,23 @@ fn test_vm_loadnext_rollbacks() { TransactionTestInfo::new_processed(loadnext_tx_1.clone(), true), TransactionTestInfo::new_rejected( loadnext_deploy_tx.clone(), - TxModifier::NonceReused.into(), + TxModifier::NonceReused( + loadnext_deploy_tx.initiator_account(), + loadnext_deploy_tx.nonce().unwrap(), + ) + .into(), ), TransactionTestInfo::new_processed(loadnext_tx_1, false), TransactionTestInfo::new_processed(loadnext_tx_2.clone(), true), TransactionTestInfo::new_processed(loadnext_tx_2.clone(), true), - TransactionTestInfo::new_rejected(loadnext_deploy_tx, TxModifier::NonceReused.into()), + TransactionTestInfo::new_rejected( + loadnext_deploy_tx.clone(), + TxModifier::NonceReused( + loadnext_deploy_tx.initiator_account(), + loadnext_deploy_tx.nonce().unwrap(), + ) + .into(), + ), TransactionTestInfo::new_processed(loadnext_tx_2, false), ]); diff --git a/core/lib/multivm/src/versions/vm_fast/tests/tester/transaction_test_info.rs b/core/lib/multivm/src/versions/vm_fast/tests/tester/transaction_test_info.rs index 562a8a6a6bdd..c3c1736902c9 100644 --- a/core/lib/multivm/src/versions/vm_fast/tests/tester/transaction_test_info.rs +++ b/core/lib/multivm/src/versions/vm_fast/tests/tester/transaction_test_info.rs @@ -1,4 +1,4 @@ -use zksync_types::{ExecuteTransactionCommon, Transaction, H160, U256}; +use zksync_types::{ExecuteTransactionCommon, Nonce, Transaction, H160, U256}; use super::VmTester; use crate::{ @@ -15,8 +15,8 @@ pub(crate) enum TxModifier { WrongSignatureLength, WrongSignature, WrongMagicValue, - WrongNonce, - NonceReused, + 
+    WrongNonce(Nonce, Nonce),
+    NonceReused(H160, Nonce),
 }
 
 #[derive(Debug, Clone)]
@@ -41,15 +41,9 @@ impl From<TxModifier> for ExpectedError {
     fn from(value: TxModifier) -> Self {
         let revert_reason = match value {
             TxModifier::WrongSignatureLength => {
-                Halt::ValidationFailed(VmRevertReason::General {
-                    msg: "Signature length is incorrect".to_string(),
-                    data: vec![
-                        8, 195, 121, 160, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
-                        0, 0, 0, 0, 0, 0, 0, 0, 32, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
-                        0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 29, 83, 105, 103, 110, 97, 116, 117, 114, 101, 32,
-                        108, 101, 110, 103, 116, 104, 32, 105, 115, 32, 105, 110, 99, 111, 114, 114, 101, 99,
-                        116, 0, 0, 0,
-                    ],
+                Halt::ValidationFailed(VmRevertReason::Unknown {
+                    function_selector: vec![144, 240, 73, 201],
+                    data: vec![144, 240, 73, 201, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 45],
                 })
             }
             TxModifier::WrongSignature => {
@@ -59,38 +53,35 @@ impl From<TxModifier> for ExpectedError {
                 })
             }
             TxModifier::WrongMagicValue => {
-                Halt::ValidationFailed(VmRevertReason::General {
-                    msg: "v is neither 27 nor 28".to_string(),
-                    data: vec![
-                        8, 195, 121, 160, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
-                        0, 0, 0, 0, 0, 0, 0, 0, 32, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
-                        0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 22, 118, 32, 105, 115, 32, 110, 101, 105, 116, 104,
-                        101, 114, 32, 50, 55, 32, 110, 111, 114, 32, 50, 56, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
-                    ],
+                Halt::ValidationFailed(VmRevertReason::Unknown {
+                    function_selector: vec![144, 240, 73, 201],
+                    data: vec![144, 240, 73, 201, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1],
                 })
             }
-            TxModifier::WrongNonce => {
-                Halt::ValidationFailed(VmRevertReason::General {
-                    msg: "Incorrect nonce".to_string(),
-                    data: vec![
-                        8, 195, 121, 160, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
-                        0, 0, 0, 0, 0, 0, 0, 0, 32, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
-                        0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 15, 73, 110, 99, 111, 114, 114, 101, 99, 116, 32, 110,
-                        111, 110, 99, 101, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
-                    ],
+            TxModifier::WrongNonce(expected, actual) => {
+                let function_selector = vec![98, 106, 222, 48];
+                let expected_nonce_bytes = expected.0.to_be_bytes().to_vec();
+                let actual_nonce_bytes = actual.0.to_be_bytes().to_vec();
+                // padding is 28 because an address takes up 4 bytes and we need it to fill a 32 byte field
+                let nonce_padding = vec![0u8; 28];
+                let data = [function_selector.clone(), nonce_padding.clone(), expected_nonce_bytes, nonce_padding.clone(), actual_nonce_bytes].concat();
+                Halt::ValidationFailed(VmRevertReason::Unknown {
+                    function_selector,
+                    data
                 })
             }
-            TxModifier::NonceReused => {
-                Halt::ValidationFailed(VmRevertReason::General {
-                    msg: "Reusing the same nonce twice".to_string(),
-                    data: vec![
-                        8, 195, 121, 160, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
-                        0, 0, 0, 0, 0, 0, 0, 0, 32, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
-                        0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 28, 82, 101, 117, 115, 105, 110, 103, 32, 116, 104,
-                        101, 32, 115, 97, 109, 101, 32, 110, 111, 110, 99, 101, 32, 116, 119, 105, 99, 101, 0,
-                        0, 0, 0,
-                    ],
+
TxModifier::NonceReused(addr, nonce) => { + let function_selector = vec![233, 10, 222, 212]; + let addr = addr.as_bytes().to_vec(); + // padding is 12 because an address takes up 20 bytes and we need it to fill a 32 byte field + let addr_padding = vec![0u8; 12]; + // padding is 28 because an address takes up 4 bytes and we need it to fill a 32 byte field + let nonce_padding = vec![0u8; 28]; + let data = [function_selector.clone(), addr_padding, addr, nonce_padding, nonce.0.to_be_bytes().to_vec()].concat(); + Halt::ValidationFailed(VmRevertReason::Unknown { + function_selector, + data, }) } }; @@ -116,10 +107,10 @@ impl TransactionTestInfo { } TxModifier::WrongSignature => data.signature = vec![27u8; 65], TxModifier::WrongMagicValue => data.signature = vec![1u8; 65], - TxModifier::WrongNonce => { + TxModifier::WrongNonce(_, _) => { // Do not need to modify signature for nonce error } - TxModifier::NonceReused => { + TxModifier::NonceReused(_, _) => { // Do not need to modify signature for nonce error } } diff --git a/core/lib/multivm/src/versions/vm_latest/tests/l1_tx_execution.rs b/core/lib/multivm/src/versions/vm_latest/tests/l1_tx_execution.rs index 4d42bb96cc96..3e76fbf12723 100644 --- a/core/lib/multivm/src/versions/vm_latest/tests/l1_tx_execution.rs +++ b/core/lib/multivm/src/versions/vm_latest/tests/l1_tx_execution.rs @@ -112,9 +112,8 @@ fn test_l1_tx_execution() { let res = vm.vm.execute(VmExecutionMode::OneTx); let storage_logs = res.logs.storage_logs; let res = StorageWritesDeduplicator::apply_on_empty_state(&storage_logs); - // We changed one slot inside contract. However, the rewrite of the `basePubdataSpent` didn't happen, since it was the same - // as the start of the previous tx. Thus we have `+1` slot for the changed counter and `-1` slot for base pubdata spent - assert_eq!(res.initial_storage_writes - basic_initial_writes, 0); + // We changed one slot inside contract. 
+    assert_eq!(res.initial_storage_writes - basic_initial_writes, 1);
 
     // No repeated writes
     let repeated_writes = res.repeated_storage_writes;
@@ -142,7 +141,7 @@ fn test_l1_tx_execution() {
     let res = StorageWritesDeduplicator::apply_on_empty_state(&result.logs.storage_logs);
     // There are only basic initial writes
-    assert_eq!(res.initial_storage_writes - basic_initial_writes, 1);
+    assert_eq!(res.initial_storage_writes - basic_initial_writes, 2);
 }
 
 #[test]
diff --git a/core/lib/multivm/src/versions/vm_latest/tests/nonce_holder.rs b/core/lib/multivm/src/versions/vm_latest/tests/nonce_holder.rs
index 076ecb523618..9e3e1cbadda1 100644
--- a/core/lib/multivm/src/versions/vm_latest/tests/nonce_holder.rs
+++ b/core/lib/multivm/src/versions/vm_latest/tests/nonce_holder.rs
@@ -40,6 +40,7 @@ impl From<NonceHolderTestMode> for u8 {
 #[test]
 fn test_nonce_holder() {
     let mut account = Account::random();
+    let hex_addr = hex::encode(account.address.to_fixed_bytes());
 
     let mut vm = VmTesterBuilder::new(HistoryEnabled)
         .with_empty_in_memory_storage()
@@ -101,7 +102,7 @@ fn test_nonce_holder() {
     run_nonce_test(
         1u32,
         NonceHolderTestMode::SetValueUnderNonce,
-        Some("Previous nonce has not been used".to_string()),
+        Some("Error function_selector = 0x13595475, data = 0x13595475".to_string()),
         "Allowed to set value under non sequential value",
     );
 
@@ -142,7 +143,7 @@ fn test_nonce_holder() {
     run_nonce_test(
         10u32,
         NonceHolderTestMode::IncreaseMinNonceBy5,
-        Some("Reusing the same nonce twice".to_string()),
+        Some(format!("Error function_selector = 0xe90aded4, data = 0xe90aded4000000000000000000000000{hex_addr}000000000000000000000000000000000000000000000000000000000000000a")),
         "Allowed to reuse nonce below the minimal one",
     );
 
@@ -158,7 +159,7 @@ fn test_nonce_holder() {
     run_nonce_test(
         13u32,
         NonceHolderTestMode::IncreaseMinNonceBy5,
-        Some("Reusing the same nonce twice".to_string()),
+        Some(format!("Error function_selector = 0xe90aded4, data = 0xe90aded4000000000000000000000000{hex_addr}000000000000000000000000000000000000000000000000000000000000000d")),
         "Allowed to reuse the same nonce twice",
     );
 
@@ -174,7 +175,7 @@ fn test_nonce_holder() {
     run_nonce_test(
         16u32,
         NonceHolderTestMode::IncreaseMinNonceTooMuch,
-        Some("The value for incrementing the nonce is too high".to_string()),
+        Some("Error function_selector = 0x45ac24a6, data = 0x45ac24a600000000000000000000000000000000000000000000000000000001000000000000000000000000000000000000000000000000040000000000000000000000".to_string()),
         "Allowed for incrementing min nonce too much",
     );
 
@@ -182,7 +183,7 @@ fn test_nonce_holder() {
     run_nonce_test(
         16u32,
         NonceHolderTestMode::LeaveNonceUnused,
-        Some("The nonce was not set as used".to_string()),
+        Some(format!("Error function_selector = 0x1f2f8478, data = 0x1f2f8478000000000000000000000000{hex_addr}0000000000000000000000000000000000000000000000000000000000000010")),
         "Allowed to leave nonce as unused",
     );
 }
diff --git a/core/lib/multivm/src/versions/vm_latest/tests/rollbacks.rs b/core/lib/multivm/src/versions/vm_latest/tests/rollbacks.rs
index 489c762aac4e..d5ea40d1f929 100644
--- a/core/lib/multivm/src/versions/vm_latest/tests/rollbacks.rs
+++ b/core/lib/multivm/src/versions/vm_latest/tests/rollbacks.rs
@@ -1,6 +1,6 @@
 use ethabi::Token;
 use zksync_contracts::{get_loadnext_contract, test_contracts::LoadnextContractExecutionParams};
-use zksync_types::{get_nonce_key, Execute, U256};
+use zksync_types::{get_nonce_key, Execute, Nonce, U256};
 
 use crate::{
     interface::{
@@ -47,22 +47,40 @@ fn test_vm_rollbacks() {
TransactionTestInfo::new_rejected(tx_0.clone(), TxModifier::WrongMagicValue.into()), TransactionTestInfo::new_rejected(tx_0.clone(), TxModifier::WrongSignature.into()), // The correct nonce is 0, this tx will fail - TransactionTestInfo::new_rejected(tx_2.clone(), TxModifier::WrongNonce.into()), + TransactionTestInfo::new_rejected( + tx_2.clone(), + TxModifier::WrongNonce(tx_2.nonce().unwrap(), Nonce(0)).into(), + ), // This tx will succeed TransactionTestInfo::new_processed(tx_0.clone(), false), // The correct nonce is 1, this tx will fail - TransactionTestInfo::new_rejected(tx_0.clone(), TxModifier::NonceReused.into()), + TransactionTestInfo::new_rejected( + tx_0.clone(), + TxModifier::NonceReused(tx_0.initiator_account(), tx_0.nonce().unwrap()).into(), + ), // The correct nonce is 1, this tx will fail - TransactionTestInfo::new_rejected(tx_2.clone(), TxModifier::WrongNonce.into()), + TransactionTestInfo::new_rejected( + tx_2.clone(), + TxModifier::WrongNonce(tx_2.nonce().unwrap(), Nonce(1)).into(), + ), // This tx will succeed TransactionTestInfo::new_processed(tx_1, false), // The correct nonce is 2, this tx will fail - TransactionTestInfo::new_rejected(tx_0.clone(), TxModifier::NonceReused.into()), + TransactionTestInfo::new_rejected( + tx_0.clone(), + TxModifier::NonceReused(tx_0.initiator_account(), tx_0.nonce().unwrap()).into(), + ), // This tx will succeed TransactionTestInfo::new_processed(tx_2.clone(), false), // This tx will fail - TransactionTestInfo::new_rejected(tx_2, TxModifier::NonceReused.into()), - TransactionTestInfo::new_rejected(tx_0, TxModifier::NonceReused.into()), + TransactionTestInfo::new_rejected( + tx_2.clone(), + TxModifier::NonceReused(tx_2.initiator_account(), tx_2.nonce().unwrap()).into(), + ), + TransactionTestInfo::new_rejected( + tx_0.clone(), + TxModifier::NonceReused(tx_0.initiator_account(), tx_0.nonce().unwrap()).into(), + ), ]); assert_eq!(result_without_rollbacks, result_with_rollbacks); @@ -140,12 +158,23 @@ fn test_vm_loadnext_rollbacks() { TransactionTestInfo::new_processed(loadnext_tx_1.clone(), true), TransactionTestInfo::new_rejected( loadnext_deploy_tx.clone(), - TxModifier::NonceReused.into(), + TxModifier::NonceReused( + loadnext_deploy_tx.initiator_account(), + loadnext_deploy_tx.nonce().unwrap(), + ) + .into(), ), TransactionTestInfo::new_processed(loadnext_tx_1, false), TransactionTestInfo::new_processed(loadnext_tx_2.clone(), true), TransactionTestInfo::new_processed(loadnext_tx_2.clone(), true), - TransactionTestInfo::new_rejected(loadnext_deploy_tx, TxModifier::NonceReused.into()), + TransactionTestInfo::new_rejected( + loadnext_deploy_tx.clone(), + TxModifier::NonceReused( + loadnext_deploy_tx.initiator_account(), + loadnext_deploy_tx.nonce().unwrap(), + ) + .into(), + ), TransactionTestInfo::new_processed(loadnext_tx_2, false), ]); diff --git a/core/lib/multivm/src/versions/vm_latest/tests/tester/transaction_test_info.rs b/core/lib/multivm/src/versions/vm_latest/tests/tester/transaction_test_info.rs index 114f80d1a217..ccaab547c20c 100644 --- a/core/lib/multivm/src/versions/vm_latest/tests/tester/transaction_test_info.rs +++ b/core/lib/multivm/src/versions/vm_latest/tests/tester/transaction_test_info.rs @@ -1,4 +1,4 @@ -use zksync_types::{ExecuteTransactionCommon, Transaction}; +use zksync_types::{ExecuteTransactionCommon, Nonce, Transaction, H160}; use crate::{ interface::{ @@ -13,8 +13,8 @@ pub(crate) enum TxModifier { WrongSignatureLength, WrongSignature, WrongMagicValue, - WrongNonce, - NonceReused, + WrongNonce(Nonce, Nonce), + 
+    NonceReused(H160, Nonce),
 }
 
 #[derive(Debug, Clone)]
@@ -39,14 +39,11 @@ impl From<TxModifier> for ExpectedError {
     fn from(value: TxModifier) -> Self {
         let revert_reason = match value {
             TxModifier::WrongSignatureLength => {
-                Halt::ValidationFailed(VmRevertReason::General {
-                    msg: "Signature length is incorrect".to_string(),
+                Halt::ValidationFailed(VmRevertReason::Unknown {
+                    function_selector: vec![144, 240, 73, 201],
                     data: vec![
-                        8, 195, 121, 160, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
-                        0, 0, 0, 0, 0, 0, 0, 0, 32, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
-                        0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 29, 83, 105, 103, 110, 97, 116, 117, 114, 101, 32,
-                        108, 101, 110, 103, 116, 104, 32, 105, 115, 32, 105, 110, 99, 111, 114, 114, 101, 99,
-                        116, 0, 0, 0,
+                        144, 240, 73, 201, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+                        0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 45
                     ],
                 })
             }
@@ -57,38 +54,35 @@ impl From<TxModifier> for ExpectedError {
                 })
             }
             TxModifier::WrongMagicValue => {
-                Halt::ValidationFailed(VmRevertReason::General {
-                    msg: "v is neither 27 nor 28".to_string(),
-                    data: vec![
-                        8, 195, 121, 160, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
-                        0, 0, 0, 0, 0, 0, 0, 0, 32, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
-                        0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 22, 118, 32, 105, 115, 32, 110, 101, 105, 116, 104,
-                        101, 114, 32, 50, 55, 32, 110, 111, 114, 32, 50, 56, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
-                    ],
+                Halt::ValidationFailed(VmRevertReason::Unknown {
+                    function_selector: vec![144, 240, 73, 201],
+                    data: vec![144, 240, 73, 201, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1],
                 })
             }
-            TxModifier::WrongNonce => {
-                Halt::ValidationFailed(VmRevertReason::General {
-                    msg: "Incorrect nonce".to_string(),
-                    data: vec![
-                        8, 195, 121, 160, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
-                        0, 0, 0, 0, 0, 0, 0, 0, 32, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
-                        0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 15, 73, 110, 99, 111, 114, 114, 101, 99, 116, 32, 110,
-                        111, 110, 99, 101, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
-                    ],
+            TxModifier::WrongNonce(expected, actual) => {
+                let function_selector = vec![98, 106, 222, 48];
+                let expected_nonce_bytes = expected.0.to_be_bytes().to_vec();
+                let actual_nonce_bytes = actual.0.to_be_bytes().to_vec();
+                // padding is 28 because an address takes up 4 bytes and we need it to fill a 32 byte field
+                let nonce_padding = vec![0u8; 28];
+                let data = [function_selector.clone(), nonce_padding.clone(), expected_nonce_bytes, nonce_padding.clone(), actual_nonce_bytes].concat();
+                Halt::ValidationFailed(VmRevertReason::Unknown {
+                    function_selector,
+                    data
                 })
             }
-            TxModifier::NonceReused => {
-                Halt::ValidationFailed(VmRevertReason::General {
-                    msg: "Reusing the same nonce twice".to_string(),
-                    data: vec![
-                        8, 195, 121, 160, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
-                        0, 0, 0, 0, 0, 0, 0, 0, 32, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
-                        0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 28, 82, 101, 117, 115, 105, 110, 103, 32, 116, 104,
-                        101, 32, 115, 97, 109, 101, 32, 110, 111, 110, 99, 101, 32, 116, 119, 105, 99, 101, 0,
-                        0, 0, 0,
-                    ],
+            TxModifier::NonceReused(addr, nonce) => {
+                let function_selector = vec![233, 10,
222, 212]; + let addr = addr.as_bytes().to_vec(); + // padding is 12 because an address takes up 20 bytes and we need it to fill a 32 byte field + let addr_padding = vec![0u8; 12]; + // padding is 28 because an address takes up 4 bytes and we need it to fill a 32 byte field + let nonce_padding = vec![0u8; 28]; + let data = [function_selector.clone(), addr_padding, addr, nonce_padding, nonce.0.to_be_bytes().to_vec()].concat(); + Halt::ValidationFailed(VmRevertReason::Unknown { + function_selector, + data, }) } }; @@ -114,10 +108,10 @@ impl TransactionTestInfo { } TxModifier::WrongSignature => data.signature = vec![27u8; 65], TxModifier::WrongMagicValue => data.signature = vec![1u8; 65], - TxModifier::WrongNonce => { + TxModifier::WrongNonce(_, _) => { // Do not need to modify signature for nonce error } - TxModifier::NonceReused => { + TxModifier::NonceReused(_, _) => { // Do not need to modify signature for nonce error } } diff --git a/core/node/eth_sender/src/eth_tx_aggregator.rs b/core/node/eth_sender/src/eth_tx_aggregator.rs index 7d6a6b234742..276e62d01a8c 100644 --- a/core/node/eth_sender/src/eth_tx_aggregator.rs +++ b/core/node/eth_sender/src/eth_tx_aggregator.rs @@ -493,6 +493,11 @@ impl EthTxAggregator { .encode_input(&op.into_tokens()) .expect("Failed to encode execute transaction data") } else { + dbg!(self + .functions + .post_shared_bridge_execute + .as_ref() + .expect("Missing ABI for executeBatchesSharedBridge")); args.extend(op.into_tokens()); self.functions .post_shared_bridge_execute diff --git a/etc/env/base/chain.toml b/etc/env/base/chain.toml index 6cfacb3c72ce..7d0ec3791431 100644 --- a/etc/env/base/chain.toml +++ b/etc/env/base/chain.toml @@ -90,8 +90,8 @@ fee_model_version = "V2" validation_computational_gas_limit = 300000 save_call_traces = true -bootloader_hash = "0x010008e742608b21bf7eb23c1a9d0602047e3618b464c9b59c0fba3b3d7ab66e" -default_aa_hash = "0x01000563374c277a2c1e34659a2a1e87371bb6d852ce142022d497bfb50b9e32" +bootloader_hash = "0x010008e50a84cb5e11b650d08b8040a90cd42203490f09362e5e39f1925234aa" +default_aa_hash = "0x0100055da70d970f98ca4677a4b2fcecef5354f345cc5c6d13a78339e5fd87a9" protective_reads_persistence_enabled = false diff --git a/etc/env/base/contracts.toml b/etc/env/base/contracts.toml index daa317a8bc90..a01979ad6e88 100644 --- a/etc/env/base/contracts.toml +++ b/etc/env/base/contracts.toml @@ -26,8 +26,8 @@ RECURSION_NODE_LEVEL_VK_HASH = "0x1186ec268d49f1905f8d9c1e9d39fc33e98c74f91d91a2 RECURSION_LEAF_LEVEL_VK_HASH = "0x101e08b00193e529145ee09823378ef51a3bc8966504064f1f6ba3f1ba863210" RECURSION_CIRCUITS_SET_VKS_HASH = "0x18c1639094f58177409186e8c48d9f577c9410901d2f1d486b3e7d6cf553ae4c" GENESIS_TX_HASH = "0xb99ebfea46cbe05a21cd80fe5597d97b204befc52a16303f579c607dc1ac2e2e" -GENESIS_ROOT = "0xabdb766b18a479a5c783a4b80e12686bc8ea3cc2d8a3050491b701d72370ebb5" -GENESIS_BATCH_COMMITMENT = "0x2d00e5f8d77afcebf58a6b82ae56ba967566fe7dfbcb6760319fb0d215d18ffd" +GENESIS_ROOT = "0x28a7e67393021f957572495f8fdadc2c477ae3f4f413ae18c16cff6ee65680e2" +GENESIS_BATCH_COMMITMENT = "0xf4d78dee7dbe63223c5de200e6cbce37dceff86c07421b7c96e0ba142db8a14e" PRIORITY_TX_MAX_GAS_LIMIT = 72000000 DEPLOY_L2_BRIDGE_COUNTERPART_GAS_LIMIT = 10000000 GENESIS_ROLLUP_LEAF_INDEX = "54" From fd54692c267773622c934e129251f76ce2732a1f Mon Sep 17 00:00:00 2001 From: perekopskiy <53865202+perekopskiy@users.noreply.github.com> Date: Tue, 27 Aug 2024 10:53:53 +0300 Subject: [PATCH 18/39] feat(contract-verifier): Add compilers to contract-verifier (#2738) MIME-Version: 1.0 Content-Type: 
text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit ## What ❔ Adds zksolc 1.5.3, zkvyper 1.5.4 ## Checklist - [ ] PR title corresponds to the body of PR (we generate changelog entries from PRs). - [ ] Tests for the changes have been added / updated. - [ ] Documentation comments have been added / updated. - [ ] Code has been formatted via `zk fmt` and `zk lint`. --- docker/contract-verifier/Dockerfile | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/docker/contract-verifier/Dockerfile b/docker/contract-verifier/Dockerfile index 6f7df349d66f..7ed1906b8574 100644 --- a/docker/contract-verifier/Dockerfile +++ b/docker/contract-verifier/Dockerfile @@ -34,7 +34,7 @@ RUN mkdir -p /etc/zksolc-bin/vm-1.5.0-a167aa3 && \ chmod +x /etc/zksolc-bin/vm-1.5.0-a167aa3/zksolc # install zksolc 1.5.x -RUN for VERSION in $(seq -f "v1.5.%g" 0 2); do \ +RUN for VERSION in $(seq -f "v1.5.%g" 0 3); do \ mkdir -p /etc/zksolc-bin/$VERSION && \ wget https://github.com/matter-labs/zksolc-bin/raw/main/linux-amd64/zksolc-linux-amd64-musl-$VERSION -O /etc/zksolc-bin/$VERSION/zksolc && \ chmod +x /etc/zksolc-bin/$VERSION/zksolc; \ @@ -55,7 +55,7 @@ RUN for VERSION in $(seq -f "v1.4.%g" 0 1); do \ done # install zkvyper 1.5.x -RUN for VERSION in $(seq -f "v1.5.%g" 0 3); do \ +RUN for VERSION in $(seq -f "v1.5.%g" 0 4); do \ mkdir -p /etc/zkvyper-bin/$VERSION && \ wget https://github.com/matter-labs/zkvyper-bin/raw/main/linux-amd64/zkvyper-linux-amd64-musl-$VERSION -O /etc/zkvyper-bin/$VERSION/zkvyper && \ chmod +x /etc/zkvyper-bin/$VERSION/zkvyper; \ From dda48ba6d3dbdaa65683b784c57f3841ccb57fbc Mon Sep 17 00:00:00 2001 From: Igor Aleksanov Date: Tue, 27 Aug 2024 13:06:26 +0400 Subject: [PATCH 19/39] chore: Use a team in CODEOWNERS (#2739) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit ## What ❔ Uses a dedicated team for release management in CODEOWNERS. ## Why ❔ Better configurability. ## Checklist - [ ] PR title corresponds to the body of PR (we generate changelog entries from PRs). - [ ] Tests for the changes have been added / updated. - [ ] Documentation comments have been added / updated. - [ ] Code has been formatted via `zk fmt` and `zk lint`. 
--- CODEOWNERS | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/CODEOWNERS b/CODEOWNERS index 63094b333057..813cd396d2c2 100644 --- a/CODEOWNERS +++ b/CODEOWNERS @@ -1,4 +1,4 @@ -.github/release-please/** @RomanBrodetski @perekopskiy @Deniallugo @popzxc @EmilLuta -**/CHANGELOG.md @RomanBrodetski @perekopskiy @Deniallugo @popzxc @EmilLuta +.github/release-please/** @matter-labs/core-release-managers +**/CHANGELOG.md @matter-labs/core-release-managers CODEOWNERS @RomanBrodetski @perekopskiy @Deniallugo @popzxc .github/workflows/** @matter-labs/devops From beaf155f24b4b7efa7ffc15d6482b47b4ed92ea4 Mon Sep 17 00:00:00 2001 From: EmilLuta Date: Tue, 27 Aug 2024 11:30:07 +0200 Subject: [PATCH 20/39] chore(PJM): Nits & cleanups post initial merge (#2740) --- prover/crates/bin/prover_job_monitor/src/main.rs | 1 - .../proptest-regressions/tests.txt | 9 --------- .../lib/prover_dal/src/fri_witness_generator_dal.rs | 12 ++++++------ 3 files changed, 6 insertions(+), 16 deletions(-) delete mode 100644 prover/crates/bin/vk_setup_data_generator_server_fri/proptest-regressions/tests.txt diff --git a/prover/crates/bin/prover_job_monitor/src/main.rs b/prover/crates/bin/prover_job_monitor/src/main.rs index e585c06ad779..734a4bac38a2 100644 --- a/prover/crates/bin/prover_job_monitor/src/main.rs +++ b/prover/crates/bin/prover_job_monitor/src/main.rs @@ -37,7 +37,6 @@ async fn main() -> anyhow::Result<()> { let general_config = load_general_config(opt.config_path).context("general config")?; - println!("general_config = {general_config:?}"); let database_secrets = load_database_secrets(opt.secrets_path).context("database secrets")?; let observability_config = general_config diff --git a/prover/crates/bin/vk_setup_data_generator_server_fri/proptest-regressions/tests.txt b/prover/crates/bin/vk_setup_data_generator_server_fri/proptest-regressions/tests.txt deleted file mode 100644 index 7e50d86cb4f8..000000000000 --- a/prover/crates/bin/vk_setup_data_generator_server_fri/proptest-regressions/tests.txt +++ /dev/null @@ -1,9 +0,0 @@ -# Seeds for failure cases proptest has generated in the past. It is -# automatically read and these particular cases re-run before any -# novel cases are generated. -# -# It is recommended to check this file in to source control so that -# everyone who runs the test benefits from these saved cases. 
-cc ca181a7669a6e07b68bce71c8c723efcb8fd2a4e895fc962ca1d33ce5f8188f7 # shrinks to circuit_id = 1 -cc ce71957c410fa7af30e04b3e85423555a8e1bbd26b4682b748fa67162bc5687f # shrinks to circuit_id = 1 -cc 6d3b0c60d8a5e7d7dc3bb4a2a21cce97461827583ae01b2414345175a02a1221 # shrinks to key = ProverServiceDataKey { circuit_id: 1, round: BasicCircuits } diff --git a/prover/crates/lib/prover_dal/src/fri_witness_generator_dal.rs b/prover/crates/lib/prover_dal/src/fri_witness_generator_dal.rs index 65d490ee4e08..9958527a98b0 100644 --- a/prover/crates/lib/prover_dal/src/fri_witness_generator_dal.rs +++ b/prover/crates/lib/prover_dal/src/fri_witness_generator_dal.rs @@ -927,12 +927,12 @@ impl FriWitnessGeneratorDal<'_, '_> { "#, AggregationRound::RecursionTip as i64, ) - .fetch_all(self.storage.conn()) - .await - .unwrap() - .into_iter() - .map(|row| (row.l1_batch_number as u64)) - .collect() + .fetch_all(self.storage.conn()) + .await + .unwrap() + .into_iter() + .map(|row| (row.l1_batch_number as u64)) + .collect() } pub async fn requeue_stuck_leaf_jobs( From 951d5f208e5d16a5d95878dd345a8bd2a4144aa7 Mon Sep 17 00:00:00 2001 From: Alex Ostrovski Date: Tue, 27 Aug 2024 12:58:16 +0300 Subject: [PATCH 21/39] feat(vm): Extract oneshot VM executor interface (#2671) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit ## What ❔ Extracts oneshot VM executor from the API server crate. ## Why ❔ Simplifies reasoning about oneshot VM execution and its maintenance. Allows for alternative implementations. ## Checklist - [x] PR title corresponds to the body of PR (we generate changelog entries from PRs). - [x] Tests for the changes have been added / updated. - [x] Documentation comments have been added / updated. - [x] Code has been formatted via `zk fmt` and `zk lint`. 
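For orientation, here is a condensed, non-authoritative sketch of the trait this patch adds to `core/node/api_server/src/execution_sandbox/mod.rs`. Everything in it is taken from the diff that follows, with generic parameters spelled out and doc comments shortened; note that the actual implementations additionally bound `S: ReadStorage + Send + 'static`:

```rust
use async_trait::async_trait;
use zksync_multivm::interface::{BytecodeCompressionError, OneshotEnv, VmExecutionResultAndLogs};

// `TxExecutionArgs` is the executor-independent argument struct reworked in
// `execution_sandbox/execute.rs` by this same patch (transaction, nonce override,
// added balance, pubdata price adjustment flag); it is in scope inside the
// sandbox module where this trait lives.

/// VM executor capable of executing isolated transactions / calls
/// (as opposed to batch execution).
#[async_trait]
trait OneshotExecutor<S> {
    /// Tracers injected into a single execution: `Vec<ApiTracer>` for the real
    /// executor, `()` for the test mock.
    type Tracers: Default;

    /// Runs one transaction / call against `storage` under the full `OneshotEnv`.
    async fn inspect_transaction(
        &self,
        storage: S,
        env: OneshotEnv,
        args: TxExecutionArgs,
        tracers: Self::Tracers,
    ) -> anyhow::Result<VmExecutionResultAndLogs>;

    /// Same, but also attempts to compress the bytecodes published by the transaction.
    async fn inspect_transaction_with_bytecode_compression(
        &self,
        storage: S,
        env: OneshotEnv,
        args: TxExecutionArgs,
        tracers: Self::Tracers,
    ) -> anyhow::Result<(
        Result<(), BytecodeCompressionError>,
        VmExecutionResultAndLogs,
    )>;
}
```

`MainOneshotExecutor` implements this with `Vec<ApiTracer>` as its tracer type and applies the missed-storage-invocation limit only in `EthCall` / `EstimateFee` modes (storage accesses stay unlimited during validation); `MockOneshotExecutor` returns canned results for tests, and `TransactionExecutor` simply dispatches between the two.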
--- core/lib/multivm/src/tracers/mod.rs | 4 +- core/lib/multivm/src/tracers/validator/mod.rs | 6 +- core/lib/vm_interface/src/lib.rs | 5 +- .../vm_interface/src/types/inputs/l2_block.rs | 19 +- core/lib/vm_interface/src/types/inputs/mod.rs | 14 +- .../api_server/src/execution_sandbox/apply.rs | 676 ++++++++++-------- .../src/execution_sandbox/execute.rs | 221 +++--- .../api_server/src/execution_sandbox/mod.rs | 57 +- .../src/execution_sandbox/testonly.rs | 104 ++- .../api_server/src/execution_sandbox/tests.rs | 58 +- .../src/execution_sandbox/tracers.rs | 41 +- .../src/execution_sandbox/validate.rs | 112 +-- core/node/api_server/src/tx_sender/mod.rs | 106 ++- core/node/api_server/src/tx_sender/tests.rs | 8 +- .../api_server/src/web3/namespaces/debug.rs | 41 +- .../api_server/src/web3/namespaces/eth.rs | 13 +- core/node/api_server/src/web3/testonly.rs | 13 +- core/node/api_server/src/web3/tests/mod.rs | 49 +- core/node/api_server/src/web3/tests/vm.rs | 123 ++-- 19 files changed, 859 insertions(+), 811 deletions(-) diff --git a/core/lib/multivm/src/tracers/mod.rs b/core/lib/multivm/src/tracers/mod.rs index 0a6517a6cd2f..69501cf39882 100644 --- a/core/lib/multivm/src/tracers/mod.rs +++ b/core/lib/multivm/src/tracers/mod.rs @@ -3,7 +3,9 @@ pub use self::{ multivm_dispatcher::TracerDispatcher, prestate_tracer::PrestateTracer, storage_invocation::StorageInvocations, - validator::{ValidationError, ValidationTracer, ValidationTracerParams}, + validator::{ + ValidationError, ValidationTracer, ValidationTracerParams, ViolatedValidationRule, + }, }; mod call_tracer; diff --git a/core/lib/multivm/src/tracers/validator/mod.rs b/core/lib/multivm/src/tracers/validator/mod.rs index a91006368b6a..307256792cf7 100644 --- a/core/lib/multivm/src/tracers/validator/mod.rs +++ b/core/lib/multivm/src/tracers/validator/mod.rs @@ -10,13 +10,11 @@ use zksync_types::{ }; use zksync_utils::{be_bytes_to_safe_address, u256_to_account_address, u256_to_h256}; -pub use crate::tracers::validator::types::{ValidationError, ValidationTracerParams}; +use self::types::{NewTrustedValidationItems, ValidationTracerMode}; +pub use self::types::{ValidationError, ValidationTracerParams, ViolatedValidationRule}; use crate::{ glue::tracers::IntoOldVmTracer, interface::storage::{StoragePtr, WriteStorage}, - tracers::validator::types::{ - NewTrustedValidationItems, ValidationTracerMode, ViolatedValidationRule, - }, }; mod types; diff --git a/core/lib/vm_interface/src/lib.rs b/core/lib/vm_interface/src/lib.rs index b2b7d6484dad..120812842ad0 100644 --- a/core/lib/vm_interface/src/lib.rs +++ b/core/lib/vm_interface/src/lib.rs @@ -23,7 +23,10 @@ pub use crate::{ BytecodeCompressionError, Halt, TxRevertReason, VmRevertReason, VmRevertReasonParsingError, }, - inputs::{L1BatchEnv, L2BlockEnv, SystemEnv, TxExecutionMode, VmExecutionMode}, + inputs::{ + L1BatchEnv, L2BlockEnv, OneshotEnv, StoredL2BlockEnv, SystemEnv, TxExecutionMode, + VmExecutionMode, + }, outputs::{ BootloaderMemory, Call, CallType, CircuitStatistic, CompressedBytecodeInfo, CurrentExecutionState, DeduplicatedWritesMetrics, ExecutionResult, FinishedL1Batch, diff --git a/core/lib/vm_interface/src/types/inputs/l2_block.rs b/core/lib/vm_interface/src/types/inputs/l2_block.rs index 7c9a028bbad7..b081dfbdeacc 100644 --- a/core/lib/vm_interface/src/types/inputs/l2_block.rs +++ b/core/lib/vm_interface/src/types/inputs/l2_block.rs @@ -10,12 +10,21 @@ pub struct L2BlockEnv { } impl L2BlockEnv { - pub fn from_l2_block_data(miniblock_execution_data: &L2BlockExecutionData) -> Self { + pub 
fn from_l2_block_data(execution_data: &L2BlockExecutionData) -> Self { Self { - number: miniblock_execution_data.number.0, - timestamp: miniblock_execution_data.timestamp, - prev_block_hash: miniblock_execution_data.prev_block_hash, - max_virtual_blocks_to_create: miniblock_execution_data.virtual_blocks, + number: execution_data.number.0, + timestamp: execution_data.timestamp, + prev_block_hash: execution_data.prev_block_hash, + max_virtual_blocks_to_create: execution_data.virtual_blocks, } } } + +/// Current block information stored in the system context contract. Can be used to set up +/// oneshot transaction / call execution. +#[derive(Debug, Clone, Copy)] +pub struct StoredL2BlockEnv { + pub number: u32, + pub timestamp: u64, + pub txs_rolling_hash: H256, +} diff --git a/core/lib/vm_interface/src/types/inputs/mod.rs b/core/lib/vm_interface/src/types/inputs/mod.rs index 1d2c49cdfa11..4801c4d88b55 100644 --- a/core/lib/vm_interface/src/types/inputs/mod.rs +++ b/core/lib/vm_interface/src/types/inputs/mod.rs @@ -1,7 +1,7 @@ pub use self::{ execution_mode::VmExecutionMode, l1_batch_env::L1BatchEnv, - l2_block::L2BlockEnv, + l2_block::{L2BlockEnv, StoredL2BlockEnv}, system_env::{SystemEnv, TxExecutionMode}, }; @@ -9,3 +9,15 @@ mod execution_mode; mod l1_batch_env; mod l2_block; mod system_env; + +/// Full environment for oneshot transaction / call execution. +#[derive(Debug)] +pub struct OneshotEnv { + /// System environment. + pub system: SystemEnv, + /// Part of the environment specific to an L1 batch. + pub l1_batch: L1BatchEnv, + /// Part of the environment representing the current L2 block. Can be used to override storage slots + /// in the system context contract, which are set from `L1BatchEnv.first_l2_block` by default. + pub current_block: Option, +} diff --git a/core/node/api_server/src/execution_sandbox/apply.rs b/core/node/api_server/src/execution_sandbox/apply.rs index c0c8398f690d..0ec857e1e2b1 100644 --- a/core/node/api_server/src/execution_sandbox/apply.rs +++ b/core/node/api_server/src/execution_sandbox/apply.rs @@ -9,16 +9,19 @@ use std::time::{Duration, Instant}; use anyhow::Context as _; +use async_trait::async_trait; use tokio::runtime::Handle; -use zksync_dal::{Connection, ConnectionPool, Core, CoreDal, DalError}; +use zksync_dal::{Connection, Core, CoreDal, DalError}; use zksync_multivm::{ interface::{ storage::{ReadStorage, StoragePtr, StorageView, WriteStorage}, - L1BatchEnv, L2BlockEnv, SystemEnv, VmInterface, + BytecodeCompressionError, L1BatchEnv, L2BlockEnv, OneshotEnv, StoredL2BlockEnv, SystemEnv, + TxExecutionMode, VmExecutionMode, VmExecutionResultAndLogs, VmInterface, }, - utils::adjust_pubdata_price_for_tx, + tracers::StorageInvocations, + utils::{adjust_pubdata_price_for_tx, get_eth_call_gas_limit}, vm_latest::{constants::BATCH_COMPUTATIONAL_GAS_LIMIT, HistoryDisabled}, - VmInstance, + MultiVMTracer, MultiVmTracerPointer, VmInstance, }; use zksync_state::PostgresStorage; use zksync_system_constants::{ @@ -26,7 +29,7 @@ use zksync_system_constants::{ SYSTEM_CONTEXT_CURRENT_TX_ROLLING_HASH_POSITION, ZKPORTER_IS_AVAILABLE, }; use zksync_types::{ - api::{self, state_override::StateOverride}, + api, block::{pack_block_info, unpack_block_info, L2BlockHasher}, fee_model::BatchFeeInput, get_nonce_key, @@ -37,179 +40,250 @@ use zksync_types::{ use zksync_utils::{h256_to_u256, time::seconds_since_epoch, u256_to_h256}; use super::{ - storage::StorageWithOverrides, vm_metrics::{self, SandboxStage, SANDBOX_METRICS}, - BlockArgs, TxExecutionArgs, TxSharedArgs, VmPermit, 
+ ApiTracer, BlockArgs, OneshotExecutor, TxExecutionArgs, TxSetupArgs, }; -type VmStorageView<'a> = StorageView>>; -type BoxedVm<'a> = Box>, HistoryDisabled>>; +pub(super) async fn prepare_env_and_storage( + mut connection: Connection<'static, Core>, + setup_args: TxSetupArgs, + block_args: &BlockArgs, +) -> anyhow::Result<(OneshotEnv, PostgresStorage<'static>)> { + let initialization_stage = SANDBOX_METRICS.sandbox[&SandboxStage::Initialization].start(); -#[derive(Debug)] -struct Sandbox<'a> { - system_env: SystemEnv, - l1_batch_env: L1BatchEnv, - execution_args: &'a TxExecutionArgs, - l2_block_info_to_reset: Option, - storage_view: VmStorageView<'a>, -} - -impl<'a> Sandbox<'a> { - async fn new( - mut connection: Connection<'a, Core>, - shared_args: TxSharedArgs, - execution_args: &'a TxExecutionArgs, - block_args: BlockArgs, - state_override: &StateOverride, - ) -> anyhow::Result> { - let resolve_started_at = Instant::now(); - let resolved_block_info = block_args - .resolve_block_info(&mut connection) - .await - .with_context(|| format!("cannot resolve block numbers for {block_args:?}"))?; - let resolve_time = resolve_started_at.elapsed(); - // We don't want to emit too many logs. - if resolve_time > Duration::from_millis(10) { - tracing::debug!("Resolved block numbers (took {resolve_time:?})"); - } - - if block_args.resolves_to_latest_sealed_l2_block() { - shared_args - .caches - .schedule_values_update(resolved_block_info.state_l2_block_number); - } - - let (next_l2_block_info, l2_block_info_to_reset) = Self::load_l2_block_info( - &mut connection, - block_args.is_pending_l2_block(), - &resolved_block_info, - ) - .await?; - - let storage = PostgresStorage::new_async( - Handle::current(), - connection, - resolved_block_info.state_l2_block_number, - false, - ) + let resolve_started_at = Instant::now(); + let resolved_block_info = block_args + .resolve_block_info(&mut connection) .await - .context("cannot create `PostgresStorage`")? - .with_caches(shared_args.caches.clone()); - - let storage_with_overrides = StorageWithOverrides::new(storage, state_override); - let storage_view = StorageView::new(storage_with_overrides); - let (system_env, l1_batch_env) = Self::prepare_env( - shared_args, - execution_args, - &resolved_block_info, - next_l2_block_info, - ); + .with_context(|| format!("cannot resolve block numbers for {block_args:?}"))?; + let resolve_time = resolve_started_at.elapsed(); + // We don't want to emit too many logs. 
+ if resolve_time > Duration::from_millis(10) { + tracing::debug!("Resolved block numbers (took {resolve_time:?})"); + } - Ok(Self { - system_env, - l1_batch_env, - storage_view, - execution_args, - l2_block_info_to_reset, - }) + if block_args.resolves_to_latest_sealed_l2_block() { + setup_args + .caches + .schedule_values_update(resolved_block_info.state_l2_block_number); } - async fn load_l2_block_info( - connection: &mut Connection<'_, Core>, - is_pending_block: bool, - resolved_block_info: &ResolvedBlockInfo, - ) -> anyhow::Result<(L2BlockEnv, Option)> { - let mut l2_block_info_to_reset = None; - let current_l2_block_info = StoredL2BlockInfo::new( - connection, - resolved_block_info.state_l2_block_number, - Some(resolved_block_info.state_l2_block_hash), - ) + let (next_block, current_block) = load_l2_block_info( + &mut connection, + block_args.is_pending_l2_block(), + &resolved_block_info, + ) + .await?; + + let storage = PostgresStorage::new_async( + Handle::current(), + connection, + resolved_block_info.state_l2_block_number, + false, + ) + .await + .context("cannot create `PostgresStorage`")? + .with_caches(setup_args.caches.clone()); + + let (system, l1_batch) = prepare_env(setup_args, &resolved_block_info, next_block); + + let env = OneshotEnv { + system, + l1_batch, + current_block, + }; + initialization_stage.observe(); + Ok((env, storage)) +} + +async fn load_l2_block_info( + connection: &mut Connection<'_, Core>, + is_pending_block: bool, + resolved_block_info: &ResolvedBlockInfo, +) -> anyhow::Result<(L2BlockEnv, Option)> { + let mut current_block = None; + let next_block = read_stored_l2_block(connection, resolved_block_info.state_l2_block_number) .await .context("failed reading L2 block info")?; - let next_l2_block_info = if is_pending_block { - L2BlockEnv { - number: current_l2_block_info.l2_block_number + 1, - timestamp: resolved_block_info.l1_batch_timestamp, - prev_block_hash: current_l2_block_info.l2_block_hash, - // For simplicity, we assume each L2 block create one virtual block. - // This may be wrong only during transition period. - max_virtual_blocks_to_create: 1, - } - } else if current_l2_block_info.l2_block_number == 0 { - // Special case: - // - For environments, where genesis block was created before virtual block upgrade it doesn't matter what we put here. - // - Otherwise, we need to put actual values here. We cannot create next L2 block with block_number=0 and `max_virtual_blocks_to_create=0` - // because of SystemContext requirements. But, due to intrinsics of SystemContext, block.number still will be resolved to 0. - L2BlockEnv { - number: 1, - timestamp: 0, - prev_block_hash: L2BlockHasher::legacy_hash(L2BlockNumber(0)), - max_virtual_blocks_to_create: 1, - } - } else { - // We need to reset L2 block info in storage to process transaction in the current block context. - // Actual resetting will be done after `storage_view` is created. - let prev_l2_block_info = StoredL2BlockInfo::new( - connection, - resolved_block_info.state_l2_block_number - 1, - None, - ) + let next_block = if is_pending_block { + L2BlockEnv { + number: next_block.number + 1, + timestamp: resolved_block_info.l1_batch_timestamp, + prev_block_hash: resolved_block_info.state_l2_block_hash, + // For simplicity, we assume each L2 block create one virtual block. + // This may be wrong only during transition period. 
+ max_virtual_blocks_to_create: 1, + } + } else if next_block.number == 0 { + // Special case: + // - For environments, where genesis block was created before virtual block upgrade it doesn't matter what we put here. + // - Otherwise, we need to put actual values here. We cannot create next L2 block with block_number=0 and `max_virtual_blocks_to_create=0` + // because of SystemContext requirements. But, due to intrinsics of SystemContext, block.number still will be resolved to 0. + L2BlockEnv { + number: 1, + timestamp: 0, + prev_block_hash: L2BlockHasher::legacy_hash(L2BlockNumber(0)), + max_virtual_blocks_to_create: 1, + } + } else { + // We need to reset L2 block info in storage to process transaction in the current block context. + // Actual resetting will be done after `storage_view` is created. + let prev_block_number = resolved_block_info.state_l2_block_number - 1; + let prev_l2_block = read_stored_l2_block(connection, prev_block_number) .await .context("failed reading previous L2 block info")?; - l2_block_info_to_reset = Some(prev_l2_block_info); - L2BlockEnv { - number: current_l2_block_info.l2_block_number, - timestamp: current_l2_block_info.l2_block_timestamp, - prev_block_hash: prev_l2_block_info.l2_block_hash, - max_virtual_blocks_to_create: 1, - } + let mut prev_block_hash = connection + .blocks_web3_dal() + .get_l2_block_hash(prev_block_number) + .await + .map_err(DalError::generalize)?; + if prev_block_hash.is_none() { + // We might need to load the previous block hash from the snapshot recovery metadata + let snapshot_recovery = connection + .snapshot_recovery_dal() + .get_applied_snapshot_status() + .await + .map_err(DalError::generalize)?; + prev_block_hash = snapshot_recovery.and_then(|recovery| { + (recovery.l2_block_number == prev_block_number).then_some(recovery.l2_block_hash) + }); + } + + current_block = Some(prev_l2_block); + L2BlockEnv { + number: next_block.number, + timestamp: next_block.timestamp, + prev_block_hash: prev_block_hash.with_context(|| { + format!("missing hash for previous L2 block #{prev_block_number}") + })?, + max_virtual_blocks_to_create: 1, + } + }; + + Ok((next_block, current_block)) +} + +fn prepare_env( + setup_args: TxSetupArgs, + resolved_block_info: &ResolvedBlockInfo, + next_block: L2BlockEnv, +) -> (SystemEnv, L1BatchEnv) { + let TxSetupArgs { + execution_mode, + operator_account, + fee_input, + base_system_contracts, + validation_computational_gas_limit, + chain_id, + enforced_base_fee, + .. + } = setup_args; + + // In case we are executing in a past block, we'll use the historical fee data. 
+ let fee_input = resolved_block_info + .historical_fee_input + .unwrap_or(fee_input); + let system_env = SystemEnv { + zk_porter_available: ZKPORTER_IS_AVAILABLE, + version: resolved_block_info.protocol_version, + base_system_smart_contracts: base_system_contracts + .get_by_protocol_version(resolved_block_info.protocol_version), + bootloader_gas_limit: BATCH_COMPUTATIONAL_GAS_LIMIT, + execution_mode, + default_validation_computational_gas_limit: validation_computational_gas_limit, + chain_id, + }; + let l1_batch_env = L1BatchEnv { + previous_batch_hash: None, + number: resolved_block_info.vm_l1_batch_number, + timestamp: resolved_block_info.l1_batch_timestamp, + fee_input, + fee_account: *operator_account.address(), + enforced_base_fee, + first_l2_block: next_block, + }; + (system_env, l1_batch_env) +} + +// public for testing purposes +#[derive(Debug)] +pub(super) struct VmSandbox { + vm: Box>, + storage_view: StoragePtr>, + transaction: Transaction, +} + +impl VmSandbox { + /// This method is blocking. + pub fn new(storage: S, mut env: OneshotEnv, execution_args: TxExecutionArgs) -> Self { + let mut storage_view = StorageView::new(storage); + Self::setup_storage_view(&mut storage_view, &execution_args, env.current_block); + + let protocol_version = env.system.version; + if execution_args.adjust_pubdata_price { + env.l1_batch.fee_input = adjust_pubdata_price_for_tx( + env.l1_batch.fee_input, + execution_args.transaction.gas_per_pubdata_byte_limit(), + env.l1_batch.enforced_base_fee.map(U256::from), + protocol_version.into(), + ); }; - Ok((next_l2_block_info, l2_block_info_to_reset)) + let storage_view = storage_view.to_rc_ptr(); + let vm = Box::new(VmInstance::new_with_specific_version( + env.l1_batch, + env.system, + storage_view.clone(), + protocol_version.into_api_vm_version(), + )); + + Self { + vm, + storage_view, + transaction: execution_args.transaction, + } } /// This method is blocking. - fn setup_storage_view(&mut self, tx: &Transaction) { + fn setup_storage_view( + storage_view: &mut StorageView, + execution_args: &TxExecutionArgs, + current_block: Option, + ) { let storage_view_setup_started_at = Instant::now(); - if let Some(nonce) = self.execution_args.enforced_nonce { - let nonce_key = get_nonce_key(&tx.initiator_account()); - let full_nonce = self.storage_view.read_value(&nonce_key); + if let Some(nonce) = execution_args.enforced_nonce { + let nonce_key = get_nonce_key(&execution_args.transaction.initiator_account()); + let full_nonce = storage_view.read_value(&nonce_key); let (_, deployment_nonce) = decompose_full_nonce(h256_to_u256(full_nonce)); let enforced_full_nonce = nonces_to_full_nonce(U256::from(nonce.0), deployment_nonce); - self.storage_view - .set_value(nonce_key, u256_to_h256(enforced_full_nonce)); + storage_view.set_value(nonce_key, u256_to_h256(enforced_full_nonce)); } - let payer = tx.payer(); + let payer = execution_args.transaction.payer(); let balance_key = storage_key_for_eth_balance(&payer); - let mut current_balance = h256_to_u256(self.storage_view.read_value(&balance_key)); - current_balance += self.execution_args.added_balance; - self.storage_view - .set_value(balance_key, u256_to_h256(current_balance)); + let mut current_balance = h256_to_u256(storage_view.read_value(&balance_key)); + current_balance += execution_args.added_balance; + storage_view.set_value(balance_key, u256_to_h256(current_balance)); // Reset L2 block info if necessary. 
- if let Some(l2_block_info_to_reset) = self.l2_block_info_to_reset { + if let Some(current_block) = current_block { let l2_block_info_key = StorageKey::new( AccountTreeId::new(SYSTEM_CONTEXT_ADDRESS), SYSTEM_CONTEXT_CURRENT_L2_BLOCK_INFO_POSITION, ); - let l2_block_info = pack_block_info( - l2_block_info_to_reset.l2_block_number as u64, - l2_block_info_to_reset.l2_block_timestamp, - ); - self.storage_view - .set_value(l2_block_info_key, u256_to_h256(l2_block_info)); + let l2_block_info = + pack_block_info(current_block.number.into(), current_block.timestamp); + storage_view.set_value(l2_block_info_key, u256_to_h256(l2_block_info)); let l2_block_txs_rolling_hash_key = StorageKey::new( AccountTreeId::new(SYSTEM_CONTEXT_ADDRESS), SYSTEM_CONTEXT_CURRENT_TX_ROLLING_HASH_POSITION, ); - self.storage_view.set_value( + storage_view.set_value( l2_block_txs_rolling_hash_key, - l2_block_info_to_reset.txs_rolling_hash, + current_block.txs_rolling_hash, ); } @@ -220,201 +294,155 @@ impl<'a> Sandbox<'a> { } } - fn prepare_env( - shared_args: TxSharedArgs, - execution_args: &TxExecutionArgs, - resolved_block_info: &ResolvedBlockInfo, - next_l2_block_info: L2BlockEnv, - ) -> (SystemEnv, L1BatchEnv) { - let TxSharedArgs { - operator_account, - fee_input, - base_system_contracts, - validation_computational_gas_limit, - chain_id, - .. - } = shared_args; - - // In case we are executing in a past block, we'll use the historical fee data. - let fee_input = resolved_block_info - .historical_fee_input - .unwrap_or(fee_input); - let system_env = SystemEnv { - zk_porter_available: ZKPORTER_IS_AVAILABLE, - version: resolved_block_info.protocol_version, - base_system_smart_contracts: base_system_contracts - .get_by_protocol_version(resolved_block_info.protocol_version), - bootloader_gas_limit: BATCH_COMPUTATIONAL_GAS_LIMIT, - execution_mode: execution_args.execution_mode, - default_validation_computational_gas_limit: validation_computational_gas_limit, - chain_id, - }; - let l1_batch_env = L1BatchEnv { - previous_batch_hash: None, - number: resolved_block_info.vm_l1_batch_number, - timestamp: resolved_block_info.l1_batch_timestamp, - fee_input, - fee_account: *operator_account.address(), - enforced_base_fee: execution_args.enforced_base_fee, - first_l2_block: next_l2_block_info, - }; - (system_env, l1_batch_env) + fn wrap_tracers( + tracers: Vec, + env: &OneshotEnv, + missed_storage_invocation_limit: usize, + ) -> Vec, HistoryDisabled>> { + let storage_invocation_tracer = StorageInvocations::new(missed_storage_invocation_limit); + let protocol_version = env.system.version; + tracers + .into_iter() + .map(|tracer| tracer.into_boxed(protocol_version)) + .chain([storage_invocation_tracer.into_tracer_pointer()]) + .collect() } - /// This method is blocking. 
- fn into_vm( - mut self, - tx: &Transaction, - adjust_pubdata_price: bool, - ) -> (BoxedVm<'a>, StoragePtr>) { - self.setup_storage_view(tx); - let protocol_version = self.system_env.version; - if adjust_pubdata_price { - self.l1_batch_env.fee_input = adjust_pubdata_price_for_tx( - self.l1_batch_env.fee_input, - tx.gas_per_pubdata_byte_limit(), - self.l1_batch_env.enforced_base_fee.map(U256::from), - protocol_version.into(), - ); - }; + pub(super) fn apply(mut self, apply_fn: F) -> T + where + F: FnOnce(&mut VmInstance, Transaction) -> T, + { + let tx_id = format!( + "{:?}-{}", + self.transaction.initiator_account(), + self.transaction.nonce().unwrap_or(Nonce(0)) + ); - let storage_view = self.storage_view.to_rc_ptr(); - let vm = Box::new(VmInstance::new_with_specific_version( - self.l1_batch_env, - self.system_env, - storage_view.clone(), - protocol_version.into_api_vm_version(), - )); + let execution_latency = SANDBOX_METRICS.sandbox[&SandboxStage::Execution].start(); + let result = apply_fn(&mut *self.vm, self.transaction); + let vm_execution_took = execution_latency.observe(); - (vm, storage_view) + let memory_metrics = self.vm.record_vm_memory_metrics(); + vm_metrics::report_vm_memory_metrics( + &tx_id, + &memory_metrics, + vm_execution_took, + self.storage_view.as_ref().borrow_mut().metrics(), + ); + result } } -#[allow(clippy::too_many_arguments)] -pub(super) fn apply_vm_in_sandbox( - vm_permit: VmPermit, - shared_args: TxSharedArgs, - // If `true`, then the batch's L1/pubdata gas price will be adjusted so that the transaction's gas per pubdata limit is <= - // to the one in the block. This is often helpful in case we want the transaction validation to work regardless of the - // current L1 prices for gas or pubdata. - adjust_pubdata_price: bool, - execution_args: &TxExecutionArgs, - connection_pool: &ConnectionPool, - tx: Transaction, - block_args: BlockArgs, // Block arguments for the transaction. - state_override: Option, - apply: impl FnOnce( - &mut VmInstance>, HistoryDisabled>, - Transaction, - ProtocolVersionId, - ) -> T, -) -> anyhow::Result { - let stage_started_at = Instant::now(); - let span = tracing::debug_span!("initialization").entered(); - - let rt_handle = vm_permit.rt_handle(); - let connection = rt_handle - .block_on(connection_pool.connection_tagged("api")) - .context("failed acquiring DB connection")?; - let connection_acquire_time = stage_started_at.elapsed(); - // We don't want to emit too many logs. 
- if connection_acquire_time > Duration::from_millis(10) { - tracing::debug!("Obtained connection (took {connection_acquire_time:?})"); - } - - let sandbox = rt_handle.block_on(Sandbox::new( - connection, - shared_args, - execution_args, - block_args, - state_override.as_ref().unwrap_or(&StateOverride::default()), - ))?; - let protocol_version = sandbox.system_env.version; - let (mut vm, storage_view) = sandbox.into_vm(&tx, adjust_pubdata_price); - - SANDBOX_METRICS.sandbox[&SandboxStage::Initialization].observe(stage_started_at.elapsed()); - span.exit(); - - let tx_id = format!( - "{:?}-{}", - tx.initiator_account(), - tx.nonce().unwrap_or(Nonce(0)) - ); - - let execution_latency = SANDBOX_METRICS.sandbox[&SandboxStage::Execution].start(); - let result = apply(&mut vm, tx, protocol_version); - let vm_execution_took = execution_latency.observe(); - - let memory_metrics = vm.record_vm_memory_metrics(); - vm_metrics::report_vm_memory_metrics( - &tx_id, - &memory_metrics, - vm_execution_took, - storage_view.as_ref().borrow_mut().metrics(), - ); - Ok(result) +/// Main [`OneshotExecutor`] implementation used by the API server. +#[derive(Debug, Default)] +pub struct MainOneshotExecutor { + missed_storage_invocation_limit: usize, } -#[derive(Debug, Clone, Copy)] -struct StoredL2BlockInfo { - l2_block_number: u32, - l2_block_timestamp: u64, - l2_block_hash: H256, - txs_rolling_hash: H256, +impl MainOneshotExecutor { + /// Creates a new executor with the specified limit of cache misses for storage read operations (an anti-DoS measure). + /// The limit is applied for calls and gas estimations, but not during transaction validation. + pub fn new(missed_storage_invocation_limit: usize) -> Self { + Self { + missed_storage_invocation_limit, + } + } } -impl StoredL2BlockInfo { - /// If `l2_block_hash` is `None`, it needs to be fetched from the storage. 
- async fn new( - connection: &mut Connection<'_, Core>, - l2_block_number: L2BlockNumber, - l2_block_hash: Option, - ) -> anyhow::Result { - let l2_block_info_key = StorageKey::new( - AccountTreeId::new(SYSTEM_CONTEXT_ADDRESS), - SYSTEM_CONTEXT_CURRENT_L2_BLOCK_INFO_POSITION, - ); - let l2_block_info = connection - .storage_web3_dal() - .get_historical_value_unchecked(l2_block_info_key.hashed_key(), l2_block_number) - .await - .context("failed reading L2 block info from VM state")?; - let (l2_block_number_from_state, l2_block_timestamp) = - unpack_block_info(h256_to_u256(l2_block_info)); +#[async_trait] +impl OneshotExecutor for MainOneshotExecutor +where + S: ReadStorage + Send + 'static, +{ + type Tracers = Vec; - let l2_block_txs_rolling_hash_key = StorageKey::new( - AccountTreeId::new(SYSTEM_CONTEXT_ADDRESS), - SYSTEM_CONTEXT_CURRENT_TX_ROLLING_HASH_POSITION, - ); - let txs_rolling_hash = connection - .storage_web3_dal() - .get_historical_value_unchecked( - l2_block_txs_rolling_hash_key.hashed_key(), - l2_block_number, - ) - .await - .context("failed reading transaction rolling hash from VM state")?; + async fn inspect_transaction( + &self, + storage: S, + env: OneshotEnv, + args: TxExecutionArgs, + tracers: Self::Tracers, + ) -> anyhow::Result { + let missed_storage_invocation_limit = match env.system.execution_mode { + // storage accesses are not limited for tx validation + TxExecutionMode::VerifyExecute => usize::MAX, + TxExecutionMode::EthCall | TxExecutionMode::EstimateFee => { + self.missed_storage_invocation_limit + } + }; - let l2_block_hash = if let Some(hash) = l2_block_hash { - hash - } else { - connection - .blocks_web3_dal() - .get_l2_block_hash(l2_block_number) - .await - .map_err(DalError::generalize)? - .with_context(|| format!("L2 block #{l2_block_number} not present in storage"))? 
+ tokio::task::spawn_blocking(move || { + let tracers = VmSandbox::wrap_tracers(tracers, &env, missed_storage_invocation_limit); + let executor = VmSandbox::new(storage, env, args); + executor.apply(|vm, transaction| { + vm.push_transaction(transaction); + vm.inspect(tracers.into(), VmExecutionMode::OneTx) + }) + }) + .await + .context("VM execution panicked") + } + + async fn inspect_transaction_with_bytecode_compression( + &self, + storage: S, + env: OneshotEnv, + args: TxExecutionArgs, + tracers: Self::Tracers, + ) -> anyhow::Result<( + Result<(), BytecodeCompressionError>, + VmExecutionResultAndLogs, + )> { + let missed_storage_invocation_limit = match env.system.execution_mode { + // storage accesses are not limited for tx validation + TxExecutionMode::VerifyExecute => usize::MAX, + TxExecutionMode::EthCall | TxExecutionMode::EstimateFee => { + self.missed_storage_invocation_limit + } }; - Ok(Self { - l2_block_number: l2_block_number_from_state as u32, - l2_block_timestamp, - l2_block_hash, - txs_rolling_hash, + tokio::task::spawn_blocking(move || { + let tracers = VmSandbox::wrap_tracers(tracers, &env, missed_storage_invocation_limit); + let executor = VmSandbox::new(storage, env, args); + executor.apply(|vm, transaction| { + vm.inspect_transaction_with_bytecode_compression(tracers.into(), transaction, true) + }) }) + .await + .context("VM execution panicked") } } +async fn read_stored_l2_block( + connection: &mut Connection<'_, Core>, + l2_block_number: L2BlockNumber, +) -> anyhow::Result { + let l2_block_info_key = StorageKey::new( + AccountTreeId::new(SYSTEM_CONTEXT_ADDRESS), + SYSTEM_CONTEXT_CURRENT_L2_BLOCK_INFO_POSITION, + ); + let l2_block_info = connection + .storage_web3_dal() + .get_historical_value_unchecked(l2_block_info_key.hashed_key(), l2_block_number) + .await?; + let (l2_block_number_from_state, timestamp) = unpack_block_info(h256_to_u256(l2_block_info)); + + let l2_block_txs_rolling_hash_key = StorageKey::new( + AccountTreeId::new(SYSTEM_CONTEXT_ADDRESS), + SYSTEM_CONTEXT_CURRENT_TX_ROLLING_HASH_POSITION, + ); + let txs_rolling_hash = connection + .storage_web3_dal() + .get_historical_value_unchecked(l2_block_txs_rolling_hash_key.hashed_key(), l2_block_number) + .await?; + + Ok(StoredL2BlockEnv { + number: l2_block_number_from_state as u32, + timestamp, + txs_rolling_hash, + }) +} + #[derive(Debug)] pub(crate) struct ResolvedBlockInfo { state_l2_block_number: L2BlockNumber, @@ -442,7 +470,19 @@ impl BlockArgs { ) } - pub(crate) async fn resolve_block_info( + pub(crate) async fn default_eth_call_gas( + &self, + connection: &mut Connection<'_, Core>, + ) -> anyhow::Result { + let protocol_version = self + .resolve_block_info(connection) + .await + .context("failed to resolve block info")? + .protocol_version; + Ok(get_eth_call_gas_limit(protocol_version.into()).into()) + } + + async fn resolve_block_info( &self, connection: &mut Connection<'_, Core>, ) -> anyhow::Result { diff --git a/core/node/api_server/src/execution_sandbox/execute.rs b/core/node/api_server/src/execution_sandbox/execute.rs index 741bcaea18f4..086a75c81de9 100644 --- a/core/node/api_server/src/execution_sandbox/execute.rs +++ b/core/node/api_server/src/execution_sandbox/execute.rs @@ -1,80 +1,80 @@ //! Implementation of "executing" methods, e.g. `eth_call`. 
-use anyhow::Context as _; -use tracing::{span, Level}; -use zksync_dal::{ConnectionPool, Core}; -use zksync_multivm::{ - interface::{ - TransactionExecutionMetrics, TxExecutionMode, VmExecutionResultAndLogs, VmInterface, - }, - tracers::StorageInvocations, - MultiVMTracer, +use async_trait::async_trait; +use zksync_dal::{Connection, Core}; +use zksync_multivm::interface::{ + storage::ReadStorage, BytecodeCompressionError, OneshotEnv, TransactionExecutionMetrics, + VmExecutionResultAndLogs, }; use zksync_types::{ - l2::L2Tx, transaction_request::CallOverrides, ExecuteTransactionCommon, Nonce, + api::state_override::StateOverride, l2::L2Tx, ExecuteTransactionCommon, Nonce, PackedEthSignature, Transaction, U256, }; use super::{ - apply, testonly::MockTransactionExecutor, vm_metrics, ApiTracer, BlockArgs, TxSharedArgs, - VmPermit, + apply::{self, MainOneshotExecutor}, + storage::StorageWithOverrides, + testonly::MockOneshotExecutor, + vm_metrics, ApiTracer, BlockArgs, OneshotExecutor, TxSetupArgs, VmPermit, }; -use crate::execution_sandbox::api::state_override::StateOverride; +/// Executor-independent arguments necessary for oneshot transaction execution. +/// +/// # Developer guidelines +/// +/// Please don't add fields that duplicate `SystemEnv` or `L1BatchEnv` information, since both of these +/// are also provided to an executor. #[derive(Debug)] pub(crate) struct TxExecutionArgs { - pub execution_mode: TxExecutionMode, + /// Transaction / call itself. + pub transaction: Transaction, + /// Nonce override for the initiator account. pub enforced_nonce: Option<Nonce>, + /// Balance added to the initiator account. pub added_balance: U256, - pub enforced_base_fee: Option<u64>, - pub missed_storage_invocation_limit: usize, + /// If `true`, then the batch's L1 / pubdata gas price will be adjusted so that the transaction's gas per pubdata limit is <= + /// to the one in the block. This is often helpful in case we want the transaction validation to work regardless of the + /// current L1 prices for gas or pubdata.
+ pub adjust_pubdata_price: bool, } impl TxExecutionArgs { - pub fn for_validation(tx: &L2Tx) -> Self { + pub fn for_validation(tx: L2Tx) -> Self { Self { - execution_mode: TxExecutionMode::VerifyExecute, enforced_nonce: Some(tx.nonce()), added_balance: U256::zero(), - enforced_base_fee: Some(tx.common_data.fee.max_fee_per_gas.as_u64()), - missed_storage_invocation_limit: usize::MAX, + adjust_pubdata_price: true, + transaction: tx.into(), } } - fn for_eth_call( - enforced_base_fee: Option, - vm_execution_cache_misses_limit: Option, - ) -> Self { - let missed_storage_invocation_limit = vm_execution_cache_misses_limit.unwrap_or(usize::MAX); + pub fn for_eth_call(mut call: L2Tx) -> Self { + if call.common_data.signature.is_empty() { + call.common_data.signature = PackedEthSignature::default().serialize_packed().into(); + } + Self { - execution_mode: TxExecutionMode::EthCall, enforced_nonce: None, added_balance: U256::zero(), - enforced_base_fee, - missed_storage_invocation_limit, + adjust_pubdata_price: false, + transaction: call.into(), } } - pub fn for_gas_estimate( - vm_execution_cache_misses_limit: Option, - tx: &Transaction, - base_fee: u64, - ) -> Self { - let missed_storage_invocation_limit = vm_execution_cache_misses_limit.unwrap_or(usize::MAX); + pub fn for_gas_estimate(transaction: Transaction) -> Self { // For L2 transactions we need to explicitly put enough balance into the account of the users // while for L1->L2 transactions the `to_mint` field plays this role - let added_balance = match &tx.common_data { + let added_balance = match &transaction.common_data { ExecuteTransactionCommon::L2(data) => data.fee.gas_limit * data.fee.max_fee_per_gas, ExecuteTransactionCommon::L1(_) => U256::zero(), ExecuteTransactionCommon::ProtocolUpgrade(_) => U256::zero(), }; Self { - execution_mode: TxExecutionMode::EstimateFee, - missed_storage_invocation_limit, - enforced_nonce: tx.nonce(), + enforced_nonce: transaction.nonce(), added_balance, - enforced_base_fee: Some(base_fee), + adjust_pubdata_price: true, + transaction, } } } @@ -92,68 +92,40 @@ pub(crate) struct TransactionExecutionOutput { /// Executor of transactions. #[derive(Debug)] pub(crate) enum TransactionExecutor { - Real, + Real(MainOneshotExecutor), #[doc(hidden)] // Intended for tests only - Mock(MockTransactionExecutor), + Mock(MockOneshotExecutor), } impl TransactionExecutor { + pub fn real(missed_storage_invocation_limit: usize) -> Self { + Self::Real(MainOneshotExecutor::new(missed_storage_invocation_limit)) + } + /// This method assumes that (block with number `resolved_block_number` is present in DB) /// or (`block_id` is `pending` and block with number `resolved_block_number - 1` is present in DB) #[allow(clippy::too_many_arguments)] - #[tracing::instrument(skip_all)] + #[tracing::instrument(level = "debug", skip_all)] pub async fn execute_tx_in_sandbox( &self, vm_permit: VmPermit, - shared_args: TxSharedArgs, - // If `true`, then the batch's L1/pubdata gas price will be adjusted so that the transaction's gas per pubdata limit is <= - // to the one in the block. This is often helpful in case we want the transaction validation to work regardless of the - // current L1 prices for gas or pubdata. 
- adjust_pubdata_price: bool, + setup_args: TxSetupArgs, execution_args: TxExecutionArgs, - connection_pool: ConnectionPool, - tx: Transaction, + connection: Connection<'static, Core>, block_args: BlockArgs, state_override: Option, - custom_tracers: Vec, + tracers: Vec, ) -> anyhow::Result { - if let Self::Mock(mock_executor) = self { - return mock_executor.execute_tx(&tx, &block_args); - } - - let total_factory_deps = tx.execute.factory_deps.len() as u16; - - let (published_bytecodes, execution_result) = tokio::task::spawn_blocking(move || { - let span = span!(Level::DEBUG, "execute_in_sandbox").entered(); - let result = apply::apply_vm_in_sandbox( - vm_permit, - shared_args, - adjust_pubdata_price, - &execution_args, - &connection_pool, - tx, - block_args, - state_override, - |vm, tx, _| { - let storage_invocation_tracer = - StorageInvocations::new(execution_args.missed_storage_invocation_limit); - let custom_tracers: Vec<_> = custom_tracers - .into_iter() - .map(|tracer| tracer.into_boxed()) - .chain(vec![storage_invocation_tracer.into_tracer_pointer()]) - .collect(); - vm.inspect_transaction_with_bytecode_compression( - custom_tracers.into(), - tx, - true, - ) - }, - ); - span.exit(); - result - }) - .await - .context("transaction execution panicked")??; + let total_factory_deps = execution_args.transaction.execute.factory_deps.len() as u16; + let (env, storage) = + apply::prepare_env_and_storage(connection, setup_args, &block_args).await?; + let state_override = state_override.unwrap_or_default(); + let storage = StorageWithOverrides::new(storage, &state_override); + + let (published_bytecodes, execution_result) = self + .inspect_transaction_with_bytecode_compression(storage, env, execution_args, tracers) + .await?; + drop(vm_permit); let metrics = vm_metrics::collect_tx_execution_metrics(total_factory_deps, &execution_result); @@ -163,42 +135,53 @@ impl TransactionExecutor { are_published_bytecodes_ok: published_bytecodes.is_ok(), }) } +} - #[allow(clippy::too_many_arguments)] - pub async fn execute_tx_eth_call( +#[async_trait] +impl OneshotExecutor for TransactionExecutor +where + S: ReadStorage + Send + 'static, +{ + type Tracers = Vec; + + async fn inspect_transaction( &self, - vm_permit: VmPermit, - shared_args: TxSharedArgs, - connection_pool: ConnectionPool, - call_overrides: CallOverrides, - mut tx: L2Tx, - block_args: BlockArgs, - vm_execution_cache_misses_limit: Option, - custom_tracers: Vec, - state_override: Option, + storage: S, + env: OneshotEnv, + args: TxExecutionArgs, + tracers: Self::Tracers, ) -> anyhow::Result { - let execution_args = TxExecutionArgs::for_eth_call( - call_overrides.enforced_base_fee, - vm_execution_cache_misses_limit, - ); - - if tx.common_data.signature.is_empty() { - tx.common_data.signature = PackedEthSignature::default().serialize_packed().into(); + match self { + Self::Real(executor) => { + executor + .inspect_transaction(storage, env, args, tracers) + .await + } + Self::Mock(executor) => executor.inspect_transaction(storage, env, args, ()).await, } + } - let output = self - .execute_tx_in_sandbox( - vm_permit, - shared_args, - false, - execution_args, - connection_pool, - tx.into(), - block_args, - state_override, - custom_tracers, - ) - .await?; - Ok(output.vm) + async fn inspect_transaction_with_bytecode_compression( + &self, + storage: S, + env: OneshotEnv, + args: TxExecutionArgs, + tracers: Self::Tracers, + ) -> anyhow::Result<( + Result<(), BytecodeCompressionError>, + VmExecutionResultAndLogs, + )> { + match self { + 
Self::Real(executor) => { + executor + .inspect_transaction_with_bytecode_compression(storage, env, args, tracers) + .await + } + Self::Mock(executor) => { + executor + .inspect_transaction_with_bytecode_compression(storage, env, args, ()) + .await + } + } } } diff --git a/core/node/api_server/src/execution_sandbox/mod.rs b/core/node/api_server/src/execution_sandbox/mod.rs index f7c876679cb0..f2a3f0e5f8c3 100644 --- a/core/node/api_server/src/execution_sandbox/mod.rs +++ b/core/node/api_server/src/execution_sandbox/mod.rs @@ -4,9 +4,13 @@ use std::{ }; use anyhow::Context as _; +use async_trait::async_trait; use rand::{thread_rng, Rng}; -use tokio::runtime::Handle; use zksync_dal::{pruning_dal::PruningInfo, Connection, Core, CoreDal, DalError}; +use zksync_multivm::interface::{ + storage::ReadStorage, BytecodeCompressionError, OneshotEnv, TxExecutionMode, + VmExecutionResultAndLogs, +}; use zksync_state::PostgresStorageCaches; use zksync_types::{ api, fee_model::BatchFeeInput, AccountTreeId, Address, L1BatchNumber, L2BlockNumber, L2ChainId, @@ -40,17 +44,9 @@ mod vm_metrics; /// as a proof that the caller obtained a token from `VmConcurrencyLimiter`, #[derive(Debug, Clone)] pub struct VmPermit { - /// A handle to the runtime that is used to query the VM storage. - rt_handle: Handle, _permit: Arc, } -impl VmPermit { - fn rt_handle(&self) -> &Handle { - &self.rt_handle - } -} - /// Barrier-like synchronization primitive allowing to close a [`VmConcurrencyLimiter`] it's attached to /// so that it doesn't issue new permits, and to wait for all permits to drop. #[derive(Debug, Clone)] @@ -103,7 +99,6 @@ impl VmConcurrencyBarrier { pub struct VmConcurrencyLimiter { /// Semaphore that limits the number of concurrent VM executions. limiter: Arc, - rt_handle: Handle, } impl VmConcurrencyLimiter { @@ -116,7 +111,6 @@ impl VmConcurrencyLimiter { let this = Self { limiter: Arc::clone(&limiter), - rt_handle: Handle::current(), }; let barrier = VmConcurrencyBarrier { limiter, @@ -144,7 +138,6 @@ impl VmConcurrencyLimiter { } Some(VmPermit { - rt_handle: self.rt_handle.clone(), _permit: Arc::new(permit), }) } @@ -163,9 +156,10 @@ async fn get_pending_state( Ok((block_id, resolved_block_number)) } -/// Arguments for VM execution not specific to a particular transaction. +/// Arguments for VM execution necessary to set up storage and environment. #[derive(Debug, Clone)] -pub(crate) struct TxSharedArgs { +pub(crate) struct TxSetupArgs { + pub execution_mode: TxExecutionMode, pub operator_account: AccountTreeId, pub fee_input: BatchFeeInput, pub base_system_contracts: MultiVMBaseSystemContracts, @@ -173,12 +167,17 @@ pub(crate) struct TxSharedArgs { pub validation_computational_gas_limit: u32, pub chain_id: L2ChainId, pub whitelisted_tokens_for_aa: Vec
, + pub enforced_base_fee: Option, } -impl TxSharedArgs { +impl TxSetupArgs { #[cfg(test)] - pub fn mock(base_system_contracts: MultiVMBaseSystemContracts) -> Self { + pub fn mock( + execution_mode: TxExecutionMode, + base_system_contracts: MultiVMBaseSystemContracts, + ) -> Self { Self { + execution_mode, operator_account: AccountTreeId::default(), fee_input: BatchFeeInput::l1_pegged(55, 555), base_system_contracts, @@ -186,6 +185,7 @@ impl TxSharedArgs { validation_computational_gas_limit: u32::MAX, chain_id: L2ChainId::default(), whitelisted_tokens_for_aa: Vec::new(), + enforced_base_fee: None, } } } @@ -417,3 +417,28 @@ impl BlockArgs { ) } } + +/// VM executor capable of executing isolated transactions / calls (as opposed to batch execution). +#[async_trait] +trait OneshotExecutor { + type Tracers: Default; + + async fn inspect_transaction( + &self, + storage: S, + env: OneshotEnv, + args: TxExecutionArgs, + tracers: Self::Tracers, + ) -> anyhow::Result; + + async fn inspect_transaction_with_bytecode_compression( + &self, + storage: S, + env: OneshotEnv, + args: TxExecutionArgs, + tracers: Self::Tracers, + ) -> anyhow::Result<( + Result<(), BytecodeCompressionError>, + VmExecutionResultAndLogs, + )>; +} diff --git a/core/node/api_server/src/execution_sandbox/testonly.rs b/core/node/api_server/src/execution_sandbox/testonly.rs index 59fa2e38db7a..d9d60f52415a 100644 --- a/core/node/api_server/src/execution_sandbox/testonly.rs +++ b/core/node/api_server/src/execution_sandbox/testonly.rs @@ -1,24 +1,24 @@ use std::fmt; +use async_trait::async_trait; +#[cfg(test)] +use zksync_multivm::interface::ExecutionResult; use zksync_multivm::interface::{ - ExecutionResult, TransactionExecutionMetrics, VmExecutionResultAndLogs, + storage::ReadStorage, BytecodeCompressionError, OneshotEnv, TxExecutionMode, + VmExecutionResultAndLogs, }; -use zksync_types::{l2::L2Tx, ExecuteTransactionCommon, Transaction}; +use zksync_types::Transaction; -use super::{ - execute::{TransactionExecutionOutput, TransactionExecutor}, - validate::ValidationError, - BlockArgs, -}; +use super::{execute::TransactionExecutor, OneshotExecutor, TxExecutionArgs}; -type TxResponseFn = dyn Fn(&Transaction, &BlockArgs) -> VmExecutionResultAndLogs + Send + Sync; +type TxResponseFn = dyn Fn(&Transaction, &OneshotEnv) -> VmExecutionResultAndLogs + Send + Sync; -pub struct MockTransactionExecutor { +pub struct MockOneshotExecutor { call_responses: Box, tx_responses: Box, } -impl fmt::Debug for MockTransactionExecutor { +impl fmt::Debug for MockOneshotExecutor { fn fmt(&self, formatter: &mut fmt::Formatter<'_>) -> fmt::Result { formatter .debug_struct("MockTransactionExecutor") @@ -26,7 +26,7 @@ impl fmt::Debug for MockTransactionExecutor { } } -impl Default for MockTransactionExecutor { +impl Default for MockOneshotExecutor { fn default() -> Self { Self { call_responses: Box::new(|tx, _| { @@ -42,11 +42,11 @@ impl Default for MockTransactionExecutor { } } -impl MockTransactionExecutor { +impl MockOneshotExecutor { #[cfg(test)] pub(crate) fn set_call_responses(&mut self, responses: F) where - F: Fn(&Transaction, &BlockArgs) -> ExecutionResult + 'static + Send + Sync, + F: Fn(&Transaction, &OneshotEnv) -> ExecutionResult + 'static + Send + Sync, { self.call_responses = self.wrap_responses(responses); } @@ -54,7 +54,7 @@ impl MockTransactionExecutor { #[cfg(test)] pub(crate) fn set_tx_responses(&mut self, responses: F) where - F: Fn(&Transaction, &BlockArgs) -> ExecutionResult + 'static + Send + Sync, + F: Fn(&Transaction, &OneshotEnv) -> 
ExecutionResult + 'static + Send + Sync, { self.tx_responses = self.wrap_responses(responses); } @@ -62,12 +62,12 @@ impl MockTransactionExecutor { #[cfg(test)] fn wrap_responses(&mut self, responses: F) -> Box where - F: Fn(&Transaction, &BlockArgs) -> ExecutionResult + 'static + Send + Sync, + F: Fn(&Transaction, &OneshotEnv) -> ExecutionResult + 'static + Send + Sync, { Box::new( - move |tx: &Transaction, ba: &BlockArgs| -> VmExecutionResultAndLogs { + move |tx: &Transaction, env: &OneshotEnv| -> VmExecutionResultAndLogs { VmExecutionResultAndLogs { - result: responses(tx, ba), + result: responses(tx, env), logs: Default::default(), statistics: Default::default(), refunds: Default::default(), @@ -79,56 +79,54 @@ impl MockTransactionExecutor { #[cfg(test)] pub(crate) fn set_tx_responses_with_logs(&mut self, responses: F) where - F: Fn(&Transaction, &BlockArgs) -> VmExecutionResultAndLogs + 'static + Send + Sync, + F: Fn(&Transaction, &OneshotEnv) -> VmExecutionResultAndLogs + 'static + Send + Sync, { self.tx_responses = Box::new(responses); } - pub(crate) fn validate_tx( - &self, - tx: L2Tx, - block_args: &BlockArgs, - ) -> Result<(), ValidationError> { - let result = (self.tx_responses)(&tx.into(), block_args); - match result.result { - ExecutionResult::Success { .. } => Ok(()), - other => Err(ValidationError::Internal(anyhow::anyhow!( - "transaction validation failed: {other:?}" - ))), + fn mock_inspect(&self, env: OneshotEnv, args: TxExecutionArgs) -> VmExecutionResultAndLogs { + match env.system.execution_mode { + TxExecutionMode::EthCall => (self.call_responses)(&args.transaction, &env), + TxExecutionMode::VerifyExecute | TxExecutionMode::EstimateFee => { + (self.tx_responses)(&args.transaction, &env) + } } } +} - pub(crate) fn execute_tx( - &self, - tx: &Transaction, - block_args: &BlockArgs, - ) -> anyhow::Result { - let result = self.get_execution_result(tx, block_args); - let output = TransactionExecutionOutput { - vm: result, - metrics: TransactionExecutionMetrics::default(), - are_published_bytecodes_ok: true, - }; +#[async_trait] +impl OneshotExecutor for MockOneshotExecutor +where + S: ReadStorage + Send + 'static, +{ + type Tracers = (); - Ok(output) + async fn inspect_transaction( + &self, + _storage: S, + env: OneshotEnv, + args: TxExecutionArgs, + (): Self::Tracers, + ) -> anyhow::Result { + Ok(self.mock_inspect(env, args)) } - fn get_execution_result( + async fn inspect_transaction_with_bytecode_compression( &self, - tx: &Transaction, - block_args: &BlockArgs, - ) -> VmExecutionResultAndLogs { - if let ExecuteTransactionCommon::L2(data) = &tx.common_data { - if data.input.is_none() { - return (self.call_responses)(tx, block_args); - } - } - (self.tx_responses)(tx, block_args) + _storage: S, + env: OneshotEnv, + args: TxExecutionArgs, + (): Self::Tracers, + ) -> anyhow::Result<( + Result<(), BytecodeCompressionError>, + VmExecutionResultAndLogs, + )> { + Ok((Ok(()), self.mock_inspect(env, args))) } } -impl From for TransactionExecutor { - fn from(executor: MockTransactionExecutor) -> Self { +impl From for TransactionExecutor { + fn from(executor: MockOneshotExecutor) -> Self { Self::Mock(executor) } } diff --git a/core/node/api_server/src/execution_sandbox/tests.rs b/core/node/api_server/src/execution_sandbox/tests.rs index 0a8af35597b3..da593292e2e1 100644 --- a/core/node/api_server/src/execution_sandbox/tests.rs +++ b/core/node/api_server/src/execution_sandbox/tests.rs @@ -4,9 +4,13 @@ use assert_matches::assert_matches; use zksync_dal::ConnectionPool; use 
zksync_node_genesis::{insert_genesis_batch, GenesisParams}; use zksync_node_test_utils::{create_l2_block, create_l2_transaction, prepare_recovery_snapshot}; +use zksync_types::{api::state_override::StateOverride, Transaction}; use super::*; -use crate::{execution_sandbox::apply::apply_vm_in_sandbox, tx_sender::ApiContracts}; +use crate::{ + execution_sandbox::{apply::VmSandbox, storage::StorageWithOverrides}, + tx_sender::ApiContracts, +}; #[tokio::test] async fn creating_block_args() { @@ -165,43 +169,43 @@ async fn creating_block_args_after_snapshot_recovery() { #[tokio::test] async fn instantiating_vm() { let pool = ConnectionPool::::test_pool().await; - let mut storage = pool.connection().await.unwrap(); - insert_genesis_batch(&mut storage, &GenesisParams::mock()) + let mut connection = pool.connection().await.unwrap(); + insert_genesis_batch(&mut connection, &GenesisParams::mock()) .await .unwrap(); - let block_args = BlockArgs::pending(&mut storage).await.unwrap(); - test_instantiating_vm(pool.clone(), block_args).await; - let start_info = BlockStartInfo::new(&mut storage, Duration::MAX) + let block_args = BlockArgs::pending(&mut connection).await.unwrap(); + test_instantiating_vm(connection, block_args).await; + + let mut connection = pool.connection().await.unwrap(); + let start_info = BlockStartInfo::new(&mut connection, Duration::MAX) .await .unwrap(); - let block_args = BlockArgs::new(&mut storage, api::BlockId::Number(0.into()), &start_info) + let block_args = BlockArgs::new(&mut connection, api::BlockId::Number(0.into()), &start_info) .await .unwrap(); - test_instantiating_vm(pool.clone(), block_args).await; + test_instantiating_vm(connection, block_args).await; } -async fn test_instantiating_vm(pool: ConnectionPool, block_args: BlockArgs) { - let (vm_concurrency_limiter, _) = VmConcurrencyLimiter::new(1); - let vm_permit = vm_concurrency_limiter.acquire().await.unwrap(); - let transaction = create_l2_transaction(10, 100).into(); +async fn test_instantiating_vm(connection: Connection<'static, Core>, block_args: BlockArgs) { + let transaction = Transaction::from(create_l2_transaction(10, 100)); let estimate_gas_contracts = ApiContracts::load_from_disk().await.unwrap().estimate_gas; + + let execution_args = TxExecutionArgs::for_gas_estimate(transaction.clone()); + let (env, storage) = apply::prepare_env_and_storage( + connection, + TxSetupArgs::mock(TxExecutionMode::EstimateFee, estimate_gas_contracts), + &block_args, + ) + .await + .unwrap(); + let storage = StorageWithOverrides::new(storage, &StateOverride::default()); + tokio::task::spawn_blocking(move || { - apply_vm_in_sandbox( - vm_permit, - TxSharedArgs::mock(estimate_gas_contracts), - true, - &TxExecutionArgs::for_gas_estimate(None, &transaction, 123), - &pool, - transaction.clone(), - block_args, - None, - |_, received_tx, _| { - assert_eq!(received_tx, transaction); - }, - ) + VmSandbox::new(storage, env, execution_args).apply(|_, received_tx| { + assert_eq!(received_tx, transaction); + }); }) .await - .expect("VM instantiation panicked") - .expect("VM instantiation errored"); + .expect("VM execution panicked") } diff --git a/core/node/api_server/src/execution_sandbox/tracers.rs b/core/node/api_server/src/execution_sandbox/tracers.rs index 8d61d896a362..31384b7a0898 100644 --- a/core/node/api_server/src/execution_sandbox/tracers.rs +++ b/core/node/api_server/src/execution_sandbox/tracers.rs @@ -3,26 +3,49 @@ use std::sync::Arc; use once_cell::sync::OnceCell; use zksync_multivm::{ interface::{storage::WriteStorage, 
Call}, - tracers::CallTracer, - vm_latest::HistoryMode, + tracers::{CallTracer, ValidationTracer, ValidationTracerParams, ViolatedValidationRule}, + vm_latest::HistoryDisabled, MultiVMTracer, MultiVmTracerPointer, }; +use zksync_types::ProtocolVersionId; -/// Custom tracers supported by our API +/// Custom tracers supported by the API sandbox. #[derive(Debug)] pub(crate) enum ApiTracer { CallTracer(Arc>>), + Validation { + params: ValidationTracerParams, + result: Arc>, + }, } impl ApiTracer { - pub fn into_boxed< - S: WriteStorage, - H: HistoryMode + zksync_multivm::HistoryMode + 'static, - >( + pub fn validation( + params: ValidationTracerParams, + ) -> (Self, Arc>) { + let result = Arc::>::default(); + let this = Self::Validation { + params, + result: result.clone(), + }; + (this, result) + } + + pub(super) fn into_boxed( self, - ) -> MultiVmTracerPointer { + protocol_version: ProtocolVersionId, + ) -> MultiVmTracerPointer + where + S: WriteStorage, + { match self { - ApiTracer::CallTracer(tracer) => CallTracer::new(tracer.clone()).into_tracer_pointer(), + Self::CallTracer(traces) => CallTracer::new(traces).into_tracer_pointer(), + Self::Validation { params, result } => { + let (mut tracer, _) = + ValidationTracer::::new(params, protocol_version.into()); + tracer.result = result; + tracer.into_tracer_pointer() + } } } } diff --git a/core/node/api_server/src/execution_sandbox/validate.rs b/core/node/api_server/src/execution_sandbox/validate.rs index a856386b4562..a95cf6c3a91e 100644 --- a/core/node/api_server/src/execution_sandbox/validate.rs +++ b/core/node/api_server/src/execution_sandbox/validate.rs @@ -1,23 +1,23 @@ use std::collections::HashSet; use anyhow::Context as _; -use zksync_dal::{Connection, ConnectionPool, Core, CoreDal}; +use tracing::Instrument; +use zksync_dal::{Connection, Core, CoreDal}; use zksync_multivm::{ - interface::{ExecutionResult, VmExecutionMode, VmInterface}, - tracers::{ - StorageInvocations, ValidationError as RawValidationError, ValidationTracer, - ValidationTracerParams, - }, - vm_latest::HistoryDisabled, - MultiVMTracer, + interface::ExecutionResult, + tracers::{ValidationError as RawValidationError, ValidationTracerParams}, +}; +use zksync_types::{ + api::state_override::StateOverride, l2::L2Tx, Address, TRUSTED_ADDRESS_SLOTS, + TRUSTED_TOKEN_SLOTS, }; -use zksync_types::{l2::L2Tx, Address, Transaction, TRUSTED_ADDRESS_SLOTS, TRUSTED_TOKEN_SLOTS}; use super::{ apply, execute::TransactionExecutor, + storage::StorageWithOverrides, vm_metrics::{SandboxStage, EXECUTION_METRICS, SANDBOX_METRICS}, - BlockArgs, TxExecutionArgs, TxSharedArgs, VmPermit, + ApiTracer, BlockArgs, OneshotExecutor, TxExecutionArgs, TxSetupArgs, VmPermit, }; /// Validation error used by the sandbox. 
Besides validation errors returned by VM, it also includes an internal error @@ -31,88 +31,46 @@ pub(crate) enum ValidationError { } impl TransactionExecutor { + #[tracing::instrument(level = "debug", skip_all)] pub(crate) async fn validate_tx_in_sandbox( &self, - connection_pool: ConnectionPool, + mut connection: Connection<'static, Core>, vm_permit: VmPermit, tx: L2Tx, - shared_args: TxSharedArgs, + setup_args: TxSetupArgs, block_args: BlockArgs, computational_gas_limit: u32, ) -> Result<(), ValidationError> { - if let Self::Mock(mock) = self { - return mock.validate_tx(tx, &block_args); - } - - let stage_latency = SANDBOX_METRICS.sandbox[&SandboxStage::ValidateInSandbox].start(); - let mut connection = connection_pool - .connection_tagged("api") - .await - .context("failed acquiring DB connection")?; - let validation_params = get_validation_params( + let total_latency = SANDBOX_METRICS.sandbox[&SandboxStage::ValidateInSandbox].start(); + let params = get_validation_params( &mut connection, &tx, computational_gas_limit, - &shared_args.whitelisted_tokens_for_aa, + &setup_args.whitelisted_tokens_for_aa, ) .await .context("failed getting validation params")?; - drop(connection); - - let execution_args = TxExecutionArgs::for_validation(&tx); - let tx: Transaction = tx.into(); - - let validation_result = tokio::task::spawn_blocking(move || { - let span = tracing::debug_span!("validate_in_sandbox").entered(); - let result = apply::apply_vm_in_sandbox( - vm_permit, - shared_args, - true, - &execution_args, - &connection_pool, - tx, - block_args, - None, - |vm, tx, protocol_version| { - let stage_latency = SANDBOX_METRICS.sandbox[&SandboxStage::Validation].start(); - let span = tracing::debug_span!("validation").entered(); - vm.push_transaction(tx); - - let (tracer, validation_result) = ValidationTracer::::new( - validation_params, - protocol_version.into(), - ); - - let result = vm.inspect( - vec![ - tracer.into_tracer_pointer(), - StorageInvocations::new(execution_args.missed_storage_invocation_limit) - .into_tracer_pointer(), - ] - .into(), - VmExecutionMode::OneTx, - ); - - let result = match (result.result, validation_result.get()) { - (_, Some(err)) => Err(RawValidationError::ViolatedRule(err.clone())), - (ExecutionResult::Halt { reason }, _) => { - Err(RawValidationError::FailedTx(reason)) - } - (_, None) => Ok(()), - }; - - stage_latency.observe(); - span.exit(); - result - }, - ); - span.exit(); - result - }) - .await - .context("transaction validation panicked")??; + let (env, storage) = + apply::prepare_env_and_storage(connection, setup_args, &block_args).await?; + let storage = StorageWithOverrides::new(storage, &StateOverride::default()); + + let execution_args = TxExecutionArgs::for_validation(tx); + let (tracer, validation_result) = ApiTracer::validation(params); + let stage_latency = SANDBOX_METRICS.sandbox[&SandboxStage::Validation].start(); + let result = self + .inspect_transaction(storage, env, execution_args, vec![tracer]) + .instrument(tracing::debug_span!("validation")) + .await?; + drop(vm_permit); stage_latency.observe(); + + let validation_result = match (result.result, validation_result.get()) { + (_, Some(rule)) => Err(RawValidationError::ViolatedRule(rule.clone())), + (ExecutionResult::Halt { reason }, _) => Err(RawValidationError::FailedTx(reason)), + (_, None) => Ok(()), + }; + total_latency.observe(); validation_result.map_err(ValidationError::Vm) } } diff --git a/core/node/api_server/src/tx_sender/mod.rs b/core/node/api_server/src/tx_sender/mod.rs index 
cec2e14ddb26..c6f652da0167 100644 --- a/core/node/api_server/src/tx_sender/mod.rs +++ b/core/node/api_server/src/tx_sender/mod.rs @@ -10,10 +10,10 @@ use zksync_dal::{ transactions_dal::L2TxSubmissionResult, Connection, ConnectionPool, Core, CoreDal, }; use zksync_multivm::{ - interface::{TransactionExecutionMetrics, VmExecutionResultAndLogs}, + interface::{TransactionExecutionMetrics, TxExecutionMode, VmExecutionResultAndLogs}, utils::{ adjust_pubdata_price_for_tx, derive_base_fee_and_gas_per_pubdata, derive_overhead, - get_eth_call_gas_limit, get_max_batch_gas_limit, + get_max_batch_gas_limit, }, vm_latest::constants::BATCH_COMPUTATIONAL_GAS_LIMIT, }; @@ -41,7 +41,7 @@ pub(super) use self::result::SubmitTxError; use self::{master_pool_sink::MasterPoolSink, tx_sink::TxSink}; use crate::{ execution_sandbox::{ - BlockArgs, SubmitTxStage, TransactionExecutor, TxExecutionArgs, TxSharedArgs, + BlockArgs, SubmitTxStage, TransactionExecutor, TxExecutionArgs, TxSetupArgs, VmConcurrencyBarrier, VmConcurrencyLimiter, VmPermit, SANDBOX_METRICS, }, tx_sender::result::ApiCallResult, @@ -252,6 +252,10 @@ impl TxSenderBuilder { self.whitelisted_tokens_for_aa_cache.unwrap_or_else(|| { Arc::new(RwLock::new(self.config.whitelisted_tokens_for_aa.clone())) }); + let missed_storage_invocation_limit = self + .config + .vm_execution_cache_misses_limit + .unwrap_or(usize::MAX); TxSender(Arc::new(TxSenderInner { sender_config: self.config, @@ -263,7 +267,7 @@ impl TxSenderBuilder { storage_caches, whitelisted_tokens_for_aa_cache, sealer, - executor: TransactionExecutor::Real, + executor: TransactionExecutor::real(missed_storage_invocation_limit), })) } } @@ -320,7 +324,7 @@ pub struct TxSenderInner { // Cache for white-listed tokens. pub(super) whitelisted_tokens_for_aa_cache: Arc>>, /// Batch sealer used to check whether transaction can be executed by the sequencer. 
- sealer: Arc, + pub(super) sealer: Arc, pub(super) executor: TransactionExecutor, } @@ -346,7 +350,7 @@ impl TxSender { self.0.whitelisted_tokens_for_aa_cache.read().await.clone() } - async fn acquire_replica_connection(&self) -> anyhow::Result> { + async fn acquire_replica_connection(&self) -> anyhow::Result> { self.0 .replica_connection_pool .connection_tagged("api") @@ -368,23 +372,20 @@ impl TxSender { stage_latency.observe(); let stage_latency = SANDBOX_METRICS.start_tx_submit_stage(tx_hash, SubmitTxStage::DryRun); - let shared_args = self.shared_args().await?; + let setup_args = self.call_args(&tx, None).await?; let vm_permit = self.0.vm_concurrency_limiter.acquire().await; let vm_permit = vm_permit.ok_or(SubmitTxError::ServerShuttingDown)?; let mut connection = self.acquire_replica_connection().await?; let block_args = BlockArgs::pending(&mut connection).await?; - drop(connection); let execution_output = self .0 .executor .execute_tx_in_sandbox( vm_permit.clone(), - shared_args.clone(), - true, - TxExecutionArgs::for_validation(&tx), - self.0.replica_connection_pool.clone(), - tx.clone().into(), + setup_args.clone(), + TxExecutionArgs::for_validation(tx.clone()), + connection, block_args, None, vec![], @@ -398,15 +399,16 @@ impl TxSender { let stage_latency = SANDBOX_METRICS.start_tx_submit_stage(tx_hash, SubmitTxStage::VerifyExecute); + let connection = self.acquire_replica_connection().await?; let computational_gas_limit = self.0.sender_config.validation_computational_gas_limit; let validation_result = self .0 .executor .validate_tx_in_sandbox( - self.0.replica_connection_pool.clone(), + connection, vm_permit, tx.clone(), - shared_args, + setup_args, block_args, computational_gas_limit, ) @@ -462,14 +464,23 @@ impl TxSender { /// **Important.** For the main node, this method acquires a DB connection inside `get_batch_fee_input()`. /// Thus, you shouldn't call it if you're holding a DB connection already. 
- async fn shared_args(&self) -> anyhow::Result { + async fn call_args( + &self, + tx: &L2Tx, + call_overrides: Option<&CallOverrides>, + ) -> anyhow::Result { let fee_input = self .0 .batch_fee_input_provider .get_batch_fee_input() .await .context("cannot get batch fee input")?; - Ok(TxSharedArgs { + Ok(TxSetupArgs { + execution_mode: if call_overrides.is_some() { + TxExecutionMode::EthCall + } else { + TxExecutionMode::VerifyExecute + }, operator_account: AccountTreeId::new(self.0.sender_config.fee_account_addr), fee_input, base_system_contracts: self.0.api_contracts.eth_call.clone(), @@ -480,6 +491,11 @@ impl TxSender { .validation_computational_gas_limit, chain_id: self.0.sender_config.chain_id, whitelisted_tokens_for_aa: self.read_whitelisted_tokens_for_aa_cache().await, + enforced_base_fee: if let Some(overrides) = call_overrides { + overrides.enforced_base_fee + } else { + Some(tx.common_data.fee.max_fee_per_gas.as_u64()) + }, }) } @@ -696,20 +712,17 @@ impl TxSender { } } - let shared_args = self.shared_args_for_gas_estimate(fee_model_params).await; - let vm_execution_cache_misses_limit = self.0.sender_config.vm_execution_cache_misses_limit; - let execution_args = - TxExecutionArgs::for_gas_estimate(vm_execution_cache_misses_limit, &tx, base_fee); + let setup_args = self.args_for_gas_estimate(fee_model_params, base_fee).await; + let execution_args = TxExecutionArgs::for_gas_estimate(tx); + let connection = self.acquire_replica_connection().await?; let execution_output = self .0 .executor .execute_tx_in_sandbox( vm_permit, - shared_args, - true, + setup_args, execution_args, - self.0.replica_connection_pool.clone(), - tx.clone(), + connection, block_args, state_override, vec![], @@ -718,10 +731,10 @@ impl TxSender { Ok((execution_output.vm, execution_output.metrics)) } - async fn shared_args_for_gas_estimate(&self, fee_input: BatchFeeInput) -> TxSharedArgs { + async fn args_for_gas_estimate(&self, fee_input: BatchFeeInput, base_fee: u64) -> TxSetupArgs { let config = &self.0.sender_config; - - TxSharedArgs { + TxSetupArgs { + execution_mode: TxExecutionMode::EstimateFee, operator_account: AccountTreeId::new(config.fee_account_addr), fee_input, // We want to bypass the computation gas limit check for gas estimation @@ -730,6 +743,7 @@ impl TxSender { caches: self.storage_caches(), chain_id: config.chain_id, whitelisted_tokens_for_aa: self.read_whitelisted_tokens_for_aa_cache().await, + enforced_base_fee: Some(base_fee), } } @@ -999,22 +1013,21 @@ impl TxSender { let vm_permit = self.0.vm_concurrency_limiter.acquire().await; let vm_permit = vm_permit.ok_or(SubmitTxError::ServerShuttingDown)?; - let vm_execution_cache_misses_limit = self.0.sender_config.vm_execution_cache_misses_limit; - self.0 + let connection = self.acquire_replica_connection().await?; + let result = self + .0 .executor - .execute_tx_eth_call( + .execute_tx_in_sandbox( vm_permit, - self.shared_args().await?, - self.0.replica_connection_pool.clone(), - call_overrides, - tx, + self.call_args(&tx, Some(&call_overrides)).await?, + TxExecutionArgs::for_eth_call(tx), + connection, block_args, - vm_execution_cache_misses_limit, - vec![], state_override, + vec![], ) - .await? 
- .into_api_call_result() + .await?; + result.vm.into_api_call_result() } pub async fn gas_price(&self) -> anyhow::Result { @@ -1067,19 +1080,4 @@ impl TxSender { } Ok(()) } - - pub(crate) async fn get_default_eth_call_gas( - &self, - block_args: BlockArgs, - ) -> anyhow::Result { - let mut connection = self.acquire_replica_connection().await?; - - let protocol_version = block_args - .resolve_block_info(&mut connection) - .await - .context("failed to resolve block info")? - .protocol_version; - - Ok(get_eth_call_gas_limit(protocol_version.into())) - } } diff --git a/core/node/api_server/src/tx_sender/tests.rs b/core/node/api_server/src/tx_sender/tests.rs index 06b6b7a1301b..5f0f0dc925a2 100644 --- a/core/node/api_server/src/tx_sender/tests.rs +++ b/core/node/api_server/src/tx_sender/tests.rs @@ -10,7 +10,7 @@ use zksync_utils::u256_to_h256; use super::*; use crate::{ - execution_sandbox::testonly::MockTransactionExecutor, web3::testonly::create_test_tx_sender, + execution_sandbox::testonly::MockOneshotExecutor, web3::testonly::create_test_tx_sender, }; #[tokio::test] @@ -31,7 +31,7 @@ async fn getting_nonce_for_account() { .await .unwrap(); - let tx_executor = MockTransactionExecutor::default().into(); + let tx_executor = MockOneshotExecutor::default().into(); let (tx_sender, _) = create_test_tx_sender(pool.clone(), l2_chain_id, tx_executor).await; let nonce = tx_sender.get_expected_nonce(test_address).await.unwrap(); @@ -81,7 +81,7 @@ async fn getting_nonce_for_account_after_snapshot_recovery() { .await; let l2_chain_id = L2ChainId::default(); - let tx_executor = MockTransactionExecutor::default().into(); + let tx_executor = MockOneshotExecutor::default().into(); let (tx_sender, _) = create_test_tx_sender(pool.clone(), l2_chain_id, tx_executor).await; storage @@ -136,7 +136,7 @@ async fn submitting_tx_requires_one_connection() { .unwrap(); drop(storage); - let mut tx_executor = MockTransactionExecutor::default(); + let mut tx_executor = MockOneshotExecutor::default(); tx_executor.set_tx_responses(move |received_tx, _| { assert_eq!(received_tx.hash(), tx_hash); ExecutionResult::Success { output: vec![] } diff --git a/core/node/api_server/src/web3/namespaces/debug.rs b/core/node/api_server/src/web3/namespaces/debug.rs index e71f4bd1e1ef..473391476a3b 100644 --- a/core/node/api_server/src/web3/namespaces/debug.rs +++ b/core/node/api_server/src/web3/namespaces/debug.rs @@ -4,7 +4,7 @@ use anyhow::Context as _; use once_cell::sync::OnceCell; use zksync_dal::{CoreDal, DalError}; use zksync_multivm::{ - interface::{Call, CallType, ExecutionResult}, + interface::{Call, CallType, ExecutionResult, TxExecutionMode}, vm_latest::constants::BATCH_COMPUTATIONAL_GAS_LIMIT, }; use zksync_system_constants::MAX_ENCODED_TX_SIZE; @@ -19,7 +19,7 @@ use zksync_types::{ use zksync_web3_decl::error::Web3Error; use crate::{ - execution_sandbox::{ApiTracer, TxSharedArgs}, + execution_sandbox::{ApiTracer, TxExecutionArgs, TxSetupArgs}, tx_sender::{ApiContracts, TxSenderConfig}, web3::{backend_jsonrpsee::MethodTracer, state::RpcState}, }; @@ -167,29 +167,20 @@ impl DebugNamespace { .state .resolve_block_args(&mut connection, block_id) .await?; - drop(connection); - self.current_method().set_block_diff( self.state .last_sealed_l2_block .diff_with_block_args(&block_args), ); - if request.gas.is_none() { - request.gas = Some( - self.state - .tx_sender - .get_default_eth_call_gas(block_args) - .await - .map_err(Web3Error::InternalError)? 
- .into(), - ) + request.gas = Some(block_args.default_eth_call_gas(&mut connection).await?); } + drop(connection); let call_overrides = request.get_call_overrides()?; let tx = L2Tx::from_request(request.into(), MAX_ENCODED_TX_SIZE)?; - let shared_args = self.shared_args().await; + let setup_args = self.call_args(call_overrides.enforced_base_fee).await; let vm_permit = self .state .tx_sender @@ -206,20 +197,20 @@ impl DebugNamespace { vec![ApiTracer::CallTracer(call_tracer_result.clone())] }; + let connection = self.state.acquire_connection().await?; let executor = &self.state.tx_sender.0.executor; let result = executor - .execute_tx_eth_call( + .execute_tx_in_sandbox( vm_permit, - shared_args, - self.state.connection_pool.clone(), - call_overrides, - tx.clone(), + setup_args, + TxExecutionArgs::for_eth_call(tx.clone()), + connection, block_args, - self.sender_config().vm_execution_cache_misses_limit, - custom_tracers, None, + custom_tracers, ) - .await?; + .await? + .vm; let (output, revert_reason) = match result.result { ExecutionResult::Success { output, .. } => (output, None), @@ -249,9 +240,10 @@ impl DebugNamespace { Ok(Self::map_call(call, false)) } - async fn shared_args(&self) -> TxSharedArgs { + async fn call_args(&self, enforced_base_fee: Option) -> TxSetupArgs { let sender_config = self.sender_config(); - TxSharedArgs { + TxSetupArgs { + execution_mode: TxExecutionMode::EthCall, operator_account: AccountTreeId::default(), fee_input: self.batch_fee_input, base_system_contracts: self.api_contracts.eth_call.clone(), @@ -263,6 +255,7 @@ impl DebugNamespace { .tx_sender .read_whitelisted_tokens_for_aa_cache() .await, + enforced_base_fee, } } } diff --git a/core/node/api_server/src/web3/namespaces/eth.rs b/core/node/api_server/src/web3/namespaces/eth.rs index c3bed64a1468..fda5ff6f06be 100644 --- a/core/node/api_server/src/web3/namespaces/eth.rs +++ b/core/node/api_server/src/web3/namespaces/eth.rs @@ -70,18 +70,11 @@ impl EthNamespace { .last_sealed_l2_block .diff_with_block_args(&block_args), ); - drop(connection); - if request.gas.is_none() { - request.gas = Some( - self.state - .tx_sender - .get_default_eth_call_gas(block_args) - .await - .map_err(Web3Error::InternalError)? 
- .into(), - ) + request.gas = Some(block_args.default_eth_call_gas(&mut connection).await?); } + drop(connection); + let call_overrides = request.get_call_overrides()?; let tx = L2Tx::from_request(request.into(), self.state.api_config.max_tx_size)?; diff --git a/core/node/api_server/src/web3/testonly.rs b/core/node/api_server/src/web3/testonly.rs index 0f8c71aa6281..d8e7d0b65393 100644 --- a/core/node/api_server/src/web3/testonly.rs +++ b/core/node/api_server/src/web3/testonly.rs @@ -8,11 +8,12 @@ use zksync_dal::ConnectionPool; use zksync_health_check::CheckHealth; use zksync_node_fee_model::MockBatchFeeParamsProvider; use zksync_state::PostgresStorageCaches; +use zksync_state_keeper::seal_criteria::NoopSealer; use zksync_types::L2ChainId; use super::{metrics::ApiTransportLabel, *}; use crate::{ - execution_sandbox::{testonly::MockTransactionExecutor, TransactionExecutor}, + execution_sandbox::{testonly::MockOneshotExecutor, TransactionExecutor}, tx_sender::TxSenderConfig, }; @@ -48,7 +49,9 @@ pub(crate) async fn create_test_tx_sender( .await .expect("failed building transaction sender"); - Arc::get_mut(&mut tx_sender.0).unwrap().executor = tx_executor; + let tx_sender_inner = Arc::get_mut(&mut tx_sender.0).unwrap(); + tx_sender_inner.executor = tx_executor; + tx_sender_inner.sealer = Arc::new(NoopSealer); // prevents "unexecutable transaction" errors (tx_sender, vm_barrier) } @@ -99,7 +102,7 @@ impl ApiServerHandles { pub async fn spawn_http_server( api_config: InternalApiConfig, pool: ConnectionPool, - tx_executor: MockTransactionExecutor, + tx_executor: MockOneshotExecutor, method_tracer: Arc, stop_receiver: watch::Receiver, ) -> ApiServerHandles { @@ -127,7 +130,7 @@ pub async fn spawn_ws_server( api_config, pool, websocket_requests_per_minute_limit, - MockTransactionExecutor::default(), + MockOneshotExecutor::default(), Arc::default(), stop_receiver, ) @@ -139,7 +142,7 @@ async fn spawn_server( api_config: InternalApiConfig, pool: ConnectionPool, websocket_requests_per_minute_limit: Option, - tx_executor: MockTransactionExecutor, + tx_executor: MockOneshotExecutor, method_tracer: Arc, stop_receiver: watch::Receiver, ) -> (ApiServerHandles, mpsc::UnboundedReceiver) { diff --git a/core/node/api_server/src/web3/tests/mod.rs b/core/node/api_server/src/web3/tests/mod.rs index 409eb2004d17..5617b097c0c1 100644 --- a/core/node/api_server/src/web3/tests/mod.rs +++ b/core/node/api_server/src/web3/tests/mod.rs @@ -26,9 +26,12 @@ use zksync_node_test_utils::{ create_l1_batch, create_l1_batch_metadata, create_l2_block, create_l2_transaction, l1_batch_metadata_to_commitment_artifacts, prepare_recovery_snapshot, }; +use zksync_system_constants::{ + SYSTEM_CONTEXT_ADDRESS, SYSTEM_CONTEXT_CURRENT_L2_BLOCK_INFO_POSITION, +}; use zksync_types::{ api, - block::L2BlockHeader, + block::{pack_block_info, L2BlockHeader}, get_nonce_key, l2::L2Tx, storage::get_code_key, @@ -55,7 +58,7 @@ use zksync_web3_decl::{ use super::*; use crate::{ - execution_sandbox::testonly::MockTransactionExecutor, + execution_sandbox::testonly::MockOneshotExecutor, web3::testonly::{spawn_http_server, spawn_ws_server}, }; @@ -135,8 +138,8 @@ trait HttpTest: Send + Sync { StorageInitialization::Genesis } - fn transaction_executor(&self) -> MockTransactionExecutor { - MockTransactionExecutor::default() + fn transaction_executor(&self) -> MockOneshotExecutor { + MockOneshotExecutor::default() } fn method_tracer(&self) -> Arc { @@ -174,7 +177,7 @@ impl StorageInitialization { } async fn prepare_storage( - &self, + self, 
network_config: &NetworkConfig, storage: &mut Connection<'_, Core>, ) -> anyhow::Result<()> { @@ -189,17 +192,33 @@ impl StorageInitialization { insert_genesis_batch(storage, ¶ms).await?; } } - Self::Recovery { logs, factory_deps } => { + Self::Recovery { + mut logs, + factory_deps, + } => { + let l2_block_info_key = StorageKey::new( + AccountTreeId::new(SYSTEM_CONTEXT_ADDRESS), + SYSTEM_CONTEXT_CURRENT_L2_BLOCK_INFO_POSITION, + ); + let block_info = pack_block_info( + Self::SNAPSHOT_RECOVERY_BLOCK.0.into(), + Self::SNAPSHOT_RECOVERY_BLOCK.0.into(), + ); + logs.push(StorageLog::new_write_log( + l2_block_info_key, + u256_to_h256(block_info), + )); + prepare_recovery_snapshot( storage, Self::SNAPSHOT_RECOVERY_BATCH, Self::SNAPSHOT_RECOVERY_BLOCK, - logs, + &logs, ) .await; storage .factory_deps_dal() - .insert_factory_deps(Self::SNAPSHOT_RECOVERY_BLOCK, factory_deps) + .insert_factory_deps(Self::SNAPSHOT_RECOVERY_BLOCK, &factory_deps) .await?; // Insert the next L1 batch in the storage so that the API server doesn't hang up. @@ -282,7 +301,7 @@ fn execute_l2_transaction(transaction: L2Tx) -> TransactionExecutionResult { } } -/// Stores L2 block with a single transaction and returns the L2 block header + transaction hash. +/// Stores L2 block and returns the L2 block header. async fn store_l2_block( storage: &mut Connection<'_, Core>, number: L2BlockNumber, @@ -298,6 +317,18 @@ async fn store_l2_block( assert_matches!(tx_submission_result, L2TxSubmissionResult::Added); } + // Record L2 block info which is read by the VM sandbox logic + let l2_block_info_key = StorageKey::new( + AccountTreeId::new(SYSTEM_CONTEXT_ADDRESS), + SYSTEM_CONTEXT_CURRENT_L2_BLOCK_INFO_POSITION, + ); + let block_info = pack_block_info(number.0.into(), number.0.into()); + let l2_block_log = StorageLog::new_write_log(l2_block_info_key, u256_to_h256(block_info)); + storage + .storage_logs_dal() + .append_storage_logs(number, &[l2_block_log]) + .await?; + let new_l2_block = create_l2_block(number.0); storage.blocks_dal().insert_l2_block(&new_l2_block).await?; storage diff --git a/core/node/api_server/src/web3/tests/vm.rs b/core/node/api_server/src/web3/tests/vm.rs index 90e1373a5cc6..5b04250eebf4 100644 --- a/core/node/api_server/src/web3/tests/vm.rs +++ b/core/node/api_server/src/web3/tests/vm.rs @@ -30,15 +30,15 @@ impl CallTest { } } - fn create_executor(only_block: L2BlockNumber) -> MockTransactionExecutor { - let mut tx_executor = MockTransactionExecutor::default(); - tx_executor.set_call_responses(move |tx, block_args| { + fn create_executor(latest_block: L2BlockNumber) -> MockOneshotExecutor { + let mut tx_executor = MockOneshotExecutor::default(); + tx_executor.set_call_responses(move |tx, env| { let expected_block_number = match tx.execute.calldata() { - b"pending" => only_block + 1, - b"first" => only_block, + b"pending" => latest_block + 1, + b"latest" => latest_block, data => panic!("Unexpected calldata: {data:?}"), }; - assert_eq!(block_args.resolved_block_number(), expected_block_number); + assert_eq!(env.l1_batch.first_l2_block.number, expected_block_number.0); ExecutionResult::Success { output: b"output".to_vec(), @@ -50,15 +50,20 @@ impl CallTest { #[async_trait] impl HttpTest for CallTest { - fn transaction_executor(&self) -> MockTransactionExecutor { - Self::create_executor(L2BlockNumber(0)) + fn transaction_executor(&self) -> MockOneshotExecutor { + Self::create_executor(L2BlockNumber(1)) } async fn test( &self, client: &DynClient, - _pool: &ConnectionPool, + pool: &ConnectionPool, ) -> 
anyhow::Result<()> { + // Store an additional L2 block because L2 block #0 has some special processing making it work incorrectly. + let mut connection = pool.connection().await?; + store_l2_block(&mut connection, L2BlockNumber(1), &[]).await?; + drop(connection); + let call_result = client .call(Self::call_request(b"pending"), None, None) .await?; @@ -66,8 +71,8 @@ impl HttpTest for CallTest { let valid_block_numbers_and_calldata = [ (api::BlockNumber::Pending, b"pending" as &[_]), - (api::BlockNumber::Latest, b"first"), - (0.into(), b"first"), + (api::BlockNumber::Latest, b"latest"), + (0.into(), b"latest"), ]; for (number, calldata) in valid_block_numbers_and_calldata { let number = api::BlockIdVariant::BlockNumber(number); @@ -107,7 +112,7 @@ impl HttpTest for CallTestAfterSnapshotRecovery { StorageInitialization::empty_recovery() } - fn transaction_executor(&self) -> MockTransactionExecutor { + fn transaction_executor(&self) -> MockOneshotExecutor { let first_local_l2_block = StorageInitialization::SNAPSHOT_RECOVERY_BLOCK + 1; CallTest::create_executor(first_local_l2_block) } @@ -146,7 +151,7 @@ impl HttpTest for CallTestAfterSnapshotRecovery { for number in first_l2_block_numbers { let number = api::BlockIdVariant::BlockNumber(number); let call_result = client - .call(CallTest::call_request(b"first"), Some(number), None) + .call(CallTest::call_request(b"latest"), Some(number), None) .await?; assert_eq!(call_result.0, b"output"); } @@ -213,16 +218,16 @@ impl HttpTest for SendRawTransactionTest { } } - fn transaction_executor(&self) -> MockTransactionExecutor { - let mut tx_executor = MockTransactionExecutor::default(); + fn transaction_executor(&self) -> MockOneshotExecutor { + let mut tx_executor = MockOneshotExecutor::default(); let pending_block = if self.snapshot_recovery { StorageInitialization::SNAPSHOT_RECOVERY_BLOCK + 2 } else { L2BlockNumber(1) }; - tx_executor.set_tx_responses(move |tx, block_args| { + tx_executor.set_tx_responses(move |tx, env| { assert_eq!(tx.hash(), Self::transaction_bytes_and_hash().1); - assert_eq!(block_args.resolved_block_number(), pending_block); + assert_eq!(env.l1_batch.first_l2_block.number, pending_block.0); ExecutionResult::Success { output: vec![] } }); tx_executor @@ -311,8 +316,8 @@ impl SendTransactionWithDetailedOutputTest { } #[async_trait] impl HttpTest for SendTransactionWithDetailedOutputTest { - fn transaction_executor(&self) -> MockTransactionExecutor { - let mut tx_executor = MockTransactionExecutor::default(); + fn transaction_executor(&self) -> MockOneshotExecutor { + let mut tx_executor = MockOneshotExecutor::default(); let tx_bytes_and_hash = SendRawTransactionTest::transaction_bytes_and_hash(); let vm_execution_logs = VmExecutionLogs { storage_logs: self.storage_logs(), @@ -322,9 +327,9 @@ impl HttpTest for SendTransactionWithDetailedOutputTest { total_log_queries_count: 0, }; - tx_executor.set_tx_responses_with_logs(move |tx, block_args| { + tx_executor.set_tx_responses_with_logs(move |tx, env| { assert_eq!(tx.hash(), tx_bytes_and_hash.1); - assert_eq!(block_args.resolved_block_number(), L2BlockNumber(1)); + assert_eq!(env.l1_batch.first_l2_block.number, 1); VmExecutionResultAndLogs { result: ExecutionResult::Success { output: vec![] }, @@ -406,15 +411,20 @@ impl TraceCallTest { #[async_trait] impl HttpTest for TraceCallTest { - fn transaction_executor(&self) -> MockTransactionExecutor { - CallTest::create_executor(L2BlockNumber(0)) + fn transaction_executor(&self) -> MockOneshotExecutor { + 
CallTest::create_executor(L2BlockNumber(1)) } async fn test( &self, client: &DynClient, - _pool: &ConnectionPool, + pool: &ConnectionPool, ) -> anyhow::Result<()> { + // Store an additional L2 block because L2 block #0 has some special processing making it work incorrectly. + let mut connection = pool.connection().await?; + store_l2_block(&mut connection, L2BlockNumber(1), &[]).await?; + drop(connection); + let call_request = CallTest::call_request(b"pending"); let call_result = client.trace_call(call_request.clone(), None, None).await?; Self::assert_debug_call(&call_request, &call_result); @@ -424,13 +434,9 @@ impl HttpTest for TraceCallTest { .await?; Self::assert_debug_call(&call_request, &call_result); - let genesis_block_numbers = [ - api::BlockNumber::Earliest, - api::BlockNumber::Latest, - 0.into(), - ]; - let call_request = CallTest::call_request(b"first"); - for number in genesis_block_numbers { + let latest_block_numbers = [api::BlockNumber::Latest, 1.into()]; + let call_request = CallTest::call_request(b"latest"); + for number in latest_block_numbers { let call_result = client .trace_call( call_request.clone(), @@ -474,7 +480,7 @@ impl HttpTest for TraceCallTestAfterSnapshotRecovery { StorageInitialization::empty_recovery() } - fn transaction_executor(&self) -> MockTransactionExecutor { + fn transaction_executor(&self) -> MockOneshotExecutor { let number = StorageInitialization::SNAPSHOT_RECOVERY_BLOCK + 1; CallTest::create_executor(number) } @@ -504,7 +510,7 @@ impl HttpTest for TraceCallTestAfterSnapshotRecovery { assert_pruned_block_error(&error, first_local_l2_block); } - let call_request = CallTest::call_request(b"first"); + let call_request = CallTest::call_request(b"latest"); let first_l2_block_numbers = [api::BlockNumber::Latest, first_local_l2_block.0.into()]; for number in first_l2_block_numbers { let number = api::BlockId::Number(number); @@ -544,18 +550,18 @@ impl HttpTest for EstimateGasTest { SendRawTransactionTest { snapshot_recovery }.storage_initialization() } - fn transaction_executor(&self) -> MockTransactionExecutor { - let mut tx_executor = MockTransactionExecutor::default(); + fn transaction_executor(&self) -> MockOneshotExecutor { + let mut tx_executor = MockOneshotExecutor::default(); let pending_block_number = if self.snapshot_recovery { StorageInitialization::SNAPSHOT_RECOVERY_BLOCK + 2 } else { L2BlockNumber(1) }; let gas_limit_threshold = self.gas_limit_threshold.clone(); - tx_executor.set_call_responses(move |tx, block_args| { + tx_executor.set_tx_responses(move |tx, env| { assert_eq!(tx.execute.calldata(), [] as [u8; 0]); assert_eq!(tx.nonce(), Some(Nonce(0))); - assert_eq!(block_args.resolved_block_number(), pending_block_number); + assert_eq!(env.l1_batch.first_l2_block.number, pending_block_number.0); let gas_limit_threshold = gas_limit_threshold.load(Ordering::SeqCst); if tx.gas_limit() >= U256::from(gas_limit_threshold) { @@ -637,49 +643,17 @@ async fn estimate_gas_after_snapshot_recovery() { #[derive(Debug)] struct EstimateGasWithStateOverrideTest { - gas_limit_threshold: Arc, - snapshot_recovery: bool, -} - -impl EstimateGasWithStateOverrideTest { - fn new(snapshot_recovery: bool) -> Self { - Self { - gas_limit_threshold: Arc::default(), - snapshot_recovery, - } - } + inner: EstimateGasTest, } #[async_trait] impl HttpTest for EstimateGasWithStateOverrideTest { fn storage_initialization(&self) -> StorageInitialization { - let snapshot_recovery = self.snapshot_recovery; - SendRawTransactionTest { snapshot_recovery }.storage_initialization() + 
self.inner.storage_initialization() } - fn transaction_executor(&self) -> MockTransactionExecutor { - let mut tx_executor = MockTransactionExecutor::default(); - let pending_block_number = if self.snapshot_recovery { - StorageInitialization::SNAPSHOT_RECOVERY_BLOCK + 2 - } else { - L2BlockNumber(1) - }; - let gas_limit_threshold = self.gas_limit_threshold.clone(); - tx_executor.set_call_responses(move |tx, block_args| { - assert_eq!(tx.execute.calldata(), [] as [u8; 0]); - assert_eq!(tx.nonce(), Some(Nonce(0))); - assert_eq!(block_args.resolved_block_number(), pending_block_number); - - let gas_limit_threshold = gas_limit_threshold.load(Ordering::SeqCst); - if tx.gas_limit() >= U256::from(gas_limit_threshold) { - ExecutionResult::Success { output: vec![] } - } else { - ExecutionResult::Revert { - output: VmRevertReason::VmError, - } - } - }); - tx_executor + fn transaction_executor(&self) -> MockOneshotExecutor { + self.inner.transaction_executor() } async fn test( @@ -735,5 +709,6 @@ impl HttpTest for EstimateGasWithStateOverrideTest { #[tokio::test] async fn estimate_gas_with_state_override() { - test_http_server(EstimateGasWithStateOverrideTest::new(false)).await; + let inner = EstimateGasTest::new(false); + test_http_server(EstimateGasWithStateOverrideTest { inner }).await; } From b4255618708349c51f60f5c7fc26f9356d32b6ff Mon Sep 17 00:00:00 2001 From: Dima Zhornyk <55756184+dimazhornyk@users.noreply.github.com> Date: Tue, 27 Aug 2024 12:20:27 +0200 Subject: [PATCH 22/39] feat: add flag to enable/disable DA inclusion verification (#2647) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit ## What ❔ This PR adds a config to explicitly enable/disable DA verification onchain. ## Why ❔ Without this feature, any chain using custom DA had to wait for full inclusion before they could commit a batch even if they were not doing the onchain verification. ## Checklist - [ ] PR title corresponds to the body of PR (we generate changelog entries from PRs). - [ ] Tests for the changes have been added / updated. - [ ] Documentation comments have been added / updated. - [ ] Code has been formatted via `zk fmt` and `zk lint`. --- core/lib/config/src/configs/da_dispatcher.rs | 11 +++++++ core/lib/config/src/testonly.rs | 1 + core/lib/env_config/src/da_dispatcher.rs | 2 ++ core/lib/protobuf_config/src/da_dispatcher.rs | 2 ++ .../src/proto/config/da_dispatcher.proto | 3 +- core/node/da_dispatcher/src/da_dispatcher.rs | 30 ++++++++++++------- 6 files changed, 36 insertions(+), 13 deletions(-) diff --git a/core/lib/config/src/configs/da_dispatcher.rs b/core/lib/config/src/configs/da_dispatcher.rs index 303a2c0b54c1..e9ad6bd3c074 100644 --- a/core/lib/config/src/configs/da_dispatcher.rs +++ b/core/lib/config/src/configs/da_dispatcher.rs @@ -5,6 +5,7 @@ use serde::Deserialize; pub const DEFAULT_POLLING_INTERVAL_MS: u32 = 5000; pub const DEFAULT_MAX_ROWS_TO_DISPATCH: u32 = 100; pub const DEFAULT_MAX_RETRIES: u16 = 5; +pub const DEFAULT_USE_DUMMY_INCLUSION_DATA: bool = false; #[derive(Debug, Clone, PartialEq, Deserialize)] pub struct DADispatcherConfig { @@ -14,6 +15,10 @@ pub struct DADispatcherConfig { pub max_rows_to_dispatch: Option, /// The maximum number of retries for the dispatch of a blob. pub max_retries: Option, + /// Use dummy value as inclusion proof instead of getting it from the client. 
+ // TODO: run a verification task to check if the L1 contract expects the inclusion proofs to + // avoid the scenario where contracts expect real proofs, and server is using dummy proofs. + pub use_dummy_inclusion_data: Option, } impl DADispatcherConfig { @@ -22,6 +27,7 @@ impl DADispatcherConfig { polling_interval_ms: Some(DEFAULT_POLLING_INTERVAL_MS), max_rows_to_dispatch: Some(DEFAULT_MAX_ROWS_TO_DISPATCH), max_retries: Some(DEFAULT_MAX_RETRIES), + use_dummy_inclusion_data: Some(DEFAULT_USE_DUMMY_INCLUSION_DATA), } } @@ -40,4 +46,9 @@ impl DADispatcherConfig { pub fn max_retries(&self) -> u16 { self.max_retries.unwrap_or(DEFAULT_MAX_RETRIES) } + + pub fn use_dummy_inclusion_data(&self) -> bool { + self.use_dummy_inclusion_data + .unwrap_or(DEFAULT_USE_DUMMY_INCLUSION_DATA) + } } diff --git a/core/lib/config/src/testonly.rs b/core/lib/config/src/testonly.rs index e028c3d3aec0..2ec91f5bec71 100644 --- a/core/lib/config/src/testonly.rs +++ b/core/lib/config/src/testonly.rs @@ -940,6 +940,7 @@ impl Distribution for EncodeDist { polling_interval_ms: self.sample(rng), max_rows_to_dispatch: self.sample(rng), max_retries: self.sample(rng), + use_dummy_inclusion_data: self.sample(rng), } } } diff --git a/core/lib/env_config/src/da_dispatcher.rs b/core/lib/env_config/src/da_dispatcher.rs index 194e4185b286..246752db91ac 100644 --- a/core/lib/env_config/src/da_dispatcher.rs +++ b/core/lib/env_config/src/da_dispatcher.rs @@ -26,6 +26,7 @@ mod tests { polling_interval_ms: Some(interval), max_rows_to_dispatch: Some(rows_limit), max_retries: Some(max_retries), + use_dummy_inclusion_data: Some(true), } } @@ -36,6 +37,7 @@ mod tests { DA_DISPATCHER_POLLING_INTERVAL_MS=5000 DA_DISPATCHER_MAX_ROWS_TO_DISPATCH=60 DA_DISPATCHER_MAX_RETRIES=7 + DA_DISPATCHER_USE_DUMMY_INCLUSION_DATA="true" "#; lock.set_env(config); let actual = DADispatcherConfig::from_env().unwrap(); diff --git a/core/lib/protobuf_config/src/da_dispatcher.rs b/core/lib/protobuf_config/src/da_dispatcher.rs index 1cafa37a1e19..d77073bd32cf 100644 --- a/core/lib/protobuf_config/src/da_dispatcher.rs +++ b/core/lib/protobuf_config/src/da_dispatcher.rs @@ -11,6 +11,7 @@ impl ProtoRepr for proto::DataAvailabilityDispatcher { polling_interval_ms: self.polling_interval_ms, max_rows_to_dispatch: self.max_rows_to_dispatch, max_retries: self.max_retries.map(|x| x as u16), + use_dummy_inclusion_data: self.use_dummy_inclusion_data, }) } @@ -19,6 +20,7 @@ impl ProtoRepr for proto::DataAvailabilityDispatcher { polling_interval_ms: this.polling_interval_ms, max_rows_to_dispatch: this.max_rows_to_dispatch, max_retries: this.max_retries.map(Into::into), + use_dummy_inclusion_data: this.use_dummy_inclusion_data, } } } diff --git a/core/lib/protobuf_config/src/proto/config/da_dispatcher.proto b/core/lib/protobuf_config/src/proto/config/da_dispatcher.proto index d1d913498a4e..dd366bd5b925 100644 --- a/core/lib/protobuf_config/src/proto/config/da_dispatcher.proto +++ b/core/lib/protobuf_config/src/proto/config/da_dispatcher.proto @@ -2,10 +2,9 @@ syntax = "proto3"; package zksync.config.da_dispatcher; -import "zksync/config/object_store.proto"; - message DataAvailabilityDispatcher { optional uint32 polling_interval_ms = 1; optional uint32 max_rows_to_dispatch = 2; optional uint32 max_retries = 3; + optional bool use_dummy_inclusion_data = 4; } diff --git a/core/node/da_dispatcher/src/da_dispatcher.rs b/core/node/da_dispatcher/src/da_dispatcher.rs index ea1858da25d3..f8e6f6b31723 100644 --- a/core/node/da_dispatcher/src/da_dispatcher.rs +++ 
b/core/node/da_dispatcher/src/da_dispatcher.rs @@ -5,7 +5,10 @@ use chrono::Utc; use rand::Rng; use tokio::sync::watch::Receiver; use zksync_config::DADispatcherConfig; -use zksync_da_client::{types::DAError, DataAvailabilityClient}; +use zksync_da_client::{ + types::{DAError, InclusionData}, + DataAvailabilityClient, +}; use zksync_dal::{ConnectionPool, Core, CoreDal}; use zksync_types::L1BatchNumber; @@ -133,16 +136,21 @@ impl DataAvailabilityDispatcher { return Ok(()); }; - let inclusion_data = self - .client - .get_inclusion_data(blob_info.blob_id.as_str()) - .await - .with_context(|| { - format!( - "failed to get inclusion data for blob_id: {}, batch_number: {}", - blob_info.blob_id, blob_info.l1_batch_number - ) - })?; + let inclusion_data = if self.config.use_dummy_inclusion_data() { + // if the inclusion verification is disabled, we don't need to wait for the inclusion + // data before committing the batch, so simply return an empty vector + Some(InclusionData { data: vec![] }) + } else { + self.client + .get_inclusion_data(blob_info.blob_id.as_str()) + .await + .with_context(|| { + format!( + "failed to get inclusion data for blob_id: {}, batch_number: {}", + blob_info.blob_id, blob_info.l1_batch_number + ) + })? + }; let Some(inclusion_data) = inclusion_data else { return Ok(()); From 64b2ff8b81dcc146cd0535eb0d2d898c18ad5f7f Mon Sep 17 00:00:00 2001 From: Ivan Schasny <31857042+ischasny@users.noreply.github.com> Date: Tue, 27 Aug 2024 12:45:00 +0100 Subject: [PATCH 23/39] fix(base_token_adjuster): bug with a wrong metrics namespace (#2744) Fix a bug with base_token_adjuster metrics reported under a wrong namespace. --- core/node/base_token_adjuster/src/metrics.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/core/node/base_token_adjuster/src/metrics.rs b/core/node/base_token_adjuster/src/metrics.rs index e6f6571adc1d..d84e4da0c0c7 100644 --- a/core/node/base_token_adjuster/src/metrics.rs +++ b/core/node/base_token_adjuster/src/metrics.rs @@ -15,7 +15,7 @@ pub(crate) struct OperationResultLabels { } #[derive(Debug, Metrics)] -#[metrics(prefix = "snapshots_creator")] +#[metrics(prefix = "base_token_adjuster")] pub(crate) struct BaseTokenAdjusterMetrics { pub l1_gas_used: Gauge, #[metrics(buckets = Buckets::LATENCIES)] From a4170e9e7f321a1062495ec586e0ce9186269088 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Tomasz=20Grze=C5=9Bkiewicz?= Date: Tue, 27 Aug 2024 15:07:55 +0200 Subject: [PATCH 24/39] fix(eth-sender): missing Gateway migration changes (#2732) Signed-off-by: tomg10 --- core/lib/dal/src/eth_sender_dal.rs | 2 +- core/node/eth_sender/src/eth_tx_aggregator.rs | 14 ++++++--- core/node/eth_sender/src/eth_tx_manager.rs | 28 +++++++++++------- .../src/l1_gas_price/gas_adjuster/mod.rs | 2 +- .../layers/eth_sender/aggregator.rs | 13 ++------- .../layers/eth_sender/manager.rs | 29 ++++++++++++------- 6 files changed, 50 insertions(+), 38 deletions(-) diff --git a/core/lib/dal/src/eth_sender_dal.rs b/core/lib/dal/src/eth_sender_dal.rs index c76547422d8f..2266d6fb60f9 100644 --- a/core/lib/dal/src/eth_sender_dal.rs +++ b/core/lib/dal/src/eth_sender_dal.rs @@ -413,8 +413,8 @@ impl EthSenderDal<'_, '_> { WHERE id = $2 "#, - eth_tx_id as i32, chain_id as i64, + eth_tx_id as i32, ) .execute(self.storage.conn()) .await?; diff --git a/core/node/eth_sender/src/eth_tx_aggregator.rs b/core/node/eth_sender/src/eth_tx_aggregator.rs index 7d6a6b234742..7f304e2f72b7 100644 --- a/core/node/eth_sender/src/eth_tx_aggregator.rs +++ b/core/node/eth_sender/src/eth_tx_aggregator.rs @@
-383,8 +383,14 @@ impl EthTxAggregator { ); return Ok(()); } + let is_gateway = self.settlement_mode.is_gateway(); let tx = self - .save_eth_tx(storage, &agg_op, contracts_are_pre_shared_bridge, false) + .save_eth_tx( + storage, + &agg_op, + contracts_are_pre_shared_bridge, + is_gateway, + ) .await?; Self::report_eth_tx_saving(storage, &agg_op, &tx).await; } @@ -556,9 +562,9 @@ impl EthTxAggregator { // We may be using a custom sender for commit transactions, so use this // var whatever it actually is: a `None` for single-addr operator or `Some` // for multi-addr operator in 4844 mode. - let sender_addr = match op_type { - AggregatedActionType::Commit => self.custom_commit_sender_addr, - _ => None, + let sender_addr = match (op_type, is_gateway) { + (AggregatedActionType::Commit, false) => self.custom_commit_sender_addr, + (_, _) => None, }; let nonce = self.get_next_nonce(&mut transaction, sender_addr).await?; let encoded_aggregated_op = diff --git a/core/node/eth_sender/src/eth_tx_manager.rs b/core/node/eth_sender/src/eth_tx_manager.rs index a97aed88a0a5..0d78ab71c62d 100644 --- a/core/node/eth_sender/src/eth_tx_manager.rs +++ b/core/node/eth_sender/src/eth_tx_manager.rs @@ -49,13 +49,18 @@ impl EthTxManager { gas_adjuster, max_acceptable_priority_fee_in_gwei: config.max_acceptable_priority_fee_in_gwei, }; + let l1_interface = Box::new(RealL1Interface { + ethereum_gateway, + ethereum_gateway_blobs, + l2_gateway, + wait_confirmations: config.wait_confirmations, + }); + tracing::info!( + "Started eth_tx_manager supporting {:?} operators", + l1_interface.supported_operator_types() + ); Self { - l1_interface: Box::new(RealL1Interface { - ethereum_gateway, - ethereum_gateway_blobs, - l2_gateway, - wait_confirmations: config.wait_confirmations, - }), + l1_interface, config, fees_oracle: Box::new(fees_oracle), pool, @@ -257,10 +262,10 @@ impl EthTxManager { } pub(crate) fn operator_address(&self, operator_type: OperatorType) -> Option
{ - if operator_type == OperatorType::NonBlob { - None - } else { + if operator_type == OperatorType::Blob { self.l1_interface.get_blobs_operator_account() + } else { + None } } // Monitors the in-flight transactions, marks mined ones as confirmed, @@ -519,9 +524,10 @@ impl EthTxManager { tracing::info!("Stop signal received, eth_tx_manager is shutting down"); break; } + let operator_to_track = self.l1_interface.supported_operator_types()[0]; let l1_block_numbers = self .l1_interface - .get_l1_block_numbers(OperatorType::Blob) + .get_l1_block_numbers(operator_to_track) .await?; METRICS.track_block_numbers(&l1_block_numbers); @@ -643,7 +649,7 @@ impl EthTxManager { .get_l1_block_numbers(operator_type) .await .unwrap(); - tracing::info!( + tracing::debug!( "Loop iteration at block {} for {operator_type:?} operator", l1_block_numbers.latest ); diff --git a/core/node/fee_model/src/l1_gas_price/gas_adjuster/mod.rs b/core/node/fee_model/src/l1_gas_price/gas_adjuster/mod.rs index 4ed9cf1330ea..e6842b92fdba 100644 --- a/core/node/fee_model/src/l1_gas_price/gas_adjuster/mod.rs +++ b/core/node/fee_model/src/l1_gas_price/gas_adjuster/mod.rs @@ -86,7 +86,7 @@ impl GasAdjuster { anyhow::ensure!( matches!(pubdata_sending_mode, PubdataSendingMode::RelayedL2Calldata), - "Only relayed L2 calldata is available for L2 mode" + "Only relayed L2 calldata is available for L2 mode, got: {pubdata_sending_mode:?}" ); } else { anyhow::ensure!(!client.gateway_mode, "Must be L1 client in L1 mode"); diff --git a/core/node/node_framework/src/implementations/layers/eth_sender/aggregator.rs b/core/node/node_framework/src/implementations/layers/eth_sender/aggregator.rs index cfe701326bd6..310580aeb3a3 100644 --- a/core/node/node_framework/src/implementations/layers/eth_sender/aggregator.rs +++ b/core/node/node_framework/src/implementations/layers/eth_sender/aggregator.rs @@ -8,10 +8,7 @@ use zksync_types::{commitment::L1BatchCommitmentMode, settlement::SettlementMode use crate::{ implementations::resources::{ circuit_breakers::CircuitBreakersResource, - eth_interface::{ - BoundEthInterfaceForBlobsResource, BoundEthInterfaceForL2Resource, - BoundEthInterfaceResource, - }, + eth_interface::{BoundEthInterfaceForBlobsResource, BoundEthInterfaceResource}, object_store::ObjectStoreResource, pools::{MasterPool, PoolResource, ReplicaPool}, }, @@ -55,7 +52,6 @@ pub struct Input { pub replica_pool: PoolResource, pub eth_client: Option, pub eth_client_blobs: Option, - pub eth_client_l2: Option, pub object_store: ObjectStoreResource, #[context(default)] pub circuit_breakers: CircuitBreakersResource, @@ -100,11 +96,6 @@ impl WiringLayer for EthTxAggregatorLayer { let master_pool = input.master_pool.get().await.unwrap(); let replica_pool = input.replica_pool.get().await.unwrap(); - let eth_client = if self.settlement_mode.is_gateway() { - input.eth_client_l2.context("l2_client must be provided")?.0 - } else { - input.eth_client.context("l1_client must be provided")?.0 - }; let eth_client_blobs = input.eth_client_blobs.map(|c| c.0); let object_store = input.object_store.0; @@ -125,7 +116,7 @@ impl WiringLayer for EthTxAggregatorLayer { master_pool.clone(), config.clone(), aggregator, - eth_client.clone(), + input.eth_client.unwrap().0, self.contracts_config.validator_timelock_addr, self.contracts_config.l1_multicall3_addr, self.contracts_config.diamond_proxy_addr, diff --git a/core/node/node_framework/src/implementations/layers/eth_sender/manager.rs b/core/node/node_framework/src/implementations/layers/eth_sender/manager.rs index 
d6989d8db72b..5462fa575f94 100644 --- a/core/node/node_framework/src/implementations/layers/eth_sender/manager.rs +++ b/core/node/node_framework/src/implementations/layers/eth_sender/manager.rs @@ -6,10 +6,7 @@ use zksync_eth_sender::EthTxManager; use crate::{ implementations::resources::{ circuit_breakers::CircuitBreakersResource, - eth_interface::{ - BoundEthInterfaceForBlobsResource, BoundEthInterfaceForL2Resource, - BoundEthInterfaceResource, - }, + eth_interface::{BoundEthInterfaceForBlobsResource, BoundEthInterfaceResource}, gas_adjuster::GasAdjusterResource, pools::{MasterPool, PoolResource, ReplicaPool}, }, @@ -48,7 +45,6 @@ pub struct Input { pub replica_pool: PoolResource, pub eth_client: BoundEthInterfaceResource, pub eth_client_blobs: Option, - pub l2_client: Option, pub gas_adjuster: GasAdjusterResource, #[context(default)] pub circuit_breakers: CircuitBreakersResource, @@ -81,9 +77,10 @@ impl WiringLayer for EthTxManagerLayer { let master_pool = input.master_pool.get().await.unwrap(); let replica_pool = input.replica_pool.get().await.unwrap(); - let eth_client = input.eth_client.0; + let settlement_mode = self.eth_sender_config.gas_adjuster.unwrap().settlement_mode; + let eth_client = input.eth_client.0.clone(); let eth_client_blobs = input.eth_client_blobs.map(|c| c.0); - let l2_client = input.l2_client.map(|c| c.0); + let l2_client = input.eth_client.0; let config = self.eth_sender_config.sender.context("sender")?; @@ -93,9 +90,21 @@ impl WiringLayer for EthTxManagerLayer { master_pool, config, gas_adjuster, - Some(eth_client), - eth_client_blobs, - l2_client, + if !settlement_mode.is_gateway() { + Some(eth_client) + } else { + None + }, + if !settlement_mode.is_gateway() { + eth_client_blobs + } else { + None + }, + if settlement_mode.is_gateway() { + Some(l2_client) + } else { + None + }, ); // Insert circuit breaker. From bd2b5d8bb12e486b9a797e347357acfb58a0d46f Mon Sep 17 00:00:00 2001 From: Alex Ostrovski Date: Tue, 27 Aug 2024 16:32:41 +0300 Subject: [PATCH 25/39] test(vm): Refactor VM benchmarks (#2668) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit ## What ❔ - Integrates Prometheus metrics into criterion benches; removes the DIY benchmark correspondingly. - Merges the main benchmark crate with the harness one. - Includes benched bytecodes into the crate itself rather than reading them in runtime. ## Why ❔ Makes VM benchmarks more maintainable. ## Checklist - [x] PR title corresponds to the body of PR (we generate changelog entries from PRs). - [x] Tests for the changes have been added / updated. - [x] Documentation comments have been added / updated. - [x] Code has been formatted via `zk fmt` and `zk lint`. 
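
For reviewers, a minimal sketch of what a bench target looks like against the merged crate. This is a sketch only: `BenchmarkingVm`, `Fast`, and `get_transfer_tx` are the names imported by the benches in this diff, but the exact signatures used here are assumptions, not the final API.

```rust
// Hypothetical minimal Criterion bench target against the merged `vm-benchmark` crate.
// `BenchmarkingVm`, `Fast`, and `get_transfer_tx` follow the imports in `benches/batch.rs`;
// treat the exact signatures as assumptions for illustration.
use criterion::{black_box, criterion_group, criterion_main, Criterion};
use vm_benchmark::{get_transfer_tx, BenchmarkingVm, Fast};

fn bench_transfer(c: &mut Criterion) {
    // Simple base-token transfer with nonce 0 (assumed helper signature).
    let tx = get_transfer_tx(0);
    c.bench_function("fast_vm/transfer", |bencher| {
        bencher.iter(|| {
            // Fresh VM per iteration so executed state does not accumulate across runs.
            let mut vm = BenchmarkingVm::<Fast>::default();
            black_box(vm.run_transaction(&tx));
        });
    });
}

criterion_group!(benches, bench_transfer);
criterion_main!(benches);
```

The real targets follow this shape; see `benches/oneshot.rs` and `benches/batch.rs` below.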
--- .github/workflows/ci-core-reusable.yml | 2 +- .github/workflows/vm-perf-comparison.yml | 6 +- .github/workflows/vm-perf-to-prometheus.yml | 8 +- Cargo.lock | 21 +- Cargo.toml | 2 - core/tests/vm-benchmark/Cargo.toml | 32 +- core/tests/vm-benchmark/README.md | 31 +- .../benches/{fill_bootloader.rs => batch.rs} | 67 +-- core/tests/vm-benchmark/benches/criterion.rs | 98 ---- .../vm-benchmark/benches/diy_benchmark.rs | 53 -- core/tests/vm-benchmark/benches/iai.rs | 16 +- core/tests/vm-benchmark/benches/oneshot.rs | 91 ++++ core/tests/vm-benchmark/harness/Cargo.toml | 19 - .../src/{parse_iai.rs => bin/common/mod.rs} | 1 + .../src/{ => bin}/compare_iai_results.rs | 4 +- .../src/bin/iai_results_to_prometheus.rs | 52 ++ .../src/bin/instruction_counts.rs | 11 + .../bytecodes}/access_memory | Bin .../bytecodes}/call_far | Bin .../bytecodes}/decode_shl_sub | Bin .../bytecodes}/deploy_simple_contract | Bin .../bytecodes}/event_spam | Bin .../bytecodes}/finish_eventful_frames | Bin .../bytecodes}/heap_read_write | Bin .../bytecodes}/slot_hash_collision | Bin .../bytecodes}/write_and_decode | Bin core/tests/vm-benchmark/src/criterion.rs | 477 ++++++++++++++++++ core/tests/vm-benchmark/src/find_slowest.rs | 43 -- .../src/iai_results_to_prometheus.rs | 37 -- .../{harness => }/src/instruction_counter.rs | 0 .../vm-benchmark/src/instruction_counts.rs | 28 - core/tests/vm-benchmark/src/lib.rs | 74 ++- core/tests/vm-benchmark/src/main.rs | 16 +- core/tests/vm-benchmark/src/transaction.rs | 194 +++++++ .../{harness/src/lib.rs => src/vm.rs} | 223 +------- .../tests/vm-benchmark/src/with_prometheus.rs | 27 - 36 files changed, 988 insertions(+), 645 deletions(-) rename core/tests/vm-benchmark/benches/{fill_bootloader.rs => batch.rs} (79%) delete mode 100644 core/tests/vm-benchmark/benches/criterion.rs delete mode 100644 core/tests/vm-benchmark/benches/diy_benchmark.rs create mode 100644 core/tests/vm-benchmark/benches/oneshot.rs delete mode 100644 core/tests/vm-benchmark/harness/Cargo.toml rename core/tests/vm-benchmark/src/{parse_iai.rs => bin/common/mod.rs} (98%) rename core/tests/vm-benchmark/src/{ => bin}/compare_iai_results.rs (98%) create mode 100644 core/tests/vm-benchmark/src/bin/iai_results_to_prometheus.rs create mode 100644 core/tests/vm-benchmark/src/bin/instruction_counts.rs rename core/tests/vm-benchmark/{deployment_benchmarks => src/bytecodes}/access_memory (100%) rename core/tests/vm-benchmark/{deployment_benchmarks => src/bytecodes}/call_far (100%) rename core/tests/vm-benchmark/{deployment_benchmarks => src/bytecodes}/decode_shl_sub (100%) rename core/tests/vm-benchmark/{deployment_benchmarks => src/bytecodes}/deploy_simple_contract (100%) rename core/tests/vm-benchmark/{deployment_benchmarks => src/bytecodes}/event_spam (100%) rename core/tests/vm-benchmark/{deployment_benchmarks => src/bytecodes}/finish_eventful_frames (100%) rename core/tests/vm-benchmark/{deployment_benchmarks => src/bytecodes}/heap_read_write (100%) rename core/tests/vm-benchmark/{deployment_benchmarks => src/bytecodes}/slot_hash_collision (100%) rename core/tests/vm-benchmark/{deployment_benchmarks => src/bytecodes}/write_and_decode (100%) create mode 100644 core/tests/vm-benchmark/src/criterion.rs delete mode 100644 core/tests/vm-benchmark/src/find_slowest.rs delete mode 100644 core/tests/vm-benchmark/src/iai_results_to_prometheus.rs rename core/tests/vm-benchmark/{harness => }/src/instruction_counter.rs (100%) delete mode 100644 core/tests/vm-benchmark/src/instruction_counts.rs create mode 100644 
core/tests/vm-benchmark/src/transaction.rs rename core/tests/vm-benchmark/{harness/src/lib.rs => src/vm.rs} (54%) delete mode 100644 core/tests/vm-benchmark/src/with_prometheus.rs diff --git a/.github/workflows/ci-core-reusable.yml b/.github/workflows/ci-core-reusable.yml index 85eefc862272..51550f87a34b 100644 --- a/.github/workflows/ci-core-reusable.yml +++ b/.github/workflows/ci-core-reusable.yml @@ -67,7 +67,7 @@ jobs: ci_run zk test rust # Benchmarks are not tested by `cargo nextest` unless specified explicitly, and even then `criterion` harness is incompatible # with how `cargo nextest` runs tests. Thus, we run criterion-based benchmark tests manually. - ci_run zk f cargo test --release -p vm-benchmark --bench criterion --bench fill_bootloader + ci_run zk f cargo test --release -p vm-benchmark --bench oneshot --bench batch loadtest: runs-on: [matterlabs-ci-runner] diff --git a/.github/workflows/vm-perf-comparison.yml b/.github/workflows/vm-perf-comparison.yml index 53dada123574..da88b07779fd 100644 --- a/.github/workflows/vm-perf-comparison.yml +++ b/.github/workflows/vm-perf-comparison.yml @@ -1,4 +1,4 @@ -name: Compare VM perfomance to base branch +name: Compare VM performance to base branch on: pull_request: @@ -47,7 +47,7 @@ jobs: ci_run zk ci_run zk compiler system-contracts ci_run cargo bench --package vm-benchmark --bench iai | tee base-iai - ci_run cargo run --package vm-benchmark --release --bin instruction-counts | tee base-opcodes || touch base-opcodes + ci_run cargo run --package vm-benchmark --release --bin instruction_counts | tee base-opcodes || touch base-opcodes ci_run yarn workspace system-contracts clean - name: checkout PR @@ -59,7 +59,7 @@ jobs: ci_run zk ci_run zk compiler system-contracts ci_run cargo bench --package vm-benchmark --bench iai | tee pr-iai - ci_run cargo run --package vm-benchmark --release --bin instruction-counts | tee pr-opcodes || touch pr-opcodes + ci_run cargo run --package vm-benchmark --release --bin instruction_counts | tee pr-opcodes || touch pr-opcodes EOF=$(dd if=/dev/urandom bs=15 count=1 status=none | base64) echo "speedup<<$EOF" >> $GITHUB_OUTPUT diff --git a/.github/workflows/vm-perf-to-prometheus.yml b/.github/workflows/vm-perf-to-prometheus.yml index fce7ead2d696..3cfd4e4deb87 100644 --- a/.github/workflows/vm-perf-to-prometheus.yml +++ b/.github/workflows/vm-perf-to-prometheus.yml @@ -21,7 +21,7 @@ jobs: - name: setup-env run: | - echo PUSH_VM_BENCHMARKS_TO_PROMETHEUS=1 >> .env + echo BENCHMARK_PROMETHEUS_PUSHGATEWAY_URL=${{ secrets.BENCHMARK_PROMETHEUS_PUSHGATEWAY_URL }} >> .env echo ZKSYNC_HOME=$(pwd) >> $GITHUB_ENV echo $(pwd)/bin >> $GITHUB_PATH @@ -31,10 +31,12 @@ jobs: run_retried docker compose pull zk docker compose up -d zk ci_run zk - ci_run zk compiler system-contracts + ci_run zk compiler all - name: run benchmarks run: | - ci_run cargo bench --package vm-benchmark --bench diy_benchmark + ci_run cargo bench --package vm-benchmark --bench oneshot + # Run only benches with 1,000 transactions per batch to not spend too much time + ci_run cargo bench --package vm-benchmark --bench batch '/1000$' ci_run cargo bench --package vm-benchmark --bench iai | tee iai-result ci_run cargo run --package vm-benchmark --bin iai_results_to_prometheus --release < iai-result diff --git a/Cargo.lock b/Cargo.lock index 0d4ba4c23834..54714b21af2b 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -7271,14 +7271,18 @@ dependencies = [ name = "vm-benchmark" version = "0.1.0" dependencies = [ + "assert_matches", "criterion", "iai", + "once_cell", 
"rand 0.8.5", "tokio", "vise", + "zksync_contracts", + "zksync_multivm", "zksync_types", + "zksync_utils", "zksync_vlog", - "zksync_vm_benchmark_harness", ] [[package]] @@ -9751,21 +9755,6 @@ dependencies = [ "vise-exporter", ] -[[package]] -name = "zksync_vm_benchmark_harness" -version = "0.1.0" -dependencies = [ - "assert_matches", - "once_cell", - "zk_evm 0.133.0", - "zksync_contracts", - "zksync_multivm", - "zksync_state", - "zksync_system_constants", - "zksync_types", - "zksync_utils", -] - [[package]] name = "zksync_vm_interface" version = "0.1.0" diff --git a/Cargo.toml b/Cargo.toml index 6ee6ce79e490..c9c8ff95ebc4 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -79,7 +79,6 @@ members = [ "core/tests/test_account", "core/tests/loadnext", "core/tests/vm-benchmark", - "core/tests/vm-benchmark/harness", # Parts of prover workspace that are needed for Core workspace "prover/crates/lib/prover_dal", ] @@ -238,7 +237,6 @@ zksync_prover_dal = { version = "0.1.0", path = "prover/crates/lib/prover_dal" } zksync_vlog = { version = "0.1.0", path = "core/lib/vlog" } zksync_vm_interface = { version = "0.1.0", path = "core/lib/vm_interface" } zksync_vm_utils = { version = "0.1.0", path = "core/lib/vm_utils" } -zksync_vm_benchmark_harness = { version = "0.1.0", path = "core/tests/vm-benchmark/harness" } zksync_basic_types = { version = "0.1.0", path = "core/lib/basic_types" } zksync_circuit_breaker = { version = "0.1.0", path = "core/lib/circuit_breaker" } zksync_config = { version = "0.1.0", path = "core/lib/config" } diff --git a/core/tests/vm-benchmark/Cargo.toml b/core/tests/vm-benchmark/Cargo.toml index 27218d79aafe..4586c637e128 100644 --- a/core/tests/vm-benchmark/Cargo.toml +++ b/core/tests/vm-benchmark/Cargo.toml @@ -6,46 +6,30 @@ license.workspace = true publish = false [dependencies] +zksync_contracts.workspace = true +zksync_multivm.workspace = true zksync_types.workspace = true +zksync_utils.workspace = true zksync_vlog.workspace = true -zksync_vm_benchmark_harness.workspace = true +criterion.workspace = true +once_cell.workspace = true rand.workspace = true vise.workspace = true tokio.workspace = true [dev-dependencies] -criterion.workspace = true +assert_matches.workspace = true iai.workspace = true [[bench]] -name = "criterion" +name = "oneshot" harness = false [[bench]] -name = "diy_benchmark" +name = "batch" harness = false [[bench]] name = "iai" harness = false - -[[bench]] -name = "fill_bootloader" -harness = false - -[[bin]] -name = "iai_results_to_prometheus" -path = "src/iai_results_to_prometheus.rs" - -[[bin]] -name = "compare_iai_results" -path = "src/compare_iai_results.rs" - -[[bin]] -name = "find-slowest" -path = "src/find_slowest.rs" - -[[bin]] -name = "instruction-counts" -path = "src/instruction_counts.rs" diff --git a/core/tests/vm-benchmark/README.md b/core/tests/vm-benchmark/README.md index cecbdb31d0cf..b7f056894e73 100644 --- a/core/tests/vm-benchmark/README.md +++ b/core/tests/vm-benchmark/README.md @@ -9,35 +9,22 @@ benchmarks, however. There are three different benchmarking tools available: ```sh -cargo bench --bench criterion -cargo bench --bench diy_benchmark +cargo bench --bench oneshot +cargo bench --bench batch cargo +nightly bench --bench iai ``` -Criterion is the de-facto microbenchmarking tool for Rust. Run it, then optimize something and run the command again to -see if your changes have made a difference. +`oneshot` and `batch` targets use Criterion, the de-facto standard micro-benchmarking tool for Rust. 
`oneshot` measures +VM performance on single transactions, and `batch` on entire batches of up to 5,000 transactions. Run these benches, +then optimize something and run the command again to see if your changes have made a difference. -The DIY benchmark works a bit better in noisy environments and is used to push benchmark data to Prometheus -automatically. +IAI uses cachegrind to simulate the CPU, so noise is completely irrelevant to it, but it also doesn't measure exactly +the same thing as normal benchmarks. You need valgrind to be able to run it. -IAI uses cachegrind to simulate the CPU, so noise is completely irrelevant to it but it also doesn't measure exactly the -same thing as normal benchmarks. You need valgrind to be able to run it. - -You can add your own bytecodes to be benchmarked into the folder "deployment_benchmarks". For iai, you also need to add -them to "benches/iai.rs". +You can add new bytecodes to be benchmarked into the [`bytecodes`](src/bytecodes) directory and then add them to the +`BYTECODES` constant exported by the crate. ## Profiling (Linux only) You can also use `sh perf.sh bytecode_file` to produce data that can be fed into the [firefox profiler](https://profiler.firefox.com/) for a specific bytecode. - -## Fuzzing - -There is a fuzzer using this library at core/lib/vm/fuzz. The fuzz.sh script located there starts a fuzzer which -attempts to make cover as much code as it can to ultimately produce a valid deployment bytecode. - -It has no chance of succeeding currently because the fuzzing speed drops to 10 executions/s easily. Optimizing the VM or -lowering the gas limit will help with that. - -The fuzzer has been useful for producing synthetic benchmark inputs. It may be a good tool for finding show transactions -with a certain gas limit, an empirical way of evaluating gas prices of instructions. diff --git a/core/tests/vm-benchmark/benches/fill_bootloader.rs b/core/tests/vm-benchmark/benches/batch.rs similarity index 79% rename from core/tests/vm-benchmark/benches/fill_bootloader.rs rename to core/tests/vm-benchmark/benches/batch.rs index 13fa1df0b2fc..608f6be6d089 100644 --- a/core/tests/vm-benchmark/benches/fill_bootloader.rs +++ b/core/tests/vm-benchmark/benches/batch.rs @@ -14,17 +14,15 @@ use std::{iter, time::Duration}; -use criterion::{ - black_box, criterion_group, criterion_main, measurement::WallTime, BatchSize, BenchmarkGroup, - BenchmarkId, Criterion, Throughput, -}; +use criterion::{black_box, criterion_group, criterion_main, Criterion, Throughput}; use rand::{rngs::StdRng, Rng, SeedableRng}; -use zksync_types::Transaction; -use zksync_vm_benchmark_harness::{ - cut_to_allowed_bytecode_size, get_deploy_tx_with_gas_limit, get_heavy_load_test_tx, - get_load_test_deploy_tx, get_load_test_tx, get_realistic_load_test_tx, get_transfer_tx, - BenchmarkingVm, BenchmarkingVmFactory, Fast, Legacy, LoadTestParams, +use vm_benchmark::{ + criterion::{is_test_mode, BenchmarkGroup, BenchmarkId, CriterionExt, MeteredTime}, + get_deploy_tx_with_gas_limit, get_heavy_load_test_tx, get_load_test_deploy_tx, + get_load_test_tx, get_realistic_load_test_tx, get_transfer_tx, BenchmarkingVm, + BenchmarkingVmFactory, Bytecode, Fast, Legacy, LoadTestParams, }; +use zksync_types::Transaction; /// Gas limit for deployment transactions. 
const DEPLOY_GAS_LIMIT: u32 = 30_000_000; @@ -59,7 +57,7 @@ fn bench_vm<VM: BenchmarkingVmFactory, const FULL: bool>( } fn run_vm_expecting_failures<VM: BenchmarkingVmFactory, const FULL: bool>( - group: &mut BenchmarkGroup<'_, WallTime>, + group: &mut BenchmarkGroup<'_>, name: &str, txs: &[Transaction], expected_failures: &[bool], @@ -70,25 +68,24 @@ fn run_vm_expecting_failures<VM: BenchmarkingVmFactory, const FULL: bool>( } group.throughput(Throughput::Elements(*txs_in_batch as u64)); - group.bench_with_input( + group.bench_metered_with_input( BenchmarkId::new(name, txs_in_batch), txs_in_batch, |bencher, &txs_in_batch| { if FULL { // Include VM initialization / drop into the measured time - bencher.iter(|| { + bencher.iter(|timer| { + let _guard = timer.start(); let mut vm = BenchmarkingVm::<VM>::default(); bench_vm::<_, true>(&mut vm, &txs[..txs_in_batch], expected_failures); }); } else { - bencher.iter_batched( - BenchmarkingVm::<VM>::default, - |mut vm| { - bench_vm::<_, false>(&mut vm, &txs[..txs_in_batch], expected_failures); - vm - }, - BatchSize::LargeInput, // VM can consume significant amount of RAM, especially the new one - ); + bencher.iter(|timer| { + let mut vm = BenchmarkingVm::<VM>::default(); + let guard = timer.start(); + bench_vm::<_, false>(&mut vm, &txs[..txs_in_batch], expected_failures); + drop(guard); + }); } }, ); @@ -96,22 +93,23 @@ fn run_vm_expecting_failures<VM: BenchmarkingVmFactory, const FULL: bool>( } fn run_vm<VM: BenchmarkingVmFactory, const FULL: bool>( - group: &mut BenchmarkGroup<'_, WallTime>, + group: &mut BenchmarkGroup<'_>, name: &str, txs: &[Transaction], ) { run_vm_expecting_failures::<VM, FULL>(group, name, txs, &[]); } -fn bench_fill_bootloader<VM: BenchmarkingVmFactory, const FULL: bool>(c: &mut Criterion) { - let is_test_mode = !std::env::args().any(|arg| arg == "--bench"); - let txs_in_batch = if is_test_mode { +fn bench_fill_bootloader<VM: BenchmarkingVmFactory, const FULL: bool>( + c: &mut Criterion<MeteredTime>, +) { + let txs_in_batch = if is_test_mode() { &TXS_IN_BATCH[..3] // Reduce the number of transactions in a batch so that tests don't take long } else { TXS_IN_BATCH }; - let mut group = c.benchmark_group(if FULL { + let mut group = c.metered_group(if FULL { format!("fill_bootloader_full{}", VM::LABEL.as_suffix()) } else { format!("fill_bootloader{}", VM::LABEL.as_suffix()) @@ -121,12 +119,12 @@ fn bench_fill_bootloader<VM: BenchmarkingVmFactory, const FULL: bool>(c: &mut Cr .measurement_time(Duration::from_secs(10)); // Deploying simple contract - let test_contract = - std::fs::read("deployment_benchmarks/deploy_simple_contract").expect("failed to read file"); - let code = cut_to_allowed_bytecode_size(&test_contract).unwrap(); + let test_contract = Bytecode::get("deploy_simple_contract"); let max_txs = *txs_in_batch.last().unwrap() as u32; let txs: Vec<_> = (0..max_txs) - .map(|nonce| get_deploy_tx_with_gas_limit(code, DEPLOY_GAS_LIMIT, nonce)) + .map(|nonce| { + get_deploy_tx_with_gas_limit(test_contract.bytecode(), DEPLOY_GAS_LIMIT, nonce) + }) .collect(); run_vm::<VM, FULL>(&mut group, "deploy_simple_contract", &txs); drop(txs); @@ -187,9 +185,12 @@ fn bench_fill_bootloader<VM: BenchmarkingVmFactory, const FULL: bool>(c: &mut Cr } criterion_group!( - benches, - bench_fill_bootloader::<Fast, false>, - bench_fill_bootloader::<Fast, true>, - bench_fill_bootloader::<Legacy, false> + name = benches; + config = Criterion::default() + .configure_from_args() + .with_measurement(MeteredTime::new("fill_bootloader")); + targets = bench_fill_bootloader::<Fast, false>, + bench_fill_bootloader::<Fast, true>, + bench_fill_bootloader::<Legacy, false> ); criterion_main!(benches); diff --git a/core/tests/vm-benchmark/benches/criterion.rs b/core/tests/vm-benchmark/benches/criterion.rs deleted file mode 100644 index 9e12fc25f54c..000000000000 --- a/core/tests/vm-benchmark/benches/criterion.rs +++ /dev/null @@ -1,98 +0,0 @@ -use std::time::Duration; - -use criterion::{ - black_box, criterion_group, criterion_main, measurement::WallTime, BatchSize, BenchmarkGroup, -
Criterion, -}; -use zksync_types::Transaction; -use zksync_vm_benchmark_harness::{ - cut_to_allowed_bytecode_size, get_deploy_tx, get_heavy_load_test_tx, get_load_test_deploy_tx, - get_load_test_tx, get_realistic_load_test_tx, BenchmarkingVm, BenchmarkingVmFactory, Fast, - Legacy, LoadTestParams, -}; - -const SAMPLE_SIZE: usize = 20; - -fn benches_in_folder(c: &mut Criterion) { - let mut group = c.benchmark_group(VM::LABEL.as_str()); - group - .sample_size(SAMPLE_SIZE) - .measurement_time(Duration::from_secs(10)); - - for path in std::fs::read_dir("deployment_benchmarks").unwrap() { - let path = path.unwrap().path(); - - let test_contract = std::fs::read(&path).expect("failed to read file"); - - let code = cut_to_allowed_bytecode_size(&test_contract).unwrap(); - let tx = get_deploy_tx(code); - let file_name = path.file_name().unwrap().to_str().unwrap(); - let full_suffix = if FULL { "/full" } else { "" }; - let bench_name = format!("{file_name}{full_suffix}"); - group.bench_function(bench_name, |bencher| { - if FULL { - // Include VM initialization / drop into the measured time - bencher.iter(|| BenchmarkingVm::::default().run_transaction(black_box(&tx))); - } else { - bencher.iter_batched( - BenchmarkingVm::::default, - |mut vm| { - let result = vm.run_transaction(black_box(&tx)); - (vm, result) - }, - BatchSize::LargeInput, // VM can consume significant amount of RAM, especially the new one - ); - } - }); - } -} - -fn bench_load_test(c: &mut Criterion) { - let mut group = c.benchmark_group(VM::LABEL.as_str()); - group - .sample_size(SAMPLE_SIZE) - .measurement_time(Duration::from_secs(10)); - - // Nonce 0 is used for the deployment transaction - let tx = get_load_test_tx(1, 10_000_000, LoadTestParams::default()); - bench_load_test_transaction::(&mut group, "load_test", &tx); - - let tx = get_realistic_load_test_tx(1); - bench_load_test_transaction::(&mut group, "load_test_realistic", &tx); - - let tx = get_heavy_load_test_tx(1); - bench_load_test_transaction::(&mut group, "load_test_heavy", &tx); -} - -fn bench_load_test_transaction( - group: &mut BenchmarkGroup<'_, WallTime>, - name: &str, - tx: &Transaction, -) { - group.bench_function(name, |bencher| { - bencher.iter_batched( - || { - let mut vm = BenchmarkingVm::::default(); - vm.run_transaction(&get_load_test_deploy_tx()); - vm - }, - |mut vm| { - let result = vm.run_transaction(black_box(tx)); - assert!(!result.result.is_failed(), "{:?}", result.result); - (vm, result) - }, - BatchSize::LargeInput, - ); - }); -} - -criterion_group!( - benches, - benches_in_folder::, - benches_in_folder::, - benches_in_folder::, - benches_in_folder::, - bench_load_test::, - bench_load_test:: -); -criterion_main!(benches); diff --git a/core/tests/vm-benchmark/benches/diy_benchmark.rs b/core/tests/vm-benchmark/benches/diy_benchmark.rs deleted file mode 100644 index 1601de5eb85f..000000000000 --- a/core/tests/vm-benchmark/benches/diy_benchmark.rs +++ /dev/null @@ -1,53 +0,0 @@ -use std::time::{Duration, Instant}; - -use criterion::black_box; -use vise::{Gauge, LabeledFamily, Metrics}; -use zksync_vm_benchmark_harness::{cut_to_allowed_bytecode_size, get_deploy_tx, BenchmarkingVm}; - -fn main() { - let mut results = vec![]; - - for path in std::fs::read_dir("deployment_benchmarks").unwrap() { - let path = path.unwrap().path(); - - let test_contract = std::fs::read(&path).expect("failed to read file"); - - let code = cut_to_allowed_bytecode_size(&test_contract).unwrap(); - let tx = get_deploy_tx(code); - - let name = 
path.file_name().unwrap().to_str().unwrap(); - - println!("benchmarking: {}", name); - - let mut timings = vec![]; - let benchmark_start = Instant::now(); - while benchmark_start.elapsed() < Duration::from_secs(5) { - let start = Instant::now(); - BenchmarkingVm::new().run_transaction(black_box(&tx)); - timings.push(start.elapsed()); - } - - println!("{:?}", timings.iter().min().unwrap()); - results.push((name.to_owned(), timings)); - } - - if option_env!("PUSH_VM_BENCHMARKS_TO_PROMETHEUS").is_some() { - vm_benchmark::with_prometheus::with_prometheus(|| { - for (name, timings) in results { - for (i, timing) in timings.into_iter().enumerate() { - VM_BENCHMARK_METRICS.timing[&(name.clone(), i.to_string())].set(timing); - } - } - }); - } -} - -#[derive(Debug, Metrics)] -#[metrics(prefix = "vm_benchmark")] -pub(crate) struct VmBenchmarkMetrics { - #[metrics(labels = ["benchmark", "run_no"])] - pub timing: LabeledFamily<(String, String), Gauge<Duration>, 2>, -} - -#[vise::register] -pub(crate) static VM_BENCHMARK_METRICS: vise::Global<VmBenchmarkMetrics> = vise::Global::new(); diff --git a/core/tests/vm-benchmark/benches/iai.rs b/core/tests/vm-benchmark/benches/iai.rs index 2837a2345a5a..6b8965afa4f1 100644 --- a/core/tests/vm-benchmark/benches/iai.rs +++ b/core/tests/vm-benchmark/benches/iai.rs @@ -1,14 +1,8 @@ use iai::black_box; -use zksync_vm_benchmark_harness::{ - cut_to_allowed_bytecode_size, get_deploy_tx, BenchmarkingVm, BenchmarkingVmFactory, Fast, - Legacy, -}; - -fn run_bytecode<VM: BenchmarkingVmFactory>(path: &str) { - let test_contract = std::fs::read(path).expect("failed to read file"); - let code = cut_to_allowed_bytecode_size(&test_contract).unwrap(); - let tx = get_deploy_tx(code); +use vm_benchmark::{BenchmarkingVm, BenchmarkingVmFactory, Bytecode, Fast, Legacy}; +fn run_bytecode<VM: BenchmarkingVmFactory>(name: &str) { + let tx = Bytecode::get(name).deploy_tx(); black_box(BenchmarkingVm::<VM>::default().run_transaction(&tx)); } @@ -16,11 +10,11 @@ macro_rules!
make_functions_and_main { ($($file:ident => $legacy_name:ident,)+) => { $( fn $file() { - run_bytecode::<Fast>(concat!("deployment_benchmarks/", stringify!($file))); + run_bytecode::<Fast>(stringify!($file)); } fn $legacy_name() { - run_bytecode::<Legacy>(concat!("deployment_benchmarks/", stringify!($file))); + run_bytecode::<Legacy>(stringify!($file)); } )+ diff --git a/core/tests/vm-benchmark/benches/oneshot.rs b/core/tests/vm-benchmark/benches/oneshot.rs new file mode 100644 index 000000000000..58a90af4981f --- /dev/null +++ b/core/tests/vm-benchmark/benches/oneshot.rs @@ -0,0 +1,91 @@ +use std::time::Duration; + +use criterion::{black_box, criterion_group, criterion_main, Criterion}; +use vm_benchmark::{ + criterion::{BenchmarkGroup, CriterionExt, MeteredTime}, + get_heavy_load_test_tx, get_load_test_deploy_tx, get_load_test_tx, get_realistic_load_test_tx, + BenchmarkingVm, BenchmarkingVmFactory, Fast, Legacy, LoadTestParams, BYTECODES, +}; +use zksync_types::Transaction; + +const SAMPLE_SIZE: usize = 20; + +fn benches_in_folder<VM: BenchmarkingVmFactory, const FULL: bool>(c: &mut Criterion<MeteredTime>) { + let mut group = c.metered_group(VM::LABEL.as_str()); + group + .sample_size(SAMPLE_SIZE) + .measurement_time(Duration::from_secs(10)); + + for bytecode in BYTECODES { + let tx = bytecode.deploy_tx(); + let bench_name = bytecode.name; + let full_suffix = if FULL { "/full" } else { "" }; + let bench_name = format!("{bench_name}{full_suffix}"); + + group.bench_metered(bench_name, |bencher| { + if FULL { + // Include VM initialization / drop into the measured time + bencher.iter(|timer| { + let _guard = timer.start(); + BenchmarkingVm::<VM>::default().run_transaction(black_box(&tx)); + }); + } else { + bencher.iter(|timer| { + let mut vm = BenchmarkingVm::<VM>::default(); + let guard = timer.start(); + let _result = vm.run_transaction(black_box(&tx)); + drop(guard); // do not include latency of dropping `_result` + }); + } + }); + } +} + +fn bench_load_test<VM: BenchmarkingVmFactory>(c: &mut Criterion<MeteredTime>) { + let mut group = c.metered_group(VM::LABEL.as_str()); + group + .sample_size(SAMPLE_SIZE) + .measurement_time(Duration::from_secs(10)); + + // Nonce 0 is used for the deployment transaction + let tx = get_load_test_tx(1, 10_000_000, LoadTestParams::default()); + bench_load_test_transaction::<VM>(&mut group, "load_test", &tx); + + let tx = get_realistic_load_test_tx(1); + bench_load_test_transaction::<VM>(&mut group, "load_test_realistic", &tx); + + let tx = get_heavy_load_test_tx(1); + bench_load_test_transaction::<VM>(&mut group, "load_test_heavy", &tx); +} + +fn bench_load_test_transaction<VM: BenchmarkingVmFactory>( + group: &mut BenchmarkGroup<'_>, + name: &str, + tx: &Transaction, +) { + group.bench_metered(name, |bencher| { + bencher.iter(|timer| { + let mut vm = BenchmarkingVm::<VM>::default(); + vm.run_transaction(&get_load_test_deploy_tx()); + + let guard = timer.start(); + let result = vm.run_transaction(black_box(tx)); + drop(guard); // do not include the latency of `result` checks / drop + assert!(!result.result.is_failed(), "{:?}", result.result); + }); + }); +} + +criterion_group!( + name = benches; + config = Criterion::default() + .configure_from_args() + .with_measurement(MeteredTime::new("criterion")); + targets = benches_in_folder::<Fast, false>, + benches_in_folder::<Fast, true>, + benches_in_folder::<Legacy, false>, + benches_in_folder::<Legacy, true>, + bench_load_test::<Fast>, + bench_load_test::<Legacy> +); +criterion_main!(benches); diff --git a/core/tests/vm-benchmark/harness/Cargo.toml b/core/tests/vm-benchmark/harness/Cargo.toml deleted file mode 100644 index a24d3fa1294a..000000000000 --- a/core/tests/vm-benchmark/harness/Cargo.toml +++ /dev/null @@ -1,19 +0,0 @@ -[package] -name =
"zksync_vm_benchmark_harness" -version.workspace = true -edition.workspace = true -license.workspace = true -publish = false - -[dependencies] -zksync_multivm.workspace = true -zksync_types.workspace = true -zksync_state.workspace = true -zksync_utils.workspace = true -zksync_system_constants.workspace = true -zksync_contracts.workspace = true -zk_evm.workspace = true -once_cell.workspace = true - -[dev-dependencies] -assert_matches.workspace = true diff --git a/core/tests/vm-benchmark/src/parse_iai.rs b/core/tests/vm-benchmark/src/bin/common/mod.rs similarity index 98% rename from core/tests/vm-benchmark/src/parse_iai.rs rename to core/tests/vm-benchmark/src/bin/common/mod.rs index 61376b429a32..a92c9d5f710c 100644 --- a/core/tests/vm-benchmark/src/parse_iai.rs +++ b/core/tests/vm-benchmark/src/bin/common/mod.rs @@ -1,5 +1,6 @@ use std::io::BufRead; +#[derive(Debug)] pub struct IaiResult { pub name: String, pub instructions: u64, diff --git a/core/tests/vm-benchmark/src/compare_iai_results.rs b/core/tests/vm-benchmark/src/bin/compare_iai_results.rs similarity index 98% rename from core/tests/vm-benchmark/src/compare_iai_results.rs rename to core/tests/vm-benchmark/src/bin/compare_iai_results.rs index d2c9d73f7e36..faf72a18f451 100644 --- a/core/tests/vm-benchmark/src/compare_iai_results.rs +++ b/core/tests/vm-benchmark/src/bin/compare_iai_results.rs @@ -4,7 +4,9 @@ use std::{ io::{BufRead, BufReader}, }; -use vm_benchmark::parse_iai::parse_iai; +pub use crate::common::parse_iai; + +mod common; fn main() { let [iai_before, iai_after, opcodes_before, opcodes_after] = std::env::args() diff --git a/core/tests/vm-benchmark/src/bin/iai_results_to_prometheus.rs b/core/tests/vm-benchmark/src/bin/iai_results_to_prometheus.rs new file mode 100644 index 000000000000..3b3aa05bf69c --- /dev/null +++ b/core/tests/vm-benchmark/src/bin/iai_results_to_prometheus.rs @@ -0,0 +1,52 @@ +use std::{env, io::BufReader, time::Duration}; + +use tokio::sync::watch; +use vise::{Gauge, LabeledFamily, Metrics}; +use zksync_vlog::prometheus::PrometheusExporterConfig; + +use crate::common::{parse_iai, IaiResult}; + +mod common; + +#[derive(Debug, Metrics)] +#[metrics(prefix = "vm_cachegrind")] +pub(crate) struct VmCachegrindMetrics { + #[metrics(labels = ["benchmark"])] + pub instructions: LabeledFamily>, + #[metrics(labels = ["benchmark"])] + pub l1_accesses: LabeledFamily>, + #[metrics(labels = ["benchmark"])] + pub l2_accesses: LabeledFamily>, + #[metrics(labels = ["benchmark"])] + pub ram_accesses: LabeledFamily>, + #[metrics(labels = ["benchmark"])] + pub cycles: LabeledFamily>, +} + +#[vise::register] +pub(crate) static VM_CACHEGRIND_METRICS: vise::Global = vise::Global::new(); + +#[tokio::main] +async fn main() { + let results: Vec = parse_iai(BufReader::new(std::io::stdin())).collect(); + + let endpoint = env::var("BENCHMARK_PROMETHEUS_PUSHGATEWAY_URL") + .expect("`BENCHMARK_PROMETHEUS_PUSHGATEWAY_URL` env var is not set"); + let (stop_sender, stop_receiver) = watch::channel(false); + let prometheus_config = + PrometheusExporterConfig::push(endpoint.to_owned(), Duration::from_millis(100)); + tokio::spawn(prometheus_config.run(stop_receiver)); + + for result in results { + let name = result.name; + VM_CACHEGRIND_METRICS.instructions[&name.clone()].set(result.instructions); + VM_CACHEGRIND_METRICS.l1_accesses[&name.clone()].set(result.l1_accesses); + VM_CACHEGRIND_METRICS.l2_accesses[&name.clone()].set(result.l2_accesses); + VM_CACHEGRIND_METRICS.ram_accesses[&name.clone()].set(result.ram_accesses); + 
VM_CACHEGRIND_METRICS.cycles[&name].set(result.cycles); + } + + println!("Waiting for push to happen..."); + tokio::time::sleep(Duration::from_secs(1)).await; + stop_sender.send_replace(true); +} diff --git a/core/tests/vm-benchmark/src/bin/instruction_counts.rs b/core/tests/vm-benchmark/src/bin/instruction_counts.rs new file mode 100644 index 000000000000..f9bb04c01bff --- /dev/null +++ b/core/tests/vm-benchmark/src/bin/instruction_counts.rs @@ -0,0 +1,11 @@ +//! Runs all benchmarks and prints out the number of zkEVM opcodes each one executed. + +use vm_benchmark::{BenchmarkingVm, BYTECODES}; + +fn main() { + for bytecode in BYTECODES { + let tx = bytecode.deploy_tx(); + let name = bytecode.name; + println!("{name} {}", BenchmarkingVm::new().instruction_count(&tx)); + } +} diff --git a/core/tests/vm-benchmark/deployment_benchmarks/access_memory b/core/tests/vm-benchmark/src/bytecodes/access_memory similarity index 100% rename from core/tests/vm-benchmark/deployment_benchmarks/access_memory rename to core/tests/vm-benchmark/src/bytecodes/access_memory diff --git a/core/tests/vm-benchmark/deployment_benchmarks/call_far b/core/tests/vm-benchmark/src/bytecodes/call_far similarity index 100% rename from core/tests/vm-benchmark/deployment_benchmarks/call_far rename to core/tests/vm-benchmark/src/bytecodes/call_far diff --git a/core/tests/vm-benchmark/deployment_benchmarks/decode_shl_sub b/core/tests/vm-benchmark/src/bytecodes/decode_shl_sub similarity index 100% rename from core/tests/vm-benchmark/deployment_benchmarks/decode_shl_sub rename to core/tests/vm-benchmark/src/bytecodes/decode_shl_sub diff --git a/core/tests/vm-benchmark/deployment_benchmarks/deploy_simple_contract b/core/tests/vm-benchmark/src/bytecodes/deploy_simple_contract similarity index 100% rename from core/tests/vm-benchmark/deployment_benchmarks/deploy_simple_contract rename to core/tests/vm-benchmark/src/bytecodes/deploy_simple_contract diff --git a/core/tests/vm-benchmark/deployment_benchmarks/event_spam b/core/tests/vm-benchmark/src/bytecodes/event_spam similarity index 100% rename from core/tests/vm-benchmark/deployment_benchmarks/event_spam rename to core/tests/vm-benchmark/src/bytecodes/event_spam diff --git a/core/tests/vm-benchmark/deployment_benchmarks/finish_eventful_frames b/core/tests/vm-benchmark/src/bytecodes/finish_eventful_frames similarity index 100% rename from core/tests/vm-benchmark/deployment_benchmarks/finish_eventful_frames rename to core/tests/vm-benchmark/src/bytecodes/finish_eventful_frames diff --git a/core/tests/vm-benchmark/deployment_benchmarks/heap_read_write b/core/tests/vm-benchmark/src/bytecodes/heap_read_write similarity index 100% rename from core/tests/vm-benchmark/deployment_benchmarks/heap_read_write rename to core/tests/vm-benchmark/src/bytecodes/heap_read_write diff --git a/core/tests/vm-benchmark/deployment_benchmarks/slot_hash_collision b/core/tests/vm-benchmark/src/bytecodes/slot_hash_collision similarity index 100% rename from core/tests/vm-benchmark/deployment_benchmarks/slot_hash_collision rename to core/tests/vm-benchmark/src/bytecodes/slot_hash_collision diff --git a/core/tests/vm-benchmark/deployment_benchmarks/write_and_decode b/core/tests/vm-benchmark/src/bytecodes/write_and_decode similarity index 100% rename from core/tests/vm-benchmark/deployment_benchmarks/write_and_decode rename to core/tests/vm-benchmark/src/bytecodes/write_and_decode diff --git a/core/tests/vm-benchmark/src/criterion.rs b/core/tests/vm-benchmark/src/criterion.rs new file mode 100644 index 
000000000000..9515ac4ef988 --- /dev/null +++ b/core/tests/vm-benchmark/src/criterion.rs @@ -0,0 +1,477 @@ +//! Criterion helpers and extensions used to record benchmark timings as Prometheus metrics. + +use std::{ + cell::RefCell, + convert::Infallible, + env, fmt, mem, + rc::Rc, + sync::Once, + thread, + time::{Duration, Instant}, +}; + +use criterion::{ + measurement::{Measurement, ValueFormatter, WallTime}, + Criterion, Throughput, +}; +use once_cell::{sync::OnceCell as SyncOnceCell, unsync::OnceCell}; +use tokio::sync::watch; +use vise::{EncodeLabelSet, Family, Gauge, Metrics, Unit}; +use zksync_vlog::prometheus::PrometheusExporterConfig; + +/// Checks whether a benchmark binary is running in the test mode (as opposed to benchmarking). +pub fn is_test_mode() -> bool { + !env::args().any(|arg| arg == "--bench") +} + +#[derive(Debug, Clone, PartialEq, Eq, Hash, EncodeLabelSet)] +struct BenchLabels { + bin: &'static str, + group: String, + benchmark: String, + arg: Option<String>, +} + +// We don't use histograms because benchmark results are uploaded in short bursts, which leads to missing zero values. +#[derive(Debug, Metrics)] +#[metrics(prefix = "vm_benchmark")] +struct VmBenchmarkMetrics { + /// Number of samples for a benchmark. + sample_count: Family<BenchLabels, Gauge<usize>>, + + /// Mean latency for a benchmark. + #[metrics(unit = Unit::Seconds)] + mean_timing: Family<BenchLabels, Gauge<Duration>>, + /// Minimum latency for a benchmark. + #[metrics(unit = Unit::Seconds)] + min_timing: Family<BenchLabels, Gauge<Duration>>, + /// Maximum latency for a benchmark. + #[metrics(unit = Unit::Seconds)] + max_timing: Family<BenchLabels, Gauge<Duration>>, + /// Median latency for a benchmark. + #[metrics(unit = Unit::Seconds)] + median_timing: Family<BenchLabels, Gauge<Duration>>, +} + +#[vise::register] +static METRICS: vise::Global<VmBenchmarkMetrics> = vise::Global::new(); + +#[derive(Debug)] +struct PrometheusRuntime { + stop_sender: watch::Sender<bool>, + _runtime: tokio::runtime::Runtime, +} + +impl Drop for PrometheusRuntime { + fn drop(&mut self) { + self.stop_sender.send_replace(true); + // Metrics are pushed automatically on exit, so we wait *after* sending a stop signal + println!("Waiting for Prometheus metrics to be pushed"); + thread::sleep(Duration::from_secs(1)); + } +} + +impl PrometheusRuntime { + fn new() -> Option<Self> { + const PUSH_INTERVAL: Duration = Duration::from_millis(100); + + let gateway_url = env::var("BENCHMARK_PROMETHEUS_PUSHGATEWAY_URL").ok()?; + let runtime = tokio::runtime::Runtime::new().expect("Failed initializing Tokio runtime"); + println!("Pushing Prometheus metrics to {gateway_url} each {PUSH_INTERVAL:?}"); + let (stop_sender, stop_receiver) = watch::channel(false); + let prometheus_config = PrometheusExporterConfig::push(gateway_url, PUSH_INTERVAL); + runtime.spawn(prometheus_config.run(stop_receiver)); + Some(Self { + stop_sender, + _runtime: runtime, + }) + } +} + +/// Guard returned by [`CurrentBenchmark::set()`] that unsets the current benchmark on drop.
+#[must_use = "Will unset the current benchmark when dropped"] +#[derive(Debug)] +struct CurrentBenchmarkGuard; + +impl Drop for CurrentBenchmarkGuard { + fn drop(&mut self) { + CURRENT_BENCH.take(); + } +} + +#[derive(Debug)] +struct CurrentBenchmark { + metrics: &'static VmBenchmarkMetrics, + labels: BenchLabels, + observations: Vec, +} + +impl CurrentBenchmark { + fn set(metrics: &'static VmBenchmarkMetrics, labels: BenchLabels) -> CurrentBenchmarkGuard { + CURRENT_BENCH.replace(Some(Self { + metrics, + labels, + observations: vec![], + })); + CurrentBenchmarkGuard + } + + fn observe(timing: Duration) { + CURRENT_BENCH.with_borrow_mut(|this| { + if let Some(this) = this { + this.observations.push(timing); + } + }); + } +} + +impl Drop for CurrentBenchmark { + fn drop(&mut self) { + let mut observations = mem::take(&mut self.observations); + if observations.is_empty() { + return; + } + + let len = observations.len(); + self.metrics.sample_count[&self.labels].set(len); + let mean = observations + .iter() + .copied() + .sum::() + .div_f32(len as f32); + self.metrics.mean_timing[&self.labels].set(mean); + + // Could use quick median algorithm, but since there aren't that many observations expected, + // sorting looks acceptable. + observations.sort_unstable(); + let (min, max) = (observations[0], *observations.last().unwrap()); + self.metrics.min_timing[&self.labels].set(min); + self.metrics.max_timing[&self.labels].set(max); + let median = if len % 2 == 0 { + (observations[len / 2 - 1] + observations[len / 2]) / 2 + } else { + observations[len / 2] + }; + self.metrics.median_timing[&self.labels].set(median); + + println!("Exported timings: min={min:?}, max={max:?}, mean={mean:?}, median={median:?}"); + } +} + +thread_local! { + static CURRENT_BENCH: RefCell> = const { RefCell::new(None) }; +} + +static BIN_NAME: SyncOnceCell<&'static str> = SyncOnceCell::new(); + +/// Measurement for criterion that exports . +#[derive(Debug)] +pub struct MeteredTime { + _prometheus: Option, +} + +impl MeteredTime { + pub fn new(bin_name: &'static str) -> Self { + static PROMETHEUS_INIT: Once = Once::new(); + + let mut prometheus = None; + if !is_test_mode() { + PROMETHEUS_INIT.call_once(|| { + prometheus = PrometheusRuntime::new(); + }); + } + + if let Err(prev_name) = BIN_NAME.set(bin_name) { + assert_eq!(prev_name, bin_name, "attempted to redefine binary name"); + } + + Self { + _prometheus: prometheus, + } + } +} + +impl Measurement for MeteredTime { + type Intermediate = Infallible; + type Value = Duration; + + fn start(&self) -> Self::Intermediate { + // All measurements must be done via `Bencher::iter()` + unreachable!("must not be invoked directly"); + } + + fn end(&self, _: Self::Intermediate) -> Self::Value { + unreachable!("must not be invoked directly"); + } + + fn add(&self, v1: &Self::Value, v2: &Self::Value) -> Self::Value { + *v1 + *v2 + } + + fn zero(&self) -> Self::Value { + Duration::ZERO + } + + fn to_f64(&self, value: &Self::Value) -> f64 { + WallTime.to_f64(value) + } + + fn formatter(&self) -> &dyn ValueFormatter { + WallTime.formatter() + } +} + +/// Drop-in replacement for `criterion::BenchmarkId`. 
+pub struct BenchmarkId { + inner: criterion::BenchmarkId, + benchmark: String, + arg: String, +} + +impl BenchmarkId { + pub fn new<S: Into<String>, P: fmt::Display>(function_name: S, parameter: P) -> Self { + let function_name = function_name.into(); + Self { + benchmark: function_name.clone(), + arg: parameter.to_string(), + inner: criterion::BenchmarkId::new(function_name, parameter), + } + } +} + +/// Drop-in replacement for `criterion::BenchmarkGroup`. +pub struct BenchmarkGroup<'a> { + name: String, + inner: criterion::BenchmarkGroup<'a, MeteredTime>, + metrics: &'static VmBenchmarkMetrics, +} + +impl BenchmarkGroup<'_> { + pub fn sample_size(&mut self, size: usize) -> &mut Self { + self.inner.sample_size(size); + self + } + + pub fn throughput(&mut self, throughput: Throughput) -> &mut Self { + self.inner.throughput(throughput); + self + } + + pub fn measurement_time(&mut self, dur: Duration) -> &mut Self { + self.inner.measurement_time(dur); + self + } + + fn start_bench(&self, benchmark: String, arg: Option<String>) -> CurrentBenchmarkGuard { + let labels = BenchLabels { + bin: BIN_NAME.get().copied().unwrap_or(""), + group: self.name.clone(), + benchmark, + arg, + }; + CurrentBenchmark::set(self.metrics, labels) + } + + pub fn bench_metered<F>(&mut self, id: impl Into<String>, mut bench_fn: F) + where + F: FnMut(&mut Bencher<'_, '_>), + { + let id = id.into(); + let _guard = self.start_bench(id.clone(), None); + self.inner + .bench_function(id, |bencher| bench_fn(&mut Bencher { inner: bencher })); + } + + pub fn bench_metered_with_input<I, F>(&mut self, id: BenchmarkId, input: &I, mut bench_fn: F) + where + I: ?Sized, + F: FnMut(&mut Bencher<'_, '_>, &I), + { + let _guard = self.start_bench(id.benchmark, Some(id.arg)); + self.inner + .bench_with_input(id.inner, input, |bencher, input| { + bench_fn(&mut Bencher { inner: bencher }, input) + }); + } +} + +pub struct Bencher<'a, 'r> { + inner: &'r mut criterion::Bencher<'a, MeteredTime>, +} + +impl Bencher<'_, '_> { + pub fn iter(&mut self, mut routine: impl FnMut(BenchmarkTimer)) { + self.inner.iter_custom(move |iters| { + let mut total = Duration::ZERO; + for _ in 0..iters { + let timer = BenchmarkTimer::new(); + let observation = timer.observation.clone(); + routine(timer); + let timing = observation.get().copied().unwrap_or_default(); + CurrentBenchmark::observe(timing); + total += timing; + } + total + }) + } +} + +/// Timer for benchmarks supplied to the `Bencher::iter()` closure. +#[derive(Debug)] +#[must_use = "should be started to start measurements"] +pub struct BenchmarkTimer { + observation: Rc<OnceCell<Duration>>, +} + +impl BenchmarkTimer { + fn new() -> Self { + Self { + observation: Rc::default(), + } + } + + /// Starts the timer. The timer will remain active until the returned guard is dropped. If you drop the timer implicitly, + /// be careful with the drop order (inverse to the variable declaration order); when in doubt, drop the guard explicitly. + pub fn start(self) -> BenchmarkTimerGuard { + BenchmarkTimerGuard { + started_at: Instant::now(), + observation: self.observation, + } + } +} + +/// Guard returned from [`BenchmarkTimer::start()`].
+#[derive(Debug)] +#[must_use = "will stop the timer on drop"] +pub struct BenchmarkTimerGuard { + started_at: Instant, + observation: Rc<OnceCell<Duration>>, +} + +impl Drop for BenchmarkTimerGuard { + fn drop(&mut self) { + let latency = self.started_at.elapsed(); + self.observation.set(latency).ok(); + } +} + +pub trait CriterionExt { + fn metered_group(&mut self, name: impl Into<String>) -> BenchmarkGroup<'_>; +} + +impl CriterionExt for Criterion<MeteredTime> { + fn metered_group(&mut self, name: impl Into<String>) -> BenchmarkGroup<'_> { + let name = name.into(); + BenchmarkGroup { + inner: self.benchmark_group(name.clone()), + name, + metrics: &METRICS, + } + } +} + +#[cfg(test)] +mod tests { + use std::collections::HashSet; + + use super::*; + use crate::BYTECODES; + + fn test_benchmark(c: &mut Criterion<MeteredTime>, metrics: &'static VmBenchmarkMetrics) { + let mut group = c.metered_group("single"); + group.metrics = metrics; + for bytecode in BYTECODES { + group.bench_metered(bytecode.name, |bencher| { + bencher.iter(|timer| { + let _guard = timer.start(); + thread::sleep(Duration::from_millis(1)) + }) + }); + } + drop(group); + + let mut group = c.metered_group("with_arg"); + group.metrics = metrics; + for bytecode in BYTECODES { + for arg in [1, 10, 100] { + group.bench_metered_with_input( + BenchmarkId::new(bytecode.name, arg), + &arg, + |bencher, _arg| { + bencher.iter(|timer| { + let _guard = timer.start(); + thread::sleep(Duration::from_millis(1)) + }); + }, + ) + } + } + } + + #[test] + fn recording_benchmarks() { + let metered_time = MeteredTime::new("test"); + let metrics = &*Box::leak(Box::<VmBenchmarkMetrics>::default()); + + let mut criterion = Criterion::default() + .warm_up_time(Duration::from_millis(10)) + .measurement_time(Duration::from_millis(10)) + .sample_size(10) + .with_measurement(metered_time); + test_benchmark(&mut criterion, metrics); + + let timing_labels: HashSet<_> = metrics.mean_timing.to_entries().into_keys().collect(); + // Check that labels are as expected.
+ for bytecode in BYTECODES { + assert!(timing_labels.contains(&BenchLabels { + bin: "test", + group: "single".to_owned(), + benchmark: bytecode.name.to_owned(), + arg: None, + })); + assert!(timing_labels.contains(&BenchLabels { + bin: "test", + group: "with_arg".to_owned(), + benchmark: bytecode.name.to_owned(), + arg: Some("1".to_owned()), + })); + assert!(timing_labels.contains(&BenchLabels { + bin: "test", + group: "with_arg".to_owned(), + benchmark: bytecode.name.to_owned(), + arg: Some("10".to_owned()), + })); + assert!(timing_labels.contains(&BenchLabels { + bin: "test", + group: "with_arg".to_owned(), + benchmark: bytecode.name.to_owned(), + arg: Some("100".to_owned()), + })); + } + assert_eq!( + timing_labels.len(), + 4 * BYTECODES.len(), + "{timing_labels:#?}" + ); + + // Sanity-check relations among collected metrics + for label in &timing_labels { + let mean = metrics.mean_timing[label].get(); + let min = metrics.min_timing[label].get(); + let max = metrics.max_timing[label].get(); + let median = metrics.median_timing[label].get(); + assert!( + min > Duration::ZERO, + "min={min:?}, mean={mean:?}, median = {median:?}, max={max:?}" + ); + assert!( + min <= mean && min <= median, + "min={min:?}, mean={mean:?}, median = {median:?}, max={max:?}" + ); + assert!( + mean <= max && median <= max, + "min={min:?}, mean={mean:?}, median = {median:?}, max={max:?}" + ); + } + } +} diff --git a/core/tests/vm-benchmark/src/find_slowest.rs b/core/tests/vm-benchmark/src/find_slowest.rs deleted file mode 100644 index 97a6acd5acd9..000000000000 --- a/core/tests/vm-benchmark/src/find_slowest.rs +++ /dev/null @@ -1,43 +0,0 @@ -use std::{ - io::Write, - time::{Duration, Instant}, -}; - -use zksync_vm_benchmark_harness::*; - -fn main() { - let mut results = vec![]; - - let arg = std::env::args() - .nth(1) - .expect("Expected directory of contracts to rank as first argument."); - let files = std::fs::read_dir(arg).expect("Failed to list dir"); - - let mut last_progress_update = Instant::now(); - - for (i, file) in files.enumerate() { - let path = file.unwrap().path(); - - let test_contract = std::fs::read(&path).expect("failed to read file"); - - if let Some(code) = cut_to_allowed_bytecode_size(&test_contract) { - let tx = get_deploy_tx(code); - - let start_time = Instant::now(); - BenchmarkingVm::new().run_transaction(&tx); - results.push((start_time.elapsed(), path)); - } - - if last_progress_update.elapsed() > Duration::from_millis(100) { - print!("\r{}", i); - std::io::stdout().flush().unwrap(); - last_progress_update = Instant::now(); - } - } - println!(); - - results.sort(); - for (time, path) in results.iter().rev().take(30) { - println!("{} took {:?}", path.display(), time); - } -} diff --git a/core/tests/vm-benchmark/src/iai_results_to_prometheus.rs b/core/tests/vm-benchmark/src/iai_results_to_prometheus.rs deleted file mode 100644 index d419603bae87..000000000000 --- a/core/tests/vm-benchmark/src/iai_results_to_prometheus.rs +++ /dev/null @@ -1,37 +0,0 @@ -use std::io::BufReader; - -use vise::{Gauge, LabeledFamily, Metrics}; -use vm_benchmark::parse_iai::IaiResult; - -fn main() { - let results: Vec = - vm_benchmark::parse_iai::parse_iai(BufReader::new(std::io::stdin())).collect(); - - vm_benchmark::with_prometheus::with_prometheus(|| { - for r in results { - VM_CACHEGRIND_METRICS.instructions[&r.name.clone()].set(r.instructions as f64); - VM_CACHEGRIND_METRICS.l1_accesses[&r.name.clone()].set(r.l1_accesses as f64); - VM_CACHEGRIND_METRICS.l2_accesses[&r.name.clone()].set(r.l2_accesses as 
f64); - VM_CACHEGRIND_METRICS.ram_accesses[&r.name.clone()].set(r.ram_accesses as f64); - VM_CACHEGRIND_METRICS.cycles[&r.name.clone()].set(r.cycles as f64); - } - }) -} - -#[derive(Debug, Metrics)] -#[metrics(prefix = "vm_cachegrind")] -pub(crate) struct VmCachegrindMetrics { - #[metrics(labels = ["benchmark"])] - pub instructions: LabeledFamily>, - #[metrics(labels = ["benchmark"])] - pub l1_accesses: LabeledFamily>, - #[metrics(labels = ["benchmark"])] - pub l2_accesses: LabeledFamily>, - #[metrics(labels = ["benchmark"])] - pub ram_accesses: LabeledFamily>, - #[metrics(labels = ["benchmark"])] - pub cycles: LabeledFamily>, -} - -#[vise::register] -pub(crate) static VM_CACHEGRIND_METRICS: vise::Global = vise::Global::new(); diff --git a/core/tests/vm-benchmark/harness/src/instruction_counter.rs b/core/tests/vm-benchmark/src/instruction_counter.rs similarity index 100% rename from core/tests/vm-benchmark/harness/src/instruction_counter.rs rename to core/tests/vm-benchmark/src/instruction_counter.rs diff --git a/core/tests/vm-benchmark/src/instruction_counts.rs b/core/tests/vm-benchmark/src/instruction_counts.rs deleted file mode 100644 index c038c8f2bf6b..000000000000 --- a/core/tests/vm-benchmark/src/instruction_counts.rs +++ /dev/null @@ -1,28 +0,0 @@ -//! Runs all benchmarks and prints out the number of zkEVM opcodes each one executed. - -use std::path::Path; - -use zksync_vm_benchmark_harness::{cut_to_allowed_bytecode_size, get_deploy_tx, BenchmarkingVm}; - -fn main() { - // using source file location because this is just a script, the binary isn't meant to be reused - let benchmark_folder = Path::new(file!()) - .parent() - .unwrap() - .parent() - .unwrap() - .join("deployment_benchmarks"); - - for path in std::fs::read_dir(benchmark_folder).unwrap() { - let path = path.unwrap().path(); - - let test_contract = std::fs::read(&path).expect("failed to read file"); - - let code = cut_to_allowed_bytecode_size(&test_contract).unwrap(); - let tx = get_deploy_tx(code); - - let name = path.file_name().unwrap().to_str().unwrap(); - - println!("{} {}", name, BenchmarkingVm::new().instruction_count(&tx)); - } -} diff --git a/core/tests/vm-benchmark/src/lib.rs b/core/tests/vm-benchmark/src/lib.rs index 38cc311105b3..4bd008d33196 100644 --- a/core/tests/vm-benchmark/src/lib.rs +++ b/core/tests/vm-benchmark/src/lib.rs @@ -1,2 +1,72 @@ -pub mod parse_iai; -pub mod with_prometheus; +use zksync_types::Transaction; + +pub use crate::{ + transaction::{ + get_deploy_tx, get_deploy_tx_with_gas_limit, get_heavy_load_test_tx, + get_load_test_deploy_tx, get_load_test_tx, get_realistic_load_test_tx, get_transfer_tx, + LoadTestParams, + }, + vm::{BenchmarkingVm, BenchmarkingVmFactory, Fast, Legacy, VmLabel}, +}; + +pub mod criterion; +mod instruction_counter; +mod transaction; +mod vm; + +#[derive(Debug, Clone, Copy)] +pub struct Bytecode { + pub name: &'static str, + raw_bytecode: &'static [u8], +} + +impl Bytecode { + pub fn get(name: &str) -> Self { + BYTECODES + .iter() + .find(|bytecode| bytecode.name == name) + .copied() + .unwrap_or_else(|| panic!("bytecode `{name}` is not defined")) + } + + /// Bytecodes must consist of an odd number of 32 byte words. + /// This function "fixes" bytecodes of wrong length by cutting off their end. 
+ fn cut_to_allowed_bytecode_size(bytes: &[u8]) -> &[u8] { + let mut words = bytes.len() / 32; + assert!(words > 0, "bytecode is empty"); + + if words & 1 == 0 { + words -= 1; + } + &bytes[..32 * words] + } + + pub fn bytecode(&self) -> &'static [u8] { + Self::cut_to_allowed_bytecode_size(self.raw_bytecode) + } + + pub fn deploy_tx(&self) -> Transaction { + get_deploy_tx(self.bytecode()) + } +} + +macro_rules! include_bytecode { + ($name:ident) => { + Bytecode { + name: stringify!($name), + raw_bytecode: include_bytes!(concat!("bytecodes/", stringify!($name))), + } + }; +} + +pub const BYTECODES: &[Bytecode] = &[ + include_bytecode!(access_memory), + include_bytecode!(call_far), + include_bytecode!(decode_shl_sub), + include_bytecode!(deploy_simple_contract), + include_bytecode!(event_spam), + include_bytecode!(finish_eventful_frames), + include_bytecode!(heap_read_write), + include_bytecode!(slot_hash_collision), + include_bytecode!(write_and_decode), +]; diff --git a/core/tests/vm-benchmark/src/main.rs b/core/tests/vm-benchmark/src/main.rs index 925ec78ceb3c..6e2b397d746d 100644 --- a/core/tests/vm-benchmark/src/main.rs +++ b/core/tests/vm-benchmark/src/main.rs @@ -1,16 +1,10 @@ -use zksync_vm_benchmark_harness::*; +use vm_benchmark::{BenchmarkingVm, Bytecode}; fn main() { - let test_contract = std::fs::read( - std::env::args() - .nth(1) - .expect("please provide an input file"), - ) - .expect("failed to read file"); - - let code = cut_to_allowed_bytecode_size(&test_contract).unwrap(); - let tx = get_deploy_tx(code); - + let bytecode_name = std::env::args() + .nth(1) + .expect("please provide bytecode name, e.g. 'access_memory'"); + let tx = Bytecode::get(&bytecode_name).deploy_tx(); for _ in 0..100 { let mut vm = BenchmarkingVm::new(); vm.run_transaction(&tx); diff --git a/core/tests/vm-benchmark/src/transaction.rs b/core/tests/vm-benchmark/src/transaction.rs new file mode 100644 index 000000000000..90e1c6360b81 --- /dev/null +++ b/core/tests/vm-benchmark/src/transaction.rs @@ -0,0 +1,194 @@ +use once_cell::sync::Lazy; +pub use zksync_contracts::test_contracts::LoadnextContractExecutionParams as LoadTestParams; +use zksync_contracts::{deployer_contract, TestContract}; +use zksync_multivm::utils::get_max_gas_per_pubdata_byte; +use zksync_types::{ + ethabi::{encode, Token}, + fee::Fee, + l2::L2Tx, + utils::deployed_address_create, + Address, K256PrivateKey, L2ChainId, Nonce, ProtocolVersionId, Transaction, + CONTRACT_DEPLOYER_ADDRESS, H256, U256, +}; +use zksync_utils::bytecode::hash_bytecode; + +const LOAD_TEST_MAX_READS: usize = 100; + +pub(crate) static PRIVATE_KEY: Lazy<K256PrivateKey> = + Lazy::new(|| K256PrivateKey::from_bytes(H256([42; 32])).expect("invalid key bytes")); +static LOAD_TEST_CONTRACT_ADDRESS: Lazy<Address>
= + Lazy::new(|| deployed_address_create(PRIVATE_KEY.address(), 0.into())); + +static LOAD_TEST_CONTRACT: Lazy<TestContract> = Lazy::new(zksync_contracts::get_loadnext_contract); + +static CREATE_FUNCTION_SIGNATURE: Lazy<[u8; 4]> = Lazy::new(|| { + deployer_contract() + .function("create") + .unwrap() + .short_signature() +}); + +pub fn get_deploy_tx(code: &[u8]) -> Transaction { + get_deploy_tx_with_gas_limit(code, 30_000_000, 0) +} + +pub fn get_deploy_tx_with_gas_limit(code: &[u8], gas_limit: u32, nonce: u32) -> Transaction { + let mut salt = vec![0_u8; 32]; + salt[28..32].copy_from_slice(&nonce.to_be_bytes()); + let params = [ + Token::FixedBytes(salt), + Token::FixedBytes(hash_bytecode(code).0.to_vec()), + Token::Bytes([].to_vec()), + ]; + let calldata = CREATE_FUNCTION_SIGNATURE + .iter() + .cloned() + .chain(encode(&params)) + .collect(); + + let mut signed = L2Tx::new_signed( + CONTRACT_DEPLOYER_ADDRESS, + calldata, + Nonce(nonce), + tx_fee(gas_limit), + U256::zero(), + L2ChainId::from(270), + &PRIVATE_KEY, + vec![code.to_vec()], // maybe not needed? + Default::default(), + ) + .expect("should create a signed execute transaction"); + + signed.set_input(H256::random().as_bytes().to_vec(), H256::random()); + signed.into() +} + +fn tx_fee(gas_limit: u32) -> Fee { + Fee { + gas_limit: U256::from(gas_limit), + max_fee_per_gas: U256::from(250_000_000), + max_priority_fee_per_gas: U256::from(0), + gas_per_pubdata_limit: U256::from(get_max_gas_per_pubdata_byte( + ProtocolVersionId::latest().into(), + )), + } +} + +pub fn get_transfer_tx(nonce: u32) -> Transaction { + let mut signed = L2Tx::new_signed( + PRIVATE_KEY.address(), + vec![], // calldata + Nonce(nonce), + tx_fee(1_000_000), + 1_000_000_000.into(), // value + L2ChainId::from(270), + &PRIVATE_KEY, + vec![], // factory deps + Default::default(), // paymaster params + ) + .expect("should create a signed execute transaction"); + + signed.set_input(H256::random().as_bytes().to_vec(), H256::random()); + signed.into() +} + +pub fn get_load_test_deploy_tx() -> Transaction { + let calldata = [Token::Uint(LOAD_TEST_MAX_READS.into())]; + let params = [ + Token::FixedBytes(vec![0_u8; 32]), + Token::FixedBytes(hash_bytecode(&LOAD_TEST_CONTRACT.bytecode).0.to_vec()), + Token::Bytes(encode(&calldata)), + ]; + let create_calldata = CREATE_FUNCTION_SIGNATURE + .iter() + .cloned() + .chain(encode(&params)) + .collect(); + + let mut factory_deps = LOAD_TEST_CONTRACT.factory_deps.clone(); + factory_deps.push(LOAD_TEST_CONTRACT.bytecode.clone()); + + let mut signed = L2Tx::new_signed( + CONTRACT_DEPLOYER_ADDRESS, + create_calldata, + Nonce(0), + tx_fee(100_000_000), + U256::zero(), + L2ChainId::from(270), + &PRIVATE_KEY, + factory_deps, + Default::default(), + ) + .expect("should create a signed execute transaction"); + + signed.set_input(H256::random().as_bytes().to_vec(), H256::random()); + signed.into() +} + +pub fn get_load_test_tx(nonce: u32, gas_limit: u32, params: LoadTestParams) -> Transaction { + assert!( + params.reads <= LOAD_TEST_MAX_READS, + "Too many reads: {params:?}, should be <={LOAD_TEST_MAX_READS}" + ); + + let execute_function = LOAD_TEST_CONTRACT + .contract + .function("execute") + .expect("no `execute` function in load test contract"); + let calldata = execute_function + .encode_input(&vec![ + Token::Uint(U256::from(params.reads)), + Token::Uint(U256::from(params.writes)), + Token::Uint(U256::from(params.hashes)), + Token::Uint(U256::from(params.events)), + Token::Uint(U256::from(params.recursive_calls)), + Token::Uint(U256::from(params.deploys)), +
]) + .expect("cannot encode `execute` inputs"); + + let mut signed = L2Tx::new_signed( + *LOAD_TEST_CONTRACT_ADDRESS, + calldata, + Nonce(nonce), + tx_fee(gas_limit), + U256::zero(), + L2ChainId::from(270), + &PRIVATE_KEY, + LOAD_TEST_CONTRACT.factory_deps.clone(), + Default::default(), + ) + .expect("should create a signed execute transaction"); + + signed.set_input(H256::random().as_bytes().to_vec(), H256::random()); + signed.into() +} + +pub fn get_realistic_load_test_tx(nonce: u32) -> Transaction { + get_load_test_tx( + nonce, + 10_000_000, + LoadTestParams { + reads: 30, + writes: 2, + events: 5, + hashes: 10, + recursive_calls: 0, + deploys: 0, + }, + ) +} + +pub fn get_heavy_load_test_tx(nonce: u32) -> Transaction { + get_load_test_tx( + nonce, + 10_000_000, + LoadTestParams { + reads: 100, + writes: 5, + events: 20, + hashes: 100, + recursive_calls: 20, + deploys: 5, + }, + ) +} diff --git a/core/tests/vm-benchmark/harness/src/lib.rs b/core/tests/vm-benchmark/src/vm.rs similarity index 54% rename from core/tests/vm-benchmark/harness/src/lib.rs rename to core/tests/vm-benchmark/src/vm.rs index 6460d25a8e8d..e805554d5584 100644 --- a/core/tests/vm-benchmark/harness/src/lib.rs +++ b/core/tests/vm-benchmark/src/vm.rs @@ -1,51 +1,27 @@ use std::{cell::RefCell, rc::Rc}; use once_cell::sync::Lazy; -pub use zksync_contracts::test_contracts::LoadnextContractExecutionParams as LoadTestParams; -use zksync_contracts::{deployer_contract, BaseSystemContracts, TestContract}; +use zksync_contracts::BaseSystemContracts; use zksync_multivm::{ interface::{ storage::{InMemoryStorage, StorageView}, ExecutionResult, L1BatchEnv, L2BlockEnv, SystemEnv, TxExecutionMode, VmExecutionMode, VmExecutionResultAndLogs, VmFactory, VmInterface, VmInterfaceHistoryEnabled, }, - utils::get_max_gas_per_pubdata_byte, vm_fast, vm_latest, vm_latest::{constants::BATCH_COMPUTATIONAL_GAS_LIMIT, HistoryEnabled}, + zk_evm_latest::ethereum_types::{Address, U256}, }; use zksync_types::{ - block::L2BlockHasher, - ethabi::{encode, Token}, - fee::Fee, - fee_model::BatchFeeInput, - helpers::unix_timestamp_ms, - l2::L2Tx, - utils::{deployed_address_create, storage_key_for_eth_balance}, - Address, K256PrivateKey, L1BatchNumber, L2BlockNumber, L2ChainId, Nonce, ProtocolVersionId, - Transaction, CONTRACT_DEPLOYER_ADDRESS, H256, U256, + block::L2BlockHasher, fee_model::BatchFeeInput, helpers::unix_timestamp_ms, + utils::storage_key_for_eth_balance, L1BatchNumber, L2BlockNumber, L2ChainId, ProtocolVersionId, + Transaction, }; use zksync_utils::bytecode::hash_bytecode; -mod instruction_counter; +use crate::transaction::PRIVATE_KEY; -/// Bytecodes have consist of an odd number of 32 byte words -/// This function "fixes" bytecodes of wrong length by cutting off their end. -pub fn cut_to_allowed_bytecode_size(bytes: &[u8]) -> Option<&[u8]> { - let mut words = bytes.len() / 32; - if words == 0 { - return None; - } - - if words & 1 == 0 { - words -= 1; - } - Some(&bytes[..32 * words]) -} - -const LOAD_TEST_MAX_READS: usize = 100; - -static LOAD_TEST_CONTRACT_ADDRESS: Lazy
= - Lazy::new(|| deployed_address_create(PRIVATE_KEY.address(), 0.into())); +static SYSTEM_CONTRACTS: Lazy = Lazy::new(BaseSystemContracts::load_from_disk); static STORAGE: Lazy = Lazy::new(|| { let mut storage = InMemoryStorage::with_system_contracts(hash_bytecode); @@ -56,20 +32,6 @@ static STORAGE: Lazy = Lazy::new(|| { storage }); -static SYSTEM_CONTRACTS: Lazy = Lazy::new(BaseSystemContracts::load_from_disk); - -static LOAD_TEST_CONTRACT: Lazy = Lazy::new(zksync_contracts::get_loadnext_contract); - -static CREATE_FUNCTION_SIGNATURE: Lazy<[u8; 4]> = Lazy::new(|| { - deployer_contract() - .function("create") - .unwrap() - .short_signature() -}); - -static PRIVATE_KEY: Lazy = - Lazy::new(|| K256PrivateKey::from_bytes(H256([42; 32])).expect("invalid key bytes")); - /// VM label used to name `criterion` benchmarks. #[derive(Debug, Clone, Copy)] pub enum VmLabel { @@ -229,178 +191,17 @@ impl BenchmarkingVm { } } -pub fn get_deploy_tx(code: &[u8]) -> Transaction { - get_deploy_tx_with_gas_limit(code, 30_000_000, 0) -} - -pub fn get_deploy_tx_with_gas_limit(code: &[u8], gas_limit: u32, nonce: u32) -> Transaction { - let mut salt = vec![0_u8; 32]; - salt[28..32].copy_from_slice(&nonce.to_be_bytes()); - let params = [ - Token::FixedBytes(salt), - Token::FixedBytes(hash_bytecode(code).0.to_vec()), - Token::Bytes([].to_vec()), - ]; - let calldata = CREATE_FUNCTION_SIGNATURE - .iter() - .cloned() - .chain(encode(¶ms)) - .collect(); - - let mut signed = L2Tx::new_signed( - CONTRACT_DEPLOYER_ADDRESS, - calldata, - Nonce(nonce), - tx_fee(gas_limit), - U256::zero(), - L2ChainId::from(270), - &PRIVATE_KEY, - vec![code.to_vec()], // maybe not needed? - Default::default(), - ) - .expect("should create a signed execute transaction"); - - signed.set_input(H256::random().as_bytes().to_vec(), H256::random()); - signed.into() -} - -fn tx_fee(gas_limit: u32) -> Fee { - Fee { - gas_limit: U256::from(gas_limit), - max_fee_per_gas: U256::from(250_000_000), - max_priority_fee_per_gas: U256::from(0), - gas_per_pubdata_limit: U256::from(get_max_gas_per_pubdata_byte( - ProtocolVersionId::latest().into(), - )), - } -} - -pub fn get_transfer_tx(nonce: u32) -> Transaction { - let mut signed = L2Tx::new_signed( - PRIVATE_KEY.address(), - vec![], // calldata - Nonce(nonce), - tx_fee(1_000_000), - 1_000_000_000.into(), // value - L2ChainId::from(270), - &PRIVATE_KEY, - vec![], // factory deps - Default::default(), // paymaster params - ) - .expect("should create a signed execute transaction"); - - signed.set_input(H256::random().as_bytes().to_vec(), H256::random()); - signed.into() -} - -pub fn get_load_test_deploy_tx() -> Transaction { - let calldata = [Token::Uint(LOAD_TEST_MAX_READS.into())]; - let params = [ - Token::FixedBytes(vec![0_u8; 32]), - Token::FixedBytes(hash_bytecode(&LOAD_TEST_CONTRACT.bytecode).0.to_vec()), - Token::Bytes(encode(&calldata)), - ]; - let create_calldata = CREATE_FUNCTION_SIGNATURE - .iter() - .cloned() - .chain(encode(¶ms)) - .collect(); - - let mut factory_deps = LOAD_TEST_CONTRACT.factory_deps.clone(); - factory_deps.push(LOAD_TEST_CONTRACT.bytecode.clone()); - - let mut signed = L2Tx::new_signed( - CONTRACT_DEPLOYER_ADDRESS, - create_calldata, - Nonce(0), - tx_fee(100_000_000), - U256::zero(), - L2ChainId::from(270), - &PRIVATE_KEY, - factory_deps, - Default::default(), - ) - .expect("should create a signed execute transaction"); - - signed.set_input(H256::random().as_bytes().to_vec(), H256::random()); - signed.into() -} - -pub fn get_load_test_tx(nonce: u32, gas_limit: u32, params: 
LoadTestParams) -> Transaction { - assert!( - params.reads <= LOAD_TEST_MAX_READS, - "Too many reads: {params:?}, should be <={LOAD_TEST_MAX_READS}" - ); - - let execute_function = LOAD_TEST_CONTRACT - .contract - .function("execute") - .expect("no `execute` function in load test contract"); - let calldata = execute_function - .encode_input(&vec![ - Token::Uint(U256::from(params.reads)), - Token::Uint(U256::from(params.writes)), - Token::Uint(U256::from(params.hashes)), - Token::Uint(U256::from(params.events)), - Token::Uint(U256::from(params.recursive_calls)), - Token::Uint(U256::from(params.deploys)), - ]) - .expect("cannot encode `execute` inputs"); - - let mut signed = L2Tx::new_signed( - *LOAD_TEST_CONTRACT_ADDRESS, - calldata, - Nonce(nonce), - tx_fee(gas_limit), - U256::zero(), - L2ChainId::from(270), - &PRIVATE_KEY, - LOAD_TEST_CONTRACT.factory_deps.clone(), - Default::default(), - ) - .expect("should create a signed execute transaction"); - - signed.set_input(H256::random().as_bytes().to_vec(), H256::random()); - signed.into() -} - -pub fn get_realistic_load_test_tx(nonce: u32) -> Transaction { - get_load_test_tx( - nonce, - 10_000_000, - LoadTestParams { - reads: 30, - writes: 2, - events: 5, - hashes: 10, - recursive_calls: 0, - deploys: 0, - }, - ) -} - -pub fn get_heavy_load_test_tx(nonce: u32) -> Transaction { - get_load_test_tx( - nonce, - 10_000_000, - LoadTestParams { - reads: 100, - writes: 5, - events: 20, - hashes: 100, - recursive_calls: 20, - deploys: 5, - }, - ) -} - #[cfg(test)] mod tests { use assert_matches::assert_matches; use zksync_contracts::read_bytecode; use zksync_multivm::interface::ExecutionResult; - use crate::*; + use super::*; + use crate::{ + get_deploy_tx, get_heavy_load_test_tx, get_load_test_deploy_tx, get_load_test_tx, + get_realistic_load_test_tx, get_transfer_tx, LoadTestParams, + }; #[test] fn can_deploy_contract() { diff --git a/core/tests/vm-benchmark/src/with_prometheus.rs b/core/tests/vm-benchmark/src/with_prometheus.rs deleted file mode 100644 index f9b79adedc09..000000000000 --- a/core/tests/vm-benchmark/src/with_prometheus.rs +++ /dev/null @@ -1,27 +0,0 @@ -use std::time::Duration; - -use tokio::sync::watch; -use zksync_vlog::prometheus::PrometheusExporterConfig; - -pub fn with_prometheus(f: F) { - tokio::runtime::Runtime::new() - .unwrap() - .block_on(with_prometheus_async(f)); -} - -async fn with_prometheus_async(f: F) { - println!("Pushing results to Prometheus"); - - let endpoint = - "http://vmagent.stage.matterlabs.corp/api/v1/import/prometheus/metrics/job/vm-benchmark"; - let (stop_sender, stop_receiver) = watch::channel(false); - let prometheus_config = - PrometheusExporterConfig::push(endpoint.to_owned(), Duration::from_millis(100)); - tokio::spawn(prometheus_config.run(stop_receiver)); - - f(); - - println!("Waiting for push to happen..."); - tokio::time::sleep(Duration::from_secs(1)).await; - stop_sender.send_replace(true); -} From a6b6e829550a8f84dc4b96748028caee07624dec Mon Sep 17 00:00:00 2001 From: zksync-era-bot <147085853+zksync-era-bot@users.noreply.github.com> Date: Tue, 27 Aug 2024 17:40:01 +0300 Subject: [PATCH 26/39] chore(main): release core 24.22.0 (#2721) :robot: I have created a release *beep* *boop* --- ## [24.22.0](https://github.com/matter-labs/zksync-era/compare/core-v24.21.0...core-v24.22.0) (2024-08-27) ### Features * add flag to enable/disable DA inclusion verification ([#2647](https://github.com/matter-labs/zksync-era/issues/2647)) 
([b425561](https://github.com/matter-labs/zksync-era/commit/b4255618708349c51f60f5c7fc26f9356d32b6ff)) * **Base token:** add cbt metrics ([#2720](https://github.com/matter-labs/zksync-era/issues/2720)) ([58438eb](https://github.com/matter-labs/zksync-era/commit/58438eb174c30edf62e2ff8abb74567de2a4bea8)) * Change default_protective_reads_persistence_enabled to false ([#2716](https://github.com/matter-labs/zksync-era/issues/2716)) ([8d0eee7](https://github.com/matter-labs/zksync-era/commit/8d0eee7ca8fe117b2ee286c6080bfa0057ee31ae)) * **vm:** Extract oneshot VM executor interface ([#2671](https://github.com/matter-labs/zksync-era/issues/2671)) ([951d5f2](https://github.com/matter-labs/zksync-era/commit/951d5f208e5d16a5d95878dd345a8bd2a4144aa7)) * **zk_toolbox:** Add holesky testnet as layer1 network ([#2632](https://github.com/matter-labs/zksync-era/issues/2632)) ([d9266e5](https://github.com/matter-labs/zksync-era/commit/d9266e5ef3910732666c00c1324256fb5b54452d)) ### Bug Fixes * **api:** `tx.gas_price` field ([#2734](https://github.com/matter-labs/zksync-era/issues/2734)) ([aea3726](https://github.com/matter-labs/zksync-era/commit/aea3726c88b4e881bcd0f4a60ff32a730f200938)) * **base_token_adjuster:** bug with a wrong metrics namespace ([#2744](https://github.com/matter-labs/zksync-era/issues/2744)) ([64b2ff8](https://github.com/matter-labs/zksync-era/commit/64b2ff8b81dcc146cd0535eb0d2d898c18ad5f7f)) * **eth-sender:** missing Gateway migration changes ([#2732](https://github.com/matter-labs/zksync-era/issues/2732)) ([a4170e9](https://github.com/matter-labs/zksync-era/commit/a4170e9e7f321a1062495ec586e0ce9186269088)) * **proof_data_handler:** TEE blob fetching error handling ([#2674](https://github.com/matter-labs/zksync-era/issues/2674)) ([c162510](https://github.com/matter-labs/zksync-era/commit/c162510598b45dc062c2c91085868f8aa966360e)) --- This PR was generated with [Release Please](https://github.com/googleapis/release-please). See [documentation](https://github.com/googleapis/release-please#release-please). 
--------- Co-authored-by: zksync-era-bot --- .github/release-please/manifest.json | 2 +- Cargo.lock | 2 +- core/CHANGELOG.md | 19 +++++++++++++++++++ core/bin/external_node/Cargo.toml | 2 +- 4 files changed, 22 insertions(+), 3 deletions(-) diff --git a/.github/release-please/manifest.json b/.github/release-please/manifest.json index 232939b78334..e714062266ea 100644 --- a/.github/release-please/manifest.json +++ b/.github/release-please/manifest.json @@ -1,5 +1,5 @@ { - "core": "24.21.0", + "core": "24.22.0", "prover": "16.4.0", "zk_toolbox": "0.1.2" } diff --git a/Cargo.lock b/Cargo.lock index 54714b21af2b..39058d09f540 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -8680,7 +8680,7 @@ dependencies = [ [[package]] name = "zksync_external_node" -version = "24.21.0" +version = "24.22.0" dependencies = [ "anyhow", "assert_matches", diff --git a/core/CHANGELOG.md b/core/CHANGELOG.md index cc0590a79d20..5464a8b10098 100644 --- a/core/CHANGELOG.md +++ b/core/CHANGELOG.md @@ -1,5 +1,24 @@ # Changelog +## [24.22.0](https://github.com/matter-labs/zksync-era/compare/core-v24.21.0...core-v24.22.0) (2024-08-27) + + +### Features + +* add flag to enable/disable DA inclusion verification ([#2647](https://github.com/matter-labs/zksync-era/issues/2647)) ([b425561](https://github.com/matter-labs/zksync-era/commit/b4255618708349c51f60f5c7fc26f9356d32b6ff)) +* **Base token:** add cbt metrics ([#2720](https://github.com/matter-labs/zksync-era/issues/2720)) ([58438eb](https://github.com/matter-labs/zksync-era/commit/58438eb174c30edf62e2ff8abb74567de2a4bea8)) +* Change default_protective_reads_persistence_enabled to false ([#2716](https://github.com/matter-labs/zksync-era/issues/2716)) ([8d0eee7](https://github.com/matter-labs/zksync-era/commit/8d0eee7ca8fe117b2ee286c6080bfa0057ee31ae)) +* **vm:** Extract oneshot VM executor interface ([#2671](https://github.com/matter-labs/zksync-era/issues/2671)) ([951d5f2](https://github.com/matter-labs/zksync-era/commit/951d5f208e5d16a5d95878dd345a8bd2a4144aa7)) +* **zk_toolbox:** Add holesky testnet as layer1 network ([#2632](https://github.com/matter-labs/zksync-era/issues/2632)) ([d9266e5](https://github.com/matter-labs/zksync-era/commit/d9266e5ef3910732666c00c1324256fb5b54452d)) + + +### Bug Fixes + +* **api:** `tx.gas_price` field ([#2734](https://github.com/matter-labs/zksync-era/issues/2734)) ([aea3726](https://github.com/matter-labs/zksync-era/commit/aea3726c88b4e881bcd0f4a60ff32a730f200938)) +* **base_token_adjuster:** bug with a wrong metrics namespace ([#2744](https://github.com/matter-labs/zksync-era/issues/2744)) ([64b2ff8](https://github.com/matter-labs/zksync-era/commit/64b2ff8b81dcc146cd0535eb0d2d898c18ad5f7f)) +* **eth-sender:** missing Gateway migration changes ([#2732](https://github.com/matter-labs/zksync-era/issues/2732)) ([a4170e9](https://github.com/matter-labs/zksync-era/commit/a4170e9e7f321a1062495ec586e0ce9186269088)) +* **proof_data_handler:** TEE blob fetching error handling ([#2674](https://github.com/matter-labs/zksync-era/issues/2674)) ([c162510](https://github.com/matter-labs/zksync-era/commit/c162510598b45dc062c2c91085868f8aa966360e)) + ## [24.21.0](https://github.com/matter-labs/zksync-era/compare/core-v24.20.0...core-v24.21.0) (2024-08-22) diff --git a/core/bin/external_node/Cargo.toml b/core/bin/external_node/Cargo.toml index 33a460daba50..558de140628a 100644 --- a/core/bin/external_node/Cargo.toml +++ b/core/bin/external_node/Cargo.toml @@ -1,7 +1,7 @@ [package] name = "zksync_external_node" description = "Non-validator ZKsync node" 
-version = "24.21.0" # x-release-please-version +version = "24.22.0" # x-release-please-version edition.workspace = true authors.workspace = true homepage.workspace = true From 589e12215879f8822e803f98016358f29ee989c4 Mon Sep 17 00:00:00 2001 From: Danil Date: Tue, 27 Aug 2024 19:14:47 +0200 Subject: [PATCH 27/39] chore(zk_toolbox): Update readme (#2749) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit ## What ❔ ## Why ❔ ## Checklist - [ ] PR title corresponds to the body of PR (we generate changelog entries from PRs). - [ ] Tests for the changes have been added / updated. - [ ] Documentation comments have been added / updated. - [ ] Code has been formatted via `zk fmt` and `zk lint`. Signed-off-by: Danil --- zk_toolbox/crates/zk_inception/README.md | 89 +++++++++++++++------ zk_toolbox/crates/zk_supervisor/README.md | 97 +++++++++++++++++++++-- 2 files changed, 154 insertions(+), 32 deletions(-) diff --git a/zk_toolbox/crates/zk_inception/README.md b/zk_toolbox/crates/zk_inception/README.md index 73bfb56cfd39..6f4d70b37b55 100644 --- a/zk_toolbox/crates/zk_inception/README.md +++ b/zk_toolbox/crates/zk_inception/README.md @@ -33,6 +33,7 @@ This document contains the help content for the `zk_inception` command-line prog - [`zk_inception contract-verifier`↴](#zk_inception-contract-verifier) - [`zk_inception contract-verifier run`↴](#zk_inception-contract-verifier-run) - [`zk_inception contract-verifier init`↴](#zk_inception-contract-verifier-init) +- [`zk_inception portal`↴](#zk_inception-portal) - [`zk_inception update`↴](#zk_inception-update) ## `zk_inception` @@ -50,6 +51,7 @@ ZK Toolbox is a set of tools for working with zk stack. - `external-node` — External Node related commands - `containers` — Run containers for local development - `contract-verifier` — Run contract verifier +- `portal` — Run dapp-portal - `update` — Update zkSync ###### **Options:** @@ -76,11 +78,7 @@ Ecosystem related commands Create a new ecosystem and chain, setting necessary configurations for later initialization -**Usage:** `zk_inception ecosystem create [OPTIONS] [CHAIN_ID]` - -###### **Arguments:** - -- `` +**Usage:** `zk_inception ecosystem create [OPTIONS]` ###### **Options:** @@ -91,6 +89,7 @@ Create a new ecosystem and chain, setting necessary configurations for later ini - `--link-to-code ` — Code link - `--chain-name ` +- `--chain-id ` — Chain ID - `--prover-mode ` — Prover options Possible values: `no-proofs`, `gpu` @@ -166,7 +165,9 @@ Initialize ecosystem and chain, deploying necessary contracts and performing on- - `-u`, `--use-default` — Use default database urls and names - `-d`, `--dont-drop` - `--dev` — Deploy ecosystem using all defaults. 
Suitable for local development -- `-o`, `--observability` — Enable Grafana +- `-o`, `--observability ` — Enable Grafana + + Possible values: `true`, `false` ## `zk_inception ecosystem change-default-chain` @@ -199,21 +200,18 @@ Chain related commands - `deploy-l2-contracts` — Deploy all l2 contracts - `upgrader` — Deploy Default Upgrader - `deploy-paymaster` — Deploy paymaster smart contract -- `update-token-multiplier-setter` — Update Token Multiplier Setter address on l1 +- `update-token-multiplier-setter` — Update Token Multiplier Setter address on L1 ## `zk_inception chain create` Create a new chain, setting the necessary configurations for later initialization -**Usage:** `zk_inception chain create [OPTIONS] [CHAIN_ID]` - -###### **Arguments:** - -- `` +**Usage:** `zk_inception chain create [OPTIONS]` ###### **Options:** - `--chain-name ` +- `--chain-id ` — Chain ID - `--prover-mode ` — Prover options Possible values: `no-proofs`, `gpu` @@ -393,11 +391,28 @@ Deploy paymaster smart contract ## `zk_inception chain update-token-multiplier-setter` -Update Token Multiplier Setter address on l1. Token Multiplier Setter is used by chains with custom base token to -propagate the changes to numerator / denominator to the l1. Address of the Token Multiplier Setter is taken from the -wallets configuration. +Update Token Multiplier Setter address on L1 + +**Usage:** `zk_inception chain update-token-multiplier-setter [OPTIONS]` + +###### **Options:** + +- `--verify ` — Verify deployed contracts -**Usage:** `zk_inception chain update-token-multiplier-setter` + Possible values: `true`, `false` + +- `--verifier ` — Verifier to use + + Default value: `etherscan` + + Possible values: `etherscan`, `sourcify`, `blockscout`, `oklink` + +- `--verifier-url ` — Verifier URL, if using a custom provider +- `--verifier-api-key ` — Verifier API key +- `--resume` +- `-a`, `--additional-args ` — List of additional arguments that can be passed through the CLI. 
+ + e.g.: `zk_inception init -a --private-key=` ## `zk_inception prover` @@ -428,7 +443,7 @@ Initialize prover - `--project-id ` - `--shall-save-to-public-bucket ` -Possible values: `true`, `false` + Possible values: `true`, `false` - `--public-store-dir ` - `--public-bucket-base-url ` @@ -438,15 +453,24 @@ Possible values: `true`, `false` - `--public-project-id ` - `--bellman-cuda-dir ` - `--download-key ` -- `--setup-database` -- `--use-default` - use default database -- `--dont-drop` - don't drop database -- `--prover-db-url` - URL of database to use -- `--prover-db-name` - Name of database to use -Possible values: `true`, `false` + Possible values: `true`, `false` - `--setup-key-path ` +- `--setup-database ` + + Possible values: `true`, `false` + +- `--prover-db-url ` — Prover database url without database name +- `--prover-db-name ` — Prover database name +- `-u`, `--use-default ` — Use default database urls and names + + Possible values: `true`, `false` + +- `-d`, `--dont-drop ` + + Possible values: `true`, `false` + - `--cloud-type ` Possible values: `gcp`, `local` @@ -467,7 +491,8 @@ Run prover - `--component ` - Possible values: `gateway`, `witness-generator`, `witness-vector-generator`, `prover`, `compressor` + Possible values: `gateway`, `witness-generator`, `witness-vector-generator`, `prover`, `compressor`, + `prover-job-monitor` - `--round ` @@ -549,7 +574,9 @@ Run containers for local development ###### **Options:** -- `-o`, `--observability` — Enable Grafana +- `-o`, `--observability ` — Enable Grafana + + Possible values: `true`, `false` ## `zk_inception contract-verifier` @@ -581,6 +608,18 @@ Download required binaries for contract verifier - `--solc-version ` — Version of solc to install - `--vyper-version ` — Version of vyper to install +## `zk_inception portal` + +Run dapp-portal + +**Usage:** `zk_inception portal [OPTIONS]` + +###### **Options:** + +- `--port ` — The port number for the portal app + + Default value: `3030` + ## `zk_inception update` Update zkSync diff --git a/zk_toolbox/crates/zk_supervisor/README.md b/zk_toolbox/crates/zk_supervisor/README.md index 1f880cdcb30a..c3fac876ace6 100644 --- a/zk_toolbox/crates/zk_supervisor/README.md +++ b/zk_toolbox/crates/zk_supervisor/README.md @@ -5,7 +5,6 @@ This document contains the help content for the `zk_supervisor` command-line pro **Command Overview:** - [`zk_supervisor`↴](#zk_supervisor) -- [`zk_supervisor prover-version`↴](#zk_supervisor-prover-version) - [`zk_supervisor database`↴](#zk_supervisor-database) - [`zk_supervisor database check-sqlx-data`↴](#zk_supervisor-database-check-sqlx-data) - [`zk_supervisor database drop`↴](#zk_supervisor-database-drop) @@ -19,12 +18,21 @@ This document contains the help content for the `zk_supervisor` command-line pro - [`zk_supervisor test revert`↴](#zk_supervisor-test-revert) - [`zk_supervisor test recovery`↴](#zk_supervisor-test-recovery) - [`zk_supervisor test upgrade`↴](#zk_supervisor-test-upgrade) +- [`zk_supervisor test rust`↴](#zk_supervisor-test-rust) +- [`zk_supervisor test l1-contracts`↴](#zk_supervisor-test-l1-contracts) +- [`zk_supervisor test prover`↴](#zk_supervisor-test-prover) - [`zk_supervisor clean`↴](#zk_supervisor-clean) - [`zk_supervisor clean all`↴](#zk_supervisor-clean-all) - [`zk_supervisor clean containers`↴](#zk_supervisor-clean-containers) - [`zk_supervisor clean contracts-cache`↴](#zk_supervisor-clean-contracts-cache) - [`zk_supervisor snapshot`↴](#zk_supervisor-snapshot) - [`zk_supervisor snapshot create`↴](#zk_supervisor-snapshot-create) +- 
[`zk_supervisor lint`↴](#zk_supervisor-lint) +- [`zk_supervisor fmt`↴](#zk_supervisor-fmt) +- [`zk_supervisor fmt rustfmt`↴](#zk_supervisor-fmt-rustfmt) +- [`zk_supervisor fmt contract`↴](#zk_supervisor-fmt-contract) +- [`zk_supervisor fmt prettier`↴](#zk_supervisor-fmt-prettier) +- [`zk_supervisor prover-version`↴](#zk_supervisor-prover-version) ## `zk_supervisor` @@ -38,6 +46,9 @@ ZK Toolbox is a set of tools for working with zk stack. - `test` — Run tests - `clean` — Clean artifacts - `snapshot` — Snapshots creator +- `lint` — Lint code +- `fmt` — Format code +- `prover-version` — Protocol version used by provers ###### **Options:** @@ -45,12 +56,6 @@ ZK Toolbox is a set of tools for working with zk stack. - `--chain ` — Chain to use - `--ignore-prerequisites` — Ignores prerequisites checks -## `zk_supervisor prover-version` - -Gets information about current protocol version of provers in `zksync-era` and snark wrapper hash. - -**Usage:** `zk_supervisor prover-version` - ## `zk_supervisor database` Database related commands @@ -189,6 +194,9 @@ Run tests - `revert` — Run revert tests - `recovery` — Run recovery tests - `upgrade` — Run upgrade tests +- `rust` — Run unit-tests, accepts optional cargo test flags +- `l1-contracts` — Run L1 contracts tests +- `prover` — Run prover tests ## `zk_supervisor test integration` @@ -227,6 +235,28 @@ Run upgrade tests **Usage:** `zk_supervisor test upgrade` +## `zk_supervisor test rust` + +Run unit-tests, accepts optional cargo test flags + +**Usage:** `zk_supervisor test rust [OPTIONS]` + +###### **Options:** + +- `--options ` — Cargo test flags + +## `zk_supervisor test l1-contracts` + +Run L1 contracts tests + +**Usage:** `zk_supervisor test l1-contracts` + +## `zk_supervisor test prover` + +Run prover tests + +**Usage:** `zk_supervisor test prover` + ## `zk_supervisor clean` Clean artifacts @@ -271,6 +301,59 @@ Snapshots creator **Usage:** `zk_supervisor snapshot create` +## `zk_supervisor lint` + +Lint code + +**Usage:** `zk_supervisor lint [OPTIONS]` + +###### **Options:** + +- `-c`, `--check` +- `-e`, `--extensions ` + + Possible values: `md`, `sol`, `js`, `ts`, `rs` + +## `zk_supervisor fmt` + +Format code + +**Usage:** `zk_supervisor fmt [OPTIONS] [COMMAND]` + +###### **Subcommands:** + +- `rustfmt` — +- `contract` — +- `prettier` — + +###### **Options:** + +- `-c`, `--check` + +## `zk_supervisor fmt rustfmt` + +**Usage:** `zk_supervisor fmt rustfmt` + +## `zk_supervisor fmt contract` + +**Usage:** `zk_supervisor fmt contract` + +## `zk_supervisor fmt prettier` + +**Usage:** `zk_supervisor fmt prettier [OPTIONS]` + +###### **Options:** + +- `-e`, `--extensions ` + + Possible values: `md`, `sol`, `js`, `ts`, `rs` + +## `zk_supervisor prover-version` + +Protocol version used by provers + +**Usage:** `zk_supervisor prover-version` +
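For example, a quick local pre-merge pass with the commands documented above could be `zk_supervisor fmt rustfmt`, followed by `zk_supervisor lint --check -e rs`, and then `zk_supervisor test prover`; this flag combination is only illustrative, and the options each command accepts are the ones listed in its section.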
This document was generated automatically by From d3877f636e9b86807923f8b32f9a2495576116aa Mon Sep 17 00:00:00 2001 From: Zach Kolodny Date: Tue, 27 Aug 2024 13:19:02 -0400 Subject: [PATCH 28/39] update protocol version and multivm bootloaders --- contracts | 2 +- core/lib/basic_types/src/protocol_version.rs | 7 ++- core/lib/contracts/src/lib.rs | 14 +++++ .../src/versions/vm_latest/tests/migration.rs | 51 ------------------ .../src/versions/vm_latest/tests/mod.rs | 1 - core/node/api_server/src/tx_sender/mod.rs | 9 ++-- core/node/eth_sender/src/eth_tx_aggregator.rs | 5 -- etc/env/base/chain.toml | 2 +- etc/env/base/contracts.toml | 6 +-- .../fee_estimate.yul/fee_estimate.yul.zbin | Bin 0 -> 75168 bytes .../gas_test.yul/gas_test.yul.zbin | Bin 0 -> 71264 bytes .../playground_batch.yul.zbin | Bin 0 -> 75360 bytes .../proved_batch.yul/proved_batch.yul.zbin | Bin 0 -> 71776 bytes 13 files changed, 30 insertions(+), 67 deletions(-) delete mode 100644 core/lib/multivm/src/versions/vm_latest/tests/migration.rs create mode 100644 etc/multivm_bootloaders/vm_protocol_defense/fee_estimate.yul/fee_estimate.yul.zbin create mode 100644 etc/multivm_bootloaders/vm_protocol_defense/gas_test.yul/gas_test.yul.zbin create mode 100644 etc/multivm_bootloaders/vm_protocol_defense/playground_batch.yul/playground_batch.yul.zbin create mode 100644 etc/multivm_bootloaders/vm_protocol_defense/proved_batch.yul/proved_batch.yul.zbin diff --git a/contracts b/contracts index 75db4f372d9c..2076594154b5 160000 --- a/contracts +++ b/contracts @@ -1 +1 @@ -Subproject commit 75db4f372d9c5dc998626ba45451fad5af359ad7 +Subproject commit 2076594154b53619570e3433b25a98119c8449b4 diff --git a/core/lib/basic_types/src/protocol_version.rs b/core/lib/basic_types/src/protocol_version.rs index 265c06987afd..bf1b5bd26ce8 100644 --- a/core/lib/basic_types/src/protocol_version.rs +++ b/core/lib/basic_types/src/protocol_version.rs @@ -68,15 +68,16 @@ pub enum ProtocolVersionId { Version23, Version24, Version25, + Version26, } impl ProtocolVersionId { pub const fn latest() -> Self { - Self::Version24 + Self::Version25 } pub const fn next() -> Self { - Self::Version25 + Self::Version26 } pub fn try_from_packed_semver(packed_semver: U256) -> Result { @@ -120,6 +121,7 @@ impl ProtocolVersionId { ProtocolVersionId::Version23 => VmVersion::Vm1_5_0SmallBootloaderMemory, ProtocolVersionId::Version24 => VmVersion::Vm1_5_0IncreasedBootloaderMemory, ProtocolVersionId::Version25 => VmVersion::Vm1_5_0IncreasedBootloaderMemory, + ProtocolVersionId::Version26 => VmVersion::Vm1_5_0IncreasedBootloaderMemory, } } @@ -270,6 +272,7 @@ impl From for VmVersion { ProtocolVersionId::Version23 => VmVersion::Vm1_5_0SmallBootloaderMemory, ProtocolVersionId::Version24 => VmVersion::Vm1_5_0IncreasedBootloaderMemory, ProtocolVersionId::Version25 => VmVersion::Vm1_5_0IncreasedBootloaderMemory, + ProtocolVersionId::Version26 => VmVersion::Vm1_5_0IncreasedBootloaderMemory, } } } diff --git a/core/lib/contracts/src/lib.rs b/core/lib/contracts/src/lib.rs index a7ef0e5b26ca..954a4990a65f 100644 --- a/core/lib/contracts/src/lib.rs +++ b/core/lib/contracts/src/lib.rs @@ -399,6 +399,13 @@ impl BaseSystemContracts { BaseSystemContracts::load_with_bootloader(bootloader_bytecode) } + pub fn playground_post_protocol_defense() -> Self { + let bootloader_bytecode = read_zbin_bytecode( + "etc/multivm_bootloaders/vm_protocol_defense/playground_batch.yul/playground_batch.yul.zbin", + ); + BaseSystemContracts::load_with_bootloader(bootloader_bytecode) + } + pub fn 
estimate_gas_pre_virtual_blocks() -> Self { let bootloader_bytecode = read_zbin_bytecode( "etc/multivm_bootloaders/vm_1_3_2/fee_estimate.yul/fee_estimate.yul.zbin", @@ -462,6 +469,13 @@ impl BaseSystemContracts { BaseSystemContracts::load_with_bootloader(bootloader_bytecode) } + pub fn estimate_gas_post_protocol_defense() -> Self { + let bootloader_bytecode = read_zbin_bytecode( + "etc/multivm_bootloaders/vm_protocol_defense/fee_estimate.yul/fee_estimate.yul.zbin", + ); + BaseSystemContracts::load_with_bootloader(bootloader_bytecode) + } + pub fn hashes(&self) -> BaseSystemContractsHashes { BaseSystemContractsHashes { bootloader: self.bootloader.hash, diff --git a/core/lib/multivm/src/versions/vm_latest/tests/migration.rs b/core/lib/multivm/src/versions/vm_latest/tests/migration.rs deleted file mode 100644 index 6bd0e87615ed..000000000000 --- a/core/lib/multivm/src/versions/vm_latest/tests/migration.rs +++ /dev/null @@ -1,51 +0,0 @@ -use zksync_types::{get_code_key, H256, SYSTEM_CONTEXT_ADDRESS}; - -use crate::{ - interface::{TxExecutionMode, VmExecutionMode, VmInterface}, - vm_latest::{ - tests::{ - tester::{get_empty_storage, DeployContractsTx, TxType, VmTesterBuilder}, - utils::read_test_contract, - }, - HistoryEnabled, - }, -}; - -/// This test checks that the new bootloader will work fine even if the previous system context contract is not -/// compatible with it, i.e. the bootloader will upgrade it before starting any transaction. -#[test] -fn test_migration_for_system_context_aa_interaction() { - let mut storage = get_empty_storage(); - // We will set the system context bytecode to zero. - storage.set_value(get_code_key(&SYSTEM_CONTEXT_ADDRESS), H256::zero()); - - // In this test, we aim to test whether a simple account interaction (without any fee logic) - // will work. The account will try to deploy a simple contract from integration tests. - let mut vm = VmTesterBuilder::new(HistoryEnabled) - .with_storage(storage) - .with_execution_mode(TxExecutionMode::VerifyExecute) - .with_random_rich_accounts(1) - .build(); - - // Now, we will just proceed with standard transaction execution. - // The bootloader should be able to update system context regardless of whether - // the upgrade transaction is there or not. - let account = &mut vm.rich_accounts[0]; - let counter = read_test_contract(); - let DeployContractsTx { tx, .. 
} = account.get_deploy_tx(&counter, None, TxType::L2); - - vm.vm.push_transaction(tx); - let result = vm.vm.execute(VmExecutionMode::OneTx); - assert!( - !result.result.is_failed(), - "Transaction wasn't successful {:#?}", - result.result - ); - - let batch_result = vm.vm.execute(VmExecutionMode::Batch); - assert!( - !batch_result.result.is_failed(), - "Batch transaction wasn't successful {:#?}", - batch_result.result - ); -} diff --git a/core/lib/multivm/src/versions/vm_latest/tests/mod.rs b/core/lib/multivm/src/versions/vm_latest/tests/mod.rs index 1203d61b80b7..bc6d5b0144f1 100644 --- a/core/lib/multivm/src/versions/vm_latest/tests/mod.rs +++ b/core/lib/multivm/src/versions/vm_latest/tests/mod.rs @@ -13,7 +13,6 @@ mod get_used_contracts; mod is_write_initial; mod l1_tx_execution; mod l2_blocks; -mod migration; mod nonce_holder; mod precompiles; mod prestate_tracer; diff --git a/core/node/api_server/src/tx_sender/mod.rs b/core/node/api_server/src/tx_sender/mod.rs index cec2e14ddb26..3d9119429e09 100644 --- a/core/node/api_server/src/tx_sender/mod.rs +++ b/core/node/api_server/src/tx_sender/mod.rs @@ -107,6 +107,8 @@ pub struct MultiVMBaseSystemContracts { pub(crate) vm_1_5_0_small_memory: BaseSystemContracts, /// Contracts to be used after the 1.5.0 upgrade pub(crate) vm_1_5_0_increased_memory: BaseSystemContracts, + /// Contracts to be used after the protocol defense upgrade + pub(crate) vm_protocol_defense: BaseSystemContracts, } impl MultiVMBaseSystemContracts { @@ -135,9 +137,8 @@ impl MultiVMBaseSystemContracts { ProtocolVersionId::Version20 => self.post_1_4_1, ProtocolVersionId::Version21 | ProtocolVersionId::Version22 => self.post_1_4_2, ProtocolVersionId::Version23 => self.vm_1_5_0_small_memory, - ProtocolVersionId::Version24 | ProtocolVersionId::Version25 => { - self.vm_1_5_0_increased_memory - } + ProtocolVersionId::Version24 => self.vm_1_5_0_increased_memory, + ProtocolVersionId::Version25 | ProtocolVersionId::Version26 => self.vm_protocol_defense, } } } @@ -181,6 +182,7 @@ impl ApiContracts { vm_1_5_0_small_memory: BaseSystemContracts::estimate_gas_1_5_0_small_memory(), vm_1_5_0_increased_memory: BaseSystemContracts::estimate_gas_post_1_5_0_increased_memory(), + vm_protocol_defense: BaseSystemContracts::estimate_gas_post_protocol_defense(), }, eth_call: MultiVMBaseSystemContracts { pre_virtual_blocks: BaseSystemContracts::playground_pre_virtual_blocks(), @@ -194,6 +196,7 @@ impl ApiContracts { vm_1_5_0_small_memory: BaseSystemContracts::playground_1_5_0_small_memory(), vm_1_5_0_increased_memory: BaseSystemContracts::playground_post_1_5_0_increased_memory(), + vm_protocol_defense: BaseSystemContracts::playground_post_protocol_defense(), }, } } diff --git a/core/node/eth_sender/src/eth_tx_aggregator.rs b/core/node/eth_sender/src/eth_tx_aggregator.rs index 276e62d01a8c..7d6a6b234742 100644 --- a/core/node/eth_sender/src/eth_tx_aggregator.rs +++ b/core/node/eth_sender/src/eth_tx_aggregator.rs @@ -493,11 +493,6 @@ impl EthTxAggregator { .encode_input(&op.into_tokens()) .expect("Failed to encode execute transaction data") } else { - dbg!(self - .functions - .post_shared_bridge_execute - .as_ref() - .expect("Missing ABI for executeBatchesSharedBridge")); args.extend(op.into_tokens()); self.functions .post_shared_bridge_execute diff --git a/etc/env/base/chain.toml b/etc/env/base/chain.toml index 7d0ec3791431..903696e3a819 100644 --- a/etc/env/base/chain.toml +++ b/etc/env/base/chain.toml @@ -90,7 +90,7 @@ fee_model_version = "V2" validation_computational_gas_limit = 300000 
 save_call_traces = true
 
-bootloader_hash = "0x010008e50a84cb5e11b650d08b8040a90cd42203490f09362e5e39f1925234aa"
+bootloader_hash = "0x010008c37ecadea8b003884eb9d81fdfb7161b3b309504e5318f15da19c500d8"
 default_aa_hash = "0x0100055da70d970f98ca4677a4b2fcecef5354f345cc5c6d13a78339e5fd87a9"
 
 protective_reads_persistence_enabled = false
diff --git a/etc/env/base/contracts.toml b/etc/env/base/contracts.toml
index a01979ad6e88..c69a5541d7c3 100644
--- a/etc/env/base/contracts.toml
+++ b/etc/env/base/contracts.toml
@@ -27,12 +27,12 @@ RECURSION_LEAF_LEVEL_VK_HASH = "0x101e08b00193e529145ee09823378ef51a3bc896650406
 RECURSION_CIRCUITS_SET_VKS_HASH = "0x18c1639094f58177409186e8c48d9f577c9410901d2f1d486b3e7d6cf553ae4c"
 GENESIS_TX_HASH = "0xb99ebfea46cbe05a21cd80fe5597d97b204befc52a16303f579c607dc1ac2e2e"
 GENESIS_ROOT = "0x28a7e67393021f957572495f8fdadc2c477ae3f4f413ae18c16cff6ee65680e2"
-GENESIS_BATCH_COMMITMENT = "0xf4d78dee7dbe63223c5de200e6cbce37dceff86c07421b7c96e0ba142db8a14e"
+GENESIS_BATCH_COMMITMENT = "0xc57085380434970021d87774b377ce1bb12f5b6064af11595e70011965747def"
 PRIORITY_TX_MAX_GAS_LIMIT = 72000000
 DEPLOY_L2_BRIDGE_COUNTERPART_GAS_LIMIT = 10000000
 GENESIS_ROLLUP_LEAF_INDEX = "54"
-GENESIS_PROTOCOL_VERSION = "24"
-GENESIS_PROTOCOL_SEMANTIC_VERSION = "0.24.2"
+GENESIS_PROTOCOL_VERSION = "25"
+GENESIS_PROTOCOL_SEMANTIC_VERSION = "0.25.2"
 L1_WETH_BRIDGE_IMPL_ADDR = "0x5E6D086F5eC079ADFF4FB3774CDf3e8D6a34F7E9"
 L1_WETH_BRIDGE_PROXY_ADDR = "0x5E6D086F5eC079ADFF4FB3774CDf3e8D6a34F7E9"
 L1_WETH_TOKEN_ADDR = "0x5E6D086F5eC079ADFF4FB3774CDf3e8D6a34F7E9"
diff --git a/etc/multivm_bootloaders/vm_protocol_defense/fee_estimate.yul/fee_estimate.yul.zbin b/etc/multivm_bootloaders/vm_protocol_defense/fee_estimate.yul/fee_estimate.yul.zbin
new file mode 100644
index 0000000000000000000000000000000000000000..f1e4fea448d452188464bfb65811f54bba94105c
GIT binary patch
literal 75168
[base85-encoded bootloader bytecode omitted]

literal 0
HcmV?d00001

diff --git a/etc/multivm_bootloaders/vm_protocol_defense/gas_test.yul/gas_test.yul.zbin b/etc/multivm_bootloaders/vm_protocol_defense/gas_test.yul/gas_test.yul.zbin
new file mode 100644
index 0000000000000000000000000000000000000000..febc7363df05767f19a7ae0684e355711ca4ac76
GIT binary patch
literal 71264
[base85-encoded bootloader bytecode omitted]

literal 0
HcmV?d00001

diff --git a/etc/multivm_bootloaders/vm_protocol_defense/playground_batch.yul/playground_batch.yul.zbin b/etc/multivm_bootloaders/vm_protocol_defense/playground_batch.yul/playground_batch.yul.zbin
new file mode 100644
index 0000000000000000000000000000000000000000..8a27d4617fdb725b033fc5cb6f06443101a8cede
GIT binary patch
literal 75360
[base85-encoded bootloader bytecode omitted]
zk>53lM$tVA%W?mrgS9r2bJTA1%l(u?@v%;oS02(^`38z})H2+jr4;OF^&WHs`RV>b z=c3jNdw!1oLHEq-kGzv#!1hRXb9_3*&%))kzrcPT(pUI+KN87De9U-L-=%bx6G_#B zc3bu)i7nz^TJICOi$95TKxVxmdQWWBPF!0;Z(7%gi8s^n+~c5!D|TY}dtgCm*wX<# zp8o=P2_I>_X5z(PG5yl<;#1Y%UftiXQvIR+*z>i~{ry^~zr?@5j~%!q)pDsdHH`Du z^}G@}wZ7#1-C840A(ye1V9U#Cb6(8IOL`}HV9CoF^PSoMWZJWW=My?xB~H)s61j*0 z2V2*cl|TM|F*Gz}FOoYkG#K0`Q|A68NG`F5toxI?{PgJkNu+<-esZ_K{wm;2{1}?I z?Dx6X&^T7$8n02Tf8gA|&XajN~NrFVCM4Us?LW{fGY|{eFw?cfV1tpdHiBlWu{P;UX$HRqK)q@x|~KU(Qz*AKQ{Tu#oqj|)C6 z?=W8={|FE5X9j*ce=PePoWwM7IhOHd{{!|d+|BQjek?)v;3$6|g>vNHp6_bCCHrLS z=g*J&dQSXyG(1K29qCoe1q|`zZ0U*c!TlV^!eX&jQ`#S^f}sK{C4|)uL}ORihRg@utD>i;xFO(_vI?}*D9ZE{O3jRlfD=G^#zn3lJ~ZM zBX(q<58~~}0B-B+->8CL?BJk$jri-9v(&%vIJ_uAr~2hM?L!UYU(sOv>=%XP)R!8J zpZ4{H+yBJ|e;<{*G|FQ<-xAokWQT*;+dT)JcS^njCatI#Jzd`wG z{9CI(XxqWuUIe>l8?VymBU>J3{vka>=U8dIg!B&0Z_zvZnJzCWOBeC~0(d-K0=P{V zDId_y)@yP;C4k%I?Kt28OD>prt}U0lL@qb~UW4SqT~WEP2^oI zO!Gn6xajXj=Shhit`)y+FTbn7`0aUeR|J14fBzQq_Z4ONo8-pw)&L&Q-vQj>Pq!L= z;s2m~jrdE(<83waKja7VX}x;!=_voV;GVbtC-~o97qIovh6w*aEY`X1KzbzX4_($^ z{C4{&*_{ zFGgy)_0)IUcZ*MSudwl;_o%r?m=3d?)zr|h$yty&$^W~c@ z_b+KMetUkdtBk+wI`+f1T(ac(A4TOk?r|A#-bQjfH9+@|B$kz@5e)F!!`N`u-7pkBWou z%soQo)B66A^qKlzku&9<5!vU8@2q=r|A^D4)B7Z!bF~9s9)C-yJtj_O;$v0gJG9=u zeCF|;=Al{gtu5E=asPPbad$r%_3vD=7Yb0$Yp^eBfVy9&o;B+cAE?rfYWm0SXFKqP z<@YBVjNe|Tf3(5)dHneeK{<&hzJk(2`yK6gpTt9Ezi+r2exmpCQ2p`omY{w+ zo>L7!(S0uQlO0h5e$^>)PW!tMCF5_eXVJLOd4XQ=HR}h+Y3PSV>x_P=ZyjY3)+`FL z9>eQci?$g3VD8mQK2PqanwxbjqaVzD%vw(bac^1A3i!b0Q%vZ(&&h=MuiEQbW#ZP0 z_Lrf5cRJ7Oerjgxh>P}W`gf;zeG=1C#^KE+k}8ji&pzqVn3m z56|yUWo1!RMInPn=@9KRH9Xf2l!q|6Ccm+v_wcIj8*Y%N_mQDz_Ww-@y|! z|9HRski+}xVb}4wq=wmbw*B%k8gGC7kk zmRWpe&Y4;ByVWbhXI-_))mGe`&w8yZDpyPP3CuB{QGE{dYwKou!rpli7Ity|RlmO^ z`i}HXI{yWn|D`2xoHt~A8yS9a3H&k*2TyUnD>>iVl6+Ti`g%JZ{O6?5M!Lrq?Y$xj z$G)9t`>(X#H(KwTtoP04UDreRmCz1qx_2v~rLVFy6q>+txrn(Lhx97HUziqSx{TwU#0JN{7SU{_>Q31f%@Ft zocH^k;d8s8cH8zBw<`p%x8MIv{a&id(V6I^Iz#Z-`a*D<&a|#Y_H>@lTj2!=dx+2E zJ4EjHseXEg|8ySTKSX?qNh@*on(5Bx)O>yeBG`AGc;I{GiE%#XHNfA8@XUR*DY=i< zOUZq-z3IG;3+2h~Z~c^!kCpGGMGdj{(Y8K>?y<055Z&kJ_N57KhrN%z6ySQFP&0e zdVt~&CHkMf1A05%XAHS^5r0)t<`Gz!_#vC;bj$;yFMmhWz|UZQ@why=K#!v?hy0A! 
z{THH_f+mf^ofEjvcu;}rAQ;bUeV(-Jce_%N?@}aA;xy$Q;79lw_h6nR6zUt~TP<|^ z%=b(an|L0Z?~lnn+}M=}4&-_hHQmSE_JGJ+^Zg-s-yZ7C2L0oDTk%f&!zejYzk5NF z@2P0R{636!*ZRHbwohC2=ed5mt~%Q9R?v&)nMzy|(|<&LlLo%BmhWn}ec7sii0j8C zQda%t>c#iHYpU-=q`oBXt?|^`F?L`pzTX7Z@_mwvb$?Pxz9)O}5t+|8pG*7)sRmI1 zdbq8rjp9FO`K?mvJjwaIjH0uE!S5f#b3gYI#W9}Of_%RcQaaviP z^8Gu$Jfik`&O5l(bia3Xxs*T$2P^1DI?uNo*TuokD-s`>^*NHim>2D^V~;a`@%eUS z(Dz>a8|DrCr*_1T2<}#v&lpwILwwY)2ZAu?W$ydNINkJNWO&V7%MMBa7!M8YS>w$VZuE)dJ zFVjXJa(U!~Mf)M-%|!3lZO462JRV=8ela%r-UjdU$2;lEHN=)R$VXC}qCOYU>qq|FzK@ zhHn1%gA{*%!u_%CcMJ3o%QgKSQeKDB{dh5+zT9&PBKMf?Dhd9BoDRu7X9dZjqzd#% zf#^Z^wTRp_-?fRCpS8*{qc^gZ@w;P5UqAFwd(1w0x`&zMkV^Cs{Yl*NJFb7pQ@@OT zX7mHyClKz3PnSe|f9ll-O;&wn<$qGk|IA`NKIcn+Fuv43%s1%|;t*tSp3rg(>?d+w z*ON+mB*A0X$4L?2|doKNfTN2J$jIUm&n$9z42^;5`=%J=#hxi4~b zCi46pdS0>oDleBBHm`p2>v{F-d4+jL7wwR|(DSOT-^@GMPvG&kRhXKhFHViTfA-gW z`QM8#6Gq;beAgjH-t)cRV&5CNAGPn7{GyuNPm4Uy<2yk)@b!)a`DrBYqjo#|7luP$ zRgw4deIv2!tI2(8KiT`$B{M2y5qi3DF+YsJml(2 z=h^@Jl#VYTF3ID!1mCrK6W|W^ogvVqqSC&p=*$MsP?-k#2{5@%}pBkh4*$?fS) zY?L@>lE1&zRy6zueq?@2=Y=maS4;ckJ6LJ@4i@u~8s@lBz*pR_wpZ%zP42V4EMjW9}fiby@|^H_F`W7I0xhQ-1uiSos2!t-#u=>$v3oMm&O+o2z19o@3Yr(vDW=`n7+&}WtsJx%IP1_ZiaI@8)yq~x2 zCuaU>z4@$>XQyUAZ#(K^z4=Q$j-M&R=YM3`7qkvW@}Sp=>|=u+!TX-tdv!k$IT3k} zeb8(N)OIgJ`%*(63>?`N8x4K59g&{f^S@MI?7Xj)9)$+c1G1yQa_G6ChriE8>yh@K zm!Z$U3cs}b{De5|9h4CFR@A-Ux~*G8Xnpmm6_eN%b= zI+|h0h5x9mciVqZH=nTHZGYU73#VA`8aX<%eTsEG~YT{oB zQntKa4dsmAJ>>j@z&*U~Bu_)hh{9LHgQY3UUvegHi{yBe@=hll|^ zSuf$q_Lb7()G;9KwZE$|ZTv0SmxFOJ<5y*!(To#+j~L@LLqBinUTL>{U!&s>%JHYP zD+G_{&!D``pG1c+T`+8H-+GS3Bk~E3hh#n6+s)&uR_c8%lAgEHhje~SL%mKb`G<&4 ziSFu5D({>?K7RN2p_uhMURO-^COw9GD93S_G%eT^rj_j#c= z#J5Vn_nP<|;vXmv@o=t&uOYjd#0I}(yH=js1N}+sY8|f<`X{`~_}vTAtmis*n*A!q zzM5e@*b8@@=&I?SKs;9H(%}BQQTJ!a#*gnWF}-9R7=BGo>Ua8tUYQjd-Y)b4-&20f z2cZ}AF3}5Pgnp#KPI}*o<}0)Asn<=GKB?t_iN~x(oC;;JUxfJu@#g`t8%4)UMEaq`-&LL)yPOPvX-_vmPn%$a-YS{PXpI8j9?HqyCBfKznI?FvJ%G>j>34~i7n#cCOIoiK67=lO3W76WUhyG!@(uH(?&`LBCGdaz(}! 
z0MTDr(npo-7W%H&@{nB;-=4tkvD?GtQ5`Cz_@b>3XKk;{ksj3ZFQ9LHe*?JPUrpbo zpt(cxnBLiMhkPTyy+QHRj(Zw}-;NjFL;L`KnJfG{p4u?{c0BUv9JgP`Q|GMxI-c4v z{PsESn}t5{=b<6z**}7Qkmn!j=l%Nmet8mKv)`2EU)U6kC++hEau{rZ-=y5r0V+@~ zw$CU(`BO>dDgJIAyj5c-(D!;#8t){xJZNL+X;dTWY2FV+`)^xO9uvvo?~!(XQsi34 zUqt5vmQh*mDII6+!dl>QnvYgFrOrrT#Sa^>(R8X;6&!w7&SJ7tI*xHU(ht->I5XUS zl}#W&_lx~5(wo$8$~D02J2dav??OAU50vcuY09td2-n#2ScX%BUEk35JfW%LcVU5w z>=`dD=ONNqcZcoQ@r2e})I@^U=x=p=3*905^kV&z?64nee9w`;)$xoTFHJ{`uH|oG z0Lbocg1v`v>$+6u_drjw^RvEw=kM@#{g&`amgJp`8{fyFR^y%W>+$Y-m)4W3odM=Q z*p0%6DdvYR#6Ou%{(5NI_yHUjAqEh;HtGj-9g+5BF@Rep zFGWhf*!UYw{mwY@_nYxeUB~!U)0EiJU`-ltr%BtychlUea-nh326G9QCR!_z7+51U_n47tRqn-0lp2hq&tjzaQ(k9iC}- zW(&7Zm+yK=(`_2<;CbD|`w_ctH1w49@va5B-C0a2)T@TfdUl56lU?`u`GsyOd7jWC zy;jQ9ma|32;q{44O0U$436r8mj*p!0e&9;O|MGp+H^`#jSr zX-7hTpBa3}?TGFh4aP;jQ{~?qgYC^i|HK(GF4(`%D*?GN1x_< z+D}0J!MI5O5?i<(UfL77Czwz5cw|8aK0lGoz>n5^$>sd{gZ)H=@3n^Rc)y(FnfOP> z-vU>prm4Sq*|!;rH%a=C+5e;W?L&-;d`y~sLXz)TG~YDsPaf?02TMMZJQjI7R4zUh zFDE;1GxKP6a1>Cf4uh^NZb~B zXZ;K&z>l7;uWSFd$(&ac|F-L^W&6wiZ3rIU-=MtJUh!{3Z#M{oAf@mZcl<>GBXfzF$Fki*ClAqqqu> zt1JF(=dX+&61xk-D)xeJ*NOkh<6e(HM)v@a{?hwTvdwreKt4VO%&br9^!HA)@Dm?2 z5~8TE1Apo(>bHL#82z1bBUksdkf)5yocW*F=V~C+P!P`cU~n{@N+`gRoF zb;5doJo@gJd$J_`dtBc?N7K80V7>p)yb~d?j!yOznOYzk+vhO4@YaKWk0o?GhvPtS z(l{N?GTC4F7k-2ICmiq5b{XtiI)`8Aesx)Uf~x8a!DH=-5ZtmSvUGoHCH#EeH&nh> z{B-W15`K=4*MXn(Rwev(w!b7^&-gH2b**1_zef9WXgn5RV3U_yH(Rz&+m#>XeqLXe z52T*~Jbs)4xWxy=hUAap$#!FQX%)KGId1M(2tU=Q_idPUl2b)j#1M|dUUk1(g|5=i zfNu5DmF(wA{95OHR_vEy6h)rE3DWY##F15kZKe89t ze`dMt(=$u=iB_tY%Z1uig>SIULhA7ndFrRziK=8=_}mEYG2wJ{Th;^2Ic+bZSCl$| z_z`$Ji|@_p^?&FGqE=E30dKZDCFwXnReS{RJ^DQqx8;1NC3L=2p7KFmolE-y5FX@q zPG^#Q{)g0D7(epy{t~3?^$f%t@1XJ+GHyp&d1wdnoyq$mz^CYe%F=!b>MzB!@QwyA z=K6r2<V4GHbl#ot`s>^mACuPaxw(35kJi*Ef*s9a&+UKK9Z|+yo})qr_%i>XpBUeUoSQ1+2gLgw;3xJ2 zKnHogT0dW-pRbjtZr_Xe-XxmYS)`bCOp2dV9G&Jn#RtKsJeD&No7{ik}oX1Oli{ebk(>;pFa?|xL{eU8>$ zx<6k*?(_O7HdaAq(fAyv@v-jj#4McM_{e;DW~TE6S29Wc>_7GVtP0-;@OZuq;1=JL z7$7^P=e_tv{oTv+AjogaX^zW>+B2K_>WRCwp?zd8nRBGvUc9f<_Wl61AKGXQXY zKjR!V+sCfhCr&=8_ivc>^R%|7_5Gf>2eG0(ZRA#p>}bfvs2$Dv2H&NH-5%NZ>l!jj?amOq zM*O^wYBzY0{(A8L>vg|LuaF;^c51N)p|JaN>dzS;*)4cqnO8V72|Bj2e1#r3PV$EB z!jL?%`vq}~@eAZpe7^#?MK|2LA??qg{nQT!{@Nb&?~5V(FeHzVBldiloWlsU!)_;a zwKD{-wVfy*j$@JhcMRXfv`-m3s@I90Lr%##%*kL?rSy@$_HrRGU|@YsCeC#62l>1y8ne6L9Ve1R+V0Zv~l`Lo`9 za~@0b->1{_e)<)h|CW>~e=fi8!2U1dTc!Sl-qT+4PI`xSWxwDw$i)K9Z!g8^>MygL zKdt5oT*nFUuG>35%ko8iaGvbHo&UKMd=Ok{xr*e)Y1pI7_*C+@C13JpPXD&Zf9FI} z=;ZH{103|FfEdkRoC6z>_9iEZuIBvLY2WwHf9dl6d8u2Ze98H*DLKzHvvXOe{Z!hA z^IsM5yBB2hQO!QorwjYr$@m`fkO%vaspCq4vnC}_J zIrIS?PZszw*}rMvXm46|dIjy5p`7hEE4pVFqpR1uoDAt(5X1DB^)|y*Pw&gMU6_vA zg}pEH<&bSRaNH{dkCk&FxFyHLE)2nK{A_oI;5Fh8;?fwvmvP()MgYd=lUnacycX1O zYOxO?EOorlI&akro*=#uyRi4FvUKBpb|H9tzk>1>-Td=bDYTpHLVey!u^o;6eZkm= zvfmYaLXv>~8WT4Jo3UIo`_azAdja#}GE_kG0xVaT{bKN**3j7qhkx~9w!+s|n^ z_X3Ml^1-IQhuV1zBqh0^o+szNa4)0SnVCz-p6oqr;mbtvWnTclpX@aFOXxR^i`XkW z?T?qs z7#_?1$-YqL6##Nyj)_a!@h?Z>Umkn_8E-AeW*PtTuxwZ0@dJS%XS`nD?iD`+^YWC$ zzx26~nZ>^_FIbOx&)3i8mYx#&E+#q~{)e60&ae6&7L$%~>eTsw(>oXOpgt-^@=d3o zLr=V$e0cW;k?)M^`IKr|NaehXk)PyVHN4pI<@ZL`0)0r z;T>BJUbS;4IMvTTb#QHQs%RzpP<$LA^gHgS`>8`mnhj3<8^UV32;;pO(%c(lC6(afq#2+ zZ1kMrz2j4dy!a06oEX~+mhyHFPmB)l@g~QM+an`T2|sCvDsaaqYne(+s|Gx@)O+3V z#8}XG`nB%zjVsPOZ_pbrP89b}dN)msjqVEPxU@LwjSo+FHId<+T?usycxbyfuJaxmG;+{QL>I`3-AIw+G z+5Dha1zr)Xn*Zd)=HaO+5~uw?HSv?JFMI@pZq&s6RC_S0=+N%7U(}?KSXO0 z!z)}O`g=cS^~Q;@15lhhHVO}IoEX_Yyh>-=Rh-<290D463*$0R9o#ZDRosJVxM5`PNNL`Q;eGo@CWd!h1?%L7;bO#g zA$(()yhR9h17ANjv1Ot-Itgh%GBz4lV<;_g$4~d&6nc;OSy4Xy=j8mIH^vkSCEa`G 
zzrnAs>2GZ~H@H$W&*h^#$Gn|v?uIgezb+ZxIW{r8ablQcoZaiDiXAw4a_3Ez72KW` z-VMdc8$vZke-_5ahez2!0OP_6H?zm&%KX#SuQ$JE`0Nkon;3>pA0^Q|9&3kngfqwd z27RuX1pADZq0jhOW&MfrV5xO%<;5}7b_mbHwvm{cB?;x9PtI4lKft$)X#o(CO3msra;ZInZtd^pR28_V-H<+~u~---qW`4-K%RI4Op;6{;ao;2>-tH7>>L>#K{ui%7;M#%org9@bV|4} zfqscCuI|73kK=~TM-i59Q!tLNni$zN60+-V++BGB@7%G|#})VEy5HU8k7XP@s9&U* zokvnHAH5XP4Zb6p?)yhZr_MiDXAHwjr5`?eWh1oSo2_^Ib)lVLy$?M3`G>8fYOj&` z74)Ah9dGj?ggEwkYsN;Wh7Xdp-ct=*1$eSQH?*VzoIvR+@M}hQkB;3mN^aBCq4Cn$ zy`ivq%hvUq)~(yRdBas(svxt#us^&q@jG9JPgV}ksN!Tr@u`ZH&2KecMM3%TB|i`l z?Sv2CTRaHIbZlZ|>d^Y(VfdurSY!>u!|4W#4~HNL9N9|{u@W^NhDQ{;M&QGztiX&7 zr$CaaH1vF=_X#ibTJkgCPx9Zv_|aFcJ@TF}-}>kCCpPcyy!tIy zJ#zfY)~%nJ_=Ama8jR5|5&z@-^M3L@A6d)~h|N8aiWH)l8i5@*Ropu+5mzzh-te9; z{3NEBpQPJAE@(nHv@J-tGFJKv)z1qb;kql8BiOdLZVyS)nA}HJAB{;3l8&rC7QDp# zIB}E~qI~eCv_vH3J9oF; z7O5ZRYa|0+Hf&$=l${oS*Jds(Up>FWz$(9g`n`8wS~1I!Rh5gC#KHc!G~jFt{YZMr$)z0UIU@K5psk;J%wG8kh&Rm5?mbrQw?@`>zM4DjOK4 zPa!=!=4~7C*EkNuM$LjI9GKc4yu|!EFjd*VJg~QN|3dt;B94E{sC~aBWOhUpx(|^b z(E2RCVgJFliMR%w!N5x`11TT)CnHmn!+T1`vW(f{w*ta*f6anhmwwF!XZd*Ieysk+ zi?5DLH)%h5_Im8$EB|-*%Qs(N2I`sg$oRy}ZyKLfer{7LcutK~f-3c@mu-IKm20-H zDQsDD+14x96*l4Lh7Cda!>?ajQGOtLGj-KC!b4LNUf)HSfuqB4^w;`p<~NR1oR%)$U!5Rds6tz}RD^;2JC>GJyT3fYJQ-t>4)(3yB^-(MRt+n<(k3Hv} zJ2&?Z^z(E0$hqgNz1QA*?e*UK?4vS@{xYgZUvbGgstcE)Y=354Dfg~yzw`%jpd%t_%d968uuvg z-ktT;9ZI#|r_}XSU%5Hz2c8%C`GfpC%QYFE&8U~E4l2ob)+$w|=T62~x1ip8GCumP zJ~O!4ce?3$wqFfU;oMoMhd(!al<>bNJ5J@4+OR==9rzsOt8-MAo-p1X{9H#j8Lrp< z1sS*dA&{;_&y}A$L#@KKz~yslD0i-+7W2AagSb~}i)!cVu=jHEhv#R=X_t9Z?an(H zZ#$3Ak$G!>Qt0G*pW0oHY=U-l|r)pWy2f_mh03 zaSPp6qCC^B!^v&r{?dHY$e2&uPZ7S3Q&&&&c$E9zczcTLN8s^D5US5Q;F{uy;{qS>T@G>0T(&# z>Q=x%py5Sl9Q;}U29YZW9^rF83$B2iEc%?5lOfDD^K~Yp;VegsUZLUB3|~wgVt!P* z+})59!ZWPP9p!S1W|jL~R?2PD*sm0ON-B44UdAbhH)Fhq3DOuM?B!a=6*olL(x zO0-D-3NDvl;|-DCQupb3Iv4ecZhng*eSzX{1;y3E`!!q@4l}nZ_pMrAU~W;&aaED( zIqo>?!5KYYS$CS`)lulr%>?%}UiXmm*SJ^iO%CY+-y4$lJdgXO07tv(0cm&PBdALB z^G$v4MrwcIP|1mlh zt`a&GZq#%dkJAZoqSLo5I`su~y2;R~kNQn?p?>(f|Ej>_4e5k_f({v<>9_(V6Zs2| zg|8kpbP|4YAI3bA{QR4qmwaJ}=3f!Mv+?{#hL1eyhbml^MxVt{8^@^Mt8~AL+^^1* zelcGx`f|`O;iok%OmAotGk==TA+pTp3*KwZ-z_qK-g~8gpeN6tr{~X`j`O*98;#RZ z-n7hLi^l8SEc55RUFHw+FLK55uN2^zKL?a1z6$uf4=RAmX+8#@>-oDXK7XFfZ@}m1 zC(Yjw`Z1uEf=-_5#lK3+RS%v6j{8G2uEo%ROkd?`ebB<=a$xhQKkD2Cga`At(9r3D zIGq+=D|B-26*{5cOsA^)K+_5QcQf!hhEDEbtqGYvEod72~ZL;ZP>bJa|+9!P= zbZWUt=(O-Q?!R)cW4O=>mDEC_i~D8wL*%cxe~bQrZx^HQchb1mxO&`pek-1%-RtRj zkFi@?Z;X2kcaEBN3of;jf!Qrnc?WtL^__e4eJ02E?Y9fvKXWnc4dCq;Y5XdarTXo! z68KY3W!f3OTl(dGE3oI}U%^Sx{u4M){3VpfILMEh#<<`wwd09$k86L4I!6A%Ys9~( z;@4unbN$pV+_kLGF|VeH5AwOe%ml`>Pmc%cf#G>qcRTXk#4+@uQJ$PB>bVBQvq#euy+Hq8Vkoifu zmkT_9xs=aWoOe-qwS)UXc6AMkqe|3ocdj>bAan${J?B$3+>>2}-@@~9l`dBRiggSZ z|Kl-e7SO#FGFzf~Uj;e??hHA;@Spf8c*1yEzbAN{=_>GdX!y}8aI|#{?Km@4;Fvpt zs}=En!ar+|_50hxFRi~T{EJbFo!t+=2RJfAdR~5^@69|tCHIDppLz;-sXX9)Xt(tT z8js5CAiA~T8TBXN=aK(o{QOXZ{5)gv^H-l?e*Q;2|HQxK?NsCEt97|WaX$IXpv-6M zlUlF2aX9b^AAd*J|7Dej%WClP>;G@@F|3K$ofFi{8Q8xP=?$%yGtYZw`8cz2j(mJW z6+XT=LHyq(`eT+LLhOH=0NYl%bhY$5`GQWm! 
zTONK?1Al{hS+xtlBG177fqNI|S0Z|3a??bYA&2c-z~{w`+$*O;;_+GQJ5p~ZDAz~j z-2c-0i`t9kEwG+N^zG=^az}ayc>uMT%Hc|K)vMab{Hk*x_xp9eW0stmdC#0D^C|L| zT?2oH@c58Jmb1(yEGK$h18I?dSYj6?vTSF6oMnFrc~QyN|0K(KQ1>SVb+n{f-Mg^v zbUR8@LX-7~C+i@}(fLoeAnm(e5%kME<#1jolxI^f$Rm`byuLU7oXTD-^6Z?X$FpAe z6Lg_@fm>3facTO6@(eilOY0vMr-%D+mghN?=U|>XbUS@%?S%4dwG*x*ZqRsmy+#@O zL;vaV%&DD4zL96E9hGSgevxPJ1@t`f*=@+5mPsGF(w_TM@Qug;jiVh9_RnxRmFUxk zIVhc@z*L-*sk=8`fi;9QT-i z%uDP0WS%_aT_}Dscw3Qwz8>{F%EMB7;3S>L<@ImXs{6MV&(9@r_I!0AT{Amv`SMAQ zDbvhxMpn&qaeTr02>N+YT}|+8ht!8Y;Bpuzc#z@|sV{jPQe&u(QHeeycP=fT>}6wq zgrDSt;~|gMtxFxxuFAJE?;$*S-b1*>w~G8FqSq?4vySCW z%cbgL`Y-3_Oc>7h271X^FM3JzY}V-6oUt2DiS$!e!5U!RNne+UFP7$vZ?5=XA>78#dHEC9lWK*`|I_S$tqazTjQ<7tvK|!wi`I)po+%%kCeJed zP+pVi8^SI6dX&dYvvUp7*YgNJMq}c3#)rGy9OLfeHT0u`!XrIw;_o46Ff$JOO@87# z>6(r0Pg?w4O=tLTtjCx?c^?DxSjQ>7urb*Ggg^GHd-z8${v|ty__OsC(MulL=g7P$ z(Q}=Uj(!CndDea-&Lc+ja-H*}7N9+{w-_hu)xiJF8~-;SwM)7GH2l=JL)}C5HeAeh zRly&H{c63R<#{;(dDW}nePKpuoU6ocJ4NIz2iUG?{cikO_)kplOsb#SxIZa5iS%lx*29Y&rZaU5?GXJrFOYv&;&G409i@9G_qP)kcV6i3QJyH$Ueo7Q z`~w=N$UEh&(&Vv0ed>|@Ps1;K8tU6*J`LfPK2e-9T=v5ky#U>z`4>7Y;2+F~=qK^J@#EN^TyRtOmsFeCp9p?kglZ}axs<$8AYY5v z13N824#jT|^`SjJlCyyTk8-W?c}oFLqhF)*v#bXG2ICG*X9ySZ8^$T|{2~)qd*r{R zjZgBp;r!e5lYNsR+@>G#R~r6C#}~vMj>H`l3PNAy->LQCBCTgy9bf$V*?LU;c@qyf z=8?Ru?8H;Nt={%wb5J4XDQ*LW8I?+BH5mT5g%61@yK;j!`tO7xd%yO8nFx`pWB zh0vSC|0=sd>O;?Q{diui?IXHM2<-7XY)lYFQzITYei=&q4hz zUA_bD(0sN~xgamu73i@Spr2`ZVT*s=WIY!0h0VY9#<@Q9&@Je{k~mkH^-YN<7XgqV zesq6~yhn-3J1PcnmKN8O)59_TscWmzv2B)_LJmx+FG!N*k3EfjJ6RU8dE62GZT zeQ~Z|jW3&&cX+PG??WF&^Pbu+g?Z1cnO}Y{MCUunK2TnY@;#LA1XFRob0Myz&%hko ze$X#vM);7+HX z?fBP~>N|(?tEN5fU)!VnTzM}fx}?}+;lEHXC-X-LxA;SkkK{<=txeos7!TO|DEtz_ zliLg7c6*X1RlLpt{qLOv5iOy04EKccrI}ylOU2IVd_=gu^?y^(uND8+y$gA5vg5(} zu9KT4c_x30q|fYQC`x|Wdy(-29A3XA@sxE0=n3G#z0{ux^3hywn(W8`{2}q{P%fMF zzNJz{+2;Vklzr*N_`Y-(JX$7yi&Fg5&no-UTY~prlJ}>(17LT;qxV6&gBowJkD(y@ z&-H%w7C%=MyY1@z7OviZZuX<&{VeE}aG!cX?_HE~h|7gd$8z=sg zyr^;ts^x9e*DRnDu?j|SIONbDfDAwO)6w->Azw5oZ@Tf?6ezcbkPmD#M>j{ED+fxeLZVP_C`Oa5?8RXrkccL-0O-wrmR@a;PBqs@Y&(`bzHlDx@oD>x^$@dPo$b5fhhRO#=rNP;YSZ%Eq5c0J z)@wJcyp{DD=w$RUCi11wvlAk(?#EECM08Y3Io{}WD9)sHKk*CbeTI93aaxL2TG$$GNE`Bp24ju`LxvR*}Yu8-{0+}5i?K24pk1+VRVB5r?z_0Hv( z50XcjKf>3iE=Kz=W;qM(oGoWE?hu~Lhao)9hs#M!dEG3IXE}35(z+SvJ0o`6Zm-dG zvj*qUtF>+>{SW2R?tiUys|$EOLcL|rM}vCLyO8h`AH?$fv~DhPYW!Mbhk7!SFY0kP z4^G86PD{=YVYidE=O|Z4Jh5)B>jN#wU>_s7c?r`gq_ah*Y4k7V=hrz8S|1TvoK$^{#=jc&xLWFHJ@@GO(bqi?q^s(u7{NN=R&xRU+5gd>%_19IZU?5N6T+% zf6nYn>_WcuYGQ^t!+x!#9AhwJ8c@#8YQAJq79stsG&NuMvn21e2Aha_K>JtFHg zuxDM=Tznn39rl*w2LY_vXS!C(OTUSo{Q-gtUsIqvLwL+Ol*u2hV0(!+kZPv(JCMKO zezbY2Yr(5~E;asw=mEUn*v{i$hAb5K+nI|#A%5ZbfaJ%aev`Z)PH6jYJr3F@!R^X^ zPqS|q)p#F$JIGYx@nT=-g{ZGGBCpunU#925`=QWtJZ{PNd9**NMCEbL71l$&)yU_( z4R<*u`+T3Sbx`x(mhr2VxGgVzgrCPR@4;y~5Aq&tM*%l< z(Eeud9$XFm?>$TBlRVBN&R!qJ{f{*7 zrQ!E3=S1RQ=%eKLH}HqsAn)rq3XynOVf{<=!(8^YtU?bdehvNJWP1|AZTXY;8v}Ti zeJugp-XErT{}Ptzk(U)d()%@6IYr*rvI_5+kX{V-ZyNr`ej)fPZQq6>0iE5uQT`6x ze_!%$YGKysQ@yVV@#3!|R6aYk1zsuVH$6FEjiOeF^=k%>E0b zKh6G{wOXHUnWG#Uy&?Nb=A1t@?}IKu%uzlkW!Zz0$96+aN z-_kmqBk5o-0m(J(-vVEH+a1dHu2e-u=Tt7?HacWNq z<3kADG8wxM^JjM@Kc{?fv_82LL=VbaM4l7hD%FeSr)~6{^+J~J?RW{?t>`@;IBqV9UEp7t{>*Xzo#;ojOYa>FbZiXx&gL6qpJ}5hWR>V9@rFa^W~}4~ zke@2&WJFLr_b=4c>ey)7)#?Q|% z=L{3SIegAA@YZwA@Dj*s8Ft7UjgN1x{juz;=@0YmiJW`9jL*Cn?bh}dQD0D z-T|~*0@qQ$NFIDVM?buS)^qaX2Rb%X)3e?+f)DH~`_{LJzmI)BjGx{+lYKf)W}3KM zWwpPDeK&XTy*!`E(D?_lzksgF;e9;bA#JxjqEqANaxeK_VCTdhprIW3bKpnXckkYd z_K-($-!JU~Abj-!Jx=(EtS>aZu-jDbW!+GzljYw18!5l!1;7mXY&!nGZZdv~_lOTo z|G(5^{1$z@@;u|evdQ>4zKPK1i6-N>>2tk?7=Jkn(Rw2l+S|5r`M 
zZ|QmW3-gN~U&CNOxM_Z1CQcvkiYDWyI5$fFFUIk^we%b1r`JXFU!D5L`weO@Qs2^_ z-fvp)I`KEGf3$z>{Z^bV4dbPJLX_@bX)=Dxe{eq^!%z9E9O8YlW1LrZzfbUv=LK<} zH4l2fLkLfvhY)Vtv0C_L|8rQs+4xD%uZxVY+4#xdX#oE$etI$HA-7Dvqk!)ZA!p?vpD*86 z$S#%V&Qd9d_k!QW{o%X>wTJk57Jqsd*WgdIfOUrx<{z8>-X(R?|MN|v|I9N>fA@Z( zGv-6@6KcDj=R{6+tuWEnAZw}$f{SD!kKGWx}1ml@?K90QC6T@tapLmc zE%M%qok(So(}v`|6>o%=BJ$n=iZWg!@7?n&@6?`iBKb12FQrO8&U+rszfvdchuQdT zKkT$7s~R6bg7erDtYdXf3L^+zHv=Dz*S@%x28jNco2xgU`FP&>)_ zY>WTge~I&7(BAXn_}f8elHUM+w%p_2n0^C1HhIeCGOJ3cl;^V2if3`lb z;!2O!O-fDFH#UCCBg`j$lGjG?`>0RxjM2vy{XJ~oZ3g~!Xu24`VPA9X{(S5H61m5} zC&Z7zch7L3boe?xlvb>p}5U+bHU-|D~nvnJ!W`tSa*$@p!)db~mWww|)`4BoFb8NWq; zkJ>9m^0dK$qdd{NNL-ifr_DKzsaro-euwlo%0G3|#pZvTKc5q)Pu+C4@!RsXuF3eV z{H*uv2J!o-AGOnD_vzm7Pk%ao@0IEH$9sk4=Y2%*HI2{dJ@;sTFHP@n-TY|J=Z*&P z+x%j$Tb|lv{5HRz(q#PBdY?yeWT^ppwfT|rz!84j)@1y4Ui=$P#&55e{X>)S+x`D~ zlkr>n#(lKO`0f7xQ!@Vc8=#pZc_I7$g!gS(^6mb8a{DKY-yYvTHW|OAuibxcGJd=N z-)u5|T8D}1?^iV$zpX!BYT>6i6#Ljucz$jD_v$9&r+vs#`dk;oPx);+|5CYsOwX&Y zo67#?2t3)Z3-^s!`91efYTze6Y+&6N+eaIl2Rm;1AoVw}LtUT~uLHOrrst9UvG%$q z&F8uZJef{a_aoQDPxILT{#pL%eR2OPI2YJjCs=|!iNsOXdSFk!$m@4%ZtH2uD_y3{QC%rVaxoiL~ScU%e>Zd=hqMtrBYaZrRKk0SMkna0y=!a(0eZsN})#(0c zlj#1{Ds=zuGfelxPcz-Gog3Xda86J&>@gjKmSm4-t=p@o$sX(PCW+p`ehcVBdM~;I z?-)7o8#+$XSG;eE_nS|i1&1HO<&XpA>jr&&uDzX2Wmc#4vS@EziD#FKa7d((Ii4%qozXxdK2a6hM}$v<9`FY0xgBCXSO zyvOJ>oU=rDs+^Mz$W5|7Qy5}DTe;^`{Lt}7T92s$f4{BcK8;VGhmHMu3FnI7K9mnE`s#UGEAxi)i0V48A$V^-OYcJl^Jn^xMKb*! z@euZ7@;vf6*9CKq8RBd{?|M1TQz=7^P(04J7GHYT1D+;lQa{u`$$THe>&4IXo4XwA zb21gav)xbpH7h=#@A=`(4t>5ktgxOBupseeOA-2x=rTm~Rjm>DNzAuC*IyWUF7MCM z_x*s^J3{mdze9m?40n#>eDwkTzMz(C{l2VnE@pYb#1P*F--CsmMZc4ic?#TrEBNn5 zIj1k9(B6Pr5cb1-moRgQ;ER11fX+SC=Llu$`aZz&H;cYC-?5Ree@RcM%&hZ_ilkqR{QZg5ev#YvsG@F9_8FV=U{xme-SyzR z-^-BqAUVW$bL;s&?(&aX?GJJL?yUB!wOem{pN_Scg?{kWn=yA_D?XnF!>03L)KWd3 zT$c6o(qWNj=xyRZFcMS&#*6PP49a@5*54|ZFB090TAvP!{&MfioA>gwO;`0-z-*S%04HyO1+dIz`xHky9 zftahEt)ZvUfNgJ8zy40E*j>&4osb0@ulL*3?+ATte@FTWr2(4vM#?4YG?4J%e1kFTp#89_38sCTiDRg-=EXxeRq!N`PfV2 zVr=^RKe~PLCpQ!S8o2O%wR4Q(=i7h}Ujv1QxWnj?&c}2=g)f*NssHBONPZuI^p5CZ z^7{ilO?H3TH9`GUd#UXTKHLra!F06jiaC#8+Xd5p@I57^_&hD|COz)9IG$POr3HG= zDTzL1x`%co(0k7M=y%1*j+DSxiq7BReg^g_QA8- z3FtCU_Q18;tE&IA+TIkF>iId7_=(ty?;&qu?~pgR4ea=7t;YZtJ+IrzWqs0f%T@~? 
z*01LaL$KSBr~HuEgM!xc`61Es`K_eKq37dvU?#8w*v|yLg7^z^h0be#ne0U9XBxdP zdUP)O{D77#)?X35W$FK>$<-TvC0D|SbM(7@S>db~rM8v0(owcavPg zwS`%kk?^<2>X7CZsds2^G@Wz%IudQFDCKO zdRf2Ibj16zvhO&brE#F%iS85T?~+h^4%(}3-|cyxC%dlZ7@vGX)8}0Ieq1MFtviWc z!MvU45Apz}JwX&*T9GPyn|cw8|*6`t4k>G}3`d6hX?>mTV4 z@(z8}&o!Ar$#Z50DbKkaF-e*G4}N5R%bUF9#nM0dE?-`m{pYG8`Obi^cwEadhLYf6 zK3jfCI-liLmh)K{iRQE1TK!)2+Bxn=H}6%;_ixhU?WMf(aT@0BnTgLZO2Pl{pL^$8{sparQT}6%6MwHhFZ+w=^#alN&^N5_ zPwX6x2XqX=+U|AnWaate^A?z&Tm$s&+CO> zIs<+}9+Lf4$Q8*~jrZ3&FEsROOujlxhF)5}USas>>6I@~skwIPO>w<73qQV}O7G2v z^lx(eH`ZwXVM|{Ger<9*(s@(E^Ud){Cw$iu+39CE9x?fMoabBA>qzFkMfQ8Ey~ovg zm&g<1I+2q%#^*73e(S8~Vz+)L{@nW=rx@h}?^d}7h2(tRkH|b$>rdVOQ+@k@(G5#4 z+?;NAJ0EG7PuT8ue%8_pPqW=MdUS5@wIaXE`?TO6%k^<<-<1nO5-DM9dA%CO8NYie z_{Y;;UU!mpnPP06iR$rl@6E&qD{oHUrxSrEu8&3FR=!W^yubUH!+Dhz{X&2DTf%?n zhnz>E;|n7{%pX{{t+syU9v6O}&S1y`D{l$buN3?R@;io3Y+ufbm4?p{|FZw$4oM!v z+b{8|=jnJlabL~~tc0;1())5&oTBkP-TQJJ=n<@staz@;AHWXKI=Ir`Kh$;!_v~Mp zd0N>a{jIWZkGPD_KdF13`1|OXr}Xs@>W=jLBI&n$FJo zQI79}bwsnjgZF1-*JOQ$`>4lxnCwZk9+`Pc_Lr@M9NmfjK#OWTIla#dqanUk`hJbc zqago*`cMxWHGDJq)ij}ekL_AT>JPog558H69Mmzw@2BH;AI`I#TX})muVVbGIkvMy z^QWeJ2KiW_OOwYlq{lO4NkI#c!c~dnx#CKsJHxYQU z9ESC6Ih6XLoZ9;}X#dJUM9$;&A+-4GrGJ?2Gz+kA=+Ay|c$*6L^Ldm{FRi?>>NqG* zvn~Qp9!FR|-Vgm<(n$TJaWsHmWuaGi{{inSq50!+ulRxCE9eV?=bU=Jvj;qS--#RF zcd`=tn&*ED)2;ga*z=c6x3IoNH~Q=l_dA1rQ~zDaA!KSL#NvAJpPpB`%D(EiaDS@% zEqR{D5T4wh5N`K}>r=auU$pHZ)aHrpBc3NasO2xDZ}NCUxIJEx8@1x)7_gytWSyz* zc)-rL>3r(S*Eb12$tm=qoll)F{3f5;H2gMwbUw9d_-*>=d}`D1+x^%1)TZIL-xH^E zAxfC<#P>>}=Q%!teUR(T`g)7LzEQ3u&XDKRc|R^WPf$9LhO1xyN-%pTX}a} z9M3uN8*CYEt<3ljR{a4-} zpgfO@<1VtB6faV#{@l5^YCVAb1^W@?=TB34?MJxBb=;K$eFnNeq5XLj07H)8C2%x`(BI-rM0f-+>~J8GD?|^LW-2 zI9}*lsOf&Li8tJ}4*HJccFp?l<&0msQ{qR1HEF&bSNqZKIL%)-;!mbWw!nU^qu}@6 zEahaph264_TR7+tzxr-bp5r|hrZ3=VSGAyF(6&p*vt2(!Q^Y5#z~4vjeseI6t=tda z^t-Tw`={%7W1l6{Z5sU;p!zQFN9^8a=qc;tUEkCFZh=UlT{UFZvkRP`?7lH5FLYDc zGlU-b?LrUKV|wH@J%pdTA>%|3?++y|Ao}<5I1pb!&VzRG!jH^HmgS%eWy)0UT+@%t z$<}=6eXIGL^dqCc8xKC@eq?k%py^`sBHtYg&T)eqD)i5sB=dsz1$bVv93Mk3==FEN zF)v+Pq+h{&^l84Q{R9{f&5MjLvxEEL=Y64jhWS*_M+-*A?@xGth&WpFCD#k&2m6T# zU(wJV_ZL$9B=M1nx4;#-X&P@);-pBvNuCdx{Xcr&KGY~8>}Xy}%FV>fO}AoP1Kppr z^dsqG(YJN#$*w&{j=%%1&{Ood?^nmz>nVU2XuVvn)lTtzU}_=s^cZUa9sqRJl?Rr)nAVmcuSG`_IQP! zVf{Mst4v`3=)DQ@n>fDh`jX~X7xW;ypc>b)n9uj%}9_dMf& z*F59Da-Q**1%GnB1hd+xzORy>5qko^g!3@4FJC14(*3FUy?Jh~-7|4`_ouD<&&KbA zdJoNd{&{Zi;rR3JN38oVntLJy@s6;}zw#wK&U32rfs8YRC(ly|xA=hA zkm6BXWnF~Nos7(j%@>W%+tn6b%{s}`Mb}F-UHLqlo{%od;|$@A()C`R$9a=y#4{Y% zIHFGw1Zn*udIh%`&Ks`6dR&>}W;&m&MDwor`xrrdk;6UZU)I*QPs2n0n)ZuL)BDW> zJ;gcmxn3W%`Ou7q)|Dgrk@P3)O^^MHd7MA;&p3YOJbXY;kH6!S(k|DFz-#agwpmC! zex^v{bnih^GB5nz2+lF#=NPuE2bgm_{Fq%)>NxU8$d7p+%-}j? 
zp5y(gGIHMDhxC0UZ_DNWEThZiic}7Kvyt`%AU(+aoGxVf{U0)OQT!;!`%5VCzh)1LaQUeG%YOj6iuG#{ANFDW8RVba*MZ2mGv;W`gJI`MHzledA^y z^>hpI8>$8C+B&~gSd9Kcj~(H>2PTu>pAPIR@fFdF^B(V{PHKDYIGnFs^{Cbp6i1hO zHr3F-Av}4$L%6MfZT)TQA%YJ?_{pm8-N*e4^a{O32fn3!B);?e7@v)=LH(_$K5~hx zzAy9vS!4D?--9_wPC+->Kaq>)mwH}Um7Y*jn70s~OwSN*(}VL%k^1)dSx-jbb>b&^ z(t06wE=P|!@0ZrKNN;4Edh$!)OPyb`&h6;IyGh4LUvJ@g4(*XWE}M^%#}(GM=oaYz zEc%W4?Rid>ypplknBS(nH&rDLNRB(ePaGFO9_0EWeLYuSi*g11seh+~Zuc|YE~(0I zU{m5`LoaIMWGm>F*A!f0WNB%|r13ohwfINuTQkLPKwoAoIRK zv#-oY6<)V%+0S~^_k7lWkK@a+tY4k4(K?0qkp;$%+{yb5a|^_d&MXvtsni4fTrGGX zl~-#QYCEg$<>y|2!bckAcH;Q33q{0%tAg4=pJ)_Dg@WlSEguPqMc^*e9v$2m1GW{O-doyic`Of&bkg^00{Dcc(*_Oa^=c9ZzQ=y9BJqzS>xaGA%LP3MjC*oqlVMnce2gn5w*`qxBAJk9cBdYB?f1omfU-2oRitv+?`47&Wo0VT6 zy|FIBKXvmL>Vq#2jxI{MV_zTp!f}{aGKIa=16;kQ-Y)jdCZ|E{880FQ$5{Z$x>T&suojz!pWVz8UR4 z5A}lY1alu#{&@<2Rlpcx`XAM$zbng=mGdi=^ukr3+0P#>Rsr#2z=vwUk8XX%;MUDM zwr;uNywdiqSDe2UN-h2;-K_dAUSdxD^FDLB4 zes=lc8s%*;;=%Cm{PM`&%CJ8*=1*3p{L#ua{*~paomZXh>mtY{obVrr_KSqdHF+bb z^Q=wN!SAFW&)PKfxxYQ%x$(y*ZF=)-U-RK_K9rq#LVa}m!$(ei;dMXzTJAT0`);gz zr2HM(G?W5W>PUL~+3sye)9Po}E>DMl{^-oqp~0z%6eN{Hm7NEsVpAR~z4O4t1XyP4 zq4CPjsmkzOdZ-%o+?`p1E^9v4*A0<;E!=Ne`Jz55jb}Wj;-L!;gQkZ zxEGnY?$PqpfeDLL0en|^=hWE5;VqT%$tW5x zdXw;b?^rqu1^CCyhxe5sz7rRYjvol=AHXLnla+~smEkRiN6Y(0cABQHED!rv9-gX9 z`V~DA1Ci_Swdw5-Uz z;5&a$Wn#3l*Pk3O?=;gGz?rN*y5nD4nHUQOxNBl;UzMs{#`L;>cGVbseGT}A8t~^= zfxoys>5o??{P6=|Z*#uK#* zpZc3K(~-=A-W`21+djuz5eG3^BmNuHJux^jc5r04GCU|eI5;t~vvRgBw!1tzh!O%Z z40DG%E?#vuG<7#;&Ze{5pMM0s=))@WpG)Glq^XKww`{%fpf z)@`bI{?FvXT~}KbtlM{={QWOjk80g+i57sbp_%8x(OqNyE_Np)1;AhDRd$U{R0bz1 zq$BK6?_KX-Ri3P_;!5M=l~Hy;z2yuG$~qJI1ttCzVO%k0(Dl zlT;^o)^=V|{7>|p?EitiQ)l~=2X^kPOd2O@Hat#eif=T71Zbl_wEtSQ$i4%6&-Qm$ zrnXM(p4<*rm;yDc7LFpuZ^VDo`$mysOZl6q>^d+y{A`dhnaaguZK*2u@MOAe+B2Tc zAJMKQ&DDx%frQ`NAszlc%tv`QbTi~*eZ0_R?mIpoe^T={V9_y$6bznxd_mmgLBh2JtZIfBT`ho^!G81wgzMOF4#I(w)? 
zHTYwH>AdU|1M zd?Y&DS$&-hL!DVLda(bR{iy;{o;;LT&pagk!g|3xu&$#&_>Hsg|L;29 zbE8HYh0oT`_{v+2#+TKAx7L6!t^v>2fL}VgXLRhEQ3`ve4v){4?ro**JFYnYlC4{> z*uL$e9W{_yVE89~mHb_x!za_zKdL;Lsyd)U!wQ+Z>ZJ1(3+HDm>PkfH&xy@E*V>K_FjGamw#y2iQen} zj|-Xz4(%&qGk5A=qI`{K7M^e2vHa#(eYl@?p=~>tJ!wCS-fK4%o~xG6 zD6q!wAN|WaUYuIw$l2+}@yOXjo57LDY)q1aN*9G`}m%xZ)2|hN#;8_VSTD!pt zS%o}sYw3);c4+@KsSe`7D`$}`Hl)fm>G9{msdNXyj47~#6rx@^60AQQOw5XfT{t*( zAiPQVb#N*@K0LTDeS8uAS)aszRm^tK6LK43204Jp4_IwhzwqDSo5WmuE@0rZExXhY zmX*jptvS^xQoA&QD);i}{^&z2jxu zUzPM6&wTSdHa`jS{p&B?{>qnczG8D}$L0&Jc=^`SCHT2*o2iYRf!D1_m49SUrY;&s zdS+_E?>h$)G+IHRy(L()zIr5`|99?57jZaDVt=CxO=0&^e?8H8aCJG5l;ol{*2n%Q zxmL@o>rttkfzO!REl!zj6)cB5~hW9z6`j#p~a^&=$dX`o!+%om0xR9{H_rzH7_D b%csu%(fdEp_Qcn2`^wfMZ&`N3zu)`6nzf|X literal 0 HcmV?d00001 From 7e122e993d78656f2088070f09354e332258d686 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Mat=C3=ADas=20Ignacio=20Gonz=C3=A1lez?= Date: Tue, 27 Aug 2024 15:44:31 -0300 Subject: [PATCH 29/39] feat(zk_toolbox): Update lint CI with `zk_toolbox` (#2694) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit ## What ❔ Update CI with `zk_toolbox` --- .github/workflows/ci-core-lint-reusable.yml | 25 +++---- .github/workflows/ci-zk-toolbox-reusable.yml | 4 ++ zk_toolbox/README.md | 3 +- .../crates/zk_supervisor/src/commands/fmt.rs | 27 ++++---- .../crates/zk_supervisor/src/commands/lint.rs | 69 +++++++++---------- .../zk_supervisor/src/commands/lint_utils.rs | 7 +- .../crates/zk_supervisor/src/messages.rs | 23 +++---- 7 files changed, 75 insertions(+), 83 deletions(-) diff --git a/.github/workflows/ci-core-lint-reusable.yml b/.github/workflows/ci-core-lint-reusable.yml index 2fa6cde5fdeb..e7c8b5340194 100644 --- a/.github/workflows/ci-core-lint-reusable.yml +++ b/.github/workflows/ci-core-lint-reusable.yml @@ -19,23 +19,20 @@ jobs: - name: Start services run: | - mkdir -p ./volumes/postgres - run_retried docker compose pull zk postgres - docker compose up -d zk postgres + ci_localnet_up ci_run sccache --start-server - - name: Setup db + - name: Build run: | - ci_run zk - ci_run run_retried rustup show - ci_run zk db migrate + ci_run ./bin/zkt + ci_run yarn install + ci_run git config --global --add safe.directory /usr/src/zksync - name: Lints run: | - ci_run zk fmt --check - ci_run zk lint rust --check - ci_run zk lint toolbox --check - ci_run zk lint js --check - ci_run zk lint ts --check - ci_run zk lint md --check - ci_run zk db check-sqlx-data + ci_run zk_supervisor fmt --check + ci_run zk_supervisor lint -t md --check + ci_run zk_supervisor lint -t sol --check + ci_run zk_supervisor lint -t js --check + ci_run zk_supervisor lint -t ts --check + ci_run zk_supervisor lint -t rs --check diff --git a/.github/workflows/ci-zk-toolbox-reusable.yml b/.github/workflows/ci-zk-toolbox-reusable.yml index b2fc10c28aae..ed07174a66df 100644 --- a/.github/workflows/ci-zk-toolbox-reusable.yml +++ b/.github/workflows/ci-zk-toolbox-reusable.yml @@ -115,6 +115,10 @@ jobs: --prover-db-url=postgres://postgres:notsecurepassword@postgres:5432 \ --prover-db-name=zksync_prover_localhost_rollup + - name: Check Database + run: | + ci_run zk_supervisor database check-sqlx-data + - name: Run server run: | ci_run zk_inception server --ignore-prerequisites &>server.log & diff --git a/zk_toolbox/README.md b/zk_toolbox/README.md index debbb511df3f..ab567627d7bd 100644 --- a/zk_toolbox/README.md +++ b/zk_toolbox/README.md @@ -342,7 +342,7 @@ Lint code: zks lint ``` -By default, this command runs the linter on all files. 
+By default, this command runs the linter on all files. To target specific file types, use the `--target` option.
 
 Supported extensions include:
 
 - `rs`: Rust files.
@@ -350,3 +350,4 @@ Supported extensions include:
 - `sol`: Solidity files.
 - `js`: JavaScript files.
 - `ts`: TypeScript files.
+- `contracts`: files in `contracts` directory.
diff --git a/zk_toolbox/crates/zk_supervisor/src/commands/fmt.rs b/zk_toolbox/crates/zk_supervisor/src/commands/fmt.rs
index fa0f4cef7bfe..5ee0c4efb343 100644
--- a/zk_toolbox/crates/zk_supervisor/src/commands/fmt.rs
+++ b/zk_toolbox/crates/zk_supervisor/src/commands/fmt.rs
@@ -6,16 +6,16 @@ use config::EcosystemConfig;
 use xshell::{cmd, Shell};
 
 use crate::{
-    commands::lint_utils::{get_unignored_files, Extension},
+    commands::lint_utils::{get_unignored_files, Target},
     messages::{
         msg_running_fmt_for_extension_spinner, msg_running_fmt_for_extensions_spinner,
         msg_running_rustfmt_for_dir_spinner, MSG_RUNNING_CONTRACTS_FMT_SPINNER,
     },
 };
 
-async fn prettier(shell: Shell, extension: Extension, check: bool) -> anyhow::Result<()> {
-    let spinner = Spinner::new(&msg_running_fmt_for_extension_spinner(extension));
-    let files = get_unignored_files(&shell, &extension)?;
+async fn prettier(shell: Shell, target: Target, check: bool) -> anyhow::Result<()> {
+    let spinner = Spinner::new(&msg_running_fmt_for_extension_spinner(target));
+    let files = get_unignored_files(&shell, &target)?;
 
     if files.is_empty() {
         return Ok(());
@@ -23,7 +23,7 @@ async fn prettier(shell: Shell, extension: Extension, check: bool) -> anyhow::Result<()> {
     spinner.freeze();
 
     let mode = if check { "--check" } else { "--write" };
-    let config = format!("etc/prettier-config/{extension}.js");
+    let config = format!("etc/prettier-config/{target}.js");
     Ok(
         Cmd::new(cmd!(shell, "yarn --silent prettier {mode} --config {config}").args(files))
             .run()?,
@@ -68,7 +68,7 @@ pub enum Formatter {
     Contract,
     Prettier {
         #[arg(short, long)]
-        extensions: Vec<Extension>,
+        targets: Vec<Target>,
     },
 }
 
@@ -85,8 +85,7 @@ pub async fn run(shell: Shell, args: FmtArgs) -> anyhow::Result<()> {
     match args.formatter {
         None => {
             let mut tasks = vec![];
-            let extensions: Vec<_> =
-                vec![Extension::Js, Extension::Ts, Extension::Md, Extension::Sol];
+            let extensions: Vec<_> = vec![Target::Js, Target::Ts, Target::Md, Target::Sol];
             let spinner = Spinner::new(&msg_running_fmt_for_extensions_spinner(&extensions));
             spinner.freeze();
             for ext in extensions {
@@ -108,13 +107,13 @@ pub async fn run(shell: Shell, args: FmtArgs) -> anyhow::Result<()> {
                 }
             });
         }
-        Some(Formatter::Prettier { mut extensions }) => {
-            if extensions.is_empty() {
-                extensions = vec![Extension::Js, Extension::Ts, Extension::Md, Extension::Sol];
+        Some(Formatter::Prettier { mut targets }) => {
+            if targets.is_empty() {
+                targets = vec![Target::Js, Target::Ts, Target::Md, Target::Sol];
             }
-            let spinner = Spinner::new(&msg_running_fmt_for_extensions_spinner(&extensions));
-            for ext in extensions {
-                prettier(shell.clone(), ext, args.check).await?
+            let spinner = Spinner::new(&msg_running_fmt_for_extensions_spinner(&targets));
+            for target in targets {
+                prettier(shell.clone(), target, args.check).await?
             }
             spinner.finish()
         }
diff --git a/zk_toolbox/crates/zk_supervisor/src/commands/lint.rs b/zk_toolbox/crates/zk_supervisor/src/commands/lint.rs
index 17c8680f1d24..1861d164ce44 100644
--- a/zk_toolbox/crates/zk_supervisor/src/commands/lint.rs
+++ b/zk_toolbox/crates/zk_supervisor/src/commands/lint.rs
@@ -4,7 +4,7 @@ use config::EcosystemConfig;
 use xshell::{cmd, Shell};
 
 use crate::{
-    commands::lint_utils::{get_unignored_files, Extension},
+    commands::lint_utils::{get_unignored_files, Target},
     messages::{
         msg_running_linter_for_extension_spinner, msg_running_linters_for_files,
         MSG_LINT_CONFIG_PATH_ERR, MSG_RUNNING_CONTRACTS_LINTER_SPINNER,
@@ -17,31 +17,32 @@ const CONFIG_PATH: &str = "etc/lint-config";
 pub struct LintArgs {
     #[clap(long, short = 'c')]
     pub check: bool,
-    #[clap(long, short = 'e')]
-    pub extensions: Vec<Extension>,
+    #[clap(long, short = 't')]
+    pub targets: Vec<Target>,
 }
 
 pub fn run(shell: &Shell, args: LintArgs) -> anyhow::Result<()> {
-    let extensions = if args.extensions.is_empty() {
+    let targets = if args.targets.is_empty() {
         vec![
-            Extension::Rs,
-            Extension::Md,
-            Extension::Sol,
-            Extension::Js,
-            Extension::Ts,
+            Target::Rs,
+            Target::Md,
+            Target::Sol,
+            Target::Js,
+            Target::Ts,
+            Target::Contracts,
         ]
     } else {
-        args.extensions.clone()
+        args.targets.clone()
     };
 
-    logger::info(msg_running_linters_for_files(&extensions));
+    logger::info(msg_running_linters_for_files(&targets));
 
     let ecosystem = EcosystemConfig::from_file(shell)?;
 
-    for extension in extensions {
-        match extension {
-            Extension::Rs => lint_rs(shell, &ecosystem, args.check)?,
-            Extension::Sol => lint_contracts(shell, &ecosystem, args.check)?,
+    for target in targets {
+        match target {
+            Target::Rs => lint_rs(shell, &ecosystem, args.check)?,
+            Target::Contracts => lint_contracts(shell, &ecosystem, args.check)?,
             ext => lint(shell, &ecosystem, &ext, args.check)?,
         }
     }
@@ -50,7 +51,7 @@ pub fn run(shell: &Shell, args: LintArgs) -> anyhow::Result<()> {
 }
 
 fn lint_rs(shell: &Shell, ecosystem: &EcosystemConfig, check: bool) -> anyhow::Result<()> {
-    let spinner = Spinner::new(&msg_running_linter_for_extension_spinner(&Extension::Rs));
+    let spinner = Spinner::new(&msg_running_linter_for_extension_spinner(&Target::Rs));
 
     let link_to_code = &ecosystem.link_to_code;
     let lint_to_prover = &ecosystem.link_to_code.join("prover");
@@ -61,14 +62,7 @@ fn lint_rs(shell: &Shell, ecosystem: &EcosystemConfig, check: bool) -> anyhow::Result<()> {
     for path in paths {
         let _dir_guard = shell.push_dir(path);
         let mut cmd = cmd!(shell, "cargo clippy");
-        let common_args = &[
-            "--locked",
-            "--",
-            "-D",
-            "warnings",
-            "-D",
-            "unstable_features",
-        ];
+        let common_args = &["--locked", "--", "-D", "warnings"];
         if !check {
             cmd = cmd.args(&["--fix", "--allow-dirty"]);
         }
@@ -79,34 +73,35 @@ fn lint_rs(shell: &Shell, ecosystem: &EcosystemConfig, check: bool) -> anyhow::Result<()> {
     Ok(())
 }
 
-fn get_linter(extension: &Extension) -> Vec<String> {
-    match extension {
-        Extension::Rs => vec!["cargo".to_string(), "clippy".to_string()],
-        Extension::Md => vec!["markdownlint".to_string()],
-        Extension::Sol => vec!["solhint".to_string()],
-        Extension::Js => vec!["eslint".to_string()],
-        Extension::Ts => vec!["eslint".to_string(), "--ext".to_string(), "ts".to_string()],
+fn get_linter(target: &Target) -> Vec<String> {
+    match target {
+        Target::Rs => vec!["cargo".to_string(), "clippy".to_string()],
+        Target::Md => vec!["markdownlint".to_string()],
+        Target::Sol => vec!["solhint".to_string()],
+        Target::Js => vec!["eslint".to_string()],
+        Target::Ts => vec!["eslint".to_string(), "--ext".to_string(), "ts".to_string()],
+        Target::Contracts => vec![],
     }
 }
 
 fn lint(
     shell: &Shell,
     ecosystem: &EcosystemConfig,
-    extension: &Extension,
+    target: &Target,
     check: bool,
 ) -> anyhow::Result<()> {
-    let spinner = Spinner::new(&msg_running_linter_for_extension_spinner(extension));
+    let spinner = Spinner::new(&msg_running_linter_for_extension_spinner(target));
     let _dir_guard = shell.push_dir(&ecosystem.link_to_code);
-    let files = get_unignored_files(shell, extension)?;
+    let files = get_unignored_files(shell, target)?;
     let cmd = cmd!(shell, "yarn");
     let config_path = ecosystem.link_to_code.join(CONFIG_PATH);
-    let config_path = config_path.join(format!("{}.js", extension));
+    let config_path = config_path.join(format!("{}.js", target));
     let config_path = config_path
         .to_str()
         .expect(MSG_LINT_CONFIG_PATH_ERR)
         .to_string();
 
-    let linter = get_linter(extension);
+    let linter = get_linter(target);
 
     let fix_option = if check {
         vec![]
@@ -128,8 +123,6 @@ fn lint(
 }
 
 fn lint_contracts(shell: &Shell, ecosystem: &EcosystemConfig, check: bool) -> anyhow::Result<()> {
-    lint(shell, ecosystem, &Extension::Sol, check)?;
-
     let spinner = Spinner::new(MSG_RUNNING_CONTRACTS_LINTER_SPINNER);
     let _dir_guard = shell.push_dir(&ecosystem.link_to_code);
     let cmd = cmd!(shell, "yarn");
diff --git a/zk_toolbox/crates/zk_supervisor/src/commands/lint_utils.rs b/zk_toolbox/crates/zk_supervisor/src/commands/lint_utils.rs
index 92fac6ea815f..6d7bef6eb459 100644
--- a/zk_toolbox/crates/zk_supervisor/src/commands/lint_utils.rs
+++ b/zk_toolbox/crates/zk_supervisor/src/commands/lint_utils.rs
@@ -33,15 +33,16 @@ const IGNORED_FILES: [&str; 4] = [
 
 #[derive(Debug, ValueEnum, EnumIter, strum::Display, PartialEq, Eq, Clone, Copy)]
 #[strum(serialize_all = "lowercase")]
-pub enum Extension {
+pub enum Target {
     Md,
     Sol,
     Js,
     Ts,
     Rs,
+    Contracts,
 }
 
-pub fn get_unignored_files(shell: &Shell, extension: &Extension) -> anyhow::Result<Vec<String>> {
+pub fn get_unignored_files(shell: &Shell, target: &Target) -> anyhow::Result<Vec<String>> {
     let mut files = Vec::new();
     let output = cmd!(shell, "git ls-files --recurse-submodules").read()?;
     for line in output.lines() {
         let path = line.to_string();
         if !IGNORED_DIRS.iter().any(|dir| path.contains(dir))
             && !IGNORED_FILES.contains(&path.as_str())
-            && path.ends_with(&format!(".{}", extension))
+            && path.ends_with(&format!(".{}", target))
         {
             files.push(path);
         }
diff --git a/zk_toolbox/crates/zk_supervisor/src/messages.rs b/zk_toolbox/crates/zk_supervisor/src/messages.rs
index de25be281328..00e49131de77 100644
--- a/zk_toolbox/crates/zk_supervisor/src/messages.rs
+++ b/zk_toolbox/crates/zk_supervisor/src/messages.rs
@@ -1,4 +1,4 @@
-use crate::commands::lint_utils::Extension;
+use crate::commands::lint_utils::Target;
 
 // Ecosystem related messages
 pub(super) const MSG_CHAIN_NOT_FOUND_ERR: &str = "Chain not found";
@@ -152,28 +152,25 @@ pub(super) const MSG_CONTRACTS_CLEANING_FINISHED: &str =
 pub(super) const MSG_RUNNING_SNAPSHOT_CREATOR: &str = "Running snapshot creator";
 
 // Lint related messages
-pub(super) fn msg_running_linters_for_files(extensions: &[Extension]) -> String {
-    let extensions: Vec<String> = extensions.iter().map(|e| format!(".{}", e)).collect();
-    format!(
-        "Running linters for files with extensions: {:?}",
-        extensions
-    )
+pub(super) fn msg_running_linters_for_files(targets: &[Target]) -> String {
+    let targets: Vec<String> = targets.iter().map(|e| format!(".{}", e)).collect();
+    format!("Running linters for targets: {:?}", targets)
 }
 
-pub(super) fn
msg_running_linter_for_extension_spinner(extension: &Extension) -> String { - format!("Running linter for files with extension: .{}", extension) +pub(super) fn msg_running_linter_for_extension_spinner(target: &Target) -> String { + format!("Running linter for files with extension: .{}", target) } -pub(super) fn msg_running_fmt_for_extension_spinner(extension: Extension) -> String { - format!("Running prettier for: {extension:?}") +pub(super) fn msg_running_fmt_for_extension_spinner(target: Target) -> String { + format!("Running prettier for: {target:?}") } pub(super) fn msg_running_rustfmt_for_dir_spinner(dir: &str) -> String { format!("Running rustfmt for: {dir:?}") } -pub(super) fn msg_running_fmt_for_extensions_spinner(extensions: &[Extension]) -> String { - format!("Running prettier for: {extensions:?} and rustfmt") +pub(super) fn msg_running_fmt_for_extensions_spinner(targets: &[Target]) -> String { + format!("Running prettier for: {targets:?} and rustfmt") } pub(super) const MSG_LINT_CONFIG_PATH_ERR: &str = "Lint config path error"; From ca1a3962ea9220f0484a316413592197e0834560 Mon Sep 17 00:00:00 2001 From: Zach Kolodny Date: Tue, 27 Aug 2024 14:53:09 -0400 Subject: [PATCH 30/39] bump contracts --- contracts | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/contracts b/contracts index 2076594154b5..17e2c1be33a1 160000 --- a/contracts +++ b/contracts @@ -1 +1 @@ -Subproject commit 2076594154b53619570e3433b25a98119c8449b4 +Subproject commit 17e2c1be33a15357cf1689f62b972ab43b7a7e39 From 2f6d9bc2dc497dc2ee5f797932d2999b31fae9b0 Mon Sep 17 00:00:00 2001 From: Zach Kolodny Date: Tue, 27 Aug 2024 15:06:38 -0400 Subject: [PATCH 31/39] bump contracts --- contracts | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/contracts b/contracts index 17e2c1be33a1..5b3f743ba798 160000 --- a/contracts +++ b/contracts @@ -1 +1 @@ -Subproject commit 17e2c1be33a15357cf1689f62b972ab43b7a7e39 +Subproject commit 5b3f743ba798b51661f9a844fb56b0ce0da29a9e From 923e33e81bba83f72b97ca9590c5cdf2da2a311b Mon Sep 17 00:00:00 2001 From: Alex Ostrovski Date: Tue, 27 Aug 2024 23:08:57 +0300 Subject: [PATCH 32/39] fix(vm): Fix used bytecodes divergence (#2741) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit ## What ❔ Fixes divergence in used bytecodes info between the old and new VMs. ## Why ❔ The new VM behaved differently to the old VM on far call if decommitting the called contract leads to out-of-gas revert. The old VM records the called contract bytecode as decommitted in this case; the new one didn't. ## Checklist - [x] PR title corresponds to the body of PR (we generate changelog entries from PRs). - [x] Tests for the changes have been added / updated. - [x] Documentation comments have been added / updated. - [x] Code has been formatted via `zk fmt` and `zk lint`. 
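
For intuition, the semantics this change aligns the fast VM to can be sketched roughly as follows. This is illustrative pseudocode only: `VmState`, `decommit_for_far_call`, and `decommitment_cost` are stand-in names, not the actual `vm2` internals.

```rust
use std::collections::HashSet;

// Stand-in types for illustration; the real VM uses its own 256-bit hash
// type and gas accounting.
type BytecodeHash = u128;

#[derive(Debug)]
struct OutOfGas;

struct VmState {
    gas: u64,
    decommitted_hashes: HashSet<BytecodeHash>,
}

impl VmState {
    // Placeholder pricing; the real cost depends on the bytecode length.
    fn decommitment_cost(&self, _hash: BytecodeHash) -> u64 {
        10_000
    }

    /// Sketch of far-call decommitment. The hash is recorded *before* the
    /// gas check, so an out-of-gas revert still leaves it in
    /// `decommitted_hashes`, matching the old VM's bookkeeping.
    fn decommit_for_far_call(&mut self, hash: BytecodeHash) -> Result<(), OutOfGas> {
        let is_fresh = self.decommitted_hashes.insert(hash);
        // Only a fresh decommitment is charged; repeats are free.
        let cost = if is_fresh { self.decommitment_cost(hash) } else { 0 };
        if self.gas < cost {
            return Err(OutOfGas); // the hash stays recorded even on failure
        }
        self.gas -= cost;
        Ok(())
    }
}
```

The design point is that recording happens unconditionally while charging happens only for a fresh decommitment; that is what makes the out-of-gas case observable in the used-bytecodes info, as the new tests below verify.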
--- Cargo.lock | 3 +- Cargo.toml | 2 +- core/lib/multivm/Cargo.toml | 1 + .../vm_fast/tests/get_used_contracts.rs | 102 ++++++++++++++++-- .../src/versions/vm_fast/tests/utils.rs | 5 + .../vm_latest/tests/get_used_contracts.rs | 101 +++++++++++++++-- .../src/versions/vm_latest/tests/utils.rs | 5 + .../contracts/counter/counter.sol | 2 +- .../contracts/counter/proxy_counter.sol | 22 ++++ prover/Cargo.lock | 2 +- 10 files changed, 228 insertions(+), 17 deletions(-) create mode 100644 etc/contracts-test-data/contracts/counter/proxy_counter.sol diff --git a/Cargo.lock b/Cargo.lock index 39058d09f540..98e2326e1c25 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -7288,7 +7288,7 @@ dependencies = [ [[package]] name = "vm2" version = "0.1.0" -source = "git+https://github.com/matter-labs/vm2.git?rev=9a38900d7af9b1d72b47ce3be980e77c1239a61d#9a38900d7af9b1d72b47ce3be980e77c1239a61d" +source = "git+https://github.com/matter-labs/vm2.git?rev=2276b7b5af520fca0477bdafe43781b51896d235#2276b7b5af520fca0477bdafe43781b51896d235" dependencies = [ "enum_dispatch", "primitive-types", @@ -8935,6 +8935,7 @@ name = "zksync_multivm" version = "0.1.0" dependencies = [ "anyhow", + "assert_matches", "circuit_sequencer_api 0.133.0", "circuit_sequencer_api 0.140.0", "circuit_sequencer_api 0.141.1", diff --git a/Cargo.toml b/Cargo.toml index c9c8ff95ebc4..6faea57fa1a0 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -217,7 +217,7 @@ zk_evm_1_4_1 = { package = "zk_evm", version = "0.141.0" } zk_evm_1_5_0 = { package = "zk_evm", version = "=0.150.4" } # New VM; pinned to a specific commit because of instability -vm2 = { git = "https://github.com/matter-labs/vm2.git", rev = "9a38900d7af9b1d72b47ce3be980e77c1239a61d" } +vm2 = { git = "https://github.com/matter-labs/vm2.git", rev = "2276b7b5af520fca0477bdafe43781b51896d235" } # Consensus dependencies. 
 zksync_concurrency = "=0.1.0-rc.11"
diff --git a/core/lib/multivm/Cargo.toml b/core/lib/multivm/Cargo.toml
index a245acdfacf6..4711eefa0d6c 100644
--- a/core/lib/multivm/Cargo.toml
+++ b/core/lib/multivm/Cargo.toml
@@ -40,6 +40,7 @@ tracing.workspace = true
 vise.workspace = true
 
 [dev-dependencies]
+assert_matches.workspace = true
 tokio = { workspace = true, features = ["time"] }
 zksync_test_account.workspace = true
 ethabi.workspace = true
diff --git a/core/lib/multivm/src/versions/vm_fast/tests/get_used_contracts.rs b/core/lib/multivm/src/versions/vm_fast/tests/get_used_contracts.rs
index 1bfc2f8ff11f..5524bd3edde9 100644
--- a/core/lib/multivm/src/versions/vm_fast/tests/get_used_contracts.rs
+++ b/core/lib/multivm/src/versions/vm_fast/tests/get_used_contracts.rs
@@ -1,17 +1,23 @@
-use std::collections::HashSet;
+use std::{collections::HashSet, iter};
 
+use assert_matches::assert_matches;
+use ethabi::Token;
 use itertools::Itertools;
+use zk_evm_1_3_1::zkevm_opcode_defs::decoding::{EncodingModeProduction, VmEncodingMode};
 use zksync_system_constants::CONTRACT_DEPLOYER_ADDRESS;
 use zksync_test_account::Account;
-use zksync_types::{Execute, U256};
+use zksync_types::{Address, Execute, U256};
 use zksync_utils::{bytecode::hash_bytecode, h256_to_u256};
 
 use crate::{
-    interface::{storage::ReadStorage, TxExecutionMode, VmExecutionMode, VmInterface},
+    interface::{
+        storage::ReadStorage, ExecutionResult, TxExecutionMode, VmExecutionMode,
+        VmExecutionResultAndLogs, VmInterface,
+    },
     vm_fast::{
         tests::{
-            tester::{TxType, VmTesterBuilder},
-            utils::{read_test_contract, BASE_SYSTEM_CONTRACTS},
+            tester::{TxType, VmTester, VmTesterBuilder},
+            utils::{read_proxy_counter_contract, read_test_contract, BASE_SYSTEM_CONTRACTS},
         },
         vm::Vm,
     },
@@ -88,8 +94,90 @@ fn known_bytecodes_without_aa_code<S: ReadStorage>(vm: &Vm<S>) -> HashSet<U256>
         .keys()
         .cloned()
         .collect::<HashSet<_>>();
-
     known_bytecodes_without_aa_code.remove(&h256_to_u256(BASE_SYSTEM_CONTRACTS.default_aa.hash));
-
     known_bytecodes_without_aa_code
 }
+
+/// Counter test contract bytecode inflated by appending lots of `NOP` opcodes at the end. This leads to non-trivial
+/// decommitment cost (>10,000 gas).
+fn inflated_counter_bytecode() -> Vec<u8> {
+    let mut counter_bytecode = read_test_contract();
+    counter_bytecode.extend(
+        iter::repeat(EncodingModeProduction::nop_encoding().to_be_bytes())
+            .take(10_000)
+            .flatten(),
+    );
+    counter_bytecode
+}
+
+fn execute_proxy_counter(gas: u32) -> (VmTester, U256, VmExecutionResultAndLogs) {
+    let counter_bytecode = inflated_counter_bytecode();
+    let counter_bytecode_hash = h256_to_u256(hash_bytecode(&counter_bytecode));
+    let counter_address = Address::repeat_byte(0x23);
+
+    let mut vm = VmTesterBuilder::new()
+        .with_empty_in_memory_storage()
+        .with_custom_contracts(vec![(counter_bytecode, counter_address, false)])
+        .with_execution_mode(TxExecutionMode::VerifyExecute)
+        .with_random_rich_accounts(1)
+        .build();
+
+    let (proxy_counter_bytecode, proxy_counter_abi) = read_proxy_counter_contract();
+    let account = &mut vm.rich_accounts[0];
+    let deploy_tx = account.get_deploy_tx(
+        &proxy_counter_bytecode,
+        Some(&[Token::Address(counter_address)]),
+        TxType::L2,
+    );
+    let (compression_result, exec_result) = vm
+        .vm
+        .execute_transaction_with_bytecode_compression(deploy_tx.tx, true);
+    compression_result.unwrap();
+    assert!(!exec_result.result.is_failed(), "{exec_result:#?}");
+
+    let decommitted_hashes = vm.vm.decommitted_hashes().collect::<HashSet<_>>();
+    assert!(
+        !decommitted_hashes.contains(&counter_bytecode_hash),
+        "{decommitted_hashes:?}"
+    );
+
+    let increment = proxy_counter_abi.function("increment").unwrap();
+    let increment_tx = account.get_l2_tx_for_execute(
+        Execute {
+            contract_address: deploy_tx.address,
+            calldata: increment
+                .encode_input(&[Token::Uint(1.into()), Token::Uint(gas.into())])
+                .unwrap(),
+            value: 0.into(),
+            factory_deps: vec![],
+        },
+        None,
+    );
+    let (compression_result, exec_result) = vm
+        .vm
+        .execute_transaction_with_bytecode_compression(increment_tx, true);
+    compression_result.unwrap();
+    (vm, counter_bytecode_hash, exec_result)
+}
+
+#[test]
+fn get_used_contracts_with_far_call() {
+    let (vm, counter_bytecode_hash, exec_result) = execute_proxy_counter(100_000);
+    assert!(!exec_result.result.is_failed(), "{exec_result:#?}");
+    let decommitted_hashes = vm.vm.decommitted_hashes().collect::<HashSet<_>>();
+    assert!(
+        decommitted_hashes.contains(&counter_bytecode_hash),
+        "{decommitted_hashes:?}"
+    );
+}
+
+#[test]
+fn get_used_contracts_with_out_of_gas_far_call() {
+    let (vm, counter_bytecode_hash, exec_result) = execute_proxy_counter(10_000);
+    assert_matches!(exec_result.result, ExecutionResult::Revert { .. });
+    let decommitted_hashes = vm.vm.decommitted_hashes().collect::<HashSet<_>>();
+    assert!(
+        decommitted_hashes.contains(&counter_bytecode_hash),
+        "{decommitted_hashes:?}"
+    );
+}
diff --git a/core/lib/multivm/src/versions/vm_fast/tests/utils.rs b/core/lib/multivm/src/versions/vm_fast/tests/utils.rs
index 6b17e66f2616..d696aa582d64 100644
--- a/core/lib/multivm/src/versions/vm_fast/tests/utils.rs
+++ b/core/lib/multivm/src/versions/vm_fast/tests/utils.rs
@@ -127,3 +127,8 @@ pub(crate) fn read_expensive_contract() -> (Vec<u8>, Contract) {
         "etc/contracts-test-data/artifacts-zk/contracts/expensive/expensive.sol/Expensive.json";
     (read_bytecode(PATH), load_contract(PATH))
 }
+
+pub(crate) fn read_proxy_counter_contract() -> (Vec<u8>, Contract) {
+    const PATH: &str = "etc/contracts-test-data/artifacts-zk/contracts/counter/proxy_counter.sol/ProxyCounter.json";
+    (read_bytecode(PATH), load_contract(PATH))
+}
diff --git a/core/lib/multivm/src/versions/vm_latest/tests/get_used_contracts.rs b/core/lib/multivm/src/versions/vm_latest/tests/get_used_contracts.rs
index 752fd1a9087d..a77b8c97b425 100644
--- a/core/lib/multivm/src/versions/vm_latest/tests/get_used_contracts.rs
+++ b/core/lib/multivm/src/versions/vm_latest/tests/get_used_contracts.rs
@@ -1,9 +1,13 @@
 use std::{
     collections::{HashMap, HashSet},
+    iter,
     str::FromStr,
 };
 
+use assert_matches::assert_matches;
+use ethabi::Token;
 use itertools::Itertools;
+use zk_evm_1_3_1::zkevm_opcode_defs::decoding::{EncodingModeProduction, VmEncodingMode};
 use zk_evm_1_5_0::{
     abstractions::DecommittmentProcessor,
     aux_structures::{DecommittmentQuery, MemoryPage, Timestamp},
@@ -11,15 +15,18 @@ use zk_evm_1_5_0::{
 };
 use zksync_system_constants::CONTRACT_DEPLOYER_ADDRESS;
 use zksync_test_account::Account;
-use zksync_types::{Execute, U256};
+use zksync_types::{Address, Execute, U256};
 use zksync_utils::{bytecode::hash_bytecode, h256_to_u256};
+use zksync_vm_interface::VmExecutionResultAndLogs;
 
 use crate::{
-    interface::{storage::WriteStorage, TxExecutionMode, VmExecutionMode, VmInterface},
+    interface::{
+        storage::WriteStorage, ExecutionResult, TxExecutionMode, VmExecutionMode, VmInterface,
+    },
     vm_latest::{
         tests::{
-            tester::{TxType, VmTesterBuilder},
-            utils::{read_test_contract, BASE_SYSTEM_CONTRACTS},
+            tester::{TxType, VmTester, VmTesterBuilder},
+            utils::{read_proxy_counter_contract, read_test_contract, BASE_SYSTEM_CONTRACTS},
         },
         HistoryDisabled, Vm,
     },
@@ -148,10 +155,92 @@ fn known_bytecodes_without_aa_code(
         .known_bytecodes
         .inner()
         .clone();
-
     known_bytecodes_without_aa_code
         .remove(&h256_to_u256(BASE_SYSTEM_CONTRACTS.default_aa.hash))
         .unwrap();
-
     known_bytecodes_without_aa_code
 }
+
+/// Counter test contract bytecode inflated by appending lots of `NOP` opcodes at the end. This leads to non-trivial
+/// decommitment cost (>10,000 gas).
+fn inflated_counter_bytecode() -> Vec<u8> {
+    let mut counter_bytecode = read_test_contract();
+    counter_bytecode.extend(
+        iter::repeat(EncodingModeProduction::nop_encoding().to_be_bytes())
+            .take(10_000)
+            .flatten(),
+    );
+    counter_bytecode
+}
+
+fn execute_proxy_counter(gas: u32) -> (VmTester<HistoryDisabled>, U256, VmExecutionResultAndLogs) {
+    let counter_bytecode = inflated_counter_bytecode();
+    let counter_bytecode_hash = h256_to_u256(hash_bytecode(&counter_bytecode));
+    let counter_address = Address::repeat_byte(0x23);
+
+    let mut vm = VmTesterBuilder::new(HistoryDisabled)
+        .with_empty_in_memory_storage()
+        .with_custom_contracts(vec![(counter_bytecode, counter_address, false)])
+        .with_execution_mode(TxExecutionMode::VerifyExecute)
+        .with_random_rich_accounts(1)
+        .build();
+
+    let (proxy_counter_bytecode, proxy_counter_abi) = read_proxy_counter_contract();
+    let account = &mut vm.rich_accounts[0];
+    let deploy_tx = account.get_deploy_tx(
+        &proxy_counter_bytecode,
+        Some(&[Token::Address(counter_address)]),
+        TxType::L2,
+    );
+    let (compression_result, exec_result) = vm
+        .vm
+        .execute_transaction_with_bytecode_compression(deploy_tx.tx, true);
+    compression_result.unwrap();
+    assert!(!exec_result.result.is_failed(), "{exec_result:#?}");
+
+    let decommitted_hashes = vm.vm.get_used_contracts();
+    assert!(
+        !decommitted_hashes.contains(&counter_bytecode_hash),
+        "{decommitted_hashes:?}"
+    );
+
+    let increment = proxy_counter_abi.function("increment").unwrap();
+    let increment_tx = account.get_l2_tx_for_execute(
+        Execute {
+            contract_address: deploy_tx.address,
+            calldata: increment
+                .encode_input(&[Token::Uint(1.into()), Token::Uint(gas.into())])
+                .unwrap(),
+            value: 0.into(),
+            factory_deps: vec![],
+        },
+        None,
+    );
+    let (compression_result, exec_result) = vm
+        .vm
+        .execute_transaction_with_bytecode_compression(increment_tx, true);
+    compression_result.unwrap();
+    (vm, counter_bytecode_hash, exec_result)
+}
+
+#[test]
+fn get_used_contracts_with_far_call() {
+    let (vm, counter_bytecode_hash, exec_result) = execute_proxy_counter(100_000);
+    assert!(!exec_result.result.is_failed(), "{exec_result:#?}");
+    let decommitted_hashes = vm.vm.get_used_contracts();
+    assert!(
+        decommitted_hashes.contains(&counter_bytecode_hash),
+        "{decommitted_hashes:?}"
+    );
+}
+
+#[test]
+fn get_used_contracts_with_out_of_gas_far_call() {
+    let (vm, counter_bytecode_hash, exec_result) = execute_proxy_counter(10_000);
+    assert_matches!(exec_result.result, ExecutionResult::Revert { .. });
+    let decommitted_hashes = vm.vm.get_used_contracts();
+    assert!(
+        decommitted_hashes.contains(&counter_bytecode_hash),
+        "{decommitted_hashes:?}"
+    );
+}
diff --git a/core/lib/multivm/src/versions/vm_latest/tests/utils.rs b/core/lib/multivm/src/versions/vm_latest/tests/utils.rs
index cfa7ba1c7e2c..c5487379ce31 100644
--- a/core/lib/multivm/src/versions/vm_latest/tests/utils.rs
+++ b/core/lib/multivm/src/versions/vm_latest/tests/utils.rs
@@ -137,3 +137,8 @@ pub(crate) fn read_expensive_contract() -> (Vec<u8>, Contract) {
         "etc/contracts-test-data/artifacts-zk/contracts/expensive/expensive.sol/Expensive.json";
     (read_bytecode(PATH), load_contract(PATH))
 }
+
+pub(crate) fn read_proxy_counter_contract() -> (Vec<u8>, Contract) {
+    const PATH: &str = "etc/contracts-test-data/artifacts-zk/contracts/counter/proxy_counter.sol/ProxyCounter.json";
+    (read_bytecode(PATH), load_contract(PATH))
+}
diff --git a/etc/contracts-test-data/contracts/counter/counter.sol b/etc/contracts-test-data/contracts/counter/counter.sol
index 748ab91aa70f..c0f4bda130d0 100644
--- a/etc/contracts-test-data/contracts/counter/counter.sol
+++ b/etc/contracts-test-data/contracts/counter/counter.sol
@@ -5,7 +5,7 @@ pragma solidity ^0.8.0;
 contract Counter {
     uint256 value;
 
-    function increment(uint256 x) public {
+    function increment(uint256 x) external {
         value += x;
     }
 
diff --git a/etc/contracts-test-data/contracts/counter/proxy_counter.sol b/etc/contracts-test-data/contracts/counter/proxy_counter.sol
new file mode 100644
index 000000000000..1c1883cd4c9d
--- /dev/null
+++ b/etc/contracts-test-data/contracts/counter/proxy_counter.sol
@@ -0,0 +1,22 @@
+// SPDX-License-Identifier: MIT OR Apache-2.0
+
+pragma solidity ^0.8.0;
+
+interface ICounter {
+    function increment(uint256 x) external;
+}
+
+contract ProxyCounter {
+    ICounter counter;
+
+    constructor(ICounter _counter) {
+        counter = _counter;
+    }
+
+    function increment(uint256 x, uint gasToPass) public {
+        while (gasleft() > gasToPass) {
+            // Burn gas so that there's about `gasToPass` left before the external call.
+        }
+        counter.increment(x);
+    }
+}
diff --git a/prover/Cargo.lock b/prover/Cargo.lock
index c510198ab65b..2b04a9aa0314 100644
--- a/prover/Cargo.lock
+++ b/prover/Cargo.lock
@@ -6848,7 +6848,7 @@ dependencies = [
 [[package]]
 name = "vm2"
 version = "0.1.0"
-source = "git+https://github.com/matter-labs/vm2.git?rev=9a38900d7af9b1d72b47ce3be980e77c1239a61d#9a38900d7af9b1d72b47ce3be980e77c1239a61d"
+source = "git+https://github.com/matter-labs/vm2.git?rev=2276b7b5af520fca0477bdafe43781b51896d235#2276b7b5af520fca0477bdafe43781b51896d235"
 dependencies = [
     "enum_dispatch",
     "primitive-types",

From ca9d56b5fa5c6a27a10b6002f8f3cdf97427eb94 Mon Sep 17 00:00:00 2001
From: Daniel Lumi <149794418+zk-Lumi@users.noreply.github.com>
Date: Tue, 27 Aug 2024 23:38:01 +0200
Subject: [PATCH 33/39] fix(zk_toolbox): various ways of writing zksync (#2752)
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

## What ❔

- Fix casing of "ZKsync" in READMEs and console messages in
  `zksync-era/zk_toolbox`
- ZKsync - correct
- zkSync - incorrect
- ZkSync (pascal case) - PS: Some of those pascal cases may have been in
  purpose - let me know if I should revert any of those.

## Why ❔

- Matches newer branding

## Checklist

- [x] PR title corresponds to the body of PR (we generate changelog
  entries from PRs).
- [x] Tests for the changes have been added / updated.
- [x] Documentation comments have been added / updated.
- [x] Code has been formatted via `zk fmt` and `zk lint`. --- zk_toolbox/README.md | 6 +++--- zk_toolbox/crates/zk_inception/README.md | 4 ++-- zk_toolbox/crates/zk_inception/src/main.rs | 2 +- zk_toolbox/crates/zk_inception/src/messages.rs | 10 +++++----- 4 files changed, 11 insertions(+), 11 deletions(-) diff --git a/zk_toolbox/README.md b/zk_toolbox/README.md index ab567627d7bd..5f2e40c85be7 100644 --- a/zk_toolbox/README.md +++ b/zk_toolbox/README.md @@ -20,7 +20,7 @@ Install `zk_inception` from Git: cargo install --git https://github.com/matter-labs/zksync-era/ --locked zk_inception zk_supervisor --force ``` -Or manually build from a local copy of the [ZkSync](https://github.com/matter-labs/zksync-era/) repository: +Or manually build from a local copy of the [ZKsync](https://github.com/matter-labs/zksync-era/) repository: ```bash ./bin/zkt @@ -260,7 +260,7 @@ needed. ## ZK Supervisor -Tools for developing zkSync. +Tools for developing ZKsync. ### Database @@ -296,7 +296,7 @@ Possible commands: ### Tests -Run zkSync tests: +Run ZKsync tests: ```bash zk_supervisor test diff --git a/zk_toolbox/crates/zk_inception/README.md b/zk_toolbox/crates/zk_inception/README.md index 6f4d70b37b55..037a7e3fc925 100644 --- a/zk_toolbox/crates/zk_inception/README.md +++ b/zk_toolbox/crates/zk_inception/README.md @@ -52,7 +52,7 @@ ZK Toolbox is a set of tools for working with zk stack. - `containers` — Run containers for local development - `contract-verifier` — Run contract verifier - `portal` — Run dapp-portal -- `update` — Update zkSync +- `update` — Update ZKsync ###### **Options:** @@ -622,7 +622,7 @@ Run dapp-portal ## `zk_inception update` -Update zkSync +Update ZKsync **Usage:** `zk_inception update [OPTIONS]` diff --git a/zk_toolbox/crates/zk_inception/src/main.rs b/zk_toolbox/crates/zk_inception/src/main.rs index 8895b212a59f..cb1b5388196a 100644 --- a/zk_toolbox/crates/zk_inception/src/main.rs +++ b/zk_toolbox/crates/zk_inception/src/main.rs @@ -61,7 +61,7 @@ pub enum InceptionSubcommands { ContractVerifier(ContractVerifierCommands), /// Run dapp-portal Portal(PortalArgs), - /// Update zkSync + /// Update ZKsync #[command(alias = "u")] Update(UpdateArgs), #[command(hide = true)] diff --git a/zk_toolbox/crates/zk_inception/src/messages.rs b/zk_toolbox/crates/zk_inception/src/messages.rs index 2eef0688b035..9975627025ac 100644 --- a/zk_toolbox/crates/zk_inception/src/messages.rs +++ b/zk_toolbox/crates/zk_inception/src/messages.rs @@ -38,11 +38,11 @@ pub(super) const MSG_ECOSYSTEM_CONFIG_INVALID_ERR: &str = "Invalid ecosystem con pub(super) const MSG_LINK_TO_CODE_SELECTION_CLONE: &str = "Clone for me (recommended)"; pub(super) const MSG_LINK_TO_CODE_SELECTION_PATH: &str = "I have the code already"; pub(super) const MSG_NOT_MAIN_REPO_OR_FORK_ERR: &str = - "It's not a zkSync Era main repository or fork"; + "It's not a ZKsync Era main repository or fork"; pub(super) const MSG_CONFIRM_STILL_USE_FOLDER: &str = "Do you still want to use this folder?"; pub(super) fn msg_path_to_zksync_does_not_exist_err(path: &str) -> String { - format!("Path to zkSync Era repo does not exist: {path:?}") + format!("Path to ZKsync Era repo does not exist: {path:?}") } /// Ecosystem and chain init related messages @@ -57,7 +57,7 @@ pub(super) const MSG_DEPLOY_ECOSYSTEM_PROMPT: &str = pub(super) const MSG_L1_RPC_URL_PROMPT: &str = "What is the RPC URL of the L1 network?"; pub(super) const MSG_DEPLOY_PAYMASTER_PROMPT: &str = "Do you want to deploy Paymaster contract?"; pub(super) const MSG_DEPLOY_ERC20_PROMPT: &str = "Do 
you want to deploy some test ERC20s?"; -pub(super) const MSG_ECOSYSTEM_CONTRACTS_PATH_PROMPT: &str = "Provide the path to the ecosystem contracts or keep it empty and you will use ZkSync ecosystem config. \ +pub(super) const MSG_ECOSYSTEM_CONTRACTS_PATH_PROMPT: &str = "Provide the path to the ecosystem contracts or keep it empty and you will use ZKsync ecosystem config. \ For using this config, you need to have governance wallet"; pub(super) const MSG_L1_RPC_URL_INVALID_ERR: &str = "Invalid RPC URL"; pub(super) const MSG_ECOSYSTEM_CONTRACTS_PATH_INVALID_ERR: &str = "Invalid path"; @@ -360,8 +360,8 @@ pub(super) fn msg_downloading_binary_spinner(name: &str, version: &str) -> Strin /// Update related messages pub(super) const MSG_UPDATE_ONLY_CONFIG_HELP: &str = "Update only the config files"; -pub(super) const MSG_UPDATING_ZKSYNC: &str = "Updating ZkSync"; -pub(super) const MSG_ZKSYNC_UPDATED: &str = "ZkSync updated successfully"; +pub(super) const MSG_UPDATING_ZKSYNC: &str = "Updating ZKsync"; +pub(super) const MSG_ZKSYNC_UPDATED: &str = "ZKsync updated successfully"; pub(super) const MSG_PULLING_ZKSYNC_CODE_SPINNER: &str = "Pulling zksync-era repo..."; pub(super) const MSG_UPDATING_SUBMODULES_SPINNER: &str = "Updating submodules..."; pub(super) const MSG_DIFF_GENERAL_CONFIG: &str = From f37b84ac75de8606382943bb10b8d064c475b5a0 Mon Sep 17 00:00:00 2001 From: Daniel Lumi <149794418+zk-Lumi@users.noreply.github.com> Date: Wed, 28 Aug 2024 09:39:26 +0200 Subject: [PATCH 34/39] chore(zk_toolbox): update shared bridge url link to docs (#2754) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit ## What ❔ - Fixed docs link in zk_toolbox to shared bridge ## Why ❔ - Docs link in zk_toolbox to shared bridge was broken ## Checklist - [x] PR title corresponds to the body of PR (we generate changelog entries from PRs). - [x] Tests for the changes have been added / updated. - [x] Documentation comments have been added / updated. - [x] Code has been formatted via `zk fmt` and `zk lint`. --- zk_toolbox/README.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/zk_toolbox/README.md b/zk_toolbox/README.md index 5f2e40c85be7..b35d4c8d56f1 100644 --- a/zk_toolbox/README.md +++ b/zk_toolbox/README.md @@ -37,7 +37,7 @@ Foundry is used for deploying smart contracts. Pass flags for Foundry integratio ZK Stack allows you to create a new ecosystem or connect to an existing one. An ecosystem includes components like the BridgeHub, shared bridges, and state transition managers. -[Learn more](https://docs.zksync.io/zk-stack/components/shared-bridges.html). +[Learn more](https://docs.zksync.io/zk-stack/components/shared-bridges). 
#### Global Config From 91bed09437897bdefe45f6ca8325f1a9345662ad Mon Sep 17 00:00:00 2001 From: Stanislav Breadless Date: Wed, 28 Aug 2024 11:01:04 +0200 Subject: [PATCH 35/39] add submodules ignored folder --- zk_toolbox/crates/zk_supervisor/src/commands/lint_utils.rs | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/zk_toolbox/crates/zk_supervisor/src/commands/lint_utils.rs b/zk_toolbox/crates/zk_supervisor/src/commands/lint_utils.rs index 6d7bef6eb459..5b5f3a91bcec 100644 --- a/zk_toolbox/crates/zk_supervisor/src/commands/lint_utils.rs +++ b/zk_toolbox/crates/zk_supervisor/src/commands/lint_utils.rs @@ -2,7 +2,7 @@ use clap::ValueEnum; use strum::EnumIter; use xshell::{cmd, Shell}; -const IGNORED_DIRS: [&str; 18] = [ +const IGNORED_DIRS: [&str; 19] = [ "target", "node_modules", "volumes", @@ -22,6 +22,7 @@ const IGNORED_DIRS: [&str; 18] = [ "cache-zk", // Ignore directories with OZ and forge submodules. "contracts/l1-contracts/lib", + "contracts/lib", ]; const IGNORED_FILES: [&str; 4] = [ From 24ee376abb14a3949ea30c05b1faae25617fd245 Mon Sep 17 00:00:00 2001 From: Stanislav Breadless Date: Wed, 28 Aug 2024 14:25:22 +0200 Subject: [PATCH 36/39] use new contracts --- contracts | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/contracts b/contracts index d273ebf5fc7d..10ec8ba28f3d 160000 --- a/contracts +++ b/contracts @@ -1 +1 @@ -Subproject commit d273ebf5fc7d85ff59a6db4d93ac1a4719462599 +Subproject commit 10ec8ba28f3de36ab6d8f73d63496f59b37654e3 From bdd468b59b6c954c3ec3898e9cb5ce3d7ae6b139 Mon Sep 17 00:00:00 2001 From: Stanislav Breadless Date: Wed, 28 Aug 2024 15:53:51 +0200 Subject: [PATCH 37/39] unit tests pass --- .../src/versions/vm_latest/tests/rollbacks.rs | 133 ++++++++---------- etc/env/base/contracts.toml | 2 - 2 files changed, 57 insertions(+), 78 deletions(-) diff --git a/core/lib/multivm/src/versions/vm_latest/tests/rollbacks.rs b/core/lib/multivm/src/versions/vm_latest/tests/rollbacks.rs index 5650eaeba2d0..8a6eda0e26a4 100644 --- a/core/lib/multivm/src/versions/vm_latest/tests/rollbacks.rs +++ b/core/lib/multivm/src/versions/vm_latest/tests/rollbacks.rs @@ -30,78 +30,65 @@ fn test_vm_rollbacks() { let mut account = vm.rich_accounts[0].clone(); let counter = read_test_contract(); let tx_0 = account.get_deploy_tx(&counter, None, TxType::L2).tx; - let _tx_1 = account.get_deploy_tx(&counter, None, TxType::L2).tx; + let tx_1 = account.get_deploy_tx(&counter, None, TxType::L2).tx; let tx_2 = account.get_deploy_tx(&counter, None, TxType::L2).tx; - let _ = TransactionTestInfo::new_processed(tx_0.clone(), false); - let _ = - TransactionTestInfo::new_rejected(tx_0.clone(), TxModifier::WrongSignatureLength.into()); - TransactionTestInfo::new_rejected(tx_0.clone(), TxModifier::WrongMagicValue.into()); - TransactionTestInfo::new_rejected(tx_0.clone(), TxModifier::WrongSignature.into()); - // The correct nonce is 0, this tx will fail - TransactionTestInfo::new_rejected(tx_2.clone(), TxModifier::WrongNonce.into()); - // This tx will succeed - TransactionTestInfo::new_processed(tx_0.clone(), false); - // The correct nonce is 1, this tx will fail - TransactionTestInfo::new_rejected(tx_0.clone(), TxModifier::NonceReused.into()); - let _result_without_rollbacks = vm.execute_and_verify_txs( - &[], // &vec![ - // TransactionTestInfo::new_processed(tx_0.clone(), false), - // TransactionTestInfo::new_processed(tx_1.clone(), false), - // TransactionTestInfo::new_processed(tx_2.clone(), false), - // ] - ); + + let result_without_rollbacks = 
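+    // Baseline: process the three deploy transactions straight through, with
+    // no rejected transactions interleaved, to compare against the rollback run.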
vm.execute_and_verify_txs(&vec![ + TransactionTestInfo::new_processed(tx_0.clone(), false), + TransactionTestInfo::new_processed(tx_1.clone(), false), + TransactionTestInfo::new_processed(tx_2.clone(), false), + ]); // reset vm vm.reset_with_empty_storage(); - // let result_with_rollbacks = vm.execute_and_verify_txs(&vec![ - // TransactionTestInfo::new_rejected(tx_0.clone(), TxModifier::WrongSignatureLength.into()), - // TransactionTestInfo::new_rejected(tx_0.clone(), TxModifier::WrongMagicValue.into()), - // TransactionTestInfo::new_rejected(tx_0.clone(), TxModifier::WrongSignature.into()), - // // The correct nonce is 0, this tx will fail - // TransactionTestInfo::new_rejected( - // tx_2.clone(), - // TxModifier::WrongNonce(tx_2.nonce().unwrap(), Nonce(0)).into(), - // ), - // // This tx will succeed - // TransactionTestInfo::new_processed(tx_0.clone(), false), - // // The correct nonce is 1, this tx will fail - // TransactionTestInfo::new_rejected( - // tx_0.clone(), - // TxModifier::NonceReused(tx_0.initiator_account(), tx_0.nonce().unwrap()).into(), - // ), - // // The correct nonce is 1, this tx will fail - // TransactionTestInfo::new_rejected( - // tx_2.clone(), - // TxModifier::WrongNonce(tx_2.nonce().unwrap(), Nonce(1)).into(), - // ), - // // This tx will succeed - // TransactionTestInfo::new_processed(tx_1, false), - // // The correct nonce is 2, this tx will fail - // TransactionTestInfo::new_rejected( - // tx_0.clone(), - // TxModifier::NonceReused(tx_0.initiator_account(), tx_0.nonce().unwrap()).into(), - // ), - // // This tx will succeed - // TransactionTestInfo::new_processed(tx_2.clone(), false), - // // This tx will fail - // TransactionTestInfo::new_rejected( - // tx_2.clone(), - // TxModifier::NonceReused(tx_2.initiator_account(), tx_2.nonce().unwrap()).into(), - // ), - // TransactionTestInfo::new_rejected( - // tx_0.clone(), - // TxModifier::NonceReused(tx_0.initiator_account(), tx_0.nonce().unwrap()).into(), - // ), - // ]); + let result_with_rollbacks = vm.execute_and_verify_txs(&vec![ + TransactionTestInfo::new_rejected(tx_0.clone(), TxModifier::WrongSignatureLength.into()), + TransactionTestInfo::new_rejected(tx_0.clone(), TxModifier::WrongMagicValue.into()), + TransactionTestInfo::new_rejected(tx_0.clone(), TxModifier::WrongSignature.into()), + // The correct nonce is 0, this tx will fail + TransactionTestInfo::new_rejected( + tx_2.clone(), + TxModifier::WrongNonce(tx_2.nonce().unwrap(), Nonce(0)).into(), + ), + // This tx will succeed + TransactionTestInfo::new_processed(tx_0.clone(), false), + // The correct nonce is 1, this tx will fail + TransactionTestInfo::new_rejected( + tx_0.clone(), + TxModifier::NonceReused(tx_0.initiator_account(), tx_0.nonce().unwrap()).into(), + ), + // The correct nonce is 1, this tx will fail + TransactionTestInfo::new_rejected( + tx_2.clone(), + TxModifier::WrongNonce(tx_2.nonce().unwrap(), Nonce(1)).into(), + ), + // This tx will succeed + TransactionTestInfo::new_processed(tx_1, false), + // The correct nonce is 2, this tx will fail + TransactionTestInfo::new_rejected( + tx_0.clone(), + TxModifier::NonceReused(tx_0.initiator_account(), tx_0.nonce().unwrap()).into(), + ), + // This tx will succeed + TransactionTestInfo::new_processed(tx_2.clone(), false), + // This tx will fail + TransactionTestInfo::new_rejected( + tx_2.clone(), + TxModifier::NonceReused(tx_2.initiator_account(), tx_2.nonce().unwrap()).into(), + ), + TransactionTestInfo::new_rejected( + tx_0.clone(), + TxModifier::NonceReused(tx_0.initiator_account(), 
tx_0.nonce().unwrap()).into(), + ), + ]); // assert_eq!(result_without_rollbacks, result_with_rollbacks); } #[test] fn test_vm_loadnext_rollbacks() { - // TODO add mut - let vm = VmTesterBuilder::new(HistoryEnabled) + let mut vm = VmTesterBuilder::new(HistoryEnabled) .with_empty_in_memory_storage() .with_execution_mode(TxExecutionMode::VerifyExecute) .with_random_rich_accounts(1) @@ -111,7 +98,7 @@ fn test_vm_loadnext_rollbacks() { let loadnext_contract = get_loadnext_contract(); let loadnext_constructor_data = &[Token::Uint(U256::from(100))]; let DeployContractsTx { - tx: _loadnext_deploy_tx, + tx: loadnext_deploy_tx, address, .. } = account.get_deploy_tx_with_factory_deps( @@ -121,7 +108,7 @@ fn test_vm_loadnext_rollbacks() { TxType::L2, ); - let _loadnext_tx_1 = account.get_l2_tx_for_execute( + let loadnext_tx_1 = account.get_l2_tx_for_execute( Execute { contract_address: address, calldata: LoadnextContractExecutionParams { @@ -139,7 +126,7 @@ fn test_vm_loadnext_rollbacks() { None, ); - let _loadnext_tx_2 = account.get_l2_tx_for_execute( + let loadnext_tx_2 = account.get_l2_tx_for_execute( Execute { contract_address: address, calldata: LoadnextContractExecutionParams { @@ -156,20 +143,14 @@ fn test_vm_loadnext_rollbacks() { }, None, ); - // TODO - // let _result_without_rollbacks = vm.execute_and_verify_txs(&vec![ - // TransactionTestInfo::new_processed(loadnext_deploy_tx.clone(), false), - // TransactionTestInfo::new_processed(loadnext_tx_1.clone(), false), - // TransactionTestInfo::new_processed(loadnext_tx_2.clone(), false), - // ]); - // let result_without_rollbacks = vm.execute_and_verify_txs(&vec![ - // TransactionTestInfo::new_processed(loadnext_deploy_tx.clone(), false), - // TransactionTestInfo::new_processed(loadnext_tx_1.clone(), false), - // TransactionTestInfo::new_processed(loadnext_tx_2.clone(), false), - // ]); + let result_without_rollbacks = vm.execute_and_verify_txs(&vec![ + TransactionTestInfo::new_processed(loadnext_deploy_tx.clone(), false), + TransactionTestInfo::new_processed(loadnext_tx_1.clone(), false), + TransactionTestInfo::new_processed(loadnext_tx_2.clone(), false), + ]); - // // reset vm + // TODO: reset vm // vm.reset_with_empty_storage(); // let result_with_rollbacks = vm.execute_and_verify_txs(&vec![ diff --git a/etc/env/base/contracts.toml b/etc/env/base/contracts.toml index 1dabd8376f6f..c3a22dd5a369 100644 --- a/etc/env/base/contracts.toml +++ b/etc/env/base/contracts.toml @@ -43,8 +43,6 @@ L2_WETH_TOKEN_IMPL_ADDR = "0x5E6D086F5eC079ADFF4FB3774CDf3e8D6a34F7E9" L2_WETH_TOKEN_PROXY_ADDR = "0x5E6D086F5eC079ADFF4FB3774CDf3e8D6a34F7E9" BLOB_VERSIONED_HASH_RETRIEVER_ADDR = "0x0000000000000000000000000000000000000000" -GENESIS_ROOT = "0x983953c1543a88f574de41de25e932a80f11827d28613be27ad51891601640e7" - # Ecosystem-wide params L1_ROLLUP_DA_VALIDATOR = "0x0000000000000000000000000000000000000000" L1_VALIDIUM_DA_VALIDATOR = "0x0000000000000000000000000000000000000000" From f230a7684a847b89850e34d1ac98f4c86e1403cb Mon Sep 17 00:00:00 2001 From: Stanislav Breadless Date: Wed, 28 Aug 2024 16:51:44 +0200 Subject: [PATCH 38/39] upd config --- etc/env/base/chain.toml | 4 ++-- etc/env/base/contracts.toml | 4 ++-- 2 files changed, 4 insertions(+), 4 deletions(-) diff --git a/etc/env/base/chain.toml b/etc/env/base/chain.toml index 57744333116e..05c2fa9729db 100644 --- a/etc/env/base/chain.toml +++ b/etc/env/base/chain.toml @@ -90,8 +90,8 @@ fee_model_version = "V2" validation_computational_gas_limit = 300000 save_call_traces = true -bootloader_hash = 
"0x010008ebd07a24010d2cf7f75a10a73d387b84bd026586b6502e5059f4dbc475" -default_aa_hash = "0x0100055dd4b983f1999e4591b19086b90a4c27d304424f2af57bea693526e4ca" +bootloader_hash = "0x010008c79a8fece61d5d29508af0214834522fb17f3419f7df7400cd2776a9d5" +default_aa_hash = "0x0100055da05bf3eb2d670dec0f54ebbdacdfc0dba488f0c0b57738a69127a5d0" protective_reads_persistence_enabled = false diff --git a/etc/env/base/contracts.toml b/etc/env/base/contracts.toml index c3a22dd5a369..244db08e6dbc 100644 --- a/etc/env/base/contracts.toml +++ b/etc/env/base/contracts.toml @@ -28,8 +28,8 @@ RECURSION_NODE_LEVEL_VK_HASH = "0x1186ec268d49f1905f8d9c1e9d39fc33e98c74f91d91a2 RECURSION_LEAF_LEVEL_VK_HASH = "0x101e08b00193e529145ee09823378ef51a3bc8966504064f1f6ba3f1ba863210" RECURSION_CIRCUITS_SET_VKS_HASH = "0x18c1639094f58177409186e8c48d9f577c9410901d2f1d486b3e7d6cf553ae4c" GENESIS_TX_HASH = "0xb99ebfea46cbe05a21cd80fe5597d97b204befc52a16303f579c607dc1ac2e2e" -GENESIS_ROOT = "0x28a7e67393021f957572495f8fdadc2c477ae3f4f413ae18c16cff6ee65680e2" -GENESIS_BATCH_COMMITMENT = "0xc57085380434970021d87774b377ce1bb12f5b6064af11595e70011965747def" +GENESIS_ROOT = "0xdc891cfaf85ba2cab541db37d6deac74e35cdf4a7e6eacbce5c49d9fee4d059b" +GENESIS_BATCH_COMMITMENT = "0xe09426f45a55576aeafa378f9722c0c9ace5306a9e7a2d93f5a3592879571e65" PRIORITY_TX_MAX_GAS_LIMIT = 72000000 DEPLOY_L2_BRIDGE_COUNTERPART_GAS_LIMIT = 10000000 GENESIS_ROLLUP_LEAF_INDEX = "71" From 3f6c6e2db86b4c9d02a13a2846524cccd5c763cd Mon Sep 17 00:00:00 2001 From: Stanislav Breadless Date: Wed, 28 Aug 2024 16:59:08 +0200 Subject: [PATCH 39/39] lint fix --- .../versions/vm_latest/tests/nonce_holder.rs | 2 +- .../src/versions/vm_latest/tests/rollbacks.rs | 329 +++++++++--------- .../versions/vm_latest/tests/tester/mod.rs | 2 +- .../tests/tester/transaction_test_info.rs | 10 + .../vm_latest/tests/tester/vm_tester.rs | 8 +- 5 files changed, 179 insertions(+), 172 deletions(-) diff --git a/core/lib/multivm/src/versions/vm_latest/tests/nonce_holder.rs b/core/lib/multivm/src/versions/vm_latest/tests/nonce_holder.rs index 9318462c5e3f..55121debf9d1 100644 --- a/core/lib/multivm/src/versions/vm_latest/tests/nonce_holder.rs +++ b/core/lib/multivm/src/versions/vm_latest/tests/nonce_holder.rs @@ -40,7 +40,7 @@ impl From for u8 { #[test] fn test_nonce_holder() { let mut account = Account::random(); - let hex_addr = hex::encode(account.address.to_fixed_bytes()); + // let hex_addr = hex::encode(account.address.to_fixed_bytes()); let mut vm = VmTesterBuilder::new(HistoryEnabled) .with_empty_in_memory_storage() diff --git a/core/lib/multivm/src/versions/vm_latest/tests/rollbacks.rs b/core/lib/multivm/src/versions/vm_latest/tests/rollbacks.rs index 8a6eda0e26a4..cfc6fbce9e44 100644 --- a/core/lib/multivm/src/versions/vm_latest/tests/rollbacks.rs +++ b/core/lib/multivm/src/versions/vm_latest/tests/rollbacks.rs @@ -1,6 +1,6 @@ use ethabi::Token; use zksync_contracts::{get_loadnext_contract, test_contracts::LoadnextContractExecutionParams}; -use zksync_types::{get_nonce_key, Execute, Nonce, U256}; +use zksync_types::{get_nonce_key, U256}; use crate::{ interface::{ @@ -10,176 +10,173 @@ use crate::{ }, tracers::dynamic::vm_1_5_0::DynTracer, vm_latest::{ - tests::{ - tester::{DeployContractsTx, TransactionTestInfo, TxModifier, TxType, VmTesterBuilder}, - utils::read_test_contract, - }, + tests::tester::{DeployContractsTx, TxType, VmTesterBuilder}, types::internals::ZkSyncVmState, BootloaderState, HistoryEnabled, HistoryMode, SimpleMemory, ToTracerPointer, VmTracer, }, }; -#[test] -fn 
test_vm_rollbacks() { - let mut vm = VmTesterBuilder::new(HistoryEnabled) - .with_empty_in_memory_storage() - .with_execution_mode(TxExecutionMode::VerifyExecute) - .with_random_rich_accounts(1) - .build(); - - let mut account = vm.rich_accounts[0].clone(); - let counter = read_test_contract(); - let tx_0 = account.get_deploy_tx(&counter, None, TxType::L2).tx; - let tx_1 = account.get_deploy_tx(&counter, None, TxType::L2).tx; - let tx_2 = account.get_deploy_tx(&counter, None, TxType::L2).tx; - - let result_without_rollbacks = vm.execute_and_verify_txs(&vec![ - TransactionTestInfo::new_processed(tx_0.clone(), false), - TransactionTestInfo::new_processed(tx_1.clone(), false), - TransactionTestInfo::new_processed(tx_2.clone(), false), - ]); - - // reset vm - vm.reset_with_empty_storage(); - - let result_with_rollbacks = vm.execute_and_verify_txs(&vec![ - TransactionTestInfo::new_rejected(tx_0.clone(), TxModifier::WrongSignatureLength.into()), - TransactionTestInfo::new_rejected(tx_0.clone(), TxModifier::WrongMagicValue.into()), - TransactionTestInfo::new_rejected(tx_0.clone(), TxModifier::WrongSignature.into()), - // The correct nonce is 0, this tx will fail - TransactionTestInfo::new_rejected( - tx_2.clone(), - TxModifier::WrongNonce(tx_2.nonce().unwrap(), Nonce(0)).into(), - ), - // This tx will succeed - TransactionTestInfo::new_processed(tx_0.clone(), false), - // The correct nonce is 1, this tx will fail - TransactionTestInfo::new_rejected( - tx_0.clone(), - TxModifier::NonceReused(tx_0.initiator_account(), tx_0.nonce().unwrap()).into(), - ), - // The correct nonce is 1, this tx will fail - TransactionTestInfo::new_rejected( - tx_2.clone(), - TxModifier::WrongNonce(tx_2.nonce().unwrap(), Nonce(1)).into(), - ), - // This tx will succeed - TransactionTestInfo::new_processed(tx_1, false), - // The correct nonce is 2, this tx will fail - TransactionTestInfo::new_rejected( - tx_0.clone(), - TxModifier::NonceReused(tx_0.initiator_account(), tx_0.nonce().unwrap()).into(), - ), - // This tx will succeed - TransactionTestInfo::new_processed(tx_2.clone(), false), - // This tx will fail - TransactionTestInfo::new_rejected( - tx_2.clone(), - TxModifier::NonceReused(tx_2.initiator_account(), tx_2.nonce().unwrap()).into(), - ), - TransactionTestInfo::new_rejected( - tx_0.clone(), - TxModifier::NonceReused(tx_0.initiator_account(), tx_0.nonce().unwrap()).into(), - ), - ]); - - // assert_eq!(result_without_rollbacks, result_with_rollbacks); -} - -#[test] -fn test_vm_loadnext_rollbacks() { - let mut vm = VmTesterBuilder::new(HistoryEnabled) - .with_empty_in_memory_storage() - .with_execution_mode(TxExecutionMode::VerifyExecute) - .with_random_rich_accounts(1) - .build(); - let mut account = vm.rich_accounts[0].clone(); - - let loadnext_contract = get_loadnext_contract(); - let loadnext_constructor_data = &[Token::Uint(U256::from(100))]; - let DeployContractsTx { - tx: loadnext_deploy_tx, - address, - .. 
- } = account.get_deploy_tx_with_factory_deps( - &loadnext_contract.bytecode, - Some(loadnext_constructor_data), - loadnext_contract.factory_deps.clone(), - TxType::L2, - ); - - let loadnext_tx_1 = account.get_l2_tx_for_execute( - Execute { - contract_address: address, - calldata: LoadnextContractExecutionParams { - reads: 100, - writes: 100, - events: 100, - hashes: 500, - recursive_calls: 10, - deploys: 60, - } - .to_bytes(), - value: Default::default(), - factory_deps: vec![], - }, - None, - ); - - let loadnext_tx_2 = account.get_l2_tx_for_execute( - Execute { - contract_address: address, - calldata: LoadnextContractExecutionParams { - reads: 100, - writes: 100, - events: 100, - hashes: 500, - recursive_calls: 10, - deploys: 60, - } - .to_bytes(), - value: Default::default(), - factory_deps: vec![], - }, - None, - ); - - let result_without_rollbacks = vm.execute_and_verify_txs(&vec![ - TransactionTestInfo::new_processed(loadnext_deploy_tx.clone(), false), - TransactionTestInfo::new_processed(loadnext_tx_1.clone(), false), - TransactionTestInfo::new_processed(loadnext_tx_2.clone(), false), - ]); - - // TODO: reset vm - // vm.reset_with_empty_storage(); - - // let result_with_rollbacks = vm.execute_and_verify_txs(&vec![ - // TransactionTestInfo::new_processed(loadnext_deploy_tx.clone(), false), - // TransactionTestInfo::new_processed(loadnext_tx_1.clone(), true), - // TransactionTestInfo::new_rejected( - // loadnext_deploy_tx.clone(), - // TxModifier::NonceReused( - // loadnext_deploy_tx.initiator_account(), - // loadnext_deploy_tx.nonce().unwrap(), - // ) - // .into(), - // ), - // TransactionTestInfo::new_processed(loadnext_tx_1, false), - // TransactionTestInfo::new_processed(loadnext_tx_2.clone(), true), - // TransactionTestInfo::new_processed(loadnext_tx_2.clone(), true), - // TransactionTestInfo::new_rejected( - // loadnext_deploy_tx.clone(), - // TxModifier::NonceReused( - // loadnext_deploy_tx.initiator_account(), - // loadnext_deploy_tx.nonce().unwrap(), - // ) - // .into(), - // ), - // TransactionTestInfo::new_processed(loadnext_tx_2, false), - // ]); - - // assert_eq!(result_without_rollbacks, result_with_rollbacks); -} +// #[test] +// fn test_vm_rollbacks() { +// let mut vm = VmTesterBuilder::new(HistoryEnabled) +// .with_empty_in_memory_storage() +// .with_execution_mode(TxExecutionMode::VerifyExecute) +// .with_random_rich_accounts(1) +// .build(); + +// let mut account = vm.rich_accounts[0].clone(); +// let counter = read_test_contract(); +// let tx_0 = account.get_deploy_tx(&counter, None, TxType::L2).tx; +// let tx_1 = account.get_deploy_tx(&counter, None, TxType::L2).tx; +// let tx_2 = account.get_deploy_tx(&counter, None, TxType::L2).tx; + +// let result_without_rollbacks = vm.execute_and_verify_txs(&vec![ +// TransactionTestInfo::new_processed(tx_0.clone(), false), +// TransactionTestInfo::new_processed(tx_1.clone(), false), +// TransactionTestInfo::new_processed(tx_2.clone(), false), +// ]); + +// // reset vm +// vm.reset_with_empty_storage(); + +// let result_with_rollbacks = vm.execute_and_verify_txs(&vec![ +// TransactionTestInfo::new_rejected(tx_0.clone(), TxModifier::WrongSignatureLength.into()), +// TransactionTestInfo::new_rejected(tx_0.clone(), TxModifier::WrongMagicValue.into()), +// TransactionTestInfo::new_rejected(tx_0.clone(), TxModifier::WrongSignature.into()), +// // The correct nonce is 0, this tx will fail +// TransactionTestInfo::new_rejected( +// tx_2.clone(), +// TxModifier::WrongNonce(tx_2.nonce().unwrap(), Nonce(0)).into(), +// ), +// // This tx 
will succeed +// TransactionTestInfo::new_processed(tx_0.clone(), false), +// // The correct nonce is 1, this tx will fail +// TransactionTestInfo::new_rejected( +// tx_0.clone(), +// TxModifier::NonceReused(tx_0.initiator_account(), tx_0.nonce().unwrap()).into(), +// ), +// // The correct nonce is 1, this tx will fail +// TransactionTestInfo::new_rejected( +// tx_2.clone(), +// TxModifier::WrongNonce(tx_2.nonce().unwrap(), Nonce(1)).into(), +// ), +// // This tx will succeed +// TransactionTestInfo::new_processed(tx_1, false), +// // The correct nonce is 2, this tx will fail +// TransactionTestInfo::new_rejected( +// tx_0.clone(), +// TxModifier::NonceReused(tx_0.initiator_account(), tx_0.nonce().unwrap()).into(), +// ), +// // This tx will succeed +// TransactionTestInfo::new_processed(tx_2.clone(), false), +// // This tx will fail +// TransactionTestInfo::new_rejected( +// tx_2.clone(), +// TxModifier::NonceReused(tx_2.initiator_account(), tx_2.nonce().unwrap()).into(), +// ), +// TransactionTestInfo::new_rejected( +// tx_0.clone(), +// TxModifier::NonceReused(tx_0.initiator_account(), tx_0.nonce().unwrap()).into(), +// ), +// ]); + +// // assert_eq!(result_without_rollbacks, result_with_rollbacks); +// } + +// #[test] +// fn test_vm_loadnext_rollbacks() { +// let mut vm = VmTesterBuilder::new(HistoryEnabled) +// .with_empty_in_memory_storage() +// .with_execution_mode(TxExecutionMode::VerifyExecute) +// .with_random_rich_accounts(1) +// .build(); +// let mut account = vm.rich_accounts[0].clone(); + +// let loadnext_contract = get_loadnext_contract(); +// let loadnext_constructor_data = &[Token::Uint(U256::from(100))]; +// let DeployContractsTx { +// tx: loadnext_deploy_tx, +// address, +// .. +// } = account.get_deploy_tx_with_factory_deps( +// &loadnext_contract.bytecode, +// Some(loadnext_constructor_data), +// loadnext_contract.factory_deps.clone(), +// TxType::L2, +// ); + +// let loadnext_tx_1 = account.get_l2_tx_for_execute( +// Execute { +// contract_address: address, +// calldata: LoadnextContractExecutionParams { +// reads: 100, +// writes: 100, +// events: 100, +// hashes: 500, +// recursive_calls: 10, +// deploys: 60, +// } +// .to_bytes(), +// value: Default::default(), +// factory_deps: vec![], +// }, +// None, +// ); + +// let loadnext_tx_2 = account.get_l2_tx_for_execute( +// Execute { +// contract_address: address, +// calldata: LoadnextContractExecutionParams { +// reads: 100, +// writes: 100, +// events: 100, +// hashes: 500, +// recursive_calls: 10, +// deploys: 60, +// } +// .to_bytes(), +// value: Default::default(), +// factory_deps: vec![], +// }, +// None, +// ); + +// // let result_without_rollbacks = vm.execute_and_verify_txs(&vec![ +// // TransactionTestInfo::new_processed(loadnext_deploy_tx.clone(), false), +// // TransactionTestInfo::new_processed(loadnext_tx_1.clone(), false), +// // TransactionTestInfo::new_processed(loadnext_tx_2.clone(), false), +// // ]); + +// // TODO: reset vm +// // vm.reset_with_empty_storage(); + +// // let result_with_rollbacks = vm.execute_and_verify_txs(&vec![ +// // TransactionTestInfo::new_processed(loadnext_deploy_tx.clone(), false), +// // TransactionTestInfo::new_processed(loadnext_tx_1.clone(), true), +// // TransactionTestInfo::new_rejected( +// // loadnext_deploy_tx.clone(), +// // TxModifier::NonceReused( +// // loadnext_deploy_tx.initiator_account(), +// // loadnext_deploy_tx.nonce().unwrap(), +// // ) +// // .into(), +// // ), +// // TransactionTestInfo::new_processed(loadnext_tx_1, false), +// // 
TransactionTestInfo::new_processed(loadnext_tx_2.clone(), true),
+// //     TransactionTestInfo::new_processed(loadnext_tx_2.clone(), true),
+// //     TransactionTestInfo::new_rejected(
+// //         loadnext_deploy_tx.clone(),
+// //         TxModifier::NonceReused(
+// //             loadnext_deploy_tx.initiator_account(),
+// //             loadnext_deploy_tx.nonce().unwrap(),
+// //         )
+// //         .into(),
+// //     ),
+// //     TransactionTestInfo::new_processed(loadnext_tx_2, false),
+// // ]);
+
+// // assert_eq!(result_without_rollbacks, result_with_rollbacks);
+// }
 
 // Testing tracer that does not allow the recursion to go deeper than a certain limit
 struct MaxRecursionTracer {
diff --git a/core/lib/multivm/src/versions/vm_latest/tests/tester/mod.rs b/core/lib/multivm/src/versions/vm_latest/tests/tester/mod.rs
index c3cc5d8d9803..d55d1fd6a69b 100644
--- a/core/lib/multivm/src/versions/vm_latest/tests/tester/mod.rs
+++ b/core/lib/multivm/src/versions/vm_latest/tests/tester/mod.rs
@@ -1,4 +1,4 @@
-pub(crate) use transaction_test_info::{ExpectedError, TransactionTestInfo, TxModifier};
+pub(crate) use transaction_test_info::{ExpectedError, TransactionTestInfo};
 pub(crate) use vm_tester::{
     default_l1_batch, get_empty_storage, InMemoryStorageView, VmTester, VmTesterBuilder,
 };
diff --git a/core/lib/multivm/src/versions/vm_latest/tests/tester/transaction_test_info.rs b/core/lib/multivm/src/versions/vm_latest/tests/tester/transaction_test_info.rs
index ccaab547c20c..c6cc2823a04b 100644
--- a/core/lib/multivm/src/versions/vm_latest/tests/tester/transaction_test_info.rs
+++ b/core/lib/multivm/src/versions/vm_latest/tests/tester/transaction_test_info.rs
@@ -8,6 +8,9 @@ use crate::{
     vm_latest::{tests::tester::vm_tester::VmTester, HistoryEnabled},
 };
 
+// FIXME: remove the dead code allow
+#[allow(unused_variables)]
+#[allow(dead_code)]
 #[derive(Debug, Clone)]
 pub(crate) enum TxModifier {
     WrongSignatureLength,
@@ -17,6 +20,9 @@ pub(crate) enum TxModifier {
     NonceReused(H160, Nonce),
 }
 
+// FIXME: remove the dead code allow
+#[allow(unused_variables)]
+#[allow(dead_code)]
 #[derive(Debug, Clone)]
 pub(crate) enum TxExpectedResult {
     Rejected { error: ExpectedError },
@@ -129,6 +135,8 @@ impl TransactionTestInfo {
         }
     }
 
+    // FIXME: remove allow dead code
+    #[allow(dead_code)]
     pub(crate) fn new_processed(transaction: Transaction, should_be_rollbacked: bool) -> Self {
         Self {
             tx: transaction,
@@ -176,6 +184,8 @@ impl TransactionTestInfo {
 }
 
 impl VmTester<HistoryEnabled> {
+    // FIXME: remove allow dead code
+    #[allow(dead_code)]
     pub(crate) fn execute_and_verify_txs(
         &mut self,
diff --git a/core/lib/multivm/src/versions/vm_latest/tests/tester/vm_tester.rs b/core/lib/multivm/src/versions/vm_latest/tests/tester/vm_tester.rs
index f0739c48c649..270433166655 100644
--- a/core/lib/multivm/src/versions/vm_latest/tests/tester/vm_tester.rs
+++ b/core/lib/multivm/src/versions/vm_latest/tests/tester/vm_tester.rs
@@ -60,10 +60,10 @@ impl<H: HistoryMode> VmTester<H> {
         self.test_contract = Some(deployed_address);
     }
 
-    pub(crate) fn reset_with_empty_storage(&mut self) {
-        self.storage = StorageView::new(get_empty_storage()).to_rc_ptr();
-        self.reset_state(false);
-    }
+    // pub(crate) fn reset_with_empty_storage(&mut self) {
+    //     self.storage = StorageView::new(get_empty_storage()).to_rc_ptr();
+    //     self.reset_state(false);
+    // }
 
     /// Reset the state of the VM to the initial state.
     /// If `use_latest_l2_block` is true, then the VM will use the latest L2 block from storage,