From 4629450d2d40c2c4c28255e51d5bb67d588ba837 Mon Sep 17 00:00:00 2001 From: perekopskiy <53865202+perekopskiy@users.noreply.github.com> Date: Wed, 23 Oct 2024 21:33:32 +0300 Subject: [PATCH 01/32] ci: unify fmt check (#3159) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit ## What ❔ prover ci should check formatting with zkstack_cli ## Why ❔ ## Checklist - [ ] PR title corresponds to the body of PR (we generate changelog entries from PRs). - [ ] Tests for the changes have been added / updated. - [ ] Documentation comments have been added / updated. - [ ] Code has been formatted via `zkstack dev fmt` and `zkstack dev lint`. --- .github/workflows/ci-prover-reusable.yml | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/.github/workflows/ci-prover-reusable.yml b/.github/workflows/ci-prover-reusable.yml index 6cb9c26d21e7..4154885549b8 100644 --- a/.github/workflows/ci-prover-reusable.yml +++ b/.github/workflows/ci-prover-reusable.yml @@ -40,7 +40,9 @@ jobs: ci_run zkstack dev db setup --prover-url=${{ env.prover_url }} --core-url=${{ env.core_url }} - name: Formatting - run: ci_run bash -c "cd prover && cargo fmt --check" + run: | + ci_run git config --global --add safe.directory /usr/src/zksync + ci_run zkstack dev fmt --check rustfmt unit-tests: runs-on: [ matterlabs-ci-runner-highmem-long ] From 561fc1bddfc79061dab9d8d150baa06acfa90692 Mon Sep 17 00:00:00 2001 From: QEDK <1994constant@gmail.com> Date: Thu, 24 Oct 2024 06:04:10 +0530 Subject: [PATCH 02/32] feat: Implement gas relay mode and inclusion data for data attestation (#3070) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit ## What ❔ This PR adds gas relay API support for gasless submission to the Avail network, it also provides the attestation implementation necessary for data attestation. ## Why ❔ Gas relay API support is required for Avail partners that choose to pay in a different token. Data attestation ensures that arbitrary tx data cannot be used for rollup finality and that no data withholding attack can occur. ## Checklist - [X] PR title corresponds to the body of PR (we generate changelog entries from PRs). - [ ] Tests for the changes have been added / updated. - [X] Documentation comments have been added / updated. - [X] Code has been formatted via `zk fmt` and `zk lint`. 
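As a quick orientation for the configuration changes in the diff below, here is a minimal sketch (not part of the patch) of how the new full-client / gas-relay split is expressed with the types this PR introduces; the function name, URLs and numeric values are placeholders, while the type and field names come from `core/lib/config/src/configs/da_client/avail.rs` in this diff:

```rust
// Hypothetical usage sketch based on the types added in this PR.
use zksync_config::configs::da_client::avail::{
    AvailClientConfig, AvailConfig, AvailGasRelayConfig,
};

fn example_gas_relay_config() -> AvailConfig {
    AvailConfig {
        // The bridge API is still queried for inclusion proofs in both client modes.
        bridge_api_url: "https://bridge-api.example.avail".to_string(),
        timeout: 30,
        // Gas-relay mode: blobs are posted through the relay instead of a full node.
        config: AvailClientConfig::GasRelay(AvailGasRelayConfig {
            gas_relay_api_url: "https://gas-relay.example.avail".to_string(),
            max_retries: 5,
        }),
    }
}
```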
Supersedes #2987 --------- Co-authored-by: vibhurajeev Co-authored-by: dimazhornyk Co-authored-by: Dima Zhornyk <55756184+dimazhornyk@users.noreply.github.com> --- .gitignore | 1 + Cargo.lock | 3 + Cargo.toml | 3 +- core/lib/basic_types/src/api_key.rs | 20 ++ core/lib/basic_types/src/lib.rs | 1 + .../lib/config/src/configs/da_client/avail.rs | 28 ++- core/lib/config/src/testonly.rs | 16 +- core/lib/env_config/src/da_client.rs | 80 +++++-- core/lib/protobuf_config/src/da_client.rs | 53 +++- .../src/proto/config/da_client.proto | 20 +- .../src/proto/config/secrets.proto | 1 + core/lib/protobuf_config/src/secrets.rs | 54 ++++- core/node/da_clients/Cargo.toml | 3 + core/node/da_clients/src/avail/client.rs | 226 +++++++++++++++--- core/node/da_clients/src/avail/sdk.rs | 100 +++++++- 15 files changed, 512 insertions(+), 97 deletions(-) create mode 100644 core/lib/basic_types/src/api_key.rs diff --git a/.gitignore b/.gitignore index 86ed40c70417..adf3b7799618 100644 --- a/.gitignore +++ b/.gitignore @@ -115,6 +115,7 @@ prover/data/keys/setup_* # ZK Stack CLI chains/era/configs/* chains/gateway/* +chains/avail/* configs/* era-observability/ core/tests/ts-integration/deployments-zk diff --git a/Cargo.lock b/Cargo.lock index 7e4cad34cf8f..a42ef8e3fdcc 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -10028,14 +10028,17 @@ version = "0.1.0" dependencies = [ "anyhow", "async-trait", + "backon", "base58", "blake2 0.10.6", "blake2b_simd", + "bytes", "flate2", "futures 0.3.30", "hex", "jsonrpsee 0.23.2", "parity-scale-codec", + "reqwest 0.12.7", "scale-encode", "serde", "serde_json", diff --git a/Cargo.toml b/Cargo.toml index f1e70e7f3028..0f8e6ba77ae6 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -110,6 +110,7 @@ backon = "0.4.4" bigdecimal = "0.4.5" bincode = "1" blake2 = "0.10" +bytes = "1" chrono = "0.4" clap = "4.2.2" codegen = "0.2.0" @@ -155,7 +156,7 @@ rayon = "1.3.1" regex = "1" reqwest = "0.12" rlp = "0.5" -rocksdb = "0.21.0" +rocksdb = "0.21" rustc_version = "0.4.0" rustls = "0.23" secp256k1 = { version = "0.27.0", features = ["recovery", "global-context"] } diff --git a/core/lib/basic_types/src/api_key.rs b/core/lib/basic_types/src/api_key.rs new file mode 100644 index 000000000000..eadf4e9051b5 --- /dev/null +++ b/core/lib/basic_types/src/api_key.rs @@ -0,0 +1,20 @@ +use std::str::FromStr; + +use secrecy::{ExposeSecret, Secret}; + +#[derive(Debug, Clone)] +pub struct APIKey(pub Secret); + +impl PartialEq for APIKey { + fn eq(&self, other: &Self) -> bool { + self.0.expose_secret().eq(other.0.expose_secret()) + } +} + +impl FromStr for APIKey { + type Err = anyhow::Error; + + fn from_str(s: &str) -> Result { + Ok(APIKey(s.parse()?)) + } +} diff --git a/core/lib/basic_types/src/lib.rs b/core/lib/basic_types/src/lib.rs index 79c7b3924e34..7953f362fd42 100644 --- a/core/lib/basic_types/src/lib.rs +++ b/core/lib/basic_types/src/lib.rs @@ -24,6 +24,7 @@ use serde::{de, Deserialize, Deserializer, Serialize}; #[macro_use] mod macros; +pub mod api_key; pub mod basic_fri_types; pub mod commitment; pub mod network; diff --git a/core/lib/config/src/configs/da_client/avail.rs b/core/lib/config/src/configs/da_client/avail.rs index 590dc5fef18a..b8e9db0f3937 100644 --- a/core/lib/config/src/configs/da_client/avail.rs +++ b/core/lib/config/src/configs/da_client/avail.rs @@ -1,16 +1,38 @@ use serde::Deserialize; -use zksync_basic_types::seed_phrase::SeedPhrase; +use zksync_basic_types::{api_key::APIKey, seed_phrase::SeedPhrase}; + +pub const AVAIL_GAS_RELAY_CLIENT_NAME: &str = "GasRelay"; +pub const 
AVAIL_FULL_CLIENT_NAME: &str = "FullClient"; + +#[derive(Clone, Debug, PartialEq, Deserialize)] +#[serde(tag = "avail_client")] +pub enum AvailClientConfig { + FullClient(AvailDefaultConfig), + GasRelay(AvailGasRelayConfig), +} #[derive(Clone, Debug, PartialEq, Deserialize)] pub struct AvailConfig { - pub api_node_url: String, pub bridge_api_url: String, - pub app_id: u32, pub timeout: usize, + #[serde(flatten)] + pub config: AvailClientConfig, +} + +#[derive(Clone, Debug, PartialEq, Deserialize)] +pub struct AvailDefaultConfig { + pub api_node_url: String, + pub app_id: u32, +} + +#[derive(Clone, Debug, PartialEq, Deserialize)] +pub struct AvailGasRelayConfig { + pub gas_relay_api_url: String, pub max_retries: usize, } #[derive(Clone, Debug, PartialEq)] pub struct AvailSecrets { pub seed_phrase: Option, + pub gas_relay_api_key: Option, } diff --git a/core/lib/config/src/testonly.rs b/core/lib/config/src/testonly.rs index 9b1ec13e2d2e..880bc5aa98d2 100644 --- a/core/lib/config/src/testonly.rs +++ b/core/lib/config/src/testonly.rs @@ -3,6 +3,7 @@ use std::num::NonZeroUsize; use rand::{distributions::Distribution, Rng}; use secrecy::Secret; use zksync_basic_types::{ + api_key::APIKey, basic_fri_types::CircuitIdRoundTuple, commitment::L1BatchCommitmentMode, network::Network, @@ -17,7 +18,12 @@ use zksync_crypto_primitives::K256PrivateKey; use crate::{ configs::{ - self, da_client::DAClientConfig::Avail, external_price_api_client::ForcedPriceClientConfig, + self, + da_client::{ + avail::{AvailClientConfig, AvailDefaultConfig}, + DAClientConfig::Avail, + }, + external_price_api_client::ForcedPriceClientConfig, }, AvailConfig, }; @@ -935,11 +941,12 @@ impl Distribution for EncodeDist { impl Distribution for EncodeDist { fn sample(&self, rng: &mut R) -> configs::da_client::DAClientConfig { Avail(AvailConfig { - api_node_url: self.sample(rng), bridge_api_url: self.sample(rng), - app_id: self.sample(rng), timeout: self.sample(rng), - max_retries: self.sample(rng), + config: AvailClientConfig::FullClient(AvailDefaultConfig { + api_node_url: self.sample(rng), + app_id: self.sample(rng), + }), }) } } @@ -948,6 +955,7 @@ impl Distribution for EncodeDist { fn sample(&self, rng: &mut R) -> configs::secrets::DataAvailabilitySecrets { configs::secrets::DataAvailabilitySecrets::Avail(configs::da_client::avail::AvailSecrets { seed_phrase: Some(SeedPhrase(Secret::new(self.sample(rng)))), + gas_relay_api_key: Some(APIKey(Secret::new(self.sample(rng)))), }) } } diff --git a/core/lib/env_config/src/da_client.rs b/core/lib/env_config/src/da_client.rs index 0fc3ad216f87..1043786fc1eb 100644 --- a/core/lib/env_config/src/da_client.rs +++ b/core/lib/env_config/src/da_client.rs @@ -2,19 +2,34 @@ use std::env; use zksync_config::configs::{ da_client::{ - avail::AvailSecrets, DAClientConfig, AVAIL_CLIENT_CONFIG_NAME, - OBJECT_STORE_CLIENT_CONFIG_NAME, + avail::{ + AvailClientConfig, AvailSecrets, AVAIL_FULL_CLIENT_NAME, AVAIL_GAS_RELAY_CLIENT_NAME, + }, + DAClientConfig, AVAIL_CLIENT_CONFIG_NAME, OBJECT_STORE_CLIENT_CONFIG_NAME, }, secrets::DataAvailabilitySecrets, + AvailConfig, }; use crate::{envy_load, FromEnv}; impl FromEnv for DAClientConfig { fn from_env() -> anyhow::Result { - let client_tag = std::env::var("DA_CLIENT")?; + let client_tag = env::var("DA_CLIENT")?; let config = match client_tag.as_str() { - AVAIL_CLIENT_CONFIG_NAME => Self::Avail(envy_load("da_avail_config", "DA_")?), + AVAIL_CLIENT_CONFIG_NAME => Self::Avail(AvailConfig { + bridge_api_url: env::var("DA_BRIDGE_API_URL").ok().unwrap(), + timeout: 
env::var("DA_TIMEOUT")?.parse()?, + config: match env::var("DA_AVAIL_CLIENT_TYPE")?.as_str() { + AVAIL_FULL_CLIENT_NAME => { + AvailClientConfig::FullClient(envy_load("da_avail_full_client", "DA_")?) + } + AVAIL_GAS_RELAY_CLIENT_NAME => { + AvailClientConfig::GasRelay(envy_load("da_avail_gas_relay", "DA_")?) + } + _ => anyhow::bail!("Unknown Avail DA client type"), + }, + }), OBJECT_STORE_CLIENT_CONFIG_NAME => { Self::ObjectStore(envy_load("da_object_store", "DA_")?) } @@ -30,11 +45,21 @@ impl FromEnv for DataAvailabilitySecrets { let client_tag = std::env::var("DA_CLIENT")?; let secrets = match client_tag.as_str() { AVAIL_CLIENT_CONFIG_NAME => { - let seed_phrase = env::var("DA_SECRETS_SEED_PHRASE") - .ok() - .map(|s| s.parse()) - .transpose()?; - Self::Avail(AvailSecrets { seed_phrase }) + let seed_phrase: Option = + env::var("DA_SECRETS_SEED_PHRASE") + .ok() + .map(|s| s.parse().unwrap()); + let gas_relay_api_key: Option = + env::var("DA_SECRETS_GAS_RELAY_API_KEY") + .ok() + .map(|s| s.parse().unwrap()); + if seed_phrase.is_none() && gas_relay_api_key.is_none() { + anyhow::bail!("No secrets provided for Avail DA client"); + } + Self::Avail(AvailSecrets { + seed_phrase, + gas_relay_api_key, + }) } _ => anyhow::bail!("Unknown DA client name: {}", client_tag), }; @@ -47,7 +72,10 @@ impl FromEnv for DataAvailabilitySecrets { mod tests { use zksync_config::{ configs::{ - da_client::{DAClientConfig, DAClientConfig::ObjectStore}, + da_client::{ + avail::{AvailClientConfig, AvailDefaultConfig}, + DAClientConfig::{self, ObjectStore}, + }, object_store::ObjectStoreMode::GCS, }, AvailConfig, ObjectStoreConfig, @@ -91,14 +119,14 @@ mod tests { bridge_api_url: &str, app_id: u32, timeout: usize, - max_retries: usize, ) -> DAClientConfig { DAClientConfig::Avail(AvailConfig { - api_node_url: api_node_url.to_string(), bridge_api_url: bridge_api_url.to_string(), - app_id, timeout, - max_retries, + config: AvailClientConfig::FullClient(AvailDefaultConfig { + api_node_url: api_node_url.to_string(), + app_id, + }), }) } @@ -107,11 +135,13 @@ mod tests { let mut lock = MUTEX.lock(); let config = r#" DA_CLIENT="Avail" - DA_API_NODE_URL="localhost:12345" + DA_AVAIL_CLIENT_TYPE="FullClient" + DA_BRIDGE_API_URL="localhost:54321" - DA_APP_ID="1" DA_TIMEOUT="2" - DA_MAX_RETRIES="3" + + DA_API_NODE_URL="localhost:12345" + DA_APP_ID="1" "#; lock.set_env(config); @@ -124,7 +154,6 @@ mod tests { "localhost:54321", "1".parse::().unwrap(), "2".parse::().unwrap(), - "3".parse::().unwrap(), ) ); } @@ -139,15 +168,18 @@ mod tests { lock.set_env(config); - let actual = match DataAvailabilitySecrets::from_env().unwrap() { - DataAvailabilitySecrets::Avail(avail) => avail.seed_phrase, + let (actual_seed, actual_key) = match DataAvailabilitySecrets::from_env().unwrap() { + DataAvailabilitySecrets::Avail(avail) => (avail.seed_phrase, avail.gas_relay_api_key), }; assert_eq!( - actual.unwrap(), - "bottom drive obey lake curtain smoke basket hold race lonely fit walk" - .parse() - .unwrap() + (actual_seed.unwrap(), actual_key), + ( + "bottom drive obey lake curtain smoke basket hold race lonely fit walk" + .parse() + .unwrap(), + None + ) ); } } diff --git a/core/lib/protobuf_config/src/da_client.rs b/core/lib/protobuf_config/src/da_client.rs index 1499e88efb4c..a17a8711a27b 100644 --- a/core/lib/protobuf_config/src/da_client.rs +++ b/core/lib/protobuf_config/src/da_client.rs @@ -1,10 +1,10 @@ use anyhow::Context; -use zksync_config::{ - configs::{ - da_client::DAClientConfig::{Avail, ObjectStore}, - {self}, +use 
zksync_config::configs::{ + self, + da_client::{ + avail::{AvailClientConfig, AvailConfig, AvailDefaultConfig, AvailGasRelayConfig}, + DAClientConfig::{Avail, ObjectStore}, }, - AvailConfig, }; use zksync_protobuf::{required, ProtoRepr}; @@ -18,15 +18,31 @@ impl ProtoRepr for proto::DataAvailabilityClient { let client = match config { proto::data_availability_client::Config::Avail(conf) => Avail(AvailConfig { - api_node_url: required(&conf.api_node_url) - .context("api_node_url")? - .clone(), bridge_api_url: required(&conf.bridge_api_url) .context("bridge_api_url")? .clone(), - app_id: *required(&conf.app_id).context("app_id")?, timeout: *required(&conf.timeout).context("timeout")? as usize, - max_retries: *required(&conf.max_retries).context("max_retries")? as usize, + config: match conf.config.as_ref() { + Some(proto::avail_config::Config::FullClient(full_client_conf)) => { + AvailClientConfig::FullClient(AvailDefaultConfig { + api_node_url: required(&full_client_conf.api_node_url) + .context("api_node_url")? + .clone(), + app_id: *required(&full_client_conf.app_id).context("app_id")?, + }) + } + Some(proto::avail_config::Config::GasRelay(gas_relay_conf)) => { + AvailClientConfig::GasRelay(AvailGasRelayConfig { + gas_relay_api_url: required(&gas_relay_conf.gas_relay_api_url) + .context("gas_relay_api_url")? + .clone(), + max_retries: *required(&gas_relay_conf.max_retries) + .context("max_retries")? + as usize, + }) + } + None => return Err(anyhow::anyhow!("Invalid Avail DA configuration")), + }, }), proto::data_availability_client::Config::ObjectStore(conf) => { ObjectStore(object_store_proto::ObjectStore::read(conf)?) @@ -41,11 +57,22 @@ impl ProtoRepr for proto::DataAvailabilityClient { Avail(config) => Self { config: Some(proto::data_availability_client::Config::Avail( proto::AvailConfig { - api_node_url: Some(config.api_node_url.clone()), bridge_api_url: Some(config.bridge_api_url.clone()), - app_id: Some(config.app_id), timeout: Some(config.timeout as u64), - max_retries: Some(config.max_retries as u64), + config: match &config.config { + AvailClientConfig::FullClient(conf) => Some( + proto::avail_config::Config::FullClient(proto::AvailClientConfig { + api_node_url: Some(conf.api_node_url.clone()), + app_id: Some(conf.app_id), + }), + ), + AvailClientConfig::GasRelay(conf) => Some( + proto::avail_config::Config::GasRelay(proto::AvailGasRelayConfig { + gas_relay_api_url: Some(conf.gas_relay_api_url.clone()), + max_retries: Some(conf.max_retries as u64), + }), + ), + }, }, )), }, diff --git a/core/lib/protobuf_config/src/proto/config/da_client.proto b/core/lib/protobuf_config/src/proto/config/da_client.proto index d01bda2c8470..73fa2435996f 100644 --- a/core/lib/protobuf_config/src/proto/config/da_client.proto +++ b/core/lib/protobuf_config/src/proto/config/da_client.proto @@ -5,12 +5,26 @@ package zksync.config.da_client; import "zksync/config/object_store.proto"; message AvailConfig { - optional string api_node_url = 1; optional string bridge_api_url = 2; - optional uint32 app_id = 4; optional uint64 timeout = 5; - optional uint64 max_retries = 6; + oneof config { + AvailClientConfig full_client = 7; + AvailGasRelayConfig gas_relay = 8; + } + reserved 1; reserved "api_node_url"; reserved 3; reserved "seed"; + reserved 4; reserved "app_id"; + reserved 6; reserved "max_retries"; +} + +message AvailClientConfig { + optional string api_node_url = 1; + optional uint32 app_id = 2; +} + +message AvailGasRelayConfig { + optional string gas_relay_api_url = 1; + optional uint64 max_retries = 
2; } message DataAvailabilityClient { diff --git a/core/lib/protobuf_config/src/proto/config/secrets.proto b/core/lib/protobuf_config/src/proto/config/secrets.proto index 17b915b3f087..43c4542783c7 100644 --- a/core/lib/protobuf_config/src/proto/config/secrets.proto +++ b/core/lib/protobuf_config/src/proto/config/secrets.proto @@ -21,6 +21,7 @@ message ConsensusSecrets { message AvailSecret { optional string seed_phrase = 1; + optional string gas_relay_api_key = 2; } message DataAvailabilitySecrets { diff --git a/core/lib/protobuf_config/src/secrets.rs b/core/lib/protobuf_config/src/secrets.rs index 587351480078..07ab340c2313 100644 --- a/core/lib/protobuf_config/src/secrets.rs +++ b/core/lib/protobuf_config/src/secrets.rs @@ -2,7 +2,7 @@ use std::str::FromStr; use anyhow::Context; use secrecy::ExposeSecret; -use zksync_basic_types::{seed_phrase::SeedPhrase, url::SensitiveUrl}; +use zksync_basic_types::{api_key::APIKey, seed_phrase::SeedPhrase, url::SensitiveUrl}; use zksync_config::configs::{ consensus::{AttesterSecretKey, ConsensusSecrets, NodeSecretKey, ValidatorSecretKey}, da_client::avail::AvailSecrets, @@ -103,14 +103,31 @@ impl ProtoRepr for proto::DataAvailabilitySecrets { let secrets = required(&self.da_secrets).context("config")?; let client = match secrets { - DaSecrets::Avail(avail_secret) => DataAvailabilitySecrets::Avail(AvailSecrets { - seed_phrase: Some( - SeedPhrase::from_str( - required(&avail_secret.seed_phrase).context("seed_phrase")?, - ) - .unwrap(), - ), - }), + DaSecrets::Avail(avail_secret) => { + let seed_phrase = match avail_secret.seed_phrase.as_ref() { + Some(seed) => match SeedPhrase::from_str(seed) { + Ok(seed) => Some(seed), + Err(_) => None, + }, + None => None, + }; + let gas_relay_api_key = match avail_secret.gas_relay_api_key.as_ref() { + Some(api_key) => match APIKey::from_str(api_key) { + Ok(api_key) => Some(api_key), + Err(_) => None, + }, + None => None, + }; + if seed_phrase.is_none() && gas_relay_api_key.is_none() { + return Err(anyhow::anyhow!( + "At least one of seed_phrase or gas_relay_api_key must be provided" + )); + } + DataAvailabilitySecrets::Avail(AvailSecrets { + seed_phrase, + gas_relay_api_key, + }) + } }; Ok(client) @@ -133,7 +150,24 @@ impl ProtoRepr for proto::DataAvailabilitySecrets { None }; - Some(DaSecrets::Avail(AvailSecret { seed_phrase })) + let gas_relay_api_key = if config.gas_relay_api_key.is_some() { + Some( + config + .clone() + .gas_relay_api_key + .unwrap() + .0 + .expose_secret() + .to_string(), + ) + } else { + None + }; + + Some(DaSecrets::Avail(AvailSecret { + seed_phrase, + gas_relay_api_key, + })) } }; diff --git a/core/node/da_clients/Cargo.toml b/core/node/da_clients/Cargo.toml index 60b65067f48d..fa2f15920bd0 100644 --- a/core/node/da_clients/Cargo.toml +++ b/core/node/da_clients/Cargo.toml @@ -37,3 +37,6 @@ blake2b_simd.workspace = true jsonrpsee = { workspace = true, features = ["ws-client"] } parity-scale-codec = { workspace = true, features = ["derive"] } subxt-signer = { workspace = true, features = ["sr25519", "native"] } +reqwest = { workspace = true } +bytes = { workspace = true } +backon.workspace = true diff --git a/core/node/da_clients/src/avail/client.rs b/core/node/da_clients/src/avail/client.rs index 7718691bf185..46d652d57137 100644 --- a/core/node/da_clients/src/avail/client.rs +++ b/core/node/da_clients/src/avail/client.rs @@ -1,34 +1,133 @@ -use std::{fmt::Debug, sync::Arc}; +use std::{fmt::Debug, sync::Arc, time::Duration}; +use anyhow::anyhow; use async_trait::async_trait; use 
jsonrpsee::ws_client::WsClientBuilder; +use serde::{Deserialize, Serialize}; use subxt_signer::ExposeSecret; -use zksync_config::configs::da_client::avail::{AvailConfig, AvailSecrets}; +use zksync_config::configs::da_client::avail::{AvailClientConfig, AvailConfig, AvailSecrets}; use zksync_da_client::{ types::{DAError, DispatchResponse, InclusionData}, DataAvailabilityClient, }; +use zksync_types::{ + ethabi::{self, Token}, + web3::contract::Tokenize, + H256, U256, +}; + +use crate::avail::sdk::{GasRelayClient, RawAvailClient}; -use crate::avail::sdk::RawAvailClient; +#[derive(Debug, Clone)] +enum AvailClientMode { + Default(Box), + GasRelay(GasRelayClient), +} /// An implementation of the `DataAvailabilityClient` trait that interacts with the Avail network. #[derive(Debug, Clone)] pub struct AvailClient { config: AvailConfig, - sdk_client: Arc, + sdk_client: Arc, + api_client: Arc, // bridge API reqwest client +} + +#[derive(Deserialize, Serialize, Debug, Clone)] +#[serde(rename_all = "camelCase")] +pub struct BridgeAPIResponse { + blob_root: Option, + bridge_root: Option, + data_root_index: Option, + data_root_proof: Option>, + leaf: Option, + leaf_index: Option, + leaf_proof: Option>, + range_hash: Option, + error: Option, +} + +#[derive(Deserialize, Serialize, Debug)] +#[serde(rename_all = "camelCase")] +struct MerkleProofInput { + // proof of inclusion for the data root + data_root_proof: Vec, + // proof of inclusion of leaf within blob/bridge root + leaf_proof: Vec, + // abi.encodePacked(startBlock, endBlock) of header range commitment on vectorx + range_hash: H256, + // index of the data root in the commitment tree + data_root_index: U256, + // blob root to check proof against, or reconstruct the data root + blob_root: H256, + // bridge root to check proof against, or reconstruct the data root + bridge_root: H256, + // leaf being proven + leaf: H256, + // index of the leaf in the blob/bridge root tree + leaf_index: U256, +} + +impl Tokenize for MerkleProofInput { + fn into_tokens(self) -> Vec { + vec![Token::Tuple(vec![ + Token::Array( + self.data_root_proof + .iter() + .map(|x| Token::FixedBytes(x.as_bytes().to_vec())) + .collect(), + ), + Token::Array( + self.leaf_proof + .iter() + .map(|x| Token::FixedBytes(x.as_bytes().to_vec())) + .collect(), + ), + Token::FixedBytes(self.range_hash.as_bytes().to_vec()), + Token::Uint(self.data_root_index), + Token::FixedBytes(self.blob_root.as_bytes().to_vec()), + Token::FixedBytes(self.bridge_root.as_bytes().to_vec()), + Token::FixedBytes(self.leaf.as_bytes().to_vec()), + Token::Uint(self.leaf_index), + ])] + } } impl AvailClient { pub async fn new(config: AvailConfig, secrets: AvailSecrets) -> anyhow::Result { - let seed_phrase = secrets - .seed_phrase - .ok_or_else(|| anyhow::anyhow!("seed phrase"))?; - let sdk_client = RawAvailClient::new(config.app_id, seed_phrase.0.expose_secret()).await?; - - Ok(Self { - config, - sdk_client: Arc::new(sdk_client), - }) + let api_client = Arc::new(reqwest::Client::new()); + match config.config.clone() { + AvailClientConfig::GasRelay(conf) => { + let gas_relay_api_key = secrets + .gas_relay_api_key + .ok_or_else(|| anyhow::anyhow!("Gas relay API key is missing"))?; + let gas_relay_client = GasRelayClient::new( + &conf.gas_relay_api_url, + gas_relay_api_key.0.expose_secret(), + conf.max_retries, + Arc::clone(&api_client), + ) + .await?; + Ok(Self { + config, + sdk_client: Arc::new(AvailClientMode::GasRelay(gas_relay_client)), + api_client, + }) + } + AvailClientConfig::FullClient(conf) => { + let 
seed_phrase = secrets + .seed_phrase + .ok_or_else(|| anyhow::anyhow!("Seed phrase is missing"))?; + // these unwraps are safe because we validate in protobuf config + let sdk_client = + RawAvailClient::new(conf.app_id, seed_phrase.0.expose_secret()).await?; + + Ok(Self { + config, + sdk_client: Arc::new(AvailClientMode::Default(Box::new(sdk_client))), + api_client, + }) + } + } } } @@ -39,37 +138,83 @@ impl DataAvailabilityClient for AvailClient { _: u32, // batch_number data: Vec, ) -> anyhow::Result { - let client = WsClientBuilder::default() - .build(self.config.api_node_url.as_str()) - .await - .map_err(to_non_retriable_da_error)?; + match self.sdk_client.as_ref() { + AvailClientMode::Default(client) => { + let default_config = match &self.config.config { + AvailClientConfig::FullClient(conf) => conf, + _ => unreachable!(), // validated in protobuf config + }; + let ws_client = WsClientBuilder::default() + .build(default_config.api_node_url.clone().as_str()) + .await + .map_err(to_non_retriable_da_error)?; - let extrinsic = self - .sdk_client - .build_extrinsic(&client, data) - .await - .map_err(to_non_retriable_da_error)?; + let extrinsic = client + .build_extrinsic(&ws_client, data) + .await + .map_err(to_non_retriable_da_error)?; - let block_hash = self - .sdk_client - .submit_extrinsic(&client, extrinsic.as_str()) - .await - .map_err(to_non_retriable_da_error)?; - let tx_id = self - .sdk_client - .get_tx_id(&client, block_hash.as_str(), extrinsic.as_str()) - .await - .map_err(to_non_retriable_da_error)?; - - Ok(DispatchResponse::from(format!("{}:{}", block_hash, tx_id))) + let block_hash = client + .submit_extrinsic(&ws_client, extrinsic.as_str()) + .await + .map_err(to_non_retriable_da_error)?; + let tx_id = client + .get_tx_id(&ws_client, block_hash.as_str(), extrinsic.as_str()) + .await + .map_err(to_non_retriable_da_error)?; + Ok(DispatchResponse::from(format!("{}:{}", block_hash, tx_id))) + } + AvailClientMode::GasRelay(client) => { + let (block_hash, extrinsic_index) = client + .post_data(data) + .await + .map_err(to_retriable_da_error)?; + Ok(DispatchResponse { + blob_id: format!("{:x}:{}", block_hash, extrinsic_index), + }) + } + } } async fn get_inclusion_data( &self, - _blob_id: &str, + blob_id: &str, ) -> anyhow::Result, DAError> { - // TODO: implement inclusion data retrieval - Ok(Some(InclusionData { data: vec![] })) + let (block_hash, tx_idx) = blob_id.split_once(':').ok_or_else(|| DAError { + error: anyhow!("Invalid blob ID format"), + is_retriable: false, + })?; + let url = format!( + "{}/eth/proof/{}?index={}", + self.config.bridge_api_url, block_hash, tx_idx + ); + + let response = self + .api_client + .get(&url) + .timeout(Duration::from_secs(self.config.timeout as u64)) + .send() + .await + .map_err(to_retriable_da_error)?; + + let bridge_api_data = response + .json::() + .await + .map_err(to_retriable_da_error)?; + + let attestation_data: MerkleProofInput = MerkleProofInput { + data_root_proof: bridge_api_data.data_root_proof.unwrap(), + leaf_proof: bridge_api_data.leaf_proof.unwrap(), + range_hash: bridge_api_data.range_hash.unwrap(), + data_root_index: bridge_api_data.data_root_index.unwrap(), + blob_root: bridge_api_data.blob_root.unwrap(), + bridge_root: bridge_api_data.bridge_root.unwrap(), + leaf: bridge_api_data.leaf.unwrap(), + leaf_index: bridge_api_data.leaf_index.unwrap(), + }; + Ok(Some(InclusionData { + data: ethabi::encode(&attestation_data.into_tokens()), + })) } fn clone_boxed(&self) -> Box { @@ -87,3 +232,10 @@ pub fn 
to_non_retriable_da_error(error: impl Into) -> DAError { is_retriable: false, } } + +pub fn to_retriable_da_error(error: impl Into) -> DAError { + DAError { + error: error.into(), + is_retriable: true, + } +} diff --git a/core/node/da_clients/src/avail/sdk.rs b/core/node/da_clients/src/avail/sdk.rs index 002422109d05..f693280ba4a9 100644 --- a/core/node/da_clients/src/avail/sdk.rs +++ b/core/node/da_clients/src/avail/sdk.rs @@ -1,18 +1,22 @@ //! Minimal reimplementation of the Avail SDK client required for the DA client implementation. //! This is considered to be a temporary solution until a mature SDK is available on crates.io -use std::fmt::Debug; +use std::{fmt::Debug, sync::Arc, time}; +use backon::{ConstantBuilder, Retryable}; +use bytes::Bytes; use jsonrpsee::{ core::client::{Client, ClientT, Subscription, SubscriptionClientT}, rpc_params, }; use parity_scale_codec::{Compact, Decode, Encode}; use scale_encode::EncodeAsFields; +use serde::{Deserialize, Serialize}; use subxt_signer::{ bip39::Mnemonic, sr25519::{Keypair, Signature}, }; +use zksync_types::H256; use crate::avail::client::to_non_retriable_da_error; @@ -287,7 +291,7 @@ impl RawAvailClient { let status = sub.next().await.transpose()?; if status.is_some() && status.as_ref().unwrap().is_object() { - if let Some(block_hash) = status.unwrap().get("inBlock") { + if let Some(block_hash) = status.unwrap().get("finalized") { break block_hash .as_str() .ok_or_else(|| anyhow::anyhow!("Invalid block hash"))? @@ -369,3 +373,95 @@ fn ss58hash(data: &[u8]) -> Vec { ctx.update(data); ctx.finalize().to_vec() } + +/// An implementation of the `DataAvailabilityClient` trait that interacts with the Avail network. +#[derive(Debug, Clone)] +pub(crate) struct GasRelayClient { + api_url: String, + api_key: String, + max_retries: usize, + api_client: Arc, +} + +#[derive(Deserialize, Serialize, Debug, Clone)] +pub struct GasRelayAPISubmissionResponse { + submission_id: String, +} + +#[derive(Deserialize, Serialize, Debug, Clone)] +pub struct GasRelayAPIStatusResponse { + submission: GasRelayAPISubmission, +} + +#[derive(Deserialize, Serialize, Debug, Clone)] +pub struct GasRelayAPISubmission { + block_hash: Option, + extrinsic_index: Option, +} + +impl GasRelayClient { + const DEFAULT_INCLUSION_DELAY: time::Duration = time::Duration::from_secs(60); + const RETRY_DELAY: time::Duration = time::Duration::from_secs(5); + pub(crate) async fn new( + api_url: &str, + api_key: &str, + max_retries: usize, + api_client: Arc, + ) -> anyhow::Result { + Ok(Self { + api_url: api_url.to_owned(), + api_key: api_key.to_owned(), + max_retries, + api_client, + }) + } + + pub(crate) async fn post_data(&self, data: Vec) -> anyhow::Result<(H256, u64)> { + let submit_url = format!("{}/user/submit_raw_data?token=ethereum", &self.api_url); + // send the data to the gas relay + let submit_response = self + .api_client + .post(&submit_url) + .body(Bytes::from(data)) + .header("Content-Type", "text/plain") + .header("Authorization", &self.api_key) + .send() + .await?; + + let submit_response = submit_response + .json::() + .await?; + + let status_url = format!( + "{}/user/get_submission_info?submission_id={}", + self.api_url, submit_response.submission_id + ); + + tokio::time::sleep(Self::DEFAULT_INCLUSION_DELAY).await; + let status_response = (|| async { + self.api_client + .get(&status_url) + .header("Authorization", &self.api_key) + .send() + .await + }) + .retry( + &ConstantBuilder::default() + .with_delay(Self::RETRY_DELAY) + .with_max_times(self.max_retries), + ) + 
.await?; + + let status_response = status_response.json::().await?; + let (block_hash, extrinsic_index) = ( + status_response.submission.block_hash.ok_or_else(|| { + anyhow::anyhow!("Block hash not found in the response from the gas relay") + })?, + status_response.submission.extrinsic_index.ok_or_else(|| { + anyhow::anyhow!("Extrinsic index not found in the response from the gas relay") + })?, + ); + + Ok((block_hash, extrinsic_index)) + } +} From 04f4daef85b618b76dda618906b9d8b09cddfe58 Mon Sep 17 00:00:00 2001 From: Daniyar Itegulov Date: Thu, 24 Oct 2024 15:04:47 +1100 Subject: [PATCH 03/32] chore: lower no base token ratio log level (#3141) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit ## What ❔ Lowers no base token ratio log level to `WARN` instead of `ERROR` ## Why ❔ It gets printed >500 times during integration tests making it hard to find the actual errors. ## Checklist - [ ] PR title corresponds to the body of PR (we generate changelog entries from PRs). - [ ] Tests for the changes have been added / updated. - [ ] Documentation comments have been added / updated. - [ ] Code has been formatted via `zkstack dev fmt` and `zkstack dev lint`. --- core/node/base_token_adjuster/src/base_token_ratio_provider.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/core/node/base_token_adjuster/src/base_token_ratio_provider.rs b/core/node/base_token_adjuster/src/base_token_ratio_provider.rs index e16ea16ff0f5..b613e5219dd2 100644 --- a/core/node/base_token_adjuster/src/base_token_ratio_provider.rs +++ b/core/node/base_token_adjuster/src/base_token_ratio_provider.rs @@ -81,7 +81,7 @@ impl DBBaseTokenRatioProvider { // Though the DB should be populated very soon after the server starts, it is possible // to have no ratios in the DB right after genesis. Having initial ratios in the DB // from the genesis stage will eliminate this possibility. - tracing::error!("No latest price found in the database. Using default ratio."); + tracing::warn!("No latest price found in the database. Using default ratio."); BaseTokenConversionRatio::default() } Err(err) => anyhow::bail!("Failed to get latest base token ratio: {:?}", err), From 6719429312fdf5137459711aa54da16e550c4919 Mon Sep 17 00:00:00 2001 From: Yury Akudovich Date: Thu, 24 Oct 2024 09:58:16 +0200 Subject: [PATCH 04/32] ci: Remove invalid step from GAR build workflow (#3164) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit ## What ❔ ## Why ❔ There is no need to remove local docker image. ## Checklist - [x] PR title corresponds to the body of PR (we generate changelog entries from PRs). - [ ] Tests for the changes have been added / updated. - [ ] Documentation comments have been added / updated. - [ ] Code has been formatted via `zkstack dev fmt` and `zkstack dev lint`. 
--- .../build-prover-fri-gpu-gar-and-circuit-prover-gpu-gar.yml | 4 ---- 1 file changed, 4 deletions(-) diff --git a/.github/workflows/build-prover-fri-gpu-gar-and-circuit-prover-gpu-gar.yml b/.github/workflows/build-prover-fri-gpu-gar-and-circuit-prover-gpu-gar.yml index 4639f8c77c41..30990889caf6 100644 --- a/.github/workflows/build-prover-fri-gpu-gar-and-circuit-prover-gpu-gar.yml +++ b/.github/workflows/build-prover-fri-gpu-gar-and-circuit-prover-gpu-gar.yml @@ -69,10 +69,6 @@ jobs: --tag europe-docker.pkg.dev/matterlabs-infra/matterlabs-docker/prover-fri-gpu-gar:2.0-${{ inputs.protocol_version }}-${{ inputs.image_tag_suffix }} \ us-docker.pkg.dev/matterlabs-infra/matterlabs-docker/prover-fri-gpu-gar:2.0-${{ inputs.protocol_version }}-${{ inputs.image_tag_suffix }} - - name: Remove prover-gpu-fri-gar image to free space - run: | - docker image rm us-docker.pkg.dev/matterlabs-infra/matterlabs-docker/prover-fri-gpu-gar:2.0-${{ inputs.protocol_version }}-${{ inputs.image_tag_suffix }} - - name: Move Setup data from prover-gpu-fri-gar to circuit-prover-gpu-gar run: | mv -v docker/prover-gpu-fri-gar/*.bin docker/circuit-prover-gpu-gar/ From 16f275756cd28024a6b11ac1ac327eb5b8b446e1 Mon Sep 17 00:00:00 2001 From: perekopskiy <53865202+perekopskiy@users.noreply.github.com> Date: Thu, 24 Oct 2024 12:07:20 +0300 Subject: [PATCH 05/32] feat: gateway preparation (#3006) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit ## What ❔ - adds new fields to DB tables and rust structs - adds new config variables - update commitment generator to work with post-gateway - adds new vm subversion (vm fast is not changed yet) ## Why ❔ prepare for gateway, reduce sync-layer-stable diff ## Checklist - [ ] PR title corresponds to the body of PR (we generate changelog entries from PRs). - [ ] Tests for the changes have been added / updated. - [ ] Documentation comments have been added / updated. - [ ] Code has been formatted via `zk_supervisor fmt` and `zk_supervisor lint`. 
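Before the (large) diffstat, a small illustrative sketch of two of the new building blocks this PR adds in `core/lib/basic_types/src/commitment.rs`: the `PubdataParams` struct and the `FromStr` impl for `L1BatchCommitmentMode`. The address and the helper function are placeholders, not values used by the code:

```rust
// Illustrative only; mirrors the types added in core/lib/basic_types/src/commitment.rs.
use std::str::FromStr;

use zksync_basic_types::{
    commitment::{L1BatchCommitmentMode, PubdataParams},
    Address,
};

fn example_pubdata_params() -> PubdataParams {
    // "Rollup" and "Validium" are the two accepted string values for the commitment mode.
    let pubdata_type = L1BatchCommitmentMode::from_str("Rollup").expect("known mode");
    PubdataParams {
        // Placeholder address for the L2 DA validator contract.
        l2_da_validator_address: Address::repeat_byte(0x1a),
        pubdata_type,
    }
}
```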
--- Cargo.lock | 2 + core/bin/external_node/src/config/mod.rs | 8 + core/bin/external_node/src/node_builder.rs | 20 +- core/bin/snapshots_creator/src/tests.rs | 1 + .../system-constants-generator/src/utils.rs | 9 +- core/bin/zksync_server/src/node_builder.rs | 6 +- core/lib/basic_types/src/commitment.rs | 24 +- core/lib/basic_types/src/protocol_version.rs | 7 + core/lib/basic_types/src/vm.rs | 1 + core/lib/config/src/configs/contracts.rs | 9 + core/lib/config/src/testonly.rs | 2 + core/lib/constants/src/contracts.rs | 26 +- core/lib/constants/src/lib.rs | 1 + core/lib/constants/src/message_root.rs | 5 + core/lib/constants/src/system_logs.rs | 11 +- core/lib/contracts/src/lib.rs | 14 + ...01396dacefc0cea8cbcf5807185eb00fc0f7.json} | 30 +- ...0cc9e176729744c779fee97ca9392ae8a8c8.json} | 18 +- ...11345ef888824e0ca3c5f39befbbc5bd0388.json} | 7 +- ...892118f5732374e62f35e27800422afb5746.json} | 30 +- ...2f38816f163a3e3fba4fdbb81076b969e970.json} | 30 +- ...911add046315e5f8877bc57a34e3dadf9e37.json} | 30 +- ...7bd02627ebaf2df7c5ad517cb60a243182d2.json} | 16 +- ...3369701d7cd5f75ca031bf77ca27d0437cb9.json} | 30 +- ...33f6503bc79cc9f809d35c558e275ba117ba.json} | 8 +- ...806fcc54d73216a7dc54be6ba210ef02d789.json} | 30 +- ...0e9c138d0a6d35e9ce7fc396c5e76fbc25dd.json} | 30 +- ...673e4b5bba059ebe07bbbb64578881db030b.json} | 30 +- ...7999eabb611338925abe9dc9e64c837183d9.json} | 18 +- .../20240925103531_gateway_upgrade.down.sql | 8 + .../20240925103531_gateway_upgrade.up.sql | 11 + core/lib/dal/src/blocks_dal.rs | 92 +- core/lib/dal/src/consensus/conv.rs | 45 +- core/lib/dal/src/consensus/mod.rs | 5 +- core/lib/dal/src/consensus/proto/mod.proto | 11 + core/lib/dal/src/consensus/tests.rs | 11 +- core/lib/dal/src/consensus_dal/tests.rs | 3 + core/lib/dal/src/eth_watcher_dal.rs | 2 +- core/lib/dal/src/lib.rs | 4 +- core/lib/dal/src/models/storage_block.rs | 16 +- core/lib/dal/src/models/storage_sync.rs | 17 +- core/lib/dal/src/sync_dal.rs | 4 +- core/lib/dal/src/tests/mod.rs | 2 + core/lib/env_config/src/contracts.rs | 4 + core/lib/eth_client/src/clients/http/query.rs | 85 +- core/lib/eth_client/src/clients/mock.rs | 34 +- core/lib/multivm/Cargo.toml | 1 + core/lib/multivm/src/lib.rs | 1 + core/lib/multivm/src/pubdata_builders/mod.rs | 24 + .../multivm/src/pubdata_builders/rollup.rs | 128 ++ .../lib/multivm/src/pubdata_builders/tests.rs | 123 + .../lib/multivm/src/pubdata_builders/utils.rs | 70 + .../multivm/src/pubdata_builders/validium.rs | 93 + core/lib/multivm/src/utils/events.rs | 56 +- core/lib/multivm/src/utils/mod.rs | 69 +- core/lib/multivm/src/versions/shadow/mod.rs | 1 - core/lib/multivm/src/versions/shadow/tests.rs | 26 +- .../src/versions/testonly/block_tip.rs | 8 +- .../src/versions/testonly/bootloader.rs | 6 +- .../versions/testonly/bytecode_publishing.rs | 8 +- .../multivm/src/versions/testonly/circuits.rs | 4 +- .../src/versions/testonly/code_oracle.rs | 10 +- .../src/versions/testonly/default_aa.rs | 8 +- .../versions/testonly/get_used_contracts.rs | 7 +- .../src/versions/testonly/is_write_initial.rs | 6 +- .../src/versions/testonly/l1_tx_execution.rs | 14 +- .../src/versions/testonly/l2_blocks.rs | 18 +- core/lib/multivm/src/versions/testonly/mod.rs | 13 +- .../src/versions/testonly/nonce_holder.rs | 4 +- .../src/versions/testonly/precompiles.rs | 8 +- .../multivm/src/versions/testonly/refunds.rs | 27 +- .../src/versions/testonly/require_eip712.rs | 8 +- .../src/versions/testonly/secp256r1.rs | 4 +- .../src/versions/testonly/simple_execution.rs | 16 +- 
.../multivm/src/versions/testonly/storage.rs | 8 +- .../src/versions/testonly/tester/mod.rs | 16 +- .../testonly/tester/transaction_test_info.rs | 13 +- .../multivm/src/versions/testonly/transfer.rs | 22 +- .../multivm/src/versions/testonly/upgrade.rs | 22 +- core/lib/multivm/src/versions/vm_1_3_2/vm.rs | 17 +- .../vm_1_4_1/tracers/pubdata_tracer.rs | 3 +- .../vm_1_4_1/types/internals/pubdata.rs | 2 +- core/lib/multivm/src/versions/vm_1_4_1/vm.rs | 15 +- .../vm_1_4_2/tracers/pubdata_tracer.rs | 3 +- .../vm_1_4_2/types/internals/pubdata.rs | 2 +- core/lib/multivm/src/versions/vm_1_4_2/vm.rs | 11 +- .../tracers/pubdata_tracer.rs | 3 +- .../types/internals/pubdata.rs | 2 +- .../src/versions/vm_boojum_integration/vm.rs | 11 +- .../multivm/src/versions/vm_fast/pubdata.rs | 2 +- .../multivm/src/versions/vm_fast/tests/mod.rs | 17 +- core/lib/multivm/src/versions/vm_fast/vm.rs | 81 +- .../vm_latest/bootloader_state/state.rs | 43 +- .../vm_latest/bootloader_state/utils.rs | 77 +- .../src/versions/vm_latest/constants.rs | 3 +- .../vm_latest/implementation/execution.rs | 1 + .../versions/vm_latest/tests/call_tracer.rs | 6 +- .../src/versions/vm_latest/tests/mod.rs | 25 +- .../vm_latest/tests/prestate_tracer.rs | 10 +- .../src/versions/vm_latest/tests/rollbacks.rs | 10 +- .../vm_latest/tracers/pubdata_tracer.rs | 30 +- .../versions/vm_latest/types/internals/mod.rs | 2 - .../vm_latest/types/internals/pubdata.rs | 123 - .../vm_latest/types/internals/vm_state.rs | 1 + core/lib/multivm/src/versions/vm_latest/vm.rs | 35 +- core/lib/multivm/src/versions/vm_m5/vm.rs | 19 +- core/lib/multivm/src/versions/vm_m6/vm.rs | 14 +- .../src/versions/vm_refunds_enhancement/vm.rs | 13 +- .../src/versions/vm_virtual_blocks/vm.rs | 11 +- core/lib/multivm/src/vm_instance.rs | 28 +- core/lib/protobuf_config/src/contracts.rs | 16 + .../src/proto/config/contracts.proto | 2 + core/lib/prover_interface/src/inputs.rs | 5 +- core/lib/snapshots_applier/src/tests/utils.rs | 1 + core/lib/state/src/test_utils.rs | 1 + core/lib/tee_verifier/src/lib.rs | 10 +- core/lib/types/src/api/en.rs | 4 +- core/lib/types/src/api/mod.rs | 1 + core/lib/types/src/block.rs | 3 +- core/lib/types/src/commitment/mod.rs | 140 +- core/lib/types/src/commitment/tests/mod.rs | 5 + .../tests/post_boojum_1_4_1_test.json | 33 +- .../tests/post_boojum_1_4_2_test.json | 33 +- .../tests/post_boojum_1_5_0_test.json | 187 +- .../post_boojum_1_5_0_test_with_evm.json | 187 +- .../commitment/tests/post_gateway_test.json | 1977 +++++++++++++++++ core/lib/types/src/l2_to_l1_log.rs | 15 +- core/lib/vm_executor/src/batch/factory.rs | 30 +- core/lib/vm_executor/src/oneshot/block.rs | 1 + core/lib/vm_executor/src/oneshot/contracts.rs | 5 + core/lib/vm_executor/src/oneshot/mod.rs | 7 +- core/lib/vm_executor/src/storage.rs | 17 +- core/lib/vm_interface/src/executor.rs | 3 +- core/lib/vm_interface/src/lib.rs | 5 +- core/lib/vm_interface/src/pubdata/mod.rs | 90 + .../src/types/inputs/execution_mode.rs | 19 + core/lib/vm_interface/src/types/inputs/mod.rs | 2 +- core/lib/vm_interface/src/utils/dump.rs | 17 +- core/lib/vm_interface/src/utils/shadow.rs | 16 +- core/lib/vm_interface/src/vm.rs | 13 +- core/node/api_server/src/web3/state.rs | 1 + core/node/block_reverter/src/tests.rs | 1 + core/node/commitment_generator/Cargo.toml | 1 + core/node/commitment_generator/src/lib.rs | 63 +- core/node/commitment_generator/src/utils.rs | 88 +- core/node/consensus/src/storage/store.rs | 8 + core/node/consensus/src/testonly.rs | 13 +- core/node/db_pruner/src/tests.rs | 1 + 
core/node/eth_sender/src/tests.rs | 4 + core/node/eth_watch/src/lib.rs | 4 +- .../src/l1_gas_price/gas_adjuster/mod.rs | 4 +- core/node/genesis/src/lib.rs | 1 + core/node/logs_bloom_backfill/src/lib.rs | 1 + .../layers/state_keeper/mempool_io.rs | 10 +- .../layers/state_keeper/output_handler.rs | 15 +- core/node/node_sync/src/external_io.rs | 5 +- core/node/node_sync/src/fetcher.rs | 16 +- core/node/node_sync/src/sync_action.rs | 1 + core/node/node_sync/src/tests.rs | 6 +- .../src/request_processor.rs | 91 +- .../src/tee_request_processor.rs | 3 +- .../state_keeper/src/executor/tests/tester.rs | 30 +- core/node/state_keeper/src/io/common/mod.rs | 4 +- core/node/state_keeper/src/io/common/tests.rs | 8 +- core/node/state_keeper/src/io/mempool.rs | 83 +- core/node/state_keeper/src/io/mod.rs | 16 +- core/node/state_keeper/src/io/persistence.rs | 85 +- .../io/seal_logic/l2_block_seal_subtasks.rs | 17 +- .../state_keeper/src/io/seal_logic/mod.rs | 7 +- core/node/state_keeper/src/io/tests/mod.rs | 14 +- core/node/state_keeper/src/io/tests/tester.rs | 2 + core/node/state_keeper/src/keeper.rs | 25 +- core/node/state_keeper/src/testonly/mod.rs | 7 +- .../src/testonly/test_batch_executor.rs | 7 +- core/node/state_keeper/src/tests/mod.rs | 3 +- core/node/state_keeper/src/updates/mod.rs | 20 +- core/node/test_utils/src/lib.rs | 9 + core/node/vm_runner/src/process.rs | 1 + core/node/vm_runner/src/storage.rs | 9 +- core/tests/vm-benchmark/src/vm.rs | 8 +- etc/multivm_bootloaders/vm_gateway/commit | 1 + .../fee_estimate.yul/fee_estimate.yul.zbin | Bin 0 -> 75296 bytes .../vm_gateway/gas_test.yul/gas_test.yul.zbin | Bin 0 -> 71392 bytes .../playground_batch.yul.zbin | Bin 0 -> 75424 bytes .../proved_batch.yul/proved_batch.yul.zbin | Bin 0 -> 71904 bytes prover/Cargo.lock | 1 + yarn.lock | 235 +- zkstack_cli/crates/config/src/contracts.rs | 2 + 188 files changed, 4838 insertions(+), 1207 deletions(-) create mode 100644 core/lib/constants/src/message_root.rs rename core/lib/dal/.sqlx/{query-7aebc0d8eb43bd835c4f175edc4c0371bdc118b25d64fcf526bd6575e4d675c8.json => query-1cb61327bed4d65a3fc81aa2229e01396dacefc0cea8cbcf5807185eb00fc0f7.json} (78%) rename core/lib/dal/.sqlx/{query-a62f400a5b0b66300f5febf762c7e0c8a39a49d1cea78ef771d4c64fbbc16756.json => query-250cc655f48144137906a72490680cc9e176729744c779fee97ca9392ae8a8c8.json} (83%) rename core/lib/dal/.sqlx/{query-55f4585be3d0f1a147cb10f6e59325fad494a512ba92df95439d2d7fe0f3a285.json => query-398598e20f1892b47bf749b220f611345ef888824e0ca3c5f39befbbc5bd0388.json} (65%) rename core/lib/dal/.sqlx/{query-942d6d948770c374ba4d3566c50e56e43137ac0cf45312d70dec0c407cadc1bf.json => query-45154c2efc8d07c4f83ae3e229f9892118f5732374e62f35e27800422afb5746.json} (70%) rename core/lib/dal/.sqlx/{query-e2d0bd978f76e0ce09b36b0e4b0a2baec4b2531ecaa8da234863e2eb810761c7.json => query-4bd1a4e612d10f2ca26068c140442f38816f163a3e3fba4fdbb81076b969e970.json} (79%) rename core/lib/dal/.sqlx/{query-0784f2cc13f85763cc7da29902850fa76a03907957b7a0d87ea55a7873f3312e.json => query-62e8330881b73917394384adbf73911add046315e5f8877bc57a34e3dadf9e37.json} (79%) rename core/lib/dal/.sqlx/{query-2049362aad5e32981e48e5c5ef7a00a91254ec6c8a68a359d22b02df5a40911f.json => query-7553d8013d101af0451830d26b7d7bd02627ebaf2df7c5ad517cb60a243182d2.json} (83%) rename core/lib/dal/.sqlx/{query-b456147560b107640abdc10f7ac76b563ff2f0f3a818e8c8a02c2ef632d0b960.json => query-77864e5eb5eada8edf8f4457aa153369701d7cd5f75ca031bf77ca27d0437cb9.json} (80%) rename 
core/lib/dal/.sqlx/{query-34910600545933d85931d41bfe2dfcb3522a0772ac3d2476652df4216d823e04.json => query-7d8c19c3568c03ec3e4a788b22c233f6503bc79cc9f809d35c558e275ba117ba.json} (55%) rename core/lib/dal/.sqlx/{query-f30748bef5f8d08b60739cdfd9508c8132d0958e4e25f4954e93d2095b4f11e8.json => query-a42121cd85daeb95ee268ba5cff1806fcc54d73216a7dc54be6ba210ef02d789.json} (73%) rename core/lib/dal/.sqlx/{query-2def67eb8372245ed59e76e07d615598f5d22a3aebd893afddded0e3c6b94a3b.json => query-b7d448837439a3e3dfe73070d3c20e9c138d0a6d35e9ce7fc396c5e76fbc25dd.json} (73%) rename core/lib/dal/.sqlx/{query-5aa487a98dff53a5d32a5916a26cbf3ffb03b3791c0e9a9f39fb85cfffc65db2.json => query-c5aedd2b1871d8f6276a31482caa673e4b5bba059ebe07bbbb64578881db030b.json} (77%) rename core/lib/dal/.sqlx/{query-f208ac4d454220cdd5cf8fa1405b21ca4cc94c38a7d18023ef1e89de484e60d8.json => query-d4cdd4eed07dfdad2757c480903f7999eabb611338925abe9dc9e64c837183d9.json} (84%) create mode 100644 core/lib/dal/migrations/20240925103531_gateway_upgrade.down.sql create mode 100644 core/lib/dal/migrations/20240925103531_gateway_upgrade.up.sql create mode 100644 core/lib/multivm/src/pubdata_builders/mod.rs create mode 100644 core/lib/multivm/src/pubdata_builders/rollup.rs create mode 100644 core/lib/multivm/src/pubdata_builders/tests.rs create mode 100644 core/lib/multivm/src/pubdata_builders/utils.rs create mode 100644 core/lib/multivm/src/pubdata_builders/validium.rs delete mode 100644 core/lib/multivm/src/versions/vm_latest/types/internals/pubdata.rs create mode 100644 core/lib/types/src/commitment/tests/post_gateway_test.json create mode 100644 core/lib/vm_interface/src/pubdata/mod.rs create mode 100644 etc/multivm_bootloaders/vm_gateway/commit create mode 100644 etc/multivm_bootloaders/vm_gateway/fee_estimate.yul/fee_estimate.yul.zbin create mode 100644 etc/multivm_bootloaders/vm_gateway/gas_test.yul/gas_test.yul.zbin create mode 100644 etc/multivm_bootloaders/vm_gateway/playground_batch.yul/playground_batch.yul.zbin create mode 100644 etc/multivm_bootloaders/vm_gateway/proved_batch.yul/proved_batch.yul.zbin diff --git a/Cargo.lock b/Cargo.lock index a42ef8e3fdcc..64ae0a9a12f4 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -9667,6 +9667,7 @@ dependencies = [ "zksync_multivm", "zksync_node_genesis", "zksync_node_test_utils", + "zksync_system_constants", "zksync_types", "zksync_utils", "zksync_web3_decl", @@ -10532,6 +10533,7 @@ dependencies = [ "zk_evm 0.150.6", "zksync_contracts", "zksync_eth_signer", + "zksync_mini_merkle_tree", "zksync_system_constants", "zksync_test_account", "zksync_types", diff --git a/core/bin/external_node/src/config/mod.rs b/core/bin/external_node/src/config/mod.rs index 56ee3edfd253..70803a663110 100644 --- a/core/bin/external_node/src/config/mod.rs +++ b/core/bin/external_node/src/config/mod.rs @@ -110,7 +110,12 @@ pub(crate) struct RemoteENConfig { // the `l2_erc20_bridge_addr` and `l2_shared_bridge_addr` are basically the same contract, but with // a different name, with names adapted only for consistency. pub l1_shared_bridge_proxy_addr: Option
<Address>, + /// Contract address that serves as a shared bridge on L2. + /// It is expected that `L2SharedBridge` is used before gateway upgrade, and `L2AssetRouter` is used after. pub l2_shared_bridge_addr: Option<Address>, + /// Address of `L2SharedBridge` that was used before gateway upgrade. + /// `None` if chain genesis used post-gateway protocol version. + pub l2_legacy_shared_bridge_addr: Option<Address>, pub l1_erc20_bridge_proxy_addr: Option<Address>, pub l2_erc20_bridge_addr: Option<Address>, pub l1_weth_bridge_addr: Option<Address>
, @@ -189,6 +194,7 @@ impl RemoteENConfig { l2_erc20_bridge_addr: l2_erc20_default_bridge, l1_shared_bridge_proxy_addr: bridges.l1_shared_default_bridge, l2_shared_bridge_addr: l2_erc20_shared_bridge, + l2_legacy_shared_bridge_addr: bridges.l2_legacy_shared_bridge, l1_weth_bridge_addr: bridges.l1_weth_bridge, l2_weth_bridge_addr: bridges.l2_weth_bridge, base_token_addr, @@ -218,6 +224,7 @@ impl RemoteENConfig { l1_shared_bridge_proxy_addr: Some(Address::repeat_byte(5)), l1_weth_bridge_addr: None, l2_shared_bridge_addr: Some(Address::repeat_byte(6)), + l2_legacy_shared_bridge_addr: Some(Address::repeat_byte(7)), l1_batch_commit_data_generator_mode: L1BatchCommitmentMode::Rollup, dummy_verifier: true, } @@ -1403,6 +1410,7 @@ impl From<&ExternalNodeConfig> for InternalApiConfig { l2_erc20_default_bridge: config.remote.l2_erc20_bridge_addr, l1_shared_default_bridge: config.remote.l1_shared_bridge_proxy_addr, l2_shared_default_bridge: config.remote.l2_shared_bridge_addr, + l2_legacy_shared_bridge: config.remote.l2_legacy_shared_bridge_addr, l1_weth_bridge: config.remote.l1_weth_bridge_addr, l2_weth_bridge: config.remote.l2_weth_bridge_addr, }, diff --git a/core/bin/external_node/src/node_builder.rs b/core/bin/external_node/src/node_builder.rs index 7d8489013535..883f3f8a5fae 100644 --- a/core/bin/external_node/src/node_builder.rs +++ b/core/bin/external_node/src/node_builder.rs @@ -55,6 +55,7 @@ use zksync_node_framework::{ service::{ZkStackService, ZkStackServiceBuilder}, }; use zksync_state::RocksdbStorageOptions; +use zksync_types::L2_NATIVE_TOKEN_VAULT_ADDRESS; use crate::{config::ExternalNodeConfig, metrics::framework::ExternalNodeMetricsLayer, Component}; @@ -192,11 +193,22 @@ impl ExternalNodeBuilder { // compression. const OPTIONAL_BYTECODE_COMPRESSION: bool = true; + let l2_shared_bridge_addr = self + .config + .remote + .l2_shared_bridge_addr + .context("Missing `l2_shared_bridge_addr`")?; + let l2_legacy_shared_bridge_addr = if l2_shared_bridge_addr == L2_NATIVE_TOKEN_VAULT_ADDRESS + { + // System has migrated to `L2_NATIVE_TOKEN_VAULT_ADDRESS`, use legacy shared bridge address from main node. + self.config.remote.l2_legacy_shared_bridge_addr + } else { + // System hasn't migrated on `L2_NATIVE_TOKEN_VAULT_ADDRESS`, we can safely use `l2_shared_bridge_addr`. + Some(l2_shared_bridge_addr) + }; + let persistence_layer = OutputHandlerLayer::new( - self.config - .remote - .l2_shared_bridge_addr - .expect("L2 shared bridge address is not set"), + l2_legacy_shared_bridge_addr, self.config.optional.l2_block_seal_queue_capacity, ) .with_pre_insert_txs(true) // EN requires txs to be pre-inserted. 
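The selection rule in the node_builder.rs hunk above can be summarized as a standalone function; this is only a reading aid, and the helper below is hypothetical rather than part of the change:

```rust
// Reading aid: once the chain has migrated to the gateway setup, `l2_shared_bridge_addr`
// points at L2_NATIVE_TOKEN_VAULT_ADDRESS and the legacy bridge address must come from the
// main node; before the migration, the configured shared bridge address is itself the legacy one.
use zksync_types::{Address, L2_NATIVE_TOKEN_VAULT_ADDRESS};

fn pick_legacy_shared_bridge_addr(
    l2_shared_bridge_addr: Address,
    legacy_addr_from_main_node: Option<Address>,
) -> Option<Address> {
    if l2_shared_bridge_addr == L2_NATIVE_TOKEN_VAULT_ADDRESS {
        legacy_addr_from_main_node
    } else {
        Some(l2_shared_bridge_addr)
    }
}
```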
diff --git a/core/bin/snapshots_creator/src/tests.rs b/core/bin/snapshots_creator/src/tests.rs index a440d836b4c9..f3c191388803 100644 --- a/core/bin/snapshots_creator/src/tests.rs +++ b/core/bin/snapshots_creator/src/tests.rs @@ -167,6 +167,7 @@ async fn create_l2_block( base_fee_per_gas: 0, gas_per_pubdata_limit: 0, batch_fee_input: Default::default(), + pubdata_params: Default::default(), base_system_contracts_hashes: Default::default(), protocol_version: Some(Default::default()), virtual_blocks: 0, diff --git a/core/bin/system-constants-generator/src/utils.rs b/core/bin/system-constants-generator/src/utils.rs index 800da68ee50d..16167975cf0e 100644 --- a/core/bin/system-constants-generator/src/utils.rs +++ b/core/bin/system-constants-generator/src/utils.rs @@ -9,7 +9,7 @@ use zksync_multivm::{ interface::{ storage::{InMemoryStorage, StorageView, WriteStorage}, tracer::VmExecutionStopReason, - L1BatchEnv, L2BlockEnv, SystemEnv, TxExecutionMode, VmExecutionMode, VmFactory, + InspectExecutionMode, L1BatchEnv, L2BlockEnv, SystemEnv, TxExecutionMode, VmFactory, VmInterface, VmInterfaceExt, }, tracers::dynamic::vm_1_5_0::DynTracer, @@ -271,8 +271,9 @@ pub(super) fn execute_internal_transfer_test() -> u32 { output: tracer_result.clone(), } .into_tracer_pointer(); + let mut vm: Vm<_, HistoryEnabled> = Vm::new(l1_batch, system_env, storage_view.to_rc_ptr()); - let result = vm.inspect(&mut tracer.into(), VmExecutionMode::Bootloader); + let result = vm.inspect(&mut tracer.into(), InspectExecutionMode::Bootloader); assert!(!result.result.is_failed(), "The internal call has reverted"); tracer_result.take() @@ -331,7 +332,7 @@ pub(super) fn execute_user_txs_in_test_gas_vm( let mut total_gas_refunded = 0; for tx in txs { vm.push_transaction(tx); - let tx_execution_result = vm.execute(VmExecutionMode::OneTx); + let tx_execution_result = vm.execute(InspectExecutionMode::OneTx); total_gas_refunded += tx_execution_result.refunds.gas_refunded; if !accept_failure { @@ -343,7 +344,7 @@ pub(super) fn execute_user_txs_in_test_gas_vm( } } - let result = vm.execute(VmExecutionMode::Bootloader); + let result = vm.execute(InspectExecutionMode::Bootloader); let metrics = result.get_execution_metrics(None); VmSpentResourcesResult { diff --git a/core/bin/zksync_server/src/node_builder.rs b/core/bin/zksync_server/src/node_builder.rs index 234e22894240..e2bd487f22b6 100644 --- a/core/bin/zksync_server/src/node_builder.rs +++ b/core/bin/zksync_server/src/node_builder.rs @@ -238,9 +238,7 @@ impl MainNodeBuilder { let wallets = self.wallets.clone(); let sk_config = try_load_config!(self.configs.state_keeper_config); let persistence_layer = OutputHandlerLayer::new( - self.contracts_config - .l2_shared_bridge_addr - .context("L2 shared bridge address")?, + self.contracts_config.l2_legacy_shared_bridge_addr, sk_config.l2_block_seal_queue_capacity, ) .with_protective_reads_persistence_enabled(sk_config.protective_reads_persistence_enabled); @@ -249,6 +247,8 @@ impl MainNodeBuilder { sk_config.clone(), try_load_config!(self.configs.mempool_config), try_load_config!(wallets.state_keeper), + self.contracts_config.l2_da_validator_addr, + self.genesis_config.l1_batch_commit_data_generator_mode, ); let db_config = try_load_config!(self.configs.db_config); let experimental_vm_config = self diff --git a/core/lib/basic_types/src/commitment.rs b/core/lib/basic_types/src/commitment.rs index eca339f40f42..0eed46aad782 100644 --- a/core/lib/basic_types/src/commitment.rs +++ b/core/lib/basic_types/src/commitment.rs @@ -1,10 +1,12 @@ +use 
std::str::FromStr; + use serde::{Deserialize, Serialize}; use strum::{Display, EnumIter}; use crate::{ ethabi, web3::contract::{Detokenize, Error as ContractError}, - U256, + Address, U256, }; #[derive(Debug, Clone, Copy, PartialEq, Eq, Default, Serialize, Deserialize, EnumIter, Display)] @@ -41,3 +43,23 @@ impl Detokenize for L1BatchCommitmentMode { } } } + +impl FromStr for L1BatchCommitmentMode { + type Err = &'static str; + + fn from_str(s: &str) -> Result { + match s { + "Rollup" => Ok(Self::Rollup), + "Validium" => Ok(Self::Validium), + _ => { + Err("Incorrect l1 batch commitment mode type; expected one of `Rollup`, `Validium`") + } + } + } +} + +#[derive(Default, Copy, Debug, Clone, PartialEq, Serialize, Deserialize)] +pub struct PubdataParams { + pub l2_da_validator_address: Address, + pub pubdata_type: L1BatchCommitmentMode, +} diff --git a/core/lib/basic_types/src/protocol_version.rs b/core/lib/basic_types/src/protocol_version.rs index e01586cdad7d..ebecfaa1b872 100644 --- a/core/lib/basic_types/src/protocol_version.rs +++ b/core/lib/basic_types/src/protocol_version.rs @@ -69,6 +69,7 @@ pub enum ProtocolVersionId { Version24, Version25, Version26, + Version27, } impl ProtocolVersionId { @@ -122,6 +123,7 @@ impl ProtocolVersionId { ProtocolVersionId::Version24 => VmVersion::Vm1_5_0IncreasedBootloaderMemory, ProtocolVersionId::Version25 => VmVersion::Vm1_5_0IncreasedBootloaderMemory, ProtocolVersionId::Version26 => VmVersion::Vm1_5_0IncreasedBootloaderMemory, + ProtocolVersionId::Version27 => VmVersion::VmGateway, } } @@ -139,6 +141,10 @@ impl ProtocolVersionId { self <= &Self::Version22 } + pub fn is_pre_gateway(&self) -> bool { + self <= &Self::Version26 + } + pub fn is_1_4_0(&self) -> bool { self >= &ProtocolVersionId::Version18 && self < &ProtocolVersionId::Version20 } @@ -278,6 +284,7 @@ impl From for VmVersion { ProtocolVersionId::Version24 => VmVersion::Vm1_5_0IncreasedBootloaderMemory, ProtocolVersionId::Version25 => VmVersion::Vm1_5_0IncreasedBootloaderMemory, ProtocolVersionId::Version26 => VmVersion::Vm1_5_0IncreasedBootloaderMemory, + ProtocolVersionId::Version27 => VmVersion::VmGateway, } } } diff --git a/core/lib/basic_types/src/vm.rs b/core/lib/basic_types/src/vm.rs index c753bbfc8183..f11f98596f18 100644 --- a/core/lib/basic_types/src/vm.rs +++ b/core/lib/basic_types/src/vm.rs @@ -16,6 +16,7 @@ pub enum VmVersion { Vm1_4_2, Vm1_5_0SmallBootloaderMemory, Vm1_5_0IncreasedBootloaderMemory, + VmGateway, } impl VmVersion { diff --git a/core/lib/config/src/configs/contracts.rs b/core/lib/config/src/configs/contracts.rs index b68720ebaefe..0bf7aab3bcab 100644 --- a/core/lib/config/src/configs/contracts.rs +++ b/core/lib/config/src/configs/contracts.rs @@ -29,7 +29,13 @@ pub struct ContractsConfig { pub diamond_proxy_addr: Address, pub validator_timelock_addr: Address, pub l1_shared_bridge_proxy_addr: Option
<Address>, + /// Contract address that serves as a shared bridge on L2. + /// It is expected that `L2SharedBridge` is used before gateway upgrade, and `L2AssetRouter` is used after. pub l2_shared_bridge_addr: Option<Address>
, + /// Address of `L2SharedBridge` that was used before gateway upgrade. + /// `None` if chain genesis used post-gateway protocol version. + /// If present it will be used as L2 token deployer address. + pub l2_legacy_shared_bridge_addr: Option<Address>
, pub l1_erc20_bridge_proxy_addr: Option<Address>
, pub l2_erc20_bridge_addr: Option<Address>
, pub l1_weth_bridge_proxy_addr: Option<Address>
, @@ -40,6 +46,7 @@ pub struct ContractsConfig { // Used by the RPC API and by the node builder in wiring the BaseTokenRatioProvider layer. pub base_token_addr: Option<Address>
, pub chain_admin_addr: Option<Address>
, + pub l2_da_validator_addr: Option<Address>
, } impl ContractsConfig { @@ -53,6 +60,7 @@ impl ContractsConfig { l2_erc20_bridge_addr: Some(Address::repeat_byte(0x0c)), l1_shared_bridge_proxy_addr: Some(Address::repeat_byte(0x0e)), l2_shared_bridge_addr: Some(Address::repeat_byte(0x0f)), + l2_legacy_shared_bridge_addr: Some(Address::repeat_byte(0x19)), l1_weth_bridge_proxy_addr: Some(Address::repeat_byte(0x0b)), l2_weth_bridge_addr: Some(Address::repeat_byte(0x0c)), l2_testnet_paymaster_addr: Some(Address::repeat_byte(0x11)), @@ -61,6 +69,7 @@ impl ContractsConfig { base_token_addr: Some(Address::repeat_byte(0x14)), ecosystem_contracts: Some(EcosystemContracts::for_tests()), chain_admin_addr: Some(Address::repeat_byte(0x18)), + l2_da_validator_addr: Some(Address::repeat_byte(0x1a)), } } } diff --git a/core/lib/config/src/testonly.rs b/core/lib/config/src/testonly.rs index 880bc5aa98d2..ce681cc0cc43 100644 --- a/core/lib/config/src/testonly.rs +++ b/core/lib/config/src/testonly.rs @@ -262,6 +262,7 @@ impl Distribution for EncodeDist { l2_erc20_bridge_addr: self.sample_opt(|| rng.gen()), l1_shared_bridge_proxy_addr: self.sample_opt(|| rng.gen()), l2_shared_bridge_addr: self.sample_opt(|| rng.gen()), + l2_legacy_shared_bridge_addr: self.sample_opt(|| rng.gen()), l1_weth_bridge_proxy_addr: self.sample_opt(|| rng.gen()), l2_weth_bridge_addr: self.sample_opt(|| rng.gen()), l2_testnet_paymaster_addr: self.sample_opt(|| rng.gen()), @@ -269,6 +270,7 @@ impl Distribution for EncodeDist { ecosystem_contracts: self.sample(rng), base_token_addr: self.sample_opt(|| rng.gen()), chain_admin_addr: self.sample_opt(|| rng.gen()), + l2_da_validator_addr: self.sample_opt(|| rng.gen()), } } } diff --git a/core/lib/constants/src/contracts.rs b/core/lib/constants/src/contracts.rs index fe37ef6c69fd..4f0f362d9149 100644 --- a/core/lib/constants/src/contracts.rs +++ b/core/lib/constants/src/contracts.rs @@ -135,12 +135,36 @@ pub const EVM_GAS_MANAGER_ADDRESS: Address = H160([ 0x00, 0x00, 0x80, 0x13, ]); -/// Note, that the `Create2Factory` is explicitly deployed on a non-system-contract address. 
pub const CREATE2_FACTORY_ADDRESS: Address = H160([ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01, 0x00, 0x00, ]); +pub const L2_GENESIS_UPGRADE_ADDRESS: Address = H160([ + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x01, 0x00, 0x01, +]); + +pub const L2_BRIDGEHUB_ADDRESS: Address = H160([ + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x01, 0x00, 0x02, +]); + +pub const L2_ASSET_ROUTER_ADDRESS: Address = H160([ + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x01, 0x00, 0x03, +]); + +pub const L2_NATIVE_TOKEN_VAULT_ADDRESS: Address = H160([ + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x01, 0x00, 0x04, +]); + +pub const L2_MESSAGE_ROOT_ADDRESS: Address = H160([ + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x01, 0x00, 0x05, +]); + pub const ERC20_TRANSFER_TOPIC: H256 = H256([ 221, 242, 82, 173, 27, 226, 200, 155, 105, 194, 176, 104, 252, 55, 141, 170, 149, 43, 167, 241, 99, 196, 161, 22, 40, 245, 90, 77, 245, 35, 179, 239, diff --git a/core/lib/constants/src/lib.rs b/core/lib/constants/src/lib.rs index 6aab79ad71f3..30ae6a7b582a 100644 --- a/core/lib/constants/src/lib.rs +++ b/core/lib/constants/src/lib.rs @@ -3,6 +3,7 @@ pub mod contracts; pub mod crypto; pub mod ethereum; pub mod fees; +pub mod message_root; pub mod system_context; pub mod system_logs; pub mod trusted_slots; diff --git a/core/lib/constants/src/message_root.rs b/core/lib/constants/src/message_root.rs new file mode 100644 index 000000000000..a8f4a034fb99 --- /dev/null +++ b/core/lib/constants/src/message_root.rs @@ -0,0 +1,5 @@ +// Position of `FullTree::_height` in `MessageRoot`'s storage layout. +pub const AGG_TREE_HEIGHT_KEY: usize = 3; + +// Position of `FullTree::nodes` in `MessageRoot`'s storage layout. 
+pub const AGG_TREE_NODES_KEY: usize = 5; diff --git a/core/lib/constants/src/system_logs.rs b/core/lib/constants/src/system_logs.rs index bd4167b3d02c..aa2c2cc156cc 100644 --- a/core/lib/constants/src/system_logs.rs +++ b/core/lib/constants/src/system_logs.rs @@ -1,11 +1,8 @@ /// The key of the system log with value of the L2->L1 logs tree root hash pub const L2_TO_L1_LOGS_TREE_ROOT_KEY: u32 = 0; -/// The key of the system log with value of the state diff hash -pub const STATE_DIFF_HASH_KEY: u32 = 2; +/// The key of the system log with value of the state diff hash for pre-gateway protocol versions +pub const STATE_DIFF_HASH_KEY_PRE_GATEWAY: u32 = 2; -/// The key of the system log with value of the first blob linear hash -pub const BLOB1_LINEAR_HASH_KEY: u32 = 7; - -/// The key of the system log with value of the second blob linear hash -pub const BLOB2_LINEAR_HASH_KEY: u32 = 8; +/// The key of the system log with value of the first blob linear hash for pre-gateway protocol versions +pub const BLOB1_LINEAR_HASH_KEY_PRE_GATEWAY: u32 = 7; diff --git a/core/lib/contracts/src/lib.rs b/core/lib/contracts/src/lib.rs index 0ee773abcd4a..cb5be504c8a0 100644 --- a/core/lib/contracts/src/lib.rs +++ b/core/lib/contracts/src/lib.rs @@ -516,6 +516,13 @@ impl BaseSystemContracts { BaseSystemContracts::load_with_bootloader(bootloader_bytecode) } + pub fn playground_gateway() -> Self { + let bootloader_bytecode = read_zbin_bytecode( + "etc/multivm_bootloaders/vm_gateway/playground_batch.yul/playground_batch.yul.zbin", + ); + BaseSystemContracts::load_with_bootloader(bootloader_bytecode) + } + pub fn estimate_gas_pre_virtual_blocks() -> Self { let bootloader_bytecode = read_zbin_bytecode( "etc/multivm_bootloaders/vm_1_3_2/fee_estimate.yul/fee_estimate.yul.zbin", @@ -586,6 +593,13 @@ impl BaseSystemContracts { BaseSystemContracts::load_with_bootloader(bootloader_bytecode) } + pub fn estimate_gas_gateway() -> Self { + let bootloader_bytecode = read_zbin_bytecode( + "etc/multivm_bootloaders/vm_gateway/fee_estimate.yul/fee_estimate.yul.zbin", + ); + BaseSystemContracts::load_with_bootloader(bootloader_bytecode) + } + pub fn hashes(&self) -> BaseSystemContractsHashes { BaseSystemContractsHashes { bootloader: self.bootloader.hash, diff --git a/core/lib/dal/.sqlx/query-7aebc0d8eb43bd835c4f175edc4c0371bdc118b25d64fcf526bd6575e4d675c8.json b/core/lib/dal/.sqlx/query-1cb61327bed4d65a3fc81aa2229e01396dacefc0cea8cbcf5807185eb00fc0f7.json similarity index 78% rename from core/lib/dal/.sqlx/query-7aebc0d8eb43bd835c4f175edc4c0371bdc118b25d64fcf526bd6575e4d675c8.json rename to core/lib/dal/.sqlx/query-1cb61327bed4d65a3fc81aa2229e01396dacefc0cea8cbcf5807185eb00fc0f7.json index dffd3ed8f9d2..48adcd412676 100644 --- a/core/lib/dal/.sqlx/query-7aebc0d8eb43bd835c4f175edc4c0371bdc118b25d64fcf526bd6575e4d675c8.json +++ b/core/lib/dal/.sqlx/query-1cb61327bed4d65a3fc81aa2229e01396dacefc0cea8cbcf5807185eb00fc0f7.json @@ -1,6 +1,6 @@ { "db_name": "PostgreSQL", - "query": "\n SELECT\n number,\n timestamp,\n l1_tx_count,\n l2_tx_count,\n bloom,\n priority_ops_onchain_data,\n hash,\n commitment,\n l2_to_l1_messages,\n used_contract_hashes,\n compressed_initial_writes,\n compressed_repeated_writes,\n l2_l1_merkle_root,\n rollup_last_leaf_index,\n zkporter_is_available,\n bootloader_code_hash,\n default_aa_code_hash,\n evm_emulator_code_hash,\n aux_data_hash,\n pass_through_data_hash,\n meta_parameters_hash,\n protocol_version,\n compressed_state_diffs,\n system_logs,\n events_queue_commitment,\n bootloader_initial_content_commitment,\n 
pubdata_input,\n fee_address\n FROM\n l1_batches\n LEFT JOIN commitments ON commitments.l1_batch_number = l1_batches.number\n WHERE\n number = 0\n OR eth_commit_tx_id IS NOT NULL\n AND commitment IS NOT NULL\n ORDER BY\n number DESC\n LIMIT\n 1\n ", + "query": "\n SELECT\n number,\n timestamp,\n l1_tx_count,\n l2_tx_count,\n bloom,\n priority_ops_onchain_data,\n hash,\n commitment,\n l2_to_l1_messages,\n used_contract_hashes,\n compressed_initial_writes,\n compressed_repeated_writes,\n l2_l1_merkle_root,\n rollup_last_leaf_index,\n zkporter_is_available,\n bootloader_code_hash,\n default_aa_code_hash,\n evm_emulator_code_hash,\n aux_data_hash,\n pass_through_data_hash,\n meta_parameters_hash,\n protocol_version,\n compressed_state_diffs,\n system_logs,\n events_queue_commitment,\n bootloader_initial_content_commitment,\n pubdata_input,\n fee_address,\n aggregation_root,\n local_root,\n state_diff_hash,\n data_availability.inclusion_data\n FROM\n l1_batches\n LEFT JOIN commitments ON commitments.l1_batch_number = l1_batches.number\n LEFT JOIN\n data_availability\n ON data_availability.l1_batch_number = l1_batches.number\n WHERE\n number = 0\n OR eth_commit_tx_id IS NOT NULL\n AND commitment IS NOT NULL\n ORDER BY\n number DESC\n LIMIT\n 1\n ", "describe": { "columns": [ { @@ -142,6 +142,26 @@ "ordinal": 27, "name": "fee_address", "type_info": "Bytea" + }, + { + "ordinal": 28, + "name": "aggregation_root", + "type_info": "Bytea" + }, + { + "ordinal": 29, + "name": "local_root", + "type_info": "Bytea" + }, + { + "ordinal": 30, + "name": "state_diff_hash", + "type_info": "Bytea" + }, + { + "ordinal": 31, + "name": "inclusion_data", + "type_info": "Bytea" } ], "parameters": { @@ -175,8 +195,12 @@ true, true, true, - false + false, + true, + true, + true, + true ] }, - "hash": "7aebc0d8eb43bd835c4f175edc4c0371bdc118b25d64fcf526bd6575e4d675c8" + "hash": "1cb61327bed4d65a3fc81aa2229e01396dacefc0cea8cbcf5807185eb00fc0f7" } diff --git a/core/lib/dal/.sqlx/query-a62f400a5b0b66300f5febf762c7e0c8a39a49d1cea78ef771d4c64fbbc16756.json b/core/lib/dal/.sqlx/query-250cc655f48144137906a72490680cc9e176729744c779fee97ca9392ae8a8c8.json similarity index 83% rename from core/lib/dal/.sqlx/query-a62f400a5b0b66300f5febf762c7e0c8a39a49d1cea78ef771d4c64fbbc16756.json rename to core/lib/dal/.sqlx/query-250cc655f48144137906a72490680cc9e176729744c779fee97ca9392ae8a8c8.json index c8c438295e49..5c4ce3d6a4e3 100644 --- a/core/lib/dal/.sqlx/query-a62f400a5b0b66300f5febf762c7e0c8a39a49d1cea78ef771d4c64fbbc16756.json +++ b/core/lib/dal/.sqlx/query-250cc655f48144137906a72490680cc9e176729744c779fee97ca9392ae8a8c8.json @@ -1,6 +1,6 @@ { "db_name": "PostgreSQL", - "query": "\n SELECT\n number,\n timestamp,\n hash,\n l1_tx_count,\n l2_tx_count,\n fee_account_address AS \"fee_account_address!\",\n base_fee_per_gas,\n l1_gas_price,\n l2_fair_gas_price,\n gas_per_pubdata_limit,\n bootloader_code_hash,\n default_aa_code_hash,\n evm_emulator_code_hash,\n protocol_version,\n virtual_blocks,\n fair_pubdata_price,\n gas_limit,\n logs_bloom\n FROM\n miniblocks\n ORDER BY\n number DESC\n LIMIT\n 1\n ", + "query": "\n SELECT\n number,\n timestamp,\n hash,\n l1_tx_count,\n l2_tx_count,\n fee_account_address AS \"fee_account_address!\",\n base_fee_per_gas,\n l1_gas_price,\n l2_fair_gas_price,\n gas_per_pubdata_limit,\n bootloader_code_hash,\n default_aa_code_hash,\n evm_emulator_code_hash,\n protocol_version,\n virtual_blocks,\n fair_pubdata_price,\n gas_limit,\n logs_bloom,\n l2_da_validator_address,\n pubdata_type\n FROM\n miniblocks\n 
ORDER BY\n number DESC\n LIMIT\n 1\n ", "describe": { "columns": [ { @@ -92,6 +92,16 @@ "ordinal": 17, "name": "logs_bloom", "type_info": "Bytea" + }, + { + "ordinal": 18, + "name": "l2_da_validator_address", + "type_info": "Bytea" + }, + { + "ordinal": 19, + "name": "pubdata_type", + "type_info": "Text" } ], "parameters": { @@ -115,8 +125,10 @@ false, true, true, - true + true, + false, + false ] }, - "hash": "a62f400a5b0b66300f5febf762c7e0c8a39a49d1cea78ef771d4c64fbbc16756" + "hash": "250cc655f48144137906a72490680cc9e176729744c779fee97ca9392ae8a8c8" } diff --git a/core/lib/dal/.sqlx/query-55f4585be3d0f1a147cb10f6e59325fad494a512ba92df95439d2d7fe0f3a285.json b/core/lib/dal/.sqlx/query-398598e20f1892b47bf749b220f611345ef888824e0ca3c5f39befbbc5bd0388.json similarity index 65% rename from core/lib/dal/.sqlx/query-55f4585be3d0f1a147cb10f6e59325fad494a512ba92df95439d2d7fe0f3a285.json rename to core/lib/dal/.sqlx/query-398598e20f1892b47bf749b220f611345ef888824e0ca3c5f39befbbc5bd0388.json index ecf54f0417b8..ffe785d754ca 100644 --- a/core/lib/dal/.sqlx/query-55f4585be3d0f1a147cb10f6e59325fad494a512ba92df95439d2d7fe0f3a285.json +++ b/core/lib/dal/.sqlx/query-398598e20f1892b47bf749b220f611345ef888824e0ca3c5f39befbbc5bd0388.json @@ -1,6 +1,6 @@ { "db_name": "PostgreSQL", - "query": "\n UPDATE l1_batches\n SET\n commitment = $1,\n aux_data_hash = $2,\n pass_through_data_hash = $3,\n meta_parameters_hash = $4,\n l2_l1_merkle_root = $5,\n zkporter_is_available = $6,\n compressed_state_diffs = $7,\n compressed_initial_writes = $8,\n compressed_repeated_writes = $9,\n updated_at = NOW()\n WHERE\n number = $10\n AND commitment IS NULL\n ", + "query": "\n UPDATE l1_batches\n SET\n commitment = $1,\n aux_data_hash = $2,\n pass_through_data_hash = $3,\n meta_parameters_hash = $4,\n l2_l1_merkle_root = $5,\n zkporter_is_available = $6,\n compressed_state_diffs = $7,\n compressed_initial_writes = $8,\n compressed_repeated_writes = $9,\n state_diff_hash = $10,\n aggregation_root = $11,\n local_root = $12,\n updated_at = NOW()\n WHERE\n number = $13\n AND commitment IS NULL\n ", "describe": { "columns": [], "parameters": { @@ -14,10 +14,13 @@ "Bytea", "Bytea", "Bytea", + "Bytea", + "Bytea", + "Bytea", "Int8" ] }, "nullable": [] }, - "hash": "55f4585be3d0f1a147cb10f6e59325fad494a512ba92df95439d2d7fe0f3a285" + "hash": "398598e20f1892b47bf749b220f611345ef888824e0ca3c5f39befbbc5bd0388" } diff --git a/core/lib/dal/.sqlx/query-942d6d948770c374ba4d3566c50e56e43137ac0cf45312d70dec0c407cadc1bf.json b/core/lib/dal/.sqlx/query-45154c2efc8d07c4f83ae3e229f9892118f5732374e62f35e27800422afb5746.json similarity index 70% rename from core/lib/dal/.sqlx/query-942d6d948770c374ba4d3566c50e56e43137ac0cf45312d70dec0c407cadc1bf.json rename to core/lib/dal/.sqlx/query-45154c2efc8d07c4f83ae3e229f9892118f5732374e62f35e27800422afb5746.json index 8c22b4f92c4e..11bff1102932 100644 --- a/core/lib/dal/.sqlx/query-942d6d948770c374ba4d3566c50e56e43137ac0cf45312d70dec0c407cadc1bf.json +++ b/core/lib/dal/.sqlx/query-45154c2efc8d07c4f83ae3e229f9892118f5732374e62f35e27800422afb5746.json @@ -1,6 +1,6 @@ { "db_name": "PostgreSQL", - "query": "\n SELECT\n number,\n l1_batches.timestamp,\n l1_tx_count,\n l2_tx_count,\n bloom,\n priority_ops_onchain_data,\n hash,\n commitment,\n l2_to_l1_messages,\n used_contract_hashes,\n compressed_initial_writes,\n compressed_repeated_writes,\n l2_l1_merkle_root,\n rollup_last_leaf_index,\n zkporter_is_available,\n l1_batches.bootloader_code_hash,\n l1_batches.default_aa_code_hash,\n 
l1_batches.evm_emulator_code_hash,\n aux_data_hash,\n pass_through_data_hash,\n meta_parameters_hash,\n protocol_version,\n compressed_state_diffs,\n system_logs,\n events_queue_commitment,\n bootloader_initial_content_commitment,\n pubdata_input,\n fee_address\n FROM\n l1_batches\n LEFT JOIN commitments ON commitments.l1_batch_number = l1_batches.number\n LEFT JOIN\n data_availability\n ON data_availability.l1_batch_number = l1_batches.number\n JOIN protocol_versions ON protocol_versions.id = l1_batches.protocol_version\n WHERE\n eth_commit_tx_id IS NULL\n AND number != 0\n AND protocol_versions.bootloader_code_hash = $1\n AND protocol_versions.default_account_code_hash = $2\n AND commitment IS NOT NULL\n AND (\n protocol_versions.id = $3\n OR protocol_versions.upgrade_tx_hash IS NULL\n )\n AND events_queue_commitment IS NOT NULL\n AND bootloader_initial_content_commitment IS NOT NULL\n AND (\n data_availability.inclusion_data IS NOT NULL\n OR $4 IS FALSE\n )\n ORDER BY\n number\n LIMIT\n $5\n ", + "query": "\n SELECT\n number,\n l1_batches.timestamp,\n l1_tx_count,\n l2_tx_count,\n bloom,\n priority_ops_onchain_data,\n hash,\n commitment,\n l2_to_l1_messages,\n used_contract_hashes,\n compressed_initial_writes,\n compressed_repeated_writes,\n l2_l1_merkle_root,\n rollup_last_leaf_index,\n zkporter_is_available,\n l1_batches.bootloader_code_hash,\n l1_batches.default_aa_code_hash,\n l1_batches.evm_emulator_code_hash,\n aux_data_hash,\n pass_through_data_hash,\n meta_parameters_hash,\n protocol_version,\n compressed_state_diffs,\n system_logs,\n events_queue_commitment,\n bootloader_initial_content_commitment,\n pubdata_input,\n fee_address,\n aggregation_root,\n local_root,\n state_diff_hash,\n data_availability.inclusion_data\n FROM\n l1_batches\n LEFT JOIN commitments ON commitments.l1_batch_number = l1_batches.number\n LEFT JOIN\n data_availability\n ON data_availability.l1_batch_number = l1_batches.number\n JOIN protocol_versions ON protocol_versions.id = l1_batches.protocol_version\n WHERE\n eth_commit_tx_id IS NULL\n AND number != 0\n AND protocol_versions.bootloader_code_hash = $1\n AND protocol_versions.default_account_code_hash = $2\n AND commitment IS NOT NULL\n AND (\n protocol_versions.id = $3\n OR protocol_versions.upgrade_tx_hash IS NULL\n )\n AND events_queue_commitment IS NOT NULL\n AND bootloader_initial_content_commitment IS NOT NULL\n AND (\n data_availability.inclusion_data IS NOT NULL\n OR $4 IS FALSE\n )\n ORDER BY\n number\n LIMIT\n $5\n ", "describe": { "columns": [ { @@ -142,6 +142,26 @@ "ordinal": 27, "name": "fee_address", "type_info": "Bytea" + }, + { + "ordinal": 28, + "name": "aggregation_root", + "type_info": "Bytea" + }, + { + "ordinal": 29, + "name": "local_root", + "type_info": "Bytea" + }, + { + "ordinal": 30, + "name": "state_diff_hash", + "type_info": "Bytea" + }, + { + "ordinal": 31, + "name": "inclusion_data", + "type_info": "Bytea" } ], "parameters": { @@ -181,8 +201,12 @@ true, true, true, - false + false, + true, + true, + true, + true ] }, - "hash": "942d6d948770c374ba4d3566c50e56e43137ac0cf45312d70dec0c407cadc1bf" + "hash": "45154c2efc8d07c4f83ae3e229f9892118f5732374e62f35e27800422afb5746" } diff --git a/core/lib/dal/.sqlx/query-e2d0bd978f76e0ce09b36b0e4b0a2baec4b2531ecaa8da234863e2eb810761c7.json b/core/lib/dal/.sqlx/query-4bd1a4e612d10f2ca26068c140442f38816f163a3e3fba4fdbb81076b969e970.json similarity index 79% rename from core/lib/dal/.sqlx/query-e2d0bd978f76e0ce09b36b0e4b0a2baec4b2531ecaa8da234863e2eb810761c7.json rename to 
core/lib/dal/.sqlx/query-4bd1a4e612d10f2ca26068c140442f38816f163a3e3fba4fdbb81076b969e970.json index e55d10d6f9a8..66d3e18075bf 100644 --- a/core/lib/dal/.sqlx/query-e2d0bd978f76e0ce09b36b0e4b0a2baec4b2531ecaa8da234863e2eb810761c7.json +++ b/core/lib/dal/.sqlx/query-4bd1a4e612d10f2ca26068c140442f38816f163a3e3fba4fdbb81076b969e970.json @@ -1,6 +1,6 @@ { "db_name": "PostgreSQL", - "query": "\n SELECT\n number,\n timestamp,\n l1_tx_count,\n l2_tx_count,\n bloom,\n priority_ops_onchain_data,\n hash,\n commitment,\n l2_to_l1_messages,\n used_contract_hashes,\n compressed_initial_writes,\n compressed_repeated_writes,\n l2_l1_merkle_root,\n rollup_last_leaf_index,\n zkporter_is_available,\n bootloader_code_hash,\n default_aa_code_hash,\n evm_emulator_code_hash,\n aux_data_hash,\n pass_through_data_hash,\n meta_parameters_hash,\n protocol_version,\n compressed_state_diffs,\n system_logs,\n events_queue_commitment,\n bootloader_initial_content_commitment,\n pubdata_input,\n fee_address\n FROM\n l1_batches\n LEFT JOIN commitments ON commitments.l1_batch_number = l1_batches.number\n WHERE\n number BETWEEN $1 AND $2\n ORDER BY\n number\n LIMIT\n $3\n ", + "query": "\n SELECT\n number,\n timestamp,\n l1_tx_count,\n l2_tx_count,\n bloom,\n priority_ops_onchain_data,\n hash,\n commitment,\n l2_to_l1_messages,\n used_contract_hashes,\n compressed_initial_writes,\n compressed_repeated_writes,\n l2_l1_merkle_root,\n rollup_last_leaf_index,\n zkporter_is_available,\n bootloader_code_hash,\n default_aa_code_hash,\n evm_emulator_code_hash,\n aux_data_hash,\n pass_through_data_hash,\n meta_parameters_hash,\n protocol_version,\n compressed_state_diffs,\n system_logs,\n events_queue_commitment,\n bootloader_initial_content_commitment,\n pubdata_input,\n fee_address,\n aggregation_root,\n local_root,\n state_diff_hash,\n data_availability.inclusion_data\n FROM\n l1_batches\n LEFT JOIN commitments ON commitments.l1_batch_number = l1_batches.number\n LEFT JOIN\n data_availability\n ON data_availability.l1_batch_number = l1_batches.number\n WHERE\n number BETWEEN $1 AND $2\n ORDER BY\n number\n LIMIT\n $3\n ", "describe": { "columns": [ { @@ -142,6 +142,26 @@ "ordinal": 27, "name": "fee_address", "type_info": "Bytea" + }, + { + "ordinal": 28, + "name": "aggregation_root", + "type_info": "Bytea" + }, + { + "ordinal": 29, + "name": "local_root", + "type_info": "Bytea" + }, + { + "ordinal": 30, + "name": "state_diff_hash", + "type_info": "Bytea" + }, + { + "ordinal": 31, + "name": "inclusion_data", + "type_info": "Bytea" } ], "parameters": { @@ -179,8 +199,12 @@ true, true, true, - false + false, + true, + true, + true, + true ] }, - "hash": "e2d0bd978f76e0ce09b36b0e4b0a2baec4b2531ecaa8da234863e2eb810761c7" + "hash": "4bd1a4e612d10f2ca26068c140442f38816f163a3e3fba4fdbb81076b969e970" } diff --git a/core/lib/dal/.sqlx/query-0784f2cc13f85763cc7da29902850fa76a03907957b7a0d87ea55a7873f3312e.json b/core/lib/dal/.sqlx/query-62e8330881b73917394384adbf73911add046315e5f8877bc57a34e3dadf9e37.json similarity index 79% rename from core/lib/dal/.sqlx/query-0784f2cc13f85763cc7da29902850fa76a03907957b7a0d87ea55a7873f3312e.json rename to core/lib/dal/.sqlx/query-62e8330881b73917394384adbf73911add046315e5f8877bc57a34e3dadf9e37.json index 84f677a36c86..dfdb4b6c82e7 100644 --- a/core/lib/dal/.sqlx/query-0784f2cc13f85763cc7da29902850fa76a03907957b7a0d87ea55a7873f3312e.json +++ b/core/lib/dal/.sqlx/query-62e8330881b73917394384adbf73911add046315e5f8877bc57a34e3dadf9e37.json @@ -1,6 +1,6 @@ { "db_name": "PostgreSQL", - "query": "\n SELECT\n 
number,\n timestamp,\n l1_tx_count,\n l2_tx_count,\n bloom,\n priority_ops_onchain_data,\n hash,\n commitment,\n l2_to_l1_messages,\n used_contract_hashes,\n compressed_initial_writes,\n compressed_repeated_writes,\n l2_l1_merkle_root,\n rollup_last_leaf_index,\n zkporter_is_available,\n bootloader_code_hash,\n default_aa_code_hash,\n evm_emulator_code_hash,\n aux_data_hash,\n pass_through_data_hash,\n meta_parameters_hash,\n protocol_version,\n compressed_state_diffs,\n system_logs,\n events_queue_commitment,\n bootloader_initial_content_commitment,\n pubdata_input,\n fee_address\n FROM\n l1_batches\n LEFT JOIN commitments ON commitments.l1_batch_number = l1_batches.number\n WHERE\n eth_commit_tx_id IS NOT NULL\n AND eth_prove_tx_id IS NULL\n ORDER BY\n number\n LIMIT\n $1\n ", + "query": "\n SELECT\n number,\n timestamp,\n l1_tx_count,\n l2_tx_count,\n bloom,\n priority_ops_onchain_data,\n hash,\n commitment,\n l2_to_l1_messages,\n used_contract_hashes,\n compressed_initial_writes,\n compressed_repeated_writes,\n l2_l1_merkle_root,\n rollup_last_leaf_index,\n zkporter_is_available,\n bootloader_code_hash,\n default_aa_code_hash,\n evm_emulator_code_hash,\n aux_data_hash,\n pass_through_data_hash,\n meta_parameters_hash,\n protocol_version,\n compressed_state_diffs,\n system_logs,\n events_queue_commitment,\n bootloader_initial_content_commitment,\n pubdata_input,\n fee_address,\n aggregation_root,\n local_root,\n state_diff_hash,\n data_availability.inclusion_data\n FROM\n l1_batches\n LEFT JOIN commitments ON commitments.l1_batch_number = l1_batches.number\n LEFT JOIN\n data_availability\n ON data_availability.l1_batch_number = l1_batches.number\n WHERE\n eth_commit_tx_id IS NOT NULL\n AND eth_prove_tx_id IS NULL\n ORDER BY\n number\n LIMIT\n $1\n ", "describe": { "columns": [ { @@ -142,6 +142,26 @@ "ordinal": 27, "name": "fee_address", "type_info": "Bytea" + }, + { + "ordinal": 28, + "name": "aggregation_root", + "type_info": "Bytea" + }, + { + "ordinal": 29, + "name": "local_root", + "type_info": "Bytea" + }, + { + "ordinal": 30, + "name": "state_diff_hash", + "type_info": "Bytea" + }, + { + "ordinal": 31, + "name": "inclusion_data", + "type_info": "Bytea" } ], "parameters": { @@ -177,8 +197,12 @@ true, true, true, - false + false, + true, + true, + true, + true ] }, - "hash": "0784f2cc13f85763cc7da29902850fa76a03907957b7a0d87ea55a7873f3312e" + "hash": "62e8330881b73917394384adbf73911add046315e5f8877bc57a34e3dadf9e37" } diff --git a/core/lib/dal/.sqlx/query-2049362aad5e32981e48e5c5ef7a00a91254ec6c8a68a359d22b02df5a40911f.json b/core/lib/dal/.sqlx/query-7553d8013d101af0451830d26b7d7bd02627ebaf2df7c5ad517cb60a243182d2.json similarity index 83% rename from core/lib/dal/.sqlx/query-2049362aad5e32981e48e5c5ef7a00a91254ec6c8a68a359d22b02df5a40911f.json rename to core/lib/dal/.sqlx/query-7553d8013d101af0451830d26b7d7bd02627ebaf2df7c5ad517cb60a243182d2.json index b8f8db874b63..6cc2e22382dd 100644 --- a/core/lib/dal/.sqlx/query-2049362aad5e32981e48e5c5ef7a00a91254ec6c8a68a359d22b02df5a40911f.json +++ b/core/lib/dal/.sqlx/query-7553d8013d101af0451830d26b7d7bd02627ebaf2df7c5ad517cb60a243182d2.json @@ -1,6 +1,6 @@ { "db_name": "PostgreSQL", - "query": "\n SELECT\n miniblocks.number,\n COALESCE(\n miniblocks.l1_batch_number,\n (\n SELECT\n (MAX(number) + 1)\n FROM\n l1_batches\n WHERE\n is_sealed\n ),\n (\n SELECT\n MAX(l1_batch_number) + 1\n FROM\n snapshot_recovery\n )\n ) AS \"l1_batch_number!\",\n (miniblocks.l1_tx_count + miniblocks.l2_tx_count) AS \"tx_count!\",\n miniblocks.timestamp,\n 
miniblocks.l1_gas_price,\n miniblocks.l2_fair_gas_price,\n miniblocks.fair_pubdata_price,\n miniblocks.bootloader_code_hash,\n miniblocks.default_aa_code_hash,\n miniblocks.evm_emulator_code_hash,\n miniblocks.virtual_blocks,\n miniblocks.hash,\n miniblocks.protocol_version AS \"protocol_version!\",\n miniblocks.fee_account_address AS \"fee_account_address!\"\n FROM\n miniblocks\n WHERE\n miniblocks.number BETWEEN $1 AND $2\n ", + "query": "\n SELECT\n miniblocks.number,\n COALESCE(\n miniblocks.l1_batch_number,\n (\n SELECT\n (MAX(number) + 1)\n FROM\n l1_batches\n WHERE\n is_sealed\n ),\n (\n SELECT\n MAX(l1_batch_number) + 1\n FROM\n snapshot_recovery\n )\n ) AS \"l1_batch_number!\",\n (miniblocks.l1_tx_count + miniblocks.l2_tx_count) AS \"tx_count!\",\n miniblocks.timestamp,\n miniblocks.l1_gas_price,\n miniblocks.l2_fair_gas_price,\n miniblocks.fair_pubdata_price,\n miniblocks.bootloader_code_hash,\n miniblocks.default_aa_code_hash,\n miniblocks.evm_emulator_code_hash,\n miniblocks.virtual_blocks,\n miniblocks.hash,\n miniblocks.protocol_version AS \"protocol_version!\",\n miniblocks.fee_account_address AS \"fee_account_address!\",\n miniblocks.l2_da_validator_address AS \"l2_da_validator_address!\",\n miniblocks.pubdata_type AS \"pubdata_type!\"\n FROM\n miniblocks\n WHERE\n miniblocks.number BETWEEN $1 AND $2\n ", "describe": { "columns": [ { @@ -72,6 +72,16 @@ "ordinal": 13, "name": "fee_account_address!", "type_info": "Bytea" + }, + { + "ordinal": 14, + "name": "l2_da_validator_address!", + "type_info": "Bytea" + }, + { + "ordinal": 15, + "name": "pubdata_type!", + "type_info": "Text" } ], "parameters": { @@ -94,8 +104,10 @@ false, false, true, + false, + false, false ] }, - "hash": "2049362aad5e32981e48e5c5ef7a00a91254ec6c8a68a359d22b02df5a40911f" + "hash": "7553d8013d101af0451830d26b7d7bd02627ebaf2df7c5ad517cb60a243182d2" } diff --git a/core/lib/dal/.sqlx/query-b456147560b107640abdc10f7ac76b563ff2f0f3a818e8c8a02c2ef632d0b960.json b/core/lib/dal/.sqlx/query-77864e5eb5eada8edf8f4457aa153369701d7cd5f75ca031bf77ca27d0437cb9.json similarity index 80% rename from core/lib/dal/.sqlx/query-b456147560b107640abdc10f7ac76b563ff2f0f3a818e8c8a02c2ef632d0b960.json rename to core/lib/dal/.sqlx/query-77864e5eb5eada8edf8f4457aa153369701d7cd5f75ca031bf77ca27d0437cb9.json index 80a6946026b0..f4e08abe31c5 100644 --- a/core/lib/dal/.sqlx/query-b456147560b107640abdc10f7ac76b563ff2f0f3a818e8c8a02c2ef632d0b960.json +++ b/core/lib/dal/.sqlx/query-77864e5eb5eada8edf8f4457aa153369701d7cd5f75ca031bf77ca27d0437cb9.json @@ -1,6 +1,6 @@ { "db_name": "PostgreSQL", - "query": "\n SELECT\n number,\n timestamp,\n l1_tx_count,\n l2_tx_count,\n bloom,\n priority_ops_onchain_data,\n hash,\n commitment,\n l2_to_l1_messages,\n used_contract_hashes,\n compressed_initial_writes,\n compressed_repeated_writes,\n l2_l1_merkle_root,\n rollup_last_leaf_index,\n zkporter_is_available,\n bootloader_code_hash,\n default_aa_code_hash,\n evm_emulator_code_hash,\n aux_data_hash,\n pass_through_data_hash,\n meta_parameters_hash,\n protocol_version,\n system_logs,\n compressed_state_diffs,\n events_queue_commitment,\n bootloader_initial_content_commitment,\n pubdata_input,\n fee_address\n FROM\n l1_batches\n LEFT JOIN commitments ON commitments.l1_batch_number = l1_batches.number\n WHERE\n is_sealed\n AND number = $1\n ", + "query": "\n SELECT\n number,\n timestamp,\n l1_tx_count,\n l2_tx_count,\n bloom,\n priority_ops_onchain_data,\n hash,\n commitment,\n l2_to_l1_messages,\n used_contract_hashes,\n compressed_initial_writes,\n 
compressed_repeated_writes,\n l2_l1_merkle_root,\n rollup_last_leaf_index,\n zkporter_is_available,\n bootloader_code_hash,\n default_aa_code_hash,\n evm_emulator_code_hash,\n aux_data_hash,\n pass_through_data_hash,\n meta_parameters_hash,\n protocol_version,\n system_logs,\n compressed_state_diffs,\n events_queue_commitment,\n bootloader_initial_content_commitment,\n pubdata_input,\n fee_address,\n aggregation_root,\n local_root,\n state_diff_hash,\n data_availability.inclusion_data\n FROM\n l1_batches\n LEFT JOIN commitments ON commitments.l1_batch_number = l1_batches.number\n LEFT JOIN\n data_availability\n ON data_availability.l1_batch_number = l1_batches.number\n WHERE\n is_sealed\n AND number = $1\n ", "describe": { "columns": [ { @@ -142,6 +142,26 @@ "ordinal": 27, "name": "fee_address", "type_info": "Bytea" + }, + { + "ordinal": 28, + "name": "aggregation_root", + "type_info": "Bytea" + }, + { + "ordinal": 29, + "name": "local_root", + "type_info": "Bytea" + }, + { + "ordinal": 30, + "name": "state_diff_hash", + "type_info": "Bytea" + }, + { + "ordinal": 31, + "name": "inclusion_data", + "type_info": "Bytea" } ], "parameters": { @@ -177,8 +197,12 @@ true, true, true, - false + false, + true, + true, + true, + true ] }, - "hash": "b456147560b107640abdc10f7ac76b563ff2f0f3a818e8c8a02c2ef632d0b960" + "hash": "77864e5eb5eada8edf8f4457aa153369701d7cd5f75ca031bf77ca27d0437cb9" } diff --git a/core/lib/dal/.sqlx/query-34910600545933d85931d41bfe2dfcb3522a0772ac3d2476652df4216d823e04.json b/core/lib/dal/.sqlx/query-7d8c19c3568c03ec3e4a788b22c233f6503bc79cc9f809d35c558e275ba117ba.json similarity index 55% rename from core/lib/dal/.sqlx/query-34910600545933d85931d41bfe2dfcb3522a0772ac3d2476652df4216d823e04.json rename to core/lib/dal/.sqlx/query-7d8c19c3568c03ec3e4a788b22c233f6503bc79cc9f809d35c558e275ba117ba.json index 35c606bf22bb..f89f531c4463 100644 --- a/core/lib/dal/.sqlx/query-34910600545933d85931d41bfe2dfcb3522a0772ac3d2476652df4216d823e04.json +++ b/core/lib/dal/.sqlx/query-7d8c19c3568c03ec3e4a788b22c233f6503bc79cc9f809d35c558e275ba117ba.json @@ -1,6 +1,6 @@ { "db_name": "PostgreSQL", - "query": "\n INSERT INTO\n miniblocks (\n number,\n timestamp,\n hash,\n l1_tx_count,\n l2_tx_count,\n fee_account_address,\n base_fee_per_gas,\n l1_gas_price,\n l2_fair_gas_price,\n gas_per_pubdata_limit,\n bootloader_code_hash,\n default_aa_code_hash,\n evm_emulator_code_hash,\n protocol_version,\n virtual_blocks,\n fair_pubdata_price,\n gas_limit,\n logs_bloom,\n created_at,\n updated_at\n )\n VALUES\n (\n $1,\n $2,\n $3,\n $4,\n $5,\n $6,\n $7,\n $8,\n $9,\n $10,\n $11,\n $12,\n $13,\n $14,\n $15,\n $16,\n $17,\n $18,\n NOW(),\n NOW()\n )\n ", + "query": "\n INSERT INTO\n miniblocks (\n number,\n timestamp,\n hash,\n l1_tx_count,\n l2_tx_count,\n fee_account_address,\n base_fee_per_gas,\n l1_gas_price,\n l2_fair_gas_price,\n gas_per_pubdata_limit,\n bootloader_code_hash,\n default_aa_code_hash,\n evm_emulator_code_hash,\n protocol_version,\n virtual_blocks,\n fair_pubdata_price,\n gas_limit,\n logs_bloom,\n l2_da_validator_address,\n pubdata_type,\n created_at,\n updated_at\n )\n VALUES\n (\n $1,\n $2,\n $3,\n $4,\n $5,\n $6,\n $7,\n $8,\n $9,\n $10,\n $11,\n $12,\n $13,\n $14,\n $15,\n $16,\n $17,\n $18,\n $19,\n $20,\n NOW(),\n NOW()\n )\n ", "describe": { "columns": [], "parameters": { @@ -22,10 +22,12 @@ "Int8", "Int8", "Int8", - "Bytea" + "Bytea", + "Bytea", + "Text" ] }, "nullable": [] }, - "hash": "34910600545933d85931d41bfe2dfcb3522a0772ac3d2476652df4216d823e04" + "hash": 
"7d8c19c3568c03ec3e4a788b22c233f6503bc79cc9f809d35c558e275ba117ba" } diff --git a/core/lib/dal/.sqlx/query-f30748bef5f8d08b60739cdfd9508c8132d0958e4e25f4954e93d2095b4f11e8.json b/core/lib/dal/.sqlx/query-a42121cd85daeb95ee268ba5cff1806fcc54d73216a7dc54be6ba210ef02d789.json similarity index 73% rename from core/lib/dal/.sqlx/query-f30748bef5f8d08b60739cdfd9508c8132d0958e4e25f4954e93d2095b4f11e8.json rename to core/lib/dal/.sqlx/query-a42121cd85daeb95ee268ba5cff1806fcc54d73216a7dc54be6ba210ef02d789.json index 4f138822ad1b..9a93ba45978e 100644 --- a/core/lib/dal/.sqlx/query-f30748bef5f8d08b60739cdfd9508c8132d0958e4e25f4954e93d2095b4f11e8.json +++ b/core/lib/dal/.sqlx/query-a42121cd85daeb95ee268ba5cff1806fcc54d73216a7dc54be6ba210ef02d789.json @@ -1,6 +1,6 @@ { "db_name": "PostgreSQL", - "query": "\n SELECT\n number,\n timestamp,\n l1_tx_count,\n l2_tx_count,\n bloom,\n priority_ops_onchain_data,\n hash,\n commitment,\n l2_to_l1_messages,\n used_contract_hashes,\n compressed_initial_writes,\n compressed_repeated_writes,\n l2_l1_merkle_root,\n rollup_last_leaf_index,\n zkporter_is_available,\n bootloader_code_hash,\n default_aa_code_hash,\n evm_emulator_code_hash,\n aux_data_hash,\n pass_through_data_hash,\n meta_parameters_hash,\n system_logs,\n compressed_state_diffs,\n protocol_version,\n events_queue_commitment,\n bootloader_initial_content_commitment,\n pubdata_input,\n fee_address\n FROM\n (\n SELECT\n l1_batches.*,\n ROW_NUMBER() OVER (\n ORDER BY\n number ASC\n ) AS row_number\n FROM\n l1_batches\n WHERE\n eth_commit_tx_id IS NOT NULL\n AND l1_batches.skip_proof = TRUE\n AND l1_batches.number > $1\n ORDER BY\n number\n LIMIT\n $2\n ) inn\n LEFT JOIN commitments ON commitments.l1_batch_number = inn.number\n WHERE\n number - row_number = $1\n ", + "query": "\n SELECT\n number,\n timestamp,\n l1_tx_count,\n l2_tx_count,\n bloom,\n priority_ops_onchain_data,\n hash,\n commitment,\n l2_to_l1_messages,\n used_contract_hashes,\n compressed_initial_writes,\n compressed_repeated_writes,\n l2_l1_merkle_root,\n rollup_last_leaf_index,\n zkporter_is_available,\n bootloader_code_hash,\n default_aa_code_hash,\n evm_emulator_code_hash,\n aux_data_hash,\n pass_through_data_hash,\n meta_parameters_hash,\n system_logs,\n compressed_state_diffs,\n protocol_version,\n events_queue_commitment,\n bootloader_initial_content_commitment,\n pubdata_input,\n fee_address,\n aggregation_root,\n local_root,\n state_diff_hash,\n data_availability.inclusion_data\n FROM\n (\n SELECT\n l1_batches.*,\n ROW_NUMBER() OVER (\n ORDER BY\n number ASC\n ) AS row_number\n FROM\n l1_batches\n WHERE\n eth_commit_tx_id IS NOT NULL\n AND l1_batches.skip_proof = TRUE\n AND l1_batches.number > $1\n ORDER BY\n number\n LIMIT\n $2\n ) inn\n LEFT JOIN commitments ON commitments.l1_batch_number = inn.number\n LEFT JOIN data_availability ON data_availability.l1_batch_number = inn.number\n WHERE\n number - row_number = $1\n ", "describe": { "columns": [ { @@ -142,6 +142,26 @@ "ordinal": 27, "name": "fee_address", "type_info": "Bytea" + }, + { + "ordinal": 28, + "name": "aggregation_root", + "type_info": "Bytea" + }, + { + "ordinal": 29, + "name": "local_root", + "type_info": "Bytea" + }, + { + "ordinal": 30, + "name": "state_diff_hash", + "type_info": "Bytea" + }, + { + "ordinal": 31, + "name": "inclusion_data", + "type_info": "Bytea" } ], "parameters": { @@ -178,8 +198,12 @@ true, true, true, - false + false, + true, + true, + true, + true ] }, - "hash": "f30748bef5f8d08b60739cdfd9508c8132d0958e4e25f4954e93d2095b4f11e8" + "hash": 
"a42121cd85daeb95ee268ba5cff1806fcc54d73216a7dc54be6ba210ef02d789" } diff --git a/core/lib/dal/.sqlx/query-2def67eb8372245ed59e76e07d615598f5d22a3aebd893afddded0e3c6b94a3b.json b/core/lib/dal/.sqlx/query-b7d448837439a3e3dfe73070d3c20e9c138d0a6d35e9ce7fc396c5e76fbc25dd.json similarity index 73% rename from core/lib/dal/.sqlx/query-2def67eb8372245ed59e76e07d615598f5d22a3aebd893afddded0e3c6b94a3b.json rename to core/lib/dal/.sqlx/query-b7d448837439a3e3dfe73070d3c20e9c138d0a6d35e9ce7fc396c5e76fbc25dd.json index afac14e6d5cd..8a68b1a9b9bd 100644 --- a/core/lib/dal/.sqlx/query-2def67eb8372245ed59e76e07d615598f5d22a3aebd893afddded0e3c6b94a3b.json +++ b/core/lib/dal/.sqlx/query-b7d448837439a3e3dfe73070d3c20e9c138d0a6d35e9ce7fc396c5e76fbc25dd.json @@ -1,6 +1,6 @@ { "db_name": "PostgreSQL", - "query": "\n SELECT\n number,\n l1_batches.timestamp,\n l1_tx_count,\n l2_tx_count,\n bloom,\n priority_ops_onchain_data,\n hash,\n commitment,\n l2_to_l1_messages,\n used_contract_hashes,\n compressed_initial_writes,\n compressed_repeated_writes,\n l2_l1_merkle_root,\n rollup_last_leaf_index,\n zkporter_is_available,\n l1_batches.bootloader_code_hash,\n l1_batches.default_aa_code_hash,\n l1_batches.evm_emulator_code_hash,\n aux_data_hash,\n pass_through_data_hash,\n meta_parameters_hash,\n protocol_version,\n compressed_state_diffs,\n system_logs,\n events_queue_commitment,\n bootloader_initial_content_commitment,\n pubdata_input,\n fee_address\n FROM\n l1_batches\n LEFT JOIN commitments ON commitments.l1_batch_number = l1_batches.number\n JOIN protocol_versions ON protocol_versions.id = l1_batches.protocol_version\n WHERE\n eth_commit_tx_id IS NULL\n AND number != 0\n AND protocol_versions.bootloader_code_hash = $1\n AND protocol_versions.default_account_code_hash = $2\n AND commitment IS NOT NULL\n AND (\n protocol_versions.id = $3\n OR protocol_versions.upgrade_tx_hash IS NULL\n )\n ORDER BY\n number\n LIMIT\n $4\n ", + "query": "\n SELECT\n number,\n l1_batches.timestamp,\n l1_tx_count,\n l2_tx_count,\n bloom,\n priority_ops_onchain_data,\n hash,\n commitment,\n l2_to_l1_messages,\n used_contract_hashes,\n compressed_initial_writes,\n compressed_repeated_writes,\n l2_l1_merkle_root,\n rollup_last_leaf_index,\n zkporter_is_available,\n l1_batches.bootloader_code_hash,\n l1_batches.default_aa_code_hash,\n l1_batches.evm_emulator_code_hash,\n aux_data_hash,\n pass_through_data_hash,\n meta_parameters_hash,\n protocol_version,\n compressed_state_diffs,\n system_logs,\n events_queue_commitment,\n bootloader_initial_content_commitment,\n pubdata_input,\n fee_address,\n aggregation_root,\n local_root,\n state_diff_hash,\n data_availability.inclusion_data\n FROM\n l1_batches\n LEFT JOIN commitments ON commitments.l1_batch_number = l1_batches.number\n JOIN protocol_versions ON protocol_versions.id = l1_batches.protocol_version\n LEFT JOIN\n data_availability\n ON data_availability.l1_batch_number = l1_batches.number\n WHERE\n eth_commit_tx_id IS NULL\n AND number != 0\n AND protocol_versions.bootloader_code_hash = $1\n AND protocol_versions.default_account_code_hash = $2\n AND commitment IS NOT NULL\n AND (\n protocol_versions.id = $3\n OR protocol_versions.upgrade_tx_hash IS NULL\n )\n ORDER BY\n number\n LIMIT\n $4\n ", "describe": { "columns": [ { @@ -142,6 +142,26 @@ "ordinal": 27, "name": "fee_address", "type_info": "Bytea" + }, + { + "ordinal": 28, + "name": "aggregation_root", + "type_info": "Bytea" + }, + { + "ordinal": 29, + "name": "local_root", + "type_info": "Bytea" + }, + { + "ordinal": 30, + "name": 
"state_diff_hash", + "type_info": "Bytea" + }, + { + "ordinal": 31, + "name": "inclusion_data", + "type_info": "Bytea" } ], "parameters": { @@ -180,8 +200,12 @@ true, true, true, - false + false, + true, + true, + true, + true ] }, - "hash": "2def67eb8372245ed59e76e07d615598f5d22a3aebd893afddded0e3c6b94a3b" + "hash": "b7d448837439a3e3dfe73070d3c20e9c138d0a6d35e9ce7fc396c5e76fbc25dd" } diff --git a/core/lib/dal/.sqlx/query-5aa487a98dff53a5d32a5916a26cbf3ffb03b3791c0e9a9f39fb85cfffc65db2.json b/core/lib/dal/.sqlx/query-c5aedd2b1871d8f6276a31482caa673e4b5bba059ebe07bbbb64578881db030b.json similarity index 77% rename from core/lib/dal/.sqlx/query-5aa487a98dff53a5d32a5916a26cbf3ffb03b3791c0e9a9f39fb85cfffc65db2.json rename to core/lib/dal/.sqlx/query-c5aedd2b1871d8f6276a31482caa673e4b5bba059ebe07bbbb64578881db030b.json index 4eae4f778cee..f97ea8a6ccd5 100644 --- a/core/lib/dal/.sqlx/query-5aa487a98dff53a5d32a5916a26cbf3ffb03b3791c0e9a9f39fb85cfffc65db2.json +++ b/core/lib/dal/.sqlx/query-c5aedd2b1871d8f6276a31482caa673e4b5bba059ebe07bbbb64578881db030b.json @@ -1,6 +1,6 @@ { "db_name": "PostgreSQL", - "query": "\n SELECT\n number,\n timestamp,\n l1_tx_count,\n l2_tx_count,\n bloom,\n priority_ops_onchain_data,\n hash,\n commitment,\n l2_to_l1_messages,\n used_contract_hashes,\n compressed_initial_writes,\n compressed_repeated_writes,\n l2_l1_merkle_root,\n rollup_last_leaf_index,\n zkporter_is_available,\n bootloader_code_hash,\n default_aa_code_hash,\n evm_emulator_code_hash,\n aux_data_hash,\n pass_through_data_hash,\n meta_parameters_hash,\n protocol_version,\n compressed_state_diffs,\n system_logs,\n events_queue_commitment,\n bootloader_initial_content_commitment,\n pubdata_input,\n fee_address\n FROM\n l1_batches\n LEFT JOIN commitments ON commitments.l1_batch_number = l1_batches.number\n WHERE\n eth_prove_tx_id IS NOT NULL\n AND eth_execute_tx_id IS NULL\n ORDER BY\n number\n LIMIT\n $1\n ", + "query": "\n SELECT\n number,\n timestamp,\n l1_tx_count,\n l2_tx_count,\n bloom,\n priority_ops_onchain_data,\n hash,\n commitment,\n l2_to_l1_messages,\n used_contract_hashes,\n compressed_initial_writes,\n compressed_repeated_writes,\n l2_l1_merkle_root,\n rollup_last_leaf_index,\n zkporter_is_available,\n bootloader_code_hash,\n default_aa_code_hash,\n evm_emulator_code_hash,\n aux_data_hash,\n pass_through_data_hash,\n meta_parameters_hash,\n protocol_version,\n compressed_state_diffs,\n system_logs,\n events_queue_commitment,\n bootloader_initial_content_commitment,\n pubdata_input,\n fee_address,\n aggregation_root,\n local_root,\n state_diff_hash,\n data_availability.inclusion_data\n FROM\n l1_batches\n LEFT JOIN commitments ON commitments.l1_batch_number = l1_batches.number\n LEFT JOIN\n data_availability\n ON data_availability.l1_batch_number = l1_batches.number\n WHERE\n eth_prove_tx_id IS NOT NULL\n AND eth_execute_tx_id IS NULL\n ORDER BY\n number\n LIMIT\n $1\n ", "describe": { "columns": [ { @@ -142,6 +142,26 @@ "ordinal": 27, "name": "fee_address", "type_info": "Bytea" + }, + { + "ordinal": 28, + "name": "aggregation_root", + "type_info": "Bytea" + }, + { + "ordinal": 29, + "name": "local_root", + "type_info": "Bytea" + }, + { + "ordinal": 30, + "name": "state_diff_hash", + "type_info": "Bytea" + }, + { + "ordinal": 31, + "name": "inclusion_data", + "type_info": "Bytea" } ], "parameters": { @@ -177,8 +197,12 @@ true, true, true, - false + false, + true, + true, + true, + true ] }, - "hash": "5aa487a98dff53a5d32a5916a26cbf3ffb03b3791c0e9a9f39fb85cfffc65db2" + "hash": 
"c5aedd2b1871d8f6276a31482caa673e4b5bba059ebe07bbbb64578881db030b" } diff --git a/core/lib/dal/.sqlx/query-f208ac4d454220cdd5cf8fa1405b21ca4cc94c38a7d18023ef1e89de484e60d8.json b/core/lib/dal/.sqlx/query-d4cdd4eed07dfdad2757c480903f7999eabb611338925abe9dc9e64c837183d9.json similarity index 84% rename from core/lib/dal/.sqlx/query-f208ac4d454220cdd5cf8fa1405b21ca4cc94c38a7d18023ef1e89de484e60d8.json rename to core/lib/dal/.sqlx/query-d4cdd4eed07dfdad2757c480903f7999eabb611338925abe9dc9e64c837183d9.json index 700352c1a8bf..111234e02b75 100644 --- a/core/lib/dal/.sqlx/query-f208ac4d454220cdd5cf8fa1405b21ca4cc94c38a7d18023ef1e89de484e60d8.json +++ b/core/lib/dal/.sqlx/query-d4cdd4eed07dfdad2757c480903f7999eabb611338925abe9dc9e64c837183d9.json @@ -1,6 +1,6 @@ { "db_name": "PostgreSQL", - "query": "\n SELECT\n number,\n timestamp,\n hash,\n l1_tx_count,\n l2_tx_count,\n fee_account_address AS \"fee_account_address!\",\n base_fee_per_gas,\n l1_gas_price,\n l2_fair_gas_price,\n gas_per_pubdata_limit,\n bootloader_code_hash,\n default_aa_code_hash,\n evm_emulator_code_hash,\n protocol_version,\n virtual_blocks,\n fair_pubdata_price,\n gas_limit,\n logs_bloom\n FROM\n miniblocks\n WHERE\n number = $1\n ", + "query": "\n SELECT\n number,\n timestamp,\n hash,\n l1_tx_count,\n l2_tx_count,\n fee_account_address AS \"fee_account_address!\",\n base_fee_per_gas,\n l1_gas_price,\n l2_fair_gas_price,\n gas_per_pubdata_limit,\n bootloader_code_hash,\n default_aa_code_hash,\n evm_emulator_code_hash,\n protocol_version,\n virtual_blocks,\n fair_pubdata_price,\n gas_limit,\n logs_bloom,\n l2_da_validator_address,\n pubdata_type\n FROM\n miniblocks\n WHERE\n number = $1\n ", "describe": { "columns": [ { @@ -92,6 +92,16 @@ "ordinal": 17, "name": "logs_bloom", "type_info": "Bytea" + }, + { + "ordinal": 18, + "name": "l2_da_validator_address", + "type_info": "Bytea" + }, + { + "ordinal": 19, + "name": "pubdata_type", + "type_info": "Text" } ], "parameters": { @@ -117,8 +127,10 @@ false, true, true, - true + true, + false, + false ] }, - "hash": "f208ac4d454220cdd5cf8fa1405b21ca4cc94c38a7d18023ef1e89de484e60d8" + "hash": "d4cdd4eed07dfdad2757c480903f7999eabb611338925abe9dc9e64c837183d9" } diff --git a/core/lib/dal/migrations/20240925103531_gateway_upgrade.down.sql b/core/lib/dal/migrations/20240925103531_gateway_upgrade.down.sql new file mode 100644 index 000000000000..9af34d7dc8ee --- /dev/null +++ b/core/lib/dal/migrations/20240925103531_gateway_upgrade.down.sql @@ -0,0 +1,8 @@ +ALTER TABLE l1_batches DROP COLUMN IF EXISTS state_diff_hash BYTEA; + +ALTER TABLE l1_batches DROP COLUMN IF EXISTS aggregation_root; +ALTER TABLE l1_batches DROP COLUMN IF EXISTS local_root; + +ALTER TABLE miniblocks + DROP COLUMN IF EXISTS l2_da_validator_address, + DROP COLUMN IF EXISTS pubdata_type; diff --git a/core/lib/dal/migrations/20240925103531_gateway_upgrade.up.sql b/core/lib/dal/migrations/20240925103531_gateway_upgrade.up.sql new file mode 100644 index 000000000000..a58464f6ebb3 --- /dev/null +++ b/core/lib/dal/migrations/20240925103531_gateway_upgrade.up.sql @@ -0,0 +1,11 @@ +ALTER TABLE l1_batches ADD COLUMN IF NOT EXISTS state_diff_hash BYTEA; + +ALTER TABLE l1_batches ADD COLUMN IF NOT EXISTS aggregation_root BYTEA; +ALTER TABLE l1_batches ADD COLUMN IF NOT EXISTS local_root BYTEA; + +ALTER TABLE miniblocks + ADD COLUMN IF NOT EXISTS l2_da_validator_address BYTEA NOT NULL DEFAULT '\x0000000000000000000000000000000000000000'::bytea, + -- There are miniblocks that used the `Rollup' type, but were actually used on a Validium 
chain. + -- This is okay, since this field represents how the VM works with the DA, rather what is committed on L1. + ADD COLUMN IF NOT EXISTS pubdata_type TEXT NOT NULL DEFAULT 'Rollup'; +-- ^ Add a default value so that DB queries don't fail even if the DB migration is not completed. diff --git a/core/lib/dal/src/blocks_dal.rs b/core/lib/dal/src/blocks_dal.rs index f71dc68ce757..943aa12caf75 100644 --- a/core/lib/dal/src/blocks_dal.rs +++ b/core/lib/dal/src/blocks_dal.rs @@ -344,10 +344,17 @@ impl BlocksDal<'_, '_> { events_queue_commitment, bootloader_initial_content_commitment, pubdata_input, - fee_address + fee_address, + aggregation_root, + local_root, + state_diff_hash, + data_availability.inclusion_data FROM l1_batches LEFT JOIN commitments ON commitments.l1_batch_number = l1_batches.number + LEFT JOIN + data_availability + ON data_availability.l1_batch_number = l1_batches.number WHERE is_sealed AND number = $1 @@ -841,6 +848,8 @@ impl BlocksDal<'_, '_> { fair_pubdata_price, gas_limit, logs_bloom, + l2_da_validator_address, + pubdata_type, created_at, updated_at ) @@ -864,6 +873,8 @@ impl BlocksDal<'_, '_> { $16, $17, $18, + $19, + $20, NOW(), NOW() ) @@ -896,6 +907,11 @@ impl BlocksDal<'_, '_> { l2_block_header.batch_fee_input.fair_pubdata_price() as i64, l2_block_header.gas_limit as i64, l2_block_header.logs_bloom.as_bytes(), + l2_block_header + .pubdata_params + .l2_da_validator_address + .as_bytes(), + l2_block_header.pubdata_params.pubdata_type.to_string(), ); instrumentation.with(query).execute(self.storage).await?; @@ -924,7 +940,9 @@ impl BlocksDal<'_, '_> { virtual_blocks, fair_pubdata_price, gas_limit, - logs_bloom + logs_bloom, + l2_da_validator_address, + pubdata_type FROM miniblocks ORDER BY @@ -965,7 +983,9 @@ impl BlocksDal<'_, '_> { virtual_blocks, fair_pubdata_price, gas_limit, - logs_bloom + logs_bloom, + l2_da_validator_address, + pubdata_type FROM miniblocks WHERE @@ -1062,9 +1082,12 @@ impl BlocksDal<'_, '_> { compressed_state_diffs = $7, compressed_initial_writes = $8, compressed_repeated_writes = $9, + state_diff_hash = $10, + aggregation_root = $11, + local_root = $12, updated_at = NOW() WHERE - number = $10 + number = $13 AND commitment IS NULL "#, commitment_artifacts.commitment_hash.commitment.as_bytes(), @@ -1082,6 +1105,9 @@ impl BlocksDal<'_, '_> { commitment_artifacts.compressed_state_diffs, commitment_artifacts.compressed_initial_writes, commitment_artifacts.compressed_repeated_writes, + commitment_artifacts.state_diff_hash.as_bytes(), + commitment_artifacts.aggregation_root.as_bytes(), + commitment_artifacts.local_root.as_bytes(), i64::from(number.0), ) .instrument("save_l1_batch_commitment_artifacts") @@ -1189,10 +1215,17 @@ impl BlocksDal<'_, '_> { events_queue_commitment, bootloader_initial_content_commitment, pubdata_input, - fee_address + fee_address, + aggregation_root, + local_root, + state_diff_hash, + data_availability.inclusion_data FROM l1_batches LEFT JOIN commitments ON commitments.l1_batch_number = l1_batches.number + LEFT JOIN + data_availability + ON data_availability.l1_batch_number = l1_batches.number WHERE number = 0 OR eth_commit_tx_id IS NOT NULL @@ -1377,10 +1410,17 @@ impl BlocksDal<'_, '_> { events_queue_commitment, bootloader_initial_content_commitment, pubdata_input, - fee_address + fee_address, + aggregation_root, + local_root, + state_diff_hash, + data_availability.inclusion_data FROM l1_batches LEFT JOIN commitments ON commitments.l1_batch_number = l1_batches.number + LEFT JOIN + data_availability + ON 
data_availability.l1_batch_number = l1_batches.number WHERE eth_commit_tx_id IS NOT NULL AND eth_prove_tx_id IS NULL @@ -1459,7 +1499,11 @@ impl BlocksDal<'_, '_> { events_queue_commitment, bootloader_initial_content_commitment, pubdata_input, - fee_address + fee_address, + aggregation_root, + local_root, + state_diff_hash, + data_availability.inclusion_data FROM ( SELECT @@ -1480,6 +1524,7 @@ impl BlocksDal<'_, '_> { $2 ) inn LEFT JOIN commitments ON commitments.l1_batch_number = inn.number + LEFT JOIN data_availability ON data_availability.l1_batch_number = inn.number WHERE number - row_number = $1 "#, @@ -1534,10 +1579,17 @@ impl BlocksDal<'_, '_> { events_queue_commitment, bootloader_initial_content_commitment, pubdata_input, - fee_address + fee_address, + aggregation_root, + local_root, + state_diff_hash, + data_availability.inclusion_data FROM l1_batches LEFT JOIN commitments ON commitments.l1_batch_number = l1_batches.number + LEFT JOIN + data_availability + ON data_availability.l1_batch_number = l1_batches.number WHERE eth_prove_tx_id IS NOT NULL AND eth_execute_tx_id IS NULL @@ -1663,10 +1715,17 @@ impl BlocksDal<'_, '_> { events_queue_commitment, bootloader_initial_content_commitment, pubdata_input, - fee_address + fee_address, + aggregation_root, + local_root, + state_diff_hash, + data_availability.inclusion_data FROM l1_batches LEFT JOIN commitments ON commitments.l1_batch_number = l1_batches.number + LEFT JOIN + data_availability + ON data_availability.l1_batch_number = l1_batches.number WHERE number BETWEEN $1 AND $2 ORDER BY @@ -1729,11 +1788,18 @@ impl BlocksDal<'_, '_> { events_queue_commitment, bootloader_initial_content_commitment, pubdata_input, - fee_address + fee_address, + aggregation_root, + local_root, + state_diff_hash, + data_availability.inclusion_data FROM l1_batches LEFT JOIN commitments ON commitments.l1_batch_number = l1_batches.number JOIN protocol_versions ON protocol_versions.id = l1_batches.protocol_version + LEFT JOIN + data_availability + ON data_availability.l1_batch_number = l1_batches.number WHERE eth_commit_tx_id IS NULL AND number != 0 @@ -1809,7 +1875,11 @@ impl BlocksDal<'_, '_> { events_queue_commitment, bootloader_initial_content_commitment, pubdata_input, - fee_address + fee_address, + aggregation_root, + local_root, + state_diff_hash, + data_availability.inclusion_data FROM l1_batches LEFT JOIN commitments ON commitments.l1_batch_number = l1_batches.number diff --git a/core/lib/dal/src/consensus/conv.rs b/core/lib/dal/src/consensus/conv.rs index 269c47fa2dd1..2b8488dd0c2a 100644 --- a/core/lib/dal/src/consensus/conv.rs +++ b/core/lib/dal/src/consensus/conv.rs @@ -4,7 +4,9 @@ use zksync_concurrency::net; use zksync_consensus_roles::{attester, node}; use zksync_protobuf::{read_required, required, ProtoFmt, ProtoRepr}; use zksync_types::{ - abi, ethabi, + abi, + commitment::{L1BatchCommitmentMode, PubdataParams}, + ethabi, fee::Fee, l1::{OpProcessingType, PriorityQueueType}, l2::TransactionType, @@ -135,6 +137,20 @@ impl ProtoFmt for Payload { } } + let pubdata_params = if let Some(pubdata_params) = &r.pubdata_params { + Some(PubdataParams { + l2_da_validator_address: required(&pubdata_params.l2_da_validator_address) + .and_then(|a| parse_h160(a)) + .context("l2_da_validator_address")?, + pubdata_type: required(&pubdata_params.pubdata_type) + .and_then(|x| Ok(proto::L1BatchCommitDataGeneratorMode::try_from(*x)?)) + .context("pubdata_type")? 
+ .parse(), + }) + } else { + None + }; + Ok(Self { protocol_version, hash: required(&r.hash) @@ -153,6 +169,7 @@ impl ProtoFmt for Payload { .context("operator_address")?, transactions, last_in_batch: *required(&r.last_in_batch).context("last_in_batch")?, + pubdata_params, }) } @@ -171,6 +188,16 @@ impl ProtoFmt for Payload { transactions: vec![], transactions_v25: vec![], last_in_batch: Some(self.last_in_batch), + pubdata_params: self + .pubdata_params + .map(|pubdata_params| proto::PubdataParams { + l2_da_validator_address: Some( + pubdata_params.l2_da_validator_address.as_bytes().into(), + ), + pubdata_type: Some(proto::L1BatchCommitDataGeneratorMode::new( + &pubdata_params.pubdata_type, + ) as i32), + }), }; match self.protocol_version { v if v >= ProtocolVersionId::Version25 => { @@ -517,3 +544,19 @@ impl ProtoRepr for proto::AttesterCommittee { } } } + +impl proto::L1BatchCommitDataGeneratorMode { + pub(crate) fn new(n: &L1BatchCommitmentMode) -> Self { + match n { + L1BatchCommitmentMode::Rollup => Self::Rollup, + L1BatchCommitmentMode::Validium => Self::Validium, + } + } + + pub(crate) fn parse(&self) -> L1BatchCommitmentMode { + match self { + Self::Rollup => L1BatchCommitmentMode::Rollup, + Self::Validium => L1BatchCommitmentMode::Validium, + } + } +} diff --git a/core/lib/dal/src/consensus/mod.rs b/core/lib/dal/src/consensus/mod.rs index 8e88265730e9..c7e46b2cf1b7 100644 --- a/core/lib/dal/src/consensus/mod.rs +++ b/core/lib/dal/src/consensus/mod.rs @@ -2,7 +2,9 @@ use std::collections::BTreeMap; use zksync_concurrency::net; use zksync_consensus_roles::{attester, node, validator}; -use zksync_types::{ethabi, Address, L1BatchNumber, ProtocolVersionId, Transaction, H256}; +use zksync_types::{ + commitment::PubdataParams, ethabi, Address, L1BatchNumber, ProtocolVersionId, Transaction, H256, +}; mod conv; pub mod proto; @@ -46,6 +48,7 @@ pub struct Payload { pub operator_address: Address, pub transactions: Vec, pub last_in_batch: bool, + pub pubdata_params: Option, } impl Payload { diff --git a/core/lib/dal/src/consensus/proto/mod.proto b/core/lib/dal/src/consensus/proto/mod.proto index 421904bf966b..49a69e8a36ec 100644 --- a/core/lib/dal/src/consensus/proto/mod.proto +++ b/core/lib/dal/src/consensus/proto/mod.proto @@ -26,6 +26,12 @@ message Payload { // Set for protocol_version >= 25. 
repeated TransactionV25 transactions_v25 = 12; optional bool last_in_batch = 10; // required + optional PubdataParams pubdata_params = 13; // optional +} + +message PubdataParams { + optional bytes l2_da_validator_address = 1; // required; H160 + optional L1BatchCommitDataGeneratorMode pubdata_type = 2; // required } message L1Transaction { @@ -142,3 +148,8 @@ message AttestationStatus { optional roles.validator.GenesisHash genesis = 1; // required optional uint64 next_batch_to_attest = 2; // required } + +enum L1BatchCommitDataGeneratorMode { + Rollup = 0; + Validium = 1; +} diff --git a/core/lib/dal/src/consensus/tests.rs b/core/lib/dal/src/consensus/tests.rs index e8342b7446cc..c9fd91748b2b 100644 --- a/core/lib/dal/src/consensus/tests.rs +++ b/core/lib/dal/src/consensus/tests.rs @@ -9,7 +9,9 @@ use zksync_protobuf::{ }; use zksync_test_account::Account; use zksync_types::{ - web3::Bytes, Execute, ExecuteTransactionCommon, L1BatchNumber, ProtocolVersionId, Transaction, + commitment::{L1BatchCommitmentMode, PubdataParams}, + web3::Bytes, + Execute, ExecuteTransactionCommon, L1BatchNumber, ProtocolVersionId, Transaction, }; use super::*; @@ -51,6 +53,13 @@ fn payload(rng: &mut impl Rng, protocol_version: ProtocolVersionId) -> Payload { }) .collect(), last_in_batch: rng.gen(), + pubdata_params: Some(PubdataParams { + pubdata_type: match rng.gen_range(0..2) { + 0 => L1BatchCommitmentMode::Rollup, + _ => L1BatchCommitmentMode::Validium, + }, + l2_da_validator_address: rng.gen(), + }), } } diff --git a/core/lib/dal/src/consensus_dal/tests.rs b/core/lib/dal/src/consensus_dal/tests.rs index 772e7b2bf5e7..694abc8508b6 100644 --- a/core/lib/dal/src/consensus_dal/tests.rs +++ b/core/lib/dal/src/consensus_dal/tests.rs @@ -131,6 +131,9 @@ async fn test_batch_certificate() { compressed_repeated_writes: None, zkporter_is_available: false, aux_commitments: None, + aggregation_root: rng.gen(), + local_root: rng.gen(), + state_diff_hash: rng.gen(), }, ) .await diff --git a/core/lib/dal/src/eth_watcher_dal.rs b/core/lib/dal/src/eth_watcher_dal.rs index bdfc7f24c7b5..062ad47219d8 100644 --- a/core/lib/dal/src/eth_watcher_dal.rs +++ b/core/lib/dal/src/eth_watcher_dal.rs @@ -107,7 +107,7 @@ mod tests { async fn test_get_or_set_next_block_to_process_with_different_event_types() { let pool = ConnectionPool::::test_pool().await; let mut conn = pool.connection().await.unwrap(); - let mut dal = conn.processed_events_dal(); + let mut dal = conn.eth_watcher_dal(); // Test with ProtocolUpgrades let next_block = dal diff --git a/core/lib/dal/src/lib.rs b/core/lib/dal/src/lib.rs index fbe225beb902..20b428adec44 100644 --- a/core/lib/dal/src/lib.rs +++ b/core/lib/dal/src/lib.rs @@ -131,7 +131,7 @@ where fn base_token_dal(&mut self) -> BaseTokenDal<'_, 'a>; - fn processed_events_dal(&mut self) -> EthWatcherDal<'_, 'a>; + fn eth_watcher_dal(&mut self) -> EthWatcherDal<'_, 'a>; } #[derive(Clone, Debug)] @@ -255,7 +255,7 @@ impl<'a> CoreDal<'a> for Connection<'a, Core> { BaseTokenDal { storage: self } } - fn processed_events_dal(&mut self) -> EthWatcherDal<'_, 'a> { + fn eth_watcher_dal(&mut self) -> EthWatcherDal<'_, 'a> { EthWatcherDal { storage: self } } } diff --git a/core/lib/dal/src/models/storage_block.rs b/core/lib/dal/src/models/storage_block.rs index 3bb433a05cf8..159ed71cc3e9 100644 --- a/core/lib/dal/src/models/storage_block.rs +++ b/core/lib/dal/src/models/storage_block.rs @@ -7,7 +7,7 @@ use zksync_contracts::BaseSystemContractsHashes; use zksync_types::{ api, block::{L1BatchHeader, L2BlockHeader, 
UnsealedL1BatchHeader}, - commitment::{L1BatchMetaParameters, L1BatchMetadata}, + commitment::{L1BatchCommitmentMode, L1BatchMetaParameters, L1BatchMetadata, PubdataParams}, fee_model::{BatchFeeInput, L1PeggedBatchFeeModelInput, PubdataIndependentBatchFeeModelInput}, l2_to_l1_log::{L2ToL1Log, SystemL2ToL1Log, UserL2ToL1Log}, Address, Bloom, L1BatchNumber, L2BlockNumber, ProtocolVersionId, H256, @@ -155,6 +155,10 @@ pub(crate) struct StorageL1Batch { pub bootloader_initial_content_commitment: Option>, pub pubdata_input: Option>, pub fee_address: Vec, + pub aggregation_root: Option>, + pub local_root: Option>, + pub state_diff_hash: Option>, + pub inclusion_data: Option>, } impl StorageL1Batch { @@ -263,6 +267,10 @@ impl TryFrom for L1BatchMetadata { bootloader_initial_content_commitment: batch .bootloader_initial_content_commitment .map(|v| H256::from_slice(&v)), + state_diff_hash: batch.state_diff_hash.map(|v| H256::from_slice(&v)), + local_root: batch.local_root.map(|v| H256::from_slice(&v)), + aggregation_root: batch.aggregation_root.map(|v| H256::from_slice(&v)), + da_inclusion_data: batch.inclusion_data, }) } } @@ -485,6 +493,8 @@ pub(crate) struct StorageL2BlockHeader { /// This value should bound the maximal amount of gas that can be spent by transactions in the miniblock. pub gas_limit: Option, pub logs_bloom: Option>, + pub l2_da_validator_address: Vec, + pub pubdata_type: String, } impl From for L2BlockHeader { @@ -532,6 +542,10 @@ impl From for L2BlockHeader { .logs_bloom .map(|b| Bloom::from_slice(&b)) .unwrap_or_default(), + pubdata_params: PubdataParams { + l2_da_validator_address: Address::from_slice(&row.l2_da_validator_address), + pubdata_type: L1BatchCommitmentMode::from_str(&row.pubdata_type).unwrap(), + }, } } } diff --git a/core/lib/dal/src/models/storage_sync.rs b/core/lib/dal/src/models/storage_sync.rs index 7a4ebe074fe0..0eb65a606d1f 100644 --- a/core/lib/dal/src/models/storage_sync.rs +++ b/core/lib/dal/src/models/storage_sync.rs @@ -1,7 +1,11 @@ +use std::str::FromStr; + use zksync_contracts::BaseSystemContractsHashes; use zksync_db_connection::error::SqlxContext; use zksync_types::{ - api::en, parse_h160, parse_h256, parse_h256_opt, Address, L1BatchNumber, L2BlockNumber, + api::en, + commitment::{L1BatchCommitmentMode, PubdataParams}, + parse_h160, parse_h256, parse_h256_opt, Address, L1BatchNumber, L2BlockNumber, ProtocolVersionId, Transaction, H256, }; @@ -25,6 +29,8 @@ pub(crate) struct StorageSyncBlock { pub protocol_version: i32, pub virtual_blocks: i64, pub hash: Vec, + pub l2_da_validator_address: Vec, + pub pubdata_type: String, } pub(crate) struct SyncBlock { @@ -40,6 +46,7 @@ pub(crate) struct SyncBlock { pub virtual_blocks: u32, pub hash: H256, pub protocol_version: ProtocolVersionId, + pub pubdata_params: PubdataParams, } impl TryFrom for SyncBlock { @@ -89,6 +96,12 @@ impl TryFrom for SyncBlock { .decode_column("virtual_blocks")?, hash: parse_h256(&block.hash).decode_column("hash")?, protocol_version: parse_protocol_version(block.protocol_version)?, + pubdata_params: PubdataParams { + pubdata_type: L1BatchCommitmentMode::from_str(&block.pubdata_type) + .decode_column("Invalid pubdata type")?, + l2_da_validator_address: parse_h160(&block.l2_da_validator_address) + .decode_column("l2_da_validator_address")?, + }, }) } } @@ -109,6 +122,7 @@ impl SyncBlock { virtual_blocks: Some(self.virtual_blocks), hash: Some(self.hash), protocol_version: self.protocol_version, + pubdata_params: Some(self.pubdata_params), } } @@ -125,6 +139,7 @@ impl SyncBlock { 
operator_address: self.fee_account_address, transactions, last_in_batch: self.last_in_batch, + pubdata_params: Some(self.pubdata_params), } } } diff --git a/core/lib/dal/src/sync_dal.rs b/core/lib/dal/src/sync_dal.rs index 265c61354887..55e6543c0285 100644 --- a/core/lib/dal/src/sync_dal.rs +++ b/core/lib/dal/src/sync_dal.rs @@ -56,7 +56,9 @@ impl SyncDal<'_, '_> { miniblocks.virtual_blocks, miniblocks.hash, miniblocks.protocol_version AS "protocol_version!", - miniblocks.fee_account_address AS "fee_account_address!" + miniblocks.fee_account_address AS "fee_account_address!", + miniblocks.l2_da_validator_address AS "l2_da_validator_address!", + miniblocks.pubdata_type AS "pubdata_type!" FROM miniblocks WHERE diff --git a/core/lib/dal/src/tests/mod.rs b/core/lib/dal/src/tests/mod.rs index bf85008f7b58..baa2ee584856 100644 --- a/core/lib/dal/src/tests/mod.rs +++ b/core/lib/dal/src/tests/mod.rs @@ -4,6 +4,7 @@ use zksync_contracts::BaseSystemContractsHashes; use zksync_db_connection::connection_pool::ConnectionPool; use zksync_types::{ block::{L1BatchHeader, L2BlockHasher, L2BlockHeader}, + commitment::PubdataParams, fee::Fee, fee_model::BatchFeeInput, helpers::unix_timestamp_ms, @@ -52,6 +53,7 @@ pub(crate) fn create_l2_block_header(number: u32) -> L2BlockHeader { virtual_blocks: 1, gas_limit: 0, logs_bloom: Default::default(), + pubdata_params: PubdataParams::default(), } } diff --git a/core/lib/env_config/src/contracts.rs b/core/lib/env_config/src/contracts.rs index 298c43b80ccd..3792f356be4e 100644 --- a/core/lib/env_config/src/contracts.rs +++ b/core/lib/env_config/src/contracts.rs @@ -63,6 +63,7 @@ mod tests { l2_weth_bridge_addr: Some(addr("8656770FA78c830456B00B4fFCeE6b1De0e1b888")), l1_shared_bridge_proxy_addr: Some(addr("8656770FA78c830456B00B4fFCeE6b1De0e1b888")), l2_shared_bridge_addr: Some(addr("8656770FA78c830456B00B4fFCeE6b1De0e1b888")), + l2_legacy_shared_bridge_addr: Some(addr("8656770FA78c830456B00B4fFCeE6b1De0e1b888")), l2_testnet_paymaster_addr: Some(addr("FC073319977e314F251EAE6ae6bE76B0B3BAeeCF")), l1_multicall3_addr: addr("0xcA11bde05977b3631167028862bE2a173976CA11"), ecosystem_contracts: Some(EcosystemContracts { @@ -72,6 +73,7 @@ mod tests { }), base_token_addr: Some(SHARED_BRIDGE_ETHER_TOKEN_ADDRESS), chain_admin_addr: Some(addr("0xdd6fa5c14e7550b4caf2aa2818d24c69cbc347ff")), + l2_da_validator_addr: Some(addr("0xed6fa5c14e7550b4caf2aa2818d24c69cbc347ff")), } } @@ -93,11 +95,13 @@ CONTRACTS_L2_CONSENSUS_REGISTRY_ADDR="D64e136566a9E04eb05B30184fF577F52682D182" CONTRACTS_L1_MULTICALL3_ADDR="0xcA11bde05977b3631167028862bE2a173976CA11" CONTRACTS_L1_SHARED_BRIDGE_PROXY_ADDR="0x8656770FA78c830456B00B4fFCeE6b1De0e1b888" CONTRACTS_L2_SHARED_BRIDGE_ADDR="0x8656770FA78c830456B00B4fFCeE6b1De0e1b888" +CONTRACTS_L2_LEGACY_SHARED_BRIDGE_ADDR="0x8656770FA78c830456B00B4fFCeE6b1De0e1b888" CONTRACTS_BRIDGEHUB_PROXY_ADDR="0x35ea7f92f4c5f433efe15284e99c040110cf6297" CONTRACTS_STATE_TRANSITION_PROXY_ADDR="0xd90f1c081c6117241624e97cb6147257c3cb2097" CONTRACTS_TRANSPARENT_PROXY_ADMIN_ADDR="0xdd6fa5c14e7550b4caf2aa2818d24c69cbc347e5" CONTRACTS_BASE_TOKEN_ADDR="0x0000000000000000000000000000000000000001" CONTRACTS_CHAIN_ADMIN_ADDR="0xdd6fa5c14e7550b4caf2aa2818d24c69cbc347ff" +CONTRACTS_L2_DA_VALIDATOR_ADDR="0xed6fa5c14e7550b4caf2aa2818d24c69cbc347ff" "#; lock.set_env(config); diff --git a/core/lib/eth_client/src/clients/http/query.rs b/core/lib/eth_client/src/clients/http/query.rs index 5e788509461d..de115cf6e7a6 100644 --- a/core/lib/eth_client/src/clients/http/query.rs +++ 
b/core/lib/eth_client/src/clients/http/query.rs @@ -15,7 +15,7 @@ use crate::{ BaseFees, EthFeeInterface, EthInterface, RawTransactionBytes, }; -const FEE_HISTORY_MAX_REQUEST_CHUNK: usize = 1024; +const FEE_HISTORY_MAX_REQUEST_CHUNK: usize = 1023; #[async_trait] impl EthInterface for T @@ -304,14 +304,14 @@ where COUNTERS.call[&(Method::BaseFeeHistory, client.component())].inc(); let latency = LATENCIES.direct[&Method::BaseFeeHistory].start(); let mut history = Vec::with_capacity(block_count); - let from_block = upto_block.saturating_sub(block_count); + let from_block = upto_block.saturating_sub(block_count - 1); // Here we are requesting `fee_history` from blocks // `(from_block; upto_block)` in chunks of size `MAX_REQUEST_CHUNK` // starting from the oldest block. for chunk_start in (from_block..=upto_block).step_by(FEE_HISTORY_MAX_REQUEST_CHUNK) { let chunk_end = (chunk_start + FEE_HISTORY_MAX_REQUEST_CHUNK).min(upto_block); - let chunk_size = chunk_end - chunk_start; + let chunk_size = chunk_end - chunk_start + 1; let fee_history = client .fee_history( @@ -324,22 +324,50 @@ where .with_arg("block", &chunk_end) .await?; - // Check that the lengths are the same. + if fee_history.oldest_block != web3::BlockNumber::Number(chunk_start.into()) { + let oldest_block = match fee_history.oldest_block { + web3::BlockNumber::Number(oldest_block) => oldest_block.to_string(), + _ => format!("{:?}", fee_history.oldest_block), + }; + let message = + format!("unexpected `oldest_block`, expected: {chunk_start}, got {oldest_block}"); + return Err(EnrichedClientError::custom(message, "l1_fee_history") + .with_arg("chunk_size", &chunk_size) + .with_arg("chunk_end", &chunk_end)); + } + + if fee_history.base_fee_per_gas.len() != chunk_size + 1 { + let message = format!( + "unexpected `base_fee_per_gas.len()`, expected: {}, got {}", + chunk_size + 1, + fee_history.base_fee_per_gas.len() + ); + return Err(EnrichedClientError::custom(message, "l1_fee_history") + .with_arg("chunk_size", &chunk_size) + .with_arg("chunk_end", &chunk_end)); + } + // Per specification, the values should always be provided, and must be 0 for blocks // prior to EIP-4844. // https://ethereum.github.io/execution-apis/api-documentation/ - if fee_history.base_fee_per_gas.len() != fee_history.base_fee_per_blob_gas.len() { - tracing::error!( - "base_fee_per_gas and base_fee_per_blob_gas have different lengths: {} and {}", - fee_history.base_fee_per_gas.len(), + if fee_history.base_fee_per_blob_gas.len() != chunk_size + 1 { + let message = format!( + "unexpected `base_fee_per_blob_gas.len()`, expected: {}, got {}", + chunk_size + 1, fee_history.base_fee_per_blob_gas.len() ); + return Err(EnrichedClientError::custom(message, "l1_fee_history") + .with_arg("chunk_size", &chunk_size) + .with_arg("chunk_end", &chunk_end)); } + // We take `chunk_size` entries for consistency with `l2_base_fee_history` which doesn't + // have correct data for block with number `upto_block + 1`. 
             for (base, blob) in fee_history
                 .base_fee_per_gas
                 .into_iter()
                 .zip(fee_history.base_fee_per_blob_gas)
+                .take(chunk_size)
             {
                 let fees = BaseFees {
                     base_fee_per_gas: cast_to_u64(base, "base_fee_per_gas")?,
@@ -387,14 +415,14 @@ where
         COUNTERS.call[&(Method::L2FeeHistory, client.component())].inc();
         let latency = LATENCIES.direct[&Method::BaseFeeHistory].start();
         let mut history = Vec::with_capacity(block_count);
-        let from_block = upto_block.saturating_sub(block_count);
+        let from_block = upto_block.saturating_sub(block_count - 1);

         // Here we are requesting `fee_history` from blocks
         // `(from_block; upto_block)` in chunks of size `FEE_HISTORY_MAX_REQUEST_CHUNK`
         // starting from the oldest block.
         for chunk_start in (from_block..=upto_block).step_by(FEE_HISTORY_MAX_REQUEST_CHUNK) {
             let chunk_end = (chunk_start + FEE_HISTORY_MAX_REQUEST_CHUNK).min(upto_block);
-            let chunk_size = chunk_end - chunk_start;
+            let chunk_size = chunk_end - chunk_start + 1;

             let fee_history = client
                 .fee_history(U64::from(chunk_size).into(), chunk_end.into(), vec![])
@@ -403,19 +431,46 @@ where
                 .with_arg("block", &chunk_end)
                 .await?;

-            // Check that the lengths are the same.
-            if fee_history.inner.base_fee_per_gas.len() != fee_history.l2_pubdata_price.len() {
-                tracing::error!(
-                    "base_fee_per_gas and pubdata_price have different lengths: {} and {}",
-                    fee_history.inner.base_fee_per_gas.len(),
+            if fee_history.inner.oldest_block != web3::BlockNumber::Number(chunk_start.into()) {
+                let oldest_block = match fee_history.inner.oldest_block {
+                    web3::BlockNumber::Number(oldest_block) => oldest_block.to_string(),
+                    _ => format!("{:?}", fee_history.inner.oldest_block),
+                };
+                let message =
+                    format!("unexpected `oldest_block`, expected: {chunk_start}, got {oldest_block}");
+                return Err(EnrichedClientError::custom(message, "l2_fee_history")
+                    .with_arg("chunk_size", &chunk_size)
+                    .with_arg("chunk_end", &chunk_end));
+            }
+
+            if fee_history.inner.base_fee_per_gas.len() != chunk_size + 1 {
+                let message = format!(
+                    "unexpected `base_fee_per_gas.len()`, expected: {}, got {}",
+                    chunk_size + 1,
+                    fee_history.inner.base_fee_per_gas.len()
+                );
+                return Err(EnrichedClientError::custom(message, "l2_fee_history")
+                    .with_arg("chunk_size", &chunk_size)
+                    .with_arg("chunk_end", &chunk_end));
+            }
+
+            if fee_history.l2_pubdata_price.len() != chunk_size {
+                let message = format!(
+                    "unexpected `l2_pubdata_price.len()`, expected: {}, got {}",
+                    chunk_size,
                     fee_history.l2_pubdata_price.len()
                 );
+                return Err(EnrichedClientError::custom(message, "l2_fee_history")
+                    .with_arg("chunk_size", &chunk_size)
+                    .with_arg("chunk_end", &chunk_end));
             }

+            // We take `chunk_size` entries because base fee for block `upto_block + 1` may change.
for (base, l2_pubdata_price) in fee_history .inner .base_fee_per_gas .into_iter() + .take(chunk_size) .zip(fee_history.l2_pubdata_price) { let fees = BaseFees { diff --git a/core/lib/eth_client/src/clients/mock.rs b/core/lib/eth_client/src/clients/mock.rs index b33554b6292c..8e81b6c6f209 100644 --- a/core/lib/eth_client/src/clients/mock.rs +++ b/core/lib/eth_client/src/clients/mock.rs @@ -415,25 +415,35 @@ fn l2_eth_fee_history( let from_block = from_block.as_usize(); let start_block = from_block.saturating_sub(block_count.as_usize() - 1); + // duplicates last value to follow `feeHistory` response format, it should return `block_count + 1` values + let base_fee_per_gas = base_fee_history[start_block..=from_block] + .iter() + .chain([&base_fee_history[from_block]]) + .map(|fee| U256::from(fee.base_fee_per_gas)) + .collect(); + + // duplicates last value to follow `feeHistory` response format, it should return `block_count + 1` values + let base_fee_per_blob_gas = base_fee_history[start_block..=from_block] + .iter() + .chain([&base_fee_history[from_block]]) // duplicate last value + .map(|fee| fee.base_fee_per_blob_gas) + .collect(); + + let l2_pubdata_price = base_fee_history[start_block..=from_block] + .iter() + .map(|fee| fee.l2_pubdata_price) + .collect(); + FeeHistory { inner: web3::FeeHistory { oldest_block: start_block.into(), - base_fee_per_gas: base_fee_history[start_block..=from_block] - .iter() - .map(|fee| U256::from(fee.base_fee_per_gas)) - .collect(), - base_fee_per_blob_gas: base_fee_history[start_block..=from_block] - .iter() - .map(|fee| fee.base_fee_per_blob_gas) - .collect(), + base_fee_per_gas, + base_fee_per_blob_gas, gas_used_ratio: vec![], // not used blob_gas_used_ratio: vec![], // not used reward: None, }, - l2_pubdata_price: base_fee_history[start_block..=from_block] - .iter() - .map(|fee| fee.l2_pubdata_price) - .collect(), + l2_pubdata_price, } } diff --git a/core/lib/multivm/Cargo.toml b/core/lib/multivm/Cargo.toml index e49086a6b8b1..eb770bf9b57e 100644 --- a/core/lib/multivm/Cargo.toml +++ b/core/lib/multivm/Cargo.toml @@ -29,6 +29,7 @@ zksync_contracts.workspace = true zksync_utils.workspace = true zksync_system_constants.workspace = true zksync_vm_interface.workspace = true +zksync_mini_merkle_tree.workspace = true anyhow.workspace = true hex.workspace = true diff --git a/core/lib/multivm/src/lib.rs b/core/lib/multivm/src/lib.rs index 520274c14ae0..1cba2c0fb92b 100644 --- a/core/lib/multivm/src/lib.rs +++ b/core/lib/multivm/src/lib.rs @@ -20,6 +20,7 @@ pub use crate::{ }; mod glue; +pub mod pubdata_builders; pub mod tracers; pub mod utils; mod versions; diff --git a/core/lib/multivm/src/pubdata_builders/mod.rs b/core/lib/multivm/src/pubdata_builders/mod.rs new file mode 100644 index 000000000000..c52c4c70c86a --- /dev/null +++ b/core/lib/multivm/src/pubdata_builders/mod.rs @@ -0,0 +1,24 @@ +use std::rc::Rc; + +pub use rollup::RollupPubdataBuilder; +pub use validium::ValidiumPubdataBuilder; +use zksync_types::commitment::{L1BatchCommitmentMode, PubdataParams}; + +use crate::interface::pubdata::PubdataBuilder; + +mod rollup; +#[cfg(test)] +mod tests; +mod utils; +mod validium; + +pub fn pubdata_params_to_builder(params: PubdataParams) -> Rc { + match params.pubdata_type { + L1BatchCommitmentMode::Rollup => { + Rc::new(RollupPubdataBuilder::new(params.l2_da_validator_address)) + } + L1BatchCommitmentMode::Validium => { + Rc::new(ValidiumPubdataBuilder::new(params.l2_da_validator_address)) + } + } +} diff --git a/core/lib/multivm/src/pubdata_builders/rollup.rs 
b/core/lib/multivm/src/pubdata_builders/rollup.rs new file mode 100644 index 000000000000..4a818dfe2314 --- /dev/null +++ b/core/lib/multivm/src/pubdata_builders/rollup.rs @@ -0,0 +1,128 @@ +use zksync_types::{ + ethabi, + ethabi::{ParamType, Token}, + l2_to_l1_log::l2_to_l1_logs_tree_size, + writes::compress_state_diffs, + Address, ProtocolVersionId, +}; + +use super::utils::{ + build_chained_bytecode_hash, build_chained_log_hash, build_chained_message_hash, + build_logs_root, encode_user_logs, +}; +use crate::interface::pubdata::{PubdataBuilder, PubdataInput}; + +#[derive(Debug, Clone, Copy)] +pub struct RollupPubdataBuilder { + pub l2_da_validator: Address, +} + +impl RollupPubdataBuilder { + pub fn new(l2_da_validator: Address) -> Self { + Self { l2_da_validator } + } +} + +impl PubdataBuilder for RollupPubdataBuilder { + fn l2_da_validator(&self) -> Address { + self.l2_da_validator + } + + fn l1_messenger_operator_input( + &self, + input: &PubdataInput, + protocol_version: ProtocolVersionId, + ) -> Vec { + if protocol_version.is_pre_gateway() { + let mut operator_input = vec![]; + extend_from_pubdata_input(&mut operator_input, input); + + // Extend with uncompressed state diffs. + operator_input.extend((input.state_diffs.len() as u32).to_be_bytes()); + for state_diff in &input.state_diffs { + operator_input.extend(state_diff.encode_padded()); + } + + operator_input + } else { + let mut pubdata = vec![]; + extend_from_pubdata_input(&mut pubdata, input); + + // Extend with uncompressed state diffs. + pubdata.extend((input.state_diffs.len() as u32).to_be_bytes()); + for state_diff in &input.state_diffs { + pubdata.extend(state_diff.encode_padded()); + } + + let chained_log_hash = build_chained_log_hash(&input.user_logs); + let log_root_hash = + build_logs_root(&input.user_logs, l2_to_l1_logs_tree_size(protocol_version)); + let chained_msg_hash = build_chained_message_hash(&input.l2_to_l1_messages); + let chained_bytecodes_hash = build_chained_bytecode_hash(&input.published_bytecodes); + + let l2_da_header = vec![ + Token::FixedBytes(chained_log_hash), + Token::FixedBytes(log_root_hash), + Token::FixedBytes(chained_msg_hash), + Token::FixedBytes(chained_bytecodes_hash), + Token::Bytes(pubdata), + ]; + + // Selector of `IL2DAValidator::validatePubdata`. + let func_selector = ethabi::short_signature( + "validatePubdata", + &[ + ParamType::FixedBytes(32), + ParamType::FixedBytes(32), + ParamType::FixedBytes(32), + ParamType::FixedBytes(32), + ParamType::Bytes, + ], + ); + + [func_selector.to_vec(), ethabi::encode(&l2_da_header)].concat() + } + } + + fn settlement_layer_pubdata( + &self, + input: &PubdataInput, + _protocol_version: ProtocolVersionId, + ) -> Vec { + let mut pubdata = vec![]; + extend_from_pubdata_input(&mut pubdata, input); + + pubdata + } +} + +fn extend_from_pubdata_input(buffer: &mut Vec, pubdata_input: &PubdataInput) { + let PubdataInput { + user_logs, + l2_to_l1_messages, + published_bytecodes, + state_diffs, + } = pubdata_input; + + // Adding user L2->L1 logs. + buffer.extend(encode_user_logs(user_logs)); + + // Encoding L2->L1 messages + // Format: `[(numberOfMessages as u32) || (messages[1].len() as u32) || messages[1] || ... 
|| (messages[n].len() as u32) || messages[n]]`
+    buffer.extend((l2_to_l1_messages.len() as u32).to_be_bytes());
+    for message in l2_to_l1_messages {
+        buffer.extend((message.len() as u32).to_be_bytes());
+        buffer.extend(message);
+    }
+    // Encoding bytecodes
+    // Format: `[(numberOfBytecodes as u32) || (bytecodes[1].len() as u32) || bytecodes[1] || ... || (bytecodes[n].len() as u32) || bytecodes[n]]`
+    buffer.extend((published_bytecodes.len() as u32).to_be_bytes());
+    for bytecode in published_bytecodes {
+        buffer.extend((bytecode.len() as u32).to_be_bytes());
+        buffer.extend(bytecode);
+    }
+    // Encoding state diffs
+    // Format: `[size of compressed state diffs u32 || compressed state diffs || (# state diffs: initial + repeated) as u32 || sorted state diffs by ]`
+    let state_diffs_compressed = compress_state_diffs(state_diffs.clone());
+    buffer.extend(state_diffs_compressed);
+}
diff --git a/core/lib/multivm/src/pubdata_builders/tests.rs b/core/lib/multivm/src/pubdata_builders/tests.rs
new file mode 100644
index 000000000000..bc24b8e47346
--- /dev/null
+++ b/core/lib/multivm/src/pubdata_builders/tests.rs
@@ -0,0 +1,123 @@
+use zksync_types::{
+    writes::StateDiffRecord, Address, ProtocolVersionId, ACCOUNT_CODE_STORAGE_ADDRESS,
+    BOOTLOADER_ADDRESS,
+};
+use zksync_utils::u256_to_h256;
+
+use super::{rollup::RollupPubdataBuilder, validium::ValidiumPubdataBuilder};
+use crate::interface::pubdata::{L1MessengerL2ToL1Log, PubdataBuilder, PubdataInput};
+
+fn mock_input() -> PubdataInput {
+    // Just using some constant addresses for tests
+    let addr1 = BOOTLOADER_ADDRESS;
+    let addr2 = ACCOUNT_CODE_STORAGE_ADDRESS;
+
+    let user_logs = vec![L1MessengerL2ToL1Log {
+        l2_shard_id: 0,
+        is_service: false,
+        tx_number_in_block: 0,
+        sender: addr1,
+        key: 1.into(),
+        value: 128.into(),
+    }];
+
+    let l2_to_l1_messages = vec![hex::decode("deadbeef").unwrap()];
+
+    let published_bytecodes = vec![hex::decode("bbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaabbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbb").unwrap()];
+
+    // For covering more cases, we have two state diffs:
+    // One with enumeration index present (and so it is a repeated write) and one without it.
+ let state_diffs = vec![ + StateDiffRecord { + address: addr2, + key: 155.into(), + derived_key: u256_to_h256(125.into()).0, + enumeration_index: 12, + initial_value: 11.into(), + final_value: 12.into(), + }, + StateDiffRecord { + address: addr2, + key: 156.into(), + derived_key: u256_to_h256(126.into()).0, + enumeration_index: 0, + initial_value: 0.into(), + final_value: 14.into(), + }, + ]; + + PubdataInput { + user_logs, + l2_to_l1_messages, + published_bytecodes, + state_diffs, + } +} + +#[test] +fn test_rollup_pubdata_building() { + let input = mock_input(); + + let rollup_pubdata_builder = RollupPubdataBuilder::new(Address::zero()); + + let actual = + rollup_pubdata_builder.l1_messenger_operator_input(&input, ProtocolVersionId::Version24); + let expected = "00000001000000000000000000000000000000000000000000008001000000000000000000000000000000000000000000000000000000000000000100000000000000000000000000000000000000000000000000000000000000800000000100000004deadbeef0000000100000060bbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaabbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbb0100002a040001000000000000000000000000000000000000000000000000000000000000007e090e0000000c0901000000020000000000000000000000000000000000008002000000000000000000000000000000000000000000000000000000000000009b000000000000000000000000000000000000000000000000000000000000007d000000000000000c000000000000000000000000000000000000000000000000000000000000000b000000000000000000000000000000000000000000000000000000000000000c00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000008002000000000000000000000000000000000000000000000000000000000000009c000000000000000000000000000000000000000000000000000000000000007e00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000e0000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000"; + assert_eq!( + &hex::encode(actual), + expected, + "mismatch for `l1_messenger_operator_input` (pre gateway)" + ); + + let actual = + rollup_pubdata_builder.settlement_layer_pubdata(&input, ProtocolVersionId::Version24); + let expected = "00000001000000000000000000000000000000000000000000008001000000000000000000000000000000000000000000000000000000000000000100000000000000000000000000000000000000000000000000000000000000800000000100000004deadbeef0000000100000060bbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaabbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbb0100002a040001000000000000000000000000000000000000000000000000000000000000007e090e0000000c0901"; + assert_eq!( + &hex::encode(actual), + expected, + "mismatch for `settlement_layer_pubdata` (pre gateway)" + ); + + let actual = + rollup_pubdata_builder.l1_messenger_operator_input(&input, ProtocolVersionId::Version27); + let expected = 
"89f9a07233e608561d90f7c4e7bcea24d718e425a6bd6c8eefb48a334366143694c75fae278944d856d68e33bbd32937cb3a1ea35cbf7d6eeeb1150f500dd0d64d0efe420d6dafe5897eab2fc27b2e47af303397ed285ace146d836d042717b0a3dc4b28a603a33b28ce1d5c52c593a46a15a99f1afa1c1d92715284288958fd54a93de700000000000000000000000000000000000000000000000000000000000000a0000000000000000000000000000000000000000000000000000000000000032300000001000000000000000000000000000000000000000000008001000000000000000000000000000000000000000000000000000000000000000100000000000000000000000000000000000000000000000000000000000000800000000100000004deadbeef0000000100000060bbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaabbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbb0100002a040001000000000000000000000000000000000000000000000000000000000000007e090e0000000c0901000000020000000000000000000000000000000000008002000000000000000000000000000000000000000000000000000000000000009b000000000000000000000000000000000000000000000000000000000000007d000000000000000c000000000000000000000000000000000000000000000000000000000000000b000000000000000000000000000000000000000000000000000000000000000c00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000008002000000000000000000000000000000000000000000000000000000000000009c000000000000000000000000000000000000000000000000000000000000007e00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000e00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000"; + assert_eq!( + &hex::encode(actual), + expected, + "mismatch for `l1_messenger_operator_input` (post gateway)" + ); + + let actual = + rollup_pubdata_builder.settlement_layer_pubdata(&input, ProtocolVersionId::Version27); + let expected = "00000001000000000000000000000000000000000000000000008001000000000000000000000000000000000000000000000000000000000000000100000000000000000000000000000000000000000000000000000000000000800000000100000004deadbeef0000000100000060bbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaabbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbb0100002a040001000000000000000000000000000000000000000000000000000000000000007e090e0000000c0901"; + assert_eq!( + &hex::encode(actual), + expected, + "mismatch for `settlement_layer_pubdata` (post gateway)" + ); +} + +#[test] +fn test_validium_pubdata_building() { + let input = mock_input(); + + let validium_pubdata_builder = ValidiumPubdataBuilder::new(Address::zero()); + + let actual = + validium_pubdata_builder.l1_messenger_operator_input(&input, ProtocolVersionId::Version27); + let expected = 
"89f9a07233e608561d90f7c4e7bcea24d718e425a6bd6c8eefb48a334366143694c75fae278944d856d68e33bbd32937cb3a1ea35cbf7d6eeeb1150f500dd0d64d0efe420d6dafe5897eab2fc27b2e47af303397ed285ace146d836d042717b0a3dc4b28a603a33b28ce1d5c52c593a46a15a99f1afa1c1d92715284288958fd54a93de700000000000000000000000000000000000000000000000000000000000000a0000000000000000000000000000000000000000000000000000000000000005c000000010000000000000000000000000000000000000000000080010000000000000000000000000000000000000000000000000000000000000001000000000000000000000000000000000000000000000000000000000000008000000000"; + assert_eq!( + &hex::encode(actual), + expected, + "mismatch for `l1_messenger_operator_input`" + ); + + let actual = + validium_pubdata_builder.settlement_layer_pubdata(&input, ProtocolVersionId::Version27); + let expected = "fa96e2436e6fb4d668f5a06681a7c53fcb199b2747ee624ee52a13e85aac5f1e"; + assert_eq!( + &hex::encode(actual), + expected, + "mismatch for `settlement_layer_pubdata`" + ); +} diff --git a/core/lib/multivm/src/pubdata_builders/utils.rs b/core/lib/multivm/src/pubdata_builders/utils.rs new file mode 100644 index 000000000000..57361a674fb7 --- /dev/null +++ b/core/lib/multivm/src/pubdata_builders/utils.rs @@ -0,0 +1,70 @@ +use zksync_mini_merkle_tree::MiniMerkleTree; +use zksync_types::web3::keccak256; +use zksync_utils::bytecode::hash_bytecode; + +use crate::interface::pubdata::L1MessengerL2ToL1Log; + +pub(crate) fn build_chained_log_hash(user_logs: &[L1MessengerL2ToL1Log]) -> Vec { + let mut chained_log_hash = vec![0u8; 32]; + + for log in user_logs { + let log_bytes = log.packed_encoding(); + let hash = keccak256(&log_bytes); + + chained_log_hash = keccak256(&[chained_log_hash, hash.to_vec()].concat()).to_vec(); + } + + chained_log_hash +} + +pub(crate) fn build_logs_root( + user_logs: &[L1MessengerL2ToL1Log], + l2_to_l1_logs_tree_size: usize, +) -> Vec { + let logs = user_logs.iter().map(|log| { + let encoded = log.packed_encoding(); + let mut slice = [0u8; 88]; + slice.copy_from_slice(&encoded); + slice + }); + MiniMerkleTree::new(logs, Some(l2_to_l1_logs_tree_size)) + .merkle_root() + .as_bytes() + .to_vec() +} + +pub(crate) fn build_chained_message_hash(l2_to_l1_messages: &[Vec]) -> Vec { + let mut chained_msg_hash = vec![0u8; 32]; + + for msg in l2_to_l1_messages { + let hash = keccak256(msg); + + chained_msg_hash = keccak256(&[chained_msg_hash, hash.to_vec()].concat()).to_vec(); + } + + chained_msg_hash +} + +pub(crate) fn build_chained_bytecode_hash(published_bytecodes: &[Vec]) -> Vec { + let mut chained_bytecode_hash = vec![0u8; 32]; + + for bytecode in published_bytecodes { + let hash = hash_bytecode(bytecode).to_fixed_bytes(); + + chained_bytecode_hash = + keccak256(&[chained_bytecode_hash, hash.to_vec()].concat()).to_vec(); + } + + chained_bytecode_hash +} + +pub(crate) fn encode_user_logs(user_logs: &[L1MessengerL2ToL1Log]) -> Vec { + // Encoding user L2->L1 logs. + // Format: `[(numberOfL2ToL1Logs as u32) || l2tol1logs[1] || ... 
|| l2tol1logs[n]]` + let mut result = vec![]; + result.extend((user_logs.len() as u32).to_be_bytes()); + for l2tol1log in user_logs { + result.extend(l2tol1log.packed_encoding()); + } + result +} diff --git a/core/lib/multivm/src/pubdata_builders/validium.rs b/core/lib/multivm/src/pubdata_builders/validium.rs new file mode 100644 index 000000000000..a9156e970aad --- /dev/null +++ b/core/lib/multivm/src/pubdata_builders/validium.rs @@ -0,0 +1,93 @@ +use zksync_types::{ + ethabi, + ethabi::{ParamType, Token}, + l2_to_l1_log::l2_to_l1_logs_tree_size, + web3::keccak256, + Address, ProtocolVersionId, +}; + +use super::utils::{ + build_chained_bytecode_hash, build_chained_log_hash, build_chained_message_hash, + build_logs_root, encode_user_logs, +}; +use crate::interface::pubdata::{PubdataBuilder, PubdataInput}; + +#[derive(Debug, Clone, Copy)] +pub struct ValidiumPubdataBuilder { + pub l2_da_validator: Address, +} + +impl ValidiumPubdataBuilder { + pub fn new(l2_da_validator: Address) -> Self { + Self { l2_da_validator } + } +} + +impl PubdataBuilder for ValidiumPubdataBuilder { + fn l2_da_validator(&self) -> Address { + self.l2_da_validator + } + + fn l1_messenger_operator_input( + &self, + input: &PubdataInput, + protocol_version: ProtocolVersionId, + ) -> Vec { + assert!( + !protocol_version.is_pre_gateway(), + "ValidiumPubdataBuilder must not be called for pre gateway" + ); + + let mut pubdata = vec![]; + pubdata.extend(encode_user_logs(&input.user_logs)); + + let chained_log_hash = build_chained_log_hash(&input.user_logs); + let log_root_hash = + build_logs_root(&input.user_logs, l2_to_l1_logs_tree_size(protocol_version)); + let chained_msg_hash = build_chained_message_hash(&input.l2_to_l1_messages); + let chained_bytecodes_hash = build_chained_bytecode_hash(&input.published_bytecodes); + + let l2_da_header = vec![ + Token::FixedBytes(chained_log_hash), + Token::FixedBytes(log_root_hash), + Token::FixedBytes(chained_msg_hash), + Token::FixedBytes(chained_bytecodes_hash), + Token::Bytes(pubdata), + ]; + + // Selector of `IL2DAValidator::validatePubdata`. 
+ let func_selector = ethabi::short_signature( + "validatePubdata", + &[ + ParamType::FixedBytes(32), + ParamType::FixedBytes(32), + ParamType::FixedBytes(32), + ParamType::FixedBytes(32), + ParamType::Bytes, + ], + ); + + [func_selector.to_vec(), ethabi::encode(&l2_da_header)] + .concat() + .to_vec() + } + + fn settlement_layer_pubdata( + &self, + input: &PubdataInput, + protocol_version: ProtocolVersionId, + ) -> Vec { + assert!( + !protocol_version.is_pre_gateway(), + "ValidiumPubdataBuilder must not be called for pre gateway" + ); + + let state_diffs_packed = input + .state_diffs + .iter() + .flat_map(|diff| diff.encode_padded()) + .collect::>(); + + keccak256(&state_diffs_packed).to_vec() + } +} diff --git a/core/lib/multivm/src/utils/events.rs b/core/lib/multivm/src/utils/events.rs index 9720cb779142..d84651989e75 100644 --- a/core/lib/multivm/src/utils/events.rs +++ b/core/lib/multivm/src/utils/events.rs @@ -1,59 +1,10 @@ use zksync_system_constants::L1_MESSENGER_ADDRESS; use zksync_types::{ ethabi::{self, Token}, - l2_to_l1_log::L2ToL1Log, - Address, H256, U256, + H256, U256, }; -use zksync_utils::{u256_to_bytes_be, u256_to_h256}; -use crate::interface::VmEvent; - -/// Corresponds to the following solidity event: -/// ```solidity -/// struct L2ToL1Log { -/// uint8 l2ShardId; -/// bool isService; -/// uint16 txNumberInBlock; -/// address sender; -/// bytes32 key; -/// bytes32 value; -/// } -/// ``` -#[derive(Debug, Default, Clone, PartialEq)] -pub(crate) struct L1MessengerL2ToL1Log { - pub l2_shard_id: u8, - pub is_service: bool, - pub tx_number_in_block: u16, - pub sender: Address, - pub key: U256, - pub value: U256, -} - -impl L1MessengerL2ToL1Log { - pub fn packed_encoding(&self) -> Vec { - let mut res: Vec = vec![]; - res.push(self.l2_shard_id); - res.push(self.is_service as u8); - res.extend_from_slice(&self.tx_number_in_block.to_be_bytes()); - res.extend_from_slice(self.sender.as_bytes()); - res.extend(u256_to_bytes_be(&self.key)); - res.extend(u256_to_bytes_be(&self.value)); - res - } -} - -impl From for L2ToL1Log { - fn from(log: L1MessengerL2ToL1Log) -> Self { - L2ToL1Log { - shard_id: log.l2_shard_id, - is_service: log.is_service, - tx_number_in_block: log.tx_number_in_block, - sender: log.sender, - key: u256_to_h256(log.key), - value: u256_to_h256(log.value), - } - } -} +use crate::interface::{pubdata::L1MessengerL2ToL1Log, VmEvent}; #[derive(Debug, PartialEq)] pub(crate) struct L1MessengerBytecodePublicationRequest { @@ -142,7 +93,8 @@ mod tests { use zksync_system_constants::{ BOOTLOADER_ADDRESS, KNOWN_CODES_STORAGE_ADDRESS, L2_BASE_TOKEN_ADDRESS, }; - use zksync_types::L1BatchNumber; + use zksync_types::{Address, L1BatchNumber}; + use zksync_utils::u256_to_h256; use super::*; diff --git a/core/lib/multivm/src/utils/mod.rs b/core/lib/multivm/src/utils/mod.rs index 5d8fba7a2acd..a55adb16c85a 100644 --- a/core/lib/multivm/src/utils/mod.rs +++ b/core/lib/multivm/src/utils/mod.rs @@ -53,7 +53,9 @@ pub fn derive_base_fee_and_gas_per_pubdata( VmVersion::Vm1_4_2 => crate::vm_1_4_2::utils::fee::derive_base_fee_and_gas_per_pubdata( batch_fee_input.into_pubdata_independent(), ), - VmVersion::Vm1_5_0SmallBootloaderMemory | VmVersion::Vm1_5_0IncreasedBootloaderMemory => { + VmVersion::Vm1_5_0SmallBootloaderMemory + | VmVersion::Vm1_5_0IncreasedBootloaderMemory + | VmVersion::VmGateway => { crate::vm_latest::utils::fee::derive_base_fee_and_gas_per_pubdata( batch_fee_input.into_pubdata_independent(), ) @@ -81,9 +83,9 @@ pub fn get_batch_base_fee(l1_batch_env: &L1BatchEnv, vm_version: 
VmVersion) -> u } VmVersion::Vm1_4_1 => crate::vm_1_4_1::utils::fee::get_batch_base_fee(l1_batch_env), VmVersion::Vm1_4_2 => crate::vm_1_4_2::utils::fee::get_batch_base_fee(l1_batch_env), - VmVersion::Vm1_5_0SmallBootloaderMemory | VmVersion::Vm1_5_0IncreasedBootloaderMemory => { - crate::vm_latest::utils::fee::get_batch_base_fee(l1_batch_env) - } + VmVersion::Vm1_5_0SmallBootloaderMemory + | VmVersion::Vm1_5_0IncreasedBootloaderMemory + | VmVersion::VmGateway => crate::vm_latest::utils::fee::get_batch_base_fee(l1_batch_env), } } @@ -209,9 +211,9 @@ pub fn derive_overhead( } VmVersion::Vm1_4_1 => crate::vm_1_4_1::utils::overhead::derive_overhead(encoded_len), VmVersion::Vm1_4_2 => crate::vm_1_4_2::utils::overhead::derive_overhead(encoded_len), - VmVersion::Vm1_5_0SmallBootloaderMemory | VmVersion::Vm1_5_0IncreasedBootloaderMemory => { - crate::vm_latest::utils::overhead::derive_overhead(encoded_len) - } + VmVersion::Vm1_5_0SmallBootloaderMemory + | VmVersion::Vm1_5_0IncreasedBootloaderMemory + | VmVersion::VmGateway => crate::vm_latest::utils::overhead::derive_overhead(encoded_len), } } @@ -245,6 +247,9 @@ pub fn get_bootloader_encoding_space(version: VmVersion) -> u32 { crate::vm_latest::MultiVMSubversion::IncreasedBootloaderMemory, ) } + VmVersion::VmGateway => crate::vm_latest::constants::get_bootloader_tx_encoding_space( + crate::vm_latest::MultiVMSubversion::Gateway, + ), } } @@ -264,9 +269,9 @@ pub fn get_bootloader_max_txs_in_batch(version: VmVersion) -> usize { VmVersion::VmBoojumIntegration => crate::vm_boojum_integration::constants::MAX_TXS_IN_BLOCK, VmVersion::Vm1_4_1 => crate::vm_1_4_1::constants::MAX_TXS_IN_BATCH, VmVersion::Vm1_4_2 => crate::vm_1_4_2::constants::MAX_TXS_IN_BATCH, - VmVersion::Vm1_5_0SmallBootloaderMemory | VmVersion::Vm1_5_0IncreasedBootloaderMemory => { - crate::vm_latest::constants::MAX_TXS_IN_BATCH - } + VmVersion::Vm1_5_0SmallBootloaderMemory + | VmVersion::Vm1_5_0IncreasedBootloaderMemory + | VmVersion::VmGateway => crate::vm_latest::constants::MAX_TXS_IN_BATCH, } } @@ -287,9 +292,9 @@ pub fn gas_bootloader_batch_tip_overhead(version: VmVersion) -> u32 { } VmVersion::Vm1_4_1 => crate::vm_1_4_1::constants::BOOTLOADER_BATCH_TIP_OVERHEAD, VmVersion::Vm1_4_2 => crate::vm_1_4_2::constants::BOOTLOADER_BATCH_TIP_OVERHEAD, - VmVersion::Vm1_5_0SmallBootloaderMemory | VmVersion::Vm1_5_0IncreasedBootloaderMemory => { - crate::vm_latest::constants::BOOTLOADER_BATCH_TIP_OVERHEAD - } + VmVersion::Vm1_5_0SmallBootloaderMemory + | VmVersion::Vm1_5_0IncreasedBootloaderMemory + | VmVersion::VmGateway => crate::vm_latest::constants::BOOTLOADER_BATCH_TIP_OVERHEAD, } } @@ -310,7 +315,9 @@ pub fn circuit_statistics_bootloader_batch_tip_overhead(version: VmVersion) -> u VmVersion::Vm1_4_2 => { crate::vm_1_4_2::constants::BOOTLOADER_BATCH_TIP_CIRCUIT_STATISTICS_OVERHEAD as usize } - VmVersion::Vm1_5_0SmallBootloaderMemory | VmVersion::Vm1_5_0IncreasedBootloaderMemory => { + VmVersion::Vm1_5_0SmallBootloaderMemory + | VmVersion::Vm1_5_0IncreasedBootloaderMemory + | VmVersion::VmGateway => { crate::vm_latest::constants::BOOTLOADER_BATCH_TIP_CIRCUIT_STATISTICS_OVERHEAD as usize } } @@ -333,7 +340,9 @@ pub fn execution_metrics_bootloader_batch_tip_overhead(version: VmVersion) -> us VmVersion::Vm1_4_2 => { crate::vm_1_4_2::constants::BOOTLOADER_BATCH_TIP_METRICS_SIZE_OVERHEAD as usize } - VmVersion::Vm1_5_0SmallBootloaderMemory | VmVersion::Vm1_5_0IncreasedBootloaderMemory => { + VmVersion::Vm1_5_0SmallBootloaderMemory + | VmVersion::Vm1_5_0IncreasedBootloaderMemory + | 
VmVersion::VmGateway => { crate::vm_latest::constants::BOOTLOADER_BATCH_TIP_METRICS_SIZE_OVERHEAD as usize } } @@ -357,9 +366,9 @@ pub fn get_max_gas_per_pubdata_byte(version: VmVersion) -> u64 { } VmVersion::Vm1_4_1 => crate::vm_1_4_1::constants::MAX_GAS_PER_PUBDATA_BYTE, VmVersion::Vm1_4_2 => crate::vm_1_4_2::constants::MAX_GAS_PER_PUBDATA_BYTE, - VmVersion::Vm1_5_0SmallBootloaderMemory | VmVersion::Vm1_5_0IncreasedBootloaderMemory => { - crate::vm_latest::constants::MAX_GAS_PER_PUBDATA_BYTE - } + VmVersion::Vm1_5_0SmallBootloaderMemory + | VmVersion::Vm1_5_0IncreasedBootloaderMemory + | VmVersion::VmGateway => crate::vm_latest::constants::MAX_GAS_PER_PUBDATA_BYTE, } } @@ -393,6 +402,9 @@ pub fn get_used_bootloader_memory_bytes(version: VmVersion) -> usize { crate::vm_latest::MultiVMSubversion::IncreasedBootloaderMemory, ) } + VmVersion::VmGateway => crate::vm_latest::constants::get_used_bootloader_memory_bytes( + crate::vm_latest::MultiVMSubversion::Gateway, + ), } } @@ -426,6 +438,9 @@ pub fn get_used_bootloader_memory_words(version: VmVersion) -> usize { crate::vm_latest::MultiVMSubversion::IncreasedBootloaderMemory, ) } + VmVersion::VmGateway => crate::vm_latest::constants::get_used_bootloader_memory_bytes( + crate::vm_latest::MultiVMSubversion::Gateway, + ), } } @@ -447,9 +462,9 @@ pub fn get_max_batch_gas_limit(version: VmVersion) -> u64 { } VmVersion::Vm1_4_1 => crate::vm_1_4_1::constants::BLOCK_GAS_LIMIT as u64, VmVersion::Vm1_4_2 => crate::vm_1_4_2::constants::BLOCK_GAS_LIMIT as u64, - VmVersion::Vm1_5_0SmallBootloaderMemory | VmVersion::Vm1_5_0IncreasedBootloaderMemory => { - crate::vm_latest::constants::BATCH_GAS_LIMIT - } + VmVersion::Vm1_5_0SmallBootloaderMemory + | VmVersion::Vm1_5_0IncreasedBootloaderMemory + | VmVersion::VmGateway => crate::vm_latest::constants::BATCH_GAS_LIMIT, } } @@ -473,9 +488,9 @@ pub fn get_eth_call_gas_limit(version: VmVersion) -> u64 { } VmVersion::Vm1_4_1 => crate::vm_1_4_1::constants::ETH_CALL_GAS_LIMIT as u64, VmVersion::Vm1_4_2 => crate::vm_1_4_2::constants::ETH_CALL_GAS_LIMIT as u64, - VmVersion::Vm1_5_0SmallBootloaderMemory | VmVersion::Vm1_5_0IncreasedBootloaderMemory => { - crate::vm_latest::constants::ETH_CALL_GAS_LIMIT - } + VmVersion::Vm1_5_0SmallBootloaderMemory + | VmVersion::Vm1_5_0IncreasedBootloaderMemory + | VmVersion::VmGateway => crate::vm_latest::constants::ETH_CALL_GAS_LIMIT, } } @@ -496,9 +511,9 @@ pub fn get_max_batch_base_layer_circuits(version: VmVersion) -> usize { // We avoid providing `0` for the old versions to avoid potential errors when working with old versions. 
crate::vm_1_4_2::constants::MAX_BASE_LAYER_CIRCUITS } - VmVersion::Vm1_5_0SmallBootloaderMemory | VmVersion::Vm1_5_0IncreasedBootloaderMemory => { - crate::vm_latest::constants::MAX_BASE_LAYER_CIRCUITS - } + VmVersion::Vm1_5_0SmallBootloaderMemory + | VmVersion::Vm1_5_0IncreasedBootloaderMemory + | VmVersion::VmGateway => crate::vm_latest::constants::MAX_BASE_LAYER_CIRCUITS, } } diff --git a/core/lib/multivm/src/versions/shadow/mod.rs b/core/lib/multivm/src/versions/shadow/mod.rs index fe9ce8eefcb9..42a0fbb1b8ba 100644 --- a/core/lib/multivm/src/versions/shadow/mod.rs +++ b/core/lib/multivm/src/versions/shadow/mod.rs @@ -198,7 +198,6 @@ impl Harness { assert!(!exec_result.result.is_failed(), "{:#?}", exec_result); self.new_block(vm, &[deploy_tx.tx.hash(), load_test_tx.hash()]); - vm.finish_batch(); } } diff --git a/core/lib/multivm/src/versions/shadow/tests.rs b/core/lib/multivm/src/versions/shadow/tests.rs index 64179f59be16..6a39a28f7630 100644 --- a/core/lib/multivm/src/versions/shadow/tests.rs +++ b/core/lib/multivm/src/versions/shadow/tests.rs @@ -1,14 +1,15 @@ //! Unit tests from the `testonly` test suite. -use std::collections::HashSet; +use std::{collections::HashSet, rc::Rc}; use zksync_types::{writes::StateDiffRecord, StorageKey, Transaction, H256, U256}; +use zksync_vm_interface::pubdata::PubdataBuilder; use super::ShadowedFastVm; use crate::{ interface::{ utils::{ShadowMut, ShadowRef}, - CurrentExecutionState, L2BlockEnv, VmExecutionMode, VmExecutionResultAndLogs, + CurrentExecutionState, L2BlockEnv, VmExecutionResultAndLogs, }, versions::testonly::TestedVm, }; @@ -41,14 +42,25 @@ impl TestedVm for ShadowedFastVm { }) } - fn execute_with_state_diffs( + fn finish_batch_with_state_diffs( &mut self, diffs: Vec, - mode: VmExecutionMode, + pubdata_builder: Rc, ) -> VmExecutionResultAndLogs { - self.get_custom_mut("execute_with_state_diffs", |r| match r { - ShadowMut::Main(vm) => vm.execute_with_state_diffs(diffs.clone(), mode), - ShadowMut::Shadow(vm) => vm.execute_with_state_diffs(diffs.clone(), mode), + self.get_custom_mut("finish_batch_with_state_diffs", |r| match r { + ShadowMut::Main(vm) => { + vm.finish_batch_with_state_diffs(diffs.clone(), pubdata_builder.clone()) + } + ShadowMut::Shadow(vm) => { + vm.finish_batch_with_state_diffs(diffs.clone(), pubdata_builder.clone()) + } + }) + } + + fn finish_batch_without_pubdata(&mut self) -> VmExecutionResultAndLogs { + self.get_custom_mut("finish_batch_without_pubdata", |r| match r { + ShadowMut::Main(vm) => vm.finish_batch_without_pubdata(), + ShadowMut::Shadow(vm) => vm.finish_batch_without_pubdata(), }) } diff --git a/core/lib/multivm/src/versions/testonly/block_tip.rs b/core/lib/multivm/src/versions/testonly/block_tip.rs index 7700f347ca6a..220653308a7e 100644 --- a/core/lib/multivm/src/versions/testonly/block_tip.rs +++ b/core/lib/multivm/src/versions/testonly/block_tip.rs @@ -11,11 +11,11 @@ use zksync_types::{ use zksync_utils::{bytecode::hash_bytecode, u256_to_h256}; use super::{ - get_complex_upgrade_abi, get_empty_storage, read_complex_upgrade, + default_pubdata_builder, get_complex_upgrade_abi, get_empty_storage, read_complex_upgrade, tester::{TestedVm, VmTesterBuilder}, }; use crate::{ - interface::{L1BatchEnv, TxExecutionMode, VmExecutionMode, VmInterfaceExt}, + interface::{InspectExecutionMode, L1BatchEnv, TxExecutionMode, VmInterfaceExt}, versions::testonly::default_l1_batch, vm_latest::constants::{ BOOTLOADER_BATCH_TIP_CIRCUIT_STATISTICS_OVERHEAD, @@ -156,7 +156,7 @@ fn execute_test(test_data: L1MessengerTestData) -> 
TestStatistics vm.vm.push_transaction(tx); - let result = vm.vm.execute(VmExecutionMode::OneTx); + let result = vm.vm.execute(InspectExecutionMode::OneTx); assert!( !result.result.is_failed(), "Transaction {i} wasn't successful for input: {:#?}", @@ -169,7 +169,7 @@ fn execute_test(test_data: L1MessengerTestData) -> TestStatistics let gas_before = vm.vm.gas_remaining(); let result = vm .vm - .execute_with_state_diffs(test_data.state_diffs.clone(), VmExecutionMode::Batch); + .finish_batch_with_state_diffs(test_data.state_diffs.clone(), default_pubdata_builder()); assert!( !result.result.is_failed(), "Batch wasn't successful for input: {test_data:?}" diff --git a/core/lib/multivm/src/versions/testonly/bootloader.rs b/core/lib/multivm/src/versions/testonly/bootloader.rs index e3177e078518..4b9b63252d6a 100644 --- a/core/lib/multivm/src/versions/testonly/bootloader.rs +++ b/core/lib/multivm/src/versions/testonly/bootloader.rs @@ -2,7 +2,7 @@ use assert_matches::assert_matches; use zksync_types::U256; use super::{get_bootloader, tester::VmTesterBuilder, TestedVm, BASE_SYSTEM_CONTRACTS}; -use crate::interface::{ExecutionResult, Halt, TxExecutionMode, VmExecutionMode, VmInterfaceExt}; +use crate::interface::{ExecutionResult, Halt, TxExecutionMode}; pub(crate) fn test_dummy_bootloader() { let mut base_system_contracts = BASE_SYSTEM_CONTRACTS.clone(); @@ -14,7 +14,7 @@ pub(crate) fn test_dummy_bootloader() { .with_execution_mode(TxExecutionMode::VerifyExecute) .build::(); - let result = vm.vm.execute(VmExecutionMode::Batch); + let result = vm.vm.finish_batch_without_pubdata(); assert!(!result.result.is_failed()); let correct_first_cell = U256::from_str_radix("123123123", 16).unwrap(); @@ -33,7 +33,7 @@ pub(crate) fn test_bootloader_out_of_gas() { .with_execution_mode(TxExecutionMode::VerifyExecute) .build::(); - let res = vm.vm.execute(VmExecutionMode::Batch); + let res = vm.vm.finish_batch_without_pubdata(); assert_matches!( res.result, diff --git a/core/lib/multivm/src/versions/testonly/bytecode_publishing.rs b/core/lib/multivm/src/versions/testonly/bytecode_publishing.rs index 346241a96245..9da005b995d3 100644 --- a/core/lib/multivm/src/versions/testonly/bytecode_publishing.rs +++ b/core/lib/multivm/src/versions/testonly/bytecode_publishing.rs @@ -1,8 +1,8 @@ use zksync_test_account::TxType; -use super::{read_test_contract, tester::VmTesterBuilder, TestedVm}; +use super::{default_pubdata_builder, read_test_contract, tester::VmTesterBuilder, TestedVm}; use crate::{ - interface::{TxExecutionMode, VmEvent, VmExecutionMode, VmInterfaceExt}, + interface::{InspectExecutionMode, TxExecutionMode, VmEvent, VmInterfaceExt}, utils::bytecode, }; @@ -30,10 +30,10 @@ pub(crate) fn test_bytecode_publishing() { compressed_bytecode ); - let result = vm.vm.execute(VmExecutionMode::OneTx); + let result = vm.vm.execute(InspectExecutionMode::OneTx); assert!(!result.result.is_failed(), "Transaction wasn't successful"); - vm.vm.execute(VmExecutionMode::Batch); + vm.vm.finish_batch(default_pubdata_builder()); let state = vm.vm.get_current_execution_state(); let long_messages = VmEvent::extract_long_l2_to_l1_messages(&state.events); diff --git a/core/lib/multivm/src/versions/testonly/circuits.rs b/core/lib/multivm/src/versions/testonly/circuits.rs index 9503efe9208f..de987a8912db 100644 --- a/core/lib/multivm/src/versions/testonly/circuits.rs +++ b/core/lib/multivm/src/versions/testonly/circuits.rs @@ -2,7 +2,7 @@ use zksync_types::{Address, Execute, U256}; use super::tester::VmTesterBuilder; use crate::{ - 
interface::{TxExecutionMode, VmExecutionMode, VmInterfaceExt}, + interface::{InspectExecutionMode, TxExecutionMode, VmInterfaceExt}, versions::testonly::TestedVm, vm_latest::constants::BATCH_COMPUTATIONAL_GAS_LIMIT, }; @@ -28,7 +28,7 @@ pub(crate) fn test_circuits() { None, ); vm.vm.push_transaction(tx); - let res = vm.vm.execute(VmExecutionMode::OneTx); + let res = vm.vm.execute(InspectExecutionMode::OneTx); assert!(!res.result.is_failed(), "{res:#?}"); let s = res.statistics.circuit_statistic; diff --git a/core/lib/multivm/src/versions/testonly/code_oracle.rs b/core/lib/multivm/src/versions/testonly/code_oracle.rs index b786539329b9..767a294f44ab 100644 --- a/core/lib/multivm/src/versions/testonly/code_oracle.rs +++ b/core/lib/multivm/src/versions/testonly/code_oracle.rs @@ -9,7 +9,7 @@ use super::{ tester::VmTesterBuilder, TestedVm, }; use crate::{ - interface::{TxExecutionMode, VmExecutionMode, VmInterfaceExt}, + interface::{InspectExecutionMode, TxExecutionMode, VmInterfaceExt}, versions::testonly::ContractToDeploy, }; @@ -68,7 +68,7 @@ pub(crate) fn test_code_oracle() { ); vm.vm.push_transaction(tx1); - let result = vm.vm.execute(VmExecutionMode::OneTx); + let result = vm.vm.execute(InspectExecutionMode::OneTx); assert!( !result.result.is_failed(), "Transaction wasn't successful: {result:#?}" @@ -91,7 +91,7 @@ pub(crate) fn test_code_oracle() { None, ); vm.vm.push_transaction(tx2); - let result = vm.vm.execute(VmExecutionMode::OneTx); + let result = vm.vm.execute(InspectExecutionMode::OneTx); assert!( !result.result.is_failed(), "Transaction wasn't successful: {result:#?}" @@ -160,7 +160,7 @@ pub(crate) fn test_code_oracle_big_bytecode() { ); vm.vm.push_transaction(tx1); - let result = vm.vm.execute(VmExecutionMode::OneTx); + let result = vm.vm.execute(InspectExecutionMode::OneTx); assert!( !result.result.is_failed(), "Transaction wasn't successful: {result:#?}" @@ -222,7 +222,7 @@ pub(crate) fn test_refunds_in_code_oracle() { ); vm.vm.push_transaction(tx); - let result = vm.vm.execute(VmExecutionMode::OneTx); + let result = vm.vm.execute(InspectExecutionMode::OneTx); assert!( !result.result.is_failed(), "Transaction wasn't successful: {result:#?}" diff --git a/core/lib/multivm/src/versions/testonly/default_aa.rs b/core/lib/multivm/src/versions/testonly/default_aa.rs index 3f121dcf7e6c..c69c00de4508 100644 --- a/core/lib/multivm/src/versions/testonly/default_aa.rs +++ b/core/lib/multivm/src/versions/testonly/default_aa.rs @@ -7,9 +7,9 @@ use zksync_types::{ }; use zksync_utils::h256_to_u256; -use super::{read_test_contract, tester::VmTesterBuilder, TestedVm}; +use super::{default_pubdata_builder, read_test_contract, tester::VmTesterBuilder, TestedVm}; use crate::{ - interface::{TxExecutionMode, VmExecutionMode, VmInterfaceExt}, + interface::{InspectExecutionMode, TxExecutionMode, VmInterfaceExt}, vm_latest::utils::fee::get_batch_base_fee, }; @@ -32,10 +32,10 @@ pub(crate) fn test_default_aa_interaction() { let maximal_fee = tx.gas_limit() * get_batch_base_fee(&vm.l1_batch_env); vm.vm.push_transaction(tx); - let result = vm.vm.execute(VmExecutionMode::OneTx); + let result = vm.vm.execute(InspectExecutionMode::OneTx); assert!(!result.result.is_failed(), "Transaction wasn't successful"); - vm.vm.execute(VmExecutionMode::Batch); + vm.vm.finish_batch(default_pubdata_builder()); vm.vm.get_current_execution_state(); diff --git a/core/lib/multivm/src/versions/testonly/get_used_contracts.rs b/core/lib/multivm/src/versions/testonly/get_used_contracts.rs index d3ffee20c344..9d0908807e21 100644 
--- a/core/lib/multivm/src/versions/testonly/get_used_contracts.rs +++ b/core/lib/multivm/src/versions/testonly/get_used_contracts.rs @@ -15,7 +15,8 @@ use super::{ }; use crate::{ interface::{ - ExecutionResult, TxExecutionMode, VmExecutionMode, VmExecutionResultAndLogs, VmInterfaceExt, + ExecutionResult, InspectExecutionMode, TxExecutionMode, VmExecutionResultAndLogs, + VmInterfaceExt, }, versions::testonly::ContractToDeploy, }; @@ -35,7 +36,7 @@ pub(crate) fn test_get_used_contracts() { let account = &mut vm.rich_accounts[0]; let tx = account.get_deploy_tx(&contract_code, None, TxType::L1 { serial_id: 0 }); vm.vm.push_transaction(tx.tx.clone()); - let result = vm.vm.execute(VmExecutionMode::OneTx); + let result = vm.vm.execute(InspectExecutionMode::OneTx); assert!(!result.result.is_failed()); assert!(vm @@ -70,7 +71,7 @@ pub(crate) fn test_get_used_contracts() { vm.vm.push_transaction(tx2.clone()); - let res2 = vm.vm.execute(VmExecutionMode::OneTx); + let res2 = vm.vm.execute(InspectExecutionMode::OneTx); assert!(res2.result.is_failed()); diff --git a/core/lib/multivm/src/versions/testonly/is_write_initial.rs b/core/lib/multivm/src/versions/testonly/is_write_initial.rs index ef1fe2088c10..cac9be173639 100644 --- a/core/lib/multivm/src/versions/testonly/is_write_initial.rs +++ b/core/lib/multivm/src/versions/testonly/is_write_initial.rs @@ -2,7 +2,9 @@ use zksync_test_account::TxType; use zksync_types::get_nonce_key; use super::{read_test_contract, tester::VmTesterBuilder, TestedVm}; -use crate::interface::{storage::ReadStorage, TxExecutionMode, VmExecutionMode, VmInterfaceExt}; +use crate::interface::{ + storage::ReadStorage, InspectExecutionMode, TxExecutionMode, VmInterfaceExt, +}; pub(crate) fn test_is_write_initial_behaviour() { // In this test, we check result of `is_write_initial` at different stages. @@ -27,7 +29,7 @@ pub(crate) fn test_is_write_initial_behaviour() { let tx = account.get_deploy_tx(&contract_code, None, TxType::L2).tx; vm.vm.push_transaction(tx); - vm.vm.execute(VmExecutionMode::OneTx); + vm.vm.execute(InspectExecutionMode::OneTx); // Check that `is_write_initial` still returns true for the nonce key. assert!(vm diff --git a/core/lib/multivm/src/versions/testonly/l1_tx_execution.rs b/core/lib/multivm/src/versions/testonly/l1_tx_execution.rs index 212b1f16f207..e98a8385f020 100644 --- a/core/lib/multivm/src/versions/testonly/l1_tx_execution.rs +++ b/core/lib/multivm/src/versions/testonly/l1_tx_execution.rs @@ -11,7 +11,7 @@ use zksync_utils::{h256_to_u256, u256_to_h256}; use super::{read_test_contract, tester::VmTesterBuilder, TestedVm, BASE_SYSTEM_CONTRACTS}; use crate::{ - interface::{TxExecutionMode, VmExecutionMode, VmInterfaceExt}, + interface::{InspectExecutionMode, TxExecutionMode, VmInterfaceExt}, utils::StorageWritesDeduplicator, }; @@ -60,7 +60,7 @@ pub(crate) fn test_l1_tx_execution() { vm.vm.push_transaction(deploy_tx.tx.clone()); - let res = vm.vm.execute(VmExecutionMode::OneTx); + let res = vm.vm.execute(InspectExecutionMode::OneTx); // The code hash of the deployed contract should be marked as republished. 
let known_codes_key = get_known_code_key(&deploy_tx.bytecode_hash); @@ -84,7 +84,7 @@ pub(crate) fn test_l1_tx_execution() { TxType::L1 { serial_id: 0 }, ); vm.vm.push_transaction(tx); - let res = vm.vm.execute(VmExecutionMode::OneTx); + let res = vm.vm.execute(InspectExecutionMode::OneTx); let storage_logs = res.logs.storage_logs; let res = StorageWritesDeduplicator::apply_on_empty_state(&storage_logs); @@ -99,7 +99,7 @@ pub(crate) fn test_l1_tx_execution() { TxType::L1 { serial_id: 0 }, ); vm.vm.push_transaction(tx.clone()); - let res = vm.vm.execute(VmExecutionMode::OneTx); + let res = vm.vm.execute(InspectExecutionMode::OneTx); let storage_logs = res.logs.storage_logs; let res = StorageWritesDeduplicator::apply_on_empty_state(&storage_logs); // We changed one slot inside contract. @@ -110,7 +110,7 @@ pub(crate) fn test_l1_tx_execution() { assert_eq!(res.repeated_storage_writes, 0); vm.vm.push_transaction(tx); - let storage_logs = vm.vm.execute(VmExecutionMode::OneTx).logs.storage_logs; + let storage_logs = vm.vm.execute(InspectExecutionMode::OneTx).logs.storage_logs; let res = StorageWritesDeduplicator::apply_on_empty_state(&storage_logs); // We do the same storage write, it will be deduplicated, so still 4 initial write and 0 repeated. // But now the base pubdata spent has changed too. @@ -125,7 +125,7 @@ pub(crate) fn test_l1_tx_execution() { TxType::L1 { serial_id: 1 }, ); vm.vm.push_transaction(tx); - let result = vm.vm.execute(VmExecutionMode::OneTx); + let result = vm.vm.execute(InspectExecutionMode::OneTx); // Method is not payable tx should fail assert!(result.result.is_failed(), "The transaction should fail"); @@ -176,7 +176,7 @@ pub(crate) fn test_l1_tx_execution_high_gas_limit() { vm.vm.push_transaction(tx); - let res = vm.vm.execute(VmExecutionMode::OneTx); + let res = vm.vm.execute(InspectExecutionMode::OneTx); assert!(res.result.is_failed(), "The transaction should've failed"); } diff --git a/core/lib/multivm/src/versions/testonly/l2_blocks.rs b/core/lib/multivm/src/versions/testonly/l2_blocks.rs index 634a9b34bf6d..947d8b5859f8 100644 --- a/core/lib/multivm/src/versions/testonly/l2_blocks.rs +++ b/core/lib/multivm/src/versions/testonly/l2_blocks.rs @@ -17,8 +17,8 @@ use zksync_utils::{h256_to_u256, u256_to_h256}; use super::{default_l1_batch, get_empty_storage, tester::VmTesterBuilder, TestedVm}; use crate::{ interface::{ - storage::StorageView, ExecutionResult, Halt, L2BlockEnv, TxExecutionMode, VmExecutionMode, - VmInterfaceExt, + storage::StorageView, ExecutionResult, Halt, InspectExecutionMode, L2BlockEnv, + TxExecutionMode, VmInterfaceExt, }, vm_latest::{ constants::{TX_OPERATOR_L2_BLOCK_INFO_OFFSET, TX_OPERATOR_SLOTS_PER_L2_BLOCK_INFO}, @@ -66,7 +66,7 @@ pub(crate) fn test_l2_block_initialization_timestamp() { let l1_tx = get_l1_noop(); vm.vm.push_transaction(l1_tx); - let res = vm.vm.execute(VmExecutionMode::OneTx); + let res = vm.vm.execute(InspectExecutionMode::OneTx); assert_matches!( res.result, @@ -100,7 +100,7 @@ pub(crate) fn test_l2_block_initialization_number_non_zero() { set_manual_l2_block_info(&mut vm.vm, 0, first_l2_block); - let res = vm.vm.execute(VmExecutionMode::OneTx); + let res = vm.vm.execute(InspectExecutionMode::OneTx); assert_eq!( res.result, @@ -128,7 +128,7 @@ fn test_same_l2_block( let l1_tx = get_l1_noop(); vm.vm.push_transaction(l1_tx.clone()); - let res = vm.vm.execute(VmExecutionMode::OneTx); + let res = vm.vm.execute(InspectExecutionMode::OneTx); assert!(!res.result.is_failed()); let mut current_l2_block = 
vm.l1_batch_env.first_l2_block; @@ -147,7 +147,7 @@ fn test_same_l2_block( vm.vm.push_transaction(l1_tx); set_manual_l2_block_info(&mut vm.vm, 1, current_l2_block); - let result = vm.vm.execute(VmExecutionMode::OneTx); + let result = vm.vm.execute(InspectExecutionMode::OneTx); if let Some(err) = expected_error { assert_eq!(result.result, ExecutionResult::Halt { reason: err }); @@ -203,7 +203,7 @@ fn test_new_l2_block( // Firstly we execute the first transaction vm.vm.push_transaction(l1_tx.clone()); - vm.vm.execute(VmExecutionMode::OneTx); + vm.vm.execute(InspectExecutionMode::OneTx); let mut second_l2_block = vm.l1_batch_env.first_l2_block; second_l2_block.number += 1; @@ -223,7 +223,7 @@ fn test_new_l2_block( vm.vm.push_l2_block_unchecked(second_l2_block); vm.vm.push_transaction(l1_tx); - let result = vm.vm.execute(VmExecutionMode::OneTx); + let result = vm.vm.execute(InspectExecutionMode::OneTx); if let Some(err) = expected_error { assert_eq!(result.result, ExecutionResult::Halt { reason: err }); } else { @@ -350,7 +350,7 @@ fn test_first_in_batch( vm.vm.push_transaction(l1_tx); set_manual_l2_block_info(&mut vm.vm, 0, proposed_block); - let result = vm.vm.execute(VmExecutionMode::OneTx); + let result = vm.vm.execute(InspectExecutionMode::OneTx); if let Some(err) = expected_error { assert_eq!(result.result, ExecutionResult::Halt { reason: err }); } else { diff --git a/core/lib/multivm/src/versions/testonly/mod.rs b/core/lib/multivm/src/versions/testonly/mod.rs index 74cda6a95229..eece1d475bba 100644 --- a/core/lib/multivm/src/versions/testonly/mod.rs +++ b/core/lib/multivm/src/versions/testonly/mod.rs @@ -9,7 +9,7 @@ //! - Tests use [`VmTester`] built using [`VmTesterBuilder`] to create a VM instance. This allows to set up storage for the VM, //! custom [`SystemEnv`] / [`L1BatchEnv`], deployed contracts, pre-funded accounts etc. 
-use std::collections::HashSet; +use std::{collections::HashSet, rc::Rc}; use ethabi::Contract; use once_cell::sync::Lazy; @@ -23,11 +23,14 @@ use zksync_types::{ ProtocolVersionId, U256, }; use zksync_utils::{bytecode::hash_bytecode, bytes_to_be_words, h256_to_u256, u256_to_h256}; -use zksync_vm_interface::{L1BatchEnv, L2BlockEnv, SystemEnv, TxExecutionMode}; +use zksync_vm_interface::{ + pubdata::PubdataBuilder, L1BatchEnv, L2BlockEnv, SystemEnv, TxExecutionMode, +}; pub(super) use self::tester::{TestedVm, VmTester, VmTesterBuilder}; use crate::{ - interface::storage::InMemoryStorage, vm_latest::constants::BATCH_COMPUTATIONAL_GAS_LIMIT, + interface::storage::InMemoryStorage, pubdata_builders::RollupPubdataBuilder, + vm_latest::constants::BATCH_COMPUTATIONAL_GAS_LIMIT, }; pub(super) mod block_tip; @@ -175,6 +178,10 @@ pub(super) fn default_l1_batch(number: L1BatchNumber) -> L1BatchEnv { } } +pub(super) fn default_pubdata_builder() -> Rc { + Rc::new(RollupPubdataBuilder::new(Address::zero())) +} + pub(super) fn make_address_rich(storage: &mut InMemoryStorage, address: Address) { let key = storage_key_for_eth_balance(&address); storage.set_value(key, u256_to_h256(U256::from(10_u64.pow(19)))); diff --git a/core/lib/multivm/src/versions/testonly/nonce_holder.rs b/core/lib/multivm/src/versions/testonly/nonce_holder.rs index 8ef120c693ca..36f736c0bbe5 100644 --- a/core/lib/multivm/src/versions/testonly/nonce_holder.rs +++ b/core/lib/multivm/src/versions/testonly/nonce_holder.rs @@ -3,7 +3,7 @@ use zksync_types::{Execute, ExecuteTransactionCommon, Nonce}; use super::{read_nonce_holder_tester, tester::VmTesterBuilder, ContractToDeploy, TestedVm}; use crate::interface::{ - ExecutionResult, Halt, TxExecutionMode, TxRevertReason, VmExecutionMode, VmInterfaceExt, + ExecutionResult, Halt, InspectExecutionMode, TxExecutionMode, TxRevertReason, VmInterfaceExt, VmRevertReason, }; @@ -53,7 +53,7 @@ fn run_nonce_test( }; tx_data.signature = vec![test_mode.into()]; vm.push_transaction(transaction); - let result = vm.execute(VmExecutionMode::OneTx); + let result = vm.execute(InspectExecutionMode::OneTx); if let Some(msg) = error_message { let expected_error = diff --git a/core/lib/multivm/src/versions/testonly/precompiles.rs b/core/lib/multivm/src/versions/testonly/precompiles.rs index 270afab07317..2e26dc134b07 100644 --- a/core/lib/multivm/src/versions/testonly/precompiles.rs +++ b/core/lib/multivm/src/versions/testonly/precompiles.rs @@ -3,7 +3,7 @@ use zksync_types::{Address, Execute}; use super::{read_precompiles_contract, tester::VmTesterBuilder, TestedVm}; use crate::{ - interface::{TxExecutionMode, VmExecutionMode, VmInterfaceExt}, + interface::{InspectExecutionMode, TxExecutionMode, VmInterfaceExt}, versions::testonly::ContractToDeploy, vm_latest::constants::BATCH_COMPUTATIONAL_GAS_LIMIT, }; @@ -36,7 +36,7 @@ pub(crate) fn test_keccak() { ); vm.vm.push_transaction(tx); - let exec_result = vm.vm.execute(VmExecutionMode::OneTx); + let exec_result = vm.vm.execute(InspectExecutionMode::OneTx); assert!(!exec_result.result.is_failed(), "{exec_result:#?}"); let keccak_count = exec_result.statistics.circuit_statistic.keccak256 @@ -72,7 +72,7 @@ pub(crate) fn test_sha256() { ); vm.vm.push_transaction(tx); - let exec_result = vm.vm.execute(VmExecutionMode::OneTx); + let exec_result = vm.vm.execute(InspectExecutionMode::OneTx); assert!(!exec_result.result.is_failed(), "{exec_result:#?}"); let sha_count = exec_result.statistics.circuit_statistic.sha256 @@ -101,7 +101,7 @@ pub(crate) fn test_ecrecover() { ); 
vm.vm.push_transaction(tx); - let exec_result = vm.vm.execute(VmExecutionMode::OneTx); + let exec_result = vm.vm.execute(InspectExecutionMode::OneTx); assert!(!exec_result.result.is_failed(), "{exec_result:#?}"); let ecrecover_count = exec_result.statistics.circuit_statistic.ecrecover diff --git a/core/lib/multivm/src/versions/testonly/refunds.rs b/core/lib/multivm/src/versions/testonly/refunds.rs index 565607dff105..874425fc435c 100644 --- a/core/lib/multivm/src/versions/testonly/refunds.rs +++ b/core/lib/multivm/src/versions/testonly/refunds.rs @@ -3,10 +3,10 @@ use zksync_test_account::TxType; use zksync_types::{Address, Execute, U256}; use super::{ - read_expensive_contract, read_test_contract, tester::VmTesterBuilder, ContractToDeploy, - TestedVm, + default_pubdata_builder, read_expensive_contract, read_test_contract, tester::VmTesterBuilder, + ContractToDeploy, TestedVm, }; -use crate::interface::{TxExecutionMode, VmExecutionMode, VmInterfaceExt}; +use crate::interface::{InspectExecutionMode, TxExecutionMode, VmInterfaceExt}; pub(crate) fn test_predetermined_refunded_gas() { // In this test, we compare the execution of the bootloader with the predefined @@ -24,7 +24,7 @@ pub(crate) fn test_predetermined_refunded_gas() { let tx = account.get_deploy_tx(&counter, None, TxType::L2).tx; vm.vm.push_transaction(tx.clone()); - let result = vm.vm.execute(VmExecutionMode::OneTx); + let result = vm.vm.execute(InspectExecutionMode::OneTx); assert!(!result.result.is_failed()); @@ -37,7 +37,10 @@ pub(crate) fn test_predetermined_refunded_gas() { ); assert!(result.refunds.gas_refunded > 0, "The final refund is 0"); - let result_without_predefined_refunds = vm.vm.execute(VmExecutionMode::Batch); + let result_without_predefined_refunds = vm + .vm + .finish_batch(default_pubdata_builder()) + .block_tip_execution_result; let mut current_state_without_predefined_refunds = vm.vm.get_current_execution_state(); assert!(!result_without_predefined_refunds.result.is_failed(),); @@ -56,7 +59,10 @@ pub(crate) fn test_predetermined_refunded_gas() { vm.vm .push_transaction_with_refund(tx.clone(), result.refunds.gas_refunded); - let result_with_predefined_refunds = vm.vm.execute(VmExecutionMode::Batch); + let result_with_predefined_refunds = vm + .vm + .finish_batch(default_pubdata_builder()) + .block_tip_execution_result; let mut current_state_with_predefined_refunds = vm.vm.get_current_execution_state(); assert!(!result_with_predefined_refunds.result.is_failed()); @@ -107,7 +113,10 @@ pub(crate) fn test_predetermined_refunded_gas() { let changed_operator_suggested_refund = result.refunds.gas_refunded + 1000; vm.vm .push_transaction_with_refund(tx, changed_operator_suggested_refund); - let result = vm.vm.execute(VmExecutionMode::Batch); + let result = vm + .vm + .finish_batch(default_pubdata_builder()) + .block_tip_execution_result; let mut current_state_with_changed_predefined_refunds = vm.vm.get_current_execution_state(); assert!(!result.result.is_failed()); @@ -185,7 +194,7 @@ pub(crate) fn test_negative_pubdata_for_transaction() { None, ); vm.vm.push_transaction(expensive_tx); - let result = vm.vm.execute(VmExecutionMode::OneTx); + let result = vm.vm.execute(InspectExecutionMode::OneTx); assert!( !result.result.is_failed(), "Transaction wasn't successful: {result:#?}" @@ -202,7 +211,7 @@ pub(crate) fn test_negative_pubdata_for_transaction() { None, ); vm.vm.push_transaction(clean_up_tx); - let result = vm.vm.execute(VmExecutionMode::OneTx); + let result = vm.vm.execute(InspectExecutionMode::OneTx); assert!( 
!result.result.is_failed(), "Transaction wasn't successful: {result:#?}" diff --git a/core/lib/multivm/src/versions/testonly/require_eip712.rs b/core/lib/multivm/src/versions/testonly/require_eip712.rs index 1ea3964d7cd1..e789fbda2902 100644 --- a/core/lib/multivm/src/versions/testonly/require_eip712.rs +++ b/core/lib/multivm/src/versions/testonly/require_eip712.rs @@ -8,7 +8,7 @@ use zksync_types::{ use super::{ read_many_owners_custom_account_contract, tester::VmTesterBuilder, ContractToDeploy, TestedVm, }; -use crate::interface::{TxExecutionMode, VmExecutionMode, VmInterfaceExt}; +use crate::interface::{InspectExecutionMode, TxExecutionMode, VmInterfaceExt}; /// This test deploys 'buggy' account abstraction code, and then tries accessing it both with legacy /// and EIP712 transactions. @@ -52,7 +52,7 @@ pub(crate) fn test_require_eip712() { ); vm.vm.push_transaction(tx); - let result = vm.vm.execute(VmExecutionMode::OneTx); + let result = vm.vm.execute(InspectExecutionMode::OneTx); assert!(!result.result.is_failed()); let private_account_balance = vm.get_eth_balance(private_account.address); @@ -85,7 +85,7 @@ pub(crate) fn test_require_eip712() { let transaction: Transaction = l2_tx.into(); vm.vm.push_transaction(transaction); - let result = vm.vm.execute(VmExecutionMode::OneTx); + let result = vm.vm.execute(InspectExecutionMode::OneTx); assert!(!result.result.is_failed()); assert_eq!( @@ -133,7 +133,7 @@ pub(crate) fn test_require_eip712() { let transaction: Transaction = l2_tx.into(); vm.vm.push_transaction(transaction); - vm.vm.execute(VmExecutionMode::OneTx); + vm.vm.execute(InspectExecutionMode::OneTx); assert_eq!( vm.get_eth_balance(beneficiary_address), diff --git a/core/lib/multivm/src/versions/testonly/secp256r1.rs b/core/lib/multivm/src/versions/testonly/secp256r1.rs index 60197913601e..37d428f82101 100644 --- a/core/lib/multivm/src/versions/testonly/secp256r1.rs +++ b/core/lib/multivm/src/versions/testonly/secp256r1.rs @@ -4,7 +4,7 @@ use zksync_types::{web3::keccak256, Execute, H256, U256}; use zksync_utils::h256_to_u256; use super::{tester::VmTesterBuilder, TestedVm}; -use crate::interface::{ExecutionResult, TxExecutionMode, VmExecutionMode, VmInterfaceExt}; +use crate::interface::{ExecutionResult, InspectExecutionMode, TxExecutionMode, VmInterfaceExt}; pub(crate) fn test_secp256r1() { // In this test, we aim to test whether a simple account interaction (without any fee logic) @@ -55,7 +55,7 @@ pub(crate) fn test_secp256r1() { vm.vm.push_transaction(tx); - let execution_result = vm.vm.execute(VmExecutionMode::Batch); + let execution_result = vm.vm.execute(InspectExecutionMode::OneTx); let ExecutionResult::Success { output } = execution_result.result else { panic!("batch failed") diff --git a/core/lib/multivm/src/versions/testonly/simple_execution.rs b/core/lib/multivm/src/versions/testonly/simple_execution.rs index fcd7a144ab1f..96239fb362d2 100644 --- a/core/lib/multivm/src/versions/testonly/simple_execution.rs +++ b/core/lib/multivm/src/versions/testonly/simple_execution.rs @@ -1,8 +1,8 @@ use assert_matches::assert_matches; use zksync_test_account::TxType; -use super::{tester::VmTesterBuilder, TestedVm}; -use crate::interface::{ExecutionResult, VmExecutionMode, VmInterfaceExt}; +use super::{default_pubdata_builder, tester::VmTesterBuilder, TestedVm}; +use crate::interface::{ExecutionResult, InspectExecutionMode, VmInterfaceExt}; pub(crate) fn test_estimate_fee() { let mut vm_tester = VmTesterBuilder::new() @@ -23,7 +23,7 @@ pub(crate) fn test_estimate_fee() { 
vm_tester.vm.push_transaction(tx); - let result = vm_tester.vm.execute(VmExecutionMode::OneTx); + let result = vm_tester.vm.execute(InspectExecutionMode::OneTx); assert_matches!(result.result, ExecutionResult::Success { .. }); } @@ -64,12 +64,14 @@ pub(crate) fn test_simple_execute() { vm.push_transaction(tx1); vm.push_transaction(tx2); vm.push_transaction(tx3); - let tx = vm.execute(VmExecutionMode::OneTx); + let tx = vm.execute(InspectExecutionMode::OneTx); assert_matches!(tx.result, ExecutionResult::Success { .. }); - let tx = vm.execute(VmExecutionMode::OneTx); + let tx = vm.execute(InspectExecutionMode::OneTx); assert_matches!(tx.result, ExecutionResult::Revert { .. }); - let tx = vm.execute(VmExecutionMode::OneTx); + let tx = vm.execute(InspectExecutionMode::OneTx); assert_matches!(tx.result, ExecutionResult::Success { .. }); - let block_tip = vm.execute(VmExecutionMode::Batch); + let block_tip = vm + .finish_batch(default_pubdata_builder()) + .block_tip_execution_result; assert_matches!(block_tip.result, ExecutionResult::Success { .. }); } diff --git a/core/lib/multivm/src/versions/testonly/storage.rs b/core/lib/multivm/src/versions/testonly/storage.rs index 4951272a60c4..efe7be1edbd1 100644 --- a/core/lib/multivm/src/versions/testonly/storage.rs +++ b/core/lib/multivm/src/versions/testonly/storage.rs @@ -3,7 +3,7 @@ use zksync_contracts::{load_contract, read_bytecode}; use zksync_types::{Address, Execute, U256}; use super::{tester::VmTesterBuilder, ContractToDeploy, TestedVm}; -use crate::interface::{TxExecutionMode, VmExecutionMode, VmInterfaceExt}; +use crate::interface::{InspectExecutionMode, TxExecutionMode, VmInterfaceExt}; fn test_storage(first_tx_calldata: Vec, second_tx_calldata: Vec) -> u32 { let bytecode = read_bytecode( @@ -45,20 +45,20 @@ fn test_storage(first_tx_calldata: Vec, second_tx_calldata: Ve vm.vm.make_snapshot(); vm.vm.push_transaction(tx1); - let result = vm.vm.execute(VmExecutionMode::OneTx); + let result = vm.vm.execute(InspectExecutionMode::OneTx); assert!(!result.result.is_failed(), "First tx failed"); vm.vm.pop_snapshot_no_rollback(); // We rollback once because transient storage and rollbacks are a tricky combination. 
vm.vm.make_snapshot(); vm.vm.push_transaction(tx2.clone()); - let result = vm.vm.execute(VmExecutionMode::OneTx); + let result = vm.vm.execute(InspectExecutionMode::OneTx); assert!(!result.result.is_failed(), "Second tx failed"); vm.vm.rollback_to_the_latest_snapshot(); vm.vm.make_snapshot(); vm.vm.push_transaction(tx2); - let result = vm.vm.execute(VmExecutionMode::OneTx); + let result = vm.vm.execute(InspectExecutionMode::OneTx); assert!(!result.result.is_failed(), "Second tx failed on second run"); result.statistics.pubdata_published diff --git a/core/lib/multivm/src/versions/testonly/tester/mod.rs b/core/lib/multivm/src/versions/testonly/tester/mod.rs index 7432322e0c8d..716b9386235f 100644 --- a/core/lib/multivm/src/versions/testonly/tester/mod.rs +++ b/core/lib/multivm/src/versions/testonly/tester/mod.rs @@ -1,4 +1,4 @@ -use std::{collections::HashSet, fmt}; +use std::{collections::HashSet, fmt, rc::Rc}; use zksync_contracts::BaseSystemContracts; use zksync_test_account::{Account, TxType}; @@ -8,7 +8,8 @@ use zksync_types::{ Address, L1BatchNumber, StorageKey, Transaction, H256, U256, }; use zksync_vm_interface::{ - CurrentExecutionState, VmExecutionResultAndLogs, VmInterfaceHistoryEnabled, + pubdata::PubdataBuilder, CurrentExecutionState, InspectExecutionMode, VmExecutionResultAndLogs, + VmInterfaceHistoryEnabled, }; pub(crate) use self::transaction_test_info::{ExpectedError, TransactionTestInfo, TxModifier}; @@ -16,8 +17,7 @@ use super::{get_empty_storage, read_test_contract}; use crate::{ interface::{ storage::{InMemoryStorage, StoragePtr, StorageView}, - L1BatchEnv, L2BlockEnv, SystemEnv, TxExecutionMode, VmExecutionMode, VmFactory, - VmInterfaceExt, + L1BatchEnv, L2BlockEnv, SystemEnv, TxExecutionMode, VmFactory, VmInterfaceExt, }, versions::testonly::{ default_l1_batch, default_system_env, make_address_rich, ContractToDeploy, @@ -44,7 +44,7 @@ impl VmTester { let tx = account.get_deploy_tx(&contract, None, TxType::L2).tx; let nonce = tx.nonce().unwrap().0.into(); self.vm.push_transaction(tx); - self.vm.execute(VmExecutionMode::OneTx); + self.vm.execute(InspectExecutionMode::OneTx); let deployed_address = deployed_address_create(account.address, nonce); self.test_contract = Some(deployed_address); } @@ -187,12 +187,14 @@ pub(crate) trait TestedVm: /// Unlike [`Self::known_bytecode_hashes()`], the output should only include successfully decommitted bytecodes. fn decommitted_hashes(&self) -> HashSet; - fn execute_with_state_diffs( + fn finish_batch_with_state_diffs( &mut self, diffs: Vec, - mode: VmExecutionMode, + pubdata_builder: Rc, ) -> VmExecutionResultAndLogs; + fn finish_batch_without_pubdata(&mut self) -> VmExecutionResultAndLogs; + fn insert_bytecodes(&mut self, bytecodes: &[&[u8]]); /// Includes bytecodes that have failed to decommit. Should exclude base system contract bytecodes (default AA / EVM emulator). 
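The pattern applied across these test diffs is uniform: per-transaction runs switch from `VmExecutionMode::OneTx` to `InspectExecutionMode::OneTx`, and whole-batch runs no longer go through `execute(VmExecutionMode::Batch)` but through `finish_batch`, which now takes an explicit pubdata builder and exposes the batch tip via `block_tip_execution_result`. A minimal sketch of the new call shape (the helper function and its generic bound are illustrative only and not part of the patch; the trait methods are the ones changed here):

```rust
use std::rc::Rc;

use zksync_types::Transaction;
use zksync_vm_interface::{
    pubdata::PubdataBuilder, InspectExecutionMode, VmInterface, VmInterfaceExt,
};

// Illustrative helper (not part of the patch): runs a single transaction and then
// seals the batch, mirroring the call shape used by the updated tests.
fn run_tx_then_seal_batch<VM: VmInterface + VmInterfaceExt>(
    vm: &mut VM,
    tx: Transaction,
    pubdata_builder: Rc<dyn PubdataBuilder>,
) {
    vm.push_transaction(tx);

    // Per-transaction execution: `VmExecutionMode::OneTx` -> `InspectExecutionMode::OneTx`.
    let tx_result = vm.execute(InspectExecutionMode::OneTx);
    assert!(!tx_result.result.is_failed(), "{tx_result:#?}");

    // Batch sealing: `execute(VmExecutionMode::Batch)` -> `finish_batch(pubdata_builder)`;
    // the former batch-tip result is now `FinishedL1Batch::block_tip_execution_result`.
    let batch_tip = vm.finish_batch(pubdata_builder).block_tip_execution_result;
    assert!(!batch_tip.result.is_failed(), "{batch_tip:#?}");
}
```

In the tests themselves this is wrapped by the new `default_pubdata_builder()` helper, which supplies a `RollupPubdataBuilder::new(Address::zero())` as the `Rc<dyn PubdataBuilder>`.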
diff --git a/core/lib/multivm/src/versions/testonly/tester/transaction_test_info.rs b/core/lib/multivm/src/versions/testonly/tester/transaction_test_info.rs index 87468d3e4d5f..b9373e331c30 100644 --- a/core/lib/multivm/src/versions/testonly/tester/transaction_test_info.rs +++ b/core/lib/multivm/src/versions/testonly/tester/transaction_test_info.rs @@ -1,9 +1,12 @@ use zksync_types::{ExecuteTransactionCommon, Nonce, Transaction, H160}; use super::{TestedVm, VmTester}; -use crate::interface::{ - CurrentExecutionState, ExecutionResult, Halt, TxRevertReason, VmExecutionMode, - VmExecutionResultAndLogs, VmInterfaceExt, VmRevertReason, +use crate::{ + interface::{ + CurrentExecutionState, ExecutionResult, Halt, InspectExecutionMode, TxRevertReason, + VmExecutionResultAndLogs, VmInterfaceExt, VmRevertReason, + }, + versions::testonly::default_pubdata_builder, }; #[derive(Debug, Clone)] @@ -181,7 +184,7 @@ impl VmTester { for tx_test_info in txs { self.execute_tx_and_verify(tx_test_info.clone()); } - self.vm.execute(VmExecutionMode::Batch); + self.vm.finish_batch(default_pubdata_builder()); let mut state = self.vm.get_current_execution_state(); state.used_contract_hashes.sort(); state @@ -202,7 +205,7 @@ fn execute_tx_and_verify( let inner_state_before = vm.dump_state(); vm.make_snapshot(); vm.push_transaction(tx_test_info.tx.clone()); - let result = vm.execute(VmExecutionMode::OneTx); + let result = vm.execute(InspectExecutionMode::OneTx); tx_test_info.verify_result(&result); if tx_test_info.should_rollback() { vm.rollback_to_the_latest_snapshot(); diff --git a/core/lib/multivm/src/versions/testonly/transfer.rs b/core/lib/multivm/src/versions/testonly/transfer.rs index 051826a64f24..3572adba147c 100644 --- a/core/lib/multivm/src/versions/testonly/transfer.rs +++ b/core/lib/multivm/src/versions/testonly/transfer.rs @@ -3,8 +3,10 @@ use zksync_contracts::{load_contract, read_bytecode}; use zksync_types::{utils::storage_key_for_eth_balance, Address, Execute, U256}; use zksync_utils::u256_to_h256; -use super::{get_empty_storage, tester::VmTesterBuilder, ContractToDeploy, TestedVm}; -use crate::interface::{TxExecutionMode, VmExecutionMode, VmInterfaceExt}; +use super::{ + default_pubdata_builder, get_empty_storage, tester::VmTesterBuilder, ContractToDeploy, TestedVm, +}; +use crate::interface::{InspectExecutionMode, TxExecutionMode, VmInterfaceExt}; enum TestOptions { Send(U256), @@ -72,13 +74,16 @@ fn test_send_or_transfer(test_option: TestOptions) { ); vm.vm.push_transaction(tx); - let tx_result = vm.vm.execute(VmExecutionMode::OneTx); + let tx_result = vm.vm.execute(InspectExecutionMode::OneTx); assert!( !tx_result.result.is_failed(), "Transaction wasn't successful" ); - let batch_result = vm.vm.execute(VmExecutionMode::Batch); + let batch_result = vm + .vm + .finish_batch(default_pubdata_builder()) + .block_tip_execution_result; assert!(!batch_result.result.is_failed(), "Batch wasn't successful"); let new_recipient_balance = vm.get_eth_balance(recipient_address); @@ -161,7 +166,7 @@ fn test_reentrancy_protection_send_or_transfer(test_option: TestOp ); vm.vm.push_transaction(tx1); - let tx1_result = vm.vm.execute(VmExecutionMode::OneTx); + let tx1_result = vm.vm.execute(InspectExecutionMode::OneTx); assert!( !tx1_result.result.is_failed(), "Transaction 1 wasn't successful" @@ -178,13 +183,16 @@ fn test_reentrancy_protection_send_or_transfer(test_option: TestOp ); vm.vm.push_transaction(tx2); - let tx2_result = vm.vm.execute(VmExecutionMode::OneTx); + let tx2_result = 
vm.vm.execute(InspectExecutionMode::OneTx); assert!( tx2_result.result.is_failed(), "Transaction 2 should have failed, but it succeeded" ); - let batch_result = vm.vm.execute(VmExecutionMode::Batch); + let batch_result = vm + .vm + .finish_batch(default_pubdata_builder()) + .block_tip_execution_result; assert!(!batch_result.result.is_failed(), "Batch wasn't successful"); } diff --git a/core/lib/multivm/src/versions/testonly/upgrade.rs b/core/lib/multivm/src/versions/testonly/upgrade.rs index 9401cbb4ba84..359f19faedb2 100644 --- a/core/lib/multivm/src/versions/testonly/upgrade.rs +++ b/core/lib/multivm/src/versions/testonly/upgrade.rs @@ -14,7 +14,9 @@ use super::{ get_complex_upgrade_abi, get_empty_storage, read_complex_upgrade, read_test_contract, tester::VmTesterBuilder, TestedVm, }; -use crate::interface::{ExecutionResult, Halt, TxExecutionMode, VmExecutionMode, VmInterfaceExt}; +use crate::interface::{ + ExecutionResult, Halt, InspectExecutionMode, TxExecutionMode, VmInterfaceExt, +}; /// In this test we ensure that the requirements for protocol upgrade transactions are enforced by the bootloader: /// - This transaction must be the only one in block @@ -71,9 +73,9 @@ pub(crate) fn test_protocol_upgrade_is_first() { vm.vm.push_transaction(normal_l1_transaction.clone()); vm.vm.push_transaction(another_protocol_upgrade_transaction); - vm.vm.execute(VmExecutionMode::OneTx); - vm.vm.execute(VmExecutionMode::OneTx); - let result = vm.vm.execute(VmExecutionMode::OneTx); + vm.vm.execute(InspectExecutionMode::OneTx); + vm.vm.execute(InspectExecutionMode::OneTx); + let result = vm.vm.execute(InspectExecutionMode::OneTx); assert_eq!( result.result, ExecutionResult::Halt { @@ -87,8 +89,8 @@ pub(crate) fn test_protocol_upgrade_is_first() { vm.vm.push_transaction(normal_l1_transaction.clone()); vm.vm.push_transaction(protocol_upgrade_transaction.clone()); - vm.vm.execute(VmExecutionMode::OneTx); - let result = vm.vm.execute(VmExecutionMode::OneTx); + vm.vm.execute(InspectExecutionMode::OneTx); + let result = vm.vm.execute(InspectExecutionMode::OneTx); assert_eq!( result.result, ExecutionResult::Halt { @@ -101,8 +103,8 @@ pub(crate) fn test_protocol_upgrade_is_first() { vm.vm.push_transaction(protocol_upgrade_transaction); vm.vm.push_transaction(normal_l1_transaction); - vm.vm.execute(VmExecutionMode::OneTx); - let result = vm.vm.execute(VmExecutionMode::OneTx); + vm.vm.execute(InspectExecutionMode::OneTx); + let result = vm.vm.execute(InspectExecutionMode::OneTx); assert!(!result.result.is_failed()); } @@ -137,7 +139,7 @@ pub(crate) fn test_force_deploy_upgrade() { vm.vm.push_transaction(transaction); - let result = vm.vm.execute(VmExecutionMode::OneTx); + let result = vm.vm.execute(InspectExecutionMode::OneTx); assert!( !result.result.is_failed(), "The force upgrade was not successful" @@ -186,7 +188,7 @@ pub(crate) fn test_complex_upgrader() { ); vm.vm.push_transaction(transaction); - let result = vm.vm.execute(VmExecutionMode::OneTx); + let result = vm.vm.execute(InspectExecutionMode::OneTx); assert!( !result.result.is_failed(), "The force upgrade was not successful" diff --git a/core/lib/multivm/src/versions/vm_1_3_2/vm.rs b/core/lib/multivm/src/versions/vm_1_3_2/vm.rs index 31457fc9676a..d9768652c2f3 100644 --- a/core/lib/multivm/src/versions/vm_1_3_2/vm.rs +++ b/core/lib/multivm/src/versions/vm_1_3_2/vm.rs @@ -1,16 +1,16 @@ -use std::collections::HashSet; +use std::{collections::HashSet, rc::Rc}; use zksync_types::Transaction; use zksync_utils::{bytecode::hash_bytecode, h256_to_u256}; +use 
zksync_vm_interface::{pubdata::PubdataBuilder, InspectExecutionMode}; use crate::{ glue::{history_mode::HistoryMode, GlueInto}, interface::{ storage::{StoragePtr, WriteStorage}, BytecodeCompressionError, BytecodeCompressionResult, FinishedL1Batch, L1BatchEnv, - L2BlockEnv, PushTransactionResult, SystemEnv, TxExecutionMode, VmExecutionMode, - VmExecutionResultAndLogs, VmFactory, VmInterface, VmInterfaceHistoryEnabled, - VmMemoryMetrics, + L2BlockEnv, PushTransactionResult, SystemEnv, TxExecutionMode, VmExecutionResultAndLogs, + VmFactory, VmInterface, VmInterfaceHistoryEnabled, VmMemoryMetrics, }, tracers::old::TracerDispatcher, utils::bytecode, @@ -61,7 +61,7 @@ impl VmInterface for Vm { fn inspect( &mut self, tracer: &mut Self::TracerDispatcher, - execution_mode: VmExecutionMode, + execution_mode: InspectExecutionMode, ) -> VmExecutionResultAndLogs { if let Some(storage_invocations) = tracer.storage_invocations { self.vm @@ -70,7 +70,7 @@ impl VmInterface for Vm { } match execution_mode { - VmExecutionMode::OneTx => { + InspectExecutionMode::OneTx => { match self.system_env.execution_mode { TxExecutionMode::VerifyExecute => { let enable_call_tracer = tracer @@ -93,8 +93,7 @@ impl VmInterface for Vm { .glue_into(), } } - VmExecutionMode::Batch => self.finish_batch().block_tip_execution_result, - VmExecutionMode::Bootloader => self.vm.execute_block_tip().glue_into(), + InspectExecutionMode::Bootloader => self.vm.execute_block_tip().glue_into(), } } @@ -184,7 +183,7 @@ impl VmInterface for Vm { } } - fn finish_batch(&mut self) -> FinishedL1Batch { + fn finish_batch(&mut self, _pubdata_builder: Rc) -> FinishedL1Batch { self.vm .execute_till_block_end( crate::vm_1_3_2::vm_with_bootloader::BootloaderJobType::BlockPostprocessing, diff --git a/core/lib/multivm/src/versions/vm_1_4_1/tracers/pubdata_tracer.rs b/core/lib/multivm/src/versions/vm_1_4_1/tracers/pubdata_tracer.rs index 238804bc7fca..6f927c5c99a8 100644 --- a/core/lib/multivm/src/versions/vm_1_4_1/tracers/pubdata_tracer.rs +++ b/core/lib/multivm/src/versions/vm_1_4_1/tracers/pubdata_tracer.rs @@ -10,6 +10,7 @@ use zksync_utils::{h256_to_u256, u256_to_bytes_be, u256_to_h256}; use crate::{ interface::{ + pubdata::L1MessengerL2ToL1Log, storage::{StoragePtr, WriteStorage}, tracer::{TracerExecutionStatus, TracerExecutionStopReason}, L1BatchEnv, VmEvent, VmExecutionMode, @@ -17,7 +18,7 @@ use crate::{ tracers::dynamic::vm_1_4_1::DynTracer, utils::events::{ extract_bytecode_publication_requests_from_l1_messenger, - extract_l2tol1logs_from_l1_messenger, L1MessengerL2ToL1Log, + extract_l2tol1logs_from_l1_messenger, }, vm_1_4_1::{ bootloader_state::{utils::apply_pubdata_to_memory, BootloaderState}, diff --git a/core/lib/multivm/src/versions/vm_1_4_1/types/internals/pubdata.rs b/core/lib/multivm/src/versions/vm_1_4_1/types/internals/pubdata.rs index d07732ae4350..c1ca93152a03 100644 --- a/core/lib/multivm/src/versions/vm_1_4_1/types/internals/pubdata.rs +++ b/core/lib/multivm/src/versions/vm_1_4_1/types/internals/pubdata.rs @@ -1,6 +1,6 @@ use zksync_types::writes::{compress_state_diffs, StateDiffRecord}; -use crate::utils::events::L1MessengerL2ToL1Log; +use crate::interface::pubdata::L1MessengerL2ToL1Log; /// Struct based on which the pubdata blob is formed #[derive(Debug, Clone, Default)] diff --git a/core/lib/multivm/src/versions/vm_1_4_1/vm.rs b/core/lib/multivm/src/versions/vm_1_4_1/vm.rs index 1c38958bb318..af483feedd7e 100644 --- a/core/lib/multivm/src/versions/vm_1_4_1/vm.rs +++ b/core/lib/multivm/src/versions/vm_1_4_1/vm.rs @@ -1,8 +1,11 
@@ +use std::rc::Rc; + use circuit_sequencer_api_1_4_1::sort_storage_access::sort_storage_access_queries; use zksync_types::{ l2_to_l1_log::{SystemL2ToL1Log, UserL2ToL1Log}, Transaction, }; +use zksync_vm_interface::{pubdata::PubdataBuilder, InspectExecutionMode}; use crate::{ glue::GlueInto, @@ -95,9 +98,9 @@ impl VmInterface for Vm { fn inspect( &mut self, tracer: &mut Self::TracerDispatcher, - execution_mode: VmExecutionMode, + execution_mode: InspectExecutionMode, ) -> VmExecutionResultAndLogs { - self.inspect_inner(tracer, execution_mode, None) + self.inspect_inner(tracer, execution_mode.into(), None) } fn start_new_l2_block(&mut self, l2_block_env: L2BlockEnv) { @@ -128,8 +131,12 @@ impl VmInterface for Vm { } } - fn finish_batch(&mut self) -> FinishedL1Batch { - let result = self.inspect(&mut TracerDispatcher::default(), VmExecutionMode::Batch); + fn finish_batch(&mut self, _pubdata_builder: Rc) -> FinishedL1Batch { + let result = self.inspect_inner( + &mut TracerDispatcher::default(), + VmExecutionMode::Batch, + None, + ); let execution_state = self.get_current_execution_state(); let bootloader_memory = self.bootloader_state.bootloader_memory(); FinishedL1Batch { diff --git a/core/lib/multivm/src/versions/vm_1_4_2/tracers/pubdata_tracer.rs b/core/lib/multivm/src/versions/vm_1_4_2/tracers/pubdata_tracer.rs index ffe65b5e050b..6c4f737f9e94 100644 --- a/core/lib/multivm/src/versions/vm_1_4_2/tracers/pubdata_tracer.rs +++ b/core/lib/multivm/src/versions/vm_1_4_2/tracers/pubdata_tracer.rs @@ -10,6 +10,7 @@ use zksync_utils::{h256_to_u256, u256_to_bytes_be, u256_to_h256}; use crate::{ interface::{ + pubdata::L1MessengerL2ToL1Log, storage::{StoragePtr, WriteStorage}, tracer::{TracerExecutionStatus, TracerExecutionStopReason}, L1BatchEnv, VmEvent, VmExecutionMode, @@ -17,7 +18,7 @@ use crate::{ tracers::dynamic::vm_1_4_1::DynTracer, utils::events::{ extract_bytecode_publication_requests_from_l1_messenger, - extract_l2tol1logs_from_l1_messenger, L1MessengerL2ToL1Log, + extract_l2tol1logs_from_l1_messenger, }, vm_1_4_2::{ bootloader_state::{utils::apply_pubdata_to_memory, BootloaderState}, diff --git a/core/lib/multivm/src/versions/vm_1_4_2/types/internals/pubdata.rs b/core/lib/multivm/src/versions/vm_1_4_2/types/internals/pubdata.rs index d07732ae4350..c1ca93152a03 100644 --- a/core/lib/multivm/src/versions/vm_1_4_2/types/internals/pubdata.rs +++ b/core/lib/multivm/src/versions/vm_1_4_2/types/internals/pubdata.rs @@ -1,6 +1,6 @@ use zksync_types::writes::{compress_state_diffs, StateDiffRecord}; -use crate::utils::events::L1MessengerL2ToL1Log; +use crate::interface::pubdata::L1MessengerL2ToL1Log; /// Struct based on which the pubdata blob is formed #[derive(Debug, Clone, Default)] diff --git a/core/lib/multivm/src/versions/vm_1_4_2/vm.rs b/core/lib/multivm/src/versions/vm_1_4_2/vm.rs index ca69a191e26f..e7c8e7acdd95 100644 --- a/core/lib/multivm/src/versions/vm_1_4_2/vm.rs +++ b/core/lib/multivm/src/versions/vm_1_4_2/vm.rs @@ -1,10 +1,11 @@ -use std::mem; +use std::{mem, rc::Rc}; use circuit_sequencer_api_1_4_2::sort_storage_access::sort_storage_access_queries; use zksync_types::{ l2_to_l1_log::{SystemL2ToL1Log, UserL2ToL1Log}, Transaction, }; +use zksync_vm_interface::{pubdata::PubdataBuilder, InspectExecutionMode}; use crate::{ glue::GlueInto, @@ -97,9 +98,9 @@ impl VmInterface for Vm { fn inspect( &mut self, tracer: &mut Self::TracerDispatcher, - execution_mode: VmExecutionMode, + execution_mode: InspectExecutionMode, ) -> VmExecutionResultAndLogs { - self.inspect_inner(mem::take(tracer), 
execution_mode, None) + self.inspect_inner(mem::take(tracer), execution_mode.into(), None) } fn start_new_l2_block(&mut self, l2_block_env: L2BlockEnv) { @@ -130,8 +131,8 @@ impl VmInterface for Vm { } } - fn finish_batch(&mut self) -> FinishedL1Batch { - let result = self.inspect(&mut TracerDispatcher::default(), VmExecutionMode::Batch); + fn finish_batch(&mut self, _pubdata_builder: Rc) -> FinishedL1Batch { + let result = self.inspect_inner(TracerDispatcher::default(), VmExecutionMode::Batch, None); let execution_state = self.get_current_execution_state(); let bootloader_memory = self.bootloader_state.bootloader_memory(); FinishedL1Batch { diff --git a/core/lib/multivm/src/versions/vm_boojum_integration/tracers/pubdata_tracer.rs b/core/lib/multivm/src/versions/vm_boojum_integration/tracers/pubdata_tracer.rs index 326a57896124..2f7d141cb0a7 100644 --- a/core/lib/multivm/src/versions/vm_boojum_integration/tracers/pubdata_tracer.rs +++ b/core/lib/multivm/src/versions/vm_boojum_integration/tracers/pubdata_tracer.rs @@ -10,6 +10,7 @@ use zksync_utils::{h256_to_u256, u256_to_bytes_be, u256_to_h256}; use crate::{ interface::{ + pubdata::L1MessengerL2ToL1Log, storage::{StoragePtr, WriteStorage}, tracer::{TracerExecutionStatus, TracerExecutionStopReason}, L1BatchEnv, VmEvent, VmExecutionMode, @@ -17,7 +18,7 @@ use crate::{ tracers::dynamic::vm_1_4_0::DynTracer, utils::events::{ extract_bytecode_publication_requests_from_l1_messenger, - extract_l2tol1logs_from_l1_messenger, L1MessengerL2ToL1Log, + extract_l2tol1logs_from_l1_messenger, }, vm_boojum_integration::{ bootloader_state::{utils::apply_pubdata_to_memory, BootloaderState}, diff --git a/core/lib/multivm/src/versions/vm_boojum_integration/types/internals/pubdata.rs b/core/lib/multivm/src/versions/vm_boojum_integration/types/internals/pubdata.rs index 9df9009831f4..152ccad2fbcb 100644 --- a/core/lib/multivm/src/versions/vm_boojum_integration/types/internals/pubdata.rs +++ b/core/lib/multivm/src/versions/vm_boojum_integration/types/internals/pubdata.rs @@ -1,6 +1,6 @@ use zksync_types::writes::{compress_state_diffs, StateDiffRecord}; -use crate::utils::events::L1MessengerL2ToL1Log; +use crate::interface::pubdata::L1MessengerL2ToL1Log; /// Struct based on which the pubdata blob is formed #[derive(Debug, Clone, Default)] diff --git a/core/lib/multivm/src/versions/vm_boojum_integration/vm.rs b/core/lib/multivm/src/versions/vm_boojum_integration/vm.rs index bfd055a5cc84..43c9900486db 100644 --- a/core/lib/multivm/src/versions/vm_boojum_integration/vm.rs +++ b/core/lib/multivm/src/versions/vm_boojum_integration/vm.rs @@ -1,8 +1,11 @@ +use std::rc::Rc; + use circuit_sequencer_api_1_4_0::sort_storage_access::sort_storage_access_queries; use zksync_types::{ l2_to_l1_log::{SystemL2ToL1Log, UserL2ToL1Log}, Transaction, }; +use zksync_vm_interface::{pubdata::PubdataBuilder, InspectExecutionMode}; use crate::{ glue::GlueInto, @@ -95,9 +98,9 @@ impl VmInterface for Vm { fn inspect( &mut self, tracer: &mut Self::TracerDispatcher, - execution_mode: VmExecutionMode, + execution_mode: InspectExecutionMode, ) -> VmExecutionResultAndLogs { - self.inspect_inner(tracer, execution_mode) + self.inspect_inner(tracer, execution_mode.into()) } fn start_new_l2_block(&mut self, l2_block_env: L2BlockEnv) { @@ -129,8 +132,8 @@ impl VmInterface for Vm { } } - fn finish_batch(&mut self) -> FinishedL1Batch { - let result = self.inspect(&mut TracerDispatcher::default(), VmExecutionMode::Batch); + fn finish_batch(&mut self, _pubdata_builder: Rc) -> FinishedL1Batch { + let result 
= self.inspect_inner(&mut TracerDispatcher::default(), VmExecutionMode::Batch); let execution_state = self.get_current_execution_state(); let bootloader_memory = self.bootloader_state.bootloader_memory(); FinishedL1Batch { diff --git a/core/lib/multivm/src/versions/vm_fast/pubdata.rs b/core/lib/multivm/src/versions/vm_fast/pubdata.rs index d07732ae4350..c1ca93152a03 100644 --- a/core/lib/multivm/src/versions/vm_fast/pubdata.rs +++ b/core/lib/multivm/src/versions/vm_fast/pubdata.rs @@ -1,6 +1,6 @@ use zksync_types::writes::{compress_state_diffs, StateDiffRecord}; -use crate::utils::events::L1MessengerL2ToL1Log; +use crate::interface::pubdata::L1MessengerL2ToL1Log; /// Struct based on which the pubdata blob is formed #[derive(Debug, Clone, Default)] diff --git a/core/lib/multivm/src/versions/vm_fast/tests/mod.rs b/core/lib/multivm/src/versions/vm_fast/tests/mod.rs index f385ca2a438f..2b4665f82241 100644 --- a/core/lib/multivm/src/versions/vm_fast/tests/mod.rs +++ b/core/lib/multivm/src/versions/vm_fast/tests/mod.rs @@ -1,11 +1,11 @@ -use std::{any::Any, collections::HashSet, fmt}; +use std::{any::Any, collections::HashSet, fmt, rc::Rc}; use zksync_types::{writes::StateDiffRecord, StorageKey, Transaction, H160, H256, U256}; use zksync_utils::h256_to_u256; use zksync_vm2::interface::{Event, HeapId, StateInterface}; use zksync_vm_interface::{ - storage::ReadStorage, CurrentExecutionState, L2BlockEnv, VmExecutionMode, - VmExecutionResultAndLogs, VmInterfaceExt, + pubdata::PubdataBuilder, storage::ReadStorage, CurrentExecutionState, L2BlockEnv, + VmExecutionMode, VmExecutionResultAndLogs, VmInterface, }; use super::Vm; @@ -99,13 +99,18 @@ impl TestedVm for Vm> { self.decommitted_hashes().collect() } - fn execute_with_state_diffs( + fn finish_batch_with_state_diffs( &mut self, diffs: Vec, - mode: VmExecutionMode, + pubdata_builder: Rc, ) -> VmExecutionResultAndLogs { self.enforce_state_diffs(diffs); - self.execute(mode) + self.finish_batch(pubdata_builder) + .block_tip_execution_result + } + + fn finish_batch_without_pubdata(&mut self) -> VmExecutionResultAndLogs { + self.inspect_inner(&mut Default::default(), VmExecutionMode::Batch) } fn insert_bytecodes(&mut self, bytecodes: &[&[u8]]) { diff --git a/core/lib/multivm/src/versions/vm_fast/vm.rs b/core/lib/multivm/src/versions/vm_fast/vm.rs index 88e0b10b5eaf..a2114a339481 100644 --- a/core/lib/multivm/src/versions/vm_fast/vm.rs +++ b/core/lib/multivm/src/versions/vm_fast/vm.rs @@ -1,4 +1,4 @@ -use std::{collections::HashMap, fmt, mem}; +use std::{collections::HashMap, fmt, mem, rc::Rc}; use zk_evm_1_5_0::{ aux_structures::LogQuery, zkevm_opcode_defs::system_params::INITIAL_FRAME_FORMAL_EH_LOCATION, @@ -21,6 +21,7 @@ use zksync_vm2::{ interface::{CallframeInterface, HeapId, StateInterface, Tracer}, ExecutionEnd, FatPointer, Program, Settings, StorageSlot, VirtualMachine, }; +use zksync_vm_interface::{pubdata::PubdataBuilder, InspectExecutionMode}; use super::{ bootloader_state::{BootloaderState, BootloaderStateSnapshot}, @@ -103,7 +104,7 @@ pub struct Vm { enforced_state_diffs: Option>, } -impl Vm { +impl Vm { pub fn custom(batch_env: L1BatchEnv, system_env: SystemEnv, storage: S) -> Self { assert!( is_supported_by_fast_vm(system_env.version), @@ -533,39 +534,10 @@ impl Vm { pubdata_costs: world_diff.pubdata_costs().to_vec(), } } -} -impl VmFactory> for Vm, Tr> -where - S: ReadStorage, - Tr: Tracer + Default + 'static, -{ - fn new( - batch_env: L1BatchEnv, - system_env: SystemEnv, - storage: StoragePtr>, - ) -> Self { - let storage = 
ImmutableStorageView::new(storage); - Self::custom(batch_env, system_env, storage) - } -} - -impl VmInterface for Vm { - type TracerDispatcher = Tr; - - fn push_transaction(&mut self, tx: Transaction) -> PushTransactionResult<'_> { - self.push_transaction_inner(tx, 0, true); - PushTransactionResult { - compressed_bytecodes: self - .bootloader_state - .get_last_tx_compressed_bytecodes() - .into(), - } - } - - fn inspect( + pub(crate) fn inspect_inner( &mut self, - tracer: &mut Self::TracerDispatcher, + tracer: &mut Tr, execution_mode: VmExecutionMode, ) -> VmExecutionResultAndLogs { let mut track_refunds = false; @@ -655,6 +627,43 @@ impl VmInterface for Vm { new_known_factory_deps: None, } } +} + +impl VmFactory> for Vm, Tr> +where + S: ReadStorage, + Tr: Tracer + Default + 'static, +{ + fn new( + batch_env: L1BatchEnv, + system_env: SystemEnv, + storage: StoragePtr>, + ) -> Self { + let storage = ImmutableStorageView::new(storage); + Self::custom(batch_env, system_env, storage) + } +} + +impl VmInterface for Vm { + type TracerDispatcher = Tr; + + fn push_transaction(&mut self, tx: Transaction) -> PushTransactionResult<'_> { + self.push_transaction_inner(tx, 0, true); + PushTransactionResult { + compressed_bytecodes: self + .bootloader_state + .get_last_tx_compressed_bytecodes() + .into(), + } + } + + fn inspect( + &mut self, + tracer: &mut Self::TracerDispatcher, + execution_mode: InspectExecutionMode, + ) -> VmExecutionResultAndLogs { + self.inspect_inner(tracer, execution_mode.into()) + } fn inspect_transaction_with_bytecode_compression( &mut self, @@ -663,7 +672,7 @@ impl VmInterface for Vm { with_compression: bool, ) -> (BytecodeCompressionResult<'_>, VmExecutionResultAndLogs) { self.push_transaction_inner(tx, 0, with_compression); - let result = self.inspect(tracer, VmExecutionMode::OneTx); + let result = self.inspect(tracer, InspectExecutionMode::OneTx); let compression_result = if self.has_unpublished_bytecodes() { Err(BytecodeCompressionError::BytecodeCompressionFailed) @@ -680,8 +689,8 @@ impl VmInterface for Vm { self.bootloader_state.start_new_l2_block(l2_block_env) } - fn finish_batch(&mut self) -> FinishedL1Batch { - let result = self.inspect(&mut Tr::default(), VmExecutionMode::Batch); + fn finish_batch(&mut self, _pubdata_builder: Rc) -> FinishedL1Batch { + let result = self.inspect_inner(&mut Tr::default(), VmExecutionMode::Batch); let execution_state = self.get_current_execution_state(); let bootloader_memory = self.bootloader_state.bootloader_memory(); FinishedL1Batch { diff --git a/core/lib/multivm/src/versions/vm_latest/bootloader_state/state.rs b/core/lib/multivm/src/versions/vm_latest/bootloader_state/state.rs index 4ba27b14bad6..2085bbaba31f 100644 --- a/core/lib/multivm/src/versions/vm_latest/bootloader_state/state.rs +++ b/core/lib/multivm/src/versions/vm_latest/bootloader_state/state.rs @@ -1,11 +1,15 @@ use std::cmp::Ordering; use once_cell::sync::OnceCell; -use zksync_types::{L2ChainId, U256}; +use zksync_types::{L2ChainId, ProtocolVersionId, U256}; +use zksync_vm_interface::pubdata::PubdataBuilder; use super::{tx::BootloaderTx, utils::apply_pubdata_to_memory}; use crate::{ - interface::{BootloaderMemory, CompressedBytecodeInfo, L2BlockEnv, TxExecutionMode}, + interface::{ + pubdata::PubdataInput, BootloaderMemory, CompressedBytecodeInfo, L2BlockEnv, + TxExecutionMode, + }, vm_latest::{ bootloader_state::{ l2_block::BootloaderL2Block, @@ -13,7 +17,7 @@ use crate::{ utils::{apply_l2_block, apply_tx_to_memory}, }, constants::TX_DESCRIPTION_OFFSET, - 
types::internals::{PubdataInput, TransactionData}, + types::internals::TransactionData, utils::l2_blocks::assert_next_block, }, }; @@ -45,6 +49,8 @@ pub struct BootloaderState { free_tx_offset: usize, /// Information about the the pubdata that will be needed to supply to the L1Messenger pubdata_information: OnceCell, + /// Protocol version. + protocol_version: ProtocolVersionId, } impl BootloaderState { @@ -52,6 +58,7 @@ impl BootloaderState { execution_mode: TxExecutionMode, initial_memory: BootloaderMemory, first_l2_block: L2BlockEnv, + protocol_version: ProtocolVersionId, ) -> Self { let l2_block = BootloaderL2Block::new(first_l2_block, 0); Self { @@ -62,6 +69,7 @@ impl BootloaderState { execution_mode, free_tx_offset: 0, pubdata_information: Default::default(), + protocol_version, } } @@ -135,18 +143,31 @@ impl BootloaderState { pub(crate) fn last_l2_block(&self) -> &BootloaderL2Block { self.l2_blocks.last().unwrap() } + pub(crate) fn get_pubdata_information(&self) -> &PubdataInput { self.pubdata_information .get() .expect("Pubdata information is not set") } + pub(crate) fn settlement_layer_pubdata(&self, pubdata_builder: &dyn PubdataBuilder) -> Vec { + let pubdata_information = self + .pubdata_information + .get() + .expect("Pubdata information is not set"); + + pubdata_builder.settlement_layer_pubdata(pubdata_information, self.protocol_version) + } + fn last_mut_l2_block(&mut self) -> &mut BootloaderL2Block { self.l2_blocks.last_mut().unwrap() } /// Apply all bootloader transaction to the initial memory - pub(crate) fn bootloader_memory(&self) -> BootloaderMemory { + pub(crate) fn bootloader_memory( + &self, + pubdata_builder: &dyn PubdataBuilder, + ) -> BootloaderMemory { let mut initial_memory = self.initial_memory.clone(); let mut offset = 0; let mut compressed_bytecodes_offset = 0; @@ -174,11 +195,15 @@ impl BootloaderState { let pubdata_information = self .pubdata_information - .clone() - .into_inner() + .get() .expect("Empty pubdata information"); - apply_pubdata_to_memory(&mut initial_memory, pubdata_information); + apply_pubdata_to_memory( + &mut initial_memory, + pubdata_builder, + pubdata_information, + self.protocol_version, + ); initial_memory } @@ -291,4 +316,8 @@ impl BootloaderState { ); } } + + pub(crate) fn protocol_version(&self) -> ProtocolVersionId { + self.protocol_version + } } diff --git a/core/lib/multivm/src/versions/vm_latest/bootloader_state/utils.rs b/core/lib/multivm/src/versions/vm_latest/bootloader_state/utils.rs index 23c079202c1f..c409bda35c1d 100644 --- a/core/lib/multivm/src/versions/vm_latest/bootloader_state/utils.rs +++ b/core/lib/multivm/src/versions/vm_latest/bootloader_state/utils.rs @@ -1,9 +1,12 @@ -use zksync_types::{ethabi, U256}; +use zksync_types::{ethabi, ProtocolVersionId, U256}; use zksync_utils::{bytes_to_be_words, h256_to_u256}; use super::tx::BootloaderTx; use crate::{ - interface::{BootloaderMemory, CompressedBytecodeInfo, TxExecutionMode}, + interface::{ + pubdata::{PubdataBuilder, PubdataInput}, + BootloaderMemory, CompressedBytecodeInfo, TxExecutionMode, + }, utils::bytecode, vm_latest::{ bootloader_state::l2_block::BootloaderL2Block, @@ -14,7 +17,6 @@ use crate::{ TX_DESCRIPTION_OFFSET, TX_OPERATOR_L2_BLOCK_INFO_OFFSET, TX_OPERATOR_SLOTS_PER_L2_BLOCK_INFO, TX_OVERHEAD_OFFSET, TX_TRUSTED_GAS_LIMIT_OFFSET, }, - types::internals::PubdataInput, }, }; @@ -124,26 +126,61 @@ fn apply_l2_block_inner( ]) } +fn bootloader_memory_input( + pubdata_builder: &dyn PubdataBuilder, + input: &PubdataInput, + protocol_version: ProtocolVersionId, 
+) -> Vec { + let l2_da_validator_address = pubdata_builder.l2_da_validator(); + let operator_input = pubdata_builder.l1_messenger_operator_input(input, protocol_version); + + ethabi::encode(&[ + ethabi::Token::Address(l2_da_validator_address), + ethabi::Token::Bytes(operator_input), + ]) +} + pub(crate) fn apply_pubdata_to_memory( memory: &mut BootloaderMemory, - pubdata_information: PubdataInput, + pubdata_builder: &dyn PubdataBuilder, + pubdata_information: &PubdataInput, + protocol_version: ProtocolVersionId, ) { - // Skipping two slots as they will be filled by the bootloader itself: - // - One slot is for the selector of the call to the L1Messenger. - // - The other slot is for the 0x20 offset for the calldata. - let l1_messenger_pubdata_start_slot = OPERATOR_PROVIDED_L1_MESSENGER_PUBDATA_OFFSET + 2; - - // Need to skip first word as it represents array offset - // while bootloader expects only [len || data] - let pubdata = ethabi::encode(&[ethabi::Token::Bytes( - pubdata_information.build_pubdata(true), - )])[32..] - .to_vec(); - - assert!( - pubdata.len() / 32 <= OPERATOR_PROVIDED_L1_MESSENGER_PUBDATA_SLOTS - 2, - "The encoded pubdata is too big" - ); + let (l1_messenger_pubdata_start_slot, pubdata) = if protocol_version.is_pre_gateway() { + // Skipping two slots as they will be filled by the bootloader itself: + // - One slot is for the selector of the call to the L1Messenger. + // - The other slot is for the 0x20 offset for the calldata. + let l1_messenger_pubdata_start_slot = OPERATOR_PROVIDED_L1_MESSENGER_PUBDATA_OFFSET + 2; + + // Need to skip first word as it represents array offset + // while bootloader expects only [len || data] + let pubdata = ethabi::encode(&[ethabi::Token::Bytes( + pubdata_builder.l1_messenger_operator_input(pubdata_information, protocol_version), + )])[32..] + .to_vec(); + + assert!( + pubdata.len() / 32 <= OPERATOR_PROVIDED_L1_MESSENGER_PUBDATA_SLOTS - 2, + "The encoded pubdata is too big" + ); + + (l1_messenger_pubdata_start_slot, pubdata) + } else { + // Skipping the first slot as it will be filled by the bootloader itself: + // It is for the selector of the call to the L1Messenger. + let l1_messenger_pubdata_start_slot = OPERATOR_PROVIDED_L1_MESSENGER_PUBDATA_OFFSET + 1; + + let pubdata = + bootloader_memory_input(pubdata_builder, pubdata_information, protocol_version); + + assert!( + // Note that unlike the previous version, the difference is `1`, since now it also includes the offset + pubdata.len() / 32 < OPERATOR_PROVIDED_L1_MESSENGER_PUBDATA_SLOTS, + "The encoded pubdata is too big" + ); + + (l1_messenger_pubdata_start_slot, pubdata) + }; pubdata .chunks(32) diff --git a/core/lib/multivm/src/versions/vm_latest/constants.rs b/core/lib/multivm/src/versions/vm_latest/constants.rs index 01f697ec91a2..c047e6ffa3b0 100644 --- a/core/lib/multivm/src/versions/vm_latest/constants.rs +++ b/core/lib/multivm/src/versions/vm_latest/constants.rs @@ -26,6 +26,7 @@ pub(crate) const fn get_used_bootloader_memory_bytes(subversion: MultiVMSubversi match subversion { MultiVMSubversion::SmallBootloaderMemory => 59_000_000, MultiVMSubversion::IncreasedBootloaderMemory => 63_800_000, + MultiVMSubversion::Gateway => 63_800_000, } } @@ -201,6 +202,6 @@ pub(crate) const TX_SLOT_OVERHEAD_GAS: u32 = 10_000; /// getting often sealed due to the memory limit being reached, the L2 fair gas price will be increased. 
pub(crate) const TX_MEMORY_OVERHEAD_GAS: u32 = 10; -const ZK_SYNC_BYTES_PER_BLOB: usize = BLOB_CHUNK_SIZE * ELEMENTS_PER_4844_BLOCK; +pub(crate) const ZK_SYNC_BYTES_PER_BLOB: usize = BLOB_CHUNK_SIZE * ELEMENTS_PER_4844_BLOCK; pub const MAX_BLOBS_PER_BATCH: usize = 6; pub const MAX_VM_PUBDATA_PER_BATCH: usize = MAX_BLOBS_PER_BATCH * ZK_SYNC_BYTES_PER_BLOB; diff --git a/core/lib/multivm/src/versions/vm_latest/implementation/execution.rs b/core/lib/multivm/src/versions/vm_latest/implementation/execution.rs index e70f05f85ef2..d9331720ce28 100644 --- a/core/lib/multivm/src/versions/vm_latest/implementation/execution.rs +++ b/core/lib/multivm/src/versions/vm_latest/implementation/execution.rs @@ -69,6 +69,7 @@ impl Vm { self.batch_env.clone(), execution_mode, self.subversion, + None, )) }), self.subversion, diff --git a/core/lib/multivm/src/versions/vm_latest/tests/call_tracer.rs b/core/lib/multivm/src/versions/vm_latest/tests/call_tracer.rs index e1dfdc7e68c5..b502ea50b1af 100644 --- a/core/lib/multivm/src/versions/vm_latest/tests/call_tracer.rs +++ b/core/lib/multivm/src/versions/vm_latest/tests/call_tracer.rs @@ -5,7 +5,7 @@ use zksync_types::{Address, Execute}; use super::TestedLatestVm; use crate::{ - interface::{TxExecutionMode, VmExecutionMode, VmInterface}, + interface::{InspectExecutionMode, TxExecutionMode, VmInterface}, tracers::CallTracer, versions::testonly::{ read_max_depth_contract, read_test_contract, ContractToDeploy, VmTesterBuilder, @@ -43,7 +43,7 @@ fn test_max_depth() { vm.vm.push_transaction(tx); let res = vm .vm - .inspect(&mut call_tracer.into(), VmExecutionMode::OneTx); + .inspect(&mut call_tracer.into(), InspectExecutionMode::OneTx); assert!(result.get().is_some()); assert!(res.result.is_failed()); } @@ -79,7 +79,7 @@ fn test_basic_behavior() { vm.vm.push_transaction(tx); let res = vm .vm - .inspect(&mut call_tracer.into(), VmExecutionMode::OneTx); + .inspect(&mut call_tracer.into(), InspectExecutionMode::OneTx); let call_tracer_result = result.get().unwrap(); diff --git a/core/lib/multivm/src/versions/vm_latest/tests/mod.rs b/core/lib/multivm/src/versions/vm_latest/tests/mod.rs index 6f748d543d35..96d59f208b03 100644 --- a/core/lib/multivm/src/versions/vm_latest/tests/mod.rs +++ b/core/lib/multivm/src/versions/vm_latest/tests/mod.rs @@ -1,4 +1,7 @@ -use std::collections::{HashMap, HashSet}; +use std::{ + collections::{HashMap, HashSet}, + rc::Rc, +}; use zk_evm_1_5_0::{ aux_structures::{MemoryPage, Timestamp}, @@ -7,6 +10,7 @@ use zk_evm_1_5_0::{ }; use zksync_types::{writes::StateDiffRecord, StorageKey, StorageValue, Transaction, H256, U256}; use zksync_utils::{bytecode::hash_bytecode, bytes_to_be_words, h256_to_u256}; +use zksync_vm_interface::pubdata::PubdataBuilder; use super::{HistoryEnabled, Vm}; use crate::{ @@ -75,18 +79,31 @@ impl TestedVm for TestedLatestVm { self.get_used_contracts().into_iter().collect() } - fn execute_with_state_diffs( + fn finish_batch_with_state_diffs( &mut self, diffs: Vec, - mode: VmExecutionMode, + pubdata_builder: Rc, ) -> VmExecutionResultAndLogs { let pubdata_tracer = PubdataTracer::new_with_forced_state_diffs( self.batch_env.clone(), VmExecutionMode::Batch, diffs, crate::vm_latest::MultiVMSubversion::latest(), + Some(pubdata_builder), ); - self.inspect_inner(&mut TracerDispatcher::default(), mode, Some(pubdata_tracer)) + self.inspect_inner( + &mut TracerDispatcher::default(), + VmExecutionMode::Batch, + Some(pubdata_tracer), + ) + } + + fn finish_batch_without_pubdata(&mut self) -> VmExecutionResultAndLogs { + self.inspect_inner( 
+ &mut TracerDispatcher::default(), + VmExecutionMode::Batch, + None, + ) } fn insert_bytecodes(&mut self, bytecodes: &[&[u8]]) { diff --git a/core/lib/multivm/src/versions/vm_latest/tests/prestate_tracer.rs b/core/lib/multivm/src/versions/vm_latest/tests/prestate_tracer.rs index 838c4e342dcb..7028f7a89711 100644 --- a/core/lib/multivm/src/versions/vm_latest/tests/prestate_tracer.rs +++ b/core/lib/multivm/src/versions/vm_latest/tests/prestate_tracer.rs @@ -6,7 +6,7 @@ use zksync_types::{utils::deployed_address_create, Execute, U256}; use super::TestedLatestVm; use crate::{ - interface::{TxExecutionMode, VmExecutionMode, VmInterface, VmInterfaceExt}, + interface::{InspectExecutionMode, TxExecutionMode, VmInterface, VmInterfaceExt}, tracers::PrestateTracer, versions::testonly::{read_simple_transfer_contract, VmTesterBuilder}, vm_latest::{constants::BATCH_COMPUTATIONAL_GAS_LIMIT, ToTracerPointer}, @@ -38,7 +38,7 @@ fn test_prestate_tracer() { let prestate_tracer = PrestateTracer::new(false, prestate_tracer_result.clone()); let tracer_ptr = prestate_tracer.into_tracer_pointer(); vm.vm - .inspect(&mut tracer_ptr.into(), VmExecutionMode::Batch); + .inspect(&mut tracer_ptr.into(), InspectExecutionMode::OneTx); let prestate_result = Arc::try_unwrap(prestate_tracer_result) .unwrap() @@ -61,7 +61,7 @@ fn test_prestate_tracer_diff_mode() { let tx = account.get_deploy_tx(&contract, None, TxType::L2).tx; let nonce = tx.nonce().unwrap().0.into(); vm.vm.push_transaction(tx); - vm.vm.execute(VmExecutionMode::OneTx); + vm.vm.execute(InspectExecutionMode::OneTx); let deployed_address = deployed_address_create(account.address, nonce); vm.test_contract = Some(deployed_address); @@ -69,7 +69,7 @@ fn test_prestate_tracer_diff_mode() { let tx2 = account.get_deploy_tx(&contract, None, TxType::L2).tx; let nonce2 = tx2.nonce().unwrap().0.into(); vm.vm.push_transaction(tx2); - vm.vm.execute(VmExecutionMode::OneTx); + vm.vm.execute(InspectExecutionMode::OneTx); let deployed_address2 = deployed_address_create(account.address, nonce2); let account = &mut vm.rich_accounts[0]; @@ -98,7 +98,7 @@ fn test_prestate_tracer_diff_mode() { let prestate_tracer = PrestateTracer::new(true, prestate_tracer_result.clone()); let tracer_ptr = prestate_tracer.into_tracer_pointer(); vm.vm - .inspect(&mut tracer_ptr.into(), VmExecutionMode::Bootloader); + .inspect(&mut tracer_ptr.into(), InspectExecutionMode::Bootloader); let prestate_result = Arc::try_unwrap(prestate_tracer_result) .unwrap() diff --git a/core/lib/multivm/src/versions/vm_latest/tests/rollbacks.rs b/core/lib/multivm/src/versions/vm_latest/tests/rollbacks.rs index c948315266ad..de674498427d 100644 --- a/core/lib/multivm/src/versions/vm_latest/tests/rollbacks.rs +++ b/core/lib/multivm/src/versions/vm_latest/tests/rollbacks.rs @@ -2,13 +2,14 @@ use ethabi::Token; use zksync_contracts::{get_loadnext_contract, test_contracts::LoadnextContractExecutionParams}; use zksync_test_account::{DeployContractsTx, TxType}; use zksync_types::{get_nonce_key, U256}; +use zksync_vm_interface::InspectExecutionMode; use super::TestedLatestVm; use crate::{ interface::{ storage::WriteStorage, tracer::{TracerExecutionStatus, TracerExecutionStopReason}, - TxExecutionMode, VmExecutionMode, VmInterface, VmInterfaceExt, VmInterfaceHistoryEnabled, + TxExecutionMode, VmInterface, VmInterfaceExt, VmInterfaceHistoryEnabled, }, tracers::dynamic::vm_1_5_0::DynTracer, versions::testonly::{ @@ -80,7 +81,7 @@ fn test_layered_rollback() { TxType::L2, ); vm.vm.push_transaction(deploy_tx); - let deployment_res = 
vm.vm.execute(VmExecutionMode::OneTx); + let deployment_res = vm.vm.execute(InspectExecutionMode::OneTx); assert!(!deployment_res.result.is_failed(), "transaction failed"); let loadnext_transaction = account.get_loadnext_transaction( @@ -107,7 +108,8 @@ fn test_layered_rollback() { max_recursion_depth: 15, } .into_tracer_pointer(); - vm.vm.inspect(&mut tracer.into(), VmExecutionMode::OneTx); + vm.vm + .inspect(&mut tracer.into(), InspectExecutionMode::OneTx); let nonce_val2 = vm .vm @@ -134,7 +136,7 @@ fn test_layered_rollback() { ); vm.vm.push_transaction(loadnext_transaction); - let result = vm.vm.execute(VmExecutionMode::OneTx); + let result = vm.vm.execute(InspectExecutionMode::OneTx); assert!(!result.result.is_failed(), "transaction must not fail"); } diff --git a/core/lib/multivm/src/versions/vm_latest/tracers/pubdata_tracer.rs b/core/lib/multivm/src/versions/vm_latest/tracers/pubdata_tracer.rs index 32f3984834c8..998e8a13ad25 100644 --- a/core/lib/multivm/src/versions/vm_latest/tracers/pubdata_tracer.rs +++ b/core/lib/multivm/src/versions/vm_latest/tracers/pubdata_tracer.rs @@ -1,4 +1,4 @@ -use std::marker::PhantomData; +use std::{marker::PhantomData, rc::Rc}; use circuit_sequencer_api_1_5_0::sort_storage_access::sort_storage_access_queries; use zk_evm_1_5_0::{ @@ -7,9 +7,11 @@ use zk_evm_1_5_0::{ }; use zksync_types::{writes::StateDiffRecord, AccountTreeId, StorageKey, L1_MESSENGER_ADDRESS}; use zksync_utils::{h256_to_u256, u256_to_bytes_be, u256_to_h256}; +use zksync_vm_interface::pubdata::PubdataBuilder; use crate::{ interface::{ + pubdata::{L1MessengerL2ToL1Log, PubdataInput}, storage::{StoragePtr, WriteStorage}, tracer::{TracerExecutionStatus, TracerExecutionStopReason}, L1BatchEnv, VmEvent, VmExecutionMode, @@ -17,14 +19,14 @@ use crate::{ tracers::dynamic::vm_1_5_0::DynTracer, utils::events::{ extract_bytecode_publication_requests_from_l1_messenger, - extract_l2tol1logs_from_l1_messenger, L1MessengerL2ToL1Log, + extract_l2tol1logs_from_l1_messenger, }, vm_latest::{ bootloader_state::{utils::apply_pubdata_to_memory, BootloaderState}, constants::BOOTLOADER_HEAP_PAGE, old_vm::{history_recorder::HistoryMode, memory::SimpleMemory}, tracers::{traits::VmTracer, utils::VmHook}, - types::internals::{PubdataInput, ZkSyncVmState}, + types::internals::ZkSyncVmState, utils::logs::collect_events_and_l1_system_logs_after_timestamp, vm::MultiVMSubversion, StorageOracle, @@ -41,6 +43,7 @@ pub(crate) struct PubdataTracer { // to the L1Messenger. 
enforced_state_diffs: Option>, subversion: MultiVMSubversion, + pubdata_builder: Option>, _phantom_data: PhantomData, } @@ -49,6 +52,7 @@ impl PubdataTracer { l1_batch_env: L1BatchEnv, execution_mode: VmExecutionMode, subversion: MultiVMSubversion, + pubdata_builder: Option>, ) -> Self { Self { l1_batch_env, @@ -56,6 +60,7 @@ impl PubdataTracer { execution_mode, enforced_state_diffs: None, subversion, + pubdata_builder, _phantom_data: Default::default(), } } @@ -68,6 +73,7 @@ impl PubdataTracer { execution_mode: VmExecutionMode, forced_state_diffs: Vec, subversion: MultiVMSubversion, + pubdata_builder: Option>, ) -> Self { Self { l1_batch_env, @@ -75,6 +81,7 @@ impl PubdataTracer { execution_mode, enforced_state_diffs: Some(forced_state_diffs), subversion, + pubdata_builder, _phantom_data: Default::default(), } } @@ -221,13 +228,22 @@ impl VmTracer for PubdataTracer { if self.pubdata_info_requested { let pubdata_input = self.build_pubdata_input(state); - // Save the pubdata for the future initial bootloader memory building - bootloader_state.set_pubdata_input(pubdata_input.clone()); - // Apply the pubdata to the current memory let mut memory_to_apply = vec![]; - apply_pubdata_to_memory(&mut memory_to_apply, pubdata_input); + apply_pubdata_to_memory( + &mut memory_to_apply, + self.pubdata_builder + .as_ref() + .expect("`pubdata_builder` is required to finish batch") + .as_ref(), + &pubdata_input, + bootloader_state.protocol_version(), + ); + + // Save the pubdata for the future initial bootloader memory building + bootloader_state.set_pubdata_input(pubdata_input); + state.memory.populate_page( BOOTLOADER_HEAP_PAGE as usize, memory_to_apply, diff --git a/core/lib/multivm/src/versions/vm_latest/types/internals/mod.rs b/core/lib/multivm/src/versions/vm_latest/types/internals/mod.rs index 7dc60ec5b0fb..601b7b8bd014 100644 --- a/core/lib/multivm/src/versions/vm_latest/types/internals/mod.rs +++ b/core/lib/multivm/src/versions/vm_latest/types/internals/mod.rs @@ -1,9 +1,7 @@ -pub(crate) use pubdata::PubdataInput; pub(crate) use snapshot::VmSnapshot; pub(crate) use transaction_data::TransactionData; pub(crate) use vm_state::new_vm_state; pub use vm_state::ZkSyncVmState; -mod pubdata; mod snapshot; mod transaction_data; mod vm_state; diff --git a/core/lib/multivm/src/versions/vm_latest/types/internals/pubdata.rs b/core/lib/multivm/src/versions/vm_latest/types/internals/pubdata.rs deleted file mode 100644 index d07732ae4350..000000000000 --- a/core/lib/multivm/src/versions/vm_latest/types/internals/pubdata.rs +++ /dev/null @@ -1,123 +0,0 @@ -use zksync_types::writes::{compress_state_diffs, StateDiffRecord}; - -use crate::utils::events::L1MessengerL2ToL1Log; - -/// Struct based on which the pubdata blob is formed -#[derive(Debug, Clone, Default)] -pub(crate) struct PubdataInput { - pub(crate) user_logs: Vec, - pub(crate) l2_to_l1_messages: Vec>, - pub(crate) published_bytecodes: Vec>, - pub(crate) state_diffs: Vec, -} - -impl PubdataInput { - pub(crate) fn build_pubdata(self, with_uncompressed_state_diffs: bool) -> Vec { - let mut l1_messenger_pubdata = vec![]; - - let PubdataInput { - user_logs, - l2_to_l1_messages, - published_bytecodes, - state_diffs, - } = self; - - // Encoding user L2->L1 logs. - // Format: `[(numberOfL2ToL1Logs as u32) || l2tol1logs[1] || ... 
|| l2tol1logs[n]]` - l1_messenger_pubdata.extend((user_logs.len() as u32).to_be_bytes()); - for l2tol1log in user_logs { - l1_messenger_pubdata.extend(l2tol1log.packed_encoding()); - } - - // Encoding L2->L1 messages - // Format: `[(numberOfMessages as u32) || (messages[1].len() as u32) || messages[1] || ... || (messages[n].len() as u32) || messages[n]]` - l1_messenger_pubdata.extend((l2_to_l1_messages.len() as u32).to_be_bytes()); - for message in l2_to_l1_messages { - l1_messenger_pubdata.extend((message.len() as u32).to_be_bytes()); - l1_messenger_pubdata.extend(message); - } - - // Encoding bytecodes - // Format: `[(numberOfBytecodes as u32) || (bytecodes[1].len() as u32) || bytecodes[1] || ... || (bytecodes[n].len() as u32) || bytecodes[n]]` - l1_messenger_pubdata.extend((published_bytecodes.len() as u32).to_be_bytes()); - for bytecode in published_bytecodes { - l1_messenger_pubdata.extend((bytecode.len() as u32).to_be_bytes()); - l1_messenger_pubdata.extend(bytecode); - } - - // Encoding state diffs - // Format: `[size of compressed state diffs u32 || compressed state diffs || (# state diffs: intial + repeated) as u32 || sorted state diffs by ]` - let state_diffs_compressed = compress_state_diffs(state_diffs.clone()); - l1_messenger_pubdata.extend(state_diffs_compressed); - - if with_uncompressed_state_diffs { - l1_messenger_pubdata.extend((state_diffs.len() as u32).to_be_bytes()); - for state_diff in state_diffs { - l1_messenger_pubdata.extend(state_diff.encode_padded()); - } - } - - l1_messenger_pubdata - } -} - -#[cfg(test)] -mod tests { - use zksync_system_constants::{ACCOUNT_CODE_STORAGE_ADDRESS, BOOTLOADER_ADDRESS}; - use zksync_utils::u256_to_h256; - - use super::*; - - #[test] - fn test_basic_pubdata_building() { - // Just using some constant addresses for tests - let addr1 = BOOTLOADER_ADDRESS; - let addr2 = ACCOUNT_CODE_STORAGE_ADDRESS; - - let user_logs = vec![L1MessengerL2ToL1Log { - l2_shard_id: 0, - is_service: false, - tx_number_in_block: 0, - sender: addr1, - key: 1.into(), - value: 128.into(), - }]; - - let l2_to_l1_messages = vec![hex::decode("deadbeef").unwrap()]; - - let published_bytecodes = vec![hex::decode("aaaabbbb").unwrap()]; - - // For covering more cases, we have two state diffs: - // One with enumeration index present (and so it is a repeated write) and the one without it. 
- let state_diffs = vec![ - StateDiffRecord { - address: addr2, - key: 155.into(), - derived_key: u256_to_h256(125.into()).0, - enumeration_index: 12, - initial_value: 11.into(), - final_value: 12.into(), - }, - StateDiffRecord { - address: addr2, - key: 156.into(), - derived_key: u256_to_h256(126.into()).0, - enumeration_index: 0, - initial_value: 0.into(), - final_value: 14.into(), - }, - ]; - - let input = PubdataInput { - user_logs, - l2_to_l1_messages, - published_bytecodes, - state_diffs, - }; - - let pubdata = - ethabi::encode(&[ethabi::Token::Bytes(input.build_pubdata(true))])[32..].to_vec(); - - assert_eq!(hex::encode(pubdata), "00000000000000000000000000000000000000000000000000000000000002c700000001000000000000000000000000000000000000000000008001000000000000000000000000000000000000000000000000000000000000000100000000000000000000000000000000000000000000000000000000000000800000000100000004deadbeef0000000100000004aaaabbbb0100002a040001000000000000000000000000000000000000000000000000000000000000007e090e0000000c0901000000020000000000000000000000000000000000008002000000000000000000000000000000000000000000000000000000000000009b000000000000000000000000000000000000000000000000000000000000007d000000000000000c000000000000000000000000000000000000000000000000000000000000000b000000000000000000000000000000000000000000000000000000000000000c00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000008002000000000000000000000000000000000000000000000000000000000000009c000000000000000000000000000000000000000000000000000000000000007e00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000e000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000"); - } -} diff --git a/core/lib/multivm/src/versions/vm_latest/types/internals/vm_state.rs b/core/lib/multivm/src/versions/vm_latest/types/internals/vm_state.rs index cb4b13eecdf0..d25f66361f1b 100644 --- a/core/lib/multivm/src/versions/vm_latest/types/internals/vm_state.rs +++ b/core/lib/multivm/src/versions/vm_latest/types/internals/vm_state.rs @@ -191,6 +191,7 @@ pub(crate) fn new_vm_state( system_env.execution_mode, bootloader_initial_memory, first_l2_block, + system_env.version, ); (vm, bootloader_state) diff --git a/core/lib/multivm/src/versions/vm_latest/vm.rs b/core/lib/multivm/src/versions/vm_latest/vm.rs index 3a36b008e884..ef6cee454a87 100644 --- a/core/lib/multivm/src/versions/vm_latest/vm.rs +++ b/core/lib/multivm/src/versions/vm_latest/vm.rs @@ -1,4 +1,4 @@ -use std::collections::HashMap; +use std::{collections::HashMap, rc::Rc}; use circuit_sequencer_api_1_5_0::sort_storage_access::sort_storage_access_queries; use zksync_types::{ @@ -7,6 +7,7 @@ use zksync_types::{ Transaction, H256, }; use zksync_utils::{be_words_to_bytes, h256_to_u256, u256_to_h256}; +use zksync_vm_interface::{pubdata::PubdataBuilder, InspectExecutionMode}; use crate::{ glue::GlueInto, @@ -21,7 +22,7 @@ use crate::{ vm_latest::{ bootloader_state::BootloaderState, old_vm::{events::merge_events, history_recorder::HistoryEnabled}, - 
tracers::dispatcher::TracerDispatcher, + tracers::{dispatcher::TracerDispatcher, PubdataTracer}, types::internals::{new_vm_state, VmSnapshot, ZkSyncVmState}, }, HistoryMode, @@ -38,6 +39,8 @@ pub(crate) enum MultiVMSubversion { SmallBootloaderMemory, /// The final correct version of v1.5.0 IncreasedBootloaderMemory, + /// VM for post-gateway versions. + Gateway, } impl MultiVMSubversion { @@ -55,6 +58,7 @@ impl TryFrom for MultiVMSubversion { match value { VmVersion::Vm1_5_0SmallBootloaderMemory => Ok(Self::SmallBootloaderMemory), VmVersion::Vm1_5_0IncreasedBootloaderMemory => Ok(Self::IncreasedBootloaderMemory), + VmVersion::VmGateway => Ok(Self::Gateway), _ => Err(VmVersionIsNotVm150Error), } } @@ -148,9 +152,9 @@ impl VmInterface for Vm { fn inspect( &mut self, tracer: &mut Self::TracerDispatcher, - execution_mode: VmExecutionMode, + execution_mode: InspectExecutionMode, ) -> VmExecutionResultAndLogs { - self.inspect_inner(tracer, execution_mode, None) + self.inspect_inner(tracer, execution_mode.into(), None) } fn start_new_l2_block(&mut self, l2_block_env: L2BlockEnv) { @@ -182,19 +186,30 @@ impl VmInterface for Vm { } } - fn finish_batch(&mut self) -> FinishedL1Batch { - let result = self.inspect(&mut TracerDispatcher::default(), VmExecutionMode::Batch); + fn finish_batch(&mut self, pubdata_builder: Rc) -> FinishedL1Batch { + let pubdata_tracer = Some(PubdataTracer::new( + self.batch_env.clone(), + VmExecutionMode::Batch, + self.subversion, + Some(pubdata_builder.clone()), + )); + + let result = self.inspect_inner( + &mut TracerDispatcher::default(), + VmExecutionMode::Batch, + pubdata_tracer, + ); let execution_state = self.get_current_execution_state(); - let bootloader_memory = self.bootloader_state.bootloader_memory(); + let bootloader_memory = self + .bootloader_state + .bootloader_memory(pubdata_builder.as_ref()); FinishedL1Batch { block_tip_execution_result: result, final_execution_state: execution_state, final_bootloader_memory: Some(bootloader_memory), pubdata_input: Some( self.bootloader_state - .get_pubdata_information() - .clone() - .build_pubdata(false), + .settlement_layer_pubdata(pubdata_builder.as_ref()), ), state_diffs: Some( self.bootloader_state diff --git a/core/lib/multivm/src/versions/vm_m5/vm.rs b/core/lib/multivm/src/versions/vm_m5/vm.rs index 3d57d1cd5439..55afeed17cd1 100644 --- a/core/lib/multivm/src/versions/vm_m5/vm.rs +++ b/core/lib/multivm/src/versions/vm_m5/vm.rs @@ -1,13 +1,15 @@ +use std::rc::Rc; + use zksync_types::{vm::VmVersion, Transaction}; use zksync_utils::h256_to_u256; +use zksync_vm_interface::{pubdata::PubdataBuilder, InspectExecutionMode}; use crate::{ glue::{history_mode::HistoryMode, GlueInto}, interface::{ storage::StoragePtr, BytecodeCompressionResult, FinishedL1Batch, L1BatchEnv, L2BlockEnv, - PushTransactionResult, SystemEnv, TxExecutionMode, VmExecutionMode, - VmExecutionResultAndLogs, VmFactory, VmInterface, VmInterfaceHistoryEnabled, - VmMemoryMetrics, + PushTransactionResult, SystemEnv, TxExecutionMode, VmExecutionResultAndLogs, VmFactory, + VmInterface, VmInterfaceHistoryEnabled, VmMemoryMetrics, }, vm_m5::{ storage::Storage, @@ -75,10 +77,10 @@ impl VmInterface for Vm { fn inspect( &mut self, _tracer: &mut Self::TracerDispatcher, - execution_mode: VmExecutionMode, + execution_mode: InspectExecutionMode, ) -> VmExecutionResultAndLogs { match execution_mode { - VmExecutionMode::OneTx => match self.system_env.execution_mode { + InspectExecutionMode::OneTx => match self.system_env.execution_mode { TxExecutionMode::VerifyExecute => 
self.vm.execute_next_tx().glue_into(), TxExecutionMode::EstimateFee | TxExecutionMode::EthCall => self .vm @@ -87,8 +89,7 @@ impl VmInterface for Vm { ) .glue_into(), }, - VmExecutionMode::Batch => self.finish_batch().block_tip_execution_result, - VmExecutionMode::Bootloader => self.vm.execute_block_tip().glue_into(), + InspectExecutionMode::Bootloader => self.vm.execute_block_tip().glue_into(), } } @@ -110,11 +111,11 @@ impl VmInterface for Vm { // Bytecode compression isn't supported ( Ok(vec![].into()), - self.inspect(&mut (), VmExecutionMode::OneTx), + self.inspect(&mut (), InspectExecutionMode::OneTx), ) } - fn finish_batch(&mut self) -> FinishedL1Batch { + fn finish_batch(&mut self, _pubdata_builder: Rc) -> FinishedL1Batch { self.vm .execute_till_block_end( crate::vm_m5::vm_with_bootloader::BootloaderJobType::BlockPostprocessing, diff --git a/core/lib/multivm/src/versions/vm_m6/vm.rs b/core/lib/multivm/src/versions/vm_m6/vm.rs index 1ee6aa618220..4c67a2184180 100644 --- a/core/lib/multivm/src/versions/vm_m6/vm.rs +++ b/core/lib/multivm/src/versions/vm_m6/vm.rs @@ -1,13 +1,14 @@ -use std::collections::HashSet; +use std::{collections::HashSet, rc::Rc}; use zksync_types::{vm::VmVersion, Transaction}; use zksync_utils::{bytecode::hash_bytecode, h256_to_u256}; +use zksync_vm_interface::{pubdata::PubdataBuilder, InspectExecutionMode}; use crate::{ glue::{history_mode::HistoryMode, GlueInto}, interface::{ storage::StoragePtr, BytecodeCompressionError, BytecodeCompressionResult, FinishedL1Batch, - L1BatchEnv, L2BlockEnv, PushTransactionResult, SystemEnv, TxExecutionMode, VmExecutionMode, + L1BatchEnv, L2BlockEnv, PushTransactionResult, SystemEnv, TxExecutionMode, VmExecutionResultAndLogs, VmFactory, VmInterface, VmInterfaceHistoryEnabled, VmMemoryMetrics, }, @@ -88,7 +89,7 @@ impl VmInterface for Vm { fn inspect( &mut self, tracer: &mut Self::TracerDispatcher, - execution_mode: VmExecutionMode, + execution_mode: InspectExecutionMode, ) -> VmExecutionResultAndLogs { if let Some(storage_invocations) = tracer.storage_invocations { self.vm @@ -97,7 +98,7 @@ impl VmInterface for Vm { } match execution_mode { - VmExecutionMode::OneTx => match self.system_env.execution_mode { + InspectExecutionMode::OneTx => match self.system_env.execution_mode { TxExecutionMode::VerifyExecute => { let enable_call_tracer = tracer.call_tracer.is_some(); let result = self.vm.execute_next_tx( @@ -116,8 +117,7 @@ impl VmInterface for Vm { ) .glue_into(), }, - VmExecutionMode::Batch => self.finish_batch().block_tip_execution_result, - VmExecutionMode::Bootloader => self.vm.execute_block_tip().glue_into(), + InspectExecutionMode::Bootloader => self.vm.execute_block_tip().glue_into(), } } @@ -207,7 +207,7 @@ impl VmInterface for Vm { } } - fn finish_batch(&mut self) -> FinishedL1Batch { + fn finish_batch(&mut self, _pubdata_builder: Rc) -> FinishedL1Batch { self.vm .execute_till_block_end( crate::vm_m6::vm_with_bootloader::BootloaderJobType::BlockPostprocessing, diff --git a/core/lib/multivm/src/versions/vm_refunds_enhancement/vm.rs b/core/lib/multivm/src/versions/vm_refunds_enhancement/vm.rs index 2bcd68bec044..81b0c52cce5e 100644 --- a/core/lib/multivm/src/versions/vm_refunds_enhancement/vm.rs +++ b/core/lib/multivm/src/versions/vm_refunds_enhancement/vm.rs @@ -1,5 +1,8 @@ +use std::rc::Rc; + use circuit_sequencer_api_1_3_3::sort_storage_access::sort_storage_access_queries; use zksync_types::{l2_to_l1_log::UserL2ToL1Log, Transaction}; +use zksync_vm_interface::{pubdata::PubdataBuilder, InspectExecutionMode}; use 
crate::{ glue::GlueInto, @@ -88,9 +91,9 @@ impl VmInterface for Vm { fn inspect( &mut self, dispatcher: &mut Self::TracerDispatcher, - execution_mode: VmExecutionMode, + execution_mode: InspectExecutionMode, ) -> VmExecutionResultAndLogs { - self.inspect_inner(dispatcher, execution_mode) + self.inspect_inner(dispatcher, execution_mode.into()) } fn start_new_l2_block(&mut self, l2_block_env: L2BlockEnv) { @@ -105,7 +108,7 @@ impl VmInterface for Vm { with_compression: bool, ) -> (BytecodeCompressionResult<'_>, VmExecutionResultAndLogs) { self.push_transaction_with_compression(tx, with_compression); - let result = self.inspect(dispatcher, VmExecutionMode::OneTx); + let result = self.inspect(dispatcher, InspectExecutionMode::OneTx); if self.has_unpublished_bytecodes() { ( Err(BytecodeCompressionError::BytecodeCompressionFailed), @@ -122,8 +125,8 @@ impl VmInterface for Vm { } } - fn finish_batch(&mut self) -> FinishedL1Batch { - let result = self.inspect(&mut TracerDispatcher::default(), VmExecutionMode::Batch); + fn finish_batch(&mut self, _pubdata_builder: Rc) -> FinishedL1Batch { + let result = self.inspect_inner(&mut TracerDispatcher::default(), VmExecutionMode::Batch); let execution_state = self.get_current_execution_state(); let bootloader_memory = self.bootloader_state.bootloader_memory(); FinishedL1Batch { diff --git a/core/lib/multivm/src/versions/vm_virtual_blocks/vm.rs b/core/lib/multivm/src/versions/vm_virtual_blocks/vm.rs index 497128c64bd9..a2d18e10de44 100644 --- a/core/lib/multivm/src/versions/vm_virtual_blocks/vm.rs +++ b/core/lib/multivm/src/versions/vm_virtual_blocks/vm.rs @@ -1,5 +1,8 @@ +use std::rc::Rc; + use circuit_sequencer_api_1_3_3::sort_storage_access::sort_storage_access_queries; use zksync_types::{l2_to_l1_log::UserL2ToL1Log, Transaction}; +use zksync_vm_interface::{pubdata::PubdataBuilder, InspectExecutionMode}; use crate::{ glue::GlueInto, @@ -88,9 +91,9 @@ impl VmInterface for Vm { fn inspect( &mut self, tracer: &mut TracerDispatcher, - execution_mode: VmExecutionMode, + execution_mode: InspectExecutionMode, ) -> VmExecutionResultAndLogs { - self.inspect_inner(tracer, execution_mode) + self.inspect_inner(tracer, execution_mode.into()) } fn start_new_l2_block(&mut self, l2_block_env: L2BlockEnv) { @@ -122,8 +125,8 @@ impl VmInterface for Vm { } } - fn finish_batch(&mut self) -> FinishedL1Batch { - let result = self.inspect(&mut TracerDispatcher::default(), VmExecutionMode::Batch); + fn finish_batch(&mut self, _pubdata_builder: Rc) -> FinishedL1Batch { + let result = self.inspect_inner(&mut TracerDispatcher::default(), VmExecutionMode::Batch); let execution_state = self.get_current_execution_state(); let bootloader_memory = self.bootloader_state.bootloader_memory(); FinishedL1Batch { diff --git a/core/lib/multivm/src/vm_instance.rs b/core/lib/multivm/src/vm_instance.rs index 43a6c48aa9c5..5ff27046377a 100644 --- a/core/lib/multivm/src/vm_instance.rs +++ b/core/lib/multivm/src/vm_instance.rs @@ -1,7 +1,8 @@ -use std::mem; +use std::{mem, rc::Rc}; use zksync_types::{vm::VmVersion, ProtocolVersionId, Transaction}; use zksync_vm2::interface::Tracer; +use zksync_vm_interface::{pubdata::PubdataBuilder, InspectExecutionMode}; use crate::{ glue::history_mode::HistoryMode, @@ -9,8 +10,8 @@ use crate::{ storage::{ImmutableStorageView, ReadStorage, StoragePtr, StorageView}, utils::ShadowVm, BytecodeCompressionResult, FinishedL1Batch, L1BatchEnv, L2BlockEnv, PushTransactionResult, - SystemEnv, VmExecutionMode, VmExecutionResultAndLogs, VmFactory, VmInterface, - 
VmInterfaceHistoryEnabled, VmMemoryMetrics, + SystemEnv, VmExecutionResultAndLogs, VmFactory, VmInterface, VmInterfaceHistoryEnabled, + VmMemoryMetrics, }, tracers::TracerDispatcher, vm_latest::HistoryEnabled, @@ -63,7 +64,7 @@ impl VmInterface for LegacyVmInstance { fn inspect( &mut self, dispatcher: &mut Self::TracerDispatcher, - execution_mode: VmExecutionMode, + execution_mode: InspectExecutionMode, ) -> VmExecutionResultAndLogs { dispatch_legacy_vm!(self.inspect(&mut mem::take(dispatcher).into(), execution_mode)) } @@ -87,8 +88,8 @@ impl VmInterface for LegacyVmInstance { } /// Return the results of execution of all batch - fn finish_batch(&mut self) -> FinishedL1Batch { - dispatch_legacy_vm!(self.finish_batch()) + fn finish_batch(&mut self, pubdata_builder: Rc) -> FinishedL1Batch { + dispatch_legacy_vm!(self.finish_batch(pubdata_builder)) } } @@ -206,6 +207,15 @@ impl LegacyVmInstance { ); Self::Vm1_5_0(vm) } + VmVersion::VmGateway => { + let vm = crate::vm_latest::Vm::new_with_subversion( + l1_batch_env, + system_env, + storage_view, + crate::vm_latest::MultiVMSubversion::Gateway, + ); + Self::Vm1_5_0(vm) + } } } @@ -253,7 +263,7 @@ impl VmInterface for FastVmInsta fn inspect( &mut self, tracer: &mut Self::TracerDispatcher, - execution_mode: VmExecutionMode, + execution_mode: InspectExecutionMode, ) -> VmExecutionResultAndLogs { match self { Self::Fast(vm) => vm.inspect(&mut tracer.1, execution_mode), @@ -283,8 +293,8 @@ impl VmInterface for FastVmInsta } } - fn finish_batch(&mut self) -> FinishedL1Batch { - dispatch_fast_vm!(self.finish_batch()) + fn finish_batch(&mut self, pubdata_builder: Rc) -> FinishedL1Batch { + dispatch_fast_vm!(self.finish_batch(pubdata_builder)) } } diff --git a/core/lib/protobuf_config/src/contracts.rs b/core/lib/protobuf_config/src/contracts.rs index 84c404367503..84f03c5afe3a 100644 --- a/core/lib/protobuf_config/src/contracts.rs +++ b/core/lib/protobuf_config/src/contracts.rs @@ -76,6 +76,12 @@ impl ProtoRepr for proto::Contracts { .map(|x| parse_h160(x)) .transpose() .context("l2_shared_bridge_addr")?, + l2_legacy_shared_bridge_addr: l2 + .legacy_shared_bridge_addr + .as_ref() + .map(|x| parse_h160(x)) + .transpose() + .context("l2_legacy_shared_bridge_addr")?, l1_weth_bridge_proxy_addr: weth_bridge .as_ref() .and_then(|bridge| bridge.l1_address.as_ref().map(|x| parse_h160(x))) @@ -107,6 +113,12 @@ impl ProtoRepr for proto::Contracts { .map(|x| parse_h160(x)) .transpose() .context("chain_admin_addr")?, + l2_da_validator_addr: l2 + .da_validator_addr + .as_ref() + .map(|x| parse_h160(x)) + .transpose() + .context("l2_da_validator_addr")?, }) } @@ -142,6 +154,10 @@ impl ProtoRepr for proto::Contracts { }), l2: Some(proto::L2 { testnet_paymaster_addr: this.l2_testnet_paymaster_addr.map(|a| format!("{:?}", a)), + da_validator_addr: this.l2_da_validator_addr.map(|a| format!("{:?}", a)), + legacy_shared_bridge_addr: this + .l2_legacy_shared_bridge_addr + .map(|a| format!("{:?}", a)), }), bridges: Some(proto::Bridges { shared: Some(proto::Bridge { diff --git a/core/lib/protobuf_config/src/proto/config/contracts.proto b/core/lib/protobuf_config/src/proto/config/contracts.proto index f4488c7901a1..6ab03e6aa11b 100644 --- a/core/lib/protobuf_config/src/proto/config/contracts.proto +++ b/core/lib/protobuf_config/src/proto/config/contracts.proto @@ -21,6 +21,8 @@ message L1 { message L2 { optional string testnet_paymaster_addr = 1; // optional; H160 + optional string da_validator_addr = 2; // optional; H160 + optional string legacy_shared_bridge_addr = 3; // 
optional; H160 } message Bridge { diff --git a/core/lib/prover_interface/src/inputs.rs b/core/lib/prover_interface/src/inputs.rs index 97de24f42dae..cfc1d4a0d552 100644 --- a/core/lib/prover_interface/src/inputs.rs +++ b/core/lib/prover_interface/src/inputs.rs @@ -5,7 +5,7 @@ use serde_with::{serde_as, Bytes}; use zksync_multivm::interface::{L1BatchEnv, SystemEnv}; use zksync_object_store::{_reexports::BoxedError, serialize_using_bincode, Bucket, StoredObject}; use zksync_types::{ - basic_fri_types::Eip4844Blobs, block::L2BlockExecutionData, + basic_fri_types::Eip4844Blobs, block::L2BlockExecutionData, commitment::PubdataParams, witness_block_state::WitnessStorageState, L1BatchNumber, ProtocolVersionId, H256, U256, }; @@ -269,6 +269,7 @@ pub struct V1TeeVerifierInput { pub l2_blocks_execution_data: Vec, pub l1_batch_env: L1BatchEnv, pub system_env: SystemEnv, + pub pubdata_params: PubdataParams, } impl V1TeeVerifierInput { @@ -278,6 +279,7 @@ impl V1TeeVerifierInput { l2_blocks_execution_data: Vec, l1_batch_env: L1BatchEnv, system_env: SystemEnv, + pubdata_params: PubdataParams, ) -> Self { V1TeeVerifierInput { vm_run_data, @@ -285,6 +287,7 @@ impl V1TeeVerifierInput { l2_blocks_execution_data, l1_batch_env, system_env, + pubdata_params, } } } diff --git a/core/lib/snapshots_applier/src/tests/utils.rs b/core/lib/snapshots_applier/src/tests/utils.rs index 2c9b1440af2a..cf68d2e181a6 100644 --- a/core/lib/snapshots_applier/src/tests/utils.rs +++ b/core/lib/snapshots_applier/src/tests/utils.rs @@ -182,6 +182,7 @@ pub(super) fn mock_l2_block_header(l2_block_number: L2BlockNumber) -> L2BlockHea virtual_blocks: 0, gas_limit: 0, logs_bloom: Default::default(), + pubdata_params: Default::default(), } } diff --git a/core/lib/state/src/test_utils.rs b/core/lib/state/src/test_utils.rs index decb2a0f403d..a12508f615f0 100644 --- a/core/lib/state/src/test_utils.rs +++ b/core/lib/state/src/test_utils.rs @@ -88,6 +88,7 @@ pub(crate) async fn create_l2_block( virtual_blocks: 0, gas_limit: 0, logs_bloom: Default::default(), + pubdata_params: Default::default(), }; conn.blocks_dal() diff --git a/core/lib/tee_verifier/src/lib.rs b/core/lib/tee_verifier/src/lib.rs index ffe3a548a02b..140085dbb9fe 100644 --- a/core/lib/tee_verifier/src/lib.rs +++ b/core/lib/tee_verifier/src/lib.rs @@ -15,6 +15,7 @@ use zksync_multivm::{ FinishedL1Batch, L2BlockEnv, VmFactory, VmInterface, VmInterfaceExt, VmInterfaceHistoryEnabled, }, + pubdata_builders::pubdata_params_to_builder, vm_latest::HistoryEnabled, LegacyVmInstance, }; @@ -22,7 +23,8 @@ use zksync_prover_interface::inputs::{ StorageLogMetadata, V1TeeVerifierInput, WitnessInputMerklePaths, }; use zksync_types::{ - block::L2BlockExecutionData, L1BatchNumber, StorageLog, StorageValue, Transaction, H256, + block::L2BlockExecutionData, commitment::PubdataParams, L1BatchNumber, StorageLog, + StorageValue, Transaction, H256, }; use zksync_utils::u256_to_h256; @@ -88,7 +90,7 @@ impl Verify for V1TeeVerifierInput { let storage_snapshot = StorageSnapshot::new(storage, factory_deps); let storage_view = StorageView::new(storage_snapshot).to_rc_ptr(); let vm = LegacyVmInstance::new(self.l1_batch_env, self.system_env, storage_view); - let vm_out = execute_vm(self.l2_blocks_execution_data, vm)?; + let vm_out = execute_vm(self.l2_blocks_execution_data, vm, self.pubdata_params)?; let block_output_with_proofs = get_bowp(self.merkle_paths)?; @@ -178,6 +180,7 @@ fn get_bowp(witness_input_merkle_paths: WitnessInputMerklePaths) -> Result( l2_blocks_execution_data: Vec, mut vm: 
LegacyVmInstance, + pubdata_params: PubdataParams, ) -> anyhow::Result { let next_l2_blocks_data = l2_blocks_execution_data.iter().skip(1); @@ -206,7 +209,7 @@ fn execute_vm( tracing::trace!("about to vm.finish_batch()"); - Ok(vm.finish_batch()) + Ok(vm.finish_batch(pubdata_params_to_builder(pubdata_params))) } /// Map `LogQuery` and `TreeLogEntry` to a `TreeInstruction` @@ -356,6 +359,7 @@ mod tests { default_validation_computational_gas_limit: 0, chain_id: Default::default(), }, + Default::default(), ); let tvi = TeeVerifierInput::new(tvi); let serialized = bincode::serialize(&tvi).expect("Failed to serialize TeeVerifierInput."); diff --git a/core/lib/types/src/api/en.rs b/core/lib/types/src/api/en.rs index 209ab7c24f98..daaa5651a032 100644 --- a/core/lib/types/src/api/en.rs +++ b/core/lib/types/src/api/en.rs @@ -1,7 +1,7 @@ //! API types related to the External Node specific methods. use serde::{Deserialize, Serialize}; -use zksync_basic_types::{Address, L1BatchNumber, L2BlockNumber, H256}; +use zksync_basic_types::{commitment::PubdataParams, Address, L1BatchNumber, L2BlockNumber, H256}; use zksync_contracts::BaseSystemContractsHashes; use crate::ProtocolVersionId; @@ -42,6 +42,8 @@ pub struct SyncBlock { pub hash: Option, /// Version of the protocol used for this block. pub protocol_version: ProtocolVersionId, + /// Pubdata params used for this batch + pub pubdata_params: Option, } /// Global configuration of the consensus served by the main node to the external nodes. diff --git a/core/lib/types/src/api/mod.rs b/core/lib/types/src/api/mod.rs index b8f8a2f05841..a4eb64605534 100644 --- a/core/lib/types/src/api/mod.rs +++ b/core/lib/types/src/api/mod.rs @@ -206,6 +206,7 @@ pub struct BridgeAddresses { pub l2_erc20_default_bridge: Option
<Address>, pub l1_weth_bridge: Option<Address>
, pub l2_weth_bridge: Option<Address>
, + pub l2_legacy_shared_bridge: Option<Address>
, } #[derive(Debug, Default, Clone, PartialEq, Serialize, Deserialize)] diff --git a/core/lib/types/src/block.rs b/core/lib/types/src/block.rs index 9211a6f1d8cf..310e3a73b8e8 100644 --- a/core/lib/types/src/block.rs +++ b/core/lib/types/src/block.rs @@ -1,7 +1,7 @@ use std::{fmt, ops}; use serde::{Deserialize, Serialize}; -use zksync_basic_types::{Address, Bloom, BloomInput, H256, U256}; +use zksync_basic_types::{commitment::PubdataParams, Address, Bloom, BloomInput, H256, U256}; use zksync_contracts::BaseSystemContractsHashes; use zksync_system_constants::SYSTEM_BLOCK_INFO_BLOCK_NUMBER_MULTIPLIER; use zksync_utils::concat_and_hash; @@ -113,6 +113,7 @@ pub struct L2BlockHeader { /// amount of gas can be spent on pubdata. pub gas_limit: u64, pub logs_bloom: Bloom, + pub pubdata_params: PubdataParams, } /// Structure that represents the data is returned by the storage oracle during batch execution. diff --git a/core/lib/types/src/commitment/mod.rs b/core/lib/types/src/commitment/mod.rs index 759ee8947ba9..40532a1e5899 100644 --- a/core/lib/types/src/commitment/mod.rs +++ b/core/lib/types/src/commitment/mod.rs @@ -9,11 +9,12 @@ use std::{collections::HashMap, convert::TryFrom}; use serde::{Deserialize, Serialize}; -pub use zksync_basic_types::commitment::L1BatchCommitmentMode; +pub use zksync_basic_types::commitment::{L1BatchCommitmentMode, PubdataParams}; use zksync_contracts::BaseSystemContractsHashes; +use zksync_crypto_primitives::hasher::{keccak::KeccakHasher, Hasher}; use zksync_mini_merkle_tree::MiniMerkleTree; use zksync_system_constants::{ - KNOWN_CODES_STORAGE_ADDRESS, L2_TO_L1_LOGS_TREE_ROOT_KEY, STATE_DIFF_HASH_KEY, + KNOWN_CODES_STORAGE_ADDRESS, L2_TO_L1_LOGS_TREE_ROOT_KEY, STATE_DIFF_HASH_KEY_PRE_GATEWAY, ZKPORTER_IS_AVAILABLE, }; use zksync_utils::u256_to_h256; @@ -22,8 +23,8 @@ use crate::{ blob::num_blobs_required, block::{L1BatchHeader, L1BatchTreeData}, l2_to_l1_log::{ - l2_to_l1_logs_tree_size, parse_system_logs_for_blob_hashes, L2ToL1Log, SystemL2ToL1Log, - UserL2ToL1Log, + l2_to_l1_logs_tree_size, parse_system_logs_for_blob_hashes_pre_gateway, L2ToL1Log, + SystemL2ToL1Log, UserL2ToL1Log, }, web3::keccak256, writes::{ @@ -92,6 +93,16 @@ pub struct L1BatchMetadata { /// commitment to the transactions in the batch. pub bootloader_initial_content_commitment: Option, pub state_diffs_compressed: Vec, + /// Hash of packed state diffs. It's present only for post-gateway batches. + pub state_diff_hash: Option, + /// Root hash of the local logs tree. Tree contains logs that were produced on this chain. + /// It's present only for post-gateway batches. + pub local_root: Option, + /// Root hash of the aggregated logs tree. Tree aggregates `local_root`s of chains that settle on this chain. + /// It's present only for post-gateway batches. + pub aggregation_root: Option, + /// Data Availability inclusion proof, that has to be verified on the settlement layer. 
+ pub da_inclusion_data: Option>, } impl L1BatchMetadata { @@ -265,6 +276,13 @@ pub struct L1BatchAuxiliaryCommonOutput { protocol_version: ProtocolVersionId, } +#[derive(Debug, Default, Clone, Copy, Eq, PartialEq)] +#[cfg_attr(test, derive(Serialize, Deserialize))] +pub struct BlobHash { + pub commitment: H256, + pub linear_hash: H256, +} + /// Block Output produced by Virtual Machine #[derive(Debug, Clone, Eq, PartialEq)] #[cfg_attr(test, derive(Serialize, Deserialize))] @@ -283,8 +301,9 @@ pub enum L1BatchAuxiliaryOutput { state_diffs_compressed: Vec, state_diffs_hash: H256, aux_commitments: AuxCommitments, - blob_linear_hashes: Vec, - blob_commitments: Vec, + blob_hashes: Vec, + aggregation_root: H256, + local_root: H256, }, } @@ -333,17 +352,23 @@ impl L1BatchAuxiliaryOutput { system_logs, state_diffs, aux_commitments, - blob_commitments, + blob_hashes, + aggregation_root, } => { let l2_l1_logs_compressed = serialize_commitments(&common_input.l2_to_l1_logs); let merkle_tree_leaves = l2_l1_logs_compressed .chunks(UserL2ToL1Log::SERIALIZED_SIZE) .map(|chunk| <[u8; UserL2ToL1Log::SERIALIZED_SIZE]>::try_from(chunk).unwrap()); - let l2_l1_logs_merkle_root = MiniMerkleTree::new( + let local_root = MiniMerkleTree::new( merkle_tree_leaves, Some(l2_to_l1_logs_tree_size(common_input.protocol_version)), ) .merkle_root(); + let l2_l1_logs_merkle_root = if common_input.protocol_version.is_pre_gateway() { + local_root + } else { + KeccakHasher.compress(&local_root, &aggregation_root) + }; let common_output = L1BatchAuxiliaryCommonOutput { l2_l1_logs_merkle_root, @@ -357,22 +382,33 @@ impl L1BatchAuxiliaryOutput { let state_diffs_hash = H256::from(keccak256(&(state_diffs_packed))); let state_diffs_compressed = compress_state_diffs(state_diffs); - let blob_linear_hashes = - parse_system_logs_for_blob_hashes(&common_input.protocol_version, &system_logs); - // Sanity checks. System logs are empty for the genesis batch, so we can't do checks for it. 
if !system_logs.is_empty() { - let state_diff_hash_from_logs = system_logs - .iter() - .find_map(|log| { - (log.0.key == u256_to_h256(STATE_DIFF_HASH_KEY.into())) - .then_some(log.0.value) - }) - .expect("Failed to find state diff hash in system logs"); - assert_eq!( - state_diffs_hash, state_diff_hash_from_logs, - "State diff hash mismatch" - ); + if common_input.protocol_version.is_pre_gateway() { + let state_diff_hash_from_logs = system_logs + .iter() + .find_map(|log| { + (log.0.key == u256_to_h256(STATE_DIFF_HASH_KEY_PRE_GATEWAY.into())) + .then_some(log.0.value) + }) + .expect("Failed to find state diff hash in system logs"); + assert_eq!( + state_diffs_hash, state_diff_hash_from_logs, + "State diff hash mismatch" + ); + + let blob_linear_hashes_from_logs = + parse_system_logs_for_blob_hashes_pre_gateway( + &common_input.protocol_version, + &system_logs, + ); + let blob_linear_hashes: Vec<_> = + blob_hashes.iter().map(|b| b.linear_hash).collect(); + assert_eq!( + blob_linear_hashes, blob_linear_hashes_from_logs, + "Blob linear hashes mismatch" + ); + } let l2_to_l1_logs_tree_root_from_logs = system_logs .iter() @@ -387,25 +423,45 @@ impl L1BatchAuxiliaryOutput { ); } - assert_eq!( - blob_linear_hashes.len(), - blob_commitments.len(), - "Blob linear hashes and commitments have different lengths" - ); - Self::PostBoojum { common: common_output, system_logs_linear_hash, state_diffs_compressed, state_diffs_hash, aux_commitments, - blob_linear_hashes, - blob_commitments, + blob_hashes, + local_root, + aggregation_root, } } } } + pub fn local_root(&self) -> H256 { + match self { + Self::PreBoojum { common, .. } => common.l2_l1_logs_merkle_root, + Self::PostBoojum { local_root, .. } => *local_root, + } + } + + pub fn aggregation_root(&self) -> H256 { + match self { + Self::PreBoojum { .. } => H256::zero(), + Self::PostBoojum { + aggregation_root, .. + } => *aggregation_root, + } + } + + pub fn state_diff_hash(&self) -> H256 { + match self { + Self::PreBoojum { .. } => H256::zero(), + Self::PostBoojum { + state_diffs_hash, .. + } => *state_diffs_hash, + } + } + pub fn to_bytes(&self) -> Vec { let mut result = Vec::new(); @@ -426,8 +482,7 @@ impl L1BatchAuxiliaryOutput { system_logs_linear_hash, state_diffs_hash, aux_commitments, - blob_linear_hashes, - blob_commitments, + blob_hashes, .. 
} => { result.extend(system_logs_linear_hash.as_bytes()); @@ -439,9 +494,9 @@ impl L1BatchAuxiliaryOutput { ); result.extend(aux_commitments.events_queue_commitment.as_bytes()); - for i in 0..blob_commitments.len() { - result.extend(blob_linear_hashes[i].as_bytes()); - result.extend(blob_commitments[i].as_bytes()); + for b in blob_hashes { + result.extend(b.linear_hash.as_bytes()); + result.extend(b.commitment.as_bytes()); } } } @@ -637,6 +692,9 @@ impl L1BatchCommitment { aux_commitments: self.aux_commitments(), compressed_initial_writes, compressed_repeated_writes, + local_root: self.auxiliary_output.local_root(), + aggregation_root: self.auxiliary_output.aggregation_root(), + state_diff_hash: self.auxiliary_output.state_diff_hash(), } } } @@ -673,7 +731,8 @@ pub enum CommitmentInput { system_logs: Vec, state_diffs: Vec, aux_commitments: AuxCommitments, - blob_commitments: Vec, + blob_hashes: Vec, + aggregation_root: H256, }, } @@ -715,11 +774,11 @@ impl CommitmentInput { events_queue_commitment: H256::zero(), bootloader_initial_content_commitment: H256::zero(), }, - blob_commitments: { + blob_hashes: { let num_blobs = num_blobs_required(&protocol_version); - - vec![H256::zero(); num_blobs] + vec![Default::default(); num_blobs] }, + aggregation_root: H256::zero(), } } } @@ -734,4 +793,7 @@ pub struct L1BatchCommitmentArtifacts { pub compressed_repeated_writes: Option>, pub zkporter_is_available: bool, pub aux_commitments: Option, + pub aggregation_root: H256, + pub local_root: H256, + pub state_diff_hash: H256, } diff --git a/core/lib/types/src/commitment/tests/mod.rs b/core/lib/types/src/commitment/tests/mod.rs index 33fb0142b04d..a95318309a28 100644 --- a/core/lib/types/src/commitment/tests/mod.rs +++ b/core/lib/types/src/commitment/tests/mod.rs @@ -55,3 +55,8 @@ fn post_boojum_1_5_0() { fn post_boojum_1_5_0_with_evm() { run_test("post_boojum_1_5_0_test_with_evm"); } + +#[test] +fn post_gateway() { + run_test("post_gateway_test"); +} diff --git a/core/lib/types/src/commitment/tests/post_boojum_1_4_1_test.json b/core/lib/types/src/commitment/tests/post_boojum_1_4_1_test.json index c5eccbce038a..c854a6e77d8f 100644 --- a/core/lib/types/src/commitment/tests/post_boojum_1_4_1_test.json +++ b/core/lib/types/src/commitment/tests/post_boojum_1_4_1_test.json @@ -190,10 +190,17 @@ "events_queue_commitment": "0x6193a5098eb140796387bdf40700a3855eeb010474b5478f30bf917172c67883", "bootloader_initial_content_commitment": "0xf031b4491c37f20516c4ebf428f4765156409f67089e64772f4106fd2d9f3351" }, - "blob_commitments": [ - "0x0000000000000000000000000000000000000000000000000000000000000000", - "0x0000000000000000000000000000000000000000000000000000000000000000" - ] + "blob_hashes": [ + { + "commitment": "0x0000000000000000000000000000000000000000000000000000000000000000", + "linear_hash": "0x0000000000000000000000000000000000000000000000000000000000000000" + }, + { + "commitment": "0x0000000000000000000000000000000000000000000000000000000000000000", + "linear_hash": "0x0000000000000000000000000000000000000000000000000000000000000000" + } + ], + "aggregation_root": "0x0000000000000000000000000000000000000000000000000000000000000000" } }, "pass_through_data": { @@ -248,14 +255,18 @@ "events_queue_commitment": "0x6193a5098eb140796387bdf40700a3855eeb010474b5478f30bf917172c67883", "bootloader_initial_content_commitment": "0xf031b4491c37f20516c4ebf428f4765156409f67089e64772f4106fd2d9f3351" }, - "blob_linear_hashes": [ - "0x0000000000000000000000000000000000000000000000000000000000000000", - 
"0x0000000000000000000000000000000000000000000000000000000000000000" + "blob_hashes": [ + { + "commitment": "0x0000000000000000000000000000000000000000000000000000000000000000", + "linear_hash": "0x0000000000000000000000000000000000000000000000000000000000000000" + }, + { + "commitment": "0x0000000000000000000000000000000000000000000000000000000000000000", + "linear_hash": "0x0000000000000000000000000000000000000000000000000000000000000000" + } ], - "blob_commitments": [ - "0x0000000000000000000000000000000000000000000000000000000000000000", - "0x0000000000000000000000000000000000000000000000000000000000000000" - ] + "aggregation_root": "0x0000000000000000000000000000000000000000000000000000000000000000", + "local_root": "0xe52d57bd64cabf6c588b30365512da2bf10912c106e7a06483b236d05ac4037e" } }, "hashes": { diff --git a/core/lib/types/src/commitment/tests/post_boojum_1_4_2_test.json b/core/lib/types/src/commitment/tests/post_boojum_1_4_2_test.json index 4983bbeca143..96aa8ab842ce 100644 --- a/core/lib/types/src/commitment/tests/post_boojum_1_4_2_test.json +++ b/core/lib/types/src/commitment/tests/post_boojum_1_4_2_test.json @@ -206,10 +206,17 @@ "events_queue_commitment": "0x6193a5098eb140796387bdf40700a3855eeb010474b5478f30bf917172c67883", "bootloader_initial_content_commitment": "0xf031b4491c37f20516c4ebf428f4765156409f67089e64772f4106fd2d9f3351" }, - "blob_commitments": [ - "0x0000000000000000000000000000000000000000000000000000000000000001", - "0x0000000000000000000000000000000000000000000000000000000000000002" - ] + "blob_hashes": [ + { + "commitment": "0x0000000000000000000000000000000000000000000000000000000000000001", + "linear_hash": "0x0000000000000000000000000000000000000000000000000000000000000003" + }, + { + "commitment": "0x0000000000000000000000000000000000000000000000000000000000000002", + "linear_hash": "0x0000000000000000000000000000000000000000000000000000000000000004" + } + ], + "aggregation_root": "0x0000000000000000000000000000000000000000000000000000000000000000" } }, "pass_through_data": { @@ -264,14 +271,18 @@ "events_queue_commitment": "0x6193a5098eb140796387bdf40700a3855eeb010474b5478f30bf917172c67883", "bootloader_initial_content_commitment": "0xf031b4491c37f20516c4ebf428f4765156409f67089e64772f4106fd2d9f3351" }, - "blob_linear_hashes": [ - "0x0000000000000000000000000000000000000000000000000000000000000003", - "0x0000000000000000000000000000000000000000000000000000000000000004" + "blob_hashes": [ + { + "commitment": "0x0000000000000000000000000000000000000000000000000000000000000001", + "linear_hash": "0x0000000000000000000000000000000000000000000000000000000000000003" + }, + { + "commitment": "0x0000000000000000000000000000000000000000000000000000000000000002", + "linear_hash": "0x0000000000000000000000000000000000000000000000000000000000000004" + } ], - "blob_commitments": [ - "0x0000000000000000000000000000000000000000000000000000000000000001", - "0x0000000000000000000000000000000000000000000000000000000000000002" - ] + "aggregation_root": "0x0000000000000000000000000000000000000000000000000000000000000000", + "local_root": "0x0b6e1ad4643cc2bee06b5e173184ec822d80826e5720f5715172898350433299" } }, "hashes": { diff --git a/core/lib/types/src/commitment/tests/post_boojum_1_5_0_test.json b/core/lib/types/src/commitment/tests/post_boojum_1_5_0_test.json index 59a24b7c90ce..ed61ea67cefc 100644 --- a/core/lib/types/src/commitment/tests/post_boojum_1_5_0_test.json +++ b/core/lib/types/src/commitment/tests/post_boojum_1_5_0_test.json @@ -238,24 +238,73 @@ 
"events_queue_commitment": "0x6193a5098eb140796387bdf40700a3855eeb010474b5478f30bf917172c67883", "bootloader_initial_content_commitment": "0xf031b4491c37f20516c4ebf428f4765156409f67089e64772f4106fd2d9f3351" }, - "blob_commitments": [ - "0x0000000000000000000000000000000000000000000000000000000000000001", - "0x0000000000000000000000000000000000000000000000000000000000000002", - "0x0000000000000000000000000000000000000000000000000000000000000003", - "0x0000000000000000000000000000000000000000000000000000000000000004", - "0x0000000000000000000000000000000000000000000000000000000000000005", - "0x0000000000000000000000000000000000000000000000000000000000000006", - "0x0000000000000000000000000000000000000000000000000000000000000000", - "0x0000000000000000000000000000000000000000000000000000000000000000", - "0x0000000000000000000000000000000000000000000000000000000000000000", - "0x0000000000000000000000000000000000000000000000000000000000000000", - "0x0000000000000000000000000000000000000000000000000000000000000000", - "0x0000000000000000000000000000000000000000000000000000000000000000", - "0x0000000000000000000000000000000000000000000000000000000000000000", - "0x0000000000000000000000000000000000000000000000000000000000000000", - "0x0000000000000000000000000000000000000000000000000000000000000000", - "0x0000000000000000000000000000000000000000000000000000000000000000" - ] + "blob_hashes": [ + { + "commitment": "0x0000000000000000000000000000000000000000000000000000000000000001", + "linear_hash": "0x0000000000000000000000000000000000000000000000000000000000000003" + }, + { + "commitment": "0x0000000000000000000000000000000000000000000000000000000000000002", + "linear_hash": "0x0000000000000000000000000000000000000000000000000000000000000004" + }, + { + "commitment": "0x0000000000000000000000000000000000000000000000000000000000000003", + "linear_hash": "0x0000000000000000000000000000000000000000000000000000000000000005" + }, + { + "commitment": "0x0000000000000000000000000000000000000000000000000000000000000004", + "linear_hash": "0x0000000000000000000000000000000000000000000000000000000000000006" + }, + { + "commitment": "0x0000000000000000000000000000000000000000000000000000000000000005", + "linear_hash": "0x0000000000000000000000000000000000000000000000000000000000000007" + }, + { + "commitment": "0x0000000000000000000000000000000000000000000000000000000000000006", + "linear_hash": "0x0000000000000000000000000000000000000000000000000000000000000008" + }, + { + "commitment": "0x0000000000000000000000000000000000000000000000000000000000000000", + "linear_hash": "0x0000000000000000000000000000000000000000000000000000000000000000" + }, + { + "commitment": "0x0000000000000000000000000000000000000000000000000000000000000000", + "linear_hash": "0x0000000000000000000000000000000000000000000000000000000000000000" + }, + { + "commitment": "0x0000000000000000000000000000000000000000000000000000000000000000", + "linear_hash": "0x0000000000000000000000000000000000000000000000000000000000000000" + }, + { + "commitment": "0x0000000000000000000000000000000000000000000000000000000000000000", + "linear_hash": "0x0000000000000000000000000000000000000000000000000000000000000000" + }, + { + "commitment": "0x0000000000000000000000000000000000000000000000000000000000000000", + "linear_hash": "0x0000000000000000000000000000000000000000000000000000000000000000" + }, + { + "commitment": "0x0000000000000000000000000000000000000000000000000000000000000000", + "linear_hash": 
"0x0000000000000000000000000000000000000000000000000000000000000000" + }, + { + "commitment": "0x0000000000000000000000000000000000000000000000000000000000000000", + "linear_hash": "0x0000000000000000000000000000000000000000000000000000000000000000" + }, + { + "commitment": "0x0000000000000000000000000000000000000000000000000000000000000000", + "linear_hash": "0x0000000000000000000000000000000000000000000000000000000000000000" + }, + { + "commitment": "0x0000000000000000000000000000000000000000000000000000000000000000", + "linear_hash": "0x0000000000000000000000000000000000000000000000000000000000000000" + }, + { + "commitment": "0x0000000000000000000000000000000000000000000000000000000000000000", + "linear_hash": "0x0000000000000000000000000000000000000000000000000000000000000000" + } + ], + "aggregation_root": "0x0000000000000000000000000000000000000000000000000000000000000000" } }, "pass_through_data": { @@ -310,42 +359,74 @@ "events_queue_commitment": "0x6193a5098eb140796387bdf40700a3855eeb010474b5478f30bf917172c67883", "bootloader_initial_content_commitment": "0xf031b4491c37f20516c4ebf428f4765156409f67089e64772f4106fd2d9f3351" }, - "blob_linear_hashes": [ - "0x0000000000000000000000000000000000000000000000000000000000000003", - "0x0000000000000000000000000000000000000000000000000000000000000004", - "0x0000000000000000000000000000000000000000000000000000000000000005", - "0x0000000000000000000000000000000000000000000000000000000000000006", - "0x0000000000000000000000000000000000000000000000000000000000000007", - "0x0000000000000000000000000000000000000000000000000000000000000008", - "0x0000000000000000000000000000000000000000000000000000000000000000", - "0x0000000000000000000000000000000000000000000000000000000000000000", - "0x0000000000000000000000000000000000000000000000000000000000000000", - "0x0000000000000000000000000000000000000000000000000000000000000000", - "0x0000000000000000000000000000000000000000000000000000000000000000", - "0x0000000000000000000000000000000000000000000000000000000000000000", - "0x0000000000000000000000000000000000000000000000000000000000000000", - "0x0000000000000000000000000000000000000000000000000000000000000000", - "0x0000000000000000000000000000000000000000000000000000000000000000", - "0x0000000000000000000000000000000000000000000000000000000000000000" + "blob_hashes": [ + { + "commitment": "0x0000000000000000000000000000000000000000000000000000000000000001", + "linear_hash": "0x0000000000000000000000000000000000000000000000000000000000000003" + }, + { + "commitment": "0x0000000000000000000000000000000000000000000000000000000000000002", + "linear_hash": "0x0000000000000000000000000000000000000000000000000000000000000004" + }, + { + "commitment": "0x0000000000000000000000000000000000000000000000000000000000000003", + "linear_hash": "0x0000000000000000000000000000000000000000000000000000000000000005" + }, + { + "commitment": "0x0000000000000000000000000000000000000000000000000000000000000004", + "linear_hash": "0x0000000000000000000000000000000000000000000000000000000000000006" + }, + { + "commitment": "0x0000000000000000000000000000000000000000000000000000000000000005", + "linear_hash": "0x0000000000000000000000000000000000000000000000000000000000000007" + }, + { + "commitment": "0x0000000000000000000000000000000000000000000000000000000000000006", + "linear_hash": "0x0000000000000000000000000000000000000000000000000000000000000008" + }, + { + "commitment": "0x0000000000000000000000000000000000000000000000000000000000000000", + "linear_hash": 
"0x0000000000000000000000000000000000000000000000000000000000000000" + }, + { + "commitment": "0x0000000000000000000000000000000000000000000000000000000000000000", + "linear_hash": "0x0000000000000000000000000000000000000000000000000000000000000000" + }, + { + "commitment": "0x0000000000000000000000000000000000000000000000000000000000000000", + "linear_hash": "0x0000000000000000000000000000000000000000000000000000000000000000" + }, + { + "commitment": "0x0000000000000000000000000000000000000000000000000000000000000000", + "linear_hash": "0x0000000000000000000000000000000000000000000000000000000000000000" + }, + { + "commitment": "0x0000000000000000000000000000000000000000000000000000000000000000", + "linear_hash": "0x0000000000000000000000000000000000000000000000000000000000000000" + }, + { + "commitment": "0x0000000000000000000000000000000000000000000000000000000000000000", + "linear_hash": "0x0000000000000000000000000000000000000000000000000000000000000000" + }, + { + "commitment": "0x0000000000000000000000000000000000000000000000000000000000000000", + "linear_hash": "0x0000000000000000000000000000000000000000000000000000000000000000" + }, + { + "commitment": "0x0000000000000000000000000000000000000000000000000000000000000000", + "linear_hash": "0x0000000000000000000000000000000000000000000000000000000000000000" + }, + { + "commitment": "0x0000000000000000000000000000000000000000000000000000000000000000", + "linear_hash": "0x0000000000000000000000000000000000000000000000000000000000000000" + }, + { + "commitment": "0x0000000000000000000000000000000000000000000000000000000000000000", + "linear_hash": "0x0000000000000000000000000000000000000000000000000000000000000000" + } ], - "blob_commitments": [ - "0x0000000000000000000000000000000000000000000000000000000000000001", - "0x0000000000000000000000000000000000000000000000000000000000000002", - "0x0000000000000000000000000000000000000000000000000000000000000003", - "0x0000000000000000000000000000000000000000000000000000000000000004", - "0x0000000000000000000000000000000000000000000000000000000000000005", - "0x0000000000000000000000000000000000000000000000000000000000000006", - "0x0000000000000000000000000000000000000000000000000000000000000000", - "0x0000000000000000000000000000000000000000000000000000000000000000", - "0x0000000000000000000000000000000000000000000000000000000000000000", - "0x0000000000000000000000000000000000000000000000000000000000000000", - "0x0000000000000000000000000000000000000000000000000000000000000000", - "0x0000000000000000000000000000000000000000000000000000000000000000", - "0x0000000000000000000000000000000000000000000000000000000000000000", - "0x0000000000000000000000000000000000000000000000000000000000000000", - "0x0000000000000000000000000000000000000000000000000000000000000000", - "0x0000000000000000000000000000000000000000000000000000000000000000" - ] + "aggregation_root": "0x0000000000000000000000000000000000000000000000000000000000000000", + "local_root": "0x30ba728b1aac22b122de4f32589dd2711da264412cb90e35bf7b1f735dd357ff" } }, "hashes": { diff --git a/core/lib/types/src/commitment/tests/post_boojum_1_5_0_test_with_evm.json b/core/lib/types/src/commitment/tests/post_boojum_1_5_0_test_with_evm.json index 4e8c0e0814a0..a41aa33c04a1 100644 --- a/core/lib/types/src/commitment/tests/post_boojum_1_5_0_test_with_evm.json +++ b/core/lib/types/src/commitment/tests/post_boojum_1_5_0_test_with_evm.json @@ -239,24 +239,73 @@ "events_queue_commitment": 
"0x6193a5098eb140796387bdf40700a3855eeb010474b5478f30bf917172c67883", "bootloader_initial_content_commitment": "0xf031b4491c37f20516c4ebf428f4765156409f67089e64772f4106fd2d9f3351" }, - "blob_commitments": [ - "0x0000000000000000000000000000000000000000000000000000000000000001", - "0x0000000000000000000000000000000000000000000000000000000000000002", - "0x0000000000000000000000000000000000000000000000000000000000000003", - "0x0000000000000000000000000000000000000000000000000000000000000004", - "0x0000000000000000000000000000000000000000000000000000000000000005", - "0x0000000000000000000000000000000000000000000000000000000000000006", - "0x0000000000000000000000000000000000000000000000000000000000000000", - "0x0000000000000000000000000000000000000000000000000000000000000000", - "0x0000000000000000000000000000000000000000000000000000000000000000", - "0x0000000000000000000000000000000000000000000000000000000000000000", - "0x0000000000000000000000000000000000000000000000000000000000000000", - "0x0000000000000000000000000000000000000000000000000000000000000000", - "0x0000000000000000000000000000000000000000000000000000000000000000", - "0x0000000000000000000000000000000000000000000000000000000000000000", - "0x0000000000000000000000000000000000000000000000000000000000000000", - "0x0000000000000000000000000000000000000000000000000000000000000000" - ] + "blob_hashes": [ + { + "commitment": "0x0000000000000000000000000000000000000000000000000000000000000001", + "linear_hash": "0x0000000000000000000000000000000000000000000000000000000000000003" + }, + { + "commitment": "0x0000000000000000000000000000000000000000000000000000000000000002", + "linear_hash": "0x0000000000000000000000000000000000000000000000000000000000000004" + }, + { + "commitment": "0x0000000000000000000000000000000000000000000000000000000000000003", + "linear_hash": "0x0000000000000000000000000000000000000000000000000000000000000005" + }, + { + "commitment": "0x0000000000000000000000000000000000000000000000000000000000000004", + "linear_hash": "0x0000000000000000000000000000000000000000000000000000000000000006" + }, + { + "commitment": "0x0000000000000000000000000000000000000000000000000000000000000005", + "linear_hash": "0x0000000000000000000000000000000000000000000000000000000000000007" + }, + { + "commitment": "0x0000000000000000000000000000000000000000000000000000000000000006", + "linear_hash": "0x0000000000000000000000000000000000000000000000000000000000000008" + }, + { + "commitment": "0x0000000000000000000000000000000000000000000000000000000000000000", + "linear_hash": "0x0000000000000000000000000000000000000000000000000000000000000000" + }, + { + "commitment": "0x0000000000000000000000000000000000000000000000000000000000000000", + "linear_hash": "0x0000000000000000000000000000000000000000000000000000000000000000" + }, + { + "commitment": "0x0000000000000000000000000000000000000000000000000000000000000000", + "linear_hash": "0x0000000000000000000000000000000000000000000000000000000000000000" + }, + { + "commitment": "0x0000000000000000000000000000000000000000000000000000000000000000", + "linear_hash": "0x0000000000000000000000000000000000000000000000000000000000000000" + }, + { + "commitment": "0x0000000000000000000000000000000000000000000000000000000000000000", + "linear_hash": "0x0000000000000000000000000000000000000000000000000000000000000000" + }, + { + "commitment": "0x0000000000000000000000000000000000000000000000000000000000000000", + "linear_hash": "0x0000000000000000000000000000000000000000000000000000000000000000" + }, + 
{ + "commitment": "0x0000000000000000000000000000000000000000000000000000000000000000", + "linear_hash": "0x0000000000000000000000000000000000000000000000000000000000000000" + }, + { + "commitment": "0x0000000000000000000000000000000000000000000000000000000000000000", + "linear_hash": "0x0000000000000000000000000000000000000000000000000000000000000000" + }, + { + "commitment": "0x0000000000000000000000000000000000000000000000000000000000000000", + "linear_hash": "0x0000000000000000000000000000000000000000000000000000000000000000" + }, + { + "commitment": "0x0000000000000000000000000000000000000000000000000000000000000000", + "linear_hash": "0x0000000000000000000000000000000000000000000000000000000000000000" + } + ], + "aggregation_root": "0x0000000000000000000000000000000000000000000000000000000000000000" } }, "pass_through_data": { @@ -312,42 +361,74 @@ "events_queue_commitment": "0x6193a5098eb140796387bdf40700a3855eeb010474b5478f30bf917172c67883", "bootloader_initial_content_commitment": "0xf031b4491c37f20516c4ebf428f4765156409f67089e64772f4106fd2d9f3351" }, - "blob_linear_hashes": [ - "0x0000000000000000000000000000000000000000000000000000000000000003", - "0x0000000000000000000000000000000000000000000000000000000000000004", - "0x0000000000000000000000000000000000000000000000000000000000000005", - "0x0000000000000000000000000000000000000000000000000000000000000006", - "0x0000000000000000000000000000000000000000000000000000000000000007", - "0x0000000000000000000000000000000000000000000000000000000000000008", - "0x0000000000000000000000000000000000000000000000000000000000000000", - "0x0000000000000000000000000000000000000000000000000000000000000000", - "0x0000000000000000000000000000000000000000000000000000000000000000", - "0x0000000000000000000000000000000000000000000000000000000000000000", - "0x0000000000000000000000000000000000000000000000000000000000000000", - "0x0000000000000000000000000000000000000000000000000000000000000000", - "0x0000000000000000000000000000000000000000000000000000000000000000", - "0x0000000000000000000000000000000000000000000000000000000000000000", - "0x0000000000000000000000000000000000000000000000000000000000000000", - "0x0000000000000000000000000000000000000000000000000000000000000000" + "blob_hashes": [ + { + "commitment": "0x0000000000000000000000000000000000000000000000000000000000000001", + "linear_hash": "0x0000000000000000000000000000000000000000000000000000000000000003" + }, + { + "commitment": "0x0000000000000000000000000000000000000000000000000000000000000002", + "linear_hash": "0x0000000000000000000000000000000000000000000000000000000000000004" + }, + { + "commitment": "0x0000000000000000000000000000000000000000000000000000000000000003", + "linear_hash": "0x0000000000000000000000000000000000000000000000000000000000000005" + }, + { + "commitment": "0x0000000000000000000000000000000000000000000000000000000000000004", + "linear_hash": "0x0000000000000000000000000000000000000000000000000000000000000006" + }, + { + "commitment": "0x0000000000000000000000000000000000000000000000000000000000000005", + "linear_hash": "0x0000000000000000000000000000000000000000000000000000000000000007" + }, + { + "commitment": "0x0000000000000000000000000000000000000000000000000000000000000006", + "linear_hash": "0x0000000000000000000000000000000000000000000000000000000000000008" + }, + { + "commitment": "0x0000000000000000000000000000000000000000000000000000000000000000", + "linear_hash": "0x0000000000000000000000000000000000000000000000000000000000000000" + }, + { + 
"commitment": "0x0000000000000000000000000000000000000000000000000000000000000000", + "linear_hash": "0x0000000000000000000000000000000000000000000000000000000000000000" + }, + { + "commitment": "0x0000000000000000000000000000000000000000000000000000000000000000", + "linear_hash": "0x0000000000000000000000000000000000000000000000000000000000000000" + }, + { + "commitment": "0x0000000000000000000000000000000000000000000000000000000000000000", + "linear_hash": "0x0000000000000000000000000000000000000000000000000000000000000000" + }, + { + "commitment": "0x0000000000000000000000000000000000000000000000000000000000000000", + "linear_hash": "0x0000000000000000000000000000000000000000000000000000000000000000" + }, + { + "commitment": "0x0000000000000000000000000000000000000000000000000000000000000000", + "linear_hash": "0x0000000000000000000000000000000000000000000000000000000000000000" + }, + { + "commitment": "0x0000000000000000000000000000000000000000000000000000000000000000", + "linear_hash": "0x0000000000000000000000000000000000000000000000000000000000000000" + }, + { + "commitment": "0x0000000000000000000000000000000000000000000000000000000000000000", + "linear_hash": "0x0000000000000000000000000000000000000000000000000000000000000000" + }, + { + "commitment": "0x0000000000000000000000000000000000000000000000000000000000000000", + "linear_hash": "0x0000000000000000000000000000000000000000000000000000000000000000" + }, + { + "commitment": "0x0000000000000000000000000000000000000000000000000000000000000000", + "linear_hash": "0x0000000000000000000000000000000000000000000000000000000000000000" + } ], - "blob_commitments": [ - "0x0000000000000000000000000000000000000000000000000000000000000001", - "0x0000000000000000000000000000000000000000000000000000000000000002", - "0x0000000000000000000000000000000000000000000000000000000000000003", - "0x0000000000000000000000000000000000000000000000000000000000000004", - "0x0000000000000000000000000000000000000000000000000000000000000005", - "0x0000000000000000000000000000000000000000000000000000000000000006", - "0x0000000000000000000000000000000000000000000000000000000000000000", - "0x0000000000000000000000000000000000000000000000000000000000000000", - "0x0000000000000000000000000000000000000000000000000000000000000000", - "0x0000000000000000000000000000000000000000000000000000000000000000", - "0x0000000000000000000000000000000000000000000000000000000000000000", - "0x0000000000000000000000000000000000000000000000000000000000000000", - "0x0000000000000000000000000000000000000000000000000000000000000000", - "0x0000000000000000000000000000000000000000000000000000000000000000", - "0x0000000000000000000000000000000000000000000000000000000000000000", - "0x0000000000000000000000000000000000000000000000000000000000000000" - ] + "aggregation_root": "0x0000000000000000000000000000000000000000000000000000000000000000", + "local_root": "0x30ba728b1aac22b122de4f32589dd2711da264412cb90e35bf7b1f735dd357ff" } }, "hashes": { diff --git a/core/lib/types/src/commitment/tests/post_gateway_test.json b/core/lib/types/src/commitment/tests/post_gateway_test.json new file mode 100644 index 000000000000..4b598ff59f4f --- /dev/null +++ b/core/lib/types/src/commitment/tests/post_gateway_test.json @@ -0,0 +1,1977 @@ +{ + "hashes": { + "pass_through_data": "0x756c1660f611302295f6a56a8f4b9d68f2ebf51f8278f225d6b7e64bb9364be0", + "aux_output": "0xcccf1ef8192054cb1b5fb668868ce4e069a695a1394b9486ebd3031cec12fe12", + "meta_parameters": 
"0xdb298fa55c75b134333cee0b39f77aea956553a1eb861a5777dc7a66ad7a55b9", + "commitment": "0xd6615c5447c817a320c69c6a5af12c472fd4d5bc2ef4de7806d40afe384ddc27" + }, + "auxiliary_output": { + "PostBoojum": { + "common": { + "l2_l1_logs_merkle_root": "0x38eaeef3afe69b6f6b2fa22c92da8137f1e405a1e1861b7de7cfa30c7d7462dd", + "protocol_version": "Version27" + }, + "system_logs_linear_hash": "0xe8460ce1ed47b77cfee3cadf803aa089c144c506ea2bdd358a6a38ff2c7bc8e3", + "state_diffs_compressed": [ + 1,0,27,89,4,0,148,112,120,89,162,183,230,11,175,17,100,223,232,175,83,47,195,198,157,29,129,145,197,186,61,127,17,109,250,141,181,206,45,0,1,0,1,67,6,51,197,115,134,143,51,94,49,6,252,85,139,173,197,6,118,46,184,24,78,249,206,120,93,239,110,206,130,208,215,121,46,249,196,126,160,123,216,26,86,45,8,246,35,74,8,171,141,141,223,145,137,150,142,180,236,158,154,37,0,1,0,14,207,174,184,55,189,9,139,207,155,222,111,194,204,216,232,169,53,90,27,112,230,1,172,24,205,8,158,179,8,246,11,47,22,184,171,230,29,125,57,179,213,44,191,157,128,184,167,253,5,55,217,60,33,8,75,147,188,5,4,171,60,0,1,0,0,195,40,243,40,221,130,10,29,214,152,4,122,127,125,73,135,77,130,89,25,110,39,53,23,67,10,248,244,128,203,204,98,199,195,136,172,152,215,47,208,131,209,215,32,206,186,255,203,162,198,108,114,94,200,185,197,197,240,116,111,138,0,1,0,0,91,142,88,121,116,4,61,89,191,251,246,50,208,32,231,100,149,154,190,98,228,194,56,216,223,46,98,178,181,235,143,74,199,189,78,241,151,159,154,102,86,114,178,92,208,123,30,61,99,122,89,162,199,107,26,34,232,91,117,146,65,0,1,0,1,67,6,51,197,115,134,143,51,94,49,6,252,85,139,173,197,6,118,46,184,24,78,249,206,120,93,239,110,206,66,202,106,148,168,163,117,186,10,227,150,70,185,29,164,88,23,175,73,33,116,119,174,107,73,193,3,53,191,78,11,115,0,1,0,1,179,126,95,140,175,172,146,75,62,102,55,121,225,44,128,218,206,138,2,177,210,115,174,112,143,39,90,214,42,71,164,52,102,233,54,181,135,193,191,158,16,243,13,105,123,17,66,228,89,233,6,51,219,18,27,114,127,245,180,121,43,0,1,0,0,103,85,79,83,198,102,14,83,189,151,41,240,10,238,1,137,1,13,254,184,104,250,60,189,72,18,50,72,12,95,233,138,250,154,14,193,124,34,99,2,123,232,159,70,229,238,83,58,30,33,169,31,255,76,177,68,204,74,239,33,188,0,1,0,5,181,151,235,2,5,139,35,12,238,106,156,223,71,5,65,13,21,26,209,234,106,211,226,222,119,132,26,207,194,142,107,134,4,159,177,116,20,22,26,103,250,177,44,27,34,144,68,85,154,16,112,202,176,23,66,216,18,18,139,156,190,0,1,0,0,103,85,79,83,198,102,14,83,189,151,41,240,10,238,1,137,1,13,254,184,104,250,60,189,72,18,50,72,12,140,78,248,13,214,150,89,51,34,40,248,136,164,249,180,131,12,12,50,27,140,170,45,67,53,35,250,200,97,150,181,207,0,1,0,4,139,241,177,101,244,200,80,208,30,184,30,92,4,15,112,234,1,220,160,25,231,128,168,127,128,236,120,151,195,235,96,109,232,32,223,19,108,89,177,255,121,225,223,146,96,66,108,164,143,175,146,74,70,53,36,33,31,183,195,161,165,0,1,0,2,1,233,35,243,228,122,223,167,196,240,246,30,168,221,66,86,246,109,16,153,151,11,205,49,72,146,134,73,23,95,127,60,252,81,90,137,254,204,8,97,6,46,254,57,252,48,104,36,27,116,128,26,163,170,147,245,100,172,98,242,54,0,1,0,6,29,196,13,11,194,203,129,144,94,107,193,66,30,189,75,216,183,154,172,184,218,79,157,113,69,178,136,103,79,207,14,96,187,125,119,111,198,230,184,241,1,19,161,190,119,25,192,44,34,151,163,108,216,124,11,59,35,121,140,74,95,0,1,0,7,15,28,212,226,131,6,7,175,48,25,39,67,145,15,223,171,241,221,0,74,128,115,148,66,117,129,157,29,254,53,89,195,90,167,22,152,246,194,202,70,67,239,232,80,69,169,73,79,38,45,119,238,103,193,61,215,52,230,38,48,90,0,1,0,1,67,6,51,
197,115,134,143,51,94,49,6,252,85,139,173,197,6,118,46,184,24,78,249,206,120,93,239,110,206,186,93,97,46,65,80,43,253,205,126,211,179,176,210,212,177,245,200,248,185,15,209,21,42,187,224,222,192,14,162,61,7,0,1,0,4,139,241,177,101,244,200,80,208,30,184,30,92,4,15,112,234,1,220,160,25,231,128,168,127,128,236,120,151,195,44,61,141,160,220,54,28,84,148,218,146,175,212,98,94,116,25,190,241,121,131,189,209,145,214,33,89,62,212,173,57,47,0,1,0,3,35,183,38,99,20,185,211,236,56,125,91,205,149,144,165,11,224,14,120,136,55,202,90,136,13,151,227,238,131,48,100,113,86,255,138,164,229,100,8,99,14,34,251,194,115,119,250,250,242,7,188,204,248,210,254,18,115,9,165,229,233,0,1,0,2,21,205,111,48,109,117,125,93,47,219,180,96,198,15,41,133,132,23,158,236,192,113,150,199,174,142,79,141,100,200,51,40,178,38,74,180,112,167,221,220,163,38,200,255,61,159,78,76,252,60,226,78,168,221,216,201,180,12,20,188,185,0,1,0,2,1,233,35,243,228,122,223,167,196,240,246,30,168,221,66,86,246,109,16,153,151,11,205,49,72,146,134,73,23,229,176,77,5,169,51,88,54,33,49,122,209,137,227,159,45,116,33,7,146,238,29,46,153,91,171,175,162,128,71,14,27,0,1,0,1,83,6,251,15,11,95,222,33,153,150,85,28,128,114,198,113,27,186,83,0,178,102,154,235,15,14,76,116,69,250,253,202,115,87,157,171,40,23,48,73,193,157,78,81,69,162,232,29,120,68,42,125,135,121,254,156,149,143,198,173,119,0,1,0,0,53,186,49,237,22,194,17,96,120,10,192,151,99,191,213,147,177,116,143,105,230,131,169,251,16,146,26,164,157,188,67,139,78,87,51,241,113,6,164,3,59,144,48,243,76,127,49,1,147,79,102,218,253,36,37,149,91,92,247,35,64,0,1,0,0,237,48,107,41,18,142,6,90,253,206,115,235,200,224,236,208,140,217,24,253,192,247,76,3,246,77,81,94,72,6,115,118,133,155,130,106,66,115,187,68,69,230,28,222,77,91,95,90,23,4,86,255,161,95,247,195,108,233,152,241,190,0,1,0,1,63,74,44,150,76,113,212,159,45,136,118,161,30,238,75,244,232,209,146,49,101,47,199,117,178,202,228,58,33,129,4,114,76,225,212,61,128,125,223,69,48,213,107,167,249,183,181,194,21,67,99,215,247,166,215,108,189,158,61,249,130,0,1,0,0,103,33,171,145,26,65,216,181,37,2,234,76,223,66,236,153,229,229,41,190,106,62,102,243,173,178,20,60,90,51,232,118,240,225,158,242,19,13,216,95,254,79,35,196,212,101,148,164,24,219,221,10,181,111,253,164,76,93,72,246,206,0,1,0,5,187,116,113,160,1,107,28,51,15,118,122,94,115,173,12,51,9,52,105,76,173,0,124,90,163,249,38,190,101,25,143,99,94,50,239,35,14,215,12,184,219,25,32,81,51,246,142,27,126,246,157,133,33,13,119,172,197,111,163,43,234,0,1,0,5,21,101,126,190,1,77,145,81,72,7,18,222,122,16,141,155,14,26,122,121,141,61,162,148,91,165,60,209,3,90,251,120,52,143,255,91,253,53,60,239,129,160,65,213,230,214,195,241,114,123,145,145,220,232,75,132,91,7,118,101,237,0,1,0,6,49,18,87,196,3,187,186,126,42,239,10,162,8,34,98,124,130,236,116,132,252,179,27,135,221,140,88,42,169,240,163,222,200,2,37,101,9,35,172,42,74,77,142,96,167,8,137,208,171,61,234,142,107,218,41,37,203,138,127,216,252,137,2,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,174,229,34,198,20,187,1,37,21,66,226,45,128,16,30,45,151,85,103,77,143,214,69,38,254,154,44,77,223,171,97,143,137,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,159,186,133,75,226,253,235,173,50,111,19,111,136,219,244,177,114,214,77,28,237,51,180,171,99,164,148,28,226,73,151,137,7,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,201,172,113,10,243,67,127,194,244,249,48,131,50,164,72,10,88,81,76,45,149,28,73,119,114,174,142,141,132,8,175,27,137,2,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,182,81,125,81,147,30,201,86,98,178,2,213,133,189,82,214,234,207,27,118,113,82,28,46,150,32,45,104,62,223,226,99,137,14,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0
,0,28,170,140,159,117,250,84,163,177,210,240,18,225,217,234,99,118,79,112,157,28,25,151,121,72,28,143,77,92,237,107,62,9,1,45,86,211,71,72,250,222,240,196,161,223,115,65,15,173,85,177,255,211,89,90,168,146,255,238,205,12,128,137,196,203,27,9,1,12,138,2,247,115,213,230,144,244,253,93,195,182,20,84,243,71,244,71,24,244,103,128,231,65,233,198,173,128,126,246,169,9,1,185,67,192,97,81,170,106,240,157,23,26,106,216,228,65,120,68,165,135,110,2,31,216,158,187,67,79,105,151,157,234,15,9,1,12,91,247,177,168,165,63,93,186,29,121,106,121,167,27,50,198,22,230,125,252,159,77,132,92,155,115,251,100,87,112,147,9,1,127,216,219,101,110,162,74,65,114,24,131,123,143,204,15,44,36,72,8,136,170,255,39,231,108,143,128,71,65,95,117,4,9,1,4,27,221,133,245,254,194,84,195,88,19,141,109,233,58,225,116,116,251,225,170,44,159,23,28,181,85,238,6,151,63,144,9,1,130,114,163,11,234,79,36,77,175,198,107,147,58,183,234,134,122,178,61,205,225,34,184,146,138,50,221,70,198,19,2,191,9,1,41,151,245,131,127,195,211,240,254,148,60,106,169,97,173,173,118,116,195,243,213,115,169,17,155,83,25,181,108,68,48,51,9,1,236,254,112,114,24,207,172,186,132,163,119,198,20,66,226,51,51,142,39,212,88,198,68,118,92,136,138,204,26,155,11,165,9,1,215,116,3,65,140,246,136,209,81,15,184,214,188,49,94,63,81,57,12,135,108,143,41,137,113,88,11,84,15,30,158,184,9,1,50,203,140,60,152,140,103,117,103,130,42,29,70,236,110,49,211,18,247,40,117,35,54,107,171,190,233,18,117,69,68,43,9,1,95,50,92,228,81,22,16,190,182,42,66,158,131,165,204,25,25,20,143,210,29,170,143,129,94,111,129,132,227,28,102,180,9,1,96,153,155,64,152,142,147,161,102,88,190,194,238,147,14,243,71,26,33,184,193,50,249,29,88,2,52,157,179,7,69,77,9,1,212,255,51,143,196,70,53,156,98,221,171,235,82,21,252,198,242,28,2,246,195,67,6,91,2,240,95,173,200,49,66,89,9,1,58,212,15,123,117,242,193,79,217,63,177,112,56,232,153,140,93,188,111,168,108,138,82,113,212,107,209,150,246,205,191,157,9,1,204,180,39,152,129,136,139,125,156,240,127,28,205,2,65,12,140,132,177,76,5,4,95,204,205,9,179,77,57,148,6,231,9,1,249,145,142,225,129,214,86,160,12,71,51,28,109,238,246,115,57,184,6,234,138,46,107,81,103,128,201,242,101,51,179,68,9,1,17,109,102,169,143,117,42,93,149,160,20,188,122,34,0,140,248,73,206,232,146,65,183,250,61,35,40,54,167,63,173,215,9,1,58,197,183,31,149,121,187,250,193,140,202,222,69,149,235,105,78,113,59,213,78,241,15,40,62,137,46,19,193,78,85,31,9,1,209,81,122,212,165,252,102,254,115,58,127,209,26,21,188,113,69,3,30,255,154,72,181,219,97,7,227,96,209,19,138,181,9,1,151,245,134,14,100,30,161,175,227,142,158,71,197,157,213,103,198,28,241,51,173,107,242,84,76,53,176,101,132,26,29,60,9,1,72,239,59,253,40,148,227,213,236,98,100,14,198,212,71,148,180,209,64,152,228,196,11,209,109,231,183,97,135,156,172,241,9,1,205,107,120,198,53,118,206,64,8,204,37,15,58,43,95,189,38,38,203,212,73,105,50,160,21,160,38,124,10,233,46,22,17,1,14,204,0,103,138,56,182,245,161,43,137,56,202,232,138,228,30,242,80,214,237,253,8,17,251,148,203,85,106,127,162,114,20,161,98,175,120,161,168,109,95,168,170,123,117,238,24,132,211,108,163,195,25,58,89,16,198,41,13,94,89,2,119,169,28,80,179,104,66,21,38,252,16,146,163,159,122,68,234,161,165,150,251,139,57,4,0,82,244,49,170,53,221,128,152,46,60,102,97,65,18,80,60,162,198,227,68,116,95,74,43,207,201,189,126,9,199,85,132,67,87,214,19,120,116,147,47,78,236,178,95,23,23,1,171,197,181,63,197,52,162,224,221,93,223,35,243,248,138,100,215,25,1,0,2,94,249,142,243,210,28,27,218,191,10,139,245,66,107,2,111,153,125,18,238,76,249,208,69,34,173,165,21,177,18,82,239,17,1,14,217,248,73,34,237,151,158,186,178
,226,225,234,58,186,218,7,175,174,60,185,248,248,25,28,51,154,61,168,213,77,242,169,0,82,244,49,170,53,221,128,152,46,60,102,97,65,18,80,60,162,198,227,68,116,95,74,43,207,201,189,126,9,199,85,132,194,179,227,24,162,84,191,59,100,89,207,244,98,199,135,44,91,35,210,22,182,249,66,219,89,32,250,61,112,54,16,141,161,98,175,120,161,168,109,95,168,170,123,117,238,24,132,211,108,163,195,25,58,60,170,232,127,53,17,58,173,142,247,89,247,207,149,119,134,64,14,158,82,18,231,188,179,163,89,11,174,81,43,46,153,9,9,132,119,216,135,171,185,255,65,210,32,46,77,152,229,32,71,244,141,39,140,188,245,22,25,184,28,198,202,132,222,174,160,25,1,0,2,119,228,107,44,53,217,61,182,86,125,189,169,81,109,32,249,139,212,234,72,144,24,135,118,89,121,216,219,24,207,66,168,17,1,14,213,5,71,76,80,42,16,77,105,27,101,50,79,76,38,232,167,55,134,79,128,251,113,33,35,116,77,254,28,6,176,0,0,82,244,49,170,53,221,128,152,46,60,102,97,65,18,80,60,162,198,227,68,116,95,74,43,207,201,189,126,9,199,85,132,110,177,116,132,158,116,7,177,77,240,138,82,212,212,241,43,54,8,1,75,42,104,243,5,241,73,226,60,72,169,2,41,25,1,0,3,73,127,150,33,212,30,131,171,90,28,221,170,53,22,176,210,81,154,146,160,81,67,188,184,7,13,240,169,97,51,230,181,9,100,71,111,167,179,223,229,107,45,223,184,100,207,103,16,106,234,217,25,120,51,156,12,142,28,186,4,134,110,182,28,191,11,9,9,62,169,255,238,205,94,99,210,162,31,213,85,158,233,223,231,174,18,241,77,26,133,255,75,40,190,65,163,26,48,53,196,25,1,0,2,232,83,248,233,232,89,11,170,74,117,125,224,222,189,198,137,244,49,205,228,155,200,97,42,160,89,8,63,109,25,91,168,9,9,255,134,239,235,78,5,0,110,98,20,109,14,192,231,250,72,49,145,191,114,177,51,38,242,67,121,217,71,114,50,124,171,9,9,209,167,69,145,2,139,203,92,187,46,4,30,218,0,85,77,176,3,253,201,73,229,148,92,229,57,32,59,244,12,109,96,9,9,113,233,23,33,249,145,133,118,215,96,240,47,3,202,196,124,111,64,3,49,96,49,132,142,60,29,153,230,232,58,71,67,65,14,41,230,74,233,195,128,0,49,87,111,239,58,195,179,2,237,163,15,66,168,74,199,52,200,236,175,1,55,3,126,248,127,239,193,246,133,27,151,79,57,134,3,21,27,16,164,160,185,211,150,83,253,116,26,253,56,22,83,204,70,30,122,203,221,134,84,251,39,141,138,17,246,159,212,31,236,239,75,201,65,5,60,24,80,250,182,152,192,250,91,168,183,69,6,78,180,185,147,215,10,134,34,96,243,26,77,158,213,121,211,188,200,73,204,177,205,8,52,178,106,57,74,136,235,186,254,43,32,141,97,126,192,90,203,191,95,226,69,41,166,75,35,133,169,106,173,67,240,155,225,173,169,44,112,64,49,220,193,72,27,65,8,29,65,249,24,254,23,128,162,84,32,193,217,215,5,53,140,19,76,198,1,217,209,132,203,77,253,222,126,28,172,43,195,212,211,139,249,236,68,230,33,5,245,225,0,18,59,175,197,134,247,119,100,72,140,210,76,106,119,84,110,90,15,232,189,251,79,162,3,207,175,252,54,204,228,221,91,137,1,0,0,0,0,0,0,0,0,0,0,0,0,102,252,2,65,142,125,208,106,197,183,59,71,59,230,188,90,81,3,15,76,116,55,101,124,183,178,155,243,118,197,100,184,209,103,90,94,137,2,0,0,0,0,0,0,0,0,0,0,0,0,102,252,2,66,75,168,78,31,55,208,65,188,110,85,186,57,104,38,204,73,78,132,212,129,91,109,181,38,144,66,46,234,115,134,49,79,0,232,231,118,38,88,111,115,185,85,54,76,123,75,191,11,183,247,104,94,189,64,232,82,177,100,99,58,74,203,211,36,76,61,226,32,44,203,98,106,211,135,215,7,34,230,79,190,68,86,46,47,35,26,41,12,8,83,43,141,106,186,64,47,245,0,242,170,235,6,192,229,86,67,74,201,60,35,47,55,221,139,224,167,191,159,67,15,118,235,86,77,249,252,183,112,196,95,121,9,53,136,208,232,71,239,167,58,16,206,32,228,121,159,177,228,102,66,214,86,23,199,229,33,63,160,73,137,217,45,137,2,0,0,0,0,0,0,0,0,0,0,0,0
,102,252,2,66,135,222,210,71,225,102,15,130,112,113,199,241,55,25,52,88,151,81,8,83,132,252,159,68,98,193,241,137,124,92,62,239,137,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,134,36,129,147,235,77,210,168,206,129,95,135,108,18,77,72,53,149,34,240,133,77,149,216,7,46,175,240,211,125,85,189,17,3,32,62,137,13,108,44,59,173,166,238,204,150,3,169,156,28,98,89,237,90,100,2,241,199,108,193,139,86,140,58,239,186,15,17,34,169,145,29,210,173,116,63,242,55,212,17,100,138,15,227,44,109,116,238,192,96,113,106,42,116,53,47,107,28,67,91,93,103,0,249,3,11,120,197,191,90,201,151,167,105,98,170,50,201,10,109,142,142,188,233,131,140,142,235,56,141,115,225,247,101,154,214,116,246,54,163,90,111,26,81,86,78,195,55,27,156,77,163,18,109,90,208,186,227,80,207,199,250,234,199,99,99,184,9,255,104,98,246,102,108,85,7,252,51,21,214,132,35,158,2,38,112,107,69,195,65,114,145,245,183,172,194,211,57,80,82,17,161,138,214,89,21,244,44,6,173,6,242,62,51,254,238,79,87,6,12,210,73,180,68,244,119,54,206,136,162,78,107,80,251,5,29,192,174,93,179,175,68,217,8,246,220,217,160,21,208,74,126,225,227,25,1,0,3,59,172,224,22,174,10,65,231,169,237,9,168,91,33,85,109,38,187,242,242,75,76,32,165,75,187,165,27,95,83,162,158,25,1,0,5,237,36,132,158,202,168,131,171,106,32,214,79,172,224,148,150,15,71,73,102,217,162,19,183,2,117,192,112,196,76,181,34,161,61,185,77,114,162,95,100,135,66,67,175,119,110,6,244,73,213,91,169,221,83,157,81,206,111,89,151,62,178,167,63,16,226,11,189,169,125,149,14,110,8,62,221,87,116,233,142,217,139,253,153,16,9,1,186,154,222,88,248,170,108,168,43,242,42,43,72,15,245,221,236,232,166,232,99,81,164,123,16,213,143,51,128,251,219,183,9,1,103,185,128,156,225,233,200,126,96,129,32,179,163,131,84,200,153,155,236,34,245,43,19,243,165,109,226,10,22,113,50,131,9,1,203,224,11,159,230,121,179,34,119,46,123,13,250,7,202,214,183,18,124,144,172,158,237,255,172,53,228,144,236,81,142,168,161,44,60,95,185,9,118,123,106,246,85,186,215,47,93,102,56,245,245,208,160,183,144,135,107,116,64,90,68,61,138,52,178,244,96,20,237,96,5,52,90,158,129,172,204,39,175,55,18,74,73,29,222,9,255,36,49,6,86,93,12,79,206,248,151,94,121,177,178,35,12,1,159,78,58,178,122,63,78,124,169,48,107,159,98,153,132,161,138,214,89,21,244,44,6,173,6,242,62,51,254,238,79,87,6,12,210,73,208,111,9,167,39,114,89,78,179,210,171,35,115,181,211,197,236,176,132,184,74,77,237,45,48,18,241,69,222,221,138,25,161,93,224,137,41,163,192,131,82,50,167,205,32,29,51,131,23,202,171,216,115,199,117,193,115,55,85,171,245,34,173,219,214,151,226,215,10,91,97,70,75,209,104,27,41,137,81,196,246,13,142,199,12,25,1,0,4,113,24,36,136,41,22,138,100,28,59,149,105,31,231,215,27,33,193,211,238,215,254,44,202,236,107,125,180,46,38,146,200,25,1,0,2,38,182,122,48,1,162,205,218,95,52,172,146,222,81,199,193,42,178,228,105,133,88,214,83,137,237,66,230,119,250,5,85,9,255,253,39,5,238,88,207,60,229,238,92,28,224,63,70,109,126,152,54,188,71,18,186,162,153,21,61,132,71,202,121,113,207,161,138,214,89,21,244,44,6,173,6,242,62,51,254,238,79,87,6,12,210,73,191,14,175,103,174,227,73,235,177,89,118,163,111,237,172,26,74,6,101,80,58,211,113,127,243,254,106,224,120,179,101,156,161,76,72,189,176,20,90,137,189,135,248,136,79,63,168,193,228,45,88,94,117,184,2,226,247,10,56,240,40,136,100,146,139,246,198,195,194,209,164,217,45,202,10,147,86,175,254,198,249,92,64,121,164,0,1,0,0,253,83,6,143,195,92,106,35,254,224,103,188,253,63,192,216,128,186,79,121,216,182,90,175,191,240,47,48,93,168,74,2,141,105,206,208,33,164,189,140,91,38,36,168,242,80,217,18,184,248,245,157,129,85,249,94,94,229,138,101,38,9,1,47,241,202,110,153,206,246,252,9
2,214,119,95,159,94,245,61,243,40,240,8,26,143,180,81,247,55,255,244,73,12,229,83,9,1,95,192,113,95,216,242,21,235,124,16,227,245,80,217,178,9,241,140,170,135,64,175,84,27,211,70,239,73,100,139,20,245,9,1,201,169,20,123,206,251,168,141,33,64,175,106,246,185,19,185,53,101,125,53,5,87,5,184,7,21,91,61,208,130,42,131,9,1,183,44,52,109,222,204,99,77,172,182,15,29,40,214,131,168,39,33,227,213,36,163,61,162,168,47,3,62,136,241,101,126,9,1,133,79,165,174,6,191,41,30,209,5,109,104,28,93,197,246,247,13,23,242,234,3,204,110,233,229,198,255,131,62,203,105,0,9,36,146,140,19,119,166,207,36,195,156,45,70,248,235,157,242,62,129,27,38,220,53,39,229,72,57,111,212,225,115,177,211,102,235,93,180,24,37,200,29,129,191,72,73,93,114,116,50,181,244,253,225,248,223,46,101,251,180,223,113,77,242,139,0,70,112,11,77,64,172,92,53,175,44,34,221,162,120,122,145,235,86,123,6,201,36,168,251,138,233,160,91,32,192,140,33,228,96,52,194,207,181,181,131,126,57,95,233,204,152,190,4,82,34,235,53,200,202,40,109,252,73,189,213,239,94,126,130,9,1,27,8,240,147,212,200,27,37,231,124,191,110,45,189,91,214,149,171,253,138,221,47,115,230,14,214,92,143,87,109,114,128,17,1,249,248,63,134,138,17,62,7,250,227,100,52,50,139,214,30,153,110,204,16,117,222,9,119,59,220,202,187,15,30,237,162,217,9,1,207,50,50,89,38,214,97,46,146,127,167,239,70,37,230,216,37,111,63,130,63,184,65,242,102,240,65,120,90,218,241,226,9,1,247,124,190,104,95,142,126,239,68,219,69,165,161,237,129,135,165,5,236,239,227,84,140,240,18,4,129,67,95,125,116,254,0,70,112,11,77,64,172,92,53,175,44,34,221,162,120,122,145,235,86,123,6,201,36,168,251,138,233,160,91,32,192,140,33,64,31,45,164,25,35,131,214,111,103,185,66,123,36,77,209,130,54,238,77,124,250,76,42,126,68,137,156,53,223,112,84,9,1,172,109,18,138,162,172,98,227,191,233,228,200,186,6,38,31,205,90,238,83,85,200,140,40,95,174,70,100,236,184,92,217,161,244,45,89,208,100,220,62,250,92,105,132,16,63,83,84,164,87,143,157,56,238,206,71,73,61,115,66,84,21,49,226,43,98,209,124,67,230,245,74,241,47,105,36,12,239,120,5,217,170,54,156,84,161,240,229,12,107,226,171,19,248,82,37,157,153,49,126,15,161,81,30,210,115,133,159,227,100,212,172,149,230,75,232,210,108,56,145,60,23,37,166,185,84,193,191,193,253,113,198,103,19,58,1,211,88,161,138,214,89,21,244,44,6,173,6,242,62,51,254,238,79,87,6,12,210,73,244,204,176,178,59,175,3,143,139,2,242,240,21,87,122,194,191,65,151,96,89,50,229,228,174,155,172,240,102,252,221,88,161,54,97,92,243,73,215,246,52,72,145,177,231,202,124,114,136,63,93,192,73,97,131,206,41,240,31,150,151,163,154,135,110,104,89,178,252,214,86,245,40,217,82,157,194,186,14,137,246,116,87,3,221,0,88,209,228,65,175,80,39,254,110,76,107,116,157,222,72,114,28,239,59,179,26,8,211,214,75,156,110,156,114,90,188,114,36,198,234,1,57,238,186,239,33,185,70,78,68,110,74,247,188,177,180,151,164,216,15,115,133,254,13,247,190,87,17,67,0,232,148,145,23,177,217,122,193,138,141,18,36,190,30,64,69,198,123,105,131,146,212,237,192,61,179,237,70,185,233,70,115,24,243,123,134,88,68,215,142,127,133,2,149,85,85,176,160,214,111,3,112,143,142,78,137,211,79,138,29,6,66,209,69,161,250,30,45,145,12,250,62,92,70,95,125,127,105,194,36,213,66,224,165,152,168,81,60,218,234,227,67,148,118,29,59,147,53,55,78,40,8,227,39,217,122,18,110,222,78,162,140,204,238,55,6,95,41,25,0,0,0,0,144,32,12,17,126,234,225,99,200,138,138,108,231,51,212,1,171,8,94,147,139,188,115,131,162,159,107,192,34,19,171,180,161,151,178,33,144,104,181,16,79,208,222,10,42,70,102,179,246,243,151,172,167,86,83,107,175,210,186,181,198,128,36,151,56,29,81,196,245,31,168,78,1,68,190,24,94,31,195,247,20,1
22,219,85,214,0,172,224,132,217,231,157,205,174,1,20,9,234,148,84,215,130,24,9,46,11,24,156,214,165,23,59,68,102,116,0,213,17,230,13,150,23,44,152,198,242,109,118,74,176,93,184,102,158,85,104,138,31,78,160,214,75,29,223,239,114,74,97,156,59,161,94,69,18,60,117,174,107,34,34,42,46,244,10,252,240,128,232,79,90,245,105,207,185,63,169,21,50,218,22,157,164,50,155,208,136,29,218,73,246,12,13,209,254,95,239,141,35,221,253,207,221,212,9,1,248,135,166,243,99,75,238,65,244,69,142,255,92,110,93,81,203,0,116,149,135,131,96,149,14,49,60,161,204,107,128,214,10,1,164,102,179,128,123,95,250,209,85,215,47,223,202,234,9,179,137,135,46,66,252,162,20,55,210,106,243,173,46,40,178,89,161,54,97,92,243,73,215,246,52,72,145,177,231,202,124,114,136,63,93,192,73,156,179,78,101,253,32,134,94,34,150,203,47,196,201,136,120,12,142,64,149,165,101,204,29,186,80,109,39,8,84,79,232,161,128,2,205,152,207,181,99,73,42,111,179,231,200,36,59,123,154,212,204,146,140,190,129,206,40,197,26,241,53,110,14,80,134,5,37,204,178,45,166,159,129,221,136,86,105,96,102,217,243,16,89,249,161,104,60,252,156,182,35,11,128,137,157,188,182,24,21,145,214,144,137,216,164,177,52,180,226,24,99,117,67,64,64,241,12,41,231,167,74,209,204,218,129,255,34,102,39,251,93,142,41,145,92,203,50,161,138,214,89,21,244,44,6,173,6,242,62,51,254,238,79,87,6,12,210,73,164,143,172,103,50,238,187,229,211,197,117,198,212,234,78,82,14,197,48,46,66,117,89,74,193,245,113,83,209,177,183,217,161,81,125,159,102,197,100,210,237,244,60,182,87,66,79,114,241,194,13,59,255,230,154,81,238,249,118,81,47,67,167,51,81,39,137,145,76,187,230,155,155,70,102,153,242,27,165,84,224,218,253,106,44,9,255,60,175,19,171,24,127,90,244,102,215,109,52,163,108,87,248,78,75,87,18,254,157,225,108,251,34,166,46,135,226,155,114,9,1,137,66,124,98,185,206,118,62,215,166,225,249,150,252,161,203,0,217,115,193,56,251,207,17,2,102,50,45,221,187,226,50,177,93,224,137,41,163,192,131,82,50,167,205,32,29,51,131,23,202,171,216,115,0,2,86,146,226,162,251,7,31,172,102,81,196,200,22,126,55,247,235,106,113,202,76,139,128,246,85,114,151,178,101,95,159,191,161,51,139,210,222,212,86,156,86,143,33,218,23,74,206,199,11,130,110,85,12,238,250,55,221,42,57,86,177,201,103,75,239,3,21,25,161,11,112,13,117,46,113,1,71,100,17,102,72,32,125,98,78,0,1,0,0,253,83,6,143,195,92,106,35,254,224,103,188,253,63,192,216,128,186,79,121,216,182,90,175,191,240,47,48,93,22,155,114,188,54,101,62,87,11,121,52,68,25,197,106,219,1,155,20,238,224,223,45,229,125,209,123,117,113,166,132,12,161,220,14,214,252,191,229,28,159,132,166,98,230,79,209,52,119,54,170,116,134,81,122,205,57,136,220,234,247,156,218,204,189,130,97,114,48,48,160,227,48,48,64,28,58,212,244,195,137,255,228,64,41,161,128,2,205,152,207,181,99,73,42,111,179,231,200,36,59,123,154,212,204,146,237,236,215,194,80,85,137,94,24,27,243,41,116,140,187,70,33,135,209,48,214,17,9,198,53,117,79,21,244,235,240,208,9,255,219,215,135,89,179,97,218,60,174,210,247,236,175,60,97,114,55,143,26,104,199,6,53,175,153,170,254,4,26,49,33,168,161,54,97,92,243,73,215,246,52,72,145,177,231,202,124,114,136,63,93,192,73,131,75,158,95,145,124,241,215,162,81,17,8,190,214,110,90,15,123,1,214,244,31,200,40,196,119,19,72,17,44,27,219,161,44,60,95,185,9,118,123,106,246,85,186,215,47,93,102,56,245,245,208,160,62,110,21,209,63,190,73,44,77,121,157,143,198,176,46,157,199,11,251,128,18,248,171,99,94,148,201,218,67,21,70,232,9,255,16,46,124,130,188,155,165,96,66,61,124,176,157,94,180,222,164,199,68,147,148,121,54,59,60,181,162,4,74,28,114,103,9,1,0,0,0,60,9,235 + ], + "state_diffs_hash": 
"0xc83cac9cd98a4216cbc0d0830e63c4956e4a1c45c122ebbc88af7ea3b496c406", + "aux_commitments": { + "events_queue_commitment": "0xec82208c87a937d88768a0067b2a80f0525eca8288dad2cf96cf8bbe6a1aa565", + "bootloader_initial_content_commitment": "0x97df88dcecbcd29b49773c042cdee7a44c57a741e64913fff5aa1b3484232f28" + }, + "blob_hashes": [ + { + "commitment": "0xf840cf3f6b7dc92729b2b9ef3b399e7b896d553b746362fe81c4eb911013570d", + "linear_hash": "0xff4feb4bef9401731ab9db3626c2e015baa6880d7b1c4382d03b30da3a0fd75e" + }, + { + "commitment": "0x0000000000000000000000000000000000000000000000000000000000000000", + "linear_hash": "0x0000000000000000000000000000000000000000000000000000000000000000" + }, + { + "commitment": "0x0000000000000000000000000000000000000000000000000000000000000000", + "linear_hash": "0x0000000000000000000000000000000000000000000000000000000000000000" + }, + { + "commitment": "0x0000000000000000000000000000000000000000000000000000000000000000", + "linear_hash": "0x0000000000000000000000000000000000000000000000000000000000000000" + }, + { + "commitment": "0x0000000000000000000000000000000000000000000000000000000000000000", + "linear_hash": "0x0000000000000000000000000000000000000000000000000000000000000000" + }, + { + "commitment": "0x0000000000000000000000000000000000000000000000000000000000000000", + "linear_hash": "0x0000000000000000000000000000000000000000000000000000000000000000" + }, + { + "commitment": "0x0000000000000000000000000000000000000000000000000000000000000000", + "linear_hash": "0x0000000000000000000000000000000000000000000000000000000000000000" + }, + { + "commitment": "0x0000000000000000000000000000000000000000000000000000000000000000", + "linear_hash": "0x0000000000000000000000000000000000000000000000000000000000000000" + }, + { + "commitment": "0x0000000000000000000000000000000000000000000000000000000000000000", + "linear_hash": "0x0000000000000000000000000000000000000000000000000000000000000000" + }, + { + "commitment": "0x0000000000000000000000000000000000000000000000000000000000000000", + "linear_hash": "0x0000000000000000000000000000000000000000000000000000000000000000" + }, + { + "commitment": "0x0000000000000000000000000000000000000000000000000000000000000000", + "linear_hash": "0x0000000000000000000000000000000000000000000000000000000000000000" + }, + { + "commitment": "0x0000000000000000000000000000000000000000000000000000000000000000", + "linear_hash": "0x0000000000000000000000000000000000000000000000000000000000000000" + }, + { + "commitment": "0x0000000000000000000000000000000000000000000000000000000000000000", + "linear_hash": "0x0000000000000000000000000000000000000000000000000000000000000000" + }, + { + "commitment": "0x0000000000000000000000000000000000000000000000000000000000000000", + "linear_hash": "0x0000000000000000000000000000000000000000000000000000000000000000" + }, + { + "commitment": "0x0000000000000000000000000000000000000000000000000000000000000000", + "linear_hash": "0x0000000000000000000000000000000000000000000000000000000000000000" + }, + { + "commitment": "0x0000000000000000000000000000000000000000000000000000000000000000", + "linear_hash": "0x0000000000000000000000000000000000000000000000000000000000000000" + } + ], + "aggregation_root": "0x0924928c1377a6cf24c39c2d46f8eb9df23e811b26dc3527e548396fd4e173b1", + "local_root": "0xd4790efa9052ea67dcb473de870e3522e2fc340374e6293ad4646fde312c8c76" + } + }, + "meta_parameters": { + "zkporter_is_available": false, + "bootloader_code_hash": 
"0x010008c753336bc8d1ddca235602b9f31d346412b2d463cd342899f7bfb73baf", + "default_aa_code_hash": "0x0100055d760f11a3d737e7fd1816e600a4cd874a9f17f7a225d1f1c537c51a1e", + "protocol_version": "Version27" + }, + "pass_through_data": { + "shared_states": [ + { + "last_leaf_index": 212, + "root_hash": "0x0332d2acc43785a44b2b84fc010372c8f3e4ff4d0ca5f312de142ffe74189500" + }, + { + "last_leaf_index": 0, + "root_hash": "0x0000000000000000000000000000000000000000000000000000000000000000" + } + ] + }, + "input": { + "PostBoojum": { + "common": { + "l2_to_l1_logs": [ + { + "shard_id": 0, + "is_service": true, + "tx_number_in_block": 0, + "sender": "0x0000000000000000000000000000000000008001", + "key": "0x91ac392a7af99b6df974efe2d6b40e35dc79156fa3b75ea257df4976da0c26e8", + "value": "0x0000000000000000000000000000000000000000000000000000000000000001" + }, + { + "shard_id": 0, + "is_service": true, + "tx_number_in_block": 1, + "sender": "0x0000000000000000000000000000000000008001", + "key": "0xa088c0c1710f2244aad45e356742e7ac7773a153cf23db6cec4ded7e8da05d69", + "value": "0x0000000000000000000000000000000000000000000000000000000000000001" + }, + { + "shard_id": 0, + "is_service": true, + "tx_number_in_block": 2, + "sender": "0x0000000000000000000000000000000000008001", + "key": "0xccdf8bf8f4bf596f8fbb7ed18d67ef6208919707de498d0cae4d2c01c50e2305", + "value": "0x0000000000000000000000000000000000000000000000000000000000000001" + }, + { + "shard_id": 0, + "is_service": true, + "tx_number_in_block": 3, + "sender": "0x0000000000000000000000000000000000008001", + "key": "0xe43e076103a2a867921c20b43365e7729003f1a00558c3dc51b31b25c90b2b2a", + "value": "0x0000000000000000000000000000000000000000000000000000000000000001" + }, + { + "shard_id": 0, + "is_service": true, + "tx_number_in_block": 4, + "sender": "0x0000000000000000000000000000000000008001", + "key": "0x3eba0d3506eba94a8b328b82b3a32623c2ef77e253bfbb82d2667b163c8714c7", + "value": "0x0000000000000000000000000000000000000000000000000000000000000001" + }, + { + "shard_id": 0, + "is_service": true, + "tx_number_in_block": 5, + "sender": "0x0000000000000000000000000000000000008001", + "key": "0xdaf8935b934fe9513d112e3ca61a728dbfae2fdb5ea1edf8e6f56b8283aa4cd8", + "value": "0x0000000000000000000000000000000000000000000000000000000000000001" + }, + { + "shard_id": 0, + "is_service": true, + "tx_number_in_block": 6, + "sender": "0x0000000000000000000000000000000000008001", + "key": "0x5527767da575eb332ed996603486300af0f55116f2a177b0c05ed31518a23d77", + "value": "0x0000000000000000000000000000000000000000000000000000000000000001" + }, + { + "shard_id": 0, + "is_service": true, + "tx_number_in_block": 7, + "sender": "0x0000000000000000000000000000000000008001", + "key": "0x15f4d69332763eaaf4d80e05e88b04d45d38d0854381f58e4c01688496e03f63", + "value": "0x0000000000000000000000000000000000000000000000000000000000000001" + }, + { + "shard_id": 0, + "is_service": true, + "tx_number_in_block": 8, + "sender": "0x0000000000000000000000000000000000008001", + "key": "0x96a04ccc56dc1cea06a00fe97af3231766aee89c948c86f0c16eeebcdddc0aa3", + "value": "0x0000000000000000000000000000000000000000000000000000000000000001" + }, + { + "shard_id": 0, + "is_service": true, + "tx_number_in_block": 9, + "sender": "0x0000000000000000000000000000000000008001", + "key": "0x886292e17714665013e6c7fc19b12b15a69676394ec88ceb5d1698a0b198a7dd", + "value": "0x0000000000000000000000000000000000000000000000000000000000000001" + }, + { + "shard_id": 0, + "is_service": true, + "tx_number_in_block": 10, + 
"sender": "0x0000000000000000000000000000000000008001", + "key": "0x9e84bba4d8497ea90d8c5513063912bdbd9cc85ac68185ee1715a7b15ca01f17", + "value": "0x0000000000000000000000000000000000000000000000000000000000000001" + }, + { + "shard_id": 0, + "is_service": true, + "tx_number_in_block": 11, + "sender": "0x0000000000000000000000000000000000008001", + "key": "0x3795b197a06415b6b00d2bde238a25741ecc9791269d899c97ff983d88dcd5e6", + "value": "0x0000000000000000000000000000000000000000000000000000000000000001" + }, + { + "shard_id": 0, + "is_service": true, + "tx_number_in_block": 12, + "sender": "0x0000000000000000000000000000000000008001", + "key": "0x0e5be5348f9a9fd4e936c4fad6793e0e4395f5642d1b5f9a08e1a3703226f8ef", + "value": "0x0000000000000000000000000000000000000000000000000000000000000001" + }, + { + "shard_id": 0, + "is_service": true, + "tx_number_in_block": 13, + "sender": "0x0000000000000000000000000000000000008001", + "key": "0xec3e691650319cdf9fbc5410f388ea24f2c9325b0d7b4ce37db2a1c5957bd86b", + "value": "0x0000000000000000000000000000000000000000000000000000000000000001" + }, + { + "shard_id": 0, + "is_service": true, + "tx_number_in_block": 14, + "sender": "0x0000000000000000000000000000000000008001", + "key": "0xfa448e8ac5560693b992b70fae5e08f3e9cae510c8e1fa69d2c212dd1811bf05", + "value": "0x0000000000000000000000000000000000000000000000000000000000000001" + }, + { + "shard_id": 0, + "is_service": true, + "tx_number_in_block": 15, + "sender": "0x0000000000000000000000000000000000008001", + "key": "0x6c5a74345d321eb4edebdf43f42a11bc409613a9b92cbfe96730498217b12d43", + "value": "0x0000000000000000000000000000000000000000000000000000000000000001" + }, + { + "shard_id": 0, + "is_service": true, + "tx_number_in_block": 16, + "sender": "0x0000000000000000000000000000000000008001", + "key": "0x7912d592b280f7f7a5d04c68eaddae09b518816a0a6d97bc89b143ae3109e78f", + "value": "0x0000000000000000000000000000000000000000000000000000000000000001" + }, + { + "shard_id": 0, + "is_service": true, + "tx_number_in_block": 17, + "sender": "0x0000000000000000000000000000000000008001", + "key": "0x3c1fad3b48be6cb9503048860557f3ef99dccdf1f51dfbf26570f630469b1a98", + "value": "0x0000000000000000000000000000000000000000000000000000000000000001" + }, + { + "shard_id": 0, + "is_service": true, + "tx_number_in_block": 18, + "sender": "0x0000000000000000000000000000000000008001", + "key": "0xb7e755892fbe6870e93cbd3c0945d772e141b94ee50aa75a2d7bb7219fb53266", + "value": "0x0000000000000000000000000000000000000000000000000000000000000001" + }, + { + "shard_id": 0, + "is_service": true, + "tx_number_in_block": 19, + "sender": "0x0000000000000000000000000000000000008001", + "key": "0xb81f1f0fbe80e956956132771d1a99c35bd52856adbf932cc061d3980a79c124", + "value": "0x0000000000000000000000000000000000000000000000000000000000000000" + }, + { + "shard_id": 0, + "is_service": true, + "tx_number_in_block": 20, + "sender": "0x0000000000000000000000000000000000008001", + "key": "0x8f5a7c0d48c9b82137c446c9db31ce5ef4e1a30166dd3ae09580c33595bbe2b7", + "value": "0x0000000000000000000000000000000000000000000000000000000000000001" + }, + { + "shard_id": 0, + "is_service": true, + "tx_number_in_block": 21, + "sender": "0x0000000000000000000000000000000000008001", + "key": "0x2f6516d033cfa362a407a7d2d2279c62fa185eaae6742bc6f51fdcb51606094e", + "value": "0x0000000000000000000000000000000000000000000000000000000000000001" + }, + { + "shard_id": 0, + "is_service": true, + "tx_number_in_block": 22, + "sender": 
"0x0000000000000000000000000000000000008001", + "key": "0x82eb8a4152ff724ef814c3ddacea2a65e6e6d09a00d72e57fff9e12b7857461d", + "value": "0x0000000000000000000000000000000000000000000000000000000000000001" + }, + { + "shard_id": 0, + "is_service": true, + "tx_number_in_block": 23, + "sender": "0x0000000000000000000000000000000000008001", + "key": "0x08a95d3f4505e0e3fb90a2002a81750c0bae658a5d4a290acaeacdfc2691560a", + "value": "0x0000000000000000000000000000000000000000000000000000000000000001" + }, + { + "shard_id": 0, + "is_service": true, + "tx_number_in_block": 24, + "sender": "0x0000000000000000000000000000000000008001", + "key": "0xb8c38e08db553411378fc77ca81f20da7d5b1be77fb316393e33bfe0c08565dd", + "value": "0x0000000000000000000000000000000000000000000000000000000000000001" + }, + { + "shard_id": 0, + "is_service": true, + "tx_number_in_block": 25, + "sender": "0x0000000000000000000000000000000000008001", + "key": "0xd54d7593c4d133e4903becb318f109246537ddab2646148ac51ac7c94e25ef8c", + "value": "0x0000000000000000000000000000000000000000000000000000000000000001" + }, + { + "shard_id": 0, + "is_service": true, + "tx_number_in_block": 26, + "sender": "0x0000000000000000000000000000000000008001", + "key": "0x3e86b6ddb211d47e057c4e246810e2dbb10061c2679e52ae7e4b647c9c98bf08", + "value": "0x0000000000000000000000000000000000000000000000000000000000000001" + }, + { + "shard_id": 0, + "is_service": true, + "tx_number_in_block": 27, + "sender": "0x0000000000000000000000000000000000008001", + "key": "0xe1cee6c0528143fa82ff667c9655d2d775dccdb4204791956096a6225059c9b8", + "value": "0x0000000000000000000000000000000000000000000000000000000000000001" + } + ], + "rollup_last_leaf_index": 212, + "rollup_root_hash": "0x0332d2acc43785a44b2b84fc010372c8f3e4ff4d0ca5f312de142ffe74189500", + "bootloader_code_hash": "0x010008c753336bc8d1ddca235602b9f31d346412b2d463cd342899f7bfb73baf", + "default_aa_code_hash": "0x0100055d760f11a3d737e7fd1816e600a4cd874a9f17f7a225d1f1c537c51a1e", + "protocol_version": "Version27" + }, + "system_logs": [ + { + "shard_id": 0, + "is_service": false, + "tx_number_in_block": 0, + "sender": "0x000000000000000000000000000000000000800b", + "key": "0x0000000000000000000000000000000000000000000000000000000000000002", + "value": "0xf9030b78c5bf5ac997a76962aa32c90a6d8e8ebce9838c8eeb388d73e1f7659a" + }, + { + "shard_id": 0, + "is_service": true, + "tx_number_in_block": 0, + "sender": "0x0000000000000000000000000000000000008001", + "key": "0x0000000000000000000000000000000000000000000000000000000000000007", + "value": "0x91ac392a7af99b6df974efe2d6b40e35dc79156fa3b75ea257df4976da0c26e8" + }, + { + "shard_id": 0, + "is_service": false, + "tx_number_in_block": 28, + "sender": "0x000000000000000000000000000000000000800b", + "key": "0x0000000000000000000000000000000000000000000000000000000000000001", + "value": "0x00000000000000000000000066fc024100000000000000000000000066fc0242" + }, + { + "shard_id": 0, + "is_service": true, + "tx_number_in_block": 28, + "sender": "0x0000000000000000000000000000000000008001", + "key": "0x0000000000000000000000000000000000000000000000000000000000000003", + "value": "0x190bda1fde651ac21cf771cb9f125f486678abbab229cce182a7c9a07361afbe" + }, + { + "shard_id": 0, + "is_service": true, + "tx_number_in_block": 28, + "sender": "0x0000000000000000000000000000000000008001", + "key": "0x0000000000000000000000000000000000000000000000000000000000000004", + "value": "0x000000000000000000000000000000000000000000000000000000000000001b" + }, + { + "shard_id": 0, + 
"is_service": true, + "tx_number_in_block": 28, + "sender": "0x0000000000000000000000000000000000008008", + "key": "0x0000000000000000000000000000000000000000000000000000000000000000", + "value": "0x38eaeef3afe69b6f6b2fa22c92da8137f1e405a1e1861b7de7cfa30c7d7462dd" + }, + { + "shard_id": 0, + "is_service": true, + "tx_number_in_block": 28, + "sender": "0x0000000000000000000000000000000000008008", + "key": "0x0000000000000000000000000000000000000000000000000000000000000006", + "value": "0x000000000000000000000000cc4b013229ffd6cb5eae5876251874172cafed0a" + }, + { + "shard_id": 0, + "is_service": true, + "tx_number_in_block": 28, + "sender": "0x0000000000000000000000000000000000008008", + "key": "0x0000000000000000000000000000000000000000000000000000000000000005", + "value": "0x335f4f11c3e55bb502bcbdedfd8e63b8e5c84bea465c984a5c664a8eca7d4a7a" + } + ], + "state_diffs": [ + { + "address": "0x0000000000000000000000000000000000008002", + "key": "0x3e013dc3eb10bbd48a8f9c94758f04d081563b6", + "derived_key": [ + 112,120,89,162,183,230,11,175,17,100,223,232,175,83,47,195,198,157,29,129,145,197,186,61,127,17,109,250,141,181,206,45 + ], + "enumeration_index": 0, + "initial_value": "0x0", + "final_value": "0x10001430633c573868f335e3106fc558badc506762eb8184ef9ce785def6ece" + }, + { + "address": "0x0000000000000000000000000000000000008002", + "key": "0x868a8819e738818dabe8bfb671ae8e027372dd7", + "derived_key": [ + 130,208,215,121,46,249,196,126,160,123,216,26,86,45,8,246,35,74,8,171,141,141,223,145,137,150,142,180,236,158,154,37 + ], + "enumeration_index": 0, + "initial_value": "0x0", + "final_value": "0x1000ecfaeb837bd098bcf9bde6fc2ccd8e8a9355a1b70e601ac18cd089eb308" + }, + { + "address": "0x0000000000000000000000000000000000008002", + "key": "0x9af32a0f1b0914742c84d68795a9be9abd6bbd5", + "derived_key": [ + 246,11,47,22,184,171,230,29,125,57,179,213,44,191,157,128,184,167,253,5,55,217,60,33,8,75,147,188,5,4,171,60 + ], + "enumeration_index": 0, + "initial_value": "0x0", + "final_value": "0x10000c328f328dd820a1dd698047a7f7d49874d8259196e273517430af8f480" + }, + { + "address": "0x0000000000000000000000000000000000008002", + "key": "0x22acca3c358a523c1ecbf1491d131a597aada298", + "derived_key": [ + 203,204,98,199,195,136,172,152,215,47,208,131,209,215,32,206,186,255,203,162,198,108,114,94,200,185,197,197,240,116,111,138 + ], + "enumeration_index": 0, + "initial_value": "0x0", + "final_value": "0x100005b8e587974043d59bffbf632d020e764959abe62e4c238d8df2e62b2b5" + }, + { + "address": "0x0000000000000000000000000000000000008002", + "key": "0x2c3c5fb909767b6af655bad72f5d6638f5f5d0a0", + "derived_key": [ + 235,143,74,199,189,78,241,151,159,154,102,86,114,178,92,208,123,30,61,99,122,89,162,199,107,26,34,232,91,117,146,65 + ], + "enumeration_index": 0, + "initial_value": "0x0", + "final_value": "0x10001430633c573868f335e3106fc558badc506762eb8184ef9ce785def6ece" + }, + { + "address": "0x0000000000000000000000000000000000008002", + "key": "0x30439cdc8796fb3cecb53f4bf5b133f581b5b40f", + "derived_key": [ + 66,202,106,148,168,163,117,186,10,227,150,70,185,29,164,88,23,175,73,33,116,119,174,107,73,193,3,53,191,78,11,115 + ], + "enumeration_index": 0, + "initial_value": "0x0", + "final_value": "0x10001b37e5f8cafac924b3e663779e12c80dace8a02b1d273ae708f275ad62a" + }, + { + "address": "0x0000000000000000000000000000000000008002", + "key": "0x338bd2ded4569c568f21da174acec70b826e550c", + "derived_key": [ + 71,164,52,102,233,54,181,135,193,191,158,16,243,13,105,123,17,66,228,89,233,6,51,219,18,27,114,127,245,180,121,43 + ], + 
"enumeration_index": 0, + "initial_value": "0x0", + "final_value": "0x1000067554f53c6660e53bd9729f00aee0189010dfeb868fa3cbd481232480c" + }, + { + "address": "0x0000000000000000000000000000000000008002", + "key": "0x48a6f6788413af58f1bdf8c963cb67a4346f5fd8", + "derived_key": [ + 95,233,138,250,154,14,193,124,34,99,2,123,232,159,70,229,238,83,58,30,33,169,31,255,76,177,68,204,74,239,33,188 + ], + "enumeration_index": 0, + "initial_value": "0x0", + "final_value": "0x10005b597eb02058b230cee6a9cdf4705410d151ad1ea6ad3e2de77841acfc2" + }, + { + "address": "0x0000000000000000000000000000000000008002", + "key": "0x4c48bdb0145a89bd87f8884f3fa8c1e42d585e75", + "derived_key": [ + 142,107,134,4,159,177,116,20,22,26,103,250,177,44,27,34,144,68,85,154,16,112,202,176,23,66,216,18,18,139,156,190 + ], + "enumeration_index": 0, + "initial_value": "0x0", + "final_value": "0x1000067554f53c6660e53bd9729f00aee0189010dfeb868fa3cbd481232480c" + }, + { + "address": "0x0000000000000000000000000000000000008002", + "key": "0x517d9f66c564d2edf43cb657424f72f1c20d3bff", + "derived_key": [ + 140,78,248,13,214,150,89,51,34,40,248,136,164,249,180,131,12,12,50,27,140,170,45,67,53,35,250,200,97,150,181,207 + ], + "enumeration_index": 0, + "initial_value": "0x0", + "final_value": "0x100048bf1b165f4c850d01eb81e5c040f70ea01dca019e780a87f80ec7897c3" + }, + { + "address": "0x0000000000000000000000000000000000008002", + "key": "0x55f6f01d04a21e76cbd2de9d4a9ff6ee9f8893a6", + "derived_key": [ + 235,96,109,232,32,223,19,108,89,177,255,121,225,223,146,96,66,108,164,143,175,146,74,70,53,36,33,31,183,195,161,165 + ], + "enumeration_index": 0, + "initial_value": "0x0", + "final_value": "0x1000201e923f3e47adfa7c4f0f61ea8dd4256f66d1099970bcd314892864917" + }, + { + "address": "0x0000000000000000000000000000000000008002", + "key": "0x5caf5f2b06ca757c7cebacdcd6f163af45a6bb83", + "derived_key": [ + 95,127,60,252,81,90,137,254,204,8,97,6,46,254,57,252,48,104,36,27,116,128,26,163,170,147,245,100,172,98,242,54 + ], + "enumeration_index": 0, + "initial_value": "0x0", + "final_value": "0x100061dc40d0bc2cb81905e6bc1421ebd4bd8b79aacb8da4f9d7145b288674f" + }, + { + "address": "0x0000000000000000000000000000000000008002", + "key": "0x5e45123c75ae6b22222a2ef40afcf080e84f5af5", + "derived_key": [ + 207,14,96,187,125,119,111,198,230,184,241,1,19,161,190,119,25,192,44,34,151,163,108,216,124,11,59,35,121,140,74,95 + ], + "enumeration_index": 0, + "initial_value": "0x0", + "final_value": "0x100070f1cd4e2830607af30192743910fdfabf1dd004a8073944275819d1dfe" + }, + { + "address": "0x0000000000000000000000000000000000008002", + "key": "0x62af78a1a86d5fa8aa7b75ee1884d36ca3c3193a", + "derived_key": [ + 53,89,195,90,167,22,152,246,194,202,70,67,239,232,80,69,169,73,79,38,45,119,238,103,193,61,215,52,230,38,48,90 + ], + "enumeration_index": 0, + "initial_value": "0x0", + "final_value": "0x10001430633c573868f335e3106fc558badc506762eb8184ef9ce785def6ece" + }, + { + "address": "0x0000000000000000000000000000000000008002", + "key": "0x683cfc9cb6230b80899dbcb6181591d69089d8a4", + "derived_key": [ + 186,93,97,46,65,80,43,253,205,126,211,179,176,210,212,177,245,200,248,185,15,209,21,42,187,224,222,192,14,162,61,7 + ], + "enumeration_index": 0, + "initial_value": "0x0", + "final_value": "0x100048bf1b165f4c850d01eb81e5c040f70ea01dca019e780a87f80ec7897c3" + }, + { + "address": "0x0000000000000000000000000000000000008002", + "key": "0x7cf7e4a85a7a677f6a5b2fe169e6d5eef29219c5", + "derived_key": [ + 
44,61,141,160,220,54,28,84,148,218,146,175,212,98,94,116,25,190,241,121,131,189,209,145,214,33,89,62,212,173,57,47 + ], + "enumeration_index": 0, + "initial_value": "0x0", + "final_value": "0x1000323b7266314b9d3ec387d5bcd9590a50be00e788837ca5a880d97e3ee83" + }, + { + "address": "0x0000000000000000000000000000000000008002", + "key": "0x97b2219068b5104fd0de0a2a4666b3f6f397aca7", + "derived_key": [ + 48,100,113,86,255,138,164,229,100,8,99,14,34,251,194,115,119,250,250,242,7,188,204,248,210,254,18,115,9,165,229,233 + ], + "enumeration_index": 0, + "initial_value": "0x0", + "final_value": "0x1000215cd6f306d757d5d2fdbb460c60f298584179eecc07196c7ae8e4f8d64" + }, + { + "address": "0x0000000000000000000000000000000000008002", + "key": "0x9961618bfad393730ea065a18399303330f1395f", + "derived_key": [ + 200,51,40,178,38,74,180,112,167,221,220,163,38,200,255,61,159,78,76,252,60,226,78,168,221,216,201,180,12,20,188,185 + ], + "enumeration_index": 0, + "initial_value": "0x0", + "final_value": "0x1000201e923f3e47adfa7c4f0f61ea8dd4256f66d1099970bcd314892864917" + }, + { + "address": "0x0000000000000000000000000000000000008002", + "key": "0xcc4b013229ffd6cb5eae5876251874172cafed0a", + "derived_key": [ + 229,176,77,5,169,51,88,54,33,49,122,209,137,227,159,45,116,33,7,146,238,29,46,153,91,171,175,162,128,71,14,27 + ], + "enumeration_index": 0, + "initial_value": "0x0", + "final_value": "0x100015306fb0f0b5fde219996551c8072c6711bba5300b2669aeb0f0e4c7445" + }, + { + "address": "0x0000000000000000000000000000000000008002", + "key": "0xcc8e14c05825cde94522515a0303e4c2e07ca6f9", + "derived_key": [ + 250,253,202,115,87,157,171,40,23,48,73,193,157,78,81,69,162,232,29,120,68,42,125,135,121,254,156,149,143,198,173,119 + ], + "enumeration_index": 0, + "initial_value": "0x0", + "final_value": "0x1000035ba31ed16c21160780ac09763bfd593b1748f69e683a9fb10921aa49d" + }, + { + "address": "0x0000000000000000000000000000000000008002", + "key": "0xdb5b25a5c3ff135d39df0dd3417a6b26724d2b24", + "derived_key": [ + 188,67,139,78,87,51,241,113,6,164,3,59,144,48,243,76,127,49,1,147,79,102,218,253,36,37,149,91,92,247,35,64 + ], + "enumeration_index": 0, + "initial_value": "0x0", + "final_value": "0x10000ed306b29128e065afdce73ebc8e0ecd08cd918fdc0f74c03f64d515e48" + }, + { + "address": "0x0000000000000000000000000000000000008002", + "key": "0xdc0ed6fcbfe51c9f84a662e64fd1347736aa7486", + "derived_key": [ + 6,115,118,133,155,130,106,66,115,187,68,69,230,28,222,77,91,95,90,23,4,86,255,161,95,247,195,108,233,152,241,190 + ], + "enumeration_index": 0, + "initial_value": "0x0", + "final_value": "0x100013f4a2c964c71d49f2d8876a11eee4bf4e8d19231652fc775b2cae43a21" + }, + { + "address": "0x0000000000000000000000000000000000008002", + "key": "0xe024f9e4e8fa2f08f768c1cb56bc4a6e3cbd8834", + "derived_key": [ + 129,4,114,76,225,212,61,128,125,223,69,48,213,107,167,249,183,181,194,21,67,99,215,247,166,215,108,189,158,61,249,130 + ], + "enumeration_index": 0, + "initial_value": "0x0", + "final_value": "0x100006721ab911a41d8b52502ea4cdf42ec99e5e529be6a3e66f3adb2143c5a" + }, + { + "address": "0x0000000000000000000000000000000000008002", + "key": "0xe300eb4b0834a551cac3e93f30380643ce153408", + "derived_key": [ + 51,232,118,240,225,158,242,19,13,216,95,254,79,35,196,212,101,148,164,24,219,221,10,181,111,253,164,76,93,72,246,206 + ], + "enumeration_index": 0, + "initial_value": "0x0", + "final_value": "0x10005bb7471a0016b1c330f767a5e73ad0c330934694cad007c5aa3f926be65" + }, + { + "address": "0x0000000000000000000000000000000000008002", + "key": 
"0xf0e50c6be2ab13f852259d99317e0fa1511ed273", + "derived_key": [ + 25,143,99,94,50,239,35,14,215,12,184,219,25,32,81,51,246,142,27,126,246,157,133,33,13,119,172,197,111,163,43,234 + ], + "enumeration_index": 0, + "initial_value": "0x0", + "final_value": "0x1000515657ebe014d9151480712de7a108d9b0e1a7a798d3da2945ba53cd103" + }, + { + "address": "0x0000000000000000000000000000000000008002", + "key": "0xfa1e2d910cfa3e5c465f7d7f69c224d542e0a598", + "derived_key": [ + 90,251,120,52,143,255,91,253,53,60,239,129,160,65,213,230,214,195,241,114,123,145,145,220,232,75,132,91,7,118,101,237 + ], + "enumeration_index": 0, + "initial_value": "0x0", + "final_value": "0x10006311257c403bbba7e2aef0aa20822627c82ec7484fcb31b87dd8c582aa9" + }, + { + "address": "0x0000000000000000000000000000000000008003", + "key": "0x64534fbb7489d8b2e0974a2a70dee20ad40795d90f17c1a6d62ba36ea19e007", + "derived_key": [ + 240,163,222,200,2,37,101,9,35,172,42,74,77,142,96,167,8,137,208,171,61,234,142,107,218,41,37,203,138,127,216,252 + ], + "enumeration_index": 0, + "initial_value": "0x0", + "final_value": "0x200000000000000000000000000000000" + }, + { + "address": "0x0000000000000000000000000000000000008003", + "key": "0x1b458e5ab877fea2e4abf98d12b31ec3f7c93fd4856e807f684322e8cf11fdf7", + "derived_key": [ + 174,229,34,198,20,187,1,37,21,66,226,45,128,16,30,45,151,85,103,77,143,214,69,38,254,154,44,77,223,171,97,143 + ], + "enumeration_index": 0, + "initial_value": "0x0", + "final_value": "0x100000000000000000000000000000000" + }, + { + "address": "0x0000000000000000000000000000000000008003", + "key": "0x810ca1ae825b138452fb743e9948f909b6286cbfadd5a899190fcb21a75443ab", + "derived_key": [ + 0,159,186,133,75,226,253,235,173,50,111,19,111,136,219,244,177,114,214,77,28,237,51,180,171,99,164,148,28,226,73,151 + ], + "enumeration_index": 0, + "initial_value": "0x0", + "final_value": "0x700000000000000000000000000000000" + }, + { + "address": "0x0000000000000000000000000000000000008003", + "key": "0xe6d904d46c5d8b2934bf40eee45740c707124a9797010ceae3f79534391b6de5", + "derived_key": [ + 201,172,113,10,243,67,127,194,244,249,48,131,50,164,72,10,88,81,76,45,149,28,73,119,114,174,142,141,132,8,175,27 + ], + "enumeration_index": 0, + "initial_value": "0x0", + "final_value": "0x200000000000000000000000000000000" + }, + { + "address": "0x0000000000000000000000000000000000008003", + "key": "0xeaa2b2fbf0b42c559059e5e9510edc15755f1c1883f0e41d5ba5f9aea4ac201a", + "derived_key": [ + 182,81,125,81,147,30,201,86,98,178,2,213,133,189,82,214,234,207,27,118,113,82,28,46,150,32,45,104,62,223,226,99 + ], + "enumeration_index": 0, + "initial_value": "0x0", + "final_value": "0xe00000000000000000000000000000000" + }, + { + "address": "0x0000000000000000000000000000000000008004", + "key": "0x1000035ba31ed16c21160780ac09763bfd593b1748f69e683a9fb10921aa49d", + "derived_key": [ + 28,170,140,159,117,250,84,163,177,210,240,18,225,217,234,99,118,79,112,157,28,25,151,121,72,28,143,77,92,237,107,62 + ], + "enumeration_index": 0, + "initial_value": "0x0", + "final_value": "0x1" + }, + { + "address": "0x0000000000000000000000000000000000008004", + "key": "0x100005b8e587974043d59bffbf632d020e764959abe62e4c238d8df2e62b2b5", + "derived_key": [ + 45,86,211,71,72,250,222,240,196,161,223,115,65,15,173,85,177,255,211,89,90,168,146,255,238,205,12,128,137,196,203,27 + ], + "enumeration_index": 0, + "initial_value": "0x0", + "final_value": "0x1" + }, + { + "address": "0x0000000000000000000000000000000000008004", + "key": 
"0x100006721ab911a41d8b52502ea4cdf42ec99e5e529be6a3e66f3adb2143c5a", + "derived_key": [ + 12,138,2,247,115,213,230,144,244,253,93,195,182,20,84,243,71,244,71,24,244,103,128,231,65,233,198,173,128,126,246,169 + ], + "enumeration_index": 0, + "initial_value": "0x0", + "final_value": "0x1" + }, + { + "address": "0x0000000000000000000000000000000000008004", + "key": "0x1000067554f53c6660e53bd9729f00aee0189010dfeb868fa3cbd481232480c", + "derived_key": [ + 185,67,192,97,81,170,106,240,157,23,26,106,216,228,65,120,68,165,135,110,2,31,216,158,187,67,79,105,151,157,234,15 + ], + "enumeration_index": 0, + "initial_value": "0x0", + "final_value": "0x1" + }, + { + "address": "0x0000000000000000000000000000000000008004", + "key": "0x10000c328f328dd820a1dd698047a7f7d49874d8259196e273517430af8f480", + "derived_key": [ + 12,91,247,177,168,165,63,93,186,29,121,106,121,167,27,50,198,22,230,125,252,159,77,132,92,155,115,251,100,87,112,147 + ], + "enumeration_index": 0, + "initial_value": "0x0", + "final_value": "0x1" + }, + { + "address": "0x0000000000000000000000000000000000008004", + "key": "0x10000ed306b29128e065afdce73ebc8e0ecd08cd918fdc0f74c03f64d515e48", + "derived_key": [ + 127,216,219,101,110,162,74,65,114,24,131,123,143,204,15,44,36,72,8,136,170,255,39,231,108,143,128,71,65,95,117,4 + ], + "enumeration_index": 0, + "initial_value": "0x0", + "final_value": "0x1" + }, + { + "address": "0x0000000000000000000000000000000000008004", + "key": "0x10000fd53068fc35c6a23fee067bcfd3fc0d880ba4f79d8b65aafbff02f305d", + "derived_key": [ + 4,27,221,133,245,254,194,84,195,88,19,141,109,233,58,225,116,116,251,225,170,44,159,23,28,181,85,238,6,151,63,144 + ], + "enumeration_index": 0, + "initial_value": "0x0", + "final_value": "0x1" + }, + { + "address": "0x0000000000000000000000000000000000008004", + "key": "0x100013f4a2c964c71d49f2d8876a11eee4bf4e8d19231652fc775b2cae43a21", + "derived_key": [ + 130,114,163,11,234,79,36,77,175,198,107,147,58,183,234,134,122,178,61,205,225,34,184,146,138,50,221,70,198,19,2,191 + ], + "enumeration_index": 0, + "initial_value": "0x0", + "final_value": "0x1" + }, + { + "address": "0x0000000000000000000000000000000000008004", + "key": "0x10001430633c573868f335e3106fc558badc506762eb8184ef9ce785def6ece", + "derived_key": [ + 41,151,245,131,127,195,211,240,254,148,60,106,169,97,173,173,118,116,195,243,213,115,169,17,155,83,25,181,108,68,48,51 + ], + "enumeration_index": 0, + "initial_value": "0x0", + "final_value": "0x1" + }, + { + "address": "0x0000000000000000000000000000000000008004", + "key": "0x100015306fb0f0b5fde219996551c8072c6711bba5300b2669aeb0f0e4c7445", + "derived_key": [ + 236,254,112,114,24,207,172,186,132,163,119,198,20,66,226,51,51,142,39,212,88,198,68,118,92,136,138,204,26,155,11,165 + ], + "enumeration_index": 0, + "initial_value": "0x0", + "final_value": "0x1" + }, + { + "address": "0x0000000000000000000000000000000000008004", + "key": "0x10001b37e5f8cafac924b3e663779e12c80dace8a02b1d273ae708f275ad62a", + "derived_key": [ + 215,116,3,65,140,246,136,209,81,15,184,214,188,49,94,63,81,57,12,135,108,143,41,137,113,88,11,84,15,30,158,184 + ], + "enumeration_index": 0, + "initial_value": "0x0", + "final_value": "0x1" + }, + { + "address": "0x0000000000000000000000000000000000008004", + "key": "0x1000201e923f3e47adfa7c4f0f61ea8dd4256f66d1099970bcd314892864917", + "derived_key": [ + 50,203,140,60,152,140,103,117,103,130,42,29,70,236,110,49,211,18,247,40,117,35,54,107,171,190,233,18,117,69,68,43 + ], + "enumeration_index": 0, + "initial_value": "0x0", + "final_value": "0x1" + }, + 
{ + "address": "0x0000000000000000000000000000000000008004", + "key": "0x1000215cd6f306d757d5d2fdbb460c60f298584179eecc07196c7ae8e4f8d64", + "derived_key": [ + 95,50,92,228,81,22,16,190,182,42,66,158,131,165,204,25,25,20,143,210,29,170,143,129,94,111,129,132,227,28,102,180 + ], + "enumeration_index": 0, + "initial_value": "0x0", + "final_value": "0x1" + }, + { + "address": "0x0000000000000000000000000000000000008004", + "key": "0x100023dc5e29b1af44a05d231db67a62a8bfd0c06217caa29b061daa7f2913f", + "derived_key": [ + 96,153,155,64,152,142,147,161,102,88,190,194,238,147,14,243,71,26,33,184,193,50,249,29,88,2,52,157,179,7,69,77 + ], + "enumeration_index": 0, + "initial_value": "0x0", + "final_value": "0x1" + }, + { + "address": "0x0000000000000000000000000000000000008004", + "key": "0x1000323b7266314b9d3ec387d5bcd9590a50be00e788837ca5a880d97e3ee83", + "derived_key": [ + 212,255,51,143,196,70,53,156,98,221,171,235,82,21,252,198,242,28,2,246,195,67,6,91,2,240,95,173,200,49,66,89 + ], + "enumeration_index": 0, + "initial_value": "0x0", + "final_value": "0x1" + }, + { + "address": "0x0000000000000000000000000000000000008004", + "key": "0x100048bf1b165f4c850d01eb81e5c040f70ea01dca019e780a87f80ec7897c3", + "derived_key": [ + 58,212,15,123,117,242,193,79,217,63,177,112,56,232,153,140,93,188,111,168,108,138,82,113,212,107,209,150,246,205,191,157 + ], + "enumeration_index": 0, + "initial_value": "0x0", + "final_value": "0x1" + }, + { + "address": "0x0000000000000000000000000000000000008004", + "key": "0x1000515657ebe014d9151480712de7a108d9b0e1a7a798d3da2945ba53cd103", + "derived_key": [ + 204,180,39,152,129,136,139,125,156,240,127,28,205,2,65,12,140,132,177,76,5,4,95,204,205,9,179,77,57,148,6,231 + ], + "enumeration_index": 0, + "initial_value": "0x0", + "final_value": "0x1" + }, + { + "address": "0x0000000000000000000000000000000000008004", + "key": "0x10005b597eb02058b230cee6a9cdf4705410d151ad1ea6ad3e2de77841acfc2", + "derived_key": [ + 249,145,142,225,129,214,86,160,12,71,51,28,109,238,246,115,57,184,6,234,138,46,107,81,103,128,201,242,101,51,179,68 + ], + "enumeration_index": 0, + "initial_value": "0x0", + "final_value": "0x1" + }, + { + "address": "0x0000000000000000000000000000000000008004", + "key": "0x10005bb7471a0016b1c330f767a5e73ad0c330934694cad007c5aa3f926be65", + "derived_key": [ + 17,109,102,169,143,117,42,93,149,160,20,188,122,34,0,140,248,73,206,232,146,65,183,250,61,35,40,54,167,63,173,215 + ], + "enumeration_index": 0, + "initial_value": "0x0", + "final_value": "0x1" + }, + { + "address": "0x0000000000000000000000000000000000008004", + "key": "0x100061dc40d0bc2cb81905e6bc1421ebd4bd8b79aacb8da4f9d7145b288674f", + "derived_key": [ + 58,197,183,31,149,121,187,250,193,140,202,222,69,149,235,105,78,113,59,213,78,241,15,40,62,137,46,19,193,78,85,31 + ], + "enumeration_index": 0, + "initial_value": "0x0", + "final_value": "0x1" + }, + { + "address": "0x0000000000000000000000000000000000008004", + "key": "0x10006311257c403bbba7e2aef0aa20822627c82ec7484fcb31b87dd8c582aa9", + "derived_key": [ + 209,81,122,212,165,252,102,254,115,58,127,209,26,21,188,113,69,3,30,255,154,72,181,219,97,7,227,96,209,19,138,181 + ], + "enumeration_index": 0, + "initial_value": "0x0", + "final_value": "0x1" + }, + { + "address": "0x0000000000000000000000000000000000008004", + "key": "0x100070f1cd4e2830607af30192743910fdfabf1dd004a8073944275819d1dfe", + "derived_key": [ + 151,245,134,14,100,30,161,175,227,142,158,71,197,157,213,103,198,28,241,51,173,107,242,84,76,53,176,101,132,26,29,60 + ], + "enumeration_index": 0, 
+ "initial_value": "0x0", + "final_value": "0x1" + }, + { + "address": "0x0000000000000000000000000000000000008004", + "key": "0x1000ecfaeb837bd098bcf9bde6fc2ccd8e8a9355a1b70e601ac18cd089eb308", + "derived_key": [ + 72,239,59,253,40,148,227,213,236,98,100,14,198,212,71,148,180,209,64,152,228,196,11,209,109,231,183,97,135,156,172,241 + ], + "enumeration_index": 0, + "initial_value": "0x0", + "final_value": "0x1" + }, + { + "address": "0x0000000000000000000000000000000000008005", + "key": "0xe778a21bcfe90796edfc6e5dba276e58537d4ff192bc30765e18d9ef2aa9a55", + "derived_key": [ + 205,107,120,198,53,118,206,64,8,204,37,15,58,43,95,189,38,38,203,212,73,105,50,160,21,160,38,124,10,233,46,22 + ], + "enumeration_index": 0, + "initial_value": "0x0", + "final_value": "0x10e" + }, + { + "address": "0x0000000000000000000000000000000000008005", + "key": "0x201f2e5549e69135c92587d30523c730fd01553abf72828402fad9b12c172e10", + "derived_key": [ + 204,0,103,138,56,182,245,161,43,137,56,202,232,138,228,30,242,80,214,237,253,8,17,251,148,203,85,106,127,162,114,20 + ], + "enumeration_index": 0, + "initial_value": "0x0", + "final_value": "0x62af78a1a86d5fa8aa7b75ee1884d36ca3c3193a" + }, + { + "address": "0x0000000000000000000000000000000000008005", + "key": "0x35dc0a033f8f3476b52059199e9babf078fddd76cb3c290e05ae42462bfc33eb", + "derived_key": [ + 89,16,198,41,13,94,89,2,119,169,28,80,179,104,66,21,38,252,16,146,163,159,122,68,234,161,165,150,251,139,57,4 + ], + "enumeration_index": 0, + "initial_value": "0x0", + "final_value": "0x52f431aa35dd80982e3c66614112503ca2c6e344745f4a2bcfc9bd7e09c75584" + }, + { + "address": "0x0000000000000000000000000000000000008005", + "key": "0x3859fd065954dbed7c74a1359d0e5bc38403ea4cdf0274ae615ce0e3e2afec6b", + "derived_key": [ + 67,87,214,19,120,116,147,47,78,236,178,95,23,23,1,171,197,181,63,197,52,162,224,221,93,223,35,243,248,138,100,215 + ], + "enumeration_index": 0, + "initial_value": "0x0", + "final_value": "0x10002" + }, + { + "address": "0x0000000000000000000000000000000000008005", + "key": "0x41c023ccaa2a67013d253ba3488447c2db3843b3f988653fdf8d7c7268862ca9", + "derived_key": [ + 94,249,142,243,210,28,27,218,191,10,139,245,66,107,2,111,153,125,18,238,76,249,208,69,34,173,165,21,177,18,82,239 + ], + "enumeration_index": 0, + "initial_value": "0x0", + "final_value": "0x10e" + }, + { + "address": "0x0000000000000000000000000000000000008005", + "key": "0x4792eb7ea10cfac9f83a8d12d965c903854b51c5cb0783e082741ecf0c20dcfe", + "derived_key": [ + 217,248,73,34,237,151,158,186,178,226,225,234,58,186,218,7,175,174,60,185,248,248,25,28,51,154,61,168,213,77,242,169 + ], + "enumeration_index": 0, + "initial_value": "0x0", + "final_value": "0x52f431aa35dd80982e3c66614112503ca2c6e344745f4a2bcfc9bd7e09c75584" + }, + { + "address": "0x0000000000000000000000000000000000008005", + "key": "0x5bad0400c1a2cec7acfd85c5c5c25108540c42f405d3ae6ea01209dfbcc63c29", + "derived_key": [ + 194,179,227,24,162,84,191,59,100,89,207,244,98,199,135,44,91,35,210,22,182,249,66,219,89,32,250,61,112,54,16,141 + ], + "enumeration_index": 0, + "initial_value": "0x0", + "final_value": "0x62af78a1a86d5fa8aa7b75ee1884d36ca3c3193a" + }, + { + "address": "0x0000000000000000000000000000000000008005", + "key": "0x71d2d9399f0017d99e02441b51c782e6f5613748934c615622bc6f2327b79b8d", + "derived_key": [ + 60,170,232,127,53,17,58,173,142,247,89,247,207,149,119,134,64,14,158,82,18,231,188,179,163,89,11,174,81,43,46,153 + ], + "enumeration_index": 0, + "initial_value": "0x0", + "final_value": "0x9" + }, + { + "address": 
"0x0000000000000000000000000000000000008005", + "key": "0x75579ad6152f71bd465c7f980c773c6df73f53d82aebf8b69c1173f678af2d81", + "derived_key": [ + 132,119,216,135,171,185,255,65,210,32,46,77,152,229,32,71,244,141,39,140,188,245,22,25,184,28,198,202,132,222,174,160 + ], + "enumeration_index": 0, + "initial_value": "0x0", + "final_value": "0x10002" + }, + { + "address": "0x0000000000000000000000000000000000008005", + "key": "0x87c06ae8fd6d2ee9919bb86c39ee03f70b0d87028d77b914408152f07043c769", + "derived_key": [ + 119,228,107,44,53,217,61,182,86,125,189,169,81,109,32,249,139,212,234,72,144,24,135,118,89,121,216,219,24,207,66,168 + ], + "enumeration_index": 0, + "initial_value": "0x0", + "final_value": "0x10e" + }, + { + "address": "0x0000000000000000000000000000000000008005", + "key": "0x962d8512b88c87f0272660761794a46a130b867d7d15b38fc1adc33433e4fce8", + "derived_key": [ + 213,5,71,76,80,42,16,77,105,27,101,50,79,76,38,232,167,55,134,79,128,251,113,33,35,116,77,254,28,6,176,0 + ], + "enumeration_index": 0, + "initial_value": "0x0", + "final_value": "0x52f431aa35dd80982e3c66614112503ca2c6e344745f4a2bcfc9bd7e09c75584" + }, + { + "address": "0x0000000000000000000000000000000000008005", + "key": "0x9f4693a69c182083198dd36e2803bc42bbe3f851aa03cb0f0de7687a2171336b", + "derived_key": [ + 110,177,116,132,158,116,7,177,77,240,138,82,212,212,241,43,54,8,1,75,42,104,243,5,241,73,226,60,72,169,2,41 + ], + "enumeration_index": 0, + "initial_value": "0x0", + "final_value": "0x10003" + }, + { + "address": "0x0000000000000000000000000000000000008005", + "key": "0xb35ae26426d210bd3178c283cdcb50ce0cdbff27177eb0786fc3fe0f45083b1d", + "derived_key": [ + 73,127,150,33,212,30,131,171,90,28,221,170,53,22,176,210,81,154,146,160,81,67,188,184,7,13,240,169,97,51,230,181 + ], + "enumeration_index": 0, + "initial_value": "0x0", + "final_value": "0x64" + }, + { + "address": "0x0000000000000000000000000000000000008005", + "key": "0xb62ada1fb8084bc5425b2aea59d59080ac3d0a10a1cc368978230741dca77a19", + "derived_key": [ + 71,111,167,179,223,229,107,45,223,184,100,207,103,16,106,234,217,25,120,51,156,12,142,28,186,4,134,110,182,28,191,11 + ], + "enumeration_index": 0, + "initial_value": "0x0", + "final_value": "0x9" + }, + { + "address": "0x0000000000000000000000000000000000008005", + "key": "0xcb5ca2f778293159761b941dc7b8f7fd374e3632c39b35a0fd4b1aa20ed4a091", + "derived_key": [ + 62,169,255,238,205,94,99,210,162,31,213,85,158,233,223,231,174,18,241,77,26,133,255,75,40,190,65,163,26,48,53,196 + ], + "enumeration_index": 0, + "initial_value": "0x0", + "final_value": "0x10002" + }, + { + "address": "0x0000000000000000000000000000000000008005", + "key": "0xd8fc94fc3444dd0233f4f4f74b08d69d0079035017309fa37c5b30a7cabb729b", + "derived_key": [ + 232,83,248,233,232,89,11,170,74,117,125,224,222,189,198,137,244,49,205,228,155,200,97,42,160,89,8,63,109,25,91,168 + ], + "enumeration_index": 0, + "initial_value": "0x0", + "final_value": "0x9" + }, + { + "address": "0x0000000000000000000000000000000000008005", + "key": "0xd9ba5de301f3948ee34a04905cc32b778b54dac455410e096889003b0770d47c", + "derived_key": [ + 255,134,239,235,78,5,0,110,98,20,109,14,192,231,250,72,49,145,191,114,177,51,38,242,67,121,217,71,114,50,124,171 + ], + "enumeration_index": 0, + "initial_value": "0x0", + "final_value": "0x9" + }, + { + "address": "0x0000000000000000000000000000000000008005", + "key": "0xf7fa34f014959c990f8cabd865f6012c5ad2ae9390bd21dc8ab2c3ee9c340257", + "derived_key": [ + 
209,167,69,145,2,139,203,92,187,46,4,30,218,0,85,77,176,3,253,201,73,229,148,92,229,57,32,59,244,12,109,96 + ], + "enumeration_index": 0, + "initial_value": "0x0", + "final_value": "0x9" + }, + { + "address": "0x000000000000000000000000000000000000800a", + "key": "0x1", + "derived_key": [ + 113,233,23,33,249,145,133,118,215,96,240,47,3,202,196,124,111,64,3,49,96,49,132,142,60,29,153,230,232,58,71,67 + ], + "enumeration_index": 0, + "initial_value": "0x0", + "final_value": "0xe29e64ae9c38000" + }, + { + "address": "0x000000000000000000000000000000000000800a", + "key": "0x1b458e5ab877fea2e4abf98d12b31ec3f7c93fd4856e807f684322e8cf11fdf7", + "derived_key": [ + 49,87,111,239,58,195,179,2,237,163,15,66,168,74,199,52,200,236,175,1,55,3,126,248,127,239,193,246,133,27,151,79 + ], + "enumeration_index": 0, + "initial_value": "0x0", + "final_value": "0x8603151b10a4a0" + }, + { + "address": "0x000000000000000000000000000000000000800a", + "key": "0x810ca1ae825b138452fb743e9948f909b6286cbfadd5a899190fcb21a75443ab", + "derived_key": [ + 185,211,150,83,253,116,26,253,56,22,83,204,70,30,122,203,221,134,84,251,39,141,138,17,246,159,212,31,236,239,75,201 + ], + "enumeration_index": 0, + "initial_value": "0x0", + "final_value": "0x53c1850fab698c0" + }, + { + "address": "0x000000000000000000000000000000000000800a", + "key": "0xafe379b9510a75924647deef7e3d3d3ebf948699c9f84eda83c07c71414098b8", + "derived_key": [ + 250,91,168,183,69,6,78,180,185,147,215,10,134,34,96,243,26,77,158,213,121,211,188,200,73,204,177,205,8,52,178,106 + ], + "enumeration_index": 0, + "initial_value": "0x0", + "final_value": "0x4a88ebbafe2b20" + }, + { + "address": "0x000000000000000000000000000000000000800a", + "key": "0xeaa2b2fbf0b42c559059e5e9510edc15755f1c1883f0e41d5ba5f9aea4ac201a", + "derived_key": [ + 141,97,126,192,90,203,191,95,226,69,41,166,75,35,133,169,106,173,67,240,155,225,173,169,44,112,64,49,220,193,72,27 + ], + "enumeration_index": 0, + "initial_value": "0x0", + "final_value": "0x81d41f918fe1780" + }, + { + "address": "0x000000000000000000000000000000000000800b", + "key": "0x0", + "derived_key": [ + 150,46,36,83,88,148,64,235,173,169,107,3,33,223,255,240,191,103,10,254,52,186,74,130,141,51,66,227,241,78,210,217 + ], + "enumeration_index": 60, + "initial_value": "0x10e", + "final_value": "0x1f9" + }, + { + "address": "0x000000000000000000000000000000000000800b", + "key": "0x6", + "derived_key": [ + 162,84,32,193,217,215,5,53,140,19,76,198,1,217,209,132,203,77,253,222,126,28,172,43,195,212,211,139,249,236,68,230 + ], + "enumeration_index": 0, + "initial_value": "0x0", + "final_value": "0x5f5e100" + }, + { + "address": "0x000000000000000000000000000000000000800b", + "key": "0x7", + "derived_key": [ + 18,59,175,197,134,247,119,100,72,140,210,76,106,119,84,110,90,15,232,189,251,79,162,3,207,175,252,54,204,228,221,91 + ], + "enumeration_index": 0, + "initial_value": "0x0", + "final_value": "0x100000000000000000000000066fc0241" + }, + { + "address": "0x000000000000000000000000000000000000800b", + "key": "0x9", + "derived_key": [ + 142,125,208,106,197,183,59,71,59,230,188,90,81,3,15,76,116,55,101,124,183,178,155,243,118,197,100,184,209,103,90,94 + ], + "enumeration_index": 0, + "initial_value": "0x0", + "final_value": "0x200000000000000000000000066fc0242" + }, + { + "address": "0x000000000000000000000000000000000000800b", + "key": "0xb", + "derived_key": [ + 75,168,78,31,55,208,65,188,110,85,186,57,104,38,204,73,78,132,212,129,91,109,181,38,144,66,46,234,115,134,49,79 + ], + "enumeration_index": 0, + "initial_value": "0x0", + 
"final_value": "0xe8e77626586f73b955364c7b4bbf0bb7f7685ebd40e852b164633a4acbd3244c" + }, + { + "address": "0x000000000000000000000000000000000000800b", + "key": "0xc", + "derived_key": [ + 61,226,32,44,203,98,106,211,135,215,7,34,230,79,190,68,86,46,47,35,26,41,12,8,83,43,141,106,186,64,47,245 + ], + "enumeration_index": 0, + "initial_value": "0x0", + "final_value": "0xf2aaeb06c0e556434ac93c232f37dd8be0a7bf9f430f76eb564df9fcb770c45f" + }, + { + "address": "0x000000000000000000000000000000000000800b", + "key": "0x10c", + "derived_key": [ + 121,9,53,136,208,232,71,239,167,58,16,206,32,228,121,159,177,228,102,66,214,86,23,199,229,33,63,160,73,137,217,45 + ], + "enumeration_index": 0, + "initial_value": "0x0", + "final_value": "0x200000000000000000000000066fc0242" + }, + { + "address": "0x000000000000000000000000000000000000800b", + "key": "0x10d", + "derived_key": [ + 135,222,210,71,225,102,15,130,112,113,199,241,55,25,52,88,151,81,8,83,132,252,159,68,98,193,241,137,124,92,62,239 + ], + "enumeration_index": 0, + "initial_value": "0x0", + "final_value": "0x100000000000000000000000000000001" + }, + { + "address": "0x000000000000000000000000000000000000800b", + "key": "0x10f", + "derived_key": [ + 134,36,129,147,235,77,210,168,206,129,95,135,108,18,77,72,53,149,34,240,133,77,149,216,7,46,175,240,211,125,85,189 + ], + "enumeration_index": 0, + "initial_value": "0x0", + "final_value": "0x320" + }, + { + "address": "0x000000000000000000000000000000000000800b", + "key": "0x110", + "derived_key": [ + 62,137,13,108,44,59,173,166,238,204,150,3,169,156,28,98,89,237,90,100,2,241,199,108,193,139,86,140,58,239,186,15 + ], + "enumeration_index": 0, + "initial_value": "0x0", + "final_value": "0x22a9" + }, + { + "address": "0x000000000000000000000000000000000000800b", + "key": "0x5eff886ea0ce6ca488a3d6e336d6c0f75f46d19b42c06ce5ee98e42c96d256c7", + "derived_key": [ + 145,29,210,173,116,63,242,55,212,17,100,138,15,227,44,109,116,238,192,96,113,106,42,116,53,47,107,28,67,91,93,103 + ], + "enumeration_index": 0, + "initial_value": "0x0", + "final_value": "0xf9030b78c5bf5ac997a76962aa32c90a6d8e8ebce9838c8eeb388d73e1f7659a" + }, + { + "address": "0x0000000000000000000000000000000000010002", + "key": "0x0", + "derived_key": [ + 214,116,246,54,163,90,111,26,81,86,78,195,55,27,156,77,163,18,109,90,208,186,227,80,207,199,250,234,199,99,99,184 + ], + "enumeration_index": 0, + "initial_value": "0x0", + "final_value": "0xff" + }, + { + "address": "0x0000000000000000000000000000000000010002", + "key": "0x33", + "derived_key": [ + 104,98,246,102,108,85,7,252,51,21,214,132,35,158,2,38,112,107,69,195,65,114,145,245,183,172,194,211,57,80,82,17 + ], + "enumeration_index": 0, + "initial_value": "0x0", + "final_value": "0x8ad65915f42c06ad06f23e33feee4f57060cd249" + }, + { + "address": "0x0000000000000000000000000000000000010002", + "key": "0xc9", + "derived_key": [ + 180,68,244,119,54,206,136,162,78,107,80,251,5,29,192,174,93,179,175,68,217,8,246,220,217,160,21,208,74,126,225,227 + ], + "enumeration_index": 0, + "initial_value": "0x0", + "final_value": "0x10003" + }, + { + "address": "0x0000000000000000000000000000000000010002", + "key": "0xd3", + "derived_key": [ + 59,172,224,22,174,10,65,231,169,237,9,168,91,33,85,109,38,187,242,242,75,76,32,165,75,187,165,27,95,83,162,158 + ], + "enumeration_index": 0, + "initial_value": "0x0", + "final_value": "0x10005" + }, + { + "address": "0x0000000000000000000000000000000000010002", + "key": "0xd5", + "derived_key": [ + 
237,36,132,158,202,168,131,171,106,32,214,79,172,224,148,150,15,71,73,102,217,162,19,183,2,117,192,112,196,76,181,34 + ], + "enumeration_index": 0, + "initial_value": "0x0", + "final_value": "0x3db94d72a25f64874243af776e06f449d55ba9dd" + }, + { + "address": "0x0000000000000000000000000000000000010002", + "key": "0x4d7101ab951ded1d6f6a567c6e539f8f6a2a675fe1d5eba86fefe5192175b131", + "derived_key": [ + 83,157,81,206,111,89,151,62,178,167,63,16,226,11,189,169,125,149,14,110,8,62,221,87,116,233,142,217,139,253,153,16 + ], + "enumeration_index": 0, + "initial_value": "0x0", + "final_value": "0x1" + }, + { + "address": "0x0000000000000000000000000000000000010002", + "key": "0x8e94fed44239eb2314ab7a406345e6c5a8f0ccedf3b600de3d004e672c33abf4", + "derived_key": [ + 186,154,222,88,248,170,108,168,43,242,42,43,72,15,245,221,236,232,166,232,99,81,164,123,16,213,143,51,128,251,219,183 + ], + "enumeration_index": 0, + "initial_value": "0x0", + "final_value": "0x1" + }, + { + "address": "0x0000000000000000000000000000000000010002", + "key": "0x99d6a8ff20aa8acdd49c8fb0cc74f2b2b57e0fa371d5aadb8e266a8cf9157ef5", + "derived_key": [ + 103,185,128,156,225,233,200,126,96,129,32,179,163,131,84,200,153,155,236,34,245,43,19,243,165,109,226,10,22,113,50,131 + ], + "enumeration_index": 0, + "initial_value": "0x0", + "final_value": "0x1" + }, + { + "address": "0x0000000000000000000000000000000000010002", + "key": "0xca59cc8f90e9fd91e0bc61c0c980b4b130ad1217252dd3bc209e6dfa57a05f63", + "derived_key": [ + 203,224,11,159,230,121,179,34,119,46,123,13,250,7,202,214,183,18,124,144,172,158,237,255,172,53,228,144,236,81,142,168 + ], + "enumeration_index": 0, + "initial_value": "0x0", + "final_value": "0x2c3c5fb909767b6af655bad72f5d6638f5f5d0a0" + }, + { + "address": "0x0000000000000000000000000000000000010003", + "key": "0x0", + "derived_key": [ + 183,144,135,107,116,64,90,68,61,138,52,178,244,96,20,237,96,5,52,90,158,129,172,204,39,175,55,18,74,73,29,222 + ], + "enumeration_index": 0, + "initial_value": "0x0", + "final_value": "0xff" + }, + { + "address": "0x0000000000000000000000000000000000010003", + "key": "0x33", + "derived_key": [ + 36,49,6,86,93,12,79,206,248,151,94,121,177,178,35,12,1,159,78,58,178,122,63,78,124,169,48,107,159,98,153,132 + ], + "enumeration_index": 0, + "initial_value": "0x0", + "final_value": "0x8ad65915f42c06ad06f23e33feee4f57060cd249" + }, + { + "address": "0x0000000000000000000000000000000000010003", + "key": "0xfa", + "derived_key": [ + 208,111,9,167,39,114,89,78,179,210,171,35,115,181,211,197,236,176,132,184,74,77,237,45,48,18,241,69,222,221,138,25 + ], + "enumeration_index": 0, + "initial_value": "0x0", + "final_value": "0x5de08929a3c0835232a7cd201d338317caabd873" + }, + { + "address": "0x0000000000000000000000000000000000010003", + "key": "0x56ca7d7fc0d180f3d83f99276f19310b5c00992edd8618fb359971a7ecb99ab3", + "derived_key": [ + 199,117,193,115,55,85,171,245,34,173,219,214,151,226,215,10,91,97,70,75,209,104,27,41,137,81,196,246,13,142,199,12 + ], + "enumeration_index": 0, + "initial_value": "0x0", + "final_value": "0x10004" + }, + { + "address": "0x0000000000000000000000000000000000010003", + "key": "0x635799b36cb7719b903c111d5790821f9e51e29061bc47a57c7988be806aff32", + "derived_key": [ + 113,24,36,136,41,22,138,100,28,59,149,105,31,231,215,27,33,193,211,238,215,254,44,202,236,107,125,180,46,38,146,200 + ], + "enumeration_index": 0, + "initial_value": "0x0", + "final_value": "0x10002" + }, + { + "address": "0x0000000000000000000000000000000000010004", + "key": "0x0", + "derived_key": [ + 
38,182,122,48,1,162,205,218,95,52,172,146,222,81,199,193,42,178,228,105,133,88,214,83,137,237,66,230,119,250,5,85 + ], + "enumeration_index": 0, + "initial_value": "0x0", + "final_value": "0xff" + }, + { + "address": "0x0000000000000000000000000000000000010004", + "key": "0x33", + "derived_key": [ + 253,39,5,238,88,207,60,229,238,92,28,224,63,70,109,126,152,54,188,71,18,186,162,153,21,61,132,71,202,121,113,207 + ], + "enumeration_index": 0, + "initial_value": "0x0", + "final_value": "0x8ad65915f42c06ad06f23e33feee4f57060cd249" + }, + { + "address": "0x0000000000000000000000000000000000010004", + "key": "0xc9", + "derived_key": [ + 191,14,175,103,174,227,73,235,177,89,118,163,111,237,172,26,74,6,101,80,58,211,113,127,243,254,106,224,120,179,101,156 + ], + "enumeration_index": 0, + "initial_value": "0x0", + "final_value": "0x4c48bdb0145a89bd87f8884f3fa8c1e42d585e75" + }, + { + "address": "0x0000000000000000000000000000000000010004", + "key": "0xfb", + "derived_key": [ + 184,2,226,247,10,56,240,40,136,100,146,139,246,198,195,194,209,164,217,45,202,10,147,86,175,254,198,249,92,64,121,164 + ], + "enumeration_index": 0, + "initial_value": "0x0", + "final_value": "0x10000fd53068fc35c6a23fee067bcfd3fc0d880ba4f79d8b65aafbff02f305d" + }, + { + "address": "0x0000000000000000000000000000000000010005", + "key": "0x0", + "derived_key": [ + 168,74,2,141,105,206,208,33,164,189,140,91,38,36,168,242,80,217,18,184,248,245,157,129,85,249,94,94,229,138,101,38 + ], + "enumeration_index": 0, + "initial_value": "0x0", + "final_value": "0x1" + }, + { + "address": "0x0000000000000000000000000000000000010005", + "key": "0x4", + "derived_key": [ + 47,241,202,110,153,206,246,252,92,214,119,95,159,94,245,61,243,40,240,8,26,143,180,81,247,55,255,244,73,12,229,83 + ], + "enumeration_index": 0, + "initial_value": "0x0", + "final_value": "0x1" + }, + { + "address": "0x0000000000000000000000000000000000010005", + "key": "0x5", + "derived_key": [ + 95,192,113,95,216,242,21,235,124,16,227,245,80,217,178,9,241,140,170,135,64,175,84,27,211,70,239,73,100,139,20,245 + ], + "enumeration_index": 0, + "initial_value": "0x0", + "final_value": "0x1" + }, + { + "address": "0x0000000000000000000000000000000000010005", + "key": "0x6", + "derived_key": [ + 201,169,20,123,206,251,168,141,33,64,175,106,246,185,19,185,53,101,125,53,5,87,5,184,7,21,91,61,208,130,42,131 + ], + "enumeration_index": 0, + "initial_value": "0x0", + "final_value": "0x1" + }, + { + "address": "0x0000000000000000000000000000000000010005", + "key": "0x36b6384b5eca791c62761152d0c79bb0604c104a5fb6f4eb0703f3154bb3db0", + "derived_key": [ + 183,44,52,109,222,204,99,77,172,182,15,29,40,214,131,168,39,33,227,213,36,163,61,162,168,47,3,62,136,241,101,126 + ], + "enumeration_index": 0, + "initial_value": "0x0", + "final_value": "0x1" + }, + { + "address": "0x0000000000000000000000000000000000010005", + "key": "0x16db2e4b9f8dc120de98f8491964203ba76de27b27b29c2d25f85a325cd37477", + "derived_key": [ + 133,79,165,174,6,191,41,30,209,5,109,104,28,93,197,246,247,13,23,242,234,3,204,110,233,229,198,255,131,62,203,105 + ], + "enumeration_index": 0, + "initial_value": "0x0", + "final_value": "0x924928c1377a6cf24c39c2d46f8eb9df23e811b26dc3527e548396fd4e173b1" + }, + { + "address": "0x0000000000000000000000000000000000010005", + "key": "0x7bbeda1ca523343d5e888708327d45f8c743f6cb29e139a7e03dc5068543e6c4", + "derived_key": [ + 211,102,235,93,180,24,37,200,29,129,191,72,73,93,114,116,50,181,244,253,225,248,223,46,101,251,180,223,113,77,242,139 + ], + "enumeration_index": 0, + 
"initial_value": "0x0", + "final_value": "0x46700b4d40ac5c35af2c22dda2787a91eb567b06c924a8fb8ae9a05b20c08c21" + }, + { + "address": "0x0000000000000000000000000000000000010005", + "key": "0x8e94fed44239eb2314ab7a406345e6c5a8f0ccedf3b600de3d004e672c33abf4", + "derived_key": [ + 228,96,52,194,207,181,181,131,126,57,95,233,204,152,190,4,82,34,235,53,200,202,40,109,252,73,189,213,239,94,126,130 + ], + "enumeration_index": 0, + "initial_value": "0x0", + "final_value": "0x1" + }, + { + "address": "0x0000000000000000000000000000000000010005", + "key": "0xac33ff75c19e70fe83507db0d683fd3465c996598dc972688b7ace676c89077b", + "derived_key": [ + 27,8,240,147,212,200,27,37,231,124,191,110,45,189,91,214,149,171,253,138,221,47,115,230,14,214,92,143,87,109,114,128 + ], + "enumeration_index": 0, + "initial_value": "0x0", + "final_value": "0x1f9" + }, + { + "address": "0x0000000000000000000000000000000000010005", + "key": "0xe14c171e271191dbbbddd568a762a4325466b12116e776c3243375f110708d73", + "derived_key": [ + 248,63,134,138,17,62,7,250,227,100,52,50,139,214,30,153,110,204,16,117,222,9,119,59,220,202,187,15,30,237,162,217 + ], + "enumeration_index": 0, + "initial_value": "0x0", + "final_value": "0x1" + }, + { + "address": "0x0000000000000000000000000000000000010005", + "key": "0xe14c171e271191dbbbddd568a762a4325466b12116e776c3243375f110708d74", + "derived_key": [ + 207,50,50,89,38,214,97,46,146,127,167,239,70,37,230,216,37,111,63,130,63,184,65,242,102,240,65,120,90,218,241,226 + ], + "enumeration_index": 0, + "initial_value": "0x0", + "final_value": "0x1" + }, + { + "address": "0x0000000000000000000000000000000000010005", + "key": "0xf652222313e28459528d920b65115c16c04f3efc82aaedc97be59f3f377c0d3f", + "derived_key": [ + 247,124,190,104,95,142,126,239,68,219,69,165,161,237,129,135,165,5,236,239,227,84,140,240,18,4,129,67,95,125,116,254 + ], + "enumeration_index": 0, + "initial_value": "0x0", + "final_value": "0x46700b4d40ac5c35af2c22dda2787a91eb567b06c924a8fb8ae9a05b20c08c21" + }, + { + "address": "0x03e013dc3eb10bbd48a8f9c94758f04d081563b6", + "key": "0x0", + "derived_key": [ + 64,31,45,164,25,35,131,214,111,103,185,66,123,36,77,209,130,54,238,77,124,250,76,42,126,68,137,156,53,223,112,84 + ], + "enumeration_index": 0, + "initial_value": "0x0", + "final_value": "0x1" + }, + { + "address": "0x03e013dc3eb10bbd48a8f9c94758f04d081563b6", + "key": "0x33", + "derived_key": [ + 172,109,18,138,162,172,98,227,191,233,228,200,186,6,38,31,205,90,238,83,85,200,140,40,95,174,70,100,236,184,92,217 + ], + "enumeration_index": 0, + "initial_value": "0x0", + "final_value": "0xf42d59d064dc3efa5c6984103f5354a4578f9d38" + }, + { + "address": "0x03e013dc3eb10bbd48a8f9c94758f04d081563b6", + "key": "0x360894a13ba1a3210667c828492db98dca3e2076cc3735a920a3ca505d382bbc", + "derived_key": [ + 238,206,71,73,61,115,66,84,21,49,226,43,98,209,124,67,230,245,74,241,47,105,36,12,239,120,5,217,170,54,156,84 + ], + "enumeration_index": 0, + "initial_value": "0x0", + "final_value": "0xf0e50c6be2ab13f852259d99317e0fa1511ed273" + }, + { + "address": "0x03e013dc3eb10bbd48a8f9c94758f04d081563b6", + "key": "0xb53127684a568b3173ae13b9f8a6016e243e63b6e8ee1178d6a717850b5d6103", + "derived_key": [ + 133,159,227,100,212,172,149,230,75,232,210,108,56,145,60,23,37,166,185,84,193,191,193,253,113,198,103,19,58,1,211,88 + ], + "enumeration_index": 0, + "initial_value": "0x0", + "final_value": "0x8ad65915f42c06ad06f23e33feee4f57060cd249" + }, + { + "address": "0x2c3c5fb909767b6af655bad72f5d6638f5f5d0a0", + "key": "0x33", + "derived_key": [ + 
244,204,176,178,59,175,3,143,139,2,242,240,21,87,122,194,191,65,151,96,89,50,229,228,174,155,172,240,102,252,221,88 + ], + "enumeration_index": 0, + "initial_value": "0x0", + "final_value": "0x36615cf349d7f6344891b1e7ca7c72883f5dc049" + }, + { + "address": "0x2c3c5fb909767b6af655bad72f5d6638f5f5d0a0", + "key": "0x9a", + "derived_key": [ + 97,131,206,41,240,31,150,151,163,154,135,110,104,89,178,252,214,86,245,40,217,82,157,194,186,14,137,246,116,87,3,221 + ], + "enumeration_index": 0, + "initial_value": "0x0", + "final_value": "0x58d1e441af5027fe6e4c6b749dde48721cef3bb31a08d3d64b9c6e9c725abc72" + }, + { + "address": "0x2c3c5fb909767b6af655bad72f5d6638f5f5d0a0", + "key": "0x9b", + "derived_key": [ + 36,198,234,1,57,238,186,239,33,185,70,78,68,110,74,247,188,177,180,151,164,216,15,115,133,254,13,247,190,87,17,67 + ], + "enumeration_index": 0, + "initial_value": "0x0", + "final_value": "0xe8949117b1d97ac18a8d1224be1e4045c67b698392d4edc03db3ed46b9e94673" + }, + { + "address": "0x2c3c5fb909767b6af655bad72f5d6638f5f5d0a0", + "key": "0x9c", + "derived_key": [ + 24,243,123,134,88,68,215,142,127,133,2,149,85,85,176,160,214,111,3,112,143,142,78,137,211,79,138,29,6,66,209,69 + ], + "enumeration_index": 0, + "initial_value": "0x0", + "final_value": "0xfa1e2d910cfa3e5c465f7d7f69c224d542e0a598" + }, + { + "address": "0x2c3c5fb909767b6af655bad72f5d6638f5f5d0a0", + "key": "0x9d", + "derived_key": [ + 168,81,60,218,234,227,67,148,118,29,59,147,53,55,78,40,8,227,39,217,122,18,110,222,78,162,140,204,238,55,6,95 + ], + "enumeration_index": 0, + "initial_value": "0x0", + "final_value": "0x1900000000" + }, + { + "address": "0x2c3c5fb909767b6af655bad72f5d6638f5f5d0a0", + "key": "0x9f", + "derived_key": [ + 144,32,12,17,126,234,225,99,200,138,138,108,231,51,212,1,171,8,94,147,139,188,115,131,162,159,107,192,34,19,171,180 + ], + "enumeration_index": 0, + "initial_value": "0x0", + "final_value": "0x97b2219068b5104fd0de0a2a4666b3f6f397aca7" + }, + { + "address": "0x2c3c5fb909767b6af655bad72f5d6638f5f5d0a0", + "key": "0xa3", + "derived_key": [ + 86,83,107,175,210,186,181,198,128,36,151,56,29,81,196,245,31,168,78,1,68,190,24,94,31,195,247,20,122,219,85,214 + ], + "enumeration_index": 0, + "initial_value": "0x0", + "final_value": "0xace084d9e79dcdae011409ea9454d78218092e0b189cd6a5173b44667400d511" + }, + { + "address": "0x2c3c5fb909767b6af655bad72f5d6638f5f5d0a0", + "key": "0x360894a13ba1a3210667c828492db98dca3e2076cc3735a920a3ca505d382bbc", + "derived_key": [ + 230,13,150,23,44,152,198,242,109,118,74,176,93,184,102,158,85,104,138,31,78,160,214,75,29,223,239,114,74,97,156,59 + ], + "enumeration_index": 0, + "initial_value": "0x0", + "final_value": "0x5e45123c75ae6b22222a2ef40afcf080e84f5af5" + }, + { + "address": "0x2c3c5fb909767b6af655bad72f5d6638f5f5d0a0", + "key": "0x8e94fed44239eb2314ab7a406345e6c5a8f0ccedf3b600de3d004e672c33abf4", + "derived_key": [ + 105,207,185,63,169,21,50,218,22,157,164,50,155,208,136,29,218,73,246,12,13,209,254,95,239,141,35,221,253,207,221,212 + ], + "enumeration_index": 0, + "initial_value": "0x0", + "final_value": "0x1" + }, + { + "address": "0x2c3c5fb909767b6af655bad72f5d6638f5f5d0a0", + "key": "0xa2493ae5fceab9e59e3829df3da317d9a236c9b8b11dc1da94cb0e047a357cad", + "derived_key": [ + 248,135,166,243,99,75,238,65,244,69,142,255,92,110,93,81,203,0,116,149,135,131,96,149,14,49,60,161,204,107,128,214 + ], + "enumeration_index": 0, + "initial_value": "0x0", + "final_value": "0xffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff" + }, + { + "address": 
"0x2c3c5fb909767b6af655bad72f5d6638f5f5d0a0", + "key": "0xb53127684a568b3173ae13b9f8a6016e243e63b6e8ee1178d6a717850b5d6103", + "derived_key": [ + 164,102,179,128,123,95,250,209,85,215,47,223,202,234,9,179,137,135,46,66,252,162,20,55,210,106,243,173,46,40,178,89 + ], + "enumeration_index": 0, + "initial_value": "0x0", + "final_value": "0x36615cf349d7f6344891b1e7ca7c72883f5dc049" + }, + { + "address": "0x338bd2ded4569c568f21da174acec70b826e550c", + "key": "0x0", + "derived_key": [ + 156,179,78,101,253,32,134,94,34,150,203,47,196,201,136,120,12,142,64,149,165,101,204,29,186,80,109,39,8,84,79,232 + ], + "enumeration_index": 0, + "initial_value": "0x0", + "final_value": "0x8002cd98cfb563492a6fb3e7c8243b7b9ad4cc92" + }, + { + "address": "0x338bd2ded4569c568f21da174acec70b826e550c", + "key": "0x1", + "derived_key": [ + 140,190,129,206,40,197,26,241,53,110,14,80,134,5,37,204,178,45,166,159,129,221,136,86,105,96,102,217,243,16,89,249 + ], + "enumeration_index": 0, + "initial_value": "0x0", + "final_value": "0x683cfc9cb6230b80899dbcb6181591d69089d8a4" + }, + { + "address": "0x4c48bdb0145a89bd87f8884f3fa8c1e42d585e75", + "key": "0x0", + "derived_key": [ + 177,52,180,226,24,99,117,67,64,64,241,12,41,231,167,74,209,204,218,129,255,34,102,39,251,93,142,41,145,92,203,50 + ], + "enumeration_index": 0, + "initial_value": "0x0", + "final_value": "0x8ad65915f42c06ad06f23e33feee4f57060cd249" + }, + { + "address": "0x4c48bdb0145a89bd87f8884f3fa8c1e42d585e75", + "key": "0x1", + "derived_key": [ + 164,143,172,103,50,238,187,229,211,197,117,198,212,234,78,82,14,197,48,46,66,117,89,74,193,245,113,83,209,177,183,217 + ], + "enumeration_index": 0, + "initial_value": "0x0", + "final_value": "0x517d9f66c564d2edf43cb657424f72f1c20d3bff" + }, + { + "address": "0x517d9f66c564d2edf43cb657424f72f1c20d3bff", + "key": "0x0", + "derived_key": [ + 230,154,81,238,249,118,81,47,67,167,51,81,39,137,145,76,187,230,155,155,70,102,153,242,27,165,84,224,218,253,106,44 + ], + "enumeration_index": 0, + "initial_value": "0x0", + "final_value": "0xff" + }, + { + "address": "0x5e45123c75ae6b22222a2ef40afcf080e84f5af5", + "key": "0x8e94fed44239eb2314ab7a406345e6c5a8f0ccedf3b600de3d004e672c33abf4", + "derived_key": [ + 60,175,19,171,24,127,90,244,102,215,109,52,163,108,87,248,78,75,87,18,254,157,225,108,251,34,166,46,135,226,155,114 + ], + "enumeration_index": 0, + "initial_value": "0x0", + "final_value": "0x1" + }, + { + "address": "0x62af78a1a86d5fa8aa7b75ee1884d36ca3c3193a", + "key": "0x0", + "derived_key": [ + 137,66,124,98,185,206,118,62,215,166,225,249,150,252,161,203,0,217,115,193,56,251,207,17,2,102,50,45,221,187,226,50 + ], + "enumeration_index": 0, + "initial_value": "0x0", + "final_value": "0x5de08929a3c0835232a7cd201d338317caabd8730002" + }, + { + "address": "0x62af78a1a86d5fa8aa7b75ee1884d36ca3c3193a", + "key": "0x1", + "derived_key": [ + 86,146,226,162,251,7,31,172,102,81,196,200,22,126,55,247,235,106,113,202,76,139,128,246,85,114,151,178,101,95,159,191 + ], + "enumeration_index": 0, + "initial_value": "0x0", + "final_value": "0x338bd2ded4569c568f21da174acec70b826e550c" + }, + { + "address": "0x62af78a1a86d5fa8aa7b75ee1884d36ca3c3193a", + "key": "0x2", + "derived_key": [ + 238,250,55,221,42,57,86,177,201,103,75,239,3,21,25,161,11,112,13,117,46,113,1,71,100,17,102,72,32,125,98,78 + ], + "enumeration_index": 0, + "initial_value": "0x0", + "final_value": "0x10000fd53068fc35c6a23fee067bcfd3fc0d880ba4f79d8b65aafbff02f305d" + }, + { + "address": "0x62af78a1a86d5fa8aa7b75ee1884d36ca3c3193a", + "key": 
"0x360894a13ba1a3210667c828492db98dca3e2076cc3735a920a3ca505d382bbc", + "derived_key": [ + 22,155,114,188,54,101,62,87,11,121,52,68,25,197,106,219,1,155,20,238,224,223,45,229,125,209,123,117,113,166,132,12 + ], + "enumeration_index": 0, + "initial_value": "0x0", + "final_value": "0xdc0ed6fcbfe51c9f84a662e64fd1347736aa7486" + }, + { + "address": "0x62af78a1a86d5fa8aa7b75ee1884d36ca3c3193a", + "key": "0xb53127684a568b3173ae13b9f8a6016e243e63b6e8ee1178d6a717850b5d6103", + "derived_key": [ + 81,122,205,57,136,220,234,247,156,218,204,189,130,97,114,48,48,160,227,48,48,64,28,58,212,244,195,137,255,228,64,41 + ], + "enumeration_index": 0, + "initial_value": "0x0", + "final_value": "0x8002cd98cfb563492a6fb3e7c8243b7b9ad4cc92" + }, + { + "address": "0x683cfc9cb6230b80899dbcb6181591d69089d8a4", + "key": "0x0", + "derived_key": [ + 237,236,215,194,80,85,137,94,24,27,243,41,116,140,187,70,33,135,209,48,214,17,9,198,53,117,79,21,244,235,240,208 + ], + "enumeration_index": 0, + "initial_value": "0x0", + "final_value": "0xff" + }, + { + "address": "0x97b2219068b5104fd0de0a2a4666b3f6f397aca7", + "key": "0x0", + "derived_key": [ + 219,215,135,89,179,97,218,60,174,210,247,236,175,60,97,114,55,143,26,104,199,6,53,175,153,170,254,4,26,49,33,168 + ], + "enumeration_index": 0, + "initial_value": "0x0", + "final_value": "0x36615cf349d7f6344891b1e7ca7c72883f5dc049" + }, + { + "address": "0x97b2219068b5104fd0de0a2a4666b3f6f397aca7", + "key": "0x2", + "derived_key": [ + 131,75,158,95,145,124,241,215,162,81,17,8,190,214,110,90,15,123,1,214,244,31,200,40,196,119,19,72,17,44,27,219 + ], + "enumeration_index": 0, + "initial_value": "0x0", + "final_value": "0x2c3c5fb909767b6af655bad72f5d6638f5f5d0a0" + }, + { + "address": "0xdc0ed6fcbfe51c9f84a662e64fd1347736aa7486", + "key": "0x0", + "derived_key": [ + 62,110,21,209,63,190,73,44,77,121,157,143,198,176,46,157,199,11,251,128,18,248,171,99,94,148,201,218,67,21,70,232 + ], + "enumeration_index": 0, + "initial_value": "0x0", + "final_value": "0xff" + }, + { + "address": "0xe024f9e4e8fa2f08f768c1cb56bc4a6e3cbd8834", + "key": "0x8e94fed44239eb2314ab7a406345e6c5a8f0ccedf3b600de3d004e672c33abf4", + "derived_key": [ + 16,46,124,130,188,155,165,96,66,61,124,176,157,94,180,222,164,199,68,147,148,121,54,59,60,181,162,4,74,28,114,103 + ], + "enumeration_index": 0, + "initial_value": "0x0", + "final_value": "0x1" + } + ], + "aux_commitments": { + "events_queue_commitment": "0xec82208c87a937d88768a0067b2a80f0525eca8288dad2cf96cf8bbe6a1aa565", + "bootloader_initial_content_commitment": "0x97df88dcecbcd29b49773c042cdee7a44c57a741e64913fff5aa1b3484232f28" + }, + "blob_hashes": [ + { + "commitment": "0xf840cf3f6b7dc92729b2b9ef3b399e7b896d553b746362fe81c4eb911013570d", + "linear_hash": "0xff4feb4bef9401731ab9db3626c2e015baa6880d7b1c4382d03b30da3a0fd75e" + }, + { + "commitment": "0x0000000000000000000000000000000000000000000000000000000000000000", + "linear_hash": "0x0000000000000000000000000000000000000000000000000000000000000000" + }, + { + "commitment": "0x0000000000000000000000000000000000000000000000000000000000000000", + "linear_hash": "0x0000000000000000000000000000000000000000000000000000000000000000" + }, + { + "commitment": "0x0000000000000000000000000000000000000000000000000000000000000000", + "linear_hash": "0x0000000000000000000000000000000000000000000000000000000000000000" + }, + { + "commitment": "0x0000000000000000000000000000000000000000000000000000000000000000", + "linear_hash": "0x0000000000000000000000000000000000000000000000000000000000000000" + }, + { + "commitment": 
"0x0000000000000000000000000000000000000000000000000000000000000000", + "linear_hash": "0x0000000000000000000000000000000000000000000000000000000000000000" + }, + { + "commitment": "0x0000000000000000000000000000000000000000000000000000000000000000", + "linear_hash": "0x0000000000000000000000000000000000000000000000000000000000000000" + }, + { + "commitment": "0x0000000000000000000000000000000000000000000000000000000000000000", + "linear_hash": "0x0000000000000000000000000000000000000000000000000000000000000000" + }, + { + "commitment": "0x0000000000000000000000000000000000000000000000000000000000000000", + "linear_hash": "0x0000000000000000000000000000000000000000000000000000000000000000" + }, + { + "commitment": "0x0000000000000000000000000000000000000000000000000000000000000000", + "linear_hash": "0x0000000000000000000000000000000000000000000000000000000000000000" + }, + { + "commitment": "0x0000000000000000000000000000000000000000000000000000000000000000", + "linear_hash": "0x0000000000000000000000000000000000000000000000000000000000000000" + }, + { + "commitment": "0x0000000000000000000000000000000000000000000000000000000000000000", + "linear_hash": "0x0000000000000000000000000000000000000000000000000000000000000000" + }, + { + "commitment": "0x0000000000000000000000000000000000000000000000000000000000000000", + "linear_hash": "0x0000000000000000000000000000000000000000000000000000000000000000" + }, + { + "commitment": "0x0000000000000000000000000000000000000000000000000000000000000000", + "linear_hash": "0x0000000000000000000000000000000000000000000000000000000000000000" + }, + { + "commitment": "0x0000000000000000000000000000000000000000000000000000000000000000", + "linear_hash": "0x0000000000000000000000000000000000000000000000000000000000000000" + }, + { + "commitment": "0x0000000000000000000000000000000000000000000000000000000000000000", + "linear_hash": "0x0000000000000000000000000000000000000000000000000000000000000000" + } + ], + "aggregation_root": "0x0924928c1377a6cf24c39c2d46f8eb9df23e811b26dc3527e548396fd4e173b1" + } + } +} diff --git a/core/lib/types/src/l2_to_l1_log.rs b/core/lib/types/src/l2_to_l1_log.rs index 59ade8873cd1..957cfa9a1a6a 100644 --- a/core/lib/types/src/l2_to_l1_log.rs +++ b/core/lib/types/src/l2_to_l1_log.rs @@ -1,5 +1,5 @@ use serde::{Deserialize, Serialize}; -use zksync_system_constants::{BLOB1_LINEAR_HASH_KEY, PUBDATA_CHUNK_PUBLISHER_ADDRESS}; +use zksync_system_constants::{BLOB1_LINEAR_HASH_KEY_PRE_GATEWAY, PUBDATA_CHUNK_PUBLISHER_ADDRESS}; use crate::{ blob::{num_blobs_created, num_blobs_required}, @@ -80,10 +80,15 @@ pub fn l2_to_l1_logs_tree_size(protocol_version: ProtocolVersionId) -> usize { } /// Returns the blob hashes parsed out from the system logs -pub fn parse_system_logs_for_blob_hashes( +pub fn parse_system_logs_for_blob_hashes_pre_gateway( protocol_version: &ProtocolVersionId, system_logs: &[SystemL2ToL1Log], ) -> Vec { + assert!( + protocol_version.is_pre_gateway(), + "Cannot parse blob linear hashes from system logs for post gateway" + ); + let num_required_blobs = num_blobs_required(protocol_version) as u32; let num_created_blobs = num_blobs_created(protocol_version) as u32; @@ -95,9 +100,11 @@ pub fn parse_system_logs_for_blob_hashes( .iter() .filter(|log| { log.0.sender == PUBDATA_CHUNK_PUBLISHER_ADDRESS - && log.0.key >= H256::from_low_u64_be(BLOB1_LINEAR_HASH_KEY as u64) + && log.0.key >= H256::from_low_u64_be(BLOB1_LINEAR_HASH_KEY_PRE_GATEWAY as u64) && log.0.key - < H256::from_low_u64_be((BLOB1_LINEAR_HASH_KEY + 
num_created_blobs) as u64) + < H256::from_low_u64_be( + (BLOB1_LINEAR_HASH_KEY_PRE_GATEWAY + num_created_blobs) as u64, + ) }) .map(|log| (log.0.key, log.0.value)) .collect::>(); diff --git a/core/lib/vm_executor/src/batch/factory.rs b/core/lib/vm_executor/src/batch/factory.rs index bc19086c9692..f974d17f4a75 100644 --- a/core/lib/vm_executor/src/batch/factory.rs +++ b/core/lib/vm_executor/src/batch/factory.rs @@ -6,6 +6,7 @@ use tokio::sync::mpsc; use zksync_multivm::{ interface::{ executor::{BatchExecutor, BatchExecutorFactory}, + pubdata::PubdataBuilder, storage::{ReadStorage, StoragePtr, StorageView, StorageViewStats}, utils::DivergenceHandler, BatchTransactionExecutionResult, BytecodeCompressionError, CompressedBytecodeInfo, @@ -13,12 +14,13 @@ use zksync_multivm::{ VmInterface, VmInterfaceHistoryEnabled, }, is_supported_by_fast_vm, + pubdata_builders::pubdata_params_to_builder, tracers::CallTracer, vm_fast, vm_latest::HistoryEnabled, FastVmInstance, LegacyVmInstance, MultiVMTracer, }; -use zksync_types::{vm::FastVmMode, Transaction}; +use zksync_types::{commitment::PubdataParams, vm::FastVmMode, Transaction}; use super::{ executor::{Command, MainBatchExecutor}, @@ -116,6 +118,7 @@ impl BatchExecutorFactory storage: S, l1_batch_params: L1BatchEnv, system_env: SystemEnv, + pubdata_params: PubdataParams, ) -> Box> { // Since we process `BatchExecutor` commands one-by-one (the next command is never enqueued // until a previous command is processed), capacity 1 is enough for the commands channel. @@ -130,8 +133,14 @@ impl BatchExecutorFactory _tracer: PhantomData::, }; - let handle = - tokio::task::spawn_blocking(move || executor.run(storage, l1_batch_params, system_env)); + let handle = tokio::task::spawn_blocking(move || { + executor.run( + storage, + l1_batch_params, + system_env, + pubdata_params_to_builder(pubdata_params), + ) + }); Box::new(MainBatchExecutor::new(handle, commands_sender)) } } @@ -183,8 +192,8 @@ impl BatchVm { dispatch_batch_vm!(self.start_new_l2_block(l2_block)); } - fn finish_batch(&mut self) -> FinishedL1Batch { - dispatch_batch_vm!(self.finish_batch()) + fn finish_batch(&mut self, pubdata_builder: Rc) -> FinishedL1Batch { + dispatch_batch_vm!(self.finish_batch(pubdata_builder)) } fn make_snapshot(&mut self) { @@ -260,6 +269,7 @@ impl CommandReceiver { storage: S, l1_batch_params: L1BatchEnv, system_env: SystemEnv, + pubdata_builder: Rc, ) -> anyhow::Result> { tracing::info!("Starting executing L1 batch #{}", &l1_batch_params.number); @@ -310,7 +320,7 @@ impl CommandReceiver { } } Command::FinishBatch(resp) => { - let vm_block_result = self.finish_batch(&mut vm)?; + let vm_block_result = self.finish_batch(&mut vm, pubdata_builder)?; if resp.send(vm_block_result).is_err() { break; } @@ -365,10 +375,14 @@ impl CommandReceiver { latency.observe(); } - fn finish_batch(&self, vm: &mut BatchVm) -> anyhow::Result { + fn finish_batch( + &self, + vm: &mut BatchVm, + pubdata_builder: Rc, + ) -> anyhow::Result { // The vm execution was paused right after the last transaction was executed. // There is some post-processing work that the VM needs to do before the block is fully processed. 
- let result = vm.finish_batch(); + let result = vm.finish_batch(pubdata_builder); anyhow::ensure!( !result.block_tip_execution_result.result.is_failed(), "VM must not fail when finalizing block: {:#?}", diff --git a/core/lib/vm_executor/src/oneshot/block.rs b/core/lib/vm_executor/src/oneshot/block.rs index cc759c032fc1..d6118f15b98e 100644 --- a/core/lib/vm_executor/src/oneshot/block.rs +++ b/core/lib/vm_executor/src/oneshot/block.rs @@ -203,6 +203,7 @@ impl OneshotEnvParameters { enforced_base_fee, ) .await?; + Ok(OneshotEnv { system, l1_batch, diff --git a/core/lib/vm_executor/src/oneshot/contracts.rs b/core/lib/vm_executor/src/oneshot/contracts.rs index dc9ef0c0e8df..d4e0a94f9178 100644 --- a/core/lib/vm_executor/src/oneshot/contracts.rs +++ b/core/lib/vm_executor/src/oneshot/contracts.rs @@ -67,6 +67,8 @@ pub struct MultiVMBaseSystemContracts { vm_1_5_0_increased_memory: BaseSystemContracts, /// Contracts to be used after the protocol defense upgrade vm_protocol_defense: BaseSystemContracts, + /// Contracts to be used after the gateway upgrade + gateway: BaseSystemContracts, // We use `fn() -> C` marker so that the `MultiVMBaseSystemContracts` unconditionally implements `Send + Sync`. _contracts_kind: PhantomData C>, } @@ -105,6 +107,7 @@ impl MultiVMBaseSystemContracts { ProtocolVersionId::Version25 | ProtocolVersionId::Version26 => { &self.vm_protocol_defense } + ProtocolVersionId::Version27 => &self.gateway, }; let base = base.clone(); @@ -133,6 +136,7 @@ impl MultiVMBaseSystemContracts { vm_1_5_0_increased_memory: BaseSystemContracts::estimate_gas_post_1_5_0_increased_memory(), vm_protocol_defense: BaseSystemContracts::estimate_gas_post_protocol_defense(), + gateway: BaseSystemContracts::estimate_gas_gateway(), _contracts_kind: PhantomData, } } @@ -154,6 +158,7 @@ impl MultiVMBaseSystemContracts { vm_1_5_0_increased_memory: BaseSystemContracts::playground_post_1_5_0_increased_memory( ), vm_protocol_defense: BaseSystemContracts::playground_post_protocol_defense(), + gateway: BaseSystemContracts::playground_gateway(), _contracts_kind: PhantomData, } } diff --git a/core/lib/vm_executor/src/oneshot/mod.rs b/core/lib/vm_executor/src/oneshot/mod.rs index 018e5abded6f..5f9e4dd3c6f4 100644 --- a/core/lib/vm_executor/src/oneshot/mod.rs +++ b/core/lib/vm_executor/src/oneshot/mod.rs @@ -19,8 +19,9 @@ use zksync_multivm::{ executor::{OneshotExecutor, TransactionValidator}, storage::{ReadStorage, StoragePtr, StorageView, WriteStorage}, tracer::{ValidationError, ValidationParams}, - ExecutionResult, OneshotEnv, OneshotTracingParams, OneshotTransactionExecutionResult, - StoredL2BlockEnv, TxExecutionArgs, TxExecutionMode, VmExecutionMode, VmInterface, + ExecutionResult, InspectExecutionMode, OneshotEnv, OneshotTracingParams, + OneshotTransactionExecutionResult, StoredL2BlockEnv, TxExecutionArgs, TxExecutionMode, + VmInterface, }, tracers::{CallTracer, StorageInvocations, ValidationTracer}, utils::adjust_pubdata_price_for_tx, @@ -169,7 +170,7 @@ where ); let exec_result = executor.apply(|vm, transaction| { vm.push_transaction(transaction); - vm.inspect(&mut tracers.into(), VmExecutionMode::OneTx) + vm.inspect(&mut tracers.into(), InspectExecutionMode::OneTx) }); let validation_result = Arc::make_mut(&mut validation_result) .take() diff --git a/core/lib/vm_executor/src/storage.rs b/core/lib/vm_executor/src/storage.rs index fa0e530c1909..e5a2d404233b 100644 --- a/core/lib/vm_executor/src/storage.rs +++ b/core/lib/vm_executor/src/storage.rs @@ -7,8 +7,9 @@ use zksync_contracts::BaseSystemContracts; 
use zksync_dal::{Connection, Core, CoreDal, DalError}; use zksync_multivm::interface::{L1BatchEnv, L2BlockEnv, SystemEnv, TxExecutionMode}; use zksync_types::{ - block::L2BlockHeader, fee_model::BatchFeeInput, snapshots::SnapshotRecoveryStatus, Address, - L1BatchNumber, L2BlockNumber, L2ChainId, ProtocolVersionId, H256, ZKPORTER_IS_AVAILABLE, + block::L2BlockHeader, commitment::PubdataParams, fee_model::BatchFeeInput, + snapshots::SnapshotRecoveryStatus, Address, L1BatchNumber, L2BlockNumber, L2ChainId, + ProtocolVersionId, H256, ZKPORTER_IS_AVAILABLE, }; const BATCH_COMPUTATIONAL_GAS_LIMIT: u32 = u32::MAX; @@ -263,7 +264,7 @@ impl L1BatchParamsProvider { first_l2_block_in_batch: &FirstL2BlockInBatch, validation_computational_gas_limit: u32, chain_id: L2ChainId, - ) -> anyhow::Result<(SystemEnv, L1BatchEnv)> { + ) -> anyhow::Result<(SystemEnv, L1BatchEnv, PubdataParams)> { anyhow::ensure!( first_l2_block_in_batch.l1_batch_number > L1BatchNumber(0), "Loading params for genesis L1 batch not supported" @@ -317,7 +318,7 @@ impl L1BatchParamsProvider { .await .context("failed getting base system contracts")?; - Ok(l1_batch_params( + let (system_env, l1_batch_env) = l1_batch_params( first_l2_block_in_batch.l1_batch_number, first_l2_block_in_batch.header.fee_account_address, l1_batch_timestamp, @@ -333,6 +334,12 @@ impl L1BatchParamsProvider { .context("`protocol_version` must be set for L2 block")?, first_l2_block_in_batch.header.virtual_blocks, chain_id, + ); + + Ok(( + system_env, + l1_batch_env, + first_l2_block_in_batch.header.pubdata_params, )) } @@ -346,7 +353,7 @@ impl L1BatchParamsProvider { number: L1BatchNumber, validation_computational_gas_limit: u32, chain_id: L2ChainId, - ) -> anyhow::Result> { + ) -> anyhow::Result> { let first_l2_block = self .load_first_l2_block_in_batch(storage, number) .await diff --git a/core/lib/vm_interface/src/executor.rs b/core/lib/vm_interface/src/executor.rs index 119f975fecd5..60522ba338a2 100644 --- a/core/lib/vm_interface/src/executor.rs +++ b/core/lib/vm_interface/src/executor.rs @@ -3,7 +3,7 @@ use std::fmt; use async_trait::async_trait; -use zksync_types::{l2::L2Tx, Transaction}; +use zksync_types::{commitment::PubdataParams, l2::L2Tx, Transaction}; use crate::{ storage::{ReadStorage, StorageView}, @@ -20,6 +20,7 @@ pub trait BatchExecutorFactory: 'static + Send + fmt::Debug { storage: S, l1_batch_params: L1BatchEnv, system_env: SystemEnv, + pubdata_params: PubdataParams, ) -> Box>; } diff --git a/core/lib/vm_interface/src/lib.rs b/core/lib/vm_interface/src/lib.rs index e0287483067a..39f949e5d8a9 100644 --- a/core/lib/vm_interface/src/lib.rs +++ b/core/lib/vm_interface/src/lib.rs @@ -24,8 +24,8 @@ pub use crate::{ VmRevertReason, VmRevertReasonParsingError, }, inputs::{ - L1BatchEnv, L2BlockEnv, OneshotEnv, OneshotTracingParams, StoredL2BlockEnv, SystemEnv, - TxExecutionArgs, TxExecutionMode, VmExecutionMode, + InspectExecutionMode, L1BatchEnv, L2BlockEnv, OneshotEnv, OneshotTracingParams, + StoredL2BlockEnv, SystemEnv, TxExecutionArgs, TxExecutionMode, VmExecutionMode, }, outputs::{ BatchTransactionExecutionResult, BootloaderMemory, Call, CallType, CircuitStatistic, @@ -41,6 +41,7 @@ pub use crate::{ }; pub mod executor; +pub mod pubdata; pub mod storage; mod types; pub mod utils; diff --git a/core/lib/vm_interface/src/pubdata/mod.rs b/core/lib/vm_interface/src/pubdata/mod.rs new file mode 100644 index 000000000000..f901687b5fa6 --- /dev/null +++ b/core/lib/vm_interface/src/pubdata/mod.rs @@ -0,0 +1,90 @@ +use zksync_types::{ + 
l2_to_l1_log::L2ToL1Log, writes::StateDiffRecord, Address, ProtocolVersionId, H256, U256, +}; + +/// Corresponds to the following solidity event: +/// ```solidity +/// struct L2ToL1Log { +/// uint8 l2ShardId; +/// bool isService; +/// uint16 txNumberInBlock; +/// address sender; +/// bytes32 key; +/// bytes32 value; +/// } +/// ``` +#[derive(Debug, Default, Clone, PartialEq)] +pub struct L1MessengerL2ToL1Log { + pub l2_shard_id: u8, + pub is_service: bool, + pub tx_number_in_block: u16, + pub sender: Address, + pub key: U256, + pub value: U256, +} + +impl L1MessengerL2ToL1Log { + pub fn packed_encoding(&self) -> Vec { + /// Converts `U256` value into bytes array + fn u256_to_bytes_be(value: &U256) -> Vec { + let mut bytes = vec![0u8; 32]; + value.to_big_endian(bytes.as_mut_slice()); + bytes + } + + let mut res: Vec = vec![]; + res.push(self.l2_shard_id); + res.push(self.is_service as u8); + res.extend_from_slice(&self.tx_number_in_block.to_be_bytes()); + res.extend_from_slice(self.sender.as_bytes()); + res.extend(u256_to_bytes_be(&self.key)); + res.extend(u256_to_bytes_be(&self.value)); + res + } +} + +impl From for L2ToL1Log { + fn from(log: L1MessengerL2ToL1Log) -> Self { + fn u256_to_h256(num: U256) -> H256 { + let mut bytes = [0u8; 32]; + num.to_big_endian(&mut bytes); + H256::from_slice(&bytes) + } + + L2ToL1Log { + shard_id: log.l2_shard_id, + is_service: log.is_service, + tx_number_in_block: log.tx_number_in_block, + sender: log.sender, + key: u256_to_h256(log.key), + value: u256_to_h256(log.value), + } + } +} + +/// Struct based on which the pubdata blob is formed +#[derive(Debug, Clone, Default)] +pub struct PubdataInput { + pub user_logs: Vec, + pub l2_to_l1_messages: Vec>, + pub published_bytecodes: Vec>, + pub state_diffs: Vec, +} + +/// Trait that encapsulates pubdata building logic. It is implemented for rollup and validium cases. +/// If chains needs custom pubdata format then another implementation should be added. +pub trait PubdataBuilder: std::fmt::Debug { + fn l2_da_validator(&self) -> Address; + + fn l1_messenger_operator_input( + &self, + input: &PubdataInput, + protocol_version: ProtocolVersionId, + ) -> Vec; + + fn settlement_layer_pubdata( + &self, + input: &PubdataInput, + protocol_version: ProtocolVersionId, + ) -> Vec; +} diff --git a/core/lib/vm_interface/src/types/inputs/execution_mode.rs b/core/lib/vm_interface/src/types/inputs/execution_mode.rs index 41492af6edc5..f091a259d30d 100644 --- a/core/lib/vm_interface/src/types/inputs/execution_mode.rs +++ b/core/lib/vm_interface/src/types/inputs/execution_mode.rs @@ -13,3 +13,22 @@ pub enum VmExecutionMode { /// Stop after executing the entire bootloader. But before you exit the bootloader. Bootloader, } + +/// Subset of `VmExecutionMode` variants that do not require any additional input +/// and can be invoked with `inspect` method. +#[derive(Debug, Copy, Clone)] +pub enum InspectExecutionMode { + /// Stop after executing the next transaction. + OneTx, + /// Stop after executing the entire bootloader. But before you exit the bootloader. 
+ Bootloader, +} + +impl From for VmExecutionMode { + fn from(mode: InspectExecutionMode) -> Self { + match mode { + InspectExecutionMode::Bootloader => Self::Bootloader, + InspectExecutionMode::OneTx => Self::OneTx, + } + } +} diff --git a/core/lib/vm_interface/src/types/inputs/mod.rs b/core/lib/vm_interface/src/types/inputs/mod.rs index 24f58ae72f16..cb80ba7c1386 100644 --- a/core/lib/vm_interface/src/types/inputs/mod.rs +++ b/core/lib/vm_interface/src/types/inputs/mod.rs @@ -3,7 +3,7 @@ use zksync_types::{ }; pub use self::{ - execution_mode::VmExecutionMode, + execution_mode::{InspectExecutionMode, VmExecutionMode}, l1_batch_env::L1BatchEnv, l2_block::{L2BlockEnv, StoredL2BlockEnv}, system_env::{SystemEnv, TxExecutionMode}, diff --git a/core/lib/vm_interface/src/utils/dump.rs b/core/lib/vm_interface/src/utils/dump.rs index 4076aa72270b..f23d6f307b89 100644 --- a/core/lib/vm_interface/src/utils/dump.rs +++ b/core/lib/vm_interface/src/utils/dump.rs @@ -1,13 +1,14 @@ -use std::collections::HashMap; +use std::{collections::HashMap, rc::Rc}; use serde::{Deserialize, Serialize}; use zksync_types::{block::L2BlockExecutionData, L1BatchNumber, L2BlockNumber, Transaction, H256}; use crate::{ + pubdata::PubdataBuilder, storage::{ReadStorage, StoragePtr, StorageSnapshot, StorageView}, - BytecodeCompressionResult, FinishedL1Batch, L1BatchEnv, L2BlockEnv, PushTransactionResult, - SystemEnv, VmExecutionMode, VmExecutionResultAndLogs, VmFactory, VmInterface, VmInterfaceExt, - VmInterfaceHistoryEnabled, VmTrackingContracts, + BytecodeCompressionResult, FinishedL1Batch, InspectExecutionMode, L1BatchEnv, L2BlockEnv, + PushTransactionResult, SystemEnv, VmExecutionResultAndLogs, VmFactory, VmInterface, + VmInterfaceExt, VmInterfaceHistoryEnabled, VmTrackingContracts, }; fn create_storage_snapshot( @@ -48,6 +49,7 @@ fn create_storage_snapshot( } /// VM dump allowing to re-run the VM on the same inputs. Can be (de)serialized. +/// Note, dump is not capable of finishing batch in terms of VM execution. 
#[derive(Debug, Clone, PartialEq, Serialize, Deserialize)] pub struct VmDump { pub l1_batch_env: L1BatchEnv, @@ -98,7 +100,6 @@ impl VmDump { } } } - vm.finish_batch(); vm } } @@ -162,7 +163,7 @@ impl VmInterface for DumpingVm { fn inspect( &mut self, dispatcher: &mut Self::TracerDispatcher, - execution_mode: VmExecutionMode, + execution_mode: InspectExecutionMode, ) -> VmExecutionResultAndLogs { self.inner.inspect(dispatcher, execution_mode) } @@ -189,8 +190,8 @@ impl VmInterface for DumpingVm { .inspect_transaction_with_bytecode_compression(tracer, tx, with_compression) } - fn finish_batch(&mut self) -> FinishedL1Batch { - self.inner.finish_batch() + fn finish_batch(&mut self, pubdata_builder: Rc) -> FinishedL1Batch { + self.inner.finish_batch(pubdata_builder) } } diff --git a/core/lib/vm_interface/src/utils/shadow.rs b/core/lib/vm_interface/src/utils/shadow.rs index e8ef87c3c7f8..d12d85fa2e3a 100644 --- a/core/lib/vm_interface/src/utils/shadow.rs +++ b/core/lib/vm_interface/src/utils/shadow.rs @@ -3,6 +3,7 @@ use std::{ cell::RefCell, collections::{BTreeMap, BTreeSet}, fmt, + rc::Rc, sync::Arc, }; @@ -10,9 +11,10 @@ use zksync_types::{StorageKey, StorageLog, StorageLogWithPreviousValue, Transact use super::dump::{DumpingVm, VmDump}; use crate::{ + pubdata::PubdataBuilder, storage::{ReadStorage, StoragePtr, StorageView}, - BytecodeCompressionResult, CurrentExecutionState, FinishedL1Batch, L1BatchEnv, L2BlockEnv, - PushTransactionResult, SystemEnv, VmExecutionMode, VmExecutionResultAndLogs, VmFactory, + BytecodeCompressionResult, CurrentExecutionState, FinishedL1Batch, InspectExecutionMode, + L1BatchEnv, L2BlockEnv, PushTransactionResult, SystemEnv, VmExecutionResultAndLogs, VmFactory, VmInterface, VmInterfaceHistoryEnabled, VmTrackingContracts, }; @@ -332,7 +334,7 @@ where where Shadow: VmFactory, { - let main = DumpingVm::new(batch_env.clone(), system_env.clone(), storage.clone()); + let main = DumpingVm::new(batch_env.clone(), system_env.clone(), storage); let shadow = Shadow::new(batch_env.clone(), system_env.clone(), shadow_storage); let shadow = VmWithReporting { vm: shadow, @@ -400,7 +402,7 @@ where fn inspect( &mut self, (main_tracer, shadow_tracer): &mut Self::TracerDispatcher, - execution_mode: VmExecutionMode, + execution_mode: InspectExecutionMode, ) -> VmExecutionResultAndLogs { let main_result = self.main.inspect(main_tracer, execution_mode); if let Some(shadow) = self.shadow.get_mut() { @@ -457,10 +459,10 @@ where (main_bytecodes_result, main_tx_result) } - fn finish_batch(&mut self) -> FinishedL1Batch { - let main_batch = self.main.finish_batch(); + fn finish_batch(&mut self, pubdata_builder: Rc) -> FinishedL1Batch { + let main_batch = self.main.finish_batch(pubdata_builder.clone()); if let Some(shadow) = self.shadow.get_mut() { - let shadow_batch = shadow.vm.finish_batch(); + let shadow_batch = shadow.vm.finish_batch(pubdata_builder); let errors = main_batch.check_divergence(&shadow_batch); if let Err(err) = errors.into_result() { self.report(err); diff --git a/core/lib/vm_interface/src/vm.rs b/core/lib/vm_interface/src/vm.rs index 3a06d7f80cbe..2c25d729e318 100644 --- a/core/lib/vm_interface/src/vm.rs +++ b/core/lib/vm_interface/src/vm.rs @@ -11,11 +11,14 @@ //! Generally speaking, in most cases, the tracer dispatcher is a wrapper around `Vec>`, //! where `VmTracer` is a trait implemented for a specific VM version. 
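With this change, `finish_batch` takes the pubdata builder as a shared trait object instead of having no arguments. The sketch below only shows the new call shape, with toy stand-ins for `PubdataBuilder` and the VM; the real trait and its rollup/validium implementations live in `zksync_vm_interface` and are not reproduced here.

```rust
use std::rc::Rc;

// Toy stand-in for the `PubdataBuilder` trait; only the parameter shape matters here.
trait PubdataBuilder {
    fn name(&self) -> &'static str;
}

#[derive(Debug)]
struct RollupPubdataBuilder;

impl PubdataBuilder for RollupPubdataBuilder {
    fn name(&self) -> &'static str {
        "rollup"
    }
}

// Mirrors the new `finish_batch(&mut self, Rc<dyn PubdataBuilder>)` signature.
fn finish_batch(pubdata_builder: Rc<dyn PubdataBuilder>) {
    println!("finishing batch with the {} pubdata builder", pubdata_builder.name());
}

fn main() {
    let builder: Rc<dyn PubdataBuilder> = Rc::new(RollupPubdataBuilder);
    // Cloning the `Rc` lets the same builder be handed to a main and a shadow VM.
    finish_batch(Rc::clone(&builder));
    finish_batch(builder);
}
```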
+use std::rc::Rc; + use zksync_types::{Transaction, H256}; use crate::{ - storage::StoragePtr, BytecodeCompressionResult, FinishedL1Batch, L1BatchEnv, L2BlockEnv, - PushTransactionResult, SystemEnv, VmExecutionMode, VmExecutionResultAndLogs, + pubdata::PubdataBuilder, storage::StoragePtr, BytecodeCompressionResult, FinishedL1Batch, + InspectExecutionMode, L1BatchEnv, L2BlockEnv, PushTransactionResult, SystemEnv, + VmExecutionResultAndLogs, }; pub trait VmInterface { @@ -35,7 +38,7 @@ pub trait VmInterface { fn inspect( &mut self, dispatcher: &mut Self::TracerDispatcher, - execution_mode: VmExecutionMode, + execution_mode: InspectExecutionMode, ) -> VmExecutionResultAndLogs; /// Start a new L2 block. @@ -51,13 +54,13 @@ pub trait VmInterface { /// Execute batch till the end and return the result, with final execution state /// and bootloader memory. - fn finish_batch(&mut self) -> FinishedL1Batch; + fn finish_batch(&mut self, pubdata_builder: Rc) -> FinishedL1Batch; } /// Extension trait for [`VmInterface`] that provides some additional methods. pub trait VmInterfaceExt: VmInterface { /// Executes the next VM step (either next transaction or bootloader or the whole batch). - fn execute(&mut self, execution_mode: VmExecutionMode) -> VmExecutionResultAndLogs { + fn execute(&mut self, execution_mode: InspectExecutionMode) -> VmExecutionResultAndLogs { self.inspect(&mut ::default(), execution_mode) } diff --git a/core/node/api_server/src/web3/state.rs b/core/node/api_server/src/web3/state.rs index 18c206eaf584..a2aee8c7420a 100644 --- a/core/node/api_server/src/web3/state.rs +++ b/core/node/api_server/src/web3/state.rs @@ -146,6 +146,7 @@ impl InternalApiConfig { .l1_weth_bridge_proxy_addr .unwrap_or_default(), ), + l2_legacy_shared_bridge: contracts_config.l2_legacy_shared_bridge_addr, }, bridgehub_proxy_addr: contracts_config .ecosystem_contracts diff --git a/core/node/block_reverter/src/tests.rs b/core/node/block_reverter/src/tests.rs index 85d894b7fd57..b2c4ee6465f6 100644 --- a/core/node/block_reverter/src/tests.rs +++ b/core/node/block_reverter/src/tests.rs @@ -67,6 +67,7 @@ async fn setup_storage(storage: &mut Connection<'_, Core>, storage_logs: &[Stora virtual_blocks: 1, gas_limit: 0, logs_bloom: Default::default(), + pubdata_params: Default::default(), }; storage .blocks_dal() diff --git a/core/node/commitment_generator/Cargo.toml b/core/node/commitment_generator/Cargo.toml index 5ec8410124fc..1f4645414cbd 100644 --- a/core/node/commitment_generator/Cargo.toml +++ b/core/node/commitment_generator/Cargo.toml @@ -20,6 +20,7 @@ zksync_utils.workspace = true zksync_eth_client.workspace = true zksync_contracts.workspace = true zksync_multivm.workspace = true +zksync_system_constants.workspace = true circuit_sequencer_api_1_4_0.workspace = true circuit_sequencer_api_1_4_1.workspace = true circuit_sequencer_api_1_5_0.workspace = true diff --git a/core/node/commitment_generator/src/lib.rs b/core/node/commitment_generator/src/lib.rs index cf6971b041c6..9a33d4766f6e 100644 --- a/core/node/commitment_generator/src/lib.rs +++ b/core/node/commitment_generator/src/lib.rs @@ -9,7 +9,7 @@ use zksync_l1_contract_interface::i_executor::commit::kzg::pubdata_to_blob_commi use zksync_types::{ blob::num_blobs_required, commitment::{ - AuxCommitments, CommitmentCommonInput, CommitmentInput, L1BatchAuxiliaryOutput, + AuxCommitments, BlobHash, CommitmentCommonInput, CommitmentInput, L1BatchAuxiliaryOutput, L1BatchCommitment, L1BatchCommitmentArtifacts, L1BatchCommitmentMode, }, writes::{InitialStorageWrite, 
RepeatedStorageWrite, StateDiffRecord}, @@ -19,7 +19,10 @@ use zksync_utils::h256_to_u256; use crate::{ metrics::{CommitmentStage, METRICS}, - utils::{convert_vm_events_to_log_queries, CommitmentComputer, RealCommitmentComputer}, + utils::{ + convert_vm_events_to_log_queries, pubdata_to_blob_linear_hashes, read_aggregation_root, + CommitmentComputer, RealCommitmentComputer, + }, }; mod metrics; @@ -263,14 +266,40 @@ impl CommitmentGenerator { } state_diffs.sort_unstable_by_key(|rec| (rec.address, rec.key)); - let blob_commitments = if protocol_version.is_post_1_4_2() { + let blob_hashes = if protocol_version.is_post_1_4_2() { let pubdata_input = header.pubdata_input.with_context(|| { format!("`pubdata_input` is missing for L1 batch #{l1_batch_number}") })?; - pubdata_to_blob_commitments(num_blobs_required(&protocol_version), &pubdata_input) + let commitments = pubdata_to_blob_commitments( + num_blobs_required(&protocol_version), + &pubdata_input, + ); + let linear_hashes = pubdata_to_blob_linear_hashes( + num_blobs_required(&protocol_version), + pubdata_input, + ); + + commitments + .into_iter() + .zip(linear_hashes) + .map(|(commitment, linear_hash)| BlobHash { + commitment, + linear_hash, + }) + .collect::>() } else { - vec![H256::zero(); num_blobs_required(&protocol_version)] + vec![Default::default(); num_blobs_required(&protocol_version)] + }; + + let aggregation_root = if protocol_version.is_pre_gateway() { + let mut connection = self + .connection_pool + .connection_tagged("commitment_generator") + .await?; + read_aggregation_root(&mut connection, l1_batch_number).await? + } else { + H256::zero() }; CommitmentInput::PostBoojum { @@ -278,7 +307,8 @@ impl CommitmentGenerator { system_logs: header.system_logs, state_diffs, aux_commitments, - blob_commitments, + blob_hashes, + aggregation_root, } }; @@ -357,14 +387,10 @@ impl CommitmentGenerator { (L1BatchCommitmentMode::Rollup, _) => { // Do nothing } - - ( - L1BatchCommitmentMode::Validium, - CommitmentInput::PostBoojum { - blob_commitments, .. - }, - ) => { - blob_commitments.fill(H256::zero()); + (L1BatchCommitmentMode::Validium, CommitmentInput::PostBoojum { blob_hashes, .. }) => { + for hashes in blob_hashes { + hashes.commitment = H256::zero(); + } } (L1BatchCommitmentMode::Validium, _) => { /* Do nothing */ } } @@ -374,14 +400,9 @@ impl CommitmentGenerator { match (self.commitment_mode, &mut commitment.auxiliary_output) { ( L1BatchCommitmentMode::Validium, - L1BatchAuxiliaryOutput::PostBoojum { - blob_linear_hashes, - blob_commitments, - .. - }, + L1BatchAuxiliaryOutput::PostBoojum { blob_hashes, .. 
}, ) => { - blob_linear_hashes.fill(H256::zero()); - blob_commitments.fill(H256::zero()); + blob_hashes.fill(Default::default()); } _ => { /* Do nothing */ } } diff --git a/core/node/commitment_generator/src/utils.rs b/core/node/commitment_generator/src/utils.rs index 86643b6b581b..d405a1256a29 100644 --- a/core/node/commitment_generator/src/utils.rs +++ b/core/node/commitment_generator/src/utils.rs @@ -2,6 +2,7 @@ use std::fmt; +use anyhow::Context; use itertools::Itertools; use zk_evm_1_3_3::{ aux_structures::Timestamp as Timestamp_1_3_3, @@ -15,13 +16,18 @@ use zk_evm_1_5_0::{ aux_structures::Timestamp as Timestamp_1_5_0, zk_evm_abstractions::queries::LogQuery as LogQuery_1_5_0, }; +use zksync_dal::{Connection, Core, CoreDal}; +use zksync_l1_contract_interface::i_executor::commit::kzg::ZK_SYNC_BYTES_PER_BLOB; use zksync_multivm::{interface::VmEvent, utils::get_used_bootloader_memory_bytes}; +use zksync_system_constants::message_root::{AGG_TREE_HEIGHT_KEY, AGG_TREE_NODES_KEY}; use zksync_types::{ vm::VmVersion, + web3::keccak256, zk_evm_types::{LogQuery, Timestamp}, - ProtocolVersionId, EVENT_WRITER_ADDRESS, H256, U256, + AccountTreeId, L1BatchNumber, ProtocolVersionId, StorageKey, EVENT_WRITER_ADDRESS, H256, + L2_MESSAGE_ROOT_ADDRESS, U256, }; -use zksync_utils::{address_to_u256, expand_memory_contents, h256_to_u256}; +use zksync_utils::{address_to_u256, expand_memory_contents, h256_to_u256, u256_to_h256}; /// Encapsulates computations of commitment components. /// @@ -68,7 +74,8 @@ impl CommitmentComputer for RealCommitmentComputer { ), )), VmVersion::Vm1_5_0SmallBootloaderMemory - | VmVersion::Vm1_5_0IncreasedBootloaderMemory => Ok(H256( + | VmVersion::Vm1_5_0IncreasedBootloaderMemory + | VmVersion::VmGateway => Ok(H256( circuit_sequencer_api_1_5_0::commitments::events_queue_commitment_fixed( &events_queue .iter() @@ -106,7 +113,8 @@ impl CommitmentComputer for RealCommitmentComputer { ), )), VmVersion::Vm1_5_0SmallBootloaderMemory - | VmVersion::Vm1_5_0IncreasedBootloaderMemory => Ok(H256( + | VmVersion::Vm1_5_0IncreasedBootloaderMemory + | VmVersion::VmGateway => Ok(H256( circuit_sequencer_api_1_5_0::commitments::initial_heap_content_commitment_fixed( &full_bootloader_memory, ), @@ -234,3 +242,75 @@ pub(crate) fn convert_vm_events_to_log_queries(events: &[VmEvent]) -> Vec, +) -> Vec { + // Now, we need to calculate the linear hashes of the blobs. + // Firstly, let's pad the pubdata to the size of the blob. + if pubdata_input.len() % ZK_SYNC_BYTES_PER_BLOB != 0 { + pubdata_input.resize( + pubdata_input.len() + + (ZK_SYNC_BYTES_PER_BLOB - pubdata_input.len() % ZK_SYNC_BYTES_PER_BLOB), + 0, + ); + } + + let mut result = vec![H256::zero(); blobs_required]; + + pubdata_input + .chunks(ZK_SYNC_BYTES_PER_BLOB) + .enumerate() + .for_each(|(i, chunk)| { + result[i] = H256(keccak256(chunk)); + }); + + result +} + +pub(crate) async fn read_aggregation_root( + connection: &mut Connection<'_, Core>, + l1_batch_number: L1BatchNumber, +) -> anyhow::Result { + let (_, last_l2_block) = connection + .blocks_dal() + .get_l2_block_range_of_l1_batch(l1_batch_number) + .await? 
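`pubdata_to_blob_linear_hashes` above first right-pads the pubdata with zeros up to a whole number of blobs and then hashes each blob-sized chunk. Below is a self-contained sketch of just the padding rule; `126_976` is only an assumed stand-in for `ZK_SYNC_BYTES_PER_BLOB` (4096 field elements of 31 usable bytes each), the real constant comes from the KZG interface crate.

```rust
// Pads pubdata with zero bytes up to the next multiple of the blob size,
// mirroring the padding step of `pubdata_to_blob_linear_hashes`.
fn pad_to_blob_multiple(mut pubdata: Vec<u8>, bytes_per_blob: usize) -> Vec<u8> {
    if pubdata.len() % bytes_per_blob != 0 {
        let padded_len = pubdata.len() + (bytes_per_blob - pubdata.len() % bytes_per_blob);
        pubdata.resize(padded_len, 0);
    }
    pubdata
}

fn main() {
    let bytes_per_blob = 126_976; // assumed value, see note above
    assert_eq!(pad_to_blob_multiple(vec![1; 10], bytes_per_blob).len(), bytes_per_blob);
    // Already-aligned input is left untouched.
    assert_eq!(
        pad_to_blob_multiple(vec![1; bytes_per_blob], bytes_per_blob).len(),
        bytes_per_blob
    );
}
```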
+ .context("No range for batch")?; + + let agg_tree_height_slot = StorageKey::new( + AccountTreeId::new(L2_MESSAGE_ROOT_ADDRESS), + H256::from_low_u64_be(AGG_TREE_HEIGHT_KEY as u64), + ); + + let agg_tree_height = connection + .storage_web3_dal() + .get_historical_value_unchecked(agg_tree_height_slot.hashed_key(), last_l2_block) + .await?; + let agg_tree_height = h256_to_u256(agg_tree_height); + + // `nodes[height][0]` + let agg_tree_root_hash_key = + n_dim_array_key_in_layout(AGG_TREE_NODES_KEY, &[agg_tree_height, U256::zero()]); + let agg_tree_root_hash_slot = StorageKey::new( + AccountTreeId::new(L2_MESSAGE_ROOT_ADDRESS), + agg_tree_root_hash_key, + ); + + Ok(connection + .storage_web3_dal() + .get_historical_value_unchecked(agg_tree_root_hash_slot.hashed_key(), last_l2_block) + .await?) +} + +fn n_dim_array_key_in_layout(array_key: usize, indices: &[U256]) -> H256 { + let mut key: H256 = u256_to_h256(array_key.into()); + + for index in indices { + key = H256(keccak256(key.as_bytes())); + key = u256_to_h256(h256_to_u256(key).overflowing_add(*index).0); + } + + key +} diff --git a/core/node/consensus/src/storage/store.rs b/core/node/consensus/src/storage/store.rs index 7267d7e1c822..53be2fc63c75 100644 --- a/core/node/consensus/src/storage/store.rs +++ b/core/node/consensus/src/storage/store.rs @@ -28,6 +28,13 @@ fn to_fetched_block( .context("Integer overflow converting block number")?, ); let payload = Payload::decode(payload).context("Payload::decode()")?; + let pubdata_params = if payload.protocol_version.is_pre_gateway() { + payload.pubdata_params.unwrap_or_default() + } else { + payload + .pubdata_params + .context("Missing `pubdata_params` for post-gateway payload")? + }; Ok(FetchedBlock { number, l1_batch_number: payload.l1_batch_number, @@ -38,6 +45,7 @@ fn to_fetched_block( l1_gas_price: payload.l1_gas_price, l2_fair_gas_price: payload.l2_fair_gas_price, fair_pubdata_price: payload.fair_pubdata_price, + pubdata_params, virtual_blocks: payload.virtual_blocks, operator_address: payload.operator_address, transactions: payload diff --git a/core/node/consensus/src/testonly.rs b/core/node/consensus/src/testonly.rs index 4ebcf5c9a617..db433665e570 100644 --- a/core/node/consensus/src/testonly.rs +++ b/core/node/consensus/src/testonly.rs @@ -295,6 +295,7 @@ impl StateKeeper { timestamp: self.last_timestamp, virtual_blocks: 1, }, + pubdata_params: Default::default(), }, number: self.last_batch, first_l2_block_number: self.last_block, @@ -568,9 +569,11 @@ impl StateKeeperRunner { let (stop_send, stop_recv) = sync::watch::channel(false); let (persistence, l2_block_sealer) = StateKeeperPersistence::new( self.pool.0.clone(), - ethabi::Address::repeat_byte(11), + Some(ethabi::Address::repeat_byte(11)), 5, - ); + ) + .await + .unwrap(); let io = ExternalIO::new( self.pool.0.clone(), @@ -675,9 +678,11 @@ impl StateKeeperRunner { let (stop_send, stop_recv) = sync::watch::channel(false); let (persistence, l2_block_sealer) = StateKeeperPersistence::new( self.pool.0.clone(), - ethabi::Address::repeat_byte(11), + Some(ethabi::Address::repeat_byte(11)), 5, - ); + ) + .await + .unwrap(); let tree_writes_persistence = TreeWritesPersistence::new(self.pool.0.clone()); let io = ExternalIO::new( diff --git a/core/node/db_pruner/src/tests.rs b/core/node/db_pruner/src/tests.rs index a5458e996e1e..99fbada423dc 100644 --- a/core/node/db_pruner/src/tests.rs +++ b/core/node/db_pruner/src/tests.rs @@ -122,6 +122,7 @@ async fn insert_l2_blocks( virtual_blocks: 0, gas_limit: 0, logs_bloom: Default::default(), + 
pubdata_params: Default::default(), }; conn.blocks_dal() diff --git a/core/node/eth_sender/src/tests.rs b/core/node/eth_sender/src/tests.rs index 9e844a8b8537..8e5032a69cfc 100644 --- a/core/node/eth_sender/src/tests.rs +++ b/core/node/eth_sender/src/tests.rs @@ -126,6 +126,10 @@ pub(crate) fn default_l1_batch_metadata() -> L1BatchMetadata { events_queue_commitment: Some(H256::zero()), bootloader_initial_content_commitment: Some(H256::zero()), state_diffs_compressed: vec![], + state_diff_hash: Some(H256::default()), + local_root: Some(H256::default()), + aggregation_root: Some(H256::default()), + da_inclusion_data: Some(vec![]), } } diff --git a/core/node/eth_watch/src/lib.rs b/core/node/eth_watch/src/lib.rs index a832733b3559..4185878d2ac4 100644 --- a/core/node/eth_watch/src/lib.rs +++ b/core/node/eth_watch/src/lib.rs @@ -140,7 +140,7 @@ impl EthWatch { let finalized_block = client.finalized_block_number().await?; let from_block = storage - .processed_events_dal() + .eth_watcher_dal() .get_or_set_next_block_to_process( processor.event_type(), chain_id, @@ -180,7 +180,7 @@ impl EthWatch { }; storage - .processed_events_dal() + .eth_watcher_dal() .update_next_block_to_process( processor.event_type(), chain_id, diff --git a/core/node/fee_model/src/l1_gas_price/gas_adjuster/mod.rs b/core/node/fee_model/src/l1_gas_price/gas_adjuster/mod.rs index 459b8855b961..6fce46f77225 100644 --- a/core/node/fee_model/src/l1_gas_price/gas_adjuster/mod.rs +++ b/core/node/fee_model/src/l1_gas_price/gas_adjuster/mod.rs @@ -88,8 +88,8 @@ impl GasAdjuster { anyhow::ensure!(client.gateway_mode, "Must be L2 client in L2 mode"); anyhow::ensure!( - matches!(pubdata_sending_mode, PubdataSendingMode::RelayedL2Calldata), - "Only relayed L2 calldata is available for L2 mode, got: {pubdata_sending_mode:?}" + matches!(pubdata_sending_mode, PubdataSendingMode::RelayedL2Calldata | PubdataSendingMode::Custom), + "Only relayed L2 calldata or Custom is available for L2 mode, got: {pubdata_sending_mode:?}" ); } else { anyhow::ensure!(!client.gateway_mode, "Must be L1 client in L1 mode"); diff --git a/core/node/genesis/src/lib.rs b/core/node/genesis/src/lib.rs index 3e4c0ee30b94..82732342b407 100644 --- a/core/node/genesis/src/lib.rs +++ b/core/node/genesis/src/lib.rs @@ -409,6 +409,7 @@ pub async fn create_genesis_l1_batch( virtual_blocks: 0, gas_limit: 0, logs_bloom: Bloom::zero(), + pubdata_params: Default::default(), }; let mut transaction = storage.start_transaction().await?; diff --git a/core/node/logs_bloom_backfill/src/lib.rs b/core/node/logs_bloom_backfill/src/lib.rs index 4337c0b8dc97..368d2edaf698 100644 --- a/core/node/logs_bloom_backfill/src/lib.rs +++ b/core/node/logs_bloom_backfill/src/lib.rs @@ -158,6 +158,7 @@ mod tests { virtual_blocks: 0, gas_limit: 0, logs_bloom: Default::default(), + pubdata_params: Default::default(), }; conn.blocks_dal() diff --git a/core/node/node_framework/src/implementations/layers/state_keeper/mempool_io.rs b/core/node/node_framework/src/implementations/layers/state_keeper/mempool_io.rs index ec2c415b9bbd..77992f34c7f5 100644 --- a/core/node/node_framework/src/implementations/layers/state_keeper/mempool_io.rs +++ b/core/node/node_framework/src/implementations/layers/state_keeper/mempool_io.rs @@ -4,7 +4,7 @@ use zksync_config::configs::{ wallets, }; use zksync_state_keeper::{MempoolFetcher, MempoolGuard, MempoolIO, SequencerSealer}; -use zksync_types::L2ChainId; +use zksync_types::{commitment::L1BatchCommitmentMode, Address, L2ChainId}; use crate::{ implementations::resources::{ @@ 
-39,6 +39,8 @@ pub struct MempoolIOLayer { state_keeper_config: StateKeeperConfig, mempool_config: MempoolConfig, wallets: wallets::StateKeeper, + l2_da_validator_addr: Option<Address>
, + l1_batch_commit_data_generator_mode: L1BatchCommitmentMode, } #[derive(Debug, FromContext)] @@ -63,12 +65,16 @@ impl MempoolIOLayer { state_keeper_config: StateKeeperConfig, mempool_config: MempoolConfig, wallets: wallets::StateKeeper, + l2_da_validator_addr: Option<Address>
, + l1_batch_commit_data_generator_mode: L1BatchCommitmentMode, ) -> Self { Self { zksync_network_id, state_keeper_config, mempool_config, wallets, + l2_da_validator_addr, + l1_batch_commit_data_generator_mode, } } @@ -129,6 +135,8 @@ impl WiringLayer for MempoolIOLayer { self.wallets.fee_account.address(), self.mempool_config.delay_interval(), self.zksync_network_id, + self.l2_da_validator_addr, + self.l1_batch_commit_data_generator_mode, )?; // Create sealer. diff --git a/core/node/node_framework/src/implementations/layers/state_keeper/output_handler.rs b/core/node/node_framework/src/implementations/layers/state_keeper/output_handler.rs index 5f63e4e19475..1a07591c1cd9 100644 --- a/core/node/node_framework/src/implementations/layers/state_keeper/output_handler.rs +++ b/core/node/node_framework/src/implementations/layers/state_keeper/output_handler.rs @@ -35,7 +35,7 @@ use crate::{ /// - `L2BlockSealerTask` #[derive(Debug)] pub struct OutputHandlerLayer { - l2_shared_bridge_addr: Address, + l2_legacy_shared_bridge_addr: Option<Address>
, l2_block_seal_queue_capacity: usize, /// Whether transactions should be pre-inserted to DB. /// Should be set to `true` for EN's IO as EN doesn't store transactions in DB @@ -63,9 +63,12 @@ pub struct Output { } impl OutputHandlerLayer { - pub fn new(l2_shared_bridge_addr: Address, l2_block_seal_queue_capacity: usize) -> Self { + pub fn new( + l2_legacy_shared_bridge_addr: Option<Address>
, + l2_block_seal_queue_capacity: usize, + ) -> Self { Self { - l2_shared_bridge_addr, + l2_legacy_shared_bridge_addr, l2_block_seal_queue_capacity, pre_insert_txs: false, protective_reads_persistence_enabled: false, @@ -103,11 +106,13 @@ impl WiringLayer for OutputHandlerLayer { .get_custom(L2BlockSealProcess::subtasks_len()) .await .context("Get master pool")?; + let (mut persistence, l2_block_sealer) = StateKeeperPersistence::new( persistence_pool.clone(), - self.l2_shared_bridge_addr, + self.l2_legacy_shared_bridge_addr, self.l2_block_seal_queue_capacity, - ); + ) + .await?; if self.pre_insert_txs { persistence = persistence.with_tx_insertion(); } diff --git a/core/node/node_sync/src/external_io.rs b/core/node/node_sync/src/external_io.rs index a0be233a002e..1be7e00543f1 100644 --- a/core/node/node_sync/src/external_io.rs +++ b/core/node/node_sync/src/external_io.rs @@ -251,7 +251,7 @@ impl StateKeeperIO for ExternalIO { pending_l2_block_header.set_protocol_version(protocol_version); } - let (system_env, l1_batch_env) = self + let (system_env, l1_batch_env, pubdata_params) = self .l1_batch_params_provider .load_l1_batch_params( &mut storage, @@ -274,7 +274,7 @@ impl StateKeeperIO for ExternalIO { .into_unsealed_header(Some(system_env.version)), ) .await?; - let data = load_pending_batch(&mut storage, system_env, l1_batch_env) + let data = load_pending_batch(&mut storage, system_env, l1_batch_env, pubdata_params) .await .with_context(|| { format!( @@ -529,6 +529,7 @@ mod tests { timestamp: 1, virtual_blocks: 1, }, + pubdata_params: Default::default(), }; actions_sender .push_action_unchecked(SyncAction::OpenBatch { diff --git a/core/node/node_sync/src/fetcher.rs b/core/node/node_sync/src/fetcher.rs index 51b9f7c7a060..9c76d1d93ca3 100644 --- a/core/node/node_sync/src/fetcher.rs +++ b/core/node/node_sync/src/fetcher.rs @@ -1,9 +1,10 @@ +use anyhow::Context; use zksync_dal::{Connection, Core, CoreDal}; use zksync_shared_metrics::{TxStage, APP_METRICS}; use zksync_state_keeper::io::{common::IoCursor, L1BatchParams, L2BlockParams}; use zksync_types::{ - api::en::SyncBlock, block::L2BlockHasher, fee_model::BatchFeeInput, helpers::unix_timestamp_ms, - Address, L1BatchNumber, L2BlockNumber, ProtocolVersionId, H256, + api::en::SyncBlock, block::L2BlockHasher, commitment::PubdataParams, fee_model::BatchFeeInput, + helpers::unix_timestamp_ms, Address, L1BatchNumber, L2BlockNumber, ProtocolVersionId, H256, }; use super::{ @@ -51,6 +52,7 @@ pub struct FetchedBlock { pub virtual_blocks: u32, pub operator_address: Address, pub transactions: Vec, + pub pubdata_params: PubdataParams, } impl FetchedBlock { @@ -77,6 +79,14 @@ impl TryFrom for FetchedBlock { )); } + let pubdata_params = if block.protocol_version.is_pre_gateway() { + block.pubdata_params.unwrap_or_default() + } else { + block + .pubdata_params + .context("Missing `pubdata_params` for post-gateway payload")? 
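The rule applied here (and in the consensus store above) is: pre-gateway blocks may omit `pubdata_params` and fall back to the default, while post-gateway blocks must carry them. Below is a minimal sketch of that fallback, with a unit-struct stand-in for `zksync_types::commitment::PubdataParams` and a plain boolean instead of the `ProtocolVersionId` check.

```rust
#[derive(Debug, Default, Clone, Copy, PartialEq)]
struct PubdataParams; // stand-in for the real params struct

fn resolve_pubdata_params(
    is_pre_gateway: bool,
    pubdata_params: Option<PubdataParams>,
) -> Result<PubdataParams, &'static str> {
    if is_pre_gateway {
        // Old payloads did not carry pubdata params; use the default.
        Ok(pubdata_params.unwrap_or_default())
    } else {
        // Post-gateway payloads must provide them explicitly.
        pubdata_params.ok_or("Missing `pubdata_params` for post-gateway payload")
    }
}

fn main() {
    assert_eq!(resolve_pubdata_params(true, None), Ok(PubdataParams));
    assert!(resolve_pubdata_params(false, None).is_err());
}
```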
+ }; + Ok(Self { number: block.number, l1_batch_number: block.l1_batch_number, @@ -93,6 +103,7 @@ impl TryFrom for FetchedBlock { .into_iter() .map(FetchedTransaction::new) .collect(), + pubdata_params, }) } } @@ -165,6 +176,7 @@ impl IoCursorExt for IoCursor { timestamp: block.timestamp, virtual_blocks: block.virtual_blocks, }, + pubdata_params: block.pubdata_params, }, number: block.l1_batch_number, first_l2_block_number: block.number, diff --git a/core/node/node_sync/src/sync_action.rs b/core/node/node_sync/src/sync_action.rs index e3fd56ae9bb0..897abfafb2a6 100644 --- a/core/node/node_sync/src/sync_action.rs +++ b/core/node/node_sync/src/sync_action.rs @@ -198,6 +198,7 @@ mod tests { timestamp: 1, virtual_blocks: 1, }, + pubdata_params: Default::default(), }, number: L1BatchNumber(1), first_l2_block_number: L2BlockNumber(1), diff --git a/core/node/node_sync/src/tests.rs b/core/node/node_sync/src/tests.rs index 1ae148709b22..172a00e8c14c 100644 --- a/core/node/node_sync/src/tests.rs +++ b/core/node/node_sync/src/tests.rs @@ -44,6 +44,7 @@ fn open_l1_batch(number: u32, timestamp: u64, first_l2_block_number: u32) -> Syn timestamp, virtual_blocks: 1, }, + pubdata_params: Default::default(), }, number: L1BatchNumber(number), first_l2_block_number: L2BlockNumber(first_l2_block_number), @@ -67,6 +68,7 @@ impl MockMainNodeClient { virtual_blocks: Some(0), hash: Some(snapshot.l2_block_hash), protocol_version: ProtocolVersionId::latest(), + pubdata_params: Default::default(), }; Self { @@ -106,7 +108,9 @@ impl StateKeeperHandles { let sync_state = SyncState::default(); let (persistence, l2_block_sealer) = - StateKeeperPersistence::new(pool.clone(), Address::repeat_byte(1), 5); + StateKeeperPersistence::new(pool.clone(), Some(Address::repeat_byte(1)), 5) + .await + .unwrap(); let tree_writes_persistence = TreeWritesPersistence::new(pool.clone()); let output_handler = OutputHandler::new(Box::new(persistence.with_tx_insertion())) .with_handler(Box::new(tree_writes_persistence)) diff --git a/core/node/proof_data_handler/src/request_processor.rs b/core/node/proof_data_handler/src/request_processor.rs index ee266a88971e..89304724a7c2 100644 --- a/core/node/proof_data_handler/src/request_processor.rs +++ b/core/node/proof_data_handler/src/request_processor.rs @@ -17,7 +17,7 @@ use zksync_types::{ basic_fri_types::Eip4844Blobs, commitment::{serialize_commitments, L1BatchCommitmentMode}, web3::keccak256, - L1BatchNumber, H256, + L1BatchNumber, ProtocolVersionId, H256, STATE_DIFF_HASH_KEY_PRE_GATEWAY, }; use crate::{errors::RequestProcessorError, metrics::METRICS}; @@ -226,58 +226,63 @@ impl RequestProcessor { .unwrap() .expect("Proved block without metadata"); - let is_pre_boojum = l1_batch + let protocol_version = l1_batch .header .protocol_version - .map(|v| v.is_pre_boojum()) - .unwrap_or(true); - if !is_pre_boojum { - let events_queue_state = l1_batch - .metadata - .events_queue_commitment - .expect("No events_queue_commitment"); - let bootloader_heap_initial_content = l1_batch - .metadata - .bootloader_initial_content_commitment - .expect("No bootloader_initial_content_commitment"); - - if events_queue_state != events_queue_state_from_prover - || bootloader_heap_initial_content - != bootloader_heap_initial_content_from_prover - { - let server_values = format!("events_queue_state = {events_queue_state}, bootloader_heap_initial_content = {bootloader_heap_initial_content}"); - let prover_values = format!("events_queue_state = {events_queue_state_from_prover}, bootloader_heap_initial_content = 
{bootloader_heap_initial_content_from_prover}"); - panic!( - "Auxilary output doesn't match, server values: {} prover values: {}", - server_values, prover_values - ); - } + .unwrap_or_else(ProtocolVersionId::last_potentially_undefined); + + let events_queue_state = l1_batch + .metadata + .events_queue_commitment + .expect("No events_queue_commitment"); + let bootloader_heap_initial_content = l1_batch + .metadata + .bootloader_initial_content_commitment + .expect("No bootloader_initial_content_commitment"); + + if events_queue_state != events_queue_state_from_prover + || bootloader_heap_initial_content + != bootloader_heap_initial_content_from_prover + { + panic!( + "Auxilary output doesn't match\n\ + server values: events_queue_state = {events_queue_state}, bootloader_heap_initial_content = {bootloader_heap_initial_content}\n\ + prover values: events_queue_state = {events_queue_state_from_prover}, bootloader_heap_initial_content = {bootloader_heap_initial_content_from_prover}", + ); } let system_logs = serialize_commitments(&l1_batch.header.system_logs); let system_logs_hash = H256(keccak256(&system_logs)); - if !is_pre_boojum { - let state_diff_hash = l1_batch + let state_diff_hash = if protocol_version.is_pre_gateway() { + l1_batch .header .system_logs - .into_iter() - .find(|elem| elem.0.key == H256::from_low_u64_be(2)) - .expect("No state diff hash key") - .0 - .value; - - if state_diff_hash != state_diff_hash_from_prover - || system_logs_hash != system_logs_hash_from_prover - { - let server_values = format!("system_logs_hash = {system_logs_hash}, state_diff_hash = {state_diff_hash}"); - let prover_values = format!("system_logs_hash = {system_logs_hash_from_prover}, state_diff_hash = {state_diff_hash_from_prover}"); - panic!( - "Auxilary output doesn't match, server values: {} prover values: {}", - server_values, prover_values - ); - } + .iter() + .find_map(|log| { + (log.0.key + == H256::from_low_u64_be(STATE_DIFF_HASH_KEY_PRE_GATEWAY as u64)) + .then_some(log.0.value) + }) + .expect("Failed to get state_diff_hash from system logs") + } else { + l1_batch + .metadata + .state_diff_hash + .expect("Failed to get state_diff_hash from metadata") + }; + + if state_diff_hash != state_diff_hash_from_prover + || system_logs_hash != system_logs_hash_from_prover + { + let server_values = format!("system_logs_hash = {system_logs_hash}, state_diff_hash = {state_diff_hash}"); + let prover_values = format!("system_logs_hash = {system_logs_hash_from_prover}, state_diff_hash = {state_diff_hash_from_prover}"); + panic!( + "Auxilary output doesn't match, server values: {} prover values: {}", + server_values, prover_values + ); } + storage .proof_generation_dal() .save_proof_artifacts_metadata(l1_batch_number, &blob_url) diff --git a/core/node/proof_data_handler/src/tee_request_processor.rs b/core/node/proof_data_handler/src/tee_request_processor.rs index 2c2a56300097..800dede23c76 100644 --- a/core/node/proof_data_handler/src/tee_request_processor.rs +++ b/core/node/proof_data_handler/src/tee_request_processor.rs @@ -130,7 +130,7 @@ impl TeeRequestProcessor { // This means we don't want to reject any execution, therefore we're using MAX as an allow all. 
let validation_computational_gas_limit = u32::MAX; - let (system_env, l1_batch_env) = l1_batch_params_provider + let (system_env, l1_batch_env, pubdata_params) = l1_batch_params_provider .load_l1_batch_env( &mut connection, l1_batch_number, @@ -149,6 +149,7 @@ impl TeeRequestProcessor { l2_blocks_execution_data, l1_batch_env, system_env, + pubdata_params, })) } diff --git a/core/node/state_keeper/src/executor/tests/tester.rs b/core/node/state_keeper/src/executor/tests/tester.rs index 79072f23aed9..a02aeb47cafa 100644 --- a/core/node/state_keeper/src/executor/tests/tester.rs +++ b/core/node/state_keeper/src/executor/tests/tester.rs @@ -25,6 +25,7 @@ use zksync_state::{OwnedStorage, ReadStorageFactory, RocksdbStorageOptions}; use zksync_test_account::{Account, DeployContractsTx, TxType}; use zksync_types::{ block::L2BlockHasher, + commitment::PubdataParams, ethabi::Token, protocol_version::ProtocolSemanticVersion, snapshots::{SnapshotRecoveryStatus, SnapshotStorageLog}, @@ -104,10 +105,9 @@ impl Tester { &mut self, storage_type: StorageType, ) -> Box> { - let (l1_batch_env, system_env) = self.default_batch_params(); + let (l1_batch_env, system_env, pubdata_params) = self.default_batch_params(); match storage_type { StorageType::AsyncRocksdbCache => { - let (l1_batch_env, system_env) = self.default_batch_params(); let (state_keeper_storage, task) = AsyncRocksdbCache::new( self.pool(), self.state_keeper_db_path(), @@ -122,6 +122,7 @@ impl Tester { Arc::new(state_keeper_storage), l1_batch_env, system_env, + pubdata_params, ) .await } @@ -133,12 +134,18 @@ impl Tester { )), l1_batch_env, system_env, + pubdata_params, ) .await } StorageType::Postgres => { - self.create_batch_executor_inner(Arc::new(self.pool()), l1_batch_env, system_env) - .await + self.create_batch_executor_inner( + Arc::new(self.pool()), + l1_batch_env, + system_env, + pubdata_params, + ) + .await } } } @@ -148,6 +155,7 @@ impl Tester { storage_factory: Arc, l1_batch_env: L1BatchEnv, system_env: SystemEnv, + pubdata_params: PubdataParams, ) -> Box> { let (_stop_sender, stop_receiver) = watch::channel(false); let storage = storage_factory @@ -158,11 +166,11 @@ impl Tester { if self.config.trace_calls { let mut executor = MainBatchExecutorFactory::::new(false); executor.set_fast_vm_mode(self.config.fast_vm_mode); - executor.init_batch(storage, l1_batch_env, system_env) + executor.init_batch(storage, l1_batch_env, system_env, pubdata_params) } else { let mut executor = MainBatchExecutorFactory::<()>::new(false); executor.set_fast_vm_mode(self.config.fast_vm_mode); - executor.init_batch(storage, l1_batch_env, system_env) + executor.init_batch(storage, l1_batch_env, system_env, pubdata_params) } } @@ -212,7 +220,7 @@ impl Tester { snapshot: &SnapshotRecoveryStatus, ) -> Box> { let current_timestamp = snapshot.l2_block_timestamp + 1; - let (mut l1_batch_env, system_env) = + let (mut l1_batch_env, system_env, pubdata_params) = self.batch_params(snapshot.l1_batch_number + 1, current_timestamp); l1_batch_env.previous_batch_hash = Some(snapshot.l1_batch_root_hash); l1_batch_env.first_l2_block = L2BlockEnv { @@ -222,11 +230,11 @@ impl Tester { max_virtual_blocks_to_create: 1, }; - self.create_batch_executor_inner(storage_factory, l1_batch_env, system_env) + self.create_batch_executor_inner(storage_factory, l1_batch_env, system_env, pubdata_params) .await } - pub(super) fn default_batch_params(&self) -> (L1BatchEnv, SystemEnv) { + pub(super) fn default_batch_params(&self) -> (L1BatchEnv, SystemEnv, PubdataParams) { // Not really important 
for the batch executor - it operates over a single batch. self.batch_params(L1BatchNumber(1), 100) } @@ -236,7 +244,7 @@ impl Tester { &self, l1_batch_number: L1BatchNumber, timestamp: u64, - ) -> (L1BatchEnv, SystemEnv) { + ) -> (L1BatchEnv, SystemEnv, PubdataParams) { let mut system_params = default_system_env(); if let Some(vm_gas_limit) = self.config.vm_gas_limit { system_params.bootloader_gas_limit = vm_gas_limit; @@ -245,7 +253,7 @@ impl Tester { self.config.validation_computational_gas_limit; let mut batch_params = default_l1_batch_env(l1_batch_number.0, timestamp, self.fee_account); batch_params.previous_batch_hash = Some(H256::zero()); // Not important in this context. - (batch_params, system_params) + (batch_params, system_params, PubdataParams::default()) } /// Performs the genesis in the storage. diff --git a/core/node/state_keeper/src/io/common/mod.rs b/core/node/state_keeper/src/io/common/mod.rs index 6bd881414a20..867ffa7fb371 100644 --- a/core/node/state_keeper/src/io/common/mod.rs +++ b/core/node/state_keeper/src/io/common/mod.rs @@ -3,7 +3,7 @@ use std::time::Duration; use anyhow::Context; use zksync_dal::{Connection, Core, CoreDal}; use zksync_multivm::interface::{L1BatchEnv, SystemEnv}; -use zksync_types::{L1BatchNumber, L2BlockNumber, H256}; +use zksync_types::{commitment::PubdataParams, L1BatchNumber, L2BlockNumber, H256}; use super::PendingBatchData; @@ -85,6 +85,7 @@ pub async fn load_pending_batch( storage: &mut Connection<'_, Core>, system_env: SystemEnv, l1_batch_env: L1BatchEnv, + pubdata_params: PubdataParams, ) -> anyhow::Result { let pending_l2_blocks = storage .transactions_dal() @@ -104,6 +105,7 @@ pub async fn load_pending_batch( Ok(PendingBatchData { l1_batch_env, system_env, + pubdata_params, pending_l2_blocks, }) } diff --git a/core/node/state_keeper/src/io/common/tests.rs b/core/node/state_keeper/src/io/common/tests.rs index b2a24acb4956..ec9f906b1cd7 100644 --- a/core/node/state_keeper/src/io/common/tests.rs +++ b/core/node/state_keeper/src/io/common/tests.rs @@ -318,7 +318,7 @@ async fn loading_pending_batch_with_genesis() { .await; let provider = L1BatchParamsProvider::new(&mut storage).await.unwrap(); - let (system_env, l1_batch_env) = provider + let (system_env, l1_batch_env, pubdata_params) = provider .load_l1_batch_env( &mut storage, L1BatchNumber(1), @@ -331,7 +331,7 @@ async fn loading_pending_batch_with_genesis() { assert_eq!(l1_batch_env.first_l2_block.number, 1); - let pending_batch = load_pending_batch(&mut storage, system_env, l1_batch_env) + let pending_batch = load_pending_batch(&mut storage, system_env, l1_batch_env, pubdata_params) .await .unwrap(); @@ -396,7 +396,7 @@ async fn loading_pending_batch_after_snapshot_recovery() { .await; let provider = L1BatchParamsProvider::new(&mut storage).await.unwrap(); - let (system_env, l1_batch_env) = provider + let (system_env, l1_batch_env, pubdata_params) = provider .load_l1_batch_env( &mut storage, snapshot_recovery.l1_batch_number + 1, @@ -406,7 +406,7 @@ async fn loading_pending_batch_after_snapshot_recovery() { .await .unwrap() .expect("no L1 batch"); - let pending_batch = load_pending_batch(&mut storage, system_env, l1_batch_env) + let pending_batch = load_pending_batch(&mut storage, system_env, l1_batch_env, pubdata_params) .await .unwrap(); diff --git a/core/node/state_keeper/src/io/mempool.rs b/core/node/state_keeper/src/io/mempool.rs index 229f54132f76..dfddd36aba71 100644 --- a/core/node/state_keeper/src/io/mempool.rs +++ b/core/node/state_keeper/src/io/mempool.rs @@ -14,7 +14,10 @@ 
use zksync_mempool::L2TxFilter; use zksync_multivm::{interface::Halt, utils::derive_base_fee_and_gas_per_pubdata}; use zksync_node_fee_model::BatchFeeModelInputProvider; use zksync_types::{ - block::UnsealedL1BatchHeader, protocol_upgrade::ProtocolUpgradeTx, utils::display_timestamp, + block::UnsealedL1BatchHeader, + commitment::{L1BatchCommitmentMode, PubdataParams}, + protocol_upgrade::ProtocolUpgradeTx, + utils::display_timestamp, Address, L1BatchNumber, L2BlockNumber, L2ChainId, ProtocolVersionId, Transaction, H256, U256, }; // TODO (SMA-1206): use seconds instead of milliseconds. @@ -55,6 +58,8 @@ pub struct MempoolIO { // Used to keep track of gas prices to set accepted price per pubdata byte in blocks. batch_fee_input_provider: Arc<dyn BatchFeeModelInputProvider>, chain_id: L2ChainId, + l2_da_validator_address: Option<Address>
, + pubdata_type: L1BatchCommitmentMode, } impl IoSealCriteria for MempoolIO { @@ -97,7 +102,7 @@ impl StateKeeperIO for MempoolIO { L2BlockSealProcess::clear_pending_l2_block(&mut storage, cursor.next_l2_block - 1).await?; - let Some((system_env, l1_batch_env)) = self + let Some((system_env, l1_batch_env, pubdata_params)) = self .l1_batch_params_provider .load_l1_batch_env( &mut storage, @@ -109,26 +114,24 @@ impl StateKeeperIO for MempoolIO { else { return Ok((cursor, None)); }; - let pending_batch_data = load_pending_batch(&mut storage, system_env, l1_batch_env) - .await - .with_context(|| { - format!( - "failed loading data for re-execution for pending L1 batch #{}", - cursor.l1_batch - ) - })?; + let pending_batch_data = + load_pending_batch(&mut storage, system_env, l1_batch_env, pubdata_params) + .await + .with_context(|| { + format!( + "failed loading data for re-execution for pending L1 batch #{}", + cursor.l1_batch + ) + })?; - let PendingBatchData { - l1_batch_env, - system_env, - pending_l2_blocks, - } = pending_batch_data; // Initialize the filter for the transactions that come after the pending batch. // We use values from the pending block to match the filter with one used before the restart. - let (base_fee, gas_per_pubdata) = - derive_base_fee_and_gas_per_pubdata(l1_batch_env.fee_input, system_env.version.into()); + let (base_fee, gas_per_pubdata) = derive_base_fee_and_gas_per_pubdata( + pending_batch_data.l1_batch_env.fee_input, + pending_batch_data.system_env.version.into(), + ); self.filter = L2TxFilter { - fee_input: l1_batch_env.fee_input, + fee_input: pending_batch_data.l1_batch_env.fee_input, fee_per_gas: base_fee, gas_per_pubdata: gas_per_pubdata as u32, }; @@ -136,20 +139,14 @@ impl StateKeeperIO for MempoolIO { storage .blocks_dal() .ensure_unsealed_l1_batch_exists( - l1_batch_env + pending_batch_data + .l1_batch_env .clone() - .into_unsealed_header(Some(system_env.version)), + .into_unsealed_header(Some(pending_batch_data.system_env.version)), ) .await?; - Ok(( - cursor, - Some(PendingBatchData { - l1_batch_env, - system_env, - pending_l2_blocks, - }), - )) + Ok((cursor, Some(pending_batch_data))) } async fn wait_for_new_batch_params( @@ -166,10 +163,11 @@ impl StateKeeperIO for MempoolIO { .get_unsealed_l1_batch() .await? { + let protocol_version = unsealed_storage_batch + .protocol_version + .context("unsealed batch is missing protocol version")?; return Ok(Some(L1BatchParams { - protocol_version: unsealed_storage_batch - .protocol_version - .expect("unsealed batch is missing protocol version"), + protocol_version, validation_computational_gas_limit: self.validation_computational_gas_limit, operator_address: unsealed_storage_batch.fee_address, fee_input: unsealed_storage_batch.fee_input, @@ -178,6 +176,7 @@ impl StateKeeperIO for MempoolIO { // This value is effectively ignored by the protocol. virtual_blocks: 1, }, + pubdata_params: self.pubdata_params(protocol_version)?, })); } @@ -247,6 +246,7 @@ impl StateKeeperIO for MempoolIO { // This value is effectively ignored by the protocol. virtual_blocks: 1, }, + pubdata_params: self.pubdata_params(protocol_version)?, })); } Ok(None) @@ -454,6 +454,7 @@ async fn sleep_past(timestamp: u64, l2_block: L2BlockNumber) -> u64 { } impl MempoolIO { + #[allow(clippy::too_many_arguments)] pub fn new( mempool: MempoolGuard, batch_fee_input_provider: Arc, @@ -462,6 +463,8 @@ impl MempoolIO { fee_account: Address, delay_interval: Duration, chain_id: L2ChainId, + l2_da_validator_address: Option
, + pubdata_type: L1BatchCommitmentMode, ) -> anyhow::Result { Ok(Self { mempool, @@ -477,8 +480,26 @@ impl MempoolIO { delay_interval, batch_fee_input_provider, chain_id, + l2_da_validator_address, + pubdata_type, }) } + + fn pubdata_params(&self, protocol_version: ProtocolVersionId) -> anyhow::Result { + let pubdata_params = match ( + protocol_version.is_pre_gateway(), + self.l2_da_validator_address, + ) { + (true, _) => PubdataParams::default(), + (false, Some(l2_da_validator_address)) => PubdataParams { + l2_da_validator_address, + pubdata_type: self.pubdata_type, + }, + (false, None) => anyhow::bail!("L2 DA validator address not found"), + }; + + Ok(pubdata_params) + } } /// Getters required for testing the MempoolIO. diff --git a/core/node/state_keeper/src/io/mod.rs b/core/node/state_keeper/src/io/mod.rs index 0fc5ebb6c082..e2461e72d7b2 100644 --- a/core/node/state_keeper/src/io/mod.rs +++ b/core/node/state_keeper/src/io/mod.rs @@ -4,8 +4,9 @@ use async_trait::async_trait; use zksync_contracts::BaseSystemContracts; use zksync_multivm::interface::{L1BatchEnv, SystemEnv}; use zksync_types::{ - block::L2BlockExecutionData, fee_model::BatchFeeInput, protocol_upgrade::ProtocolUpgradeTx, - Address, L1BatchNumber, L2ChainId, ProtocolVersionId, Transaction, H256, + block::L2BlockExecutionData, commitment::PubdataParams, fee_model::BatchFeeInput, + protocol_upgrade::ProtocolUpgradeTx, Address, L1BatchNumber, L2ChainId, ProtocolVersionId, + Transaction, H256, }; use zksync_vm_executor::storage::l1_batch_params; @@ -38,6 +39,7 @@ pub struct PendingBatchData { /// (e.g. timestamp) are the same, so transaction would have the same result after re-execution. pub(crate) l1_batch_env: L1BatchEnv, pub(crate) system_env: SystemEnv, + pub(crate) pubdata_params: PubdataParams, /// List of L2 blocks and corresponding transactions that were executed within batch. pub(crate) pending_l2_blocks: Vec, } @@ -70,6 +72,8 @@ pub struct L1BatchParams { pub fee_input: BatchFeeInput, /// Parameters of the first L2 block in the batch. pub first_l2_block: L2BlockParams, + /// Params related to how the pubdata should be processed by the bootloader in the batch. + pub pubdata_params: PubdataParams, } impl L1BatchParams { @@ -79,8 +83,8 @@ impl L1BatchParams { contracts: BaseSystemContracts, cursor: &IoCursor, previous_batch_hash: H256, - ) -> (SystemEnv, L1BatchEnv) { - l1_batch_params( + ) -> (SystemEnv, L1BatchEnv, PubdataParams) { + let (system_env, l1_batch_env) = l1_batch_params( cursor.l1_batch, self.operator_address, self.first_l2_block.timestamp, @@ -93,7 +97,9 @@ impl L1BatchParams { self.protocol_version, self.first_l2_block.virtual_blocks, chain_id, - ) + ); + + (system_env, l1_batch_env, self.pubdata_params) } } diff --git a/core/node/state_keeper/src/io/persistence.rs b/core/node/state_keeper/src/io/persistence.rs index 3e11285e11f1..06f1972a02aa 100644 --- a/core/node/state_keeper/src/io/persistence.rs +++ b/core/node/state_keeper/src/io/persistence.rs @@ -7,7 +7,7 @@ use async_trait::async_trait; use tokio::sync::{mpsc, oneshot}; use zksync_dal::{ConnectionPool, Core, CoreDal}; use zksync_shared_metrics::{BlockStage, APP_METRICS}; -use zksync_types::{writes::TreeWrite, Address}; +use zksync_types::{writes::TreeWrite, Address, ProtocolVersionId}; use zksync_utils::u256_to_h256; use crate::{ @@ -29,7 +29,7 @@ struct Completable { #[derive(Debug)] pub struct StateKeeperPersistence { pool: ConnectionPool, - l2_shared_bridge_addr: Address, + l2_legacy_shared_bridge_addr: Option
, pre_insert_txs: bool, insert_protective_reads: bool, commands_sender: mpsc::Sender<Completable<L2BlockSealCommand>>, @@ -41,13 +41,45 @@ pub struct StateKeeperPersistence { impl StateKeeperPersistence { const SHUTDOWN_MSG: &'static str = "L2 block sealer unexpectedly shut down"; + async fn validate_l2_legacy_shared_bridge_addr( + pool: &ConnectionPool<Core>, + l2_legacy_shared_bridge_addr: Option<Address>
, + ) -> anyhow::Result<()> { + let mut connection = pool.connection_tagged("state_keeper").await?; + + if let Some(l2_block) = connection + .blocks_dal() + .get_earliest_l2_block_number() + .await + .context("failed to load earliest l2 block number")? + { + let header = connection + .blocks_dal() + .get_l2_block_header(l2_block) + .await + .context("failed to load L2 block header")? + .context("missing L2 block header")?; + let protocol_version = header + .protocol_version + .unwrap_or_else(ProtocolVersionId::last_potentially_undefined); + + if protocol_version.is_pre_gateway() && l2_legacy_shared_bridge_addr.is_none() { + anyhow::bail!("Missing `l2_legacy_shared_bridge_addr` for chain that was initialized before gateway upgrade"); + } + } + + Ok(()) + } + /// Creates a sealer that will use the provided Postgres connection and will have the specified /// `command_capacity` for unprocessed sealing commands. - pub fn new( + pub async fn new( pool: ConnectionPool<Core>, - l2_shared_bridge_addr: Address, + l2_legacy_shared_bridge_addr: Option<Address>
, mut command_capacity: usize, - ) -> (Self, L2BlockSealerTask) { + ) -> anyhow::Result<(Self, L2BlockSealerTask)> { + Self::validate_l2_legacy_shared_bridge_addr(&pool, l2_legacy_shared_bridge_addr).await?; + let is_sync = command_capacity == 0; command_capacity = command_capacity.max(1); @@ -60,14 +92,14 @@ impl StateKeeperPersistence { }; let this = Self { pool, - l2_shared_bridge_addr, + l2_legacy_shared_bridge_addr, pre_insert_txs: false, insert_protective_reads: true, commands_sender, latest_completion_receiver: None, is_sync, }; - (this, sealer) + Ok((this, sealer)) } pub fn with_tx_insertion(mut self) -> Self { @@ -157,8 +189,8 @@ impl StateKeeperOutputHandler for StateKeeperPersistence { } async fn handle_l2_block(&mut self, updates_manager: &UpdatesManager) -> anyhow::Result<()> { - let command = - updates_manager.seal_l2_block_command(self.l2_shared_bridge_addr, self.pre_insert_txs); + let command = updates_manager + .seal_l2_block_command(self.l2_legacy_shared_bridge_addr, self.pre_insert_txs); self.submit_l2_block(command).await; Ok(()) } @@ -174,7 +206,7 @@ impl StateKeeperOutputHandler for StateKeeperPersistence { updates_manager .seal_l1_batch( self.pool.clone(), - self.l2_shared_bridge_addr, + self.l2_legacy_shared_bridge_addr, self.insert_protective_reads, ) .await @@ -392,8 +424,13 @@ mod tests { .unwrap(); drop(storage); - let (persistence, l2_block_sealer) = - StateKeeperPersistence::new(pool.clone(), Address::default(), l2_block_sealer_capacity); + let (persistence, l2_block_sealer) = StateKeeperPersistence::new( + pool.clone(), + Some(Address::default()), + l2_block_sealer_capacity, + ) + .await + .unwrap(); let mut output_handler = OutputHandler::new(Box::new(persistence)) .with_handler(Box::new(TreeWritesPersistence::new(pool.clone()))); tokio::spawn(l2_block_sealer.run()); @@ -451,7 +488,8 @@ mod tests { pool: &ConnectionPool, ) -> H256 { let l1_batch_env = default_l1_batch_env(1, 1, Address::random()); - let mut updates = UpdatesManager::new(&l1_batch_env, &default_system_env()); + let mut updates = + UpdatesManager::new(&l1_batch_env, &default_system_env(), Default::default()); pool.connection() .await .unwrap() @@ -538,7 +576,9 @@ mod tests { drop(storage); let (mut persistence, l2_block_sealer) = - StateKeeperPersistence::new(pool.clone(), Address::default(), 1); + StateKeeperPersistence::new(pool.clone(), Some(Address::default()), 1) + .await + .unwrap(); persistence = persistence.with_tx_insertion().without_protective_reads(); let mut output_handler = OutputHandler::new(Box::new(persistence)); tokio::spawn(l2_block_sealer.run()); @@ -577,11 +617,13 @@ mod tests { async fn l2_block_sealer_handle_blocking() { let pool = ConnectionPool::constrained_test_pool(1).await; let (mut persistence, mut sealer) = - StateKeeperPersistence::new(pool, Address::default(), 1); + StateKeeperPersistence::new(pool, Some(Address::default()), 1) + .await + .unwrap(); // The first command should be successfully submitted immediately. 
let mut updates_manager = create_updates_manager(); - let seal_command = updates_manager.seal_l2_block_command(Address::default(), false); + let seal_command = updates_manager.seal_l2_block_command(Some(Address::default()), false); persistence.submit_l2_block(seal_command).await; // The second command should lead to blocking @@ -589,7 +631,7 @@ mod tests { timestamp: 2, virtual_blocks: 1, }); - let seal_command = updates_manager.seal_l2_block_command(Address::default(), false); + let seal_command = updates_manager.seal_l2_block_command(Some(Address::default()), false); { let submit_future = persistence.submit_l2_block(seal_command); futures::pin_mut!(submit_future); @@ -617,7 +659,7 @@ mod tests { timestamp: 3, virtual_blocks: 1, }); - let seal_command = updates_manager.seal_l2_block_command(Address::default(), false); + let seal_command = updates_manager.seal_l2_block_command(Some(Address::default()), false); persistence.submit_l2_block(seal_command).await; let command = sealer.commands_receiver.recv().await.unwrap(); command.completion_sender.send(()).unwrap(); @@ -628,12 +670,15 @@ mod tests { async fn l2_block_sealer_handle_parallel_processing() { let pool = ConnectionPool::constrained_test_pool(1).await; let (mut persistence, mut sealer) = - StateKeeperPersistence::new(pool, Address::default(), 5); + StateKeeperPersistence::new(pool, Some(Address::default()), 5) + .await + .unwrap(); // 5 L2 block sealing commands can be submitted without blocking. let mut updates_manager = create_updates_manager(); for i in 1..=5 { - let seal_command = updates_manager.seal_l2_block_command(Address::default(), false); + let seal_command = + updates_manager.seal_l2_block_command(Some(Address::default()), false); updates_manager.push_l2_block(L2BlockParams { timestamp: i, virtual_blocks: 1, diff --git a/core/node/state_keeper/src/io/seal_logic/l2_block_seal_subtasks.rs b/core/node/state_keeper/src/io/seal_logic/l2_block_seal_subtasks.rs index 7ef466805e36..4fc58bce5c9e 100644 --- a/core/node/state_keeper/src/io/seal_logic/l2_block_seal_subtasks.rs +++ b/core/node/state_keeper/src/io/seal_logic/l2_block_seal_subtasks.rs @@ -3,7 +3,7 @@ use async_trait::async_trait; use once_cell::sync::Lazy; use zksync_dal::{Connection, Core, CoreDal}; use zksync_multivm::interface::VmEvent; -use zksync_system_constants::CONTRACT_DEPLOYER_ADDRESS; +use zksync_system_constants::{CONTRACT_DEPLOYER_ADDRESS, L2_NATIVE_TOKEN_VAULT_ADDRESS}; use zksync_types::{ ethabi, tokens::{TokenInfo, TokenMetadata}, @@ -18,7 +18,7 @@ use crate::{ }; fn extract_added_tokens( - l2_shared_bridge_addr: Address, + l2_token_deployer_addr: Address, all_generated_events: &[VmEvent], ) -> Vec { let deployed_tokens = all_generated_events @@ -28,7 +28,7 @@ fn extract_added_tokens( event.address == CONTRACT_DEPLOYER_ADDRESS && event.indexed_topics.len() == 4 && event.indexed_topics[0] == VmEvent::DEPLOY_EVENT_SIGNATURE - && h256_to_account_address(&event.indexed_topics[1]) == l2_shared_bridge_addr + && h256_to_account_address(&event.indexed_topics[1]) == l2_token_deployer_addr }) .map(|event| h256_to_account_address(&event.indexed_topics[3])); @@ -334,8 +334,10 @@ impl L2BlockSealSubtask for InsertTokensSubtask { ) -> anyhow::Result<()> { let is_fictive = command.is_l2_block_fictive(); let progress = L2_BLOCK_METRICS.start(L2BlockSealStage::ExtractAddedTokens, is_fictive); - let added_tokens = - extract_added_tokens(command.l2_shared_bridge_addr, &command.l2_block.events); + let token_deployer_address = command + .l2_legacy_shared_bridge_addr + 
.unwrap_or(L2_NATIVE_TOKEN_VAULT_ADDRESS); + let added_tokens = extract_added_tokens(token_deployer_address, &command.l2_block.events); progress.observe(added_tokens.len()); let progress = L2_BLOCK_METRICS.start(L2BlockSealStage::InsertTokens, is_fictive); @@ -464,6 +466,7 @@ mod tests { use zksync_node_test_utils::create_l2_transaction; use zksync_types::{ block::L2BlockHeader, + commitment::PubdataParams, l2_to_l1_log::{L2ToL1Log, UserL2ToL1Log}, AccountTreeId, Address, L1BatchNumber, ProtocolVersionId, StorageKey, StorageLog, StorageLogKind, StorageLogWithPreviousValue, @@ -552,8 +555,9 @@ mod tests { base_fee_per_gas: Default::default(), base_system_contracts_hashes: Default::default(), protocol_version: Some(ProtocolVersionId::latest()), - l2_shared_bridge_addr: Default::default(), + l2_legacy_shared_bridge_addr: Default::default(), pre_insert_txs: false, + pubdata_params: PubdataParams::default(), }; // Run. @@ -616,6 +620,7 @@ mod tests { virtual_blocks: l2_block_seal_command.l2_block.virtual_blocks, gas_limit: get_max_batch_gas_limit(VmVersion::latest()), logs_bloom: Default::default(), + pubdata_params: l2_block_seal_command.pubdata_params, }; connection .protocol_versions_dal() diff --git a/core/node/state_keeper/src/io/seal_logic/mod.rs b/core/node/state_keeper/src/io/seal_logic/mod.rs index 5859d27786d9..7f05bda7a6f5 100644 --- a/core/node/state_keeper/src/io/seal_logic/mod.rs +++ b/core/node/state_keeper/src/io/seal_logic/mod.rs @@ -46,7 +46,7 @@ impl UpdatesManager { pub(super) async fn seal_l1_batch( &self, pool: ConnectionPool, - l2_shared_bridge_addr: Address, + l2_legacy_shared_bridge_addr: Option
, insert_protective_reads: bool, ) -> anyhow::Result<()> { let started_at = Instant::now(); @@ -59,7 +59,7 @@ impl UpdatesManager { let progress = L1_BATCH_METRICS.start(L1BatchSealStage::FictiveL2Block); // Seal fictive L2 block with last events and storage logs. let l2_block_command = self.seal_l2_block_command( - l2_shared_bridge_addr, + l2_legacy_shared_bridge_addr, false, // fictive L2 blocks don't have txs, so it's fine to pass `false` here. ); @@ -335,8 +335,6 @@ impl L2BlockSealCommand { /// that are created after the last processed tx in the L1 batch: after the last transaction is processed, /// the bootloader enters the "tip" phase in which it can still generate events (e.g., /// one for sending fees to the operator). - /// - /// `l2_shared_bridge_addr` is required to extract the information on newly added tokens. async fn seal_inner( &self, strategy: &mut SealStrategy<'_>, @@ -393,6 +391,7 @@ impl L2BlockSealCommand { virtual_blocks: self.l2_block.virtual_blocks, gas_limit: get_max_batch_gas_limit(definite_vm_version), logs_bloom, + pubdata_params: self.pubdata_params, }; let mut connection = strategy.connection().await?; diff --git a/core/node/state_keeper/src/io/tests/mod.rs b/core/node/state_keeper/src/io/tests/mod.rs index 566eebf7ab72..ece5b67767f6 100644 --- a/core/node/state_keeper/src/io/tests/mod.rs +++ b/core/node/state_keeper/src/io/tests/mod.rs @@ -286,8 +286,9 @@ async fn processing_storage_logs_when_sealing_l2_block() { base_fee_per_gas: 10, base_system_contracts_hashes: BaseSystemContractsHashes::default(), protocol_version: Some(ProtocolVersionId::latest()), - l2_shared_bridge_addr: Address::default(), + l2_legacy_shared_bridge_addr: Some(Address::default()), pre_insert_txs: false, + pubdata_params: Default::default(), }; connection_pool .connection() @@ -376,8 +377,9 @@ async fn processing_events_when_sealing_l2_block() { base_fee_per_gas: 10, base_system_contracts_hashes: BaseSystemContractsHashes::default(), protocol_version: Some(ProtocolVersionId::latest()), - l2_shared_bridge_addr: Address::default(), + l2_legacy_shared_bridge_addr: Some(Address::default()), pre_insert_txs: false, + pubdata_params: Default::default(), }; pool.connection() .await @@ -447,13 +449,13 @@ async fn l2_block_processing_after_snapshot_recovery(commitment_mode: L1BatchCom .await .unwrap() .expect("no batch params generated"); - let (system_env, l1_batch_env) = l1_batch_params.into_env( + let (system_env, l1_batch_env, pubdata_params) = l1_batch_params.into_env( L2ChainId::default(), BASE_SYSTEM_CONTRACTS.clone(), &cursor, previous_batch_hash, ); - let mut updates = UpdatesManager::new(&l1_batch_env, &system_env); + let mut updates = UpdatesManager::new(&l1_batch_env, &system_env, pubdata_params); let tx_hash = tx.hash(); updates.extend_from_executed_transaction( @@ -467,7 +469,9 @@ async fn l2_block_processing_after_snapshot_recovery(commitment_mode: L1BatchCom ); let (mut persistence, l2_block_sealer) = - StateKeeperPersistence::new(connection_pool.clone(), Address::default(), 0); + StateKeeperPersistence::new(connection_pool.clone(), Some(Address::default()), 0) + .await + .unwrap(); tokio::spawn(l2_block_sealer.run()); persistence.handle_l2_block(&updates).await.unwrap(); diff --git a/core/node/state_keeper/src/io/tests/tester.rs b/core/node/state_keeper/src/io/tests/tester.rs index ad189831bad7..daedbebc75e0 100644 --- a/core/node/state_keeper/src/io/tests/tester.rs +++ b/core/node/state_keeper/src/io/tests/tester.rs @@ -147,6 +147,8 @@ impl Tester { 
wallets.state_keeper.unwrap().fee_account.address(), Duration::from_secs(1), L2ChainId::from(270), + Some(Default::default()), + Default::default(), ) .unwrap(); diff --git a/core/node/state_keeper/src/keeper.rs b/core/node/state_keeper/src/keeper.rs index bd102daa3080..523dd8ecebad 100644 --- a/core/node/state_keeper/src/keeper.rs +++ b/core/node/state_keeper/src/keeper.rs @@ -17,8 +17,9 @@ use zksync_multivm::{ use zksync_shared_metrics::{TxStage, APP_METRICS}; use zksync_state::{OwnedStorage, ReadStorageFactory}; use zksync_types::{ - block::L2BlockExecutionData, l2::TransactionType, protocol_upgrade::ProtocolUpgradeTx, - protocol_version::ProtocolVersionId, utils::display_timestamp, L1BatchNumber, Transaction, + block::L2BlockExecutionData, commitment::PubdataParams, l2::TransactionType, + protocol_upgrade::ProtocolUpgradeTx, protocol_version::ProtocolVersionId, + utils::display_timestamp, L1BatchNumber, Transaction, }; use crate::{ @@ -116,6 +117,7 @@ impl ZkSyncStateKeeper { let PendingBatchData { mut l1_batch_env, mut system_env, + mut pubdata_params, pending_l2_blocks, } = match pending_batch_params { Some(params) => { @@ -132,7 +134,7 @@ impl ZkSyncStateKeeper { } None => { tracing::info!("There is no open pending batch, starting a new empty batch"); - let (system_env, l1_batch_env) = self + let (system_env, l1_batch_env, pubdata_params) = self .wait_for_new_batch_env(&cursor) .await .map_err(|e| e.context("wait_for_new_batch_params()"))?; @@ -140,18 +142,19 @@ impl ZkSyncStateKeeper { l1_batch_env, pending_l2_blocks: Vec::new(), system_env, + pubdata_params, } } }; let protocol_version = system_env.version; - let mut updates_manager = UpdatesManager::new(&l1_batch_env, &system_env); + let mut updates_manager = UpdatesManager::new(&l1_batch_env, &system_env, pubdata_params); let mut protocol_upgrade_tx: Option = self .load_protocol_upgrade_tx(&pending_l2_blocks, protocol_version, l1_batch_env.number) .await?; let mut batch_executor = self - .create_batch_executor(l1_batch_env.clone(), system_env.clone()) + .create_batch_executor(l1_batch_env.clone(), system_env.clone(), pubdata_params) .await?; self.restore_state( &mut *batch_executor, @@ -201,10 +204,11 @@ impl ZkSyncStateKeeper { // Start the new batch. next_cursor.l1_batch += 1; - (system_env, l1_batch_env) = self.wait_for_new_batch_env(&next_cursor).await?; - updates_manager = UpdatesManager::new(&l1_batch_env, &system_env); + (system_env, l1_batch_env, pubdata_params) = + self.wait_for_new_batch_env(&next_cursor).await?; + updates_manager = UpdatesManager::new(&l1_batch_env, &system_env, pubdata_params); batch_executor = self - .create_batch_executor(l1_batch_env.clone(), system_env.clone()) + .create_batch_executor(l1_batch_env.clone(), system_env.clone(), pubdata_params) .await?; let version_changed = system_env.version != sealed_batch_protocol_version; @@ -221,6 +225,7 @@ impl ZkSyncStateKeeper { &mut self, l1_batch_env: L1BatchEnv, system_env: SystemEnv, + pubdata_params: PubdataParams, ) -> Result>, Error> { let storage = self .storage_factory @@ -230,7 +235,7 @@ impl ZkSyncStateKeeper { .ok_or(Error::Canceled)?; Ok(self .batch_executor - .init_batch(storage, l1_batch_env, system_env)) + .init_batch(storage, l1_batch_env, system_env, pubdata_params)) } /// This function is meant to be called only once during the state-keeper initialization. 
@@ -327,7 +332,7 @@ impl ZkSyncStateKeeper { async fn wait_for_new_batch_env( &mut self, cursor: &IoCursor, - ) -> Result<(SystemEnv, L1BatchEnv), Error> { + ) -> Result<(SystemEnv, L1BatchEnv, PubdataParams), Error> { // `io.wait_for_new_batch_params(..)` is not cancel-safe; once we get new batch params, we must hold onto them // until we get the rest of parameters from I/O or receive a stop signal. let params = self.wait_for_new_batch_params(cursor).await?; diff --git a/core/node/state_keeper/src/testonly/mod.rs b/core/node/state_keeper/src/testonly/mod.rs index d1e82c44bd6f..ad50c8ca8ce6 100644 --- a/core/node/state_keeper/src/testonly/mod.rs +++ b/core/node/state_keeper/src/testonly/mod.rs @@ -14,9 +14,9 @@ use zksync_multivm::interface::{ use zksync_state::OwnedStorage; use zksync_test_account::Account; use zksync_types::{ - fee::Fee, utils::storage_key_for_standard_token_balance, AccountTreeId, Address, Execute, - L1BatchNumber, L2BlockNumber, PriorityOpId, StorageLog, Transaction, L2_BASE_TOKEN_ADDRESS, - SYSTEM_CONTEXT_MINIMAL_BASE_FEE, U256, + commitment::PubdataParams, fee::Fee, utils::storage_key_for_standard_token_balance, + AccountTreeId, Address, Execute, L1BatchNumber, L2BlockNumber, PriorityOpId, StorageLog, + Transaction, L2_BASE_TOKEN_ADDRESS, SYSTEM_CONTEXT_MINIMAL_BASE_FEE, U256, }; use zksync_utils::u256_to_h256; @@ -50,6 +50,7 @@ impl BatchExecutorFactory for MockBatchExecutor { _storage: OwnedStorage, _l1_batch_env: L1BatchEnv, _system_env: SystemEnv, + _pubdata_params: PubdataParams, ) -> Box> { Box::new(Self) } diff --git a/core/node/state_keeper/src/testonly/test_batch_executor.rs b/core/node/state_keeper/src/testonly/test_batch_executor.rs index cb282f3b7d6d..45787b18f3c9 100644 --- a/core/node/state_keeper/src/testonly/test_batch_executor.rs +++ b/core/node/state_keeper/src/testonly/test_batch_executor.rs @@ -27,8 +27,9 @@ use zksync_multivm::{ use zksync_node_test_utils::create_l2_transaction; use zksync_state::{interface::StorageView, OwnedStorage, ReadStorageFactory}; use zksync_types::{ - fee_model::BatchFeeInput, l2_to_l1_log::UserL2ToL1Log, protocol_upgrade::ProtocolUpgradeTx, - Address, L1BatchNumber, L2BlockNumber, L2ChainId, ProtocolVersionId, Transaction, H256, + commitment::PubdataParams, fee_model::BatchFeeInput, l2_to_l1_log::UserL2ToL1Log, + protocol_upgrade::ProtocolUpgradeTx, Address, L1BatchNumber, L2BlockNumber, L2ChainId, + ProtocolVersionId, Transaction, H256, }; use crate::{ @@ -423,6 +424,7 @@ impl BatchExecutorFactory for TestBatchExecutorBuilder { _storage: OwnedStorage, _l1_batch_env: L1BatchEnv, _system_env: SystemEnv, + _pubdata_params: PubdataParams, ) -> Box> { let executor = TestBatchExecutor::new(self.txs.pop_front().unwrap(), self.rollback_set.clone()); @@ -702,6 +704,7 @@ impl StateKeeperIO for TestIO { timestamp: self.timestamp, virtual_blocks: 1, }, + pubdata_params: Default::default(), }; self.l2_block_number += 1; self.timestamp += 1; diff --git a/core/node/state_keeper/src/tests/mod.rs b/core/node/state_keeper/src/tests/mod.rs index 9e971541b204..16eed0b2f7f7 100644 --- a/core/node/state_keeper/src/tests/mod.rs +++ b/core/node/state_keeper/src/tests/mod.rs @@ -59,6 +59,7 @@ pub(crate) fn pending_batch_data(pending_l2_blocks: Vec) - default_validation_computational_gas_limit: BATCH_COMPUTATIONAL_GAS_LIMIT, chain_id: L2ChainId::from(270), }, + pubdata_params: Default::default(), pending_l2_blocks, } } @@ -102,7 +103,7 @@ pub(super) fn default_l1_batch_env( pub(super) fn create_updates_manager() -> UpdatesManager { let 
l1_batch_env = default_l1_batch_env(1, 1, Address::default()); - UpdatesManager::new(&l1_batch_env, &default_system_env()) + UpdatesManager::new(&l1_batch_env, &default_system_env(), Default::default()) } pub(super) fn create_transaction(fee_per_gas: u64, gas_per_pubdata: u64) -> Transaction { diff --git a/core/node/state_keeper/src/updates/mod.rs b/core/node/state_keeper/src/updates/mod.rs index 6211755eb156..b1bd35c921ca 100644 --- a/core/node/state_keeper/src/updates/mod.rs +++ b/core/node/state_keeper/src/updates/mod.rs @@ -9,8 +9,8 @@ use zksync_multivm::{ utils::{get_batch_base_fee, StorageWritesDeduplicator}, }; use zksync_types::{ - block::BlockGasCount, fee_model::BatchFeeInput, Address, L1BatchNumber, L2BlockNumber, - ProtocolVersionId, Transaction, H256, + block::BlockGasCount, commitment::PubdataParams, fee_model::BatchFeeInput, Address, + L1BatchNumber, L2BlockNumber, ProtocolVersionId, Transaction, H256, }; pub(crate) use self::{l1_batch_updates::L1BatchUpdates, l2_block_updates::L2BlockUpdates}; @@ -41,10 +41,15 @@ pub struct UpdatesManager { pub l1_batch: L1BatchUpdates, pub l2_block: L2BlockUpdates, pub storage_writes_deduplicator: StorageWritesDeduplicator, + pubdata_params: PubdataParams, } impl UpdatesManager { - pub fn new(l1_batch_env: &L1BatchEnv, system_env: &SystemEnv) -> Self { + pub fn new( + l1_batch_env: &L1BatchEnv, + system_env: &SystemEnv, + pubdata_params: PubdataParams, + ) -> Self { let protocol_version = system_env.version; Self { batch_timestamp: l1_batch_env.timestamp, @@ -63,6 +68,7 @@ impl UpdatesManager { ), storage_writes_deduplicator: StorageWritesDeduplicator::new(), storage_view_cache: None, + pubdata_params, } } @@ -85,7 +91,7 @@ impl UpdatesManager { pub(crate) fn seal_l2_block_command( &self, - l2_shared_bridge_addr: Address, + l2_legacy_shared_bridge_addr: Option
, pre_insert_txs: bool, ) -> L2BlockSealCommand { L2BlockSealCommand { @@ -97,8 +103,9 @@ impl UpdatesManager { base_fee_per_gas: self.base_fee_per_gas, base_system_contracts_hashes: self.base_system_contract_hashes, protocol_version: Some(self.protocol_version), - l2_shared_bridge_addr, + l2_legacy_shared_bridge_addr, pre_insert_txs, + pubdata_params: self.pubdata_params, } } @@ -211,11 +218,12 @@ pub struct L2BlockSealCommand { pub base_fee_per_gas: u64, pub base_system_contracts_hashes: BaseSystemContractsHashes, pub protocol_version: Option, - pub l2_shared_bridge_addr: Address, + pub l2_legacy_shared_bridge_addr: Option
, /// Whether transactions should be pre-inserted to DB. /// Should be set to `true` for EN's IO as EN doesn't store transactions in DB /// before they are included into L2 blocks. pub pre_insert_txs: bool, + pub pubdata_params: PubdataParams, } #[cfg(test)] diff --git a/core/node/test_utils/src/lib.rs b/core/node/test_utils/src/lib.rs index 9eb53994eee5..86ce3aadd9a1 100644 --- a/core/node/test_utils/src/lib.rs +++ b/core/node/test_utils/src/lib.rs @@ -45,6 +45,7 @@ pub fn create_l2_block(number: u32) -> L2BlockHeader { virtual_blocks: 1, gas_limit: 0, logs_bloom: Default::default(), + pubdata_params: Default::default(), } } @@ -98,6 +99,10 @@ pub fn create_l1_batch_metadata(number: u32) -> L1BatchMetadata { events_queue_commitment: Some(H256::zero()), bootloader_initial_content_commitment: Some(H256::zero()), state_diffs_compressed: vec![], + state_diff_hash: Some(H256::zero()), + local_root: Some(H256::zero()), + aggregation_root: Some(H256::zero()), + da_inclusion_data: Some(vec![]), } } @@ -128,6 +133,9 @@ pub fn l1_batch_metadata_to_commitment_artifacts( } _ => None, }, + local_root: metadata.local_root.unwrap(), + aggregation_root: metadata.aggregation_root.unwrap(), + state_diff_hash: metadata.state_diff_hash.unwrap(), } } @@ -213,6 +221,7 @@ impl Snapshot { virtual_blocks: 1, gas_limit: 0, logs_bloom: Default::default(), + pubdata_params: Default::default(), }; Snapshot { l1_batch, diff --git a/core/node/vm_runner/src/process.rs b/core/node/vm_runner/src/process.rs index 4f7ac1f97284..dbd218c8dc5f 100644 --- a/core/node/vm_runner/src/process.rs +++ b/core/node/vm_runner/src/process.rs @@ -82,6 +82,7 @@ impl VmRunner { storage, batch_data.l1_batch_env.clone(), batch_data.system_env.clone(), + batch_data.pubdata_params, ); let mut output_handler = self .output_handler_factory diff --git a/core/node/vm_runner/src/storage.rs b/core/node/vm_runner/src/storage.rs index 2285455ba244..9ab4ed87b9f1 100644 --- a/core/node/vm_runner/src/storage.rs +++ b/core/node/vm_runner/src/storage.rs @@ -13,7 +13,9 @@ use zksync_state::{ AsyncCatchupTask, BatchDiff, OwnedStorage, RocksdbCell, RocksdbStorage, RocksdbStorageBuilder, RocksdbWithMemory, }; -use zksync_types::{block::L2BlockExecutionData, L1BatchNumber, L2ChainId}; +use zksync_types::{ + block::L2BlockExecutionData, commitment::PubdataParams, L1BatchNumber, L2ChainId, +}; use zksync_vm_executor::storage::L1BatchParamsProvider; use zksync_vm_interface::{L1BatchEnv, SystemEnv}; @@ -106,6 +108,8 @@ pub struct BatchExecuteData { pub l1_batch_env: L1BatchEnv, /// Execution process parameters. pub system_env: SystemEnv, + /// Pubdata building parameters. + pub pubdata_params: PubdataParams, /// List of L2 blocks and corresponding transactions that were executed within batch. 
pub l2_blocks: Vec, } @@ -394,7 +398,7 @@ pub(crate) async fn load_batch_execute_data( l1_batch_params_provider: &L1BatchParamsProvider, chain_id: L2ChainId, ) -> anyhow::Result> { - let Some((system_env, l1_batch_env)) = l1_batch_params_provider + let Some((system_env, l1_batch_env, pubdata_params)) = l1_batch_params_provider .load_l1_batch_env( conn, l1_batch_number, @@ -415,6 +419,7 @@ pub(crate) async fn load_batch_execute_data( Ok(Some(BatchExecuteData { l1_batch_env, system_env, + pubdata_params, l2_blocks, })) } diff --git a/core/tests/vm-benchmark/src/vm.rs b/core/tests/vm-benchmark/src/vm.rs index dddef0de82fe..e198be9ea6b2 100644 --- a/core/tests/vm-benchmark/src/vm.rs +++ b/core/tests/vm-benchmark/src/vm.rs @@ -5,7 +5,7 @@ use zksync_contracts::BaseSystemContracts; use zksync_multivm::{ interface::{ storage::{InMemoryStorage, StorageView}, - ExecutionResult, L1BatchEnv, L2BlockEnv, SystemEnv, TxExecutionMode, VmExecutionMode, + ExecutionResult, InspectExecutionMode, L1BatchEnv, L2BlockEnv, SystemEnv, TxExecutionMode, VmExecutionResultAndLogs, VmFactory, VmInterface, VmInterfaceExt, VmInterfaceHistoryEnabled, }, @@ -113,7 +113,7 @@ impl BenchmarkingVmFactory for Fast } let mut tracer = InstructionCount(0); - vm.0.inspect(&mut tracer, VmExecutionMode::OneTx); + vm.0.inspect(&mut tracer, InspectExecutionMode::OneTx); tracer.0 } } @@ -144,7 +144,7 @@ impl BenchmarkingVmFactory for Legacy { &mut InstructionCounter::new(count.clone()) .into_tracer_pointer() .into(), - VmExecutionMode::OneTx, + InspectExecutionMode::OneTx, ); count.take() } @@ -191,7 +191,7 @@ impl Default for BenchmarkingVm { impl BenchmarkingVm { pub fn run_transaction(&mut self, tx: &Transaction) -> VmExecutionResultAndLogs { self.0.push_transaction(tx.clone()); - self.0.execute(VmExecutionMode::OneTx) + self.0.execute(InspectExecutionMode::OneTx) } pub fn run_transaction_full(&mut self, tx: &Transaction) -> VmExecutionResultAndLogs { diff --git a/etc/multivm_bootloaders/vm_gateway/commit b/etc/multivm_bootloaders/vm_gateway/commit new file mode 100644 index 000000000000..a3547f577034 --- /dev/null +++ b/etc/multivm_bootloaders/vm_gateway/commit @@ -0,0 +1 @@ +a8bf0ca28d43899882a2e123e2fdf1379f0fd656 diff --git a/etc/multivm_bootloaders/vm_gateway/fee_estimate.yul/fee_estimate.yul.zbin b/etc/multivm_bootloaders/vm_gateway/fee_estimate.yul/fee_estimate.yul.zbin new file mode 100644 index 0000000000000000000000000000000000000000..fb6017f69cf03b963d490070a1d33555531e5d30 GIT binary patch literal 75296 zcmeHw3w&Hhb@$wT>8@-`vTVtcELm4XNpK-xnA?A$)a!QUbK}14;`8VgoJjQeNdz9xeoE3*}h|B!IvFIcMfRX7AnA zYF7@wekl3*?wvDpX6DS9^P0z1DMf!N)vup;q#f0ZM=8A~HK~-_nO@`Es~n`PO<#+D z9mjL-QEG9kQYSCN`-!Z|(6f}0XCW<5uSK3fo_Px3vpIfumpLVMpHdxPQ|c&{JMNhD z4o?3MPG1Q~P7QFqDLj=rCHbfU>S}Ib8tK&TEW`IJRR%tHIhtBTDyqm_pnC9ZVR%MO zW-iCuuv){<4XVJ;-D)d6-A+>u?Oo3O_NYA|jx$U2IN^Ay4y1QE9nOc*okbwsNar>W zeap0~KICxHJMeFxqtZ8WyB(%i$>|U}st)J1jJE^y^y#RMF&(KN)F0(`Q#(4Z^q$0c zmAeA^=rzl@$dFsjZ zOsA=SM>!qpPJUk|>CQ6A=M z`79?$ju73Y9jgg%=jOzDCi5VG+w%-;g?Sjj?RoY)HSqUaXXvrWYQJ&s~Sv z0=c;Om0Et6Al-QWrFh-|&T@0{S2cW=;ce6)9v7waEz|i#-a`+xX*}sRYPb88)Echm zry9=*#^VNfWS+X`rwWvB3CMAj`1jNjjVHT=@tmL9%5ZQX!5zhTKoqMz>vG)gCE#_= zcPdACRv;hgA$sJPp8dHJ&5zV@bki0OLV&^6b>`3Bt*=}BlSnOB#i9MQ?kE14hp zKLLf+;>Bh@<&N+?Q!RfY^9oHwTa?qH3RKQ%nPj~S4n9f!Ot;LE+;SA!aVNn+8lHD8 zheaN>+~Hh~bk8kGy6bYg6yT_rvX_Bp@i2Z9KF^eQU&8%aqv^R)_xHGNziN^GUaIA+ z=C2gBV?=cWljvL3B6R3SI_QPuy@Yqk*@jLnLeD{$`{iI>Q+w3qnyvwz?v2u^WlZRl zyI1Ix({##dI^~$|i+wufG@a1?2%T~_3!QQ=(R7-O(g|>)(|H!1hI~5RVdykO?Iya= 
zczL@0D#!g@?9&PD1RYWy)3FOVWBCfnOy4DI4V{Fa+>0=d#OJ8%DAkvBOEmr%w^?;D z(yuanls~Nb=uV+eJRdE&R^vD0SKxNT@<|*&ruUKqrd=(0;qz7*zvr6qb7fw5i!?tj z`n%}(xv!D-<^N9Fm)GN$*W;JJC(7sfs`XXe4sY>sp_6-eh)#KASWI+r zFLzIn-QyO}9^%^P5#PKb((g`ZgHEEo&TKrq zZJmZ6Pk=vF!#@ff)bCr;(%x_C_Wn90@4wXVr{&$W|MY2x^|1puwZCJ##-p5_M2GfQ z2>#!gets#TecOU@I7)O-3F9#9#E!$9>3kx-e0#gl3H9cfZqSJ|uJAv!oC@e%M0(rP zr62Bk;3NLs066%!1M)50e&@Ru9@yi=Z&ZGW@UJ{W{B;8DBzXY76}hMRSNaVhtlHE7 zbPc8&o{%9hNO+!c{=gpIg7lC*tjHc+jk)}D<)sdT4z?eHK@ zO6ZFIY5C`<(#ZVlRedrpz!&vjK(^9|g3v=nM5h+VOF{3r+wgA@?_#%ts$w^~+au}7 zsnd5v(wF+_JJAouW6}Luro;9`KSccf03J{00B-vsweT-R{i{2J^3BHIvB3D-8^nJd z(I2B^;uFSy${=1r_A}E5&td(}{lWVN{oYFT<9)Y&=l0 z5zPPJL%w3?)iwAqfXDMm0Jr(@h8p z0^5U#i`@g`YrpPCS9SdEMOpTn7eCGT6PW_>}uIpK5)uU?R zPicCkuMm2rHNDcBUWjWGy>>GGrOLy9u-V%GR`|qy4)~xLl8d(fllaa2<>qs1;BQba ztEIxPkT)FHa63W2B8_ha=+8tWIj7*>He%0{Gbc) zl3612S?CwcXTZbqM@8x3UX4(78zPMx}*p{n)x^VzDWAM-})U8nI#e0)&TUun84 zXKwZQ^VzCLrJ93Z<}=Ch4vLGke?ape@g4f%-h_4+L1(e&eESUV93O3e3#Q3s3QVP3 z@Xca8DV|04a}G~1BVc47whniwkJ58p!J*Y3>ObUi`#*`iI=90q5PzK8kx|D-)FNca zP&s!u_@vmmBT>(aT^+#V^|>hm)*ToFR0k#^P{X!*4C(8jxNqO<-NQ|L8@uQT7;H+v_?vL1&S6A4TmK_Q4 z%<{VJI$vHBewrWb54q!LSF!UMiF_;L9>C+rJ%C$$3oldj!b;S$n&+EdFHl2F{}r50 z%@Mim%Oz*6$R$Zn8#$XXdIRfnu%FV3?Ln5;#80YQkE33X!1yJ}pDVPyo|}9F&z>aw zUDffs6KP*w4^e)9U2=|cbpGv@SM09@=;*{mgKPUze~>6)Fku}*@>^EXF9TvqwSEU zGwe4-a-aPJMe8Y$W1UwIUa#$2yh5(_OFI4~J+}{h|2A|7u5+A}OXblH^ppKi-~P=S`!^fbOPQA%ej3`T9-wmTwy|DSut%Z4+R+!t zzb*jCPHH2>c>4PaoC4N$M_8|D`L6xxIogkHe=qXYDKVa9z+8z3?P&Y3(9@Umiyfvj zH4EcP^yj!hwk-a*d&Hu9H`^KA(6|diclY_x`kFqjVjoaHWxl_F>r0-G;?D-~cs>f? z_I!U~4gA8VLHRoIt8O@z;8XYaogb3D*8VkZr~Lumi!_+xucEtbhqpaRkH63Xi%UWW zCH6&N$FV)x`t`o=t9JOUhF*)2TcyFvGLQZFwU{k1tO-|)m)PxrJhb~qal#QFk8*AL zy{HEL8Xce68u%OZJBYlHYveb9-_Xw|vb+k5SfCw6_R!rzYi0}k2q_#0lU<>>h$M}>boHfcLDtMR(PJ4X4P<)W8T zMcdyfklxX87bWsbwf`sM!Q|0$coF0#%lq^?DGxbE?LfDryu`&iAh{vO384LxIrWG3 zf6({`aWPw8)r!~K>m9eF|3zwtj>D3^8VTXI<`LFySWYZC1;38y0X@!ot=rj3{-o;8 zs^irEF1%xYcffZqQa^or-PsPh^dLp_1AMfy&|y3Lv>w&wb*T3N)-%+het~Bj>H*&N zpE13t{}LZiPFD4X+G*1vU%emhO8~FtZ#Ag*-8%?BTC44+S-6=@FJnKMb&%mX+Ed2g zY$f{#aX`e_AWwl*uVWx0QSt=KJ5SV;&M-vn$^L)6&Y%$I@WOFP_a?}XBIu&h#@_y2=F_(@|B=5&@gB0LDIQ7o z3B@Bx&gAe!{BgM_amckM4#|2aAr47$EgXjo>=j!+iMt@pw{%hsIy~J8W;!c!ur!WW8(a)p&Xbz;C13x{g!uS0Y;c8Afch7QbEfNz+i9fJ5TpU-hI?s0aAiRa95KSkST^>}LjAbvSXd^v3T zq2tloj`HmSZ;tJ~r@r-5x8i&R2_~W^@8=+P)poDg%fjFO`f88wkG-DSU&TIcP+s_S zF!QHYyxzmSWcx_s_5MCBiHk2r2U+f6*Rv=6&B}R$a%wB@t0}{-Cpxt0`L1%ZPN`DY ziF|H>Jx+9L$rbSY?RtPOm+FhxpPBUl*h%2y@cOOxuY>j5v>D&9y+nR4*-Nx;MRpRH zitVIDc#>ZR=0F;{BgLqsHawyQVw9YQ}%o|8P;f>@f>CdHUiGP@Ny+x^l#917@zX$ng ze#4LTC_nvxJ|?jNyNSk+-L41NgN5?^`mM4DV^3*)Uxe)({{f0PX#eTj& zx7HOd0>cqk>bOiN)&a`UA3!JjVO-b))c>5eGapcD=rUd>HuZ7)Iu3BUa^FaFsUo+9 z|K@LpN&mRTL?8DrxV~gNTlgh_$Ma(VxA@VI$I^U<{CD4E^qJTrb?cE7hF19Lx9h%$ z!Kh%L&Tpoi9~OHhm`@nLBFzscGfVTC;zl%k&3?>+>}zr#Fm`~$`_O25%DjY}z`Vq} zls_T)fsgaek{%g>JtXldC@YNtk)wOwA1|kwQqG;-n3&b4-udI0p6MJ3G0y=kPPE+;m;uV?&kOpuQRx} z>2>J81iV_@U)mk$(|A4$;5MI_xQM^c4yt~2>lZq2kEhn_eRy-R2dUj+pAjAQVZGqXKA)dYa-H6fUIuybM7ICB zeZTuC_w({ZIU;^{0FNKv0B()1UIz`}wj7alqX1qfezio?jTQ{A4)O2jb^p!2I$Ezm zEDmJicr=lV^(6Mkh<$GMMe;f*_|99aaMn|;Sht4P)$}@k2lj-q{QYaDTe5r~EX?C>~iCjBjU9 z-oj7S!_J|2vgj>&59Vzxx!NF|+!xjuhX(b%ImZJzs@FvlVD^j{z#(J{bAwRPV??T^xv}2#o=iO}iVE;!R ze%$@U7mqRhRTuYr1=Kc4IdnEfz#r}gDPuHl{RlveDME0Y}a=>~f3WBtN*X)D&bLUJy! z*J|mR2K7KqJ;Uop4bVxiJJr-P!Vg-Gp+2$a`Yn4d@at^87ke&%$B$L_o z$@S%kc!+%0_tvD3Njsp|D6V4dOI!LAnMbgy&AFMmtiRKKFR91Gk^J?2y>Dj;;yB!n zc31Vber8pNu^U7lsBYK+)c@s(!f?BtxyTjb8}<`u9yv76Xx{|a6Pb_Qyi@$V|B3v? 
z_6a)9{sNwe?-769VE9YhZ}s^Lz>~0VQu_VoN66XtvpZtCM%vaRx{D+SJrhbI{G<)0{-G3=^HyAg2+-vNQ z!nK4V2dG^v5A=Em#fL^ZH~R4bn;$^J4CasfAo2;_CB7$mDme~%BC;;EvBGYtb6>=g zUkaa`OMce~8b$Xgtj7I|4%XU8&QZJ3FZY8E#m9P8L3v2;S`b+!^{Mdm@QmrGizK(JJ zqMlbGr?yp`zuRKuDdaNN5^Q-nYtD-qc}edi4=i~(XTCH0pGqgF5F)P5d0!2=Q&R@v~fAK>Tvg?fmI;U1%O%fKg$3<7xbH6ZaX$ydwQC zdLpm`%sSL=4LA3b-7fEQ=KJV9O6Fc};y1aM8+hxvmzx5;Ws<{RiuP~L?TPAJ(;nvA zG;|ozsjl{1tK0MC8vNd%oOJ*7H>AI}>;4X#_Gr5{4f)9On(vQM?%5VRW5K_+u=Ha4 ze5CW~N%GSDy0p(dg?1G&?)n^)`)^je3y^PjW1G`Ev_AZ%1>X%g`i*@OjEBx2%RUDu zHA`HMWqjHHfPD-1@w=p-NYOnw%HKz!9J#mWA+5J$pKRm&`F>x|iQkTSmm&L(^eS|a ze;@pLQZE2(x4KW#{e<1f^1;J9jc-=J%esqFXKQ)zRLQ^e*T9VV*>wDuH5otIi_{;} z{!5#T-=@ze7a0F1nv9?Qoe+H{n~dM4&;MCq{I6>=ew#k`HyOW8pW{u&Z@2%?YT$pV z$cK)%HferS{3Sg9K3AjudgYUi|AGj9()VJ&K7rChdcyW^#EuN~NW2{xz%75yePRv# zVh0E1>%`x%oTdJS$KlBlIyElGX&-7B|Jo+wXTK;Ur#{9 z`?TlZ{)%yaR@C40_xTMv(eIC~%<=camVD3nv1##R&3DLDc9ZPupWAoHmOKP@X8#m| z6LHcdk7~T;p1+l8kH1fr*5?}>?7M#uT%?-%{jo~^JjwidSt5T*{3w9O^G^V`__Kok zAE=N1ZJ0?EccbaV>;Lnle;ao6gy~P;V`IE$i2g3w+eLIk@1I!ueNZMYUh$zO`#ZKB z^K@xP)8ZvI{;Qje-_~=_h~jq-(0k`qiTum)10H(sUB zN4CF@`G@ojonxi-64E;~zeVpHWV&3DNEh+{0(d-K0=P{VDId_y)@yP;C4k%I?Kt2e zOD>pwIksHx6S>^>XHAj|_eSNyQX}t;T&0)| z%dcuOetVwW8^K@6-~WsG``SeQCb_Y?Gl0kQcL2Be)2)SH_&+FLC;p1@cv+qNf4WKh z|8SK5+i}lZL@)lP^1r< z?cbWGe6;z)mR~nFr+vQswedf%LHxDk`{4z~f9C?@|HB&i|DnltupJM(vPS*2^rOvR zc0BUg3yl9h8$b4+@wzJfM%Xm&d3b&pVoxdFhv_~^uQxk*zZ|Ja>#47{@ir(o?Kt6U z7a0Gp8u;JaAYav*mw!^D{#txx z@3ZBSCC}d+mFKv}WsUPPlH-{*bpJ@lQR5Hb{7NsS=lFgQ+$d6(I3>QneF@TW--yH& z)gifm1owi_{8YJ(d_Lfa@dt3xG@Tc?$M^&0o>oWSKceqZaqykF2dR8k-#?N)Ti+{k zw%ju!`&{vzbx-aeafWpIkmPf=Iq>E2x18Ez;$$X1Rx`ds>+P%O9^Yvz&696!xn__1 zd#aDS`~IkZ=aRipgmT`9eNk(u`wi+@vmWu58ttg1f9!sC17BEv|9zA3+w1iI*kt^? z{sHnD`uuGaKlY(%d&wG~CrLgGOZ+}GTWb(5vg8O6}L)za-oADDZz(&xy1R133iW%Pl$ ze_88=AkHo8R{EEB>^+-%l zUO!xVTg5mGFXTArICVhxuYj)ABY4M#ozh=piPP0G2)}MX#$?wjyG~e|_<#)yYg9YY0s?UKQ?|d#jVOOoif-%n3 z>h~+6??``+&JQ~R`LC*g;~XR7yMf_PuYm8+@Xh-9EY7#3A|K8%0sl5TodjOy$!N#* z48JxC$9|w_`=4dK-(bBz$9jLRdDr!H;YmBQ>6jPo&!%@dPxwwfPxSeeSqJYUe19;* z&AGNYp971W174Ay>p9@gW8rhaE1aZtPibEW9&0y;;FjG?-IjZ?^u3XvkMwcf;rJzamIBUGlDcqAhqLv_C^aKWi`7H5!V7Oob?L>CIr#y8Ur!?;dKRzJ$ZKV|I z8{ykBbo0h%`R>&H}gz z5q7~Jso%XQ&G%n)oz(Zrn0-`qe<#Tsm8!V^vw(3k_u+Sa(yG6}_0u)j;rfZnB0tf; znEoT0jHyTR72ol$=X>2127z_xkwqh}!Epui#eGz2Ve)IcJ|Fmr}?w64IZRoZx%0 z5c}vpU&}4CZbb4I^P>BiLN~X%{AD%Ad@!uzxMIKg_YKThk5-q%=&r3#)f)X>Xwj=$ zzjQxakJ}H?|7!Myv=f61S`LKh6fX~x?F^zb&jXe}WJicRGWQ*0n6S);wu9ezD~R^CvuMp z_{{ol2C1bo%^Q-dmtek_ei=QjBJC!--j~C)uC*LKw=Q2*A8Ng!l=+^ktvAfQ**e|7 zSI+Cz5_0DT-TEUoiJyXV@?ns)-T6%CE^FSGg}gQpVM+YtG_#u z-AHm8ay%;MPxx{k>#mS1Pr=ko{w`}8%_g}H?T7wVlh+V`bnkihtMt5L`4y5|mV9s8 zy!y#+=aul`9PO(Asq<=SbY68mLEBZ3kP5M)t|Dfr=!-KW?;rjxU;fYH%aoD#72ku1 zk@tLGxY+MT?t>3WpK1G@%>C+eKP&RQfbR>DUA{X-ei+I7s9jz%c6sWG8uDJgizIe? 
zExAwapPSq#dkb>E>-lEBp}){6{{23`u;T5|3+cV+4-7(@m)OUo<4S3cD|O+d2hoMx zOXi>Y_c2{S`nU@oYLW0@e-hD+3=vB2evihVMchmLowc%Vs_Ez;4)!4DPg6foZod1O z`1=7=AIY6${cex*0_k-%NB_jc;Vzf&JavN>_cI;!xOu6L^d!**@rO(3nc?}+{Q;$h zD4&;MK1Diz_ryxGlk!wo2SG&EM(j+GF?&{MU>2X0rw1gWNW0 zpL{PXOW(_4zEPtb2MYLz`_+YY6s{lVbD7@`R_Cj%N>luX=%x88Gc5bxGQ)G+Cs)*Q zO!+QFb$e_XuVkDDzI$x^G0;`ZJ0;&o?tY=6mm}Xt&ROt3G;qn^^%p`<`+iK-Ve~vW z?Q)RQ+%Ih6t4ibFpw~6$>TeqVp6|)@;~EfdaFDOXzokfg6{(KIRZSe6?Oo&pWI^sZ zd!5)5-CsflG|$MN2+P@e_w{9fkNDB-i@Se;@lQ4xKlvNs_8)IDew#k}o~owN-)_IY zhs@x2`%(oy$MM}%#pV`KR^dqeh;cnSHsn-24LXtiZ`kL|`=z>v4SgVQdXaCQ_Xl+M8~R}2$c}(aV7i<5hO@Be zgsHyRd0#I*HZ_SJPkn6Z;qRl-dZhc83H14a@JqMPPl!9R+{C=XdZ$}sUw!v}L$Ai> zSNCp1FFn6rVEE_E&M#1@F}-2St)JJByEoeOZ*u)Vs!{(%mb|Fpe{{dO;qhkwqx*Gw zyaT)OG4Vf4ydC$%7WBH1*=NRfZ_@s^_Ja*kH1h?YLO2_lvUpgEwS+P~<@BT{U1==Cs$;e>b;YXNx!u)~N z^Tc&2vwsVpt5hRBZ}!)v6a*vL7p3{_r&)pZEw(3!bZn=@e?J;E#O|kAfpIFb9Vq*0 zRv0~hruWl04*NwbHizOtv>O5Iafl4ill2dtY)6@K>RBiKx4*M7Yy2wNPgDOm@%M=_ zPSE*P);Xv-(r&D&og_Q-Zdmu$Mo{T>Chu#q1D*Zl~DM&qtbD%uL!}%J%p6q858~l!K zRt0Jg^dzm9^*meXpYp2XcQ4AaKI^&J>@zWT)Ew)X(fq0Dokz4^FinZy-W1M8Y1{Lv|mK?m08c!>x?VDqveZ< zyKF!_3ge6QLFO04mCZi%%q-PY(DBjTxe~Q2w9Y8-Qnn;YPk$L?8n&}qEQM+IE_{GyL zC~wgX`_80aDYTpV*MfPtAM$qvt@Z0n`zQFC*anl8BzmA(W4ZnS^`!=CZygvy!&;AkggFGM5&o}GmTjZ(d^OJcW zEg4S`I_x(AdBks0?&FO9Vf!o(IQdg)_8RN6BE3oeBIR1c>o{5tAWp${1o>mLMZmQk;TpdS%WZ0; z?zBN977iC=~HOux3%7)CK5bbpTE`fbxeDr&mh(*$qxH(jqfq? zw|XAY@W=Oas7vwA zeu0d4->bErywq94{0F;H_%Or#(05qV$zS)(8b5&JBE$e<*GBz-zT2gJ`84e2`_NuA zliGv)SzebP%(1`F`vgt*%Z zlY~E=W4qQ-uzT;6eA3_CURkfr9dZyaP+m(x(sBL-`U(cnUaD8+!B3!Xul8sA`pi7b z@pp;)-tPBfBe%md?auAw_UZC{@7HvjMLT$2x9~p0KG-JAcS_d5`*2?)w>yt1g?d%V ztXt$)5)$|a4?%S#9pAo-+=s(2thku26?$>MXeHl#i zJm`In84uHr)Y;Z}=>4eKjI<-Azt@ZkxE(3o4oJGlxX5>_{Ci|zh6?>tXUVu=pFfXF zn*C!8q+VwS9OKfvTiWH1$B^cG+BZP`!MI5OQaiaFUe*)3rn5^ z$>sd{gMC7TuVmi3k zUEb0YJ)nz2<3#o$$r11y`v1`2^^myj^v?b*m;gTp`o66F+ZJ=qO#IuvFDCYv{o4>c zzP~|vtG(jihRWOh6?z8c>%^~8zW$^05M(#8f7|de~(mobCzdr)> zdY?&tG2V-ij~~_iMts$XzqiW5PyTZ=@zegHQ2Sl7f0~J(=35y5Qx_QjWl{Y8KFxaf z4_Wtu0E8_Kx598I) z`gQ+}+Mh$?tM5l%d8M{1dHuToIf;BA{S4so;}pOxJ|H$Ee-uxSGv&K#(6zyFb3ae` zsXo1L!>p5>DY{N+y8a9IGoXvj|I*I@-Y8wk&Z@?*4bEr9ei=ql>r!uefRyPIrF(*AGG<<^oQ1! 
zL-LX2C(F(JiUl4&-+$&fyid=>J;Kfh>zua(>ksMJiSD%v?>8_zAq!`I2ToZ5brDXdnj(p`TA2j zU#39$z&Gd9z5s*=xt+7QG@tV!H5bN@e7wJe0uLNVO(WiTHpVf1~23KfS={kiQs$}r#o5RAMWb?(zA4qo$&hW+B&|KTY~mNzkt_6 z`-Ob{N_<81;<(3asgYV=I}XPySN=-N36B5tKf8wf4dC(P9l&k*Ys+t24iS7L#7|cF zJo(KbxkAU|5Far2cqrG#*P#4XRDK^me}`DoNzfMK711C19n3-V6m)wc(?1i9FZJJ$ zNKc3y%WNB=dN4>`UR;sd*1HXp_JE6`^a-Te4c8tta>MeO@%-1sv8pq&_B+SkD2 z-H0Eb;0MI}9pESS13(9P>ihp-SIK)xp42Y3%k%sz#p|^h*xNK-6hEgpI@#wG9|WHc zJfitjx%)7FMJ^?J(cF`f5xod~*n#r~Brn8%pRbB6eZb&@i%gsx4JN&nxXx1jUcLTz>+vF%BRqAkxAbEji~0wF zkfn7f)BpaDXuOZnx=TOKNuG?83z!bFeu_XJ+Dqd@`6~Pk^BQ~~z~lKcfLnae@(=#g0Bj_O`g?%qL6F~;(;Sx%wP!x{H6m}xzZ(HRne(FD zUcATO9~?J+>Wqxv%Po25IV|t6?^(+|j;EkAEXN_VwLHeF>K%C0U+=Q_1)gcUIo)Kt zndA}K$+rEB^U`b|yJDX>g|yzkVb;&H+Md?;a^n6%&YxsY8@ZJsI~sBkydAQmS>L30 zpdC>=+BJ3_)Dqj#1NR#_$abREM*|OQeVa1*W!-V$r-}2F?T!#Ue%=J-t$8YTVW_-4 zFWK%4!Ry4&`>6JT2kCDB|Np-3H|Z7VE!fTV*n?2m!yW3~jF0RVyss;$NARTk7fHTC z53qb=yD%hA?0(U@%jyt3zF&d7vgoGhzEWy`&hhx^z#qrnm4VNN?8A^eLeALpVR8;5 z)DF9y)YaY)yxw-Ad^nCp@}J}}iG}dGq-mcrcGMvF^kqn;^S_7u-FYiT%_1z8636W~0YpPtdrX)OgK#W9;i+$nV9Q^I}TNg}I#@105{7VLgY>2Ldp+ z^_&E|egNhWj|=f7=^?WoZ|>dWb#kN+raEL?;HQVj<)Jx_i&An=Fyb9zC(Ayzfv@`G z!RI&!eys7O_5K^#Z;1xzIEKQxnNQ&rPrlzHlj)G}e&Bq}B}66YqdN(1?*B;X`#`Xd ziQ*KTszr#+J=Og<0sL2O>e4l!L2>6Pt zmL$qcv`qDe;IZ>51h?lstpks&YV+~iaB9bSfO9)nwOj2Y|B5s->4_F4ewV&y7J?k} zi2@XS9?G1T@lb^KBj#`6_>kv{o_}b`J=)LtBJID*Kdu%1X6*O#IjT&n#0O6-5`I$Z zJ)Evu-$eP<@FIaL^%hRwAo=s&B6A)~^1nf+7yR_6bN<^iru_N*z8m|$h;Nm82fb&# z^gZ+r?aJ}4RgjBCk{e!z)76LaoIk5t1+L=+c-QS+lxO*(-r6erZx?+$b0MK$d@;$3 zRrt;LRQi>YFMS)QpAz}+oJtFw{C#qOgT53HBe{ojU~8nk>1m>?IsdilyZ-qvUEV+c zb-R?WIR7;x=b7eqE^F1p(mtI3s*2yeC|@9dZV+PjDA6rj-}$e>c3n@QdOa#Q$5vNA z23KnQCCqakcmKTIF`<)wm;0L0fz*_y(>e|3eYi9k_#O6Wf$%sfwvPtSW&G$p46XtE zt5Tnpa%p`(89+3i2scSN+-r3Y=!LO8N%X?~7(fRy+ZI^JB@N(e` z*^f53Es<`#&n^Uy?^jUXqMLu-D&vSws_Pe4#BnjxRIdMa<8S}l_ zk9IcRi4dMBjH<@Ba6HbpGoMoaI7r zA4L}LDdwxrk>;pMS#kds;t%9`yBg3G*>OtKkKt)BACGZ#kV7KS!n4l6)Wh zvexU_H2VqgE5wfQG6kO3D$D0A2EUDJS6p2+`>-bX;=G2+`57+kg=iOFp&K%Hh;?U^S)Y$IPItciA-nHXn$IiXv1&@3&^PCsIVX~r3IN{i(rSNOT?_<^L zPxoy;UQs5LkRI-;4*$sU6Elam&QMS}>2LJ#=&pk^qe)=3>)_PX==jXW!;_=CW=41K zJiKMk9&oCkf9CLp@=V!E^r84_Z|Ha2PxpgIj>n~&w4H~&-KfBu;A*|GX>V+N*96+M zD`Nc>A&{)#lee(FUG9v?nEws*Wdb8srsdMs9( z_U|e0nwgk7vSD;`I*f+4^;G-YKT(}Uef*QijZYp7XzRnLMyE%o4vp^K zaAdrEU~HGE^QQ7{@1`R&qto7~ZmEIDbNBPB*T4Juaa0L#R1r<5fBCyNK3D_(wegAZ z^F|L$&K&XLJFsVJ;s996+c!EjKDytVo-FT*j6gN~q#bI&9iOZTm6%qo@z7H5`J+>a zT!;TQUbS`Y1s9BXljW)Mfobp7sfqEuVfdBhX>W3L%9}iR6VDS&kVu}O{&@VR{1Rp!{Uzq~iX@Zrwm_vp;c>&yH1Td8w=ZF^^-YRYr*@3a%#ipY&Axe^chmlfUHihyJ@@{9#=1vce{Trq4)mX~AELF0p%pF> z{e2L#dh680At=t>TZMAog z_nn9<Q4sqSEhqhbaR1E3-t@s;yGEytPN;-O>3mapdQ>x@k2d^6+qa2X4jkNn zvA1`0X5-Y}=^bE|8BjAZa~Lsx!~dE+Fpf}ImA|RcJqO2kKOSU^r*h$BdsPvBWV*WU z+c#OAKf+Cm8>D+0L9Bfiy+=`pW_Oehldds5(4BE`i|w*U51#iX!FrDW_}Le~^U?d>bMSQU zE#LfcIkR}=ulIg^#d)9a*?Pw>uif$I?*f0?ESd?UF#JS%KdvQ!i+;SWynhU8B;uTe z5ykxe?nAHtLfo82+AmFZ$NWfmPL~gj?mYZ7sL;`Anh8dm!JRooElR*I$v=GGgPaM? 
zE?#j}Yz<@6+_6{~C8GbN|3IF20!))kCgGeQly&{2K=zD{kD(h;6AZTM*q$RhB0449 zm_Wb87FTcD_E&Mk=A#J9w;346Tc*bLj)m;H=k2RLf%oj*mC_F%v$7Fd?`_t*{kp_Xu-?~v>l5#@l4`w1 z=2y^vvUI#1M-bvT;H{q+pBX(&*7`s#Y*pat;f~VsDsTd2YrwA^-#0#S>o~bhGe;&X zXZPmfj-5Ad+P-n)jXO4P*;xab1&00MCB*N189rS-Jfq6fRmG<&S2w@aWDN!7$Cvy- zK(rG+{6P6I9Mg%Zv6&;AMn~b3f@6_23=gLpEIu59ByeP}K*UPaco-g0>>7a&pRocn zHk<-Urqa;!!NG^T&}+rdfIrE9M6f_|m+7|4qRv55Q{k-rIuDeE2^^(8+`E^zE9J};*4frV?SKc!LdoyHv zYEkHm!FV*sc)70Bot~y|9A6or~_k>S; zhc#DU5EqhTXyf4RDTt%mz`clUk28a#d1@O$&sG6056yH)?H62EFm zrM~f#o^Re#!+w=*n8}FJ$smU4$N|IyIB}+`sZ)sw#^p8NcqlpeQduIB^7Z?=?u^tA z^D~kGFPnEQ|Dl~0e%EFyELT1M!oV88fBNlLUs*NFv5TuK$72_l)`KGvxtJyclq?Vl z(g#ae#IXd2{9ywHlO5~9L9Mj5f)%V$vcTh(z7F4VYgGgB&`lL23y|a2=&jZL&qFiS z4FuDtke;0IZW{B~C=SI&y@DnjnmHJ}#QZumQ{6v2bf9|wLj1Efj(-!>w%-ym8zKtX zhsY0TZ5FSv-(Z_WOnc5?;FX$P$_M_%*v$0k{)(|oFgyHKK=>W3Td?ZV&$;MaA5Ywm zOTYY-ZE@))?R&qt;V$z#?s?xeJ8p`4^h>aI>t?D`&yxsj7eF z8q7%%tpnxpBd~IKeVZ2wt741-AG$R4?oZvn_uC(S%c?K_>*;I1x#Gf|*R}6DG&cO~ MqpQDmPU|E84^E!&RR910 literal 0 HcmV?d00001 diff --git a/etc/multivm_bootloaders/vm_gateway/gas_test.yul/gas_test.yul.zbin b/etc/multivm_bootloaders/vm_gateway/gas_test.yul/gas_test.yul.zbin new file mode 100644 index 0000000000000000000000000000000000000000..c1726d8301ff2ffc1e8dc3dfdf1552926080a3a3 GIT binary patch literal 71392 zcmeHw3wT{eb?)r*+FO<_$+9I&vSe=&hv1eF+a#oN5^_##ISDmHj-908Wt~{Zwy-V9 zlI++4ief@iXrL)j0!a!=NJ%I(q$H(nNQix*J_4&C{`K;XgIk}>-%nKP`ipx*o z9_8IT)4sY(sg`3(-AMJ7o1uQ-d7huYji0BvCd1Py_1mhIN-~}`N)_q3lk(MDQ17mk zkAAC94s7zBE_$9`r~0UH=6uw{pPQa0{CB0tsGL$8HmI)wpQC(rkxJ7O#@mgbYw0G% z^;+(bal7|{bOm~@{LFc3C9XLxpHYLEixst))%_a4y;7T13txxSI=Z@dIJ!Q@znt!6 z{AkCSLcc&)Kh=Wg_d6}lrFp@j?Wpulri#P!Gw8I)yr~xF zj~Q-QWqZQ*&?nm_V2d(FSZu+Ynl6ZbC&U#qj~r92+xzBk^U;`$Nz`z&}C_4^|5={Vf| zSQY%Bn_3&GUn72^yV{4*bP0bfI;i=tgYXg&QV#fBfkOJ;Ig9viG3NdR^KY*kp$oXk zX;HTVKBVD!XAJzB0|t>R2p-{cKMt;doGd;|%gG>SoB2AG(r}ie#nT!-#qcH6A?8P= z%iRe%Av_<}<)*pZ;u+;0O-nhneIxbzc$(T<0Cvb2r!9g5@CoOnBo?FM|eovP>nWJ)TC@1ozayi1Mx^sg>=hT#|+sPef`o5Ft zmqm#N>0i#}@~gc;(p%~wJx>>-KGDr@P^2$V{9T~9YP?g!RqiP3LFK+n>kGyAj;r!i z&vD0C58kKeEA38^ygCZ~c{9O1jn_Tm{2k$UZ*fQu_}-wj=Xu;O1vuJOk4U?X=$}J$ z_Dy~7W@^6?a?5mGsmDF7`>$LccjFTfC#t9UEk*t4Q_w7+aS#4guF#bez_O&joSddpj_KYQ&?%?sg#O3ql-nzG z%H6E#G!~~5;6$fyT6F3S=yZ#rQ!mkh=tBMQb^le4#~acK{RACSKGSgpN+$9b9t&Un zm7$aHlY0X5Nb>V3JulhZAkDubd>fTo(rEa|lYXe&-b(aYf*zdU`P2Q%bHCav^o#jo z$r(Yvgr8P7FukEo#2#MkYCea^GM_KH&zirt$ozTtN&i4koZ_lc$)^wOn=MIpBDFgvPbx9788h>w^X!mjjze{ZSV$B0QKsOq|f^kvN?i zZxA}U9}+sD-%O{n`ashO{P$+ybqt-{qgo%_uIcn(oKAofoi4QLWa>A(o7yLRAarWD zMd;Lc8~0ziuVuK<36)eM(Z&6Y`ylx%?q|^-;@dAMbvKQBwX4UC=eOed)0&^VF}}5c z8@(~+G2EFp?dDu+Ck3<1^mCx6QQ!HXzE5TNzU4JS_fKwvy#c%h?0qA(t5RvI-|`%R zpE{LlVK_WG>X-ZFz@C$T1t&rKPvAW9mrx$#AU|pf%ps2;91})NITHqXvb060p=&= zUL)}AHBvrXa^6Sf)ei0l+11r3jw(^V-MP`ofzT1)mW)r;aQ{NOrW~FZNYn|MryNi) zA87yMYtSs9d($CfZ$M|jok7PJ{*!We!g!iKE_j@&GI01!)Q&S<27bMUA1?zxDsZ(T z-cR^vEwO%oUHGMGLiiV>6gztmeh=n9HK^z17y91J(Px$yTUH?~A9PZfyFmI)>*dr7o?SjpojXfD-d=`} zw}pIsBaN#@J~n!d^g7M6TB-H=l#{I2PnX_45$bIomyt*4-xJi&c1vC@y{}e-E@aRB zP;VDdujx&~@9q<+b?g^?9vlU`)&h@P+7bUGwO6;Z8Ww`_Xg>!0LgftI$PU~crQ4vB zNVgWm1}9LioIPy*sfJINslP?TkC%bJTHx+CK$lAS73fRVIgk8Dm+%KhLU}m+5k9&& zYOoJJCq3xFzpmqxf8%}={}ymB`G+R@L2&nE>^YH9p8reiIeAL*{9Bku#>0L_m{0mP z+Ap-G%jT0fFB5?$=X=8X(-^=(>(D%iTVZ15=4moPm>2T zemm0#&^6kR=keg)HCk0aN|Ei25eze1RW#p$T z=1G@apzXm@-7he9=0%7x4PVFoh>#Ee56Tx>RTVxA;mLd$!fihMw<`EsslSo-YsAla z&4|CRBrbzji$BO~`UXvxUx+@<{#f*l>O#BwpjQ!>l754}RjeJT zA1bqk>SF}wYWOD6Td2=^E2HxrnLNd5jbG8@SWy|jdm_tu&&F?SyAi~5vux)n-A=pJ zPC%DBjpt{&on=-#sv3THCPW{VzC`Gi*7QnidSQJC^jeH`0p`6$`S|a};DaZGPu%Z; 
z4=U+p$-{?wH<@2UxGfLguY!M`dResyzar1T{(*Z3=vN?mq%u=PmqCZ^oAH-aJ|*|c zX_a_<#`=!bJ06tlrE>1SYyCy-Me`O|&m#J^F4A&GdI-8nxl|5UlB*uI7}i#)iy-%~ z#aJIwGv&<8d+HLIPm#a$YWOpR$A=uUoTaX0InnDHNQYqssK7rhvIb3t#BQPVLfuGc(H7)A1bls5Wby~;` z(Y5JEqOZ{AAvs#t(mfRR^&c}C+n3EZtIn&tKb)ZDXd>B z{{N_gU;Mo}<5x|ZADSIs?MGe;@ebxex&KM+f{Nw*G~c~f;q{Tj?17I)$vXW%JGBd7mBH3zV^SOejD@`@(j>lK(75Z%TGfytPhu*2=pQ0 zC%)i#!~HR;7Fttf`9kI`geT8i2)Fn``JgL}cO}|c%Y3cJse0M(ZR6+E9nn*PygTbf z-sO4P=&y{?%T9shKCPf_!SAF8X};9*S}k`3wT-8C zJy;Wdtp`^qA2tjA&63Z@{F_it?R^a5--PgFehc9izq#Vygm4=_=e;9(utxl9h0OnH z_HWX`x{UE}pfQTXU*g}apdX|^+@DmDXBmGeugT*N;WmAL%J>(IzV6QmKaESr#b%u% zGt0QvtY^vopPBkmVIvIbVH4jDIs>UO(2e}CKc;Isw*P0zlbX)(i&&2_fAT&7=&@GS zb6ES2iv8AZAQt`B3y6l%jZ?C-)~isP83``Ew0ssQ^Tv_FRZ3bj(~os);X z6MsX-uj5j%<*fX&C0`Z)#TjHg(B_(sQ9R`5U`#|$)vWb!OUhTn>=#SB?GD>->L%uy z=7Zz#>|aUz>yfF9*gMt2{cl(8m=WK&ML z_V`Hd`XX{`jnAX?uu6XTeCLPOF)HDor=PFsjOd!^jB#QPK+hHv$9i6s_Ss*ayZ!YV zs^FjR_?S;HI=IuKAK`DpUSxk?>qnj6P>qf+{(WjjTr2)PHdKH%M5g0xZ`Q2Wdi7k< zs}i?0e@pb9TBP}u;*Vjjw_NO0svvj(Cp=bsuG&TaHWwKWC377tj2>?AD;g(&t&^qG7ll#mJ@4z!6P{>Q2RRXNqp27Ss$?E$gI~b zIf*#CkE&_A*6s{Y{HEHoYMRF1hWmZcBiNV1dLqcDI#+_Ol!q4kaKhL>Jii>zm-sE} zF~B-{Thvc*U4nKfESuAD5^g~D;#bRd4MFbO3h+dg;{zZKA1?N3dS zJtBG;f6A;onD`abI;;4T_?7Z*vy49%pP}hX`VI5ZFr@Fzx`opFe>)ss_;w~A=BeM> z|84pM&9}?6+ys6?TJj;tscE~weBFS#bI49~=<=;-2hWqzZ{z8Suhon^V@Bq8mU+fT42<%N zi*Y4=hRWK0{5sFb>jE?M@dD);!+4GJT|s`1!d20S{9OB8@QYNnn|OrQ1)#^hbr3a~zq#c>Q&9Qr-~Bod%KLD`^`Ku)J-r6=4mox2K)xDr zZMZJ#WTwdOQr@1tceDPVm%OpJ9`=X%#Nl;M5?7JaH0POdFZGWHd1fv*wHW->7pxBe zF#>mPC-$DAQhC`Q0QE2Xx%2V;-0t&?TqEH~^~>z%ZV28RN#57(`e1j`4|@Ngd%ngS ztpDd^-?rZ8-QZ{PVs~A=Pr=puw#`0nyw?Oh6YlTM>HXhXy&pV#sPg`9Sto8r1i*SL z4?T4&{CoI~C(zFZANnxzo~BzZ?>W!7eMa7s@qu2(PZxa^;h!@5_>=RVL4Jt&ERye3 zj>GFiE^=QqZuc1n6Sef`W!HnaUlDks9*n?kJ!sR(*0Yo!>5JqC?E0Dxn0elZKVyB4 z_wz*PHrMzjnT{f-3G{0Q?U;|Ge*wLp^?aPz$uQrRIA0a$t=C17k7Iq)bW4zr(|V~) zJ}$`5qTW75>k}MjyN?K+ymN`qB6%{K52W9rJx%U+2)Fsf*jtlVX`;O1qP?vxoc{~f zOT6={^kbguC8VKZ`Pmo6+Y8nK8q4@O^IZopeps+yDz!=SJhNJtCO#H_jp%^5eaV*s zK0lY@M!LW8BFH;_BM;af#Lt`uT>6u;dW7XN0#BUZ2;7=q$pc2)6eDErJIV37Pb+6?t_ZKs|0pE#rL`?GELcX+4kQAAbKU)YI*} zqJ;Gi?`^Vf_H5H%weh%DpkK-H;C$s`1J~*CZJ|KL(Y8%?$$b25!Ia|(T+#x)f4?}pI58FsgdHpMnr;RxyY5j}ymJz#c zw>RJQuX)a+S8e@E`X9=r-G9j&gzz}Mly?EoN2s^#`Ix8P^DZR(nEiOZp4P)fPE9;u z>`*u2!(l+X9lh?2`V^-j&o6ceKcELlzZ~y0`xfZFa=s^cFIfEG^lbC|-qYHSUBq+> z>1feu3f+qN`8Cdi=|8UWK5qQ|tx&HKAu#@i}UD7Q2TRXeT!dYeLhmZM*P~J z!(@wmH1unK&g?hrs3~uXRm$}~{5f1VwTK^=I)nJEN&9iU!JqW`awJAYuODH#ROw?P zkFaMfr}6dJmczPVDx=q9oi$Qk`aM&wGqS#H){{*BXa(C#_^H?jL+!UBf5ZJ~_EhJh zUESvxe?jyB-V1DDe|9;tP~2~4Hu{A4h2sN~ABX(M#pvfgnYxM-uz{#JF2I!-}7?1!m%THJ*MR~c&`$Au8-loZsk#2Q=ogC4|N{+dnxicx8p9O z{LIC;p04#w^S+butCiw>u1DjJh z-wfU#tD^tCHl6SHIFC4Uo!9K2!|_(k$CST}h{Vgvxuk~uDJ#)4ieE#&H_@I%;I{n9 zdx1gyGW$~kxE&uuJeWS={uI3rbET8#{V6N)eg@Gm*q3Se-&s!julb`;B#;;PPL#h3 z_n(lwn_84M`c&^vLOYn3MJA7}de7DSjMhi@Z#Msc=Z*Xtrl*%T{0@By*;Hm9h0&j8 zpUfJqPdCp}4vpTB{UI~=-&%SCW`Xn1V&5VB&yb#o=skPh=DVLp^lmtB_PmS#AHpsF zUm1O`_kn|QvVKSN-G}#RU&?tfHG?0B@R%p>5|k_8#^||fdU#WG-L($y>HdM};j?Jp zE9p0S4|OfhUv!Z3A^qGba_wz#DBs(u@`}!PT*`0qo{e)G8&C#uA@L~LZ92!#xv16Y zhQuNNry5=!yASfd9@ab~<$~vw4~{$sV#$M0-Xijx_*SVNEI)0e=d2gfbZ^H?;BH0s zf7a(k#`lXi%{IgR1ieSmm5ECqAPeFKW~EUt(1?SH$igCc?GNbnj>z7dc);Y7Oy1JOO=1_q^CrwX>X7yW%z3pp$^C5k-i@6* z5uDFU{O0iayue$_`MgUPiQIa{`1oeqA1`KKgZ?nz&gHz|r8{+huB*cDERS>N7jM@u z$#}6oKzegV9G8ZEWPQ!=+bcIX4_W)Uv`;Yt_jWHVy*Mw?*SffzzV!A8J-xl?R{`9J z2!-Md`S@0o=8?n!a@lYI0{yW$^HAP&m(t7_6_TBLFv^+na zqH_aee*s;U!~1x={joTF-sf2_{fx{P@QXdbj|};9xTpM#dk@+}9>smXvs;0^|RU%J{vPh<<2(cd~qYt(D<5 z>lezCMC8+ZW##_WjDNL-|JoRS@(0Af{Y!ctu_Jc;BYtLRXOjKQ5N`SH?q5~GFMe`Z 
zzefD?*7G#J$UL}z9itP-G*3H1`|G3lAE`5bS|^F%_kO3&_x|!$ zGxrm9#?NtUXlK1w)EPhJ=c4ob@;c+Eb(RQz_jsN0TYBF8A94I{HT^^R(X|o%RHJ_I zuAufJ^)3D84Os9R@y}a7X#dRH7N^U+@lre+o!8IR8NVgB?&EdFZ}E+{z0UYkar$^y z))_yoS4Qdo$vWew{6!T1r(*aif0aSJPj;U3%I( z=jbGOM&fIg-6H3jX0!aBnuGHz9|zog2O#|J74PR*ay{69<5vh6@~BI&4oCTp;9RGb zX`k9tY5o2Vy`RkZGWz}6%yRkOKxVmoe*oX@L4MgmK3~2|kX|Owon=xE?{mG6x+D1q zY7g=A4F2>sSK&{zfOUtu(ckN@k^Ud46a8`S%(F><_d`Ty%!b}4)cjjKAIkl2dcO2j zW&IF=C(3UKZ@yIU5@*zuhoSXKtV1+?F}c2X`8?w=jX%iq*ml4d`sh3l=}&r(nAQnM zpAz4SK7EGi^|P|{;<#pM2v4S02)Fd9-XB(eK5}TkFQoU(^;wJVKezbZ?5kSs#Pv;= z=$ob`_2Ks>Y}bnN`_lii^i9{o>Kkeg`UZa%J*wpcZ9cL6k=_`e%pboU$5H%VKwehz zjWhVqqxV4yt+}%NNB+m!2t1MhB5;fUs^XWpJ#4>5{Pui!cyEKgbBI4#2d>7S#X9ll zwQ>Gz8hdv56ZsvU@8+Kiznc5T@5b-xHFDx-18=$9x(8jNa6`|k`EHB<-2W~8G5PMG zz4d}0|JJ70Nw30RZGPn(>sOOU@-GX4ykOWh_OBX#@-G(n5ihT3_Ecaf$F6AiM-&$tf#e0s8H$Jc4 zOXuki*yFPG{F#;82c2VjtE!w?bo18K8Nbzk_y5K52Yk^`CqA(AM}HS@53S5ePXv5s zDpU}Z~ z*Py>}yi7muXY`HT$nt)a=JSo+FyC`?^!xw%9D^V(%j)<4vuEn>wwvg!RyMC|}LLr+m)Z zh(DQV&m(ZlUzqni+xCb1yMX$9mCZ*r@~LevtbChCat`_%`&l!6Z2VTf$D?(Ndg8bA zhDYx`%m=@%SIJ)_`7!yx9{h?ugBtF_kSk0KWF?l{r<7e_$jWB z$hZ5y>Wts+{}&b*|KHaczh!6KuhtpAO`m_LGk%IQBlGLMtj_pt{qbT8KlW2o{&mj$ zZ|lFYI^!pM5T(y(3_s;(`Fr>7AJg;l>%Q{+jtD%_-;Ka6f7iXc3Vz9t&3#|XeCNTA zoBkw5r%nnOR1b*tK^{lA58GZBr1@Ms_kF!p@zZ?H1OE&^?@!}?p?UuY-vN!~xm%DY zDYAXB*3-JPd0zigvs+JV`4_Mm;pxusIvvGf6d$(y#KeQ<9J}-yjxQHzUCqRUsuSaZ z{F-&08T6?&UqSjJDsKhc7cky_y)X6yay!V$VA2qHwFYVXzq2*#J z^1qBX_>NHexu-=g^!Wi=E`oJDz3v>+*`5al{+{pqZjpWHGuEBUtaB`VeHr@KsGpV| zO4d(H-#TL+7F0j!b-0l3L!>t%c4eXIUb5^$Il6ziPISMw4Ba1nR_G43Tw{LAjyp|0 zr@fx7PLrM2-zgG(hJ6MD*r0&-m0R(@ z1Ak=c&*j>R_duPCkhitqble)1qwk9Hb+NwUSQWmo{Wmc@kl*2J>-YR-`(9l9{HONEX8T@rD|Gh> z&`I8lZvS|>^>*oB1fFQGBXG-~0Xg_RDqNG_i>AC7{JPUNZoI$Ccs@PO38kPi`-)Z_}C98HkSb-di6b!~Z?_SIPG}ym4xu?s*@=G6FC> zqj;UNW`4`jdw~JJ0UhE4ioOP|bE|!GZgob^t@bl&iu~@?*}PuA%G3H)>l=(dgP;tQ*)PLGg4IIr^i}8`7@5yc(%(O6yWflp=kK7OZCEQVLhkUr z8=f!6m-i5Zd76yP6VCgPa#}t>bUkltWZrO2P)+A31n;x+d!NDlp%MNrDHad)`@Ms) zbAoc_d@;Os&*y-&eKr_ZoNq0@^bXN@B64KQnect6f0Fq=gx89n=@)6g3VwZlrNVck z*Aaish&$-}c{n3OpF0lgqUQrFNc_-{hrXlv9VGeS@3)GaDRl<(i_i5N?RqHBc)i0! 
zFaF-JsmFMolQ{RhPhs~O2*z@)-*Z*YHkKDm4Dns?{Z+_W^gA>u`QE*18h4XkxPpDKc$rVil-&^0^YP-p5KhN#M3o!Gh$7#-KRjJr_#DniwFPHoS zs%@y{d$eu0S?v#U``G{&C+)X)G|Gu zOq$QrU-n{=XS@eZ{0ByYDu6w~_YVeS9bW5imC5Fb?s=_GgZHSj=KbmP6v0V#ZEhlf5MUdn=BMhJVR)!@8Q}$skV`z&|>IQGllH7z@F@%K90zs_~lh8N|N}&S#jd zovos$(Q(_}s&)FisO$KAJ#By5Vd;z1@1y8fCI3VE38ev=_eAKJtS`&^A%utMD*A~0 z4AEChjr=%t9u|x|{(8WMuh;Wo{Ff2tW-H|hDZvqzU!7s)}OWNF=ICx%yGF?uSJ#ek|%Ig2Lwl}$DdVXFc_Kx_7xXe-cKEAd%ur+N)|AWs&&+B$FX`l4m zvL6c{)~@Gs>~FhwWCz6_Js-CN#{)ZneO}Njh`%6L=sf$IVkgF^ z9)H)_=zYt`?XvNAI{S6`c;*yrE$4hep34naJdxrk}r?E zpK)6B{jp#3<*ycBri{MV-vzFy@A>-x;@=y+ul+6U-!K2mYI;8_`aGYJeZxa3iqoK% zvvI$D(D?1CORDI5`3{=+_0{w~^`HFx>Uy92MDpVyV<*Volbw4Z`l-)D@@}&7=dcUu zJyZ^U4(cGD(|M;f=be`So$<>MVvs$}*1T`}JofM4JF-87{m?`=azyBP`$3IAi@cb` zL+fQ-P}33bvC4kzY?{V_dUM@B&EE^5_8icqynVOld4cS@nq_?QE=`|{<-2a}hxPgX z!Mt7KWG+Pf^1H%^?MKbLr!o|uVP082+Fz&rKR?5Ki|72^qu{x6dSxFx&iCbdh+vVg z$@FIP9RDHzcq8!>@{&A%%d-Oxo@Ho%Jn}u6UW6jJVtz_IugCO!`?|bJovHPY^yens zpVg@W$#bR$D9_o}WB3jH$o!TydC6_kKlxr>R+)YIs>FGtkgs@LZEd=Jg#+5aXKl}~ z%x77Z=6n`LqWLVdM!)a8W|sT(&HK*sT^fpW%ll``c;(}4%-gdQpJAG`{!?;(eLG}} zvIy3NAAchrvdh_AgjX*#?qna{lqmVZI(V3apl z?Zo!s!H?iPV%tCIaZp^q`hITbTeSUyp${gG{EEefKH87SF6{f})LxR@*G!L}*NGng z`Rvjo*q5m7Nc+di(C2#Lm-c|4UdI+2q%#OE=1j`ae{+Z*}9bFo`D#-DpP;(4T;HzoJb z+;VPiz2CE|ZGSPkVd;g}LqDLu_>=E`SnvC`e_-Bx!gja)y_Q}$&34!5(b>K4i~Q~Y zBA8~Z&&c%?*1jv3DV8E*%j?xJ&iLI!!9NJx!|P76E|ZU~*HArv?%hoC+_9_jKAi|W zaeXWTxAM?R=l$KUIUEP9z&^DC{0{faqDRmV^Sy|IylztGCp6~F5#a2D>F|kzAybPvu}^MjL+FNdXD87c+16k@%?(3r&-2% zE8t^Nzh%E($93iSbBg1@2t1iT!}>OV+H}FRv4873103(~@qMt)XZCmS`E2RcX`kUf>Tw<>eFm*ZrcTNJ zvJS}6-RKXrsK%4g`@AsA#J5V{ug>J99ppbyAB=dehHoOjnkJO*v0W=q{ek@?zUgQe z`ltNL_}vp(wsRfN(fOo+zh~Lb3p9Ufx~Gtj)%~sScsA^+`NAEjv;%<8hO69{{L8<=H=LpdTogR=(oz5h7YOeu8{I z81Dm%--u`db{bqR&$GNAi1y#MqCOm2hreIkd4uS+j^B;T1C~))?gb3pVEQ#LDVDrK;uIc!2MU=n^B&}#c>zeO^O$()Vd7T zgwPv~6Og}PKZ5-HDJp-Q$Dsm$9?Nj5uj^gfpGN^O1ov5!}zp`w&^IyN{!v z)7oxP7YSbDdk`J(!ElH^tFeAbe%J#V-?J2Nbv&%+OVbhbwBoIljzM!GHwqtSm>;?}XgUS! 
zp;;3La9)HMfa!0=0bMVa{&}~;Z^mA`&PRHcE@P`IuU;|J<>UjyB!6;_svpH#+%zC>$thY4)Lq+=H)rw zV}ZTG#@Rj8u4({3fxaC&p6&b+nj$)@9DldG3%+NO7%kgalo;=>LzSUoeZzJ{X@d`b|`ZeNLslfhG zd`o^4$Mv1}Ykm##DNZf%E%500){DotT{mk!x!e+s;Rf9n$$C<1Yk$U7hx~Sm(8Qe}wTApda68>>}EqPrOIxaQ0!Y%>IzK)fvBi zUf18$8NU_JxSw8N{C^wAZ}xN69B13-MBY{>y!1YJbUxn_$4lyGUi@Ow+k1PR@!Rq5 zAJ!Sa-T!wkF#g*Y82`KC_`!$9j?U>jtKNI!`0CCt?_Oa1?^s~`S1mC99fChOUjjWn zU->PQpAmZkzhprCm9G29zQAtA?qPTMxw&@T5r=o(W!=9wejn7kd&cv7xV?Mh&$~Wg z-QQ>Ki4Zuyg#0OTwLmoX&*608t_S}fPw9LP=YbHU@pA;rC`)j}4HBPlzDN6I@N0P= zY&E()>o?wjV}*=h_IoR{VEW!O!{edEn=B9V7MY-G9oD z6fi$Z?=N6_;BH>)*WI17UB4#r;P~bursImTd?51_!jtDIgj;+-Y)J7auCgxjUT&{E z-_3X4UPbt+Jxy1$PI9{F`g~1SKF_8*q>IfzGR_b_U%I}p3SH+Z&x+$R%%bQM1VLKA zm^`vdao%tx*5is4H&cE==0WyLnSHq##p_aPO^y3B{!8|c$@}2lU9;wMy*_C3p&1XY zYen=U=}*?1-ZaxQoS%BnANgk-KeJvA=;_^ErCqKU;j1cqgKZYlj-SfYINfP9CG*1X zjo=&;evV=ItiTn}rW8OV!{k{b7P&wq+27%Xme@33;{i)*PxW8B5 zNAk8@?oU#>TqaNDV6M)keE~=hazCeXX@38Q%v=;d%JKdZN<45LF^zoVtyCXV#{I}D zAN@eNGkISG_!J{h-Ul$hG+xSQ;T|1c#_a(=>!stt^Y#4P$@0E&vyXa;-ghUw!MZj~ zD7BZv(Jy4bUdet0CXL^p4(u!O712w-M@@Vs_S$hcU%3+d^hy6p{-?XWivA7Z$@3k; zZT)NOZ<~(^-WTB~tG;(1;1N4cuhD^TX&;I2yaWBW@y%0zE2_T_pQHS&&zcjzsx`MyiR$gGI2n1+yQlF%o3nH)!^i z`KZF{b`1wvkNTd^`tQjlxA9duU!`>l?}Lks9l4wL8)g=X9X;MC`ckP!__d7UW5N1eoy+$_*heP!57B+rPv1uOS(RbC z(ET*JNBrey___L21DDULMFPK8zl{y7*VTs?$vSo86PXvGzFNGI)@i!0()d*R zol-9SMt*)uL<8@W?0Jrs%NqAd_UL_*sr&+sBiJ9A(&GsBr5+PH>3caJ z4RlY9YdUSva9*z_O7lJTbLEMD99ci?*~0Tkb|nWnq5A(I_0sx%e}HH{p`WE5-V?tK zsej{NIuI?wXZ(r`)}!H{bU!3~X4doEJ96>;kJ?YcbM059GE>yfV0^s}`yJNB_FW#A zbxw4sRs5M$7K@kHS?hVgWA>RVI*$s&!-*P|lv{P!bcXJuU&eCCegF zeQ;drnJ@wJ@cSpK z7ycjYYtO%ne)ArHeJ)^I)u7%dNasx;&LDei?Din+bHkK|=kc8Gr{&%~m2Tm4K~#D@ z`E@-%5&eyGD>#pUs6hM(Ka&?bpXL2Xt3aUw>$#lDw4pgSH^}~TR`!R%Bjs{E9}+*W z;W#RwU8SBY`y+9FiTIhhOSs*8EPS~*KJP)~Gq9g%)lwce`Ss$j49UJa_z6r0vyTYe zkYaxVbdY^P@Q+a+bdl%Aj~JAE3CgoS;r+SJxA8v8z^}mMvi~60?L=HmIQdIde+pQV0zdExv^WIT3#6AwF>ckuhF)!JVnyB*1^(>&D0 zp3`|w#Gl6Q{sqks+U-UI^|0sUmqhV@q|W$ly>H$lUa0;*Ed5W;CxDKYf8{+Sas`0A zS4!`PlANq|;^$Q$o}hDYSK*ATBGX;#vFzN=t5}6LL{EW0&~K~qf!|A+UUYL|&#N%+ z6`TE2vwOc7a=~_N)#dv763b3e#}#%E)1T&l__>IAajV}~G|w?lo%9^{$lI*CO7&)X z=??T~eQe6%-jsuXyz1pBcSiMc_!8CZRIbV!L7nGZI2HU(`th6#2mj{p zw>UTd_Zb(y{dZsg(SQDQ`uKO$$F_g&*jX36_NQOXyy{K&!Y`=sckIH!3Q(nvRc=4s zwRO5u{q&l)%J9!jAD=ujFgacUN$E&w*P+SSl*dZ%Iy62Gmgzq-R@yaL8rpH>iru?0 zlR=ruBb$qpMeAVzRcgl(e+UKqQLg3>PcSC}=Ptp~556249@(P{L~73XIs18)s!8Q% zJPX>VUe0I|AL%pwn;70RQk*sth;1UN^I948Fb!d_xuZ3(LSSEl&7jrE!1k z&~+?l5U}gVM@ROU84AW%rv6L6_2gbtc9z#Q(Q;Re>_6)FUp{d5`Rh0MyNVhBZ@)q zjo!_)&+%5oVT{&@|HgEW4~&l<9v&(U4G0eoj1TWBVNrws_7o=uP(mQ)VFU&0PLA#x z-R~b7+cRDqD*2N~Fin&G?&0wX(sGv-wqLgMiY;5V_wR_*3V&`c>?rKq+W)dkcU%@e zjQoPnJNLhaFP^MaIrFK9D}mt#KXr0=G}P5eg(r{f7@aKchZt`iJ}^A9?0D(mq2ck; z&=rSE<9kcRAsfdigue*GW8hmx$9IeuMF!JzuwG}UV=>?A=NjQ=;w4KRf;@A|F4<=c}L3he{8g}{X>0_yq~=&CU^ul{oDoRQn+r| zdK%uM{_`lPD)$HYj!`XONo6Xk&mTT^JgH9btof3>_Nhj#5MO&BL; zCOl4OiZ3*S1Zbl_wEtMP$bm!qH~M=@ll|j+CbokWCPB@zg`-+X(69Mug~P8Y&-+yFs!;9E z&@BYtx_%=#ljiVJcseikF_AGvIZ@mVtvIyN9~-66cDT6zN-k~4R1Us@kmGOHYYxhf zGXKJF7@ZhKK9K8NuMFA;J;I+#N zf23bnFO(@W@}pO7eE4T~J#y$&=fQ9PsF+#Y_u!syt~mQ2I|gof=Bn*~@-gtI&7zqo zDnlx=n@K$ZTZNjg_QS^>hp#uh`{?$V&53q4v_FaH)&1KZNSd|)MO42{!nwR+e0a|={37%Bnthc8 zc=yn5Q#A2<&3mp4<}#^3*uH2nyUe=peEu=}S(W=3|AzR<)9|++MT+2nziD)2vUG&J z$(}06DuL5$S_Y-pmycWu?TgrwT=PT2Ba<&WS67U}XKH7B1*%d5fGw{AZ>j=cQU#u^ z0>5fx-^l3oBNTQ_9vz!0-CGOWckJA9Wq<$9?OU(dQ3aU=hJWIh$)5!}e4=vtMHMG1 zs!vs{9R8}YDh4XZ9|ehm_#guKf#MMacBA9NlSh$HM@$EfK+z~X`rKd%;7~MSAb1Hf 
zQewuV@R(uO2m<(In3FNM=^DT(Q4y+xK;DyZMU8PrR&k=V!-1Fz|-H1pBfksEi*kI`8T4M(^$4P^k-H!arKl=%fgGXcIrL%R@AyWs?%0K+y{QSSt#*a#WRHhOf`Tua_{;tsjWBW@{ zCu)DOvi+@u`<`37eozUXe#@Fi8!8n5jLK9h(q0J&SaTJ4#mTKw_ghbQeEVP({}|ge zEB;|^z2cwFTtNMR(u-6R->jQ@qvu0rz4o_m+ zU4_5Hla=Gg!v`vlFTy|TllZTUSr2+dZbi&s2N3xItIp~d{vCXmnCs6441A{Lm-=D+ zF+4d@+CO71%eW;$FQEJm)vS1R?N|NQSplA;9~XZ8h1-&zn`htu$t&JsekWb;dg=C8 zBt6G7-#m}aPl9~E{I|DX`?5_tHx+hlx@_mm`U_X$=hm&JHg@v8c11<`M|NfMiZP^b zCdd8Wiy%QGB?RJ|gEi&X3|Hp=UHdAFI2SMe^JurM)GHThH3iFDi&dD5#Qi{Vf}Y4RlvB_CT_5{eUOKPb@9ghI>G*rjaU_XwGW_ngJ0t2orCPtH)DbFo z%rWV$ocD2Bt!}lsx06w<^P0b?}RirLdU3fMy zJf$X5m*8zs4e@iW%JFlj+C)!xi7AKnF5!NA)E*GWnIU={cf3R^(mS12=TUTL0Z2F8 zw%J>)RH{YwAcvFOihp|?mE6wlc9>oxPOH#SwK{KRyse<8Pe*l>=}7&c{wTML+R=8I z_X5VN+-}q({b@sglp^@@5WL5NrvV=h!Iwti74di1h94bce!IP(6Z+})3BAyA;)hbb zXwPyy`yJ1D4W8+Qmzc!!df^|Dfw`Mb4H3OnPdW#BQpR-Zg?Korobw31t8I+WNe-!D zdQF`Pxljr`$U_cPPm1_}Kgcts@6q_FKEkV#gUO8yZsm6;;R)VUg5?!b;CzbmrIsYl zqh~Tn^*hRGRd?|FVo7%vOFk#>yoJ&g^QrlH$V>9*fu4Czm;R?%{zxuUESG~?4%b#G zhXeJM!!|UtKy+IQ`V)U3&P+EFWs-lxw^tiJIf)3|q&v=#x|7Ne45%*=8qjN4Eu>%6 zU-wL*hdXG=UY9nIbPae_<0WTtLa@@Z>xcZ5M7k=z^A(a|FnkZoC)xasLJ%hgYmf+ zV6;FlHoac!?-6J>9)AfQH-NL=Z2GE(&oI218pM22I$yia_mr^%%^FX#ncD4Mm>A-E zex%{Y8IK#_k#Xu?l*m!Og&@ZflHbP{YCP$MjOU`nCWb=-3GOJy1EN^%xgf*sUIDikZq4ANJY0&j#4l-Sj8M|nQC~Wj4Lz|ZBb5x%27F|VS?>0B=`jNGubdhddpF0#~lO*X?WZ< z91?xhaJzE}(mi)X(p{I^r2t30l)V5vO@sJN_&ihIy^Z@br0Ka-_xG4?ziN>F!tUj% zoR+TywPRRyAVuu0Y7jc~A|3QX@@~Sr@GL{82BBxa%l&fBHgwW-4d`@Fluiw!LZ{3< zLZ^(TQ%2J%!*p-*>6FoQLi;0h%G@Y)%G{>uG!dl};6$f$EjkVObh_QpX@KZJbRm9u zy8SA{{cZB;gm!`s36JU6j-0V_g=D7h!WD*2B2Vrr@R8&kbseGl((VZHAAFlptB`($ zk)!M(Ek}0VF8J#x z|J-|}ec8X3_GLByvYLO{yQ6ZRJx2X>RQ7J+9~@7X^X!|1f7y2k{~Bi`y|Gc`9B|@a zajyJxW&9C6X}qcIA2EM@{-K@3zY(-!SS_Xus*m#t9eR-tIFgy}3lAGQWo6v;H*&w6 zTMV5R5FYRk3V{2w;Nwv`HT{9m$^DSf335mBg@K!+a*jI_rIV}ag!bP-a6>2ePN9?g zc1@?_Q91!mbV3^nkSkvw44Cqb?_fF%!J?Dgsm4Fzc6d$4gih{VAv$G|p^50?=H27u z_qdm$JtVikuhjiizTegTM*3YyFKBu0MgN$7T2D=68ScE>)QgToe>fl0^m816zfZrz zO(%S<|0s0-$R_w9cyGZ>LfS38*~Egb!Ot>%_}wh zSPA%xH2g8(pnmU9N_(Hs?fqp!-hZy&Ps+P#|H+dM+hYfCYJck%jYm1#hz>2U6a2q2 z{rpmi_H7RMaD?cfO7LOEiRHtb>3lrCd`pYa3H4@}Zm@~OSIj@Oo(kxkM|$&%r62A( z;3N564>;tv75XjQe&;(Do_@hYd6%!_Xea3d$gSu-Ex*!lC}GvC`e17? z)bNB3fkVRMjPnQn@Mffk{9)C_?Qc>)QC{K@=wQ!7n+Q+H|Ec{&|{m$*d`>pyN;w14T$OZHW#hGTPzT=1s zk$hbjnaAgKJPAlK0f&!_e_-O*SJXC*`_<25T`U%^uPJ+63DH@j?4@hDp7DzAu zQuImseyzU|ucLW%3t~4X2=3vX^r_b0_$6_-PNO$u{;GP@ny!g0;-^V^vOg*M*hBt1 zQCcO>bjG0^PNvu&X&TXfP)ACaPo+d}r?v?m{obF-iC#|)QhDIHK*QIN9i#9Oze6l? 
z><=O?b~l`_y}BRm?F9CQ>uqc#Jq~-+VdGnYgIaCUMMJad0Vj+|M^a`P_omU#+NbxNW_M zIuH+s_Ch=r^liINk2~^DD1O^O`S7H1)rav3GN_9&?l08kW{oq$_r#?l|7M&aenfaY zj6)u0i7h-%bUaL@v3}8txhw>l?74eGa!C50Nz!<8cO-c}?e55^V|mr$Elr9pk3XKRdU`Q#q~4VpkHp9OHT{*Q zyK?4Mk3XKRdQ_r5_+>mpEnlzHMtsJ1c&50FSqW0o=B;yDQ*dKyn(Yzgqk^R=_WMD`;Pp_?7lw;CFcn zwv5zD9(LIsrFucdE?*vHeIldvLzA}0X|M`>PQF$1{iOp#%qO*S1+PcE6#NHYTDA(G z+&z#J_+5wE|XOeXr6cjmvKj^W4sFLf|6_hQp!yaEAkN1S0XC&3} zZk7YqN6^ok)J;?_*(T+o57@4wpR5NQCFNy4M`{e^Ngky<6lYn#axCIkUOCIJwERe@ zXV%xv*ZBIH@YDF<`4FPCB)3OOl>6dNyV32G-@^KP46WgRHMfo>YhCqh5~y|4QkfOSQh9n|_1Lo*?|~ zrUmt7l8lOJSOs|{jWfe#QR?XJYL=c zxFv5`ogsM+;5L4iw-CHa{7U;Vr`iA7;^i&>3v6W>hFWMm&l z^Fd8#_-~5zKF<%-68wT5YrC?4MDqo&(5ttoc!)r?2Om z9Huih3w$N|b6g-@koma#N{j9t>}Pbq;?4`*-QS7US9iXOe?a||@&3D9Uuk_O^Vt9% zFGm5~9`CCkTiJ3BAMR(Z`Z+^b!zt91T zOF{=F{zc%&u|L`P^`2)`3+AncUQNiYl8|K?$Nu!!n61#etVz~?fy}maM;JA zT$_GJD$uW1{v54n2-cb-pD^YlKpF2@tOoh^r9xo^2y z%k5%4Zv6Qj=Gj+*Pq%1(Yq`!~OgY3~S?|Qc3F;w!zDDO`y}`}k_4o{Ue}eKGf773j z>iC0lHp}?5=A$0k!*R;wfW#@$Uy{SLUI)4%rPl{DydL20NQL8*kTWx%b6r|kh(9jzBo4X4#39-4l!!x;UJJ(|1AoQVPvUO`aND1e{iy-m(!;LA zAp^LLU-WzcuNJ?=A%pTZewn`q@G9}EPWJzy7n~us6J&-+evO^zNNGFo42m7acxF3l z$L&&N=ZRl359$Rz=$wmwjCjvh@gxWj@n6q(*xw?4hW-1}cGtG6 z@$?AF+x!&&K7iZw7I_KaRpM8@mVXag909$t%dNbBul4Xn&tM$g1!D$#&3rO`ew(%f z+83H(#vbSbmfZ5ltB+lyo zA^JU^>z=P6{M>(u+t9fiqOUKg#zDyg9b_wbiYky0gwlkYXZw@_r5ySMB$Tzbx|YudjCb^RYjr z_LuQbYt$D$9nAQt6tDL%F4;elc)h<*OXA{9=pgGo_Xdn7?r)moM(&QpCf--G1AaZF zH|z1PGCHo5xJLAI1N?EKQ$r?)=dafTe7#g%y#Dm82f$B)9EaC$_53sI6^L8#bIT7W0b%OD&`W521Y_1jicuHyP_jq9JkuXQ#d{;fmi> z8h?#(7=L}V)!}%nS-%b9%&aeicw7d0D6q4%&MyAUpOJpT8|hWDpNoji+P>c)W_{>-N)$)yZFoN zZIQoI_rs)rG>(K1@8J3ZKiZZ@k(U4-@An08%dhm~u{7SH|J^?~_DuYds_jSutQC3s z^|~+iIjLZu&abAP9~6Hj7*F6I#tg(SHACZ>;zl%jwLh(LvaiW~r||nMdSfJV-d(Aj#8F^2k_^wK)ePyCbZS##62|2R zW*kFhQ9IKOBiv4fmHmxWKhCjWd}sahVCr^Wr*twSG9PGIPx`6hDRdY9r0P#^qWyBD z7z_07$4N4IIj5Z7MEJ8_DyPfqI8ej2`n=z@avmAy6j86E(wq1kkjh*m>FH}EU;2RT z@6+d}Ne}iTJBsjrV;w(k#abh`BL}@i^Qs5M@6CG9e;51fB+9L{&MWi6z~02`hX8Kr zhgs{qWm*&%~=UC1rK!3fys~m^-!?>X33GlkRHKdL8;fz{|z`rQLx&jhC|kZp(>@i}?HOVCt7QexYqU$3rOID0crn71~j2 zd`;vvkcYpF*5~hgXi@XB{tkaq`~*Kfo>-&z;myS#q;`vcMs(PN^@1<^a()5nb$UN? 
zG4ut-9{j%!f8Koy_w$mHdPL^k0X&|+0o>xRUIz`}wjPmnqX1qdezj23jTQ{A49V~3 zb^p!2I$EzmEDmJicr=lV?IiZch<|SOMe;f*0T>^lxQT_weVE%B_}@1D zQ9QCP7~jsIyoH~thrOeCve+$o55{dJy;>uk+)DdAYP9#}91rxUUKc5$M~xmc>vpYL zE;+sn>To={@ylI*C2?MoKk^fpkrAMF3gUMu=q^W#~gziQ`xccNYKda_1-t4^{Huxo#z_uT|`u6$hHmeaEQYW!Sh zbJYI$`?lI08IQt0lKTO*8SQy5kF&te+2c(59l+zqSpbiYvvxFJ_Wwljw6kP@|Fic0 zw4*?|eR#F*|EZCW?op;!p#N-oN&5qMJiP*VlwQjHE5;w_EgL@(G`t@o9)D2Y#{WLT zPlHhW*%^d)ppR+)pNv!E*BblNn~-=>ufyy4luhhUY0>-@J{Q?fnLKq`6-RqCs!c*=F<)I+QasR{nAFP zbKOsHp-bSeRkAZR+JTC8hS!a1pc9v8f4#h&k$!7?i~25Pf3DZ^=Yn~i#ZP@-TL6#e zX8^b5UiP&Ga2tQE^G;(gX*~?`Kzycg(g?NBa$(kOx)3*Ie{N3kQs7x}sqAZ8lh^x$ z`FxA<=Ts|>=8>PZ1Q8E(aaTJw2D(!$>qjzgx+M*xII06^UxtY1Fztes%sfXyx z{vF#*tncf6I|~uV;dZpRs=M(s%esxzid?g<4?qMUVU2HDA=g1$tkqoSRb#UT10j z439su158i%1K?jCZ%WBIIkWH1>}N3hWd;Gi1i7kR$Cv$tbB;He`9te}Q9nZSG@Ea= z?!Oeh8}QBMdxiZ`xRy}sAua!46VE>=J~Z66&W{h+@&FR1Fn-(<$R~7{_@3CQ((6)x z7g?8DH_LCSa$m&4pNpKFL-Vd-G>Yy~SdRM_9jvvHo+ElgF5Q1{C_dJ$a>_$`8{a^2 zu3C!Qvy_4#t?orPke}`^bS`dNzYOY_*2@|PBkyMZrLtS&(CKg=6f{HL;7kT z??aJ%B*#kiq0T2Lo%KYL-d)SzB(+8IOY40?cfpTyKxVxmdQWUL?!0jpy=h$|Cf-cP zb5DRCuK0F_UrzBk?Ifi#~!cs?(dgE z{U!MYe(b;nlhOY*Rq*-CdR&Q~+U(^;e$77owv9%gLN8-2!Pb{E=De8Em-J5h+|rkG zmOHcm$+TxRw+EfA5T|E-iCiRr!)+T&>K}i<7#bS#7il^%G926|Q{w(4XfE-GtoxIC z{PgJkNo0T7e{!#Z{{`Bj8#Irham({Q_gb{KoZN_>4#q*1fj%$SEw%+(zug)WW zx##xO>GM*eKk-HD4W>8V%zWI$eFgz1`!9AP@B_>`)J_dI_mkZ!?{k*>=sim2UT%^% zxtAMwtGSn(0=)&&!~Zwhzd5%jYHv+@SZ?{AaL}o$_FS#o^OXwnUZb9LzxpfE-&=Km z?R}O>=ttJqe1D8`{rjP{-He$G)BOh8m4{I2b4>0NhR(X( zIp{a6voCsB+ruXpPf?kD_4)(;-uiN9(6 zF6%Bzou&0bPVz7MB`{-r)*b&Pb;eJAKJ~}6|3!7iZ`0>f^NjzKb;i&0oe+H{>x|!~ z&#`&N|F$~gx9RiNI^(zL^FW>P+wH%<0{&NverSDHoyIrCU&7<>Unf%RpPH%&r<)wd{_~oQ|)@3_MwLH zpI>MEJTD6Asn6FLKke%YxBqi>#@}z@|7@M{Q+z$#{?F7If1ic_({;vg+v$IvXZ*ie zXZ#eG3e$hM&iHLR@xpn=|4((spRnlji8|w_d25*dYwC=@)55>H&iH8_9&Z1|QT%Qt z`$h5eEU2|wA9 z{UfMP)%E*hv*q(6ET0#bluyjpv91unV-u{-y6NqGrxM7>WkF_mE=szLCLu2z0q+pOAl9z-}aZ^RA>D5IQioU{@L<%eX5`K~YpnR41XYu2IS1JD|>m>irMCHE)_q;{y;;*Xwd*Cn4 zPt3Z2ZHKOk$PY4F`lgcn+xYGI(2MJg-){ev^Nj!UI^(y;@5cJzZ+Y?*^`p%{TYueD zpZ59kXXD>nBmPSI{T1_!{}1LF|D6@^KU!x$*p7!?QK9~7^@AOceEB@%{}UTO_Mh>( zD&~#wY25Shd=zR=Dc*DRi$zH zM-}R?Bv&?mJC1lyo$=fJxxGgGw%#Q_DI7<9bB*{b(dUhI#&6sGH`E!wz0ULcI^(zb z`??zOYyP&rm^H{zbmTGagWQ8 z^BU6QsUf<5r1glI4`9PI$0;*>KL~CVDM*|W-`{>A^5eb{i7Tpua{mbK1)=e&GVAz! zz+p2Vz(vz^Uf^ysA29c{I{N+*eUFNR@60_y<-FKfFH#JN90@*9#9TTU^6 zYd#kf-mhw}UzLbcFPbVr|K4eNC$Aqax@8t02IrFxI!+zX z{cES0?q84UXaD{;TkapI6W!lbg6=pk7Kx9lk3j zD|O=M=SuL?UhkP_e){@sDb};;JmfN*qrVUARY^H7IO6dBeE5fazNv2hq3s9#wa9^5 zr}xpXly_Yptt*Nja7G2WAV0VkJ6Z{Um3FkE{p_>k&YWMf^hlf8A7I+nHMK zdZPM!*8YQemOHA?fgNvqIXz)7S7E^z=W6x)rO|h!zf9+cAA$T=%!1<_BjdZC;V+&A z->Tso^z%B-w{ccJoMQt1&2~B|ywrJU$F&TNes?Y? 
zMQ5Uu>JGu<>kG=;bf)z(+TTFuyoL#xmM10OH*)`j>PM>3eKDRSml(7Xzpt9^8N~mG zeR%^S_>tXs*!SFe=APS>+;i)tcax}ka`=qhUzex`o3Coe{F-rp_Ih6g8OY7^u4!feGhKBsN+l9INn8eEquQ% z2ukZF=6rTL*24?jF1i1v12?wtyXKeU$+;;W1g5MA?aQ22)X zE~>jXz?|RabB;d$Pzj&kMzBwxXB~;$ACNKk5#e|=-&@;(FT?15Mdj9#OZWTKpU^n6 z$C=1|pnu}!K7dz?pXnE>zXE=J-wAiue11TLNglE}=sV)~ z2#pW^KBKo?UGIaaQ_e~R9cpRJWn#uj%*zE`jBz>V0Ro(O5 z{z1{V=DS9i<$I_%ONcZ+`p!Zd-t~9wqV>D0l6?O~JI41BqN~|QMfZ1-zEO!;_kZTV zH-G$n)~Y|p_0u)j;rfZnGS1MynEoT0oHX#2)qJnJ{p(i!BV2#OtolpUi|>Y4Ro{0a z^(CPnJoQ@e4r0akrl8wwj~DCyq>_C9_TpE|c*eP3l0RrQr~;Z#EJU zqzd(rz8=(ZUh(hz`v~T2XUpqpRAAd%HKe~AJ;dL8qW#`jYw5T_^Yw>lS2_Pf+KE9x z`Xxliczs#g4yiX9)Hj$i$9_HVf>dFV-H!sA)mgtHxs?L zx8t>%A77$=F*f;*2gReQezKQqNbU?=+ZCnE_g-zgV(!(}>8AeRyF`lbd2)ZG``rRN#QYHZ z`Id21*AuPz2~eTJ(N^|9m&&pp?hE$zi&U{^=Gwu%-D@=dHnbq zf^Q!Nc>D%>MeFgo*$29(Al#4nwg(MXeI@mOQtSWBV$Gk6NS>c6|81L;y z{{t?1Ue}WXouKEIT&DFw)p|ZNLh|4)Nsov<$Y?#E*59Q_uhV)yY6p({b^zKC@(f53xxcK)*???Unc6d?+$YW{$ zJ}vq@hwl%O-@Y?J^EA@;QNMk}*w@6R74*G)H%a{ZN_wB#PyT*oy-$AP9*nOUGv3f& z#9RFPe|~Aj<6#$)yMY%DA&pDybJB6AB*&fFml?kdE*;HV(pB$sx{&N~JK{Qd8t>S@ zM0BHxDy4TE*Z9-;{u9wvIV)scRnyT)4(a_)N$Lm6o$7uk{=NX!M|x-Oe4Zz}uI4yD zc~aBo62v!X9`+CVK5(CJmpUo<4DL@v4m&<-_?}4d{xg;0@zL>dr3Q%pUW(-w{O0eZ z`02_SkbP^o-}Z$>XUt#`uSpH0b6hXtk4LCH;*!k2CHP*}s{nVfpAB)8)Bpw$$5YZp z=If_5zdfB_CC<|NN80mQ-Jbr$CW&(<`MX~2dyTw79$DVfIgyLZW@(>%Un@=D*J3$R zMUEQe~>->K`zd0v*kSC*Hvv`SLE2Y)FoXQ@HiFP9pe08Np{ISHrlJo{%KTFHy_@@pO*|Ik zo9};KtJgv2I{!5DeZE)IkApzj#rnxeb!2}31!B##FFTjv@7WS#Nb^m(Ap_-*>!UuXPw`;SHOyS<4VpZoZ3 z`F+A2+ONQXJ5~G1`-IznX~v(}%}%Jt0?C>Gox=IuCrtH`T)QVV9k9P6R?gpL`4_Yf zM*5)NiR_nyAHn;pI-aZhLGuFE_t;;}en55iJ#?I5=mW-)Uva&mkM<+d^LwtC>Z=Pq zHq?n8YoA?u`1@|O9qEMl$I|BqA}<}jJRuIr{wl`RJ&?Bw`|vxkHdIe`@N<1qjh{j#*@FF{=cJq^wWPjD_!i?&#d&HIXODN zXvU%YQ+bExmV09BeSV_EzW5URSv*v}>!I@dbVBgh z^|27#ibE?M_s=>G$5lG76Z&UqK9xr+%)Y1;X0n&SKFoFgVD^FGvzKbP>tPSLc>PMj zUm(9@#QOg99QWmP0*}}qIC|{A#D9kxHN@`A=>(thY`Bx4+jhW9D13FQZ#_ri5xE4%L$V(3182Ufm3m)`r01;k5uHDY zAqiQ+pX47QIpz1WsJ!zy^6|UBj%fCG(EbpW>`!_O_fU@GFv&A%Ju-1p_Lp^CXZE#V zzXIb)>3v@4O*hq}-}_B`4)G6^2j&Yjd=2^4q&D~++qH6p2lkWJ)w+s8|Abc_zq<yTY2K>qiav73mdTx95E&V*k7PP*>b{C|k)7U)sCUpD{Z=@yi?=!X4k((VM>P3>>MI7IBY3$eW;kUz~=db0h> z5}(TXE%d#xbw~p zHuUCsJLVNMZ?984wd-AV!cY7sdu7E_=MBG(r`8R>9glo~w7KY=abO$JU@bckmp1C`LKSzSDt!2pU3-gX@&%)!}BIIJ~D4o?jS(m2QraA zmZcxCFG=Mo{vID@tHw`|eaX0YR{TapJ4r6#$nyGTn)d_I{@XUTPp-q?IqiN-^jg+ADf~4+K+I}ybH^4YPjb~?aw0v1cLmP zlJhPsP!Zfq%Xx@2*4+{7>-xU7Thv5?r>mQ9b^RNzCxW{zBt>mgZaCFwdYn z{;YWgy+faZ4SK!>e-C`?*`o7%peOnHS>L|%_kDZ*SmY#2_Eq@C_k5_;c;|V6@Vn=& z+D@)^hFJdKH;NplSRQ(gXgc}pp=mP@;J6400Mp-^2lTvJ+Lv|VH^a~GSzzpOD$V`r z&+vSq8?soSeqUnd8x4cb1oEF}_?xh<`npe=_}&nGr}J zs0P3H4#_9|&Fq$S+{{6Ty($ZN0)4yne75II;9|aH`1{5^AN2dN zj@#jxc4xM6`*iu9M>XAM(2il?RSmo!vFCuHr>u|nbn13zF{Efbj+piA496#X9`y4I z-Bj{Ip+|bF&;#X|9%)Svk>{S5Y5HenUO@C8;C^6!g>mlJYwnNfPx3hE-eve<+L1WR z;)mY1nodbO68gK(kV9@qLbn5&F2Wc2{*`|p4ct(nf8tEx3-<3bUy?jOhF;L??|_3Z z-TS0nK0gMu+|zyn>JRuL{Yz})c6ez|=$>FX)%?hU!hC)ro5eg@%O#id#}D=s5x!AF zcf4Og^Cy`fnfVr^A~i$(&B?ya(0*x2A2IuX^uB$lQOvLjkJ%?A`Hn~P%`~H5!#&@! 
z^dsqG(YICV$?n&iaRj-h_ExDUyD^UQBsW@5c0Zu;JxhAB3*+hp+LK)XJ(lnJLCN_T zG=S<3!DIc}5Zv->W!~EJy^{EO-V%bx;}6PP_{HxI)o;&>c-|U<+vUkFK%O9nJg*_T zIK(INuSkzT-q8Q=_K!eAx6nK1hY;XLU(XYIe%oNqtI7Pf=U+?qm*=-3czl0@@>Y9g zej6%p_gCl{l&=!MO8EAV&P|Zt#Pj;@XSBTf@f4?;`7QA1`K=qB-}Zc7kwLqC2-%Nx~S z%Y1Jc@C~CIdOntItTX<$D1P@vb;fVcXL9q5|B@(ve?Mon^K9#Wv20VF@REFl`AK># zUq^Yd=$&n@Gkz;RkfnR>=Cl2@{yCrc>0Ix8;-~Qy!jE_lzhgw7(sxC(v~M|tkM&zE z`o)$Py-rv+{bl2)d-dkA{qCpc8UH5*e|)?|+nZYWQ6fGg{D5D=aTw@}ACrB7-Hg1O zbo1w*Ft`>;K{yCg3y!GMV zHKPz6(_`t84)~|bSoa_4a(oNd0d_VVdS4laL zeg^P(J_T?~4oD2c^M@?m%UF)CHS*21@3Ucir;)C&)O7t~1-eQ<1G?2pSJHRo=vw1= zRy;2QH$|Ud5~THui6g57#|@X>FY_Sx4ydR+aw*04)BE#_6y7QRGFat){4v=-HpBbz zdq?Li=V;G~xcQ6j53MVQ^dsp{)|*+n|1{ql@W-A<*2+J!=RALAz3kI7+gG7pE*EN7 z1-ZdC3#rFT`_W z*G7Oh+njQq)OUM266`by* zdEdCJ_m$7kd3VC=uWRf0R%RjE3;VL3;~rodp9}HrE6EknOP`}Axe|NrI2^C+MNX1S ziU0H#E9l<<9?$OpZtGuLf7^0Q@Zpd=S>>~50v^&Ubc_yiOZ!MX=XGemjZel!;8(=A zH-KC9DJs7QpV7m8MzP}-6@zaO4jQMRo9v%RMdM4oJ4@0NY6^S{;PLGX;8y!2z7#5N z_n-Aj2wo+A8c$j;MDFG2U8Cbmw5~;ZBjHpNUxKvh_>y&RNAERS4mrLQk^{S6wj9Ox zE6|q~-F*F@M7xQ<+e*ZhSboqy@V74Krb^5M;^!TZC!Pm@4)WZspEv5~s62K1f^}L% z$)MpAg)VpX^x`@UvYE zm9G-NN`X}vSJ($UL+dqtZRig4bId;A8S-CFZ67e?0KBiqKH%QJ)BHNcb?IKLV^RO` zU5GTTLz({f;v8*((x0Vum)Qr>mbeSW)yQ8m`<}SH zcweXe{gu>ySR?&ij2Ql@QyO2@^6y?t-+2z}JM4SbdXM+jps}pSp|rI=#;fXHmh#uT z?0tc!+iy5&}vh6?P95wsLE>u4Bq?1eP{TpWeJgxm{eZME}K`iS}8@-hxKN@;5 z>PNG^!S`;J&XHuQU&FrIzORP-!;n5g&e-!| zat+Rr z`9g9@cF3&9oBR2AogC@?iB{nY=ILR+e0Pp~QA+L`M!ZA(WZB2o_bs0ve7>{qXBuBp z@4u1#mS}*EVnN!&(K3vu z03JV{0=PZyX&=V$vQ8hr4X1XT3%Ini%W5CZugEf!ooG-p@6z|xLXl%UQGkNaLz(k3 z9*XdO#O!{K4|%TG`EM?~ThDX8Nc*p{=QOf^0=)P0IjU5n#0QTr5P4GS1DviJ-_G}n z3@#A3Qt#ySwUR&UEimV?B>&rVdd^ROG3UQEWy)W`?>n*oi{w_Rhv_}-CGVzpSXYjB zErVXnliu)BoUZ;p%lXr)QQ&Jt-tey5yCBQ@MZK$0_TMh}c0qN_Rowd}wA^Iy8WfBx%MDL?D{ z*R-5xn%lXoezf2O=z#NIW%0YKvN@X1^*3pI;@@8}$9i-eDv>K+kLpk5gbu0d`Z1{c zF@kZqFV%iwI_ekpU*qc`+i&2wR|p=f=R$BxkBMIxg4_7n?+n4K#2>_^ z!N4my?gS?Q{CQ0HqvN&WU)=<%sKfw`@36dhTJ9v>QOCOpPyBhS{>w!!WItN}ZYXl>M&g4`~AWt54jJ;z>jovtR7Fc+X>8 zU{2yd@8I8v-&eK&V(>nv&UmeMyKk#Aev0FT=;FS0p7BrC8NW@R2kMO9rqBI##&5U( z*!<$pF0V8G5`KGpd*}mY1~1dMBjhgdiTHkqw{Y!aMlawY5kahJ4Fgm?xB$N z-se$2y_F^P8JU>w5Ik0&h2XY6)9W_a!lUiHS#M1Yh&^`rT)i5A{|Ecpk2vTN=$Y(q zz&_{k9MR{W=pMa2XuS&T_6Wvfc1FWH>^4@SVDf1kaTtWK1{|Rl^(@CBuU|u19 zglGJhG@rBR|30c^J(p3bc2w7p8Dal9E$3d~kxITdh@Y1|4w8~yP-n}zFO@EepP9Lo z>qVa;_%cy^+2>>4PktKaOXxTG_2RGWl=DLH6POO>d<3K+!Ttp3K=X3=A;yo8bmK>i z$b1O-+26?O{i`#?k73`h0CU-Ykmpt+E*K6wYQ>*GE`XLJo$e2v;C7N6QEAV4gYx4| z*&(0`$rD`*iE{pZ+OyV8d3_`#KUK>Y>VsVL|BJTIH*@_fO3E|ok>y?hkFPI)+wv^s zOOF>`R}S^ZF3)}s(G7fuYt%3-_!NrEWmm}l9024yVkR!x??mHYm`_lA!&_?jjq%it zQl}XI@)jGt0s=w5dC&FzUYTcLT%MNrmp&IVxA+&v1=}(2eEqz@vQt7|^o@Rq{KL=f z;8%SQi%Ex^5&bCNdFcNohzAYO8|gQlel9)nZt~&XTZ?>WR?eqX%R(yWy%_mP?^VLP zfh~%TKcSre{c~zVxxbV6D*;Crb9~+!&bPiWy0^H~n;!S3iqqa$@n-La z!t{r_$XRdzj@BQvWUwJfn{7Ln`tsgsj z&I@1p(=Vo8_WN&}m{lg6aCG%Z_;uFrqvh*Q_G~ydt4t^%IoMtv{?TK{rw?tKreJcZ zzv7|djsw%hQed^?z~p3cYWWsST>f_gblQd~JSju58Zc_j(oJWx=ZX;m5ZYr>9B%kY(Sn(z{{r_>Mhc zS?t-hqkTlLc7c=|f=Rw6|+?a*DLb<@v3bZ{N6n{nmBcLZyPA zYxCRk+c&Jc>auMJ8RzLk>YQZj!z{eESNe`U${g0Q1#zNMHQz4aFc&-@RHSX{(`1o!oZah&vK=^ia@Wn}4cy)p-i?K+8$&flf95A9ieu~`KycxOo6%!(Wxl=n zmFD*xpM&9ilSSC{F;dMFv3?j9^UD5#K37jce8x)9XJWj({X}_))H?R^;sk03lxO~i z(UNUI`RC9itJELh+s3s5iOW-#eNTV*cw8Ajt>x03_@`(&+3y2;r&oDX2X^czP8pjp z8y==3VDBn#cX4{%Bn-<5b&Txj9-&-+yynWzDVK;O?#?Ojq zrfVAB^wDTa^et?^fN8^umjG9BY&Tq*4J$W{@9vFepY;}FA9CSL2X87Z+m!F>z--OZ zO$6VtVihEhcyt*&trvMr-G@-EEpy1zj=Sz0Z^Da6kEz(I*zH8I%KfikGpS} zr)VS?Yli8}L26M6{F40ihacfgXm+OKfrV_RW70$2K2DSieofIo>!qx_dO_*WI+I 
z`~cpybC-`R?#H!%yv65b96YFBw3wYoQm+`h4B8EIM{?Z{jE+rTbb-zohR@c1II?LY zwBDPocl&jronXBWJ^6_bTS=8(BjYRRKY2Rd*24&K?DN))k4+a3k+)6@WvBX=9Bds~ zQlW6V0{rT+J!9iHkI}Sg`tZc;(Y+zRb=&s!Th^`HzIDUKZ55DNVE7+iiTItb!>7uJ zXH;RTtol@i^6s~qs9>P{_>vz8h<3t($@4W~eosWkL_r2m^<=ylf5Kt5^yjz)SDH2wDT-S_A- z?t}k$-P)t~e(v_aSunYEPxt2AHa>FVsfcEgxCT52($( z(25kIm>z{6H(l5_ArV(`=6>hiPyaBcn4hHEKOtyBIJ7U=3EJYE|f~!Jtg|*HK=k4X4QOqpLq;5(EAk z60uu5#r;I4r6yyEC+4HO3Zs*OWcVXj3IKl){A1CHX4pDinB)LdU_qJ?l()a-a6fQY&-co~lXnh&C|fQAV3em^mX2~jz*;K6%QCk@*>C)?>xump{9|m( zEc=JG^s;|CG6D4iNuMt3O3lUt=k+Du_-5(g&g>GAl&{~{en+H!xVMoEc-gRH$COyKRnDI^gS>@+8rGn@5csZz2zjfu->#ka}eNBGbn#;Fe zwJyH}KR0X$${%?peZQvkAEGG*{`}ydzVZ2=%H4A8Pi}c^>0kZeYyS`P CQ2T@c literal 0 HcmV?d00001 diff --git a/etc/multivm_bootloaders/vm_gateway/proved_batch.yul/proved_batch.yul.zbin b/etc/multivm_bootloaders/vm_gateway/proved_batch.yul/proved_batch.yul.zbin new file mode 100644 index 0000000000000000000000000000000000000000..2506ce065d74fc6a610beac3843b6c147c63cf10 GIT binary patch literal 71904 zcmeHw3w#|%b@%Lj?aHzxS+-|?Zd zudeTv!;c@zFW$R5GiPSb`<$7bRT)Kp8P%t+xMUsGgUfJsZDv9#_wMXk=XT}b$-3p zC+n*_l#o%6I4&B_3PEw0MAjrI!|Tk3B&Ef&oy+D;dULL zm3h1G1L{ijT=}`P)Cyb+TtBCVbLT7SF|WrpgnOkfP#t_7QETby-tFl2nEz6`m-(X~ zXNK^9-0?FVcz&PL;XJ6+Oe^{u>@2Rl0W)Y9d}p_#UZ10~I~gku(`VS}5W1-j=T3&( z!Si#3ZXJ&bp4<;h|L*<4JUTHC<^HXH{t@f>N6mPZgL=!|kD>1^YK05B;JVm7&R3eZ z;B7hTGu}F#+y)*m(VJ$*eBz!Uc%AE4zLe*q+`o$Vr?`Ej{GC>L9_$ zKJcd2MB3L0pYX2sVm7_P9}B>9W#Ydcf{QEY2tHT9kiK`$Aii68ot7Il;70HQE^<25 zttkI%U0!r1z^?^B5V?Zj5j^)na0TRK;U~4641?Ot*O`niXE|E<0$o1C<%?)Q%#TXf zyC);{Ms&SnTyNp5dY{Tly=}VQ@htVX0QIs0FPG(hKb>VcT?n0Vl<@Fyiyl|LMVAlp zxIZlA;94pNatxfTz$ScBZBfl}Rgu~` z?gZ<>Yf@X&|n%%AQag1hJ< z!;h}ub(P2SQh-+`c;{>UhIqO?jwg3i@Km^6@C24+JQXya3XJ#G;|5O!jVFvhhNr@n zf~Uew8c!2(JfWQM^i2y-0|B0HHh3DKaT8u>9KIgED)4+mJYk%`L&j%3Ek{=#G7 zt8W-Q2|u|HfsQ1H|DoxUFANj?6~Wtx=RY-kLz@`-X+DR@GM_KF$D-dWgnr&VGCtsw>E~(sc{6c7_im$kI?9_7 z`n71d-Yr5u?+rpf$iK)H%fC`62mKsin)oW<^8u&;uBZ7Je6HztWt@JV&^O?7jFad$ zjByOACBT!X`th&Qa@B|DD98O_n%5#|K*q1~v_5Fzc{#9oG#+*S0)hkjwHiF#AIDSc zHG(Jioq{KfoAFdtA80&*|84V;8c+Ae@q}{1)44XDO#7BMQ2(S41WzqD z3!YkUO0WWXz#p3-)C}s-|;5F`^PVZy+L`$B^tiUWT}0}i=_Oq z$1)vUzDLI8ek-u&J!^`G}XuRmB$X8X!i9IM>uJz#MnWqt0wfG&Q z2bu+VZ-dO1i0&(ZXOug`jxYQtehQv2oVFhd9A~CV`5Sflu`1>0>je67j#nuM?WkNW zi;oljSx0Q#-xYpo`!nHR%u?*^0r)+Dkr~!>`L(_`bb3tg4Ie-D7~oQUln|XWOG% zuetGZz!N_HzHa~9Di4>};Nu&9v-lX+MC{H<>g5dVUy1aF*2|fvKE8aM*)T^wzOf1) zUmEi9QJPnsd~Ea@>Giv)z0vD4PO@Gk@eYFaBA$#tJ zdb@;nZGR#B?tUe+mi?lyf}>z-I`+o>lZ>&0t6(7*j`m~Dk$M5%UC;lzrUh)rZ^n=RX@5i3E2hV>HdrqE`)c+yq$Z*)t;2s&ykI;Xqb3@g9 z66a+i<;nS;u)Up6ss+CT{jZrv`0euyzpX*|j}rbtC5RCFpC%7x{C36^(?p67#m zlNW8J{&7E|%XvJwzj2O!|No2SNzc@IQUKNdugW>%M?0KXMt(XqPr7J_wg-##xWL%C zvk_w&zK;75As_x1>X$m#)!@UhJed!}a+?o-S_6J3jW^PNo$xuY8S(d(#APIxoY&1> zqw(@<(Wiw4qHk0$`i1UnU#IVV+>^f4b_BmTzpT29-jF<`>dOn=Rpv6*A38soU6mEP zwcD4{is^- z;h7MARCbf#E35I9)%e2t5aH`748KG9`0w{&UwmsN-GEAkBNAGmh|e<}ru!LZLjHc0nGYB=z;Z@#j?bQjurpG(Den!k@qk z(FJZvk>;iG7s@lrd0blms5m~{hq6rPP@V%ib?SZwQu_(y+3F`;M_jMr@Oq6h_=o<} z^O;*ezL96EAC+kie357H1@t`f+3m=mGXD|Z0e|i;Bon*B3e&LI4>g9fGZ z6qt(hvI}vgbqFNU@P9wDd1ifd!Rik6Y1}PXU*vUK$PKqYUFLU2hf}0{%NZRxbxi!q z9NH`Q*Wi;<=UG+lEZgTud9q#!+uM5Ou^RB%UpaaB9tmvP_(gApw`$YlTaD-EQ*m~yx|pt+UABDrB*&C# z<~Sp(j`whU!TJdLc}QJF<=IYY4}HM(Fi-Fx#Us)lKf?BymFP26NX;jEg_vKtZkAtZ z`H|?4_4T5E4)is_C;8xb$nyyQolmdIw?g-@Jelrcxy84N{3XKI3iPvvFfIDx2IkhQCbt;5GPPi@qcLr~R)`kJ$Sx#QzG*lld(y zxA@H!|0^uF;d5U86{&I)Hv{KIJ@OL@(D#Cq;B(dzuq}H}HS+#{bPn?NaW)3_lI*Q18I_)%r`> zt}6JWuv2aKu{?L7fcBBW`vOI1o-4#|J4NIz2idM@{cikO_)m=QOsb#SwqEp+Gt6*k z{d_hY8V7VqiS%l>$e~*3FrH~x=!fvnd4c?ABp&z3ens)#&ExG>3+D _42u{WYDg 
z;vdjF#eP$MCCzVZP@j5BYQPsh4fR1XpN8d@K2hCB<$zDUrOu<|ueHBQ`)OUcFA*^% zUd43T4`28UO@G0|0)q$bUw}b0U$Eb|;O5@1sdlqJ5&XIk%~Te0DS4$pz80cHsWTJg zQ2h2#AKLRHIU5Y%DA$^wNBEgf`jCE1gWq7>L2L$p5yD0MhIwAc`fj0#t3BEmnbyDL zaYOps_>+B;VY!Vz%G;*FZ*+b^+~G*vL7^b@RUWpWOnPRa=BGBt7r%bC9ut4w!~+h+ z1Nd9LPV3PPqDO^)JD}N45L}8wNIx%dd^L)za+Yd6SrWaBa)M*!4V36F)qXL

+$< z!>!Pp1W#qxo47*S$Ma(Cm>2g?@&p)=uUUFIVLr6)i|8Nby=;3`D_?J|cX*VCEm8kE z4@+^#UxkMTwGC%cC~Cy#Zi>UbdkD`g&DKl5{_ zPi#CkX!pI91fRwUf9WixW+=WUd>Q}Atb?qcn?DuAn+rfM$^+W|RF!yh;kg>WWPd=9 zmY3*zjwfk&dOvcH;|t%;=EEY5Tl>3h$2H$B(Q*^ScZjpeFZi6Mx901Xm!au-Xy2pj zccLGnXA9K}@{&D)9(yL?ErqTY|GLR~EaVHDf9s8NedwWAVEjtrTxHfbC7xV}0``yG zpJ#lbcV6~e{~|L)cGUd^;vVcDBnZ*PB0bcJ6mxjeFo;hGtz7IjvK>F6)WCV1 zUp4*l_}V|o&z1N47-yQ^7XAzMax#B}t+ymdbJSKWRKzl7z<{e|Urf08Fv zyv_mr?-e1UCBn~1=1Vib%a@9s)A@*Sed{+< $*1a2fZL;IR`mU3kA$cZ$i=@x& zV<<{~*((`8z~S{<5>MHe4m|-lxR>_FgM2jCn;|BacI zbQe4-a*TBv8fTS#=`F$gFUkAUUFd@n=>xqF(jC%pgMADI*?+G0tGD>MqS$R$@3(OE z{&TY*9q(sBuY~*53wj@WUhiwqA57n;F6-UxWj&4}^wO>HBX1JB+~Pz3Mcy~9nICO5 zA7kf786NOu{Cmt)=x+F@%KHedF}gc|K>PCh=&HycsN&bVe%I;M;q=^LP#VX=Yyv@_4Lo+KvTzJYVo$C6AW_UEuGW{ol}gw<=o4;QXzN`1~&D z0q+8$TO_|`^MQ;zw5Q2@7M9z5V(cx^k^HU>I45fs9PKP|-Y{HO*-~R14X&$@*gVQ}eT4(4qaVVBc3}vtGZRi$6%?7XOU!uov<8vjRTf!0|cmkDdp8aUJ`A z-9g+f`NJnw)guyjhvmui4a+V1depwu8J64pBl(uFyiWKE<`VM__|`jJ$7^PNqQ|K( zZ`g_bCm&{b5|?$ZFRGsdyE^~f_K4QM7xKKqxW=BB@MBn>Jg>0a!kg;T@<4G5$;C3s zhY?-bKGVEqy(gX*wBUFb@i;^O+8!tPK9)~Q9%tCzg0JO9=O-CYBuAm#*3zpD;;H8P zh;7G`=a)hU;0O5kx`6)@&990cHG0hCyV|upcWVE?kM-J(%Wr4B7J&moJ{#~SMPA+e z(T@93OL%`qw?lCzt^0{zK<_hz{NBwP3+pC$TbK2&XV`t(FlU zG2e@2y^8GI0NJUztyhJ7no6$)FYSIbZhwOH&Mwe{rt(f_l+Cuw$4<{jF> zWIhbb<8szTV#@1gaX4Mf8AeUfXY?$l% zKnpV1CrECd!*~kuY~g8!*DoS|ew}pC`iRgX?-j@2LksnqLPyYpEnm}qOMo}vYp)_X zW52ZJ8!`fIi9OoMiyrkz;|Jg#W~Pu!mMSh}4Ne$aXh{fR%YU~U*K^&bI~WnFB~6` z{5Ujj%G)DOX#a(t2kkTAer3O>*|&>kypO&EXe#l1u`l#uv{xCCSM2RC({nu^X9jw1 zkjr^J#9N8^mFOPlTwy)b8$dqicHHHZ?DIWU>!9YnE#p_iZu59f!@Se-*TT=C9Z8m# zu-ulH)dZjCFYm!=IS=w4Y)4UU@WB1y{(?EK51+2{`yS_$X0H#MeRepei}~2fE7|+) z+McWFsrh5U9}0iCA2IXL{9*DqX8isf$NNM*-Z{S4;?6pE4D)1sI)}R#KF)<$?}y$p5mNjb`n}2aBrLb(kMoFO`zrfd!uFPYV*euRy>MTP-mkgBDe}IS6?o5t z^kT4o)9^p`3&CG$|27l}@a*1$`gh>|N0N6_ty!Z_^}Z(bLv+FWkgPA&zy`fPN#37O z8D2N+SjBYfLywG4Z^-aF^d&Nn%=@5= z5p$H!OWCi{=sq0LyCL0dx{LoGmfLhS`d;rB2j=8`haBiI*m-7<@3Pww;z1|xGSn;K z#^|{kez>x#C1oH6eS82w21q`xiwI9I;&jDKaua|6o{Cad3P zsG=uDKcW_`7Y=rwAMl;cH^x5GMpMWt;Z5QVhtADd&JQ3zRnEXeh3@HsCR>A%_VS&z;md|59!dHg&|_y;wN z-7)b1$q{%z*k`|5--}%c;{{sJA$}jxby!bXfo=F-{gddu$C%viPkoi6!^PLsPWy${oRpII5 z+yLbU=Q?XYHy1DW4$UvV*dHJ4Jg=I*^sW|sdWSKtk{&nZ6^v0zm7h<2t;vZ^z zc&$}FB5@=3`7j)cgJqwNlbIndS6Q8R#lD+6_+FkL&(Qe?vcG_?%He%H-gU7!ym9{g zSzzbH9-yNf`Ez7fpAI^`8~q`V;=V`vMFHm*+4$U=3~bHg(kzd@Z*ilGyF=E;d6Ww!Oy=p8NQ95jZKDcjo%w= zGJJdd&uB7yd;I4#8NNOKv(w>we<1py{Zq;E?fqVQdENTOl231U`uOUGzY6do^6!l{ z89w{#q5ODzn+%`w64CKr)nxdzz7U0fWeh&~OXB~3oSsMQtnK%RA0FD_WIsGCx8fT2 z6E)yV91*s!6Mn;XkmeV`ll#dSo`9wX?JVUBqwqh|WcZYykHY`^Cc~%qt&ovporRUxMGr#b?T}_6ciQ~t+qRH?n&W+;#AL8)c zTKbLh(`zF7uTK5r?WF!9?JfO@uZzUX>xAF1{?Y!iSB~SQVYrk}h~oX9nhf9aAKcHz z;8Xr8hj^dt80VGUA5(efU=S}_bkO@9!t!J~gyps!s|CLU{jZ7i-)#7#=jRhX`8y53 zpT$ocP@Kr4F8YdwYtDaKk@cxRg>%U`es{1Bf#K!!dz-nX z^4)~oQu&SozCVPVm4ke~d|x5EM4mfKq#oW2elw4U&oQO`5I@i2Pj5#J{zMO0cL@2f zPX4j+?>(*br*WxxweLW2+Nc4AC$L$ zS>W1uuNlYa{Rr(?_X3}g{-XDPDNj%OjQEE2nSQV5DOJZSesfr!Jl?R}(r5bIm0&)z z&c~r~4o2{0x3~Oa59c({_vr8^`&-Purd3W{-g`yf+prU<8T{6UH$qDhdG9>KlK0;E zm3Qh7bi`ksaf0T9YWX;C9q}vjmM5E!v*Fu**xDw;x8u#HrNj4L7|S=z=Km*9`_6N! 
z@;~|YYa->z{2#Wr_}8liU*s!nUnhK z|E%=>A?GnarKVo7^Isd94Br~R`>Q6yx5n@Oca!1UeD%Es;oE+Tm1pqIY%+Wc{~p=> zQdG~z<-*$(mkXTJzEE!Y-f|pU7~(inx#j5h`tV)EVI9XF;{BWs zzTZdsUlq3Se$r74L*Vd!p!OMKx6O9}9ew_SK1U*mi}U*Yh5YII`+ukFc$N1_IV$5z z9P13|=L0&9y;R4sOKChNj&eVU_bg=HTfRR%>~LJ}ehBd}@U-%ESdU&yK4?01K8tL4`>%!igADdUamgF5-q#&@@c?-!)wyKXtM`SJM;!nftg zUblQwli}O)x~9qSt@S>S_RW==NMFtaN95^}Cc`KBh{&t^jV8ml*USF7$?)y*f4#}@ zEq&=e++_In`2Q^#e#eb*zumq+>3v(4e7j#u?*F9W+w=R^Cd0Svjr&NG;oIZ?W|QGl zoE(uq?~j@c-_{?`wcz9RXxi6%()6|U-%OL?lRk~&=Z|CXDZfqUUn=(<^t}4IsqAl# zlqdTE;l2^e4{-mw27Kbf2G)J?+CgJGSPEQq&yi< z;XZU5Pqp9^JsW^O%OCqd+&>J?1-8}+79&q0ag?Rc~hd`QcOk&8c_B^OUPC32z9KhSa!u7mEV!KVu4jlQoJ*+)NX z-Mq>==Hho(!GFE_>Fz4}>0`6#Ft7SauVaRIpFYKSKWN#7YIy%flkkr9`$W6&gU1E$ zP|FSG6-@r-8lqd^M}+iz$tkAiOXo($u^`5D(Xv6pC=qqeK+MN?`hy6n_II!>W`}3^( z^R4@$xhICqJ&jKlqn+Z5ai3BB;rVm+d)zbR4+rNEqPBidajx(6xv}@UyMH*>_qsbr zqVIKgf4|y(0U2MUJkg(tlv{o>(82G4;VS3Y?#Fyfn5WtALU}QI-||$=`-bTI-H~zI zehQB(Qr_sikJ7xme^3q2geTP#DNpV%Y;WV4@~?zPdT(!#fZ_js{Hx?UCEoSaKi%^_ zqa{?pq%H2wFN#{E-J;*xNf%v;{oyHH)=WJtN zWKq_W3VNNVpwG`X=V`mLE~H#He%`h@hdZy&<<3|1dXMa*q_|*};2Y;p<1feS!RGq_ zU0*Zv!udfMXV?GgdrdFLXL_NXrqhivI^nz$+=uc3q-(mZ7P{e_qq@$42;Rr%_f`Y? zp%Z=|AB%7L{p4Zn^JF^mcLfXPd^E)Ge2z)i&x3iz`PSk~?`1Tfh#c8+MyTtFlqd6j zsGsVE&-j}=e0}~V_%^?m_-j_&LEj0)Ss?n{besaH=|S)R!uGTjq3?(fhe%%tgZ$k>E!X-zUFBTL@&dxpK7`;qvyijs z_jfXnf&1}3Z`Z5j{J)Gse}ig4I1cl@!^~v@FZR6wI!{rbPn4)(7oybJPul<;cyBhz`pq$gBn);UQ<&@GU^_gnoJxqlp1XXv8m zY0jBdnb`N-gYStiMLvb(5Z~ji=exUIC#?R5xqo+7|JC}fx4(yC{bivaeD!M34s6Be z-eB5v?u=TZ=ab9w_W+iRi#$Vb6aRscpb9Wwe7|8x*0HtzR=IqU@LtsVGJmFc@2-6&h@XYmok{(V8nh}dP3z; zz89Za!_MI{@sU}dBmE1x=q_r!;anuilR=)~LwYaAEU=E>4V;`nJ1<>7gFVm>@yTk* zBS9=l4+Q5+%+=1;(9-~D+gr6(fB#kNF74014daG=p!s^YU>s@whl~?S132%G;4xVr zSNB5*4$BGaBl0suUoAHBk`0>3y zo-e=WKy(&8On!f$r^)Uwy@GVi%+#(3BTKD+m%+>6~?dF1=D}7yoYuq(0k6h==aLWj+DSxYJq_t*pFoWS+5?uQ_EvZI(+lJ za2o&m6R{WHL*B&RA#VVA?=t!y<)Y_x zKe?<=dTuG^SO%Wz*Yky8(qr!3`C+jK1+C}v!=mT&TS$*X&&Tb+@xTsXpA_`U4J=m} zeB${}M(>Lror^x7(Q?K5E26jNORjGHom>eY&e8AsZI!FVak=V(#y>&&N5RgmgB(WQ zZ#gCU{?)(Z%ik`(%ou$?>-#kc`kudUApX74``X{q{{7N_uci0%qR)#t*>^jVp*W57 zecW$H{37~3v#Ex@m+!cVUtde_)A-5XudVmV-+~|Cb%l{P%olO5+1KH{*2>-7p%s=_M_Z^?b{vGl@jYqtPEBl%AS(*pho$UT&{@w}o=cL+qo6hrO*VP>36TBe{ONS2$n}eAe~i zbUw?gEa$T@6U}G2)%tzy)pOkEZr<0H@9Lz-+e>)m<4n-)@rloXSFQi_`TN}?24DL8 z{eo3KVanya{w}<0Pkd>gk7~XX2X!Lv^RRvc9armnrQu(o_mRvs{*0ZI8stH$`NZ&EUsc$EI=@E_#Sl^%A z`5g3F!;-+^eR$pY&2UY=BR{|I>r;P(AN(8BD|O@JsZGMi6CPiD1p5-T9qImF75v;R z{L&rp6Y`MkuR^X!zG}P|*S*8wt1l6fDJ{a*8ao6Q&9 zSQj9DgSbxQ z4daa8{S^G;X+N(!$+}E2w$4QD___Cb;)CU{P2Z;zDNkG^Qu>eL2fcGkjLi{+&B4 zc?|D>#H*gC>QInfE@rnL3a7BqV{WW z&;FI6)AD{9Z(uy@8I+0va7N_m-}ePd6?{Jv>usxEXVs7mP3y2!g!!X zHJqH@=Y`pj->vlhs$5a}LH+~n!H64l`DV(a5JCAK+qH@`9y%WYe6##Q!GFe2hwna= zXFIoiyVaYfc6rF=NA^;7;V$xmm^dZd)c)+1-hUtkARH95q132$=F-11jf zlS6!O267WAPnN^5y)B2*K9o~?-yH2<8H~tzygh^#fBp0i;~kapqaWkBj`dBjpUkuzea-a0knvWXJ~sW5@fNnX@P>USGVTnpq9T7zsd6r%kBAU z{4RSTCT#Q<$L-K>h}-Lq2kd;C&ZjQ_vnIi>l24s4_$Hs)H25}tbUw9d@NN9)d}`C+ z+vC^y)TY6=-#_mqd51J6zOM>Bf0Xn*?1NlitFN!q*VoHc%k$HCKQ1{>5JpIHKbSur zH!1gi6j6VQ{j(POfqGfxEB>A$qE+K3@IGXP_kqQ4M6>`q4KA1GdEO61`)@nZ9*V@_ z?=SbfQ}o*M_r&D^%c!jPl+LsELKmJOd9>Ooby@~1e%OGGp_9H;C5P{ovxNMVhruf?jpNM@gmh)n>!y@tp||5U_XNV{28jR{Rr2@U08-wgS`)G ze;yUU5ah26S9>1<3sh9@=jDBfJl5SOG0rb&yG28!@;cvxSpFGIhw!rs>zCw*eN)4G zoZ_wJ-_i8acmzJJcq^mhE%c{;BE$fUe=82?Ma)3s^X`D(jD5j9 zKQQ(w2#NB(SU)2JB0)Be{uZ;2c=8c>c z_`B=9cLwv=!sGDGxC=XYe7b$_J2c*AFpfcj@A7`c-sc;9%KCWE&-A!k&=JYwuvyP8 zaDKA)RY85ho64Rg_{eV;e4riUBd_rx{M?HgJiJDQi0ddK7S zX4+Uke#+91q>n}4)~P3Z@cw<7=W%j+vIla^c+h&X=SB_h@zRr!s}q=y_h!;-ym0 
zqDQ1Wd0t_A3vb4sgInk`T|J3!d;ZhdA)PNW{zBl_HEDkfSz1LE;n6_h`Qie(hfo{`)Sj>Q7KtJ(2Q6eHw z4fvcNZvZ}@yBKM2`;m=~e;bV-^wIiDF+AKgw0_<9thugVuUN1B%I7d1FRIE1GS9F) znNDH3#RtTO6p!L6=gqu(p6OJCrE8D{qer-^m6~|?u zqUaL@L0Z3&g-RNcxlYrg!f=(~tZGj-OdC2l({hJl%Jp zE1?U&H-d9a_&KI6>jCB*4?kvClzJHXBjm@t_h$9`XMoc~*vhJ5!1dmjljnGUs{8=% z@6-2@ye-#ze@55K6{#M~)dt!Zfb<}bbEc5x_kYODMd70!?=PXm1LqO5$T#Bs>0qC` zlUF{)1IW|yz6kIsW}rNr)5`Oud=~C8;3eE2;Im#j9z0*i&z(H)8#nu?XXt%*f*Y)B z>-<(>k=U<4Wj_K$pkd~x36PUHf`E)$wLU%BGnwVqgrrlr0MYv|vwJXueK z<+lE{^>>mT8;tOi)!zFck1y~O=rua!*UxRoL`Exx987#GE!bAe3B=v7h>mf^qsEr zOSG;im**Zb#oUG#_$)DZ&T#ylg&7o>!=sEW8E!KZ|jLzI|s`$tyAc zV1A%)Q{J1Z5(gx&(}JIlk~{+sa=lnzpQW$Qma87$)1Y5I1b!;@y|^mBflc8@hvmt5 z3(IZ1QND7p?~elbb~)n-YejyXhqHpGU_A-tJ=9*-m#|-Qt&BH2MR+y)B>TgClDd7cPx3}-KWm@lwCwwv z+y2G=3uS!RCz%T0eW-=^N%n8oav7Y@H^+W<>VAsp{itxiXMOY7tKko4^gM!nsn-jh z^!=)!d{X0Sy)NhVY7z#%$9}FN@sA_xhyAiY63Q1c(ob~Dd`a46_51!PB6>nUOFQh_ zz76!hLB@seU@1*s@hfszkA{EJ_lWSBSr}kI)JTB~a zSR31Sd0f^xF(5gwOJ(w^XXR_G^*q2a`yCXWM+IfVehU6Ake>r&nEA3@<#N^E|7LAh z^KrY{e`lbF6755zJWo$>c8$R2GNO_&`L;no~d^Ybp;dPsS_=Kbfv|j^k zIQ7_7D9hx1iT$q>zVO(t_TN<%Z=~PXM9P!r6}GqV7VJyRVcg^w=zWQb{Txj3G-GFF zT^M{#nt=YAV_)sO=SMJ~68e23Iv~7~eQ6{QZ`OnEZ!%na+#8z=pZq}5FJ}Dued?y+ z#e#44|1}N1jUT=LuW9hD@tgOln+D$=zux~hZ}9)N$^287-wJ(wRPx#Vcaz*fpTzIq zYTf^(b$_?HXT9(=tsgURkH}!k`&yD0`6$hE<&moTj7&^Vq&!idMape`rsEr=y0x7* zd5O$`*kgy^KT!kl|6pHNt`z;|eGK-wgl$#BdY>SjHvyl)&LgzU=<*`UA)hVBsCHV+x2{w^9aDM_z`}tD0V*2`;k_{M=Y_PE2vx-x^oM|yx%A<`@>*Z zxL*I)<$aiz<3K6t1@#2kABpoz#Lp~jVt*3r12K4oI6Ut-@)_7qv~m;AoBVq5S4L!C z9sC5wgV{#}Zpg4dp(Nh{Kg9SE^4$0l!;&vSefB536FT3<`zQmy0>owiL9W{Y6!+i{ zW#v17E)2tU#BDQFj*J2xx8!0~m2f9mEh^as9J ziLsmj-DLmG%EMLpne@n-NO^L9VSAgOrG0gIp}2Z5G9SCWiH9A~o%nr7`zvHuBYAbg zY*XT8I?pL;w?2vc68e1(Ism?i+zr|Dj0OLJCd0S&zV=7vZTz2*@h9gKX5-8IxX2Ys z)F$&0An%2%O2(X$rZ*RVxVD_@I#aRx{Do;%vk>F+W8RjG)n zrvFh}`n#e$RoPmpq*ty|n*G$VVwEC(jPl_c<;S+XU}($c9a}Egxv8{$%g(J^w%841 zm{tG9=Izu!@8d^~rB%@L_eaNejgL)@PEA+Frqk-9EQYMi&!2y!Mt!^Z#^{5s<x9NNc_V1^d*{vszmtCa-nqk{{Nh&U zrvEwZ+*iN!CGY+EgW2QXQ6Jd;nd{GZ=CwcnYVO4^d)q`(v*6kF=MD$IlYU&E-hZ}t z+p(l(=2>=iS9P#QLpgdi+9tNdK?Ktd@pn^Zn&HT|R=0w2UB`~&tFGohl_UH4TFNs)F*}nPv2T3W-oSXGb>Uxr<8MFWPy@)~(yO?1;1qe_l}9QQEm}%X2T> zaZ&g%@(aH1$^W%{;o-E#xli1eRt)#|M-PvVhq^kc^7P>yrW40RVeK*RXl$+ z)w=ttwAMV85KOX()@uW7v zv-V9z@jo$gvi}G7O>gw44({4jnKDk)?D9CCDZbGR5`c~V(Ee-HD*F%a+vxA9OmCUo zGqoM8Fb!;0tsDi6--!Qa_KzXemhv}Q*?n+q($(s>bg4^Z-Pgz7jrQ1WEf+bT4&0b!B>bVR~XTI^9`g zoeD#pS!ML#z|{v*1*AN6II*31Ncx5Kf_Y$F$A0{W8}IwY9rqu6tmnYDe_YNj9K3hW zHf;+|MGd;|MCOiPn$(^Q4j`IWVe$>f^xBMmzVdA!pxYsB?_b3@Be({tzSu! 
zc|7~2X#*EL^>3Tx`*wFqYf>&?9^xa8h z3qVBm+cccpOD0G6j7I#jtM;Y~@a~b_0j#7S&;Rqw0$L`Ohy9Bdv+Jz=&gVXAKdW&c z}7u6`w*C>D9*xs@6tH&tpnLaWxTe`QEw(r=v^|CEnc5dHx z$&MPptYY{lewF-Ppu?xq(?6;_m8w2fIX(PV6EzG}kiQBN74b;~<@?Kr5m=5-j!qvz zJ|8h9I099p<C0(69%H2kP#Cz9xaa?Rz2b; zJPsa(@x(`~DQVi3h@bjk2`zIu0E6(0^Z~DSd>^9MRJ^l#+ z6M><9#olP;gx`_=d0`;hcS7~pqnkeXzn8}z*f-aod(3_myTAV2hjm_h_c;8`i0`RY zO`v9C&4QcjdATvT4SrmI?nlh;kiG^*?AA{4JW*+dsaWNTcyxDpbTZ_OKysx4;D^aS zW}SG2?bGE+P9TMbGS<`Iw*ON{?16O?Q}$LF-H!d+*ZqXY!J~=E%9*x^dJ6{ zKleS_aFX^%b<)bQ{t(XG-!;B}VqYcdMC~i5+ut6SFHX>OIih#QJu6Z z-D!m=Yp+qBO57T4zx9*l-#$>oKgRaZ)IY50r~c{81=J5XeLB^Zng!>ZcXZtnYY+F- zuC#5}(nsxQ(R=O2!n4)#87-{w`={T1xqdT2V` zfiQCl>?DP-SBwVh6NeJBX5kPHO&<(z5`G<;PLCfC?N6Uygn!m0@n03Q9*l(CikQI; zO5_KuI;&s!cko?eu0K~W<+Clnv=8Ht(dntmzFD-a;+6!Xfbu(7x8l{UU;KRMLVCn zrF0p7Zrf&BV+Y~2%TnbZ*_G)_CXl|Fp7aOKg9MFL5Qtw8tZ830n$G{b_NI$C94fKD z(S@jRc&Wd>(|u@VIgpg(sx{WnewEy+n{~ImXCe*JxhLh+7+AMRQ%aXp8UkKHmtdI)3(db{`;rA@c#qCLjL~% literal 0 HcmV?d00001 diff --git a/prover/Cargo.lock b/prover/Cargo.lock index e5b42f1601b1..dbc3b3425e49 100644 --- a/prover/Cargo.lock +++ b/prover/Cargo.lock @@ -8159,6 +8159,7 @@ dependencies = [ "zk_evm 0.141.0", "zk_evm 0.150.6", "zksync_contracts", + "zksync_mini_merkle_tree", "zksync_system_constants", "zksync_types", "zksync_utils", diff --git a/yarn.lock b/yarn.lock index 255bd901e035..58511dd1b9ff 100644 --- a/yarn.lock +++ b/yarn.lock @@ -1424,18 +1424,6 @@ resolved "https://registry.yarnpkg.com/@iarna/toml/-/toml-2.2.5.tgz#b32366c89b43c6f8cefbdefac778b9c828e3ba8c" integrity sha512-trnsAYxU3xnS1gPHPyU961coFyLkh4gAD/0zQ5mymY4yOZ+CYvsPqUbOFSw0aDM4y0tV7tiFxL/1XfXPNC6IPg== -"@isaacs/cliui@^8.0.2": - version "8.0.2" - resolved "https://registry.yarnpkg.com/@isaacs/cliui/-/cliui-8.0.2.tgz#b37667b7bc181c168782259bab42474fbf52b550" - integrity sha512-O8jcjabXaleOG9DQ0+ARXWZBTfnP4WNAqzuiJK7ll44AmxGKv/J2M4TPjxjY3znBCfvBXFzucm1twdyFybFqEA== - dependencies: - string-width "^5.1.2" - string-width-cjs "npm:string-width@^4.2.0" - strip-ansi "^7.0.1" - strip-ansi-cjs "npm:strip-ansi@^6.0.1" - wrap-ansi "^8.1.0" - wrap-ansi-cjs "npm:wrap-ansi@^7.0.0" - "@istanbuljs/load-nyc-config@^1.0.0": version "1.1.0" resolved "https://registry.yarnpkg.com/@istanbuljs/load-nyc-config/-/load-nyc-config-1.1.0.tgz#fd3db1d59ecf7cf121e80650bb86712f9b55eced" @@ -1727,24 +1715,15 @@ resolved "https://registry.yarnpkg.com/@matterlabs/hardhat-zksync-deploy/-/hardhat-zksync-deploy-1.5.0.tgz#40cb454fb187da4bb354f3acb48762a6657fcb36" integrity sha512-7LAgYYwoKWHeR+3CyWEvA3NKBKtt7ktcr7SX6ZPgbEYqHAdXH02vxJZGwNADtMWpyYm8h+fEQkpPIgErD4NhmA== dependencies: - "@matterlabs/hardhat-zksync-solc" "^1.0.5" - chalk "4.1.2" - ts-morph "^19.0.0" - -"@matterlabs/hardhat-zksync-deploy@^1.3.0": - version "1.3.0" - resolved "https://registry.yarnpkg.com/@matterlabs/hardhat-zksync-deploy/-/hardhat-zksync-deploy-1.3.0.tgz#5c2b723318ddf6c4d3929ec225401864ff54557a" - integrity sha512-4UHOgOwIBC4JA3W8DE9GHqbAuBhCPAjtM+Oew1aiYYGkIsPUAMYsH35+4I2FzJsYyE6mD6ATmoS/HfZweQHTlQ== - dependencies: - "@matterlabs/hardhat-zksync-solc" "^1.0.4" - chai "^4.3.6" - chalk "4.1.2" + "@matterlabs/hardhat-zksync-solc" "^1.2.0" + chai "^4.3.4" + chalk "^4.1.2" fs-extra "^11.2.0" - glob "^10.3.10" + glob "^10.4.1" lodash "^4.17.21" - sinon "^17.0.1" + sinon "^18.0.0" sinon-chai "^3.7.0" - ts-morph "^21.0.1" + ts-morph "^22.0.0" "@matterlabs/hardhat-zksync-node@^0.0.1-beta.7": version "0.0.1" @@ -1789,7 +1768,7 @@ chalk "4.1.2" dockerode "^3.3.4" -"@matterlabs/hardhat-zksync-solc@^1.0.4", "@matterlabs/hardhat-zksync-solc@^1.0.5", 
"@matterlabs/hardhat-zksync-solc@^1.1.4": +"@matterlabs/hardhat-zksync-solc@^1.0.5", "@matterlabs/hardhat-zksync-solc@^1.1.4": version "1.1.4" resolved "https://registry.yarnpkg.com/@matterlabs/hardhat-zksync-solc/-/hardhat-zksync-solc-1.1.4.tgz#04a2fad6fb6b6944c64ad969080ee65b9af3f617" integrity sha512-4/usbogh9neewR2/v8Dn2OzqVblZMUuT/iH2MyPZgPRZYQlL4SlZtMvokU9UQjZT6iSoaKCbbdWESHDHSzfUjA== @@ -1823,10 +1802,10 @@ sinon-chai "^3.7.0" undici "^6.18.2" -"@matterlabs/hardhat-zksync-verify@^0.4.0": - version "0.4.0" - resolved "https://registry.yarnpkg.com/@matterlabs/hardhat-zksync-verify/-/hardhat-zksync-verify-0.4.0.tgz#f812c19950022fc36728f3796f6bdae5633e2fcd" - integrity sha512-GPZmAumFl3ZMPKbECX7Qw8CriwZKWd1DlCRhoG/6YYc6mFy4+MXkF1XsHLMs5r34N+GDOfbVZVMeftIlJC96Kg== +"@matterlabs/hardhat-zksync-solc@^1.2.4": + version "1.2.5" + resolved "https://registry.yarnpkg.com/@matterlabs/hardhat-zksync-solc/-/hardhat-zksync-solc-1.2.5.tgz#fbeeabc3fea0dd232fa3c8cb31bd93c103eba11a" + integrity sha512-iZyznWl1Hoe/Z46hnUe1s2drBZBjJOS/eN+Ql2lIBX9B6NevBl9DYzkKzH5HEIMCLGnX9sWpRAJqUQJWy9UB6w== dependencies: "@nomiclabs/hardhat-docker" "^2.0.2" chai "^4.3.4" @@ -1871,20 +1850,20 @@ sinon "^18.0.0" sinon-chai "^3.7.0" -"@matterlabs/hardhat-zksync-vyper@^1.0.8": - version "1.0.8" - resolved "https://registry.yarnpkg.com/@matterlabs/hardhat-zksync-vyper/-/hardhat-zksync-vyper-1.0.8.tgz#d5bd496715a1e322b0bf3926b4146b4e18ab64ff" - integrity sha512-XR7rbfDuBG5/LZWYfhQTP9gD+U24hSJHDuZ9U55wgIfiQTOxPoztFwEbQNiC39vjT5MjP/Nv8/IDrlEBkaVCgw== +"@matterlabs/hardhat-zksync-vyper@^1.1.0": + version "1.1.0" + resolved "https://registry.yarnpkg.com/@matterlabs/hardhat-zksync-vyper/-/hardhat-zksync-vyper-1.1.0.tgz#b3fb304429e88a84b4abc3fe4e5a83b2f5e907bd" + integrity sha512-zDjHPeIuHRpumXiWZUbhoji4UJe09jTDRn4xnxsuVkLH7qLAm0VDFzCXYNMvEuySZSdhbSbekxJsH9Kunc5ycA== dependencies: - "@nomiclabs/hardhat-docker" "^2.0.0" - chai "^4.3.6" - chalk "4.1.2" + "@nomiclabs/hardhat-docker" "^2.0.2" + chai "^4.3.4" + chalk "^4.1.2" dockerode "^4.0.2" - fs-extra "^11.1.1" - semver "^7.5.4" - sinon "^17.0.1" + fs-extra "^11.2.0" + semver "^7.6.2" + sinon "^18.0.0" sinon-chai "^3.7.0" - undici "^5.14.0" + undici "^6.18.2" "@matterlabs/prettier-config@^1.0.3": version "1.0.3" @@ -2324,11 +2303,6 @@ resolved "https://registry.yarnpkg.com/@openzeppelin/contracts/-/contracts-4.9.6.tgz#2a880a24eb19b4f8b25adc2a5095f2aa27f39677" integrity sha512-xSmezSupL+y9VkHZJGDoCBpmnB2ogM13ccaYDWqJTfS3dbuHkgjuwDFUmaFauBCboQMGB/S5UqUl2y54X99BmA== -"@pkgjs/parseargs@^0.11.0": - version "0.11.0" - resolved "https://registry.yarnpkg.com/@pkgjs/parseargs/-/parseargs-0.11.0.tgz#a77ea742fab25775145434eb1d2328cf5013ac33" - integrity sha512-+1VkjdD0QBLPodGrJUeqarH8VAIvQODIbwh9XpP5Syisf7YoQgsJKPNFoqqLQlu+VQ/tVSshMR6loPMn8U+dPg== - "@pkgr/core@^0.1.0": version "0.1.1" resolved "https://registry.yarnpkg.com/@pkgr/core/-/core-0.1.1.tgz#1ec17e2edbec25c8306d424ecfbf13c7de1aaa31" @@ -2659,16 +2633,6 @@ mkdirp "^2.1.6" path-browserify "^1.0.1" -"@ts-morph/common@~0.22.0": - version "0.22.0" - resolved "https://registry.yarnpkg.com/@ts-morph/common/-/common-0.22.0.tgz#8951d451622a26472fbc3a227d6c3a90e687a683" - integrity sha512-HqNBuV/oIlMKdkLshXd1zKBqNQCsuPEsgQOkfFQ/eUKjRlwndXW1AjN9LVkBEIukm00gGXSRmfkl0Wv5VXLnlw== - dependencies: - fast-glob "^3.3.2" - minimatch "^9.0.3" - mkdirp "^3.0.1" - path-browserify "^1.0.1" - "@tsconfig/node10@^1.0.7": version "1.0.11" resolved "https://registry.yarnpkg.com/@tsconfig/node10/-/node10-1.0.11.tgz#6ee46400685f130e278128c7b38b7e031ff5b2f2" @@ 
-3341,11 +3305,6 @@ ansi-regex@^5.0.1: resolved "https://registry.yarnpkg.com/ansi-regex/-/ansi-regex-5.0.1.tgz#082cb2c89c9fe8659a311a53bd6a4dc5301db304" integrity sha512-quJQXlTSUGL2LH9SUXo8VwsY4soanhgo6LNSm84E1LBcE8s3O0wpdiRzyR9z/ZZJMlMWv37qOOb9pdJlMUEKFQ== -ansi-regex@^6.0.1: - version "6.0.1" - resolved "https://registry.yarnpkg.com/ansi-regex/-/ansi-regex-6.0.1.tgz#3183e38fae9a65d7cb5e53945cd5897d0260a06a" - integrity sha512-n5M855fKb2SsfMIiFFoVrABHJC8QtHwVx+mHWP3QcEqBHYienj5dHSgjbxtC0WEZXYt4wcD6zrQElDPhFuZgfA== - ansi-styles@^3.2.1: version "3.2.1" resolved "https://registry.yarnpkg.com/ansi-styles/-/ansi-styles-3.2.1.tgz#41fbb20243e50b12be0f04b8dedbf07520ce841d" @@ -3365,11 +3324,6 @@ ansi-styles@^5.0.0: resolved "https://registry.yarnpkg.com/ansi-styles/-/ansi-styles-5.2.0.tgz#07449690ad45777d1924ac2abb2fc8895dba836b" integrity sha512-Cxwpt2SfTzTtXcfOlzGEee8O+c+MmUgGrNiBcXnuWxuFJHe6a5Hz7qwhwe5OgaSYI0IJvkLqWX1ASG+cJOkEiA== -ansi-styles@^6.1.0: - version "6.2.1" - resolved "https://registry.yarnpkg.com/ansi-styles/-/ansi-styles-6.2.1.tgz#0e62320cf99c21afff3b3012192546aacbfb05c5" - integrity sha512-bN798gFfQX+viw3R7yrGWRqnrN2oRkEkUjjl4JNn4E8GxxbjtG3FbrEIIY3l8/hrwUwIeCZvi4QuOTP4MErVug== - antlr4@^4.11.0: version "4.13.1" resolved "https://registry.yarnpkg.com/antlr4/-/antlr4-4.13.1.tgz#1e0a1830a08faeb86217cb2e6c34716004e4253d" @@ -4481,7 +4435,7 @@ cross-spawn@^6.0.5: shebang-command "^1.2.0" which "^1.2.9" -cross-spawn@^7.0.0, cross-spawn@^7.0.2, cross-spawn@^7.0.3: +cross-spawn@^7.0.2, cross-spawn@^7.0.3: version "7.0.3" resolved "https://registry.yarnpkg.com/cross-spawn/-/cross-spawn-7.0.3.tgz#f73a85b9d5d41d045551c177e2882d4ac85728a6" integrity sha512-iRDPJKUPVEND7dHPO8rkbOnPpyDygcDFtWjpeWNCgy8WP2rXcxXL8TskReQl6OrB2G7+UJrags1q15Fudc7G6w== @@ -4784,11 +4738,6 @@ dotenv@^8.2.0: resolved "https://registry.yarnpkg.com/dotenv/-/dotenv-8.6.0.tgz#061af664d19f7f4d8fc6e4ff9b584ce237adcb8b" integrity sha512-IrPdXQsk2BbzvCBGBOTmmSH5SodmqZNt4ERAZDmW4CT+tL8VtvinqywuANaFu4bOMWki16nqf0e4oC0QIaDr/g== -eastasianwidth@^0.2.0: - version "0.2.0" - resolved "https://registry.yarnpkg.com/eastasianwidth/-/eastasianwidth-0.2.0.tgz#696ce2ec0aa0e6ea93a397ffcf24aa7840c827cb" - integrity sha512-I88TYZWc9XiYHRQ4/3c5rjjfgkjhLyW2luGIheGERbNQ6OY7yTybanSpDXZa8y7VUP9YmDcYa+eyq4ca7iLqWA== - ecc-jsbn@~0.1.1: version "0.1.2" resolved "https://registry.yarnpkg.com/ecc-jsbn/-/ecc-jsbn-0.1.2.tgz#3a83a904e54353287874c564b7549386849a98c9" @@ -4855,11 +4804,6 @@ emoji-regex@^8.0.0: resolved "https://registry.yarnpkg.com/emoji-regex/-/emoji-regex-8.0.0.tgz#e818fd69ce5ccfcb404594f842963bf53164cc37" integrity sha512-MSjYzcWNOA0ewAHpz0MxpYFvwg6yjy1NG3xteoqz644VCo/RPgnr1/GGt+ic3iJTzQ8Eu3TdM14SawnVUmGE6A== -emoji-regex@^9.2.2: - version "9.2.2" - resolved "https://registry.yarnpkg.com/emoji-regex/-/emoji-regex-9.2.2.tgz#840c8803b0d8047f4ff0cf963176b32d4ef3ed72" - integrity sha512-L18DaJsXSUk2+42pv8mLs5jJT2hqFkFE4j21wOmgbUqsZ2hL72NsUU785g9RXgo3s0ZNgVl42TiHp3ZtOv/Vyg== - encoding-down@^6.3.0: version "6.3.0" resolved "https://registry.yarnpkg.com/encoding-down/-/encoding-down-6.3.0.tgz#b1c4eb0e1728c146ecaef8e32963c549e76d082b" @@ -5830,14 +5774,6 @@ for-each@^0.3.3: dependencies: is-callable "^1.1.3" -foreground-child@^3.1.0: - version "3.1.1" - resolved "https://registry.yarnpkg.com/foreground-child/-/foreground-child-3.1.1.tgz#1d173e776d75d2772fed08efe4a0de1ea1b12d0d" - integrity sha512-TMKDUnIte6bfb5nWv7V/caI169OHgvwjb7V4WkeUvbQQdjr5rWKqHFiKWb/fcOwB+CzBT+qbWjvj+DVwRskpIg== - dependencies: - cross-spawn "^7.0.0" - signal-exit 
"^4.0.1" - forever-agent@~0.6.1: version "0.6.1" resolved "https://registry.yarnpkg.com/forever-agent/-/forever-agent-0.6.1.tgz#fbc71f0c41adeb37f96c577ad1ed42d8fdacca91" @@ -6126,17 +6062,6 @@ glob@8.1.0, glob@^8.0.3: minimatch "^5.0.1" once "^1.3.0" -glob@^10.3.10: - version "10.3.16" - resolved "https://registry.yarnpkg.com/glob/-/glob-10.3.16.tgz#bf6679d5d51279c8cfae4febe0d051d2a4bf4c6f" - integrity sha512-JDKXl1DiuuHJ6fVS2FXjownaavciiHNUU4mOvV/B793RLh05vZL1rcPnCSaOgv1hDT6RDlY7AB7ZUvFYAtPgAw== - dependencies: - foreground-child "^3.1.0" - jackspeak "^3.1.2" - minimatch "^9.0.1" - minipass "^7.0.4" - path-scurry "^1.11.0" - glob@^5.0.15: version "5.0.15" resolved "https://registry.yarnpkg.com/glob/-/glob-5.0.15.tgz#1bc936b9e02f4a603fcc222ecf7633d30b8b93b1" @@ -7049,15 +6974,6 @@ istanbul-reports@^3.1.3: html-escaper "^2.0.0" istanbul-lib-report "^3.0.0" -jackspeak@^3.1.2: - version "3.1.2" - resolved "https://registry.yarnpkg.com/jackspeak/-/jackspeak-3.1.2.tgz#eada67ea949c6b71de50f1b09c92a961897b90ab" - integrity sha512-kWmLKn2tRtfYMF/BakihVVRzBKOxz4gJMiL2Rj91WnAB5TPZumSH99R/Yf1qE1u4uRimvCSJfm6hnxohXeEXjQ== - dependencies: - "@isaacs/cliui" "^8.0.2" - optionalDependencies: - "@pkgjs/parseargs" "^0.11.0" - jest-changed-files@^29.7.0: version "29.7.0" resolved "https://registry.yarnpkg.com/jest-changed-files/-/jest-changed-files-29.7.0.tgz#1c06d07e77c78e1585d020424dedc10d6e17ac3a" @@ -7961,11 +7877,6 @@ lowercase-keys@^3.0.0: resolved "https://registry.yarnpkg.com/lowercase-keys/-/lowercase-keys-3.0.0.tgz#c5e7d442e37ead247ae9db117a9d0a467c89d4f2" integrity sha512-ozCC6gdQ+glXOQsveKD0YsDy8DSQFjDTz4zyzEHNV5+JP5D62LmfDZ6o1cycFx9ouG940M5dE8C8CTewdj2YWQ== -lru-cache@^10.2.0: - version "10.2.2" - resolved "https://registry.yarnpkg.com/lru-cache/-/lru-cache-10.2.2.tgz#48206bc114c1252940c41b25b41af5b545aca878" - integrity sha512-9hp3Vp2/hFQUiIwKo8XCeFVnrg8Pk3TYNPIR7tJADKi5YfcF7vEaK7avFHTlSy3kOKYaJQaalfEo6YuXdceBOQ== - lru-cache@^5.1.1: version "5.1.1" resolved "https://registry.yarnpkg.com/lru-cache/-/lru-cache-5.1.1.tgz#1da27e6710271947695daf6848e847f01d84b920" @@ -8264,13 +8175,6 @@ minimatch@^7.4.3: dependencies: brace-expansion "^2.0.1" -minimatch@^9.0.1, minimatch@^9.0.3: - version "9.0.4" - resolved "https://registry.yarnpkg.com/minimatch/-/minimatch-9.0.4.tgz#8e49c731d1749cbec05050ee5145147b32496a51" - integrity sha512-KqWh+VchfxcMNRAJjj2tnsSJdNbHsVgnkBhTNrW7AjVo6OvLtxw8zfT9oLw1JSohlFzJ8jCoTgaoXvJ+kHt6fw== - dependencies: - brace-expansion "^2.0.1" - minimatch@~3.0.4: version "3.0.8" resolved "https://registry.yarnpkg.com/minimatch/-/minimatch-3.0.8.tgz#5e6a59bd11e2ab0de1cfb843eb2d82e546c321c1" @@ -8283,11 +8187,6 @@ minimist@^1.2.0, minimist@^1.2.5, minimist@^1.2.6, minimist@^1.2.8, minimist@~1. 
resolved "https://registry.yarnpkg.com/minimist/-/minimist-1.2.8.tgz#c1a464e7693302e082a075cee0c057741ac4772c" integrity sha512-2yyAR8qBkN3YuheJanUpWC5U3bb5osDywNB8RzDVlDwDHbocAJveqqj1u8+SVD7jkWT4yvsHCpWqqWqAxb0zCA== -"minipass@^5.0.0 || ^6.0.2 || ^7.0.0", minipass@^7.0.4: - version "7.1.1" - resolved "https://registry.yarnpkg.com/minipass/-/minipass-7.1.1.tgz#f7f85aff59aa22f110b20e27692465cf3bf89481" - integrity sha512-UZ7eQ+h8ywIRAW1hIEl2AqdwzJucU/Kp59+8kkZeSvafXhZjul247BvIJjEVFVeON6d7lM46XX1HXCduKAS8VA== - mkdirp-classic@^0.5.2: version "0.5.3" resolved "https://registry.yarnpkg.com/mkdirp-classic/-/mkdirp-classic-0.5.3.tgz#fa10c9115cc6d8865be221ba47ee9bed78601113" @@ -8310,11 +8209,6 @@ mkdirp@^2.1.6: resolved "https://registry.yarnpkg.com/mkdirp/-/mkdirp-2.1.6.tgz#964fbcb12b2d8c5d6fbc62a963ac95a273e2cc19" integrity sha512-+hEnITedc8LAtIP9u3HJDFIdcLV2vXP33sqLLIzkv1Db1zO/1OxbvYf0Y1OC/S/Qo5dxHXepofhmxL02PsKe+A== -mkdirp@^3.0.1: - version "3.0.1" - resolved "https://registry.yarnpkg.com/mkdirp/-/mkdirp-3.0.1.tgz#e44e4c5607fb279c168241713cc6e0fea9adcb50" - integrity sha512-+NsyUUAZDmo6YVHzL/stxSu3t9YS1iljliy3BSDrXJ/dkn1KYdmtZODGGjLcc9XLgVVpH4KshHB8XmZgMhaBXg== - mnemonist@^0.38.0: version "0.38.5" resolved "https://registry.yarnpkg.com/mnemonist/-/mnemonist-0.38.5.tgz#4adc7f4200491237fe0fa689ac0b86539685cade" @@ -8780,16 +8674,6 @@ package-json@^8.1.0: registry-url "^6.0.0" semver "^7.3.7" -package-json@^8.1.0: - version "8.1.1" - resolved "https://registry.yarnpkg.com/package-json/-/package-json-8.1.1.tgz#3e9948e43df40d1e8e78a85485f1070bf8f03dc8" - integrity sha512-cbH9IAIJHNj9uXi196JVsRlt7cHKak6u/e6AkL/bkRelZ7rlL3X1YKxsZwa36xipOEKAsdtmaG6aAJoM1fx2zA== - dependencies: - got "^12.1.0" - registry-auth-token "^5.0.1" - registry-url "^6.0.0" - semver "^7.3.7" - parent-module@^1.0.0: version "1.0.1" resolved "https://registry.yarnpkg.com/parent-module/-/parent-module-1.0.1.tgz#691d2709e78c79fae3a156622452d00762caaaa2" @@ -8855,14 +8739,6 @@ path-parse@^1.0.6, path-parse@^1.0.7: resolved "https://registry.yarnpkg.com/path-parse/-/path-parse-1.0.7.tgz#fbc114b60ca42b30d9daf5858e4bd68bbedb6735" integrity sha512-LDJzPVEEEPR+y48z93A0Ed0yXb8pAByGWo/k5YYdYgpY2/2EsOsksJrq7lOHxryrVOn1ejG6oAp8ahvOIQD8sw== -path-scurry@^1.11.0: - version "1.11.1" - resolved "https://registry.yarnpkg.com/path-scurry/-/path-scurry-1.11.1.tgz#7960a668888594a0720b12a911d1a742ab9f11d2" - integrity sha512-Xa4Nw17FS9ApQFJ9umLiJS4orGjm7ZzwUrwamcGQuHSzDyth9boKDaycYdDcZDuqYATXw4HFXgaqWTctW/v1HA== - dependencies: - lru-cache "^10.2.0" - minipass "^5.0.0 || ^6.0.2 || ^7.0.0" - path-to-regexp@^6.2.1: version "6.2.2" resolved "https://registry.yarnpkg.com/path-to-regexp/-/path-to-regexp-6.2.2.tgz#324377a83e5049cbecadc5554d6a63a9a4866b36" @@ -9863,11 +9739,6 @@ signal-exit@^3.0.2, signal-exit@^3.0.3, signal-exit@^3.0.7: resolved "https://registry.yarnpkg.com/signal-exit/-/signal-exit-3.0.7.tgz#a9a1767f8af84155114eaabd73f99273c8f59ad9" integrity sha512-wnD2ZE+l+SPC/uoS0vXeE9L1+0wuaMqKlfz9AMUo38JsyLSBWSFcHR1Rri62LZc12vLr1gb3jl7iwQhgwpAbGQ== -signal-exit@^4.0.1: - version "4.1.0" - resolved "https://registry.yarnpkg.com/signal-exit/-/signal-exit-4.1.0.tgz#952188c1cbd546070e2dd20d0f41c0ae0530cb04" - integrity sha512-bzyZ1e88w9O1iNJbKnOlvYTrWPDl46O1bG0D3XInv+9tkPrxrN8jUUTiFlDkkmKWgn1M6CfIA13SuGqOa9Korw== - sinon-chai@^3.7.0: version "3.7.0" resolved "https://registry.yarnpkg.com/sinon-chai/-/sinon-chai-3.7.0.tgz#cfb7dec1c50990ed18c153f1840721cf13139783" @@ -10199,15 +10070,6 @@ string-length@^4.0.1: char-regex "^1.0.2" strip-ansi "^6.0.0" 
-"string-width-cjs@npm:string-width@^4.2.0", string-width@^4.0.0, string-width@^4.1.0, string-width@^4.2.0, string-width@^4.2.2, string-width@^4.2.3: - version "4.2.3" - resolved "https://registry.yarnpkg.com/string-width/-/string-width-4.2.3.tgz#269c7117d27b05ad2e536830a8ec895ef9c6d010" - integrity sha512-wKyQRQpjJ0sIp62ErSZdGsjMJWsap5oRNihHhu6G7JVO/9jIB6UyevL+tXuOqrng8j/cxKTWyWUwvSTriiZz/g== - dependencies: - emoji-regex "^8.0.0" - is-fullwidth-code-point "^3.0.0" - strip-ansi "^6.0.1" - string-width@^2.1.0, string-width@^2.1.1: version "2.1.1" resolved "https://registry.yarnpkg.com/string-width/-/string-width-2.1.1.tgz#ab93f27a8dc13d28cac815c462143a6d9012ae9e" @@ -10216,14 +10078,14 @@ string-width@^2.1.0, string-width@^2.1.1: is-fullwidth-code-point "^2.0.0" strip-ansi "^4.0.0" -string-width@^5.0.1, string-width@^5.1.2: - version "5.1.2" - resolved "https://registry.yarnpkg.com/string-width/-/string-width-5.1.2.tgz#14f8daec6d81e7221d2a357e668cab73bdbca794" - integrity sha512-HnLOCR3vjcY8beoNLtcjZ5/nxn2afmME6lhrDrebokqMap+XbeW8n9TXpPDOqdGK5qcI3oT0GKTW6wC7EMiVqA== +string-width@^4.0.0, string-width@^4.1.0, string-width@^4.2.0, string-width@^4.2.2, string-width@^4.2.3: + version "4.2.3" + resolved "https://registry.yarnpkg.com/string-width/-/string-width-4.2.3.tgz#269c7117d27b05ad2e536830a8ec895ef9c6d010" + integrity sha512-wKyQRQpjJ0sIp62ErSZdGsjMJWsap5oRNihHhu6G7JVO/9jIB6UyevL+tXuOqrng8j/cxKTWyWUwvSTriiZz/g== dependencies: - eastasianwidth "^0.2.0" - emoji-regex "^9.2.2" - strip-ansi "^7.0.1" + emoji-regex "^8.0.0" + is-fullwidth-code-point "^3.0.0" + strip-ansi "^6.0.1" string.prototype.padend@^3.0.0: version "3.1.6" @@ -10282,13 +10144,6 @@ string_decoder@~1.1.1: dependencies: safe-buffer "~5.1.0" -"strip-ansi-cjs@npm:strip-ansi@^6.0.1", strip-ansi@^6.0.0, strip-ansi@^6.0.1: - version "6.0.1" - resolved "https://registry.yarnpkg.com/strip-ansi/-/strip-ansi-6.0.1.tgz#9e26c63d30f53443e9489495b2105d37b67a85d9" - integrity sha512-Y38VPSHcqkFrCpFnQ9vuSXmquuv5oXOKpGeT6aGrr3o3Gc9AlVa6JBfUSOCnbxGGZF+/0ooI7KrPuUSztUdU5A== - dependencies: - ansi-regex "^5.0.1" - strip-ansi@^4.0.0: version "4.0.0" resolved "https://registry.yarnpkg.com/strip-ansi/-/strip-ansi-4.0.0.tgz#a8479022eb1ac368a871389b635262c505ee368f" @@ -10303,12 +10158,12 @@ strip-ansi@^5.1.0: dependencies: ansi-regex "^4.1.0" -strip-ansi@^7.0.1: - version "7.1.0" - resolved "https://registry.yarnpkg.com/strip-ansi/-/strip-ansi-7.1.0.tgz#d5b6568ca689d8561370b0707685d22434faff45" - integrity sha512-iq6eVVI64nQQTRYq2KtEg2d2uU7LElhTJwsH4YzIHZshxlgZms/wIc4VoDQTlG/IvVIrBKG06CrZnp0qv7hkcQ== +strip-ansi@^6.0.0, strip-ansi@^6.0.1: + version "6.0.1" + resolved "https://registry.yarnpkg.com/strip-ansi/-/strip-ansi-6.0.1.tgz#9e26c63d30f53443e9489495b2105d37b67a85d9" + integrity sha512-Y38VPSHcqkFrCpFnQ9vuSXmquuv5oXOKpGeT6aGrr3o3Gc9AlVa6JBfUSOCnbxGGZF+/0ooI7KrPuUSztUdU5A== dependencies: - ansi-regex "^6.0.1" + ansi-regex "^5.0.1" strip-bom@^3.0.0: version "3.0.0" @@ -10413,6 +10268,7 @@ synckit@^0.8.6: fast-glob "^3.3.2" hardhat "=2.22.2" preprocess "^3.2.0" + zksync-ethers "^5.9.0" table-layout@^1.0.2: version "1.0.2" @@ -10664,14 +10520,6 @@ ts-morph@^19.0.0: "@ts-morph/common" "~0.20.0" code-block-writer "^12.0.0" -ts-morph@^21.0.1: - version "21.0.1" - resolved "https://registry.yarnpkg.com/ts-morph/-/ts-morph-21.0.1.tgz#712302a0f6e9dbf1aa8d9cf33a4386c4b18c2006" - integrity sha512-dbDtVdEAncKctzrVZ+Nr7kHpHkv+0JDJb2MjjpBaj8bFeCkePU9rHfMklmhuLFnpeq/EJZk2IhStY6NzqgjOkg== - dependencies: - "@ts-morph/common" "~0.22.0" - code-block-writer 
"^12.0.0" - ts-node@^10.1.0, ts-node@^10.7.0: version "10.9.2" resolved "https://registry.yarnpkg.com/ts-node/-/ts-node-10.9.2.tgz#70f021c9e185bccdca820e26dc413805c101c71f" @@ -11152,7 +11000,7 @@ workerpool@6.2.1: resolved "https://registry.yarnpkg.com/workerpool/-/workerpool-6.2.1.tgz#46fc150c17d826b86a008e5a4508656777e9c343" integrity sha512-ILEIE97kDZvF9Wb9f6h5aXK4swSlKGUcOEGiIYb2OOu/IrDU9iwj0fD//SsA6E5ibwJxpEvhullJY4Sl4GcpAw== -"wrap-ansi-cjs@npm:wrap-ansi@^7.0.0", wrap-ansi@^7.0.0: +wrap-ansi@^7.0.0: version "7.0.0" resolved "https://registry.yarnpkg.com/wrap-ansi/-/wrap-ansi-7.0.0.tgz#67e145cff510a6a6984bdf1152911d69d2eb9e43" integrity sha512-YVGIj2kamLSTxw6NsZjoBxfSwsn0ycdesmc4p+Q21c5zPuZ1pl+NfxVdxPtdHvmNVOQ6XSYG4AUtyt/Fi7D16Q== @@ -11161,15 +11009,6 @@ workerpool@6.2.1: string-width "^4.1.0" strip-ansi "^6.0.0" -wrap-ansi@^8.1.0: - version "8.1.0" - resolved "https://registry.yarnpkg.com/wrap-ansi/-/wrap-ansi-8.1.0.tgz#56dc22368ee570face1b49819975d9b9a5ead214" - integrity sha512-si7QWI6zUMq56bESFvagtmzMdGOtoxfR+Sez11Mobfc7tm+VkUckk9bW2UeffTGVUbOksxmSw0AA2gs8g71NCQ== - dependencies: - ansi-styles "^6.1.0" - string-width "^5.0.1" - strip-ansi "^7.0.1" - wrappy@1: version "1.0.2" resolved "https://registry.yarnpkg.com/wrappy/-/wrappy-1.0.2.tgz#b5243d8f3ec1aa35f1364605bc0d1036e30ab69f" diff --git a/zkstack_cli/crates/config/src/contracts.rs b/zkstack_cli/crates/config/src/contracts.rs index e6676989e68c..6d336b5cfc17 100644 --- a/zkstack_cli/crates/config/src/contracts.rs +++ b/zkstack_cli/crates/config/src/contracts.rs @@ -85,6 +85,7 @@ impl ContractsConfig { ) -> anyhow::Result<()> { self.bridges.shared.l2_address = Some(initialize_bridges_output.l2_shared_bridge_proxy); self.bridges.erc20.l2_address = Some(initialize_bridges_output.l2_shared_bridge_proxy); + self.l2.legacy_shared_bridge_addr = Some(initialize_bridges_output.l2_shared_bridge_proxy); Ok(()) } @@ -159,4 +160,5 @@ pub struct L2Contracts { pub default_l2_upgrader: Address, pub consensus_registry: Option
<Address>,
     pub multicall3: Option<Address>,
+    pub legacy_shared_bridge_addr: Option<Address>
, } From ee73a3973b0c65b1d4acef12e4b64db8f813e77d Mon Sep 17 00:00:00 2001 From: Daniyar Itegulov Date: Thu, 24 Oct 2024 20:51:48 +1100 Subject: [PATCH 06/32] feat(zkstack_cli): use docker-managed volumes (#3140) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit ## What ❔ Host-bound volumes never get cleaned even if you do `docker compose down -v`. Existing dev flows don't seem to rely on volumes being on the host machine, so this is PoC of how we can move to Docker-managed volumes. ## Why ❔ Avoid pesky bugs that prevent user from deleting `./volumes`, rely on Docker to persist and dispose of data as need be ## Checklist - [ ] PR title corresponds to the body of PR (we generate changelog entries from PRs). - [ ] Tests for the changes have been added / updated. - [ ] Documentation comments have been added / updated. - [ ] Code has been formatted via `zkstack dev fmt` and `zkstack dev lint`. --- .../workflows/build-contract-verifier-template.yml | 1 - .github/workflows/build-core-template.yml | 1 - .github/workflows/build-local-node-docker.yml | 1 - .github/workflows/build-prover-template.yml | 1 - .../workflows/build-witness-generator-template.yml | 1 - .github/workflows/ci-common-reusable.yml | 1 - .github/workflows/ci-prover-e2e.yml | 1 - .github/workflows/ci-prover-reusable.yml | 2 -- bin/ci_localnet_up | 1 - docker-compose-gpu-runner-cuda-12-0.yml | 8 ++++++-- docker-compose-runner-nightly.yml | 5 ++++- docker-compose-unit-tests.yml | 1 - docker-compose.yml | 13 ++++++++----- zkstack_cli/crates/common/src/docker.rs | 6 +++++- .../crates/zkstack/src/commands/containers.rs | 12 ------------ .../zkstack/src/commands/dev/commands/clean/mod.rs | 6 +----- .../crates/zkstack/src/commands/dev/messages.rs | 4 +--- 17 files changed, 25 insertions(+), 40 deletions(-) diff --git a/.github/workflows/build-contract-verifier-template.yml b/.github/workflows/build-contract-verifier-template.yml index e4d04b90410e..1481e542de57 100644 --- a/.github/workflows/build-contract-verifier-template.yml +++ b/.github/workflows/build-contract-verifier-template.yml @@ -101,7 +101,6 @@ jobs: - name: start-services run: | echo "IMAGE_TAG_SUFFIX=${{ env.IMAGE_TAG_SUFFIX }}" >> .env - mkdir -p ./volumes/postgres run_retried docker compose pull zk postgres docker compose up -d zk postgres ci_run pre_download_compilers.sh diff --git a/.github/workflows/build-core-template.yml b/.github/workflows/build-core-template.yml index fe1d23427645..15d4432191dd 100644 --- a/.github/workflows/build-core-template.yml +++ b/.github/workflows/build-core-template.yml @@ -114,7 +114,6 @@ jobs: - name: start-services run: | echo "IMAGE_TAG_SUFFIX=${{ env.IMAGE_TAG_SUFFIX }}" >> .env - mkdir -p ./volumes/postgres run_retried docker compose pull zk postgres docker compose up -d zk postgres ci_run pre_download_compilers.sh diff --git a/.github/workflows/build-local-node-docker.yml b/.github/workflows/build-local-node-docker.yml index 80142cb6005c..cbb4239b5725 100644 --- a/.github/workflows/build-local-node-docker.yml +++ b/.github/workflows/build-local-node-docker.yml @@ -50,7 +50,6 @@ jobs: - name: start-services run: | - mkdir -p ./volumes/postgres run_retried docker compose pull zk postgres docker compose up -d zk postgres diff --git a/.github/workflows/build-prover-template.yml b/.github/workflows/build-prover-template.yml index 2dcb5dadb174..91de5dd51ecf 100644 --- a/.github/workflows/build-prover-template.yml +++ b/.github/workflows/build-prover-template.yml @@ -75,7 +75,6 @@ jobs: - name: 
start-services run: | echo "IMAGE_TAG_SUFFIX=${{ env.IMAGE_TAG_SUFFIX }}" >> .env - mkdir -p ./volumes/postgres run_retried docker compose pull zk postgres docker compose up -d zk postgres ci_run sccache --start-server diff --git a/.github/workflows/build-witness-generator-template.yml b/.github/workflows/build-witness-generator-template.yml index 33d78b3cf2fc..d9493f97cae1 100644 --- a/.github/workflows/build-witness-generator-template.yml +++ b/.github/workflows/build-witness-generator-template.yml @@ -75,7 +75,6 @@ jobs: - name: start-services run: | echo "IMAGE_TAG_SUFFIX=${{ env.IMAGE_TAG_SUFFIX }}" >> .env - mkdir -p ./volumes/postgres run_retried docker compose pull zk postgres docker compose up -d zk postgres ci_run sccache --start-server diff --git a/.github/workflows/ci-common-reusable.yml b/.github/workflows/ci-common-reusable.yml index 7d75fb224d6e..ea91fc4a7cd6 100644 --- a/.github/workflows/ci-common-reusable.yml +++ b/.github/workflows/ci-common-reusable.yml @@ -27,7 +27,6 @@ jobs: - name: Start services run: | run_retried docker-compose -f ${RUNNER_COMPOSE_FILE} pull - mkdir -p ./volumes/postgres docker-compose -f ${RUNNER_COMPOSE_FILE} up --build -d zk postgres - name: Install zkstack diff --git a/.github/workflows/ci-prover-e2e.yml b/.github/workflows/ci-prover-e2e.yml index 105ae1f1485d..b0b9caf888fc 100644 --- a/.github/workflows/ci-prover-e2e.yml +++ b/.github/workflows/ci-prover-e2e.yml @@ -29,7 +29,6 @@ jobs: - name: Start services run: | run_retried docker-compose -f ${RUNNER_COMPOSE_FILE} pull - mkdir -p ./volumes/postgres ./volumes/reth/data docker-compose -f ${RUNNER_COMPOSE_FILE} --profile runner up -d --wait ci_run sccache --start-server diff --git a/.github/workflows/ci-prover-reusable.yml b/.github/workflows/ci-prover-reusable.yml index 4154885549b8..7f719b2240db 100644 --- a/.github/workflows/ci-prover-reusable.yml +++ b/.github/workflows/ci-prover-reusable.yml @@ -27,7 +27,6 @@ jobs: - name: Start services run: | run_retried docker-compose -f ${RUNNER_COMPOSE_FILE} pull - mkdir -p ./volumes/postgres docker-compose -f ${RUNNER_COMPOSE_FILE} up --build -d zk postgres - name: Install zkstack @@ -68,7 +67,6 @@ jobs: - name: Start services run: | run_retried docker-compose -f ${RUNNER_COMPOSE_FILE} pull - mkdir -p ./volumes/postgres docker-compose -f ${RUNNER_COMPOSE_FILE} up --build -d zk postgres - name: Install zkstack diff --git a/bin/ci_localnet_up b/bin/ci_localnet_up index 8673a909af77..c399de410d74 100755 --- a/bin/ci_localnet_up +++ b/bin/ci_localnet_up @@ -4,6 +4,5 @@ set -e cd $ZKSYNC_HOME -mkdir -p ./volumes/postgres ./volumes/reth/data run_retried docker-compose pull docker-compose --profile runner up -d --wait diff --git a/docker-compose-gpu-runner-cuda-12-0.yml b/docker-compose-gpu-runner-cuda-12-0.yml index c930fa376f5e..bd91a5a5b0e4 100644 --- a/docker-compose-gpu-runner-cuda-12-0.yml +++ b/docker-compose-gpu-runner-cuda-12-0.yml @@ -6,8 +6,8 @@ services: ports: - 127.0.0.1:8545:8545 volumes: - - type: bind - source: ./volumes/reth/data + - type: volume + source: reth-data target: /rethdata - type: bind source: ./etc/reth/chaindata @@ -69,3 +69,7 @@ services: environment: # We bind only to 127.0.0.1, so setting insecure password is acceptable here - POSTGRES_PASSWORD=notsecurepassword + +volumes: + postgres-data: + reth-data: \ No newline at end of file diff --git a/docker-compose-runner-nightly.yml b/docker-compose-runner-nightly.yml index cadd1009f7a6..4a854aa0b0a4 100644 --- a/docker-compose-runner-nightly.yml +++ 
b/docker-compose-runner-nightly.yml @@ -1,4 +1,3 @@ -version: '3.2' services: zk: image: ghcr.io/matter-labs/zk-environment:latest2.0-lightweight-nightly @@ -15,3 +14,7 @@ services: extends: file: docker-compose.yml service: reth + +volumes: + postgres-data: + reth-data: \ No newline at end of file diff --git a/docker-compose-unit-tests.yml b/docker-compose-unit-tests.yml index ddbc76bb196c..b839be2d9f4f 100644 --- a/docker-compose-unit-tests.yml +++ b/docker-compose-unit-tests.yml @@ -1,4 +1,3 @@ -version: '3.2' name: unit_tests services: # An instance of postgres configured to execute Rust unit-tests, tuned for performance. diff --git a/docker-compose.yml b/docker-compose.yml index 1e3a273ec9a4..d8f40720fe84 100644 --- a/docker-compose.yml +++ b/docker-compose.yml @@ -1,4 +1,3 @@ -version: '3.2' services: reth: restart: always @@ -6,8 +5,8 @@ services: ports: - 127.0.0.1:8545:8545 volumes: - - type: bind - source: ./volumes/reth/data + - type: volume + source: reth-data target: /rethdata - type: bind source: ./etc/reth/chaindata @@ -22,8 +21,8 @@ services: ports: - 127.0.0.1:5432:5432 volumes: - - type: bind - source: ./volumes/postgres + - type: volume + source: postgres-data target: /var/lib/postgresql/data environment: # We bind only to 127.0.0.1, so setting insecure password is acceptable here @@ -56,3 +55,7 @@ services: profiles: - runner network_mode: host + +volumes: + postgres-data: + reth-data: \ No newline at end of file diff --git a/zkstack_cli/crates/common/src/docker.rs b/zkstack_cli/crates/common/src/docker.rs index a5731808814f..71e2040ee31c 100644 --- a/zkstack_cli/crates/common/src/docker.rs +++ b/zkstack_cli/crates/common/src/docker.rs @@ -14,7 +14,11 @@ pub fn up(shell: &Shell, docker_compose_file: &str, detach: bool) -> anyhow::Res } pub fn down(shell: &Shell, docker_compose_file: &str) -> anyhow::Result<()> { - Ok(Cmd::new(cmd!(shell, "docker compose -f {docker_compose_file} down")).run()?) + Ok(Cmd::new(cmd!( + shell, + "docker compose -f {docker_compose_file} down -v" + )) + .run()?) 
} pub fn run(shell: &Shell, docker_image: &str, docker_args: Vec) -> anyhow::Result<()> { diff --git a/zkstack_cli/crates/zkstack/src/commands/containers.rs b/zkstack_cli/crates/zkstack/src/commands/containers.rs index 9c11cc2e3efc..8367289bd67f 100644 --- a/zkstack_cli/crates/zkstack/src/commands/containers.rs +++ b/zkstack_cli/crates/zkstack/src/commands/containers.rs @@ -36,10 +36,6 @@ pub fn run(shell: &Shell, args: ContainersArgs) -> anyhow::Result<()> { } pub fn initialize_docker(shell: &Shell, ecosystem: &EcosystemConfig) -> anyhow::Result<()> { - if !shell.path_exists("volumes") { - create_docker_folders(shell)?; - }; - if !shell.path_exists(DOCKER_COMPOSE_FILE) { copy_dockerfile(shell, ecosystem.link_to_code.clone())?; }; @@ -75,14 +71,6 @@ pub fn start_containers(shell: &Shell, observability: bool) -> anyhow::Result<() Ok(()) } -fn create_docker_folders(shell: &Shell) -> anyhow::Result<()> { - shell.create_dir("volumes")?; - shell.create_dir("volumes/postgres")?; - shell.create_dir("volumes/reth")?; - shell.create_dir("volumes/reth/data")?; - Ok(()) -} - fn copy_dockerfile(shell: &Shell, link_to_code: PathBuf) -> anyhow::Result<()> { let docker_compose_file = link_to_code.join(DOCKER_COMPOSE_FILE); diff --git a/zkstack_cli/crates/zkstack/src/commands/dev/commands/clean/mod.rs b/zkstack_cli/crates/zkstack/src/commands/dev/commands/clean/mod.rs index 4cb419ce7a46..0929f5e4623f 100644 --- a/zkstack_cli/crates/zkstack/src/commands/dev/commands/clean/mod.rs +++ b/zkstack_cli/crates/zkstack/src/commands/dev/commands/clean/mod.rs @@ -5,8 +5,7 @@ use config::{EcosystemConfig, DOCKER_COMPOSE_FILE}; use xshell::Shell; use crate::commands::dev::messages::{ - MSG_CONTRACTS_CLEANING, MSG_CONTRACTS_CLEANING_FINISHED, MSG_DOCKER_COMPOSE_CLEANED, - MSG_DOCKER_COMPOSE_DOWN, MSG_DOCKER_COMPOSE_REMOVE_VOLUMES, + MSG_CONTRACTS_CLEANING, MSG_CONTRACTS_CLEANING_FINISHED, MSG_DOCKER_COMPOSE_DOWN, }; #[derive(Subcommand, Debug)] @@ -35,9 +34,6 @@ pub fn run(shell: &Shell, args: CleanCommands) -> anyhow::Result<()> { pub fn containers(shell: &Shell) -> anyhow::Result<()> { logger::info(MSG_DOCKER_COMPOSE_DOWN); docker::down(shell, DOCKER_COMPOSE_FILE)?; - logger::info(MSG_DOCKER_COMPOSE_REMOVE_VOLUMES); - shell.remove_path("volumes")?; - logger::info(MSG_DOCKER_COMPOSE_CLEANED); Ok(()) } diff --git a/zkstack_cli/crates/zkstack/src/commands/dev/messages.rs b/zkstack_cli/crates/zkstack/src/commands/dev/messages.rs index a38fff5a178a..3d31497b7ebc 100644 --- a/zkstack_cli/crates/zkstack/src/commands/dev/messages.rs +++ b/zkstack_cli/crates/zkstack/src/commands/dev/messages.rs @@ -157,9 +157,7 @@ pub(super) const MSG_UPGRADE_TEST_RUN_INFO: &str = "Running upgrade test"; pub(super) const MSG_UPGRADE_TEST_RUN_SUCCESS: &str = "Upgrade test ran successfully"; // Cleaning related messages -pub(super) const MSG_DOCKER_COMPOSE_DOWN: &str = "docker compose down"; -pub(super) const MSG_DOCKER_COMPOSE_REMOVE_VOLUMES: &str = "docker compose remove volumes"; -pub(super) const MSG_DOCKER_COMPOSE_CLEANED: &str = "docker compose network cleaned"; +pub(super) const MSG_DOCKER_COMPOSE_DOWN: &str = "docker compose down -v"; pub(super) const MSG_CONTRACTS_CLEANING: &str = "Removing contracts building and deployment artifacts"; pub(super) const MSG_CONTRACTS_CLEANING_FINISHED: &str = From 1eb69d467802d07f3fc6502de97ff04a69f952fc Mon Sep 17 00:00:00 2001 From: Patrick Date: Thu, 24 Oct 2024 15:13:12 +0200 Subject: [PATCH 07/32] feat(proof-data-handler): add first processed batch option (#3112) MIME-Version: 1.0 Content-Type: 
text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit ## What ❔ Add an option to the proof data handler to allow the first verified batch to be set. ## Why ❔ To be able to skip some batches if we need to. ## Checklist - [x] PR title corresponds to the body of PR (we generate changelog entries from PRs). - [x] Tests for the changes have been added / updated. - [ ] Documentation comments have been added / updated. - [x] Code has been formatted via `zkstack dev fmt` and `zkstack dev lint`. --- core/lib/config/src/configs/mod.rs | 2 +- .../config/src/configs/proof_data_handler.rs | 33 ++++++++++++++++++- core/lib/config/src/testonly.rs | 5 ++- core/lib/dal/src/tee_proof_generation_dal.rs | 4 +-- core/lib/env_config/src/proof_data_handler.rs | 14 ++++++-- .../protobuf_config/src/proof_data_handler.rs | 16 ++++++--- .../src/proto/config/prover.proto | 3 +- core/node/proof_data_handler/src/lib.rs | 2 +- .../src/tee_request_processor.rs | 6 ++-- core/node/proof_data_handler/src/tests.rs | 12 +++++-- 10 files changed, 78 insertions(+), 19 deletions(-) diff --git a/core/lib/config/src/configs/mod.rs b/core/lib/config/src/configs/mod.rs index a8d136d632ea..b3a7c2913437 100644 --- a/core/lib/config/src/configs/mod.rs +++ b/core/lib/config/src/configs/mod.rs @@ -22,7 +22,7 @@ pub use self::{ genesis::GenesisConfig, object_store::ObjectStoreConfig, observability::{ObservabilityConfig, OpentelemetryConfig}, - proof_data_handler::ProofDataHandlerConfig, + proof_data_handler::{ProofDataHandlerConfig, TeeConfig}, prover_job_monitor::ProverJobMonitorConfig, pruning::PruningConfig, secrets::{DatabaseSecrets, L1Secrets, Secrets}, diff --git a/core/lib/config/src/configs/proof_data_handler.rs b/core/lib/config/src/configs/proof_data_handler.rs index de7f6969b05f..1094b1bb1801 100644 --- a/core/lib/config/src/configs/proof_data_handler.rs +++ b/core/lib/config/src/configs/proof_data_handler.rs @@ -1,12 +1,43 @@ use std::time::Duration; use serde::Deserialize; +use zksync_basic_types::L1BatchNumber; + +#[derive(Debug, Deserialize, Clone, PartialEq)] +pub struct TeeConfig { + /// If true, the TEE support is enabled. + pub tee_support: bool, + /// All batches before this one are considered to be processed. + pub first_tee_processed_batch: L1BatchNumber, +} + +impl Default for TeeConfig { + fn default() -> Self { + TeeConfig { + tee_support: Self::default_tee_support(), + first_tee_processed_batch: Self::default_first_tee_processed_batch(), + } + } +} + +impl TeeConfig { + pub fn default_tee_support() -> bool { + false + } + + pub fn default_first_tee_processed_batch() -> L1BatchNumber { + L1BatchNumber(0) + } +} #[derive(Debug, Deserialize, Clone, PartialEq)] pub struct ProofDataHandlerConfig { pub http_port: u16, pub proof_generation_timeout_in_secs: u16, - pub tee_support: bool, + #[serde(skip)] + // ^ Filled in separately in `Self::from_env()`. 
We cannot use `serde(flatten)` because it + // doesn't work with `envy`: https://github.com/softprops/envy/issues/26 + pub tee_config: TeeConfig, } impl ProofDataHandlerConfig { diff --git a/core/lib/config/src/testonly.rs b/core/lib/config/src/testonly.rs index ce681cc0cc43..3bf4609bb700 100644 --- a/core/lib/config/src/testonly.rs +++ b/core/lib/config/src/testonly.rs @@ -677,7 +677,10 @@ impl Distribution for EncodeDist { configs::ProofDataHandlerConfig { http_port: self.sample(rng), proof_generation_timeout_in_secs: self.sample(rng), - tee_support: self.sample(rng), + tee_config: configs::TeeConfig { + tee_support: self.sample(rng), + first_tee_processed_batch: L1BatchNumber(rng.gen()), + }, } } } diff --git a/core/lib/dal/src/tee_proof_generation_dal.rs b/core/lib/dal/src/tee_proof_generation_dal.rs index bde07f732802..755d02769101 100644 --- a/core/lib/dal/src/tee_proof_generation_dal.rs +++ b/core/lib/dal/src/tee_proof_generation_dal.rs @@ -32,10 +32,10 @@ impl TeeProofGenerationDal<'_, '_> { &mut self, tee_type: TeeType, processing_timeout: Duration, - min_batch_number: Option, + min_batch_number: L1BatchNumber, ) -> DalResult> { let processing_timeout = pg_interval_from_duration(processing_timeout); - let min_batch_number = min_batch_number.map_or(0, |num| i64::from(num.0)); + let min_batch_number = i64::from(min_batch_number.0); sqlx::query!( r#" WITH upsert AS ( diff --git a/core/lib/env_config/src/proof_data_handler.rs b/core/lib/env_config/src/proof_data_handler.rs index f69aa1d6dc59..b5bfda4544e7 100644 --- a/core/lib/env_config/src/proof_data_handler.rs +++ b/core/lib/env_config/src/proof_data_handler.rs @@ -4,12 +4,18 @@ use crate::{envy_load, FromEnv}; impl FromEnv for ProofDataHandlerConfig { fn from_env() -> anyhow::Result { - envy_load("proof_data_handler", "PROOF_DATA_HANDLER_") + Ok(Self { + tee_config: envy_load("proof_data_handler.tee", "PROOF_DATA_HANDLER_")?, + ..envy_load("proof_data_handler", "PROOF_DATA_HANDLER_")? 
+ }) } } #[cfg(test)] mod tests { + use zksync_basic_types::L1BatchNumber; + use zksync_config::configs::TeeConfig; + use super::*; use crate::test_utils::EnvMutex; @@ -19,7 +25,10 @@ mod tests { ProofDataHandlerConfig { http_port: 3320, proof_generation_timeout_in_secs: 18000, - tee_support: true, + tee_config: TeeConfig { + tee_support: true, + first_tee_processed_batch: L1BatchNumber(1337), + }, } } @@ -29,6 +38,7 @@ mod tests { PROOF_DATA_HANDLER_PROOF_GENERATION_TIMEOUT_IN_SECS="18000" PROOF_DATA_HANDLER_HTTP_PORT="3320" PROOF_DATA_HANDLER_TEE_SUPPORT="true" + PROOF_DATA_HANDLER_FIRST_TEE_PROCESSED_BATCH="1337" "#; let mut lock = MUTEX.lock(); lock.set_env(config); diff --git a/core/lib/protobuf_config/src/proof_data_handler.rs b/core/lib/protobuf_config/src/proof_data_handler.rs index 4b7bd2fd7c32..a587c702633f 100644 --- a/core/lib/protobuf_config/src/proof_data_handler.rs +++ b/core/lib/protobuf_config/src/proof_data_handler.rs @@ -1,6 +1,7 @@ use anyhow::Context as _; use zksync_config::configs; use zksync_protobuf::{repr::ProtoRepr, required}; +use zksync_types::L1BatchNumber; use crate::proto::prover as proto; @@ -14,9 +15,15 @@ impl ProtoRepr for proto::ProofDataHandler { proof_generation_timeout_in_secs: required(&self.proof_generation_timeout_in_secs) .and_then(|x| Ok((*x).try_into()?)) .context("proof_generation_timeout_in_secs")?, - tee_support: required(&self.tee_support) - .copied() - .context("tee_support")?, + tee_config: configs::TeeConfig { + tee_support: self + .tee_support + .unwrap_or_else(configs::TeeConfig::default_tee_support), + first_tee_processed_batch: self + .first_tee_processed_batch + .map(|x| L1BatchNumber(x as u32)) + .unwrap_or_else(configs::TeeConfig::default_first_tee_processed_batch), + }, }) } @@ -24,7 +31,8 @@ impl ProtoRepr for proto::ProofDataHandler { Self { http_port: Some(this.http_port.into()), proof_generation_timeout_in_secs: Some(this.proof_generation_timeout_in_secs.into()), - tee_support: Some(this.tee_support), + tee_support: Some(this.tee_config.tee_support), + first_tee_processed_batch: Some(this.tee_config.first_tee_processed_batch.0 as u64), } } } diff --git a/core/lib/protobuf_config/src/proto/config/prover.proto b/core/lib/protobuf_config/src/proto/config/prover.proto index 4fe3861183bf..92ba770a7560 100644 --- a/core/lib/protobuf_config/src/proto/config/prover.proto +++ b/core/lib/protobuf_config/src/proto/config/prover.proto @@ -107,5 +107,6 @@ message WitnessVectorGenerator { message ProofDataHandler { optional uint32 http_port = 1; // required; u16 optional uint32 proof_generation_timeout_in_secs = 2; // required; s - optional bool tee_support = 3; // required + optional bool tee_support = 3; // optional + optional uint64 first_tee_processed_batch = 4; // optional } diff --git a/core/node/proof_data_handler/src/lib.rs b/core/node/proof_data_handler/src/lib.rs index 661c76d20006..e014fca15d77 100644 --- a/core/node/proof_data_handler/src/lib.rs +++ b/core/node/proof_data_handler/src/lib.rs @@ -94,7 +94,7 @@ fn create_proof_processing_router( ), ); - if config.tee_support { + if config.tee_config.tee_support { let get_tee_proof_gen_processor = TeeRequestProcessor::new(blob_store, connection_pool, config.clone(), l2_chain_id); let submit_tee_proof_processor = get_tee_proof_gen_processor.clone(); diff --git a/core/node/proof_data_handler/src/tee_request_processor.rs b/core/node/proof_data_handler/src/tee_request_processor.rs index 800dede23c76..8e06d0c26bc9 100644 --- a/core/node/proof_data_handler/src/tee_request_processor.rs 
+++ b/core/node/proof_data_handler/src/tee_request_processor.rs @@ -47,7 +47,7 @@ impl TeeRequestProcessor { ) -> Result>, RequestProcessorError> { tracing::info!("Received request for proof generation data: {:?}", request); - let mut min_batch_number: Option = None; + let mut min_batch_number = self.config.tee_config.first_tee_processed_batch; let mut missing_range: Option<(L1BatchNumber, L1BatchNumber)> = None; let result = loop { @@ -72,7 +72,7 @@ impl TeeRequestProcessor { None => Some((l1_batch_number, l1_batch_number)), }; self.unlock_batch(l1_batch_number, request.tee_type).await?; - min_batch_number = Some(min_batch_number.unwrap_or(l1_batch_number) + 1); + min_batch_number = l1_batch_number + 1; } Err(err) => { self.unlock_batch(l1_batch_number, request.tee_type).await?; @@ -156,7 +156,7 @@ impl TeeRequestProcessor { async fn lock_batch_for_proving( &self, tee_type: TeeType, - min_batch_number: Option, + min_batch_number: L1BatchNumber, ) -> Result, RequestProcessorError> { self.pool .connection_tagged("tee_request_processor") diff --git a/core/node/proof_data_handler/src/tests.rs b/core/node/proof_data_handler/src/tests.rs index a10044cacd9c..63ea087a81c4 100644 --- a/core/node/proof_data_handler/src/tests.rs +++ b/core/node/proof_data_handler/src/tests.rs @@ -7,7 +7,7 @@ use axum::{ use serde_json::json; use tower::ServiceExt; use zksync_basic_types::L2ChainId; -use zksync_config::configs::ProofDataHandlerConfig; +use zksync_config::configs::{ProofDataHandlerConfig, TeeConfig}; use zksync_dal::{ConnectionPool, CoreDal}; use zksync_object_store::MockObjectStore; use zksync_prover_interface::api::SubmitTeeProofRequest; @@ -25,7 +25,10 @@ async fn request_tee_proof_inputs() { ProofDataHandlerConfig { http_port: 1337, proof_generation_timeout_in_secs: 10, - tee_support: true, + tee_config: TeeConfig { + tee_support: true, + first_tee_processed_batch: L1BatchNumber(0), + }, }, L1BatchCommitmentMode::Rollup, L2ChainId::default(), @@ -80,7 +83,10 @@ async fn submit_tee_proof() { ProofDataHandlerConfig { http_port: 1337, proof_generation_timeout_in_secs: 10, - tee_support: true, + tee_config: TeeConfig { + tee_support: true, + first_tee_processed_batch: L1BatchNumber(0), + }, }, L1BatchCommitmentMode::Rollup, L2ChainId::default(), From 8089b78b3f2cdbe8d0a23e9b8412a8022d78ada2 Mon Sep 17 00:00:00 2001 From: Grzegorz Prusak Date: Thu, 24 Oct 2024 15:25:00 +0200 Subject: [PATCH 08/32] fix(consensus): payload encoding protected by protocol_version (#3168) Changing payload encoding without protocol version change would invalidate consensus signatures. 
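
As a rough illustration of the invariant this change enforces, here is a minimal standalone sketch (the types and version cutoffs below are simplified stand-ins, not the actual `zksync_dal` definitions): new optional fields may only appear in the encoding of payloads whose protocol version postdates the already-signed format, and the default value is never encoded at all.

```rust
// Simplified stand-ins for the real zksync types; only the gating logic matters here.
#[derive(Clone, Copy, PartialEq, Eq, Default, Debug)]
struct PubdataParams {
    l2_da_validator_address: [u8; 20],
}

#[derive(Clone, Copy, PartialEq, Eq, PartialOrd, Ord)]
enum ProtocolVersion {
    V25, // pre-gateway: payload encoding is already fixed by existing signatures
    V26, // post-gateway: allowed to carry the new optional field
}

/// Returns the value to put on the wire, or an error if encoding it would
/// silently change the byte representation of an already-signed payload.
fn encode_pubdata_params(
    version: ProtocolVersion,
    params: PubdataParams,
) -> Result<Option<PubdataParams>, String> {
    if version < ProtocolVersion::V26 {
        if params != PubdataParams::default() {
            return Err("non-default pubdata_params require a protocol version bump".into());
        }
        // Field stays absent, so pre-gateway payloads keep their old encoding.
        return Ok(None);
    }
    // Encode only non-default values, so default payloads hash identically across versions.
    Ok(if params == PubdataParams::default() {
        None
    } else {
        Some(params)
    })
}

fn main() {
    assert_eq!(
        encode_pubdata_params(ProtocolVersion::V25, PubdataParams::default()),
        Ok(None)
    );
    assert!(encode_pubdata_params(
        ProtocolVersion::V25,
        PubdataParams { l2_da_validator_address: [1; 20] }
    )
    .is_err());
}
```

The actual `conv.rs` change below applies the same idea to the protobuf `Payload` encoding: pre-gateway versions must carry the default `PubdataParams`, and the default value is always encoded as `None`.
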
--- .github/workflows/ci-core-reusable.yml | 4 ++ core/lib/dal/src/consensus/conv.rs | 83 ++++++++++++++++-------- core/lib/dal/src/consensus/mod.rs | 2 +- core/lib/dal/src/consensus/tests.rs | 34 ++++++---- core/lib/dal/src/models/storage_sync.rs | 2 +- core/node/consensus/src/storage/store.rs | 9 +-- zkstack_cli/rust-toolchain | 2 +- 7 files changed, 85 insertions(+), 51 deletions(-) diff --git a/.github/workflows/ci-core-reusable.yml b/.github/workflows/ci-core-reusable.yml index 9aaa476d740d..c79e34315763 100644 --- a/.github/workflows/ci-core-reusable.yml +++ b/.github/workflows/ci-core-reusable.yml @@ -8,6 +8,10 @@ on: required: false default: '[{ "zksolc": ["1.3.14", "1.3.16", "1.3.17", "1.3.1", "1.3.7", "1.3.18", "1.3.19", "1.3.21"] } , { "zkvyper": ["1.3.13"] }]' +env: + RUST_BACKTRACE: 1 + PASSED_ENV_VARS: RUST_BACKTRACE + jobs: lint: name: lint diff --git a/core/lib/dal/src/consensus/conv.rs b/core/lib/dal/src/consensus/conv.rs index 2b8488dd0c2a..f0948adfd1da 100644 --- a/core/lib/dal/src/consensus/conv.rs +++ b/core/lib/dal/src/consensus/conv.rs @@ -2,7 +2,7 @@ use anyhow::{anyhow, Context as _}; use zksync_concurrency::net; use zksync_consensus_roles::{attester, node}; -use zksync_protobuf::{read_required, required, ProtoFmt, ProtoRepr}; +use zksync_protobuf::{read_optional_repr, read_required, required, ProtoFmt, ProtoRepr}; use zksync_types::{ abi, commitment::{L1BatchCommitmentMode, PubdataParams}, @@ -104,6 +104,31 @@ impl ProtoFmt for AttestationStatus { } } +impl ProtoRepr for proto::PubdataParams { + type Type = PubdataParams; + + fn read(&self) -> anyhow::Result { + Ok(Self::Type { + l2_da_validator_address: required(&self.l2_da_validator_address) + .and_then(|a| parse_h160(a)) + .context("l2_da_validator_address")?, + pubdata_type: required(&self.pubdata_type) + .and_then(|x| Ok(proto::L1BatchCommitDataGeneratorMode::try_from(*x)?)) + .context("pubdata_type")? + .parse(), + }) + } + + fn build(this: &Self::Type) -> Self { + Self { + l2_da_validator_address: Some(this.l2_da_validator_address.as_bytes().into()), + pubdata_type: Some( + proto::L1BatchCommitDataGeneratorMode::new(&this.pubdata_type) as i32, + ), + } + } +} + impl ProtoFmt for Payload { type Proto = proto::Payload; @@ -137,21 +162,7 @@ impl ProtoFmt for Payload { } } - let pubdata_params = if let Some(pubdata_params) = &r.pubdata_params { - Some(PubdataParams { - l2_da_validator_address: required(&pubdata_params.l2_da_validator_address) - .and_then(|a| parse_h160(a)) - .context("l2_da_validator_address")?, - pubdata_type: required(&pubdata_params.pubdata_type) - .and_then(|x| Ok(proto::L1BatchCommitDataGeneratorMode::try_from(*x)?)) - .context("pubdata_type")? - .parse(), - }) - } else { - None - }; - - Ok(Self { + let this = Self { protocol_version, hash: required(&r.hash) .and_then(|h| parse_h256(h)) @@ -169,11 +180,32 @@ impl ProtoFmt for Payload { .context("operator_address")?, transactions, last_in_batch: *required(&r.last_in_batch).context("last_in_batch")?, - pubdata_params, - }) + pubdata_params: read_optional_repr(&r.pubdata_params) + .context("pubdata_params")? 
+ .unwrap_or_default(), + }; + if this.protocol_version.is_pre_gateway() { + anyhow::ensure!( + this.pubdata_params == PubdataParams::default(), + "pubdata_params should have the default value in pre-gateway protocol_version" + ); + } + if this.pubdata_params == PubdataParams::default() { + anyhow::ensure!( + r.pubdata_params.is_none(), + "default pubdata_params should be encoded as None" + ); + } + Ok(this) } fn build(&self) -> Self::Proto { + if self.protocol_version.is_pre_gateway() { + assert_eq!( + self.pubdata_params, PubdataParams::default(), + "BUG DETECTED: pubdata_params should have the default value in pre-gateway protocol_version" + ); + } let mut x = Self::Proto { protocol_version: Some((self.protocol_version as u16).into()), hash: Some(self.hash.as_bytes().into()), @@ -188,16 +220,11 @@ impl ProtoFmt for Payload { transactions: vec![], transactions_v25: vec![], last_in_batch: Some(self.last_in_batch), - pubdata_params: self - .pubdata_params - .map(|pubdata_params| proto::PubdataParams { - l2_da_validator_address: Some( - pubdata_params.l2_da_validator_address.as_bytes().into(), - ), - pubdata_type: Some(proto::L1BatchCommitDataGeneratorMode::new( - &pubdata_params.pubdata_type, - ) as i32), - }), + pubdata_params: if self.pubdata_params == PubdataParams::default() { + None + } else { + Some(ProtoRepr::build(&self.pubdata_params)) + }, }; match self.protocol_version { v if v >= ProtocolVersionId::Version25 => { diff --git a/core/lib/dal/src/consensus/mod.rs b/core/lib/dal/src/consensus/mod.rs index c7e46b2cf1b7..96efc6348350 100644 --- a/core/lib/dal/src/consensus/mod.rs +++ b/core/lib/dal/src/consensus/mod.rs @@ -48,7 +48,7 @@ pub struct Payload { pub operator_address: Address, pub transactions: Vec, pub last_in_batch: bool, - pub pubdata_params: Option, + pub pubdata_params: PubdataParams, } impl Payload { diff --git a/core/lib/dal/src/consensus/tests.rs b/core/lib/dal/src/consensus/tests.rs index c9fd91748b2b..df6ee24bfa94 100644 --- a/core/lib/dal/src/consensus/tests.rs +++ b/core/lib/dal/src/consensus/tests.rs @@ -1,7 +1,7 @@ use std::fmt::Debug; use rand::Rng; -use zksync_concurrency::ctx; +use zksync_concurrency::{ctx, testonly::abort_on_panic}; use zksync_protobuf::{ repr::{decode, encode}, testonly::{test_encode, test_encode_all_formats, FmtConv}, @@ -53,19 +53,24 @@ fn payload(rng: &mut impl Rng, protocol_version: ProtocolVersionId) -> Payload { }) .collect(), last_in_batch: rng.gen(), - pubdata_params: Some(PubdataParams { - pubdata_type: match rng.gen_range(0..2) { - 0 => L1BatchCommitmentMode::Rollup, - _ => L1BatchCommitmentMode::Validium, - }, - l2_da_validator_address: rng.gen(), - }), + pubdata_params: if protocol_version.is_pre_gateway() { + PubdataParams::default() + } else { + PubdataParams { + pubdata_type: match rng.gen_range(0..2) { + 0 => L1BatchCommitmentMode::Rollup, + _ => L1BatchCommitmentMode::Validium, + }, + l2_da_validator_address: rng.gen(), + } + }, } } /// Tests struct <-> proto struct conversions. #[test] fn test_encoding() { + abort_on_panic(); let ctx = &ctx::test_root(&ctx::RealClock); let rng = &mut ctx.rng(); test_encode_all_formats::>(rng); @@ -78,10 +83,15 @@ fn test_encoding() { encode_decode::( mock_protocol_upgrade_transaction().into(), ); - let p = payload(rng, ProtocolVersionId::Version24); - test_encode(rng, &p); - let p = payload(rng, ProtocolVersionId::Version25); - test_encode(rng, &p); + // Test encoding in the current and all the future versions. + for v in ProtocolVersionId::latest() as u16.. 
{ + let Ok(v) = ProtocolVersionId::try_from(v) else { + break; + }; + tracing::info!("version {v}"); + let p = payload(rng, v); + test_encode(rng, &p); + } } fn encode_decode(msg: P::Type) diff --git a/core/lib/dal/src/models/storage_sync.rs b/core/lib/dal/src/models/storage_sync.rs index 0eb65a606d1f..3f80f52c56eb 100644 --- a/core/lib/dal/src/models/storage_sync.rs +++ b/core/lib/dal/src/models/storage_sync.rs @@ -139,7 +139,7 @@ impl SyncBlock { operator_address: self.fee_account_address, transactions, last_in_batch: self.last_in_batch, - pubdata_params: Some(self.pubdata_params), + pubdata_params: self.pubdata_params, } } } diff --git a/core/node/consensus/src/storage/store.rs b/core/node/consensus/src/storage/store.rs index 53be2fc63c75..4dce9041a106 100644 --- a/core/node/consensus/src/storage/store.rs +++ b/core/node/consensus/src/storage/store.rs @@ -28,13 +28,6 @@ fn to_fetched_block( .context("Integer overflow converting block number")?, ); let payload = Payload::decode(payload).context("Payload::decode()")?; - let pubdata_params = if payload.protocol_version.is_pre_gateway() { - payload.pubdata_params.unwrap_or_default() - } else { - payload - .pubdata_params - .context("Missing `pubdata_params` for post-gateway payload")? - }; Ok(FetchedBlock { number, l1_batch_number: payload.l1_batch_number, @@ -45,7 +38,7 @@ fn to_fetched_block( l1_gas_price: payload.l1_gas_price, l2_fair_gas_price: payload.l2_fair_gas_price, fair_pubdata_price: payload.fair_pubdata_price, - pubdata_params, + pubdata_params: payload.pubdata_params, virtual_blocks: payload.virtual_blocks, operator_address: payload.operator_address, transactions: payload diff --git a/zkstack_cli/rust-toolchain b/zkstack_cli/rust-toolchain index dbd41264aa9f..03c040b91f1f 100644 --- a/zkstack_cli/rust-toolchain +++ b/zkstack_cli/rust-toolchain @@ -1 +1 @@ -1.81.0 +nightly-2024-08-01 From ffa18e1d84a4bb1ca9b897fbc0a55b9e3ef0964c Mon Sep 17 00:00:00 2001 From: Alex Ostrovski Date: Thu, 24 Oct 2024 17:40:00 +0300 Subject: [PATCH 09/32] feat(zk_toolbox): Add EVM emulator option to `zkstack` CLI (#3139) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit ## What ❔ Allows to enable EVM emulation support from `zkstack chain create` by specifying the `--evm-emulator` command-line arg or answering the corresponding prompt. The prompt only activates if the EVM emulator bytecode hash is specified in the template genesis config (currently, it's not); more generally, it is impossible to create a chain with EVM emulation support if its bytecode hash is unknown. ## Why ❔ Part of efforts to enable EVM emulation. ## Checklist - [x] PR title corresponds to the body of PR (we generate changelog entries from PRs). - [x] Documentation comments have been added / updated. - [x] Code has been formatted via `zkstack dev fmt` and `zkstack dev lint`. 
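The gating rule described above can be summarized in a small sketch; `resolve_evm_emulator` is a hypothetical free function written only for illustration (the PR implements the same logic inline in `ChainCreateArgs::fill_values_with_prompt`):

```rust
// Sketch of the gating rule behind the new `--evm-emulator` flag and prompt:
// EVM emulation can only be enabled if the template genesis config ships an
// emulator bytecode hash.
fn resolve_evm_emulator(
    requested: Option<bool>,                  // value of `--evm-emulator`, if passed
    template_emulator_hash: Option<[u8; 32]>, // `evm_emulator_hash` from the default genesis config
    prompt_user: impl FnOnce() -> bool,
) -> anyhow::Result<bool> {
    let supported = template_emulator_hash.is_some();
    let enabled = match requested {
        Some(value) => value,
        // Only prompt when enabling is actually possible; otherwise default to `false`.
        None if supported => prompt_user(),
        None => false,
    };
    anyhow::ensure!(
        supported || !enabled,
        "cannot create a chain with EVM emulation: the template genesis config has no emulator hash"
    );
    Ok(enabled)
}

fn main() -> anyhow::Result<()> {
    // `--evm-emulator=true` against a template without an emulator hash fails fast.
    assert!(resolve_evm_emulator(Some(true), None, || false).is_err());
    // No flag and no hash in the template: silently disabled, no prompt shown.
    assert!(!resolve_evm_emulator(None, None, || true)?);
    Ok(())
}
```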
--- etc/env/file_based/genesis.yaml | 2 +- zkstack_cli/crates/config/src/chain.rs | 4 ++ zkstack_cli/crates/config/src/ecosystem.rs | 7 +++- zkstack_cli/crates/config/src/genesis.rs | 15 +++++++- .../crates/zkstack/completion/_zkstack.zsh | 2 + .../crates/zkstack/completion/zkstack.fish | 2 + .../crates/zkstack/completion/zkstack.sh | 12 +++++- .../zkstack/src/commands/chain/args/create.rs | 38 ++++++++++++++++++- .../src/commands/chain/build_transactions.rs | 2 +- .../zkstack/src/commands/chain/create.rs | 3 ++ .../src/commands/chain/init/configs.rs | 2 +- .../src/commands/ecosystem/args/create.rs | 8 +++- .../zkstack/src/commands/ecosystem/common.rs | 2 +- zkstack_cli/crates/zkstack/src/messages.rs | 5 +++ 14 files changed, 93 insertions(+), 11 deletions(-) diff --git a/etc/env/file_based/genesis.yaml b/etc/env/file_based/genesis.yaml index 1b154b9e9eae..9617b011d2c7 100644 --- a/etc/env/file_based/genesis.yaml +++ b/etc/env/file_based/genesis.yaml @@ -12,5 +12,5 @@ prover: dummy_verifier: true genesis_protocol_semantic_version: 0.25.0 l1_batch_commit_data_generator_mode: Rollup -# Uncomment to enable EVM emulation (requires to run genesis) +# TODO: uncomment once EVM emulator is present in the `contracts` submodule # evm_emulator_hash: 0x01000e53aa35d9d19fa99341c2e2901cf93b3668f01569dd5c6ca409c7696b91 diff --git a/zkstack_cli/crates/config/src/chain.rs b/zkstack_cli/crates/config/src/chain.rs index 6c82d6ef3c37..c8fa0717dff5 100644 --- a/zkstack_cli/crates/config/src/chain.rs +++ b/zkstack_cli/crates/config/src/chain.rs @@ -40,6 +40,8 @@ pub struct ChainConfigInternal { pub wallet_creation: WalletCreation, #[serde(skip_serializing_if = "Option::is_none")] pub legacy_bridge: Option, + #[serde(default)] // for backward compatibility + pub evm_emulator: bool, } /// Chain configuration file. 
This file is created in the chain @@ -61,6 +63,7 @@ pub struct ChainConfig { pub wallet_creation: WalletCreation, pub shell: OnceCell, pub legacy_bridge: Option, + pub evm_emulator: bool, } impl Serialize for ChainConfig { @@ -157,6 +160,7 @@ impl ChainConfig { base_token: self.base_token.clone(), wallet_creation: self.wallet_creation, legacy_bridge: self.legacy_bridge, + evm_emulator: self.evm_emulator, } } } diff --git a/zkstack_cli/crates/config/src/ecosystem.rs b/zkstack_cli/crates/config/src/ecosystem.rs index 79cb1c4ea27d..c67aebf2a46c 100644 --- a/zkstack_cli/crates/config/src/ecosystem.rs +++ b/zkstack_cli/crates/config/src/ecosystem.rs @@ -178,6 +178,7 @@ impl EcosystemConfig { .artifacts_path .unwrap_or_else(|| self.get_chain_artifacts_path(name)), legacy_bridge: config.legacy_bridge, + evm_emulator: config.evm_emulator, }) } @@ -232,7 +233,11 @@ impl EcosystemConfig { } pub fn get_default_configs_path(&self) -> PathBuf { - self.link_to_code.join(CONFIGS_PATH) + Self::default_configs_path(&self.link_to_code) + } + + pub fn default_configs_path(link_to_code: &Path) -> PathBuf { + link_to_code.join(CONFIGS_PATH) } /// Path to the predefined ecosystem configs diff --git a/zkstack_cli/crates/config/src/genesis.rs b/zkstack_cli/crates/config/src/genesis.rs index 933252541f43..2d9ac7fcdc66 100644 --- a/zkstack_cli/crates/config/src/genesis.rs +++ b/zkstack_cli/crates/config/src/genesis.rs @@ -1,5 +1,6 @@ use std::path::Path; +use anyhow::Context as _; use xshell::Shell; use zksync_basic_types::L1ChainId; pub use zksync_config::GenesisConfig; @@ -11,11 +12,23 @@ use crate::{ ChainConfig, }; -pub fn update_from_chain_config(genesis: &mut GenesisConfig, config: &ChainConfig) { +pub fn update_from_chain_config( + genesis: &mut GenesisConfig, + config: &ChainConfig, +) -> anyhow::Result<()> { genesis.l2_chain_id = config.chain_id; // TODO(EVM-676): for now, the settlement layer is always the same as the L1 network genesis.l1_chain_id = L1ChainId(config.l1_network.chain_id()); genesis.l1_batch_commit_data_generator_mode = config.l1_batch_commit_data_generator_mode; + genesis.evm_emulator_hash = if config.evm_emulator { + Some(genesis.evm_emulator_hash.context( + "impossible to initialize a chain with EVM emulator: the template genesis config \ + does not contain EVM emulator hash", + )?) 
+ } else { + None + }; + Ok(()) } impl FileConfigWithDefaultName for GenesisConfig { diff --git a/zkstack_cli/crates/zkstack/completion/_zkstack.zsh b/zkstack_cli/crates/zkstack/completion/_zkstack.zsh index b985f5b93346..4df431754c84 100644 --- a/zkstack_cli/crates/zkstack/completion/_zkstack.zsh +++ b/zkstack_cli/crates/zkstack/completion/_zkstack.zsh @@ -81,6 +81,7 @@ in-file\:"Specify file with wallets"))' \ '--base-token-price-nominator=[Base token nominator]:BASE_TOKEN_PRICE_NOMINATOR: ' \ '--base-token-price-denominator=[Base token denominator]:BASE_TOKEN_PRICE_DENOMINATOR: ' \ '--set-as-default=[Set as default chain]' \ +'--evm-emulator=[Enable EVM emulator]' \ '--start-containers=[Start reth and postgres containers after creation]' \ '--chain=[Chain to use]:CHAIN: ' \ '--legacy-bridge[]' \ @@ -241,6 +242,7 @@ in-file\:"Specify file with wallets"))' \ '--base-token-price-nominator=[Base token nominator]:BASE_TOKEN_PRICE_NOMINATOR: ' \ '--base-token-price-denominator=[Base token denominator]:BASE_TOKEN_PRICE_DENOMINATOR: ' \ '--set-as-default=[Set as default chain]' \ +'--evm-emulator=[Enable EVM emulator]' \ '--chain=[Chain to use]:CHAIN: ' \ '--legacy-bridge[]' \ '-v[Verbose mode]' \ diff --git a/zkstack_cli/crates/zkstack/completion/zkstack.fish b/zkstack_cli/crates/zkstack/completion/zkstack.fish index f90bcf2c4ac3..a1261082e6f0 100644 --- a/zkstack_cli/crates/zkstack/completion/zkstack.fish +++ b/zkstack_cli/crates/zkstack/completion/zkstack.fish @@ -73,6 +73,7 @@ complete -c zkstack -n "__fish_zkstack_using_subcommand ecosystem; and __fish_se complete -c zkstack -n "__fish_zkstack_using_subcommand ecosystem; and __fish_seen_subcommand_from create" -l base-token-price-nominator -d 'Base token nominator' -r complete -c zkstack -n "__fish_zkstack_using_subcommand ecosystem; and __fish_seen_subcommand_from create" -l base-token-price-denominator -d 'Base token denominator' -r complete -c zkstack -n "__fish_zkstack_using_subcommand ecosystem; and __fish_seen_subcommand_from create" -l set-as-default -d 'Set as default chain' -r -f -a "{true\t'',false\t''}" +complete -c zkstack -n "__fish_zkstack_using_subcommand ecosystem; and __fish_seen_subcommand_from create" -l evm-emulator -d 'Enable EVM emulator' -r -f -a "{true\t'',false\t''}" complete -c zkstack -n "__fish_zkstack_using_subcommand ecosystem; and __fish_seen_subcommand_from create" -l start-containers -d 'Start reth and postgres containers after creation' -r -f -a "{true\t'',false\t''}" complete -c zkstack -n "__fish_zkstack_using_subcommand ecosystem; and __fish_seen_subcommand_from create" -l chain -d 'Chain to use' -r complete -c zkstack -n "__fish_zkstack_using_subcommand ecosystem; and __fish_seen_subcommand_from create" -l legacy-bridge @@ -156,6 +157,7 @@ complete -c zkstack -n "__fish_zkstack_using_subcommand chain; and __fish_seen_s complete -c zkstack -n "__fish_zkstack_using_subcommand chain; and __fish_seen_subcommand_from create" -l base-token-price-nominator -d 'Base token nominator' -r complete -c zkstack -n "__fish_zkstack_using_subcommand chain; and __fish_seen_subcommand_from create" -l base-token-price-denominator -d 'Base token denominator' -r complete -c zkstack -n "__fish_zkstack_using_subcommand chain; and __fish_seen_subcommand_from create" -l set-as-default -d 'Set as default chain' -r -f -a "{true\t'',false\t''}" +complete -c zkstack -n "__fish_zkstack_using_subcommand chain; and __fish_seen_subcommand_from create" -l evm-emulator -d 'Enable EVM emulator' -r -f -a "{true\t'',false\t''}" complete -c 
zkstack -n "__fish_zkstack_using_subcommand chain; and __fish_seen_subcommand_from create" -l chain -d 'Chain to use' -r complete -c zkstack -n "__fish_zkstack_using_subcommand chain; and __fish_seen_subcommand_from create" -l legacy-bridge complete -c zkstack -n "__fish_zkstack_using_subcommand chain; and __fish_seen_subcommand_from create" -s v -l verbose -d 'Verbose mode' diff --git a/zkstack_cli/crates/zkstack/completion/zkstack.sh b/zkstack_cli/crates/zkstack/completion/zkstack.sh index d21480bba2ca..7cdb20ae9aa7 100644 --- a/zkstack_cli/crates/zkstack/completion/zkstack.sh +++ b/zkstack_cli/crates/zkstack/completion/zkstack.sh @@ -1162,7 +1162,7 @@ _zkstack() { return 0 ;; zkstack__chain__create) - opts="-v -h --chain-name --chain-id --prover-mode --wallet-creation --wallet-path --l1-batch-commit-data-generator-mode --base-token-address --base-token-price-nominator --base-token-price-denominator --set-as-default --legacy-bridge --verbose --chain --ignore-prerequisites --help" + opts="-v -h --chain-name --chain-id --prover-mode --wallet-creation --wallet-path --l1-batch-commit-data-generator-mode --base-token-address --base-token-price-nominator --base-token-price-denominator --set-as-default --legacy-bridge --evm-emulator --verbose --chain --ignore-prerequisites --help" if [[ ${cur} == -* || ${COMP_CWORD} -eq 3 ]] ; then COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) return 0 @@ -1219,6 +1219,10 @@ _zkstack() { COMPREPLY=($(compgen -W "true false" -- "${cur}")) return 0 ;; + --evm-emulator) + COMPREPLY=($(compgen -W "true false" -- "${cur}")) + return 0 + ;; --chain) COMPREPLY=($(compgen -f "${cur}")) return 0 @@ -4643,7 +4647,7 @@ _zkstack() { return 0 ;; zkstack__ecosystem__create) - opts="-v -h --ecosystem-name --l1-network --link-to-code --chain-name --chain-id --prover-mode --wallet-creation --wallet-path --l1-batch-commit-data-generator-mode --base-token-address --base-token-price-nominator --base-token-price-denominator --set-as-default --legacy-bridge --start-containers --verbose --chain --ignore-prerequisites --help" + opts="-v -h --ecosystem-name --l1-network --link-to-code --chain-name --chain-id --prover-mode --wallet-creation --wallet-path --l1-batch-commit-data-generator-mode --base-token-address --base-token-price-nominator --base-token-price-denominator --set-as-default --legacy-bridge --evm-emulator --start-containers --verbose --chain --ignore-prerequisites --help" if [[ ${cur} == -* || ${COMP_CWORD} -eq 3 ]] ; then COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) return 0 @@ -4715,6 +4719,10 @@ _zkstack() { COMPREPLY=($(compgen -W "true false" -- "${cur}")) return 0 ;; + --evm-emulator) + COMPREPLY=($(compgen -W "true false" -- "${cur}")) + return 0 + ;; --start-containers) COMPREPLY=($(compgen -W "true false" -- "${cur}")) return 0 diff --git a/zkstack_cli/crates/zkstack/src/commands/chain/args/create.rs b/zkstack_cli/crates/zkstack/src/commands/chain/args/create.rs index ccf64ad27ac9..ae08d4712b34 100644 --- a/zkstack_cli/crates/zkstack/src/commands/chain/args/create.rs +++ b/zkstack_cli/crates/zkstack/src/commands/chain/args/create.rs @@ -1,14 +1,22 @@ -use std::{path::PathBuf, str::FromStr}; +use std::{ + path::{Path, PathBuf}, + str::FromStr, +}; use anyhow::{bail, Context}; use clap::{Parser, ValueEnum, ValueHint}; use common::{Prompt, PromptConfirm, PromptSelect}; -use config::forge_interface::deploy_ecosystem::output::Erc20Token; +use config::{ + forge_interface::deploy_ecosystem::output::Erc20Token, traits::ReadConfigWithBasePath, + EcosystemConfig, 
+}; use serde::{Deserialize, Serialize}; use slugify_rs::slugify; use strum::{Display, EnumIter, IntoEnumIterator}; use types::{BaseToken, L1BatchCommitmentMode, L1Network, ProverMode, WalletCreation}; +use xshell::Shell; use zksync_basic_types::H160; +use zksync_config::GenesisConfig; use crate::{ defaults::L2_CHAIN_ID, @@ -18,6 +26,7 @@ use crate::{ MSG_BASE_TOKEN_PRICE_DENOMINATOR_PROMPT, MSG_BASE_TOKEN_PRICE_NOMINATOR_HELP, MSG_BASE_TOKEN_PRICE_NOMINATOR_PROMPT, MSG_BASE_TOKEN_SELECTION_PROMPT, MSG_CHAIN_ID_HELP, MSG_CHAIN_ID_PROMPT, MSG_CHAIN_ID_VALIDATOR_ERR, MSG_CHAIN_NAME_PROMPT, + MSG_EVM_EMULATOR_HASH_MISSING_ERR, MSG_EVM_EMULATOR_HELP, MSG_EVM_EMULATOR_PROMPT, MSG_L1_BATCH_COMMIT_DATA_GENERATOR_MODE_PROMPT, MSG_L1_COMMIT_DATA_GENERATOR_MODE_HELP, MSG_NUMBER_VALIDATOR_GREATHER_THAN_ZERO_ERR, MSG_NUMBER_VALIDATOR_NOT_ZERO_ERR, MSG_PROVER_MODE_HELP, MSG_PROVER_VERSION_PROMPT, MSG_SET_AS_DEFAULT_HELP, @@ -67,14 +76,18 @@ pub struct ChainCreateArgs { pub(crate) set_as_default: Option, #[clap(long, default_value = "false")] pub(crate) legacy_bridge: bool, + #[arg(long, help = MSG_EVM_EMULATOR_HELP, default_missing_value = "true", num_args = 0..=1)] + evm_emulator: Option, } impl ChainCreateArgs { pub fn fill_values_with_prompt( self, + shell: &Shell, number_of_chains: u32, l1_network: &L1Network, possible_erc20: Vec, + link_to_code: &Path, ) -> anyhow::Result { let mut chain_name = self .chain_name @@ -211,6 +224,25 @@ impl ChainCreateArgs { } }; + let default_genesis_config = GenesisConfig::read_with_base_path( + shell, + EcosystemConfig::default_configs_path(link_to_code), + ) + .context("failed reading genesis config")?; + let has_evm_emulation_support = default_genesis_config.evm_emulator_hash.is_some(); + let evm_emulator = self.evm_emulator.unwrap_or_else(|| { + if !has_evm_emulation_support { + false + } else { + PromptConfirm::new(MSG_EVM_EMULATOR_PROMPT) + .default(false) + .ask() + } + }); + if !has_evm_emulation_support && evm_emulator { + bail!(MSG_EVM_EMULATOR_HASH_MISSING_ERR); + } + let set_as_default = self.set_as_default.unwrap_or_else(|| { PromptConfirm::new(MSG_SET_AS_DEFAULT_PROMPT) .default(true) @@ -227,6 +259,7 @@ impl ChainCreateArgs { base_token, set_as_default, legacy_bridge: self.legacy_bridge, + evm_emulator, }) } } @@ -242,6 +275,7 @@ pub struct ChainCreateArgsFinal { pub base_token: BaseToken, pub set_as_default: bool, pub legacy_bridge: bool, + pub evm_emulator: bool, } #[derive(Debug, Clone, EnumIter, Display, PartialEq, Eq)] diff --git a/zkstack_cli/crates/zkstack/src/commands/chain/build_transactions.rs b/zkstack_cli/crates/zkstack/src/commands/chain/build_transactions.rs index 5f1be15231bf..d3953c656596 100644 --- a/zkstack_cli/crates/zkstack/src/commands/chain/build_transactions.rs +++ b/zkstack_cli/crates/zkstack/src/commands/chain/build_transactions.rs @@ -42,7 +42,7 @@ pub(crate) async fn run(args: BuildTransactionsArgs, shell: &Shell) -> anyhow::R logger::note(MSG_SELECTED_CONFIG, logger::object_to_string(&chain_config)); let mut genesis_config = chain_config.get_genesis_config()?; - update_from_chain_config(&mut genesis_config, &chain_config); + update_from_chain_config(&mut genesis_config, &chain_config)?; // Copy ecosystem contracts let mut contracts_config = config diff --git a/zkstack_cli/crates/zkstack/src/commands/chain/create.rs b/zkstack_cli/crates/zkstack/src/commands/chain/create.rs index 48a320ec27e0..bdf5711e3213 100644 --- a/zkstack_cli/crates/zkstack/src/commands/chain/create.rs +++ 
b/zkstack_cli/crates/zkstack/src/commands/chain/create.rs @@ -30,9 +30,11 @@ fn create( let tokens = ecosystem_config.get_erc20_tokens(); let args = args .fill_values_with_prompt( + shell, ecosystem_config.list_of_chains().len() as u32, &ecosystem_config.l1_network, tokens, + &ecosystem_config.link_to_code, ) .context(MSG_ARGS_VALIDATOR_ERR)?; @@ -89,6 +91,7 @@ pub(crate) fn create_chain_inner( wallet_creation: args.wallet_creation, shell: OnceCell::from(shell.clone()), legacy_bridge, + evm_emulator: args.evm_emulator, }; create_wallets( diff --git a/zkstack_cli/crates/zkstack/src/commands/chain/init/configs.rs b/zkstack_cli/crates/zkstack/src/commands/chain/init/configs.rs index 82986d9b41ae..31c5c681e7d3 100644 --- a/zkstack_cli/crates/zkstack/src/commands/chain/init/configs.rs +++ b/zkstack_cli/crates/zkstack/src/commands/chain/init/configs.rs @@ -81,7 +81,7 @@ pub async fn init_configs( // Initialize genesis config let mut genesis_config = chain_config.get_genesis_config()?; - update_from_chain_config(&mut genesis_config, chain_config); + update_from_chain_config(&mut genesis_config, chain_config)?; genesis_config.save_with_base_path(shell, &chain_config.configs)?; // Initialize contracts config diff --git a/zkstack_cli/crates/zkstack/src/commands/ecosystem/args/create.rs b/zkstack_cli/crates/zkstack/src/commands/ecosystem/args/create.rs index 14cb5206f6a3..6b6c1236d363 100644 --- a/zkstack_cli/crates/zkstack/src/commands/ecosystem/args/create.rs +++ b/zkstack_cli/crates/zkstack/src/commands/ecosystem/args/create.rs @@ -71,7 +71,13 @@ impl EcosystemCreateArgs { // Make the only chain as a default one self.chain.set_as_default = Some(true); - let chain = self.chain.fill_values_with_prompt(0, &l1_network, vec![])?; + let chain = self.chain.fill_values_with_prompt( + shell, + 0, + &l1_network, + vec![], + Path::new(&link_to_code), + )?; let start_containers = self.start_containers.unwrap_or_else(|| { PromptConfirm::new(MSG_START_CONTAINERS_PROMPT) diff --git a/zkstack_cli/crates/zkstack/src/commands/ecosystem/common.rs b/zkstack_cli/crates/zkstack/src/commands/ecosystem/common.rs index 42b8f79b97eb..00d937bba294 100644 --- a/zkstack_cli/crates/zkstack/src/commands/ecosystem/common.rs +++ b/zkstack_cli/crates/zkstack/src/commands/ecosystem/common.rs @@ -28,7 +28,7 @@ pub async fn deploy_l1( let deploy_config_path = DEPLOY_ECOSYSTEM_SCRIPT_PARAMS.input(&config.link_to_code); let default_genesis_config = GenesisConfig::read_with_base_path(shell, config.get_default_configs_path()) - .context("Context")?; + .context("failed reading genesis config")?; let wallets_config = config.get_wallets()?; // For deploying ecosystem we only need genesis batch params diff --git a/zkstack_cli/crates/zkstack/src/messages.rs b/zkstack_cli/crates/zkstack/src/messages.rs index b9786dc4d8d1..516194ef721e 100644 --- a/zkstack_cli/crates/zkstack/src/messages.rs +++ b/zkstack_cli/crates/zkstack/src/messages.rs @@ -156,6 +156,7 @@ pub(super) const MSG_BASE_TOKEN_ADDRESS_HELP: &str = "Base token address"; pub(super) const MSG_BASE_TOKEN_PRICE_NOMINATOR_HELP: &str = "Base token nominator"; pub(super) const MSG_BASE_TOKEN_PRICE_DENOMINATOR_HELP: &str = "Base token denominator"; pub(super) const MSG_SET_AS_DEFAULT_HELP: &str = "Set as default chain"; +pub(super) const MSG_EVM_EMULATOR_HELP: &str = "Enable EVM emulator"; pub(super) const MSG_CHAIN_NAME_PROMPT: &str = "What do you want to name the chain?"; pub(super) const MSG_CHAIN_ID_PROMPT: &str = "What's the chain id?"; pub(super) const MSG_WALLET_CREATION_PROMPT: &str 
= "Select how do you want to create the wallet"; @@ -170,6 +171,7 @@ pub(super) const MSG_BASE_TOKEN_PRICE_NOMINATOR_PROMPT: &str = pub(super) const MSG_BASE_TOKEN_PRICE_DENOMINATOR_PROMPT: &str = "What is the base token price denominator?"; pub(super) const MSG_SET_AS_DEFAULT_PROMPT: &str = "Set this chain as default?"; +pub(super) const MSG_EVM_EMULATOR_PROMPT: &str = "Enable EVM emulator?"; pub(super) const MSG_WALLET_PATH_INVALID_ERR: &str = "Invalid path"; pub(super) const MSG_NUMBER_VALIDATOR_NOT_ZERO_ERR: &str = "Number is not zero"; pub(super) const MSG_NUMBER_VALIDATOR_GREATHER_THAN_ZERO_ERR: &str = @@ -184,6 +186,9 @@ pub(super) const MSG_WALLET_CREATION_VALIDATOR_ERR: &str = "Localhost wallet is not supported for external networks"; pub(super) const MSG_WALLET_TOKEN_MULTIPLIER_SETTER_NOT_FOUND: &str = "Token Multiplier Setter not found. Specify it in a wallet config"; +pub(super) const MSG_EVM_EMULATOR_HASH_MISSING_ERR: &str = + "Impossible to initialize a chain with EVM emulator: the template genesis config \ + does not contain EVM emulator hash"; /// Chain genesis related messages pub(super) const MSG_L1_SECRETS_MUST_BE_PRESENTED: &str = "L1 secret must be presented"; From a5028da65608898ad41c6a4fd5c6ec4c28a45703 Mon Sep 17 00:00:00 2001 From: Grzegorz Prusak Date: Thu, 24 Oct 2024 17:50:09 +0200 Subject: [PATCH 10/32] fix(consensus): better logging of errors (#3170) Added more debug information in case of payload mismatch. Improved formatting of errors in the node_framework. --- core/lib/dal/src/consensus_dal/mod.rs | 12 ++++----- core/node/consensus/src/en.rs | 11 +++++--- core/node/consensus/src/mn.rs | 7 ++--- core/node/consensus/src/storage/connection.rs | 2 +- core/node/consensus/src/storage/store.rs | 4 +-- core/node/node_framework/src/service/error.rs | 27 ++++++++++++++++--- core/node/node_framework/src/service/mod.rs | 2 +- 7 files changed, 42 insertions(+), 23 deletions(-) diff --git a/core/lib/dal/src/consensus_dal/mod.rs b/core/lib/dal/src/consensus_dal/mod.rs index 4516434868c4..a091421d857c 100644 --- a/core/lib/dal/src/consensus_dal/mod.rs +++ b/core/lib/dal/src/consensus_dal/mod.rs @@ -69,8 +69,8 @@ pub struct ConsensusDal<'a, 'c> { pub enum InsertCertificateError { #[error("corresponding payload is missing")] MissingPayload, - #[error("certificate doesn't match the payload")] - PayloadMismatch, + #[error("certificate doesn't match the payload, payload = {0:?}")] + PayloadMismatch(Payload), #[error(transparent)] Dal(#[from] DalError), #[error(transparent)] @@ -528,7 +528,7 @@ impl ConsensusDal<'_, '_> { .await? .ok_or(E::MissingPayload)?; if header.payload != want_payload.encode().hash() { - return Err(E::PayloadMismatch); + return Err(E::PayloadMismatch(want_payload)); } sqlx::query!( r#" @@ -634,7 +634,7 @@ impl ConsensusDal<'_, '_> { pub async fn insert_batch_certificate( &mut self, cert: &attester::BatchQC, - ) -> Result<(), InsertCertificateError> { + ) -> anyhow::Result<()> { let cfg = self .global_config() .await @@ -652,9 +652,7 @@ impl ConsensusDal<'_, '_> { .context("batch()")? 
.context("batch is missing")?, ); - if cert.message.hash != hash { - return Err(InsertCertificateError::PayloadMismatch); - } + anyhow::ensure!(cert.message.hash == hash, "hash mismatch"); cert.verify(cfg.genesis.hash(), &committee) .context("cert.verify()")?; sqlx::query!( diff --git a/core/node/consensus/src/en.rs b/core/node/consensus/src/en.rs index 8158cc5aeb26..5e9aadc8f37f 100644 --- a/core/node/consensus/src/en.rs +++ b/core/node/consensus/src/en.rs @@ -222,7 +222,11 @@ impl EN { let mut next = attester::BatchNumber(0); loop { let status = loop { - match self.fetch_attestation_status(ctx).await { + match self + .fetch_attestation_status(ctx) + .await + .wrap("fetch_attestation_status()") + { Err(err) => tracing::warn!("{err:#}"), Ok(status) => { if status.genesis != cfg.genesis.hash() { @@ -439,7 +443,7 @@ impl EN { }); while end.map_or(true, |end| queue.next() < end) { let block = recv.recv(ctx).await?.join(ctx).await?; - queue.send(block).await?; + queue.send(block).await.context("queue.send()")?; } Ok(()) }) @@ -448,7 +452,8 @@ impl EN { if first < queue.next() { self.pool .wait_for_payload(ctx, queue.next().prev().unwrap()) - .await?; + .await + .wrap("wait_for_payload()")?; } Ok(()) } diff --git a/core/node/consensus/src/mn.rs b/core/node/consensus/src/mn.rs index 5abbdc3503b3..2a280b2f1616 100644 --- a/core/node/consensus/src/mn.rs +++ b/core/node/consensus/src/mn.rs @@ -10,7 +10,7 @@ use zksync_dal::consensus_dal; use crate::{ config, registry, - storage::{ConnectionPool, InsertCertificateError, Store}, + storage::{ConnectionPool, Store}, }; /// Task running a consensus validator for the main node. @@ -179,10 +179,7 @@ async fn run_attestation_controller( .wrap("connection()")? .insert_batch_certificate(ctx, &qc) .await - .map_err(|err| match err { - InsertCertificateError::Canceled(err) => ctx::Error::Canceled(err), - InsertCertificateError::Inner(err) => ctx::Error::Internal(err.into()), - })?; + .wrap("insert_batch_certificate()")?; } } .await; diff --git a/core/node/consensus/src/storage/connection.rs b/core/node/consensus/src/storage/connection.rs index c30398498a94..6ec5794e968d 100644 --- a/core/node/consensus/src/storage/connection.rs +++ b/core/node/consensus/src/storage/connection.rs @@ -158,7 +158,7 @@ impl<'a> Connection<'a> { &mut self, ctx: &ctx::Ctx, cert: &attester::BatchQC, - ) -> Result<(), super::InsertCertificateError> { + ) -> ctx::Result<()> { Ok(ctx .wait(self.0.consensus_dal().insert_batch_certificate(cert)) .await??) diff --git a/core/node/consensus/src/storage/store.rs b/core/node/consensus/src/storage/store.rs index 4dce9041a106..154509e97b14 100644 --- a/core/node/consensus/src/storage/store.rs +++ b/core/node/consensus/src/storage/store.rs @@ -255,9 +255,7 @@ impl StoreRunner { Err(InsertCertificateError::Canceled(err)) => { return Err(ctx::Error::Canceled(err)) } - Err(InsertCertificateError::Inner(err)) => { - return Err(ctx::Error::Internal(anyhow::Error::from(err))) - } + Err(err) => Err(err).context("insert_block_certificate()")?, } } diff --git a/core/node/node_framework/src/service/error.rs b/core/node/node_framework/src/service/error.rs index 890cc6b7d4b6..66a1c13e8730 100644 --- a/core/node/node_framework/src/service/error.rs +++ b/core/node/node_framework/src/service/error.rs @@ -1,20 +1,41 @@ +use std::fmt; + use crate::{task::TaskId, wiring_layer::WiringError}; /// An error that can occur during the task lifecycle. 
#[derive(Debug, thiserror::Error)] pub enum TaskError { - #[error("Task {0} failed: {1}")] + #[error("Task {0} failed: {1:#}")] TaskFailed(TaskId, anyhow::Error), #[error("Task {0} panicked: {1}")] TaskPanicked(TaskId, String), #[error("Shutdown for task {0} timed out")] TaskShutdownTimedOut(TaskId), - #[error("Shutdown hook {0} failed: {1}")] + #[error("Shutdown hook {0} failed: {1:#}")] ShutdownHookFailed(TaskId, anyhow::Error), #[error("Shutdown hook {0} timed out")] ShutdownHookTimedOut(TaskId), } +/// Wrapper of a list of errors with a reasonable formatting. +pub struct TaskErrors(pub Vec); + +impl From> for TaskErrors { + fn from(errs: Vec) -> Self { + Self(errs) + } +} + +impl fmt::Debug for TaskErrors { + fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { + self.0 + .iter() + .map(|err| format!("{err:#}")) + .collect::>() + .fmt(f) + } +} + /// An error that can occur during the service lifecycle. #[derive(Debug, thiserror::Error)] pub enum ZkStackServiceError { @@ -25,5 +46,5 @@ pub enum ZkStackServiceError { #[error("One or more wiring layers failed to initialize: {0:?}")] Wiring(Vec<(String, WiringError)>), #[error("One or more tasks failed: {0:?}")] - Task(Vec), + Task(TaskErrors), } diff --git a/core/node/node_framework/src/service/mod.rs b/core/node/node_framework/src/service/mod.rs index b6d420093541..00e50f7dc3b6 100644 --- a/core/node/node_framework/src/service/mod.rs +++ b/core/node/node_framework/src/service/mod.rs @@ -171,7 +171,7 @@ impl ZkStackService { if self.errors.is_empty() { Ok(()) } else { - Err(ZkStackServiceError::Task(self.errors)) + Err(ZkStackServiceError::Task(self.errors.into())) } } From 1ffd22ffbe710469de0e7f27c6aae29453ec6d3e Mon Sep 17 00:00:00 2001 From: Harald Hoyer Date: Fri, 25 Oct 2024 08:44:37 +0200 Subject: [PATCH 11/32] fix(tee_prover): add prometheus pull listener (#3169) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit ## What ❔ Add a prometheus pull listener. ## Why ❔ To get the metrics out of the zk_tee_prover ## Checklist - [x] PR title corresponds to the body of PR (we generate changelog entries from PRs). - [ ] Tests for the changes have been added / updated. - [ ] Documentation comments have been added / updated. - [x] Code has been formatted via `zkstack dev fmt` and `zkstack dev lint`. 
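With the pull exporter in place, the prover's metrics become scrapeable over HTTP. A rough smoke check, assuming the exporter listens on the configured `listener_port` (3312 below is made up) and serves the conventional `/metrics` path:

```rust
// Hedged sketch: the port and the `/metrics` path are assumptions for illustration.
#[tokio::main]
async fn main() -> anyhow::Result<()> {
    let body = reqwest::get("http://127.0.0.1:3312/metrics").await?.text().await?;
    // Prometheus exposition format prefixes metric families with `# HELP` / `# TYPE` lines.
    assert!(body.lines().any(|line| line.starts_with('#')));
    println!("{}", body.lines().take(5).collect::<Vec<_>>().join("\n"));
    Ok(())
}
```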
Signed-off-by: Harald Hoyer --- core/bin/zksync_tee_prover/src/main.rs | 11 ++++++----- 1 file changed, 6 insertions(+), 5 deletions(-) diff --git a/core/bin/zksync_tee_prover/src/main.rs b/core/bin/zksync_tee_prover/src/main.rs index 70c6f888185a..aa0881011da1 100644 --- a/core/bin/zksync_tee_prover/src/main.rs +++ b/core/bin/zksync_tee_prover/src/main.rs @@ -45,11 +45,12 @@ fn main() -> anyhow::Result<()> { .add_layer(SigintHandlerLayer) .add_layer(TeeProverLayer::new(tee_prover_config)); - if let Some(gateway) = prometheus_config.gateway_endpoint() { - let exporter_config = - PrometheusExporterConfig::push(gateway, prometheus_config.push_interval()); - builder.add_layer(PrometheusExporterLayer(exporter_config)); - } + let exporter_config = if let Some(gateway) = prometheus_config.gateway_endpoint() { + PrometheusExporterConfig::push(gateway, prometheus_config.push_interval()) + } else { + PrometheusExporterConfig::pull(prometheus_config.listener_port) + }; + builder.add_layer(PrometheusExporterLayer(exporter_config)); builder.build().run(observability_guard)?; Ok(()) From 3815252790fd0e9094f308b58dfde3a8b1a82277 Mon Sep 17 00:00:00 2001 From: Alex Ostrovski Date: Fri, 25 Oct 2024 11:19:05 +0300 Subject: [PATCH 12/32] feat(metadata-calculator): Add debug endpoints for tree API (#3167) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit ## What ❔ - Adds `/debug/nodes` and `/debug/stale-keys` endpoints for tree API to debug tree-related incidents. - Allows running the tree API on an EN without a tree. ## Why ❔ Makes it easier to investigate tree-related incidents. Allowing the tree API to run without a tree potentially improves UX / DevEx. ## Checklist - [x] PR title corresponds to the body of PR (we generate changelog entries from PRs). - [x] Tests for the changes have been added / updated. - [x] Documentation comments have been added / updated. - [x] Code has been formatted via `zkstack dev fmt` and `zkstack dev lint`.
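For a sense of how the new endpoints are meant to be used, here is a client-side sketch. The URL, HTTP method and response layout are assumptions made for illustration; the `version:nibbles` key format (`"0:"` for the root of tree version 0, `"0:0"` for its first child) comes from the `NodeKey`/`Nibbles` parsing added in this patch:

```rust
// Hedged sketch of querying the debug endpoint; endpoint address and method are assumptions.
use serde_json::json;

#[tokio::main]
async fn main() -> anyhow::Result<()> {
    let client = reqwest::Client::new();
    let response = client
        .post("http://127.0.0.1:3071/debug/nodes") // hypothetical tree API address
        .json(&json!({ "keys": ["0:", "0:0"] }))
        .send()
        .await?
        .error_for_status()?;
    // Expected response shape (assumption): {"nodes": {"0:": {"raw": "0x…", "internal": {…}}}}
    println!("{}", response.text().await?);
    Ok(())
}
```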
--- core/bin/external_node/src/node_builder.rs | 39 ++++- core/bin/external_node/src/tests/mod.rs | 39 +---- core/lib/merkle_tree/src/domain.rs | 27 ++- core/lib/merkle_tree/src/errors.rs | 2 + core/lib/merkle_tree/src/lib.rs | 2 +- core/lib/merkle_tree/src/storage/rocksdb.rs | 27 ++- .../merkle_tree/src/storage/serialization.rs | 59 +++++-- core/lib/merkle_tree/src/types/internal.rs | 88 ++++++++-- core/lib/merkle_tree/src/types/mod.rs | 2 +- .../merkle_tree/tests/integration/domain.rs | 25 +++ .../src/api_server/metrics.rs | 2 + .../metadata_calculator/src/api_server/mod.rs | 161 +++++++++++++++++- .../src/api_server/tests.rs | 56 ++++++ core/node/metadata_calculator/src/helpers.rs | 56 +++++- core/node/metadata_calculator/src/lib.rs | 53 ++++++ .../layers/metadata_calculator.rs | 67 +++++++- 16 files changed, 617 insertions(+), 88 deletions(-) diff --git a/core/bin/external_node/src/node_builder.rs b/core/bin/external_node/src/node_builder.rs index 883f3f8a5fae..b7f6f8039025 100644 --- a/core/bin/external_node/src/node_builder.rs +++ b/core/bin/external_node/src/node_builder.rs @@ -11,7 +11,9 @@ use zksync_config::{ }, PostgresConfig, }; -use zksync_metadata_calculator::{MetadataCalculatorConfig, MetadataCalculatorRecoveryConfig}; +use zksync_metadata_calculator::{ + MerkleTreeReaderConfig, MetadataCalculatorConfig, MetadataCalculatorRecoveryConfig, +}; use zksync_node_api_server::web3::Namespace; use zksync_node_framework::{ implementations::layers::{ @@ -25,7 +27,7 @@ use zksync_node_framework::{ logs_bloom_backfill::LogsBloomBackfillLayer, main_node_client::MainNodeClientLayer, main_node_fee_params_fetcher::MainNodeFeeParamsFetcherLayer, - metadata_calculator::MetadataCalculatorLayer, + metadata_calculator::{MetadataCalculatorLayer, TreeApiServerLayer}, node_storage_init::{ external_node_strategy::{ExternalNodeInitStrategyLayer, SnapshotRecoveryConfig}, NodeStorageInitializerLayer, @@ -385,6 +387,29 @@ impl ExternalNodeBuilder { Ok(self) } + fn add_isolated_tree_api_layer(mut self) -> anyhow::Result { + let reader_config = MerkleTreeReaderConfig { + db_path: self.config.required.merkle_tree_path.clone(), + max_open_files: self.config.optional.merkle_tree_max_open_files, + multi_get_chunk_size: self.config.optional.merkle_tree_multi_get_chunk_size, + block_cache_capacity: self.config.optional.merkle_tree_block_cache_size(), + include_indices_and_filters_in_block_cache: self + .config + .optional + .merkle_tree_include_indices_and_filters_in_block_cache, + }; + let api_config = MerkleTreeApiConfig { + port: self + .config + .tree_component + .api_port + .context("should contain tree api port")?, + }; + self.node + .add_layer(TreeApiServerLayer::new(reader_config, api_config)); + Ok(self) + } + fn add_tx_sender_layer(mut self) -> anyhow::Result { let postgres_storage_config = PostgresStorageCachesConfig { factory_deps_cache_size: self.config.optional.factory_deps_cache_size() as u64, @@ -607,11 +632,11 @@ impl ExternalNodeBuilder { self = self.add_metadata_calculator_layer(with_tree_api)?; } Component::TreeApi => { - anyhow::ensure!( - components.contains(&Component::Tree), - "Merkle tree API cannot be started without a tree component" - ); - // Do nothing, will be handled by the `Tree` component. + if components.contains(&Component::Tree) { + // Do nothing, will be handled by the `Tree` component. 
+ } else { + self = self.add_isolated_tree_api_layer()?; + } } Component::TreeFetcher => { self = self.add_tree_data_fetcher_layer()?; diff --git a/core/bin/external_node/src/tests/mod.rs b/core/bin/external_node/src/tests/mod.rs index c5dd88748e52..59aceea819f1 100644 --- a/core/bin/external_node/src/tests/mod.rs +++ b/core/bin/external_node/src/tests/mod.rs @@ -17,7 +17,7 @@ mod utils; const SHUTDOWN_TIMEOUT: Duration = Duration::from_secs(10); const POLL_INTERVAL: Duration = Duration::from_millis(100); -#[test_casing(3, ["all", "core", "api"])] +#[test_casing(4, ["all", "core", "api", "core,tree_api"])] #[tokio::test] #[tracing::instrument] // Add args to the test logs async fn external_node_basics(components_str: &'static str) { @@ -170,40 +170,3 @@ async fn running_tree_without_core_is_not_allowed() { err ); } - -#[tokio::test] -async fn running_tree_api_without_tree_is_not_allowed() { - let _guard = zksync_vlog::ObservabilityBuilder::new().try_build().ok(); // Enable logging to simplify debugging - let (env, _env_handles) = utils::TestEnvironment::with_genesis_block("core,tree_api").await; - - let l2_client = utils::mock_l2_client(&env); - let eth_client = utils::mock_eth_client(env.config.diamond_proxy_address()); - - let node_handle = tokio::task::spawn_blocking(move || { - std::thread::spawn(move || { - let mut node = ExternalNodeBuilder::new(env.config)?; - inject_test_layers( - &mut node, - env.sigint_receiver, - env.app_health_sender, - eth_client, - l2_client, - ); - - // We're only interested in the error, so we drop the result. - node.build(env.components.0.into_iter().collect()).map(drop) - }) - .join() - .unwrap() - }); - - // Check that we cannot build the node without the core component. - let result = node_handle.await.expect("Building the node panicked"); - let err = result.expect_err("Building the node with tree api but without tree should fail"); - assert!( - err.to_string() - .contains("Merkle tree API cannot be started without a tree component"), - "Unexpected errror: {}", - err - ); -} diff --git a/core/lib/merkle_tree/src/domain.rs b/core/lib/merkle_tree/src/domain.rs index a4d577fc3ba5..bb69bda209cc 100644 --- a/core/lib/merkle_tree/src/domain.rs +++ b/core/lib/merkle_tree/src/domain.rs @@ -9,10 +9,11 @@ use crate::{ consistency::ConsistencyError, storage::{PatchSet, Patched, RocksDBWrapper}, types::{ - Key, Root, TreeEntry, TreeEntryWithProof, TreeInstruction, TreeLogEntry, ValueHash, - TREE_DEPTH, + Key, NodeKey, RawNode, Root, TreeEntry, TreeEntryWithProof, TreeInstruction, TreeLogEntry, + ValueHash, TREE_DEPTH, }, BlockOutput, HashTree, MerkleTree, MerkleTreePruner, MerkleTreePrunerHandle, NoVersionError, + PruneDatabase, }; impl TreeInstruction { @@ -444,6 +445,28 @@ impl ZkSyncTreeReader { self.0.entries_with_proofs(version, keys) } + /// Returns raw nodes for the specified `keys`. + pub fn raw_nodes(&self, keys: &[NodeKey]) -> Vec> { + let raw_nodes = self.0.db.raw_nodes(keys).into_iter(); + raw_nodes + .zip(keys) + .map(|(slice, key)| { + let slice = slice?; + Some(if key.is_empty() { + RawNode::deserialize_root(&slice) + } else { + RawNode::deserialize(&slice) + }) + }) + .collect() + } + + /// Returns raw stale keys obsoleted in the specified version of the tree. + pub fn raw_stale_keys(&self, l1_batch_number: L1BatchNumber) -> Vec { + let version = u64::from(l1_batch_number.0); + self.0.db.stale_keys(version) + } + /// Verifies consistency of the tree at the specified L1 batch number. 
/// /// # Errors diff --git a/core/lib/merkle_tree/src/errors.rs b/core/lib/merkle_tree/src/errors.rs index b8130717f93b..c187ce4977bf 100644 --- a/core/lib/merkle_tree/src/errors.rs +++ b/core/lib/merkle_tree/src/errors.rs @@ -22,6 +22,8 @@ pub enum DeserializeErrorKind { /// Bit mask specifying a child kind in an internal tree node is invalid. #[error("invalid bit mask specifying a child kind in an internal tree node")] InvalidChildKind, + #[error("data left after deserialization")] + Leftovers, /// Missing required tag in the tree manifest. #[error("missing required tag `{0}` in tree manifest")] diff --git a/core/lib/merkle_tree/src/lib.rs b/core/lib/merkle_tree/src/lib.rs index 6f9da59cf0ed..824f23eaf526 100644 --- a/core/lib/merkle_tree/src/lib.rs +++ b/core/lib/merkle_tree/src/lib.rs @@ -82,7 +82,7 @@ mod utils; pub mod unstable { pub use crate::{ errors::DeserializeError, - types::{Manifest, Node, NodeKey, ProfiledTreeOperation, Root}, + types::{Manifest, Node, NodeKey, ProfiledTreeOperation, RawNode, Root}, }; } diff --git a/core/lib/merkle_tree/src/storage/rocksdb.rs b/core/lib/merkle_tree/src/storage/rocksdb.rs index 711ccaa6137e..22335c829404 100644 --- a/core/lib/merkle_tree/src/storage/rocksdb.rs +++ b/core/lib/merkle_tree/src/storage/rocksdb.rs @@ -53,6 +53,23 @@ impl NamedColumnFamily for MerkleTreeColumnFamily { type LocalProfiledOperation = RefCell>>; +/// Unifies keys that can be used to load raw data from RocksDB. +pub(crate) trait ToDbKey: Sync { + fn to_db_key(&self) -> Vec; +} + +impl ToDbKey for NodeKey { + fn to_db_key(&self) -> Vec { + NodeKey::to_db_key(*self) + } +} + +impl ToDbKey for (NodeKey, bool) { + fn to_db_key(&self) -> Vec { + NodeKey::to_db_key(self.0) + } +} + /// Main [`Database`] implementation wrapping a [`RocksDB`] reference. /// /// # Cloning @@ -112,7 +129,7 @@ impl RocksDBWrapper { .expect("Failed reading from RocksDB") } - fn raw_nodes(&self, keys: &NodeKeys) -> Vec>> { + pub(crate) fn raw_nodes(&self, keys: &[T]) -> Vec>> { // Propagate the currently profiled operation to rayon threads used in the parallel iterator below. let profiled_operation = self .profiled_operation @@ -126,7 +143,7 @@ impl RocksDBWrapper { let _guard = profiled_operation .as_ref() .and_then(ProfiledOperation::start_profiling); - let keys = chunk.iter().map(|(key, _)| key.to_db_key()); + let keys = chunk.iter().map(ToDbKey::to_db_key); let results = self.db.multi_get_cf(MerkleTreeColumnFamily::Tree, keys); results .into_iter() @@ -144,9 +161,9 @@ impl RocksDBWrapper { // If we didn't succeed with the patch set, or the key version is old, // access the underlying storage. 
let node = if is_leaf { - LeafNode::deserialize(raw_node).map(Node::Leaf) + LeafNode::deserialize(raw_node, false).map(Node::Leaf) } else { - InternalNode::deserialize(raw_node).map(Node::Internal) + InternalNode::deserialize(raw_node, false).map(Node::Internal) }; node.map_err(|err| { err.with_context(if is_leaf { @@ -187,7 +204,7 @@ impl Database for RocksDBWrapper { let Some(raw_root) = self.raw_node(&NodeKey::empty(version).to_db_key()) else { return Ok(None); }; - Root::deserialize(&raw_root) + Root::deserialize(&raw_root, false) .map(Some) .map_err(|err| err.with_context(ErrorContext::Root(version))) } diff --git a/core/lib/merkle_tree/src/storage/serialization.rs b/core/lib/merkle_tree/src/storage/serialization.rs index f21fece94e09..d0c573fd8170 100644 --- a/core/lib/merkle_tree/src/storage/serialization.rs +++ b/core/lib/merkle_tree/src/storage/serialization.rs @@ -5,7 +5,7 @@ use std::{collections::HashMap, str}; use crate::{ errors::{DeserializeError, DeserializeErrorKind, ErrorContext}, types::{ - ChildRef, InternalNode, Key, LeafNode, Manifest, Node, Root, TreeTags, ValueHash, + ChildRef, InternalNode, Key, LeafNode, Manifest, Node, RawNode, Root, TreeTags, ValueHash, HASH_SIZE, KEY_SIZE, }, }; @@ -15,7 +15,7 @@ use crate::{ const LEB128_SIZE_ESTIMATE: usize = 3; impl LeafNode { - pub(super) fn deserialize(bytes: &[u8]) -> Result { + pub(super) fn deserialize(bytes: &[u8], strict: bool) -> Result { if bytes.len() < KEY_SIZE + HASH_SIZE { return Err(DeserializeErrorKind::UnexpectedEof.into()); } @@ -26,6 +26,10 @@ impl LeafNode { let leaf_index = leb128::read::unsigned(&mut bytes).map_err(|err| { DeserializeErrorKind::Leb128(err).with_context(ErrorContext::LeafIndex) })?; + if strict && !bytes.is_empty() { + return Err(DeserializeErrorKind::Leftovers.into()); + } + Ok(Self { full_key, value_hash, @@ -105,7 +109,7 @@ impl ChildRef { } impl InternalNode { - pub(super) fn deserialize(bytes: &[u8]) -> Result { + pub(super) fn deserialize(bytes: &[u8], strict: bool) -> Result { if bytes.len() < 4 { let err = DeserializeErrorKind::UnexpectedEof; return Err(err.with_context(ErrorContext::ChildrenMask)); @@ -134,6 +138,9 @@ impl InternalNode { } bitmap >>= 2; } + if strict && !bytes.is_empty() { + return Err(DeserializeErrorKind::Leftovers.into()); + } Ok(this) } @@ -161,8 +168,36 @@ impl InternalNode { } } +impl RawNode { + pub(crate) fn deserialize(bytes: &[u8]) -> Self { + Self { + raw: bytes.to_vec(), + leaf: LeafNode::deserialize(bytes, true).ok(), + internal: InternalNode::deserialize(bytes, true).ok(), + } + } + + pub(crate) fn deserialize_root(bytes: &[u8]) -> Self { + let root = Root::deserialize(bytes, true).ok(); + let node = root.and_then(|root| match root { + Root::Empty => None, + Root::Filled { node, .. } => Some(node), + }); + let (leaf, internal) = match node { + None => (None, None), + Some(Node::Leaf(leaf)) => (Some(leaf), None), + Some(Node::Internal(node)) => (None, Some(node)), + }; + Self { + raw: bytes.to_vec(), + leaf, + internal, + } + } +} + impl Root { - pub(super) fn deserialize(mut bytes: &[u8]) -> Result { + pub(super) fn deserialize(mut bytes: &[u8], strict: bool) -> Result { let leaf_count = leb128::read::unsigned(&mut bytes).map_err(|err| { DeserializeErrorKind::Leb128(err).with_context(ErrorContext::LeafCount) })?; @@ -172,11 +207,11 @@ impl Root { // Try both the leaf and internal node serialization; in some cases, a single leaf // may still be persisted as an internal node. 
Since serialization of an internal node with a single child // is always shorter than that a leaf, the order (first leaf, then internal node) is chosen intentionally. - LeafNode::deserialize(bytes) + LeafNode::deserialize(bytes, strict) .map(Node::Leaf) - .or_else(|_| InternalNode::deserialize(bytes).map(Node::Internal))? + .or_else(|_| InternalNode::deserialize(bytes, strict).map(Node::Internal))? } - _ => Node::Internal(InternalNode::deserialize(bytes)?), + _ => Node::Internal(InternalNode::deserialize(bytes, strict)?), }; Ok(Self::new(leaf_count, node)) } @@ -440,7 +475,7 @@ mod tests { assert_eq!(buffer[64], 42); // leaf index assert_eq!(buffer.len(), 65); - let leaf_copy = LeafNode::deserialize(&buffer).unwrap(); + let leaf_copy = LeafNode::deserialize(&buffer, true).unwrap(); assert_eq!(leaf_copy, leaf); } @@ -471,7 +506,7 @@ mod tests { let child_count = bitmap.count_ones(); assert_eq!(child_count, 2); - let node_copy = InternalNode::deserialize(&buffer).unwrap(); + let node_copy = InternalNode::deserialize(&buffer, true).unwrap(); assert_eq!(node_copy, node); } @@ -482,7 +517,7 @@ mod tests { root.serialize(&mut buffer); assert_eq!(buffer, [0]); - let root_copy = Root::deserialize(&buffer).unwrap(); + let root_copy = Root::deserialize(&buffer, true).unwrap(); assert_eq!(root_copy, root); } @@ -494,7 +529,7 @@ mod tests { root.serialize(&mut buffer); assert_eq!(buffer[0], 1); - let root_copy = Root::deserialize(&buffer).unwrap(); + let root_copy = Root::deserialize(&buffer, true).unwrap(); assert_eq!(root_copy, root); } @@ -506,7 +541,7 @@ mod tests { root.serialize(&mut buffer); assert_eq!(buffer[0], 2); - let root_copy = Root::deserialize(&buffer).unwrap(); + let root_copy = Root::deserialize(&buffer, true).unwrap(); assert_eq!(root_copy, root); } } diff --git a/core/lib/merkle_tree/src/types/internal.rs b/core/lib/merkle_tree/src/types/internal.rs index 399f6c840a3c..2db075d92212 100644 --- a/core/lib/merkle_tree/src/types/internal.rs +++ b/core/lib/merkle_tree/src/types/internal.rs @@ -2,7 +2,9 @@ //! some of these types are declared as public and can be even exported using the `unstable` module. //! Still, logically these types are private, so adding them to new public APIs etc. is a logical error. -use std::{collections::HashMap, fmt, num::NonZeroU64}; +use std::{collections::HashMap, fmt, num::NonZeroU64, str::FromStr}; + +use anyhow::Context; use crate::{ hasher::{HashTree, InternalNodeCache}, @@ -276,6 +278,34 @@ impl fmt::Debug for Nibbles { } } +impl FromStr for Nibbles { + type Err = anyhow::Error; + + fn from_str(s: &str) -> Result { + anyhow::ensure!(s.len() <= KEY_SIZE * 2, "too many nibbles"); + let mut bytes = NibblesBytes::default(); + for (i, byte) in s.bytes().enumerate() { + let nibble = match byte { + b'0'..=b'9' => byte - b'0', + b'A'..=b'F' => byte - b'A' + 10, + b'a'..=b'f' => byte - b'a' + 10, + _ => anyhow::bail!("unexpected nibble: {byte:?}"), + }; + + assert!(nibble < 16); + if i % 2 == 0 { + bytes[i / 2] = nibble * 16; + } else { + bytes[i / 2] += nibble; + } + } + Ok(Self { + nibble_count: s.len(), + bytes, + }) + } +} + /// Versioned key in a radix-16 Merkle tree. 
#[derive(Clone, Copy, PartialEq, Eq, Hash)] pub struct NodeKey { @@ -283,12 +313,31 @@ pub struct NodeKey { pub(crate) nibbles: Nibbles, } -impl fmt::Debug for NodeKey { +impl fmt::Display for NodeKey { fn fmt(&self, formatter: &mut fmt::Formatter<'_>) -> fmt::Result { write!(formatter, "{}:{}", self.version, self.nibbles) } } +impl fmt::Debug for NodeKey { + fn fmt(&self, formatter: &mut fmt::Formatter<'_>) -> fmt::Result { + fmt::Display::fmt(self, formatter) + } +} + +impl FromStr for NodeKey { + type Err = anyhow::Error; + + fn from_str(s: &str) -> Result { + let (version, nibbles) = s + .split_once(':') + .context("node key does not contain `:` delimiter")?; + let version = version.parse().context("invalid key version")?; + let nibbles = nibbles.parse().context("invalid nibbles")?; + Ok(Self { version, nibbles }) + } +} + impl NodeKey { pub(crate) const fn empty(version: u64) -> Self { Self { @@ -331,19 +380,13 @@ impl NodeKey { } } -impl fmt::Display for NodeKey { - fn fmt(&self, formatter: &mut fmt::Formatter<'_>) -> fmt::Result { - write!(formatter, "{}:{}", self.version, self.nibbles) - } -} - /// Leaf node of the tree. #[derive(Debug, Clone, Copy)] #[cfg_attr(test, derive(PartialEq, Eq))] pub struct LeafNode { - pub(crate) full_key: Key, - pub(crate) value_hash: ValueHash, - pub(crate) leaf_index: u64, + pub full_key: Key, + pub value_hash: ValueHash, + pub leaf_index: u64, } impl LeafNode { @@ -364,7 +407,7 @@ impl LeafNode { /// Reference to a child in an [`InternalNode`]. #[derive(Debug, Clone, Copy)] #[cfg_attr(test, derive(PartialEq, Eq))] -pub(crate) struct ChildRef { +pub struct ChildRef { pub hash: ValueHash, pub version: u64, pub is_leaf: bool, @@ -449,7 +492,7 @@ impl InternalNode { self.cache.get_or_insert(cache) } - pub(crate) fn children(&self) -> impl Iterator + '_ { + pub fn children(&self) -> impl Iterator + '_ { self.children.iter() } @@ -510,6 +553,17 @@ impl From for Node { } } +/// Raw node fetched from a database. +#[derive(Debug)] +pub struct RawNode { + /// Bytes for a serialized node. + pub raw: Vec, + /// Leaf if a node can be deserialized into it. + pub leaf: Option, + /// Internal node if a node can be deserialized into it. + pub internal: Option, +} + /// Root node of the tree. Besides a [`Node`], contains the general information about the tree /// (e.g., the number of leaves). 
#[derive(Debug, Clone)] @@ -614,15 +668,23 @@ mod tests { fn nibbles_and_node_key_display() { let nibbles = Nibbles::new(&TEST_KEY, 5); assert_eq!(nibbles.to_string(), "deadb"); + let restored: Nibbles = nibbles.to_string().parse().unwrap(); + assert_eq!(restored, nibbles); let nibbles = Nibbles::new(&TEST_KEY, 6); assert_eq!(nibbles.to_string(), "deadbe"); + let restored: Nibbles = nibbles.to_string().parse().unwrap(); + assert_eq!(restored, nibbles); let nibbles = Nibbles::new(&TEST_KEY, 9); assert_eq!(nibbles.to_string(), "deadbeef0"); + let restored: Nibbles = nibbles.to_string().parse().unwrap(); + assert_eq!(restored, nibbles); let node_key = nibbles.with_version(3); assert_eq!(node_key.to_string(), "3:deadbeef0"); + let restored: NodeKey = node_key.to_string().parse().unwrap(); + assert_eq!(restored, node_key); } #[test] diff --git a/core/lib/merkle_tree/src/types/mod.rs b/core/lib/merkle_tree/src/types/mod.rs index 807ae0238769..63db4b318b27 100644 --- a/core/lib/merkle_tree/src/types/mod.rs +++ b/core/lib/merkle_tree/src/types/mod.rs @@ -6,7 +6,7 @@ pub(crate) use self::internal::{ ChildRef, Nibbles, NibblesBytes, StaleNodeKey, TreeTags, HASH_SIZE, KEY_SIZE, TREE_DEPTH, }; pub use self::internal::{ - InternalNode, LeafNode, Manifest, Node, NodeKey, ProfiledTreeOperation, Root, + InternalNode, LeafNode, Manifest, Node, NodeKey, ProfiledTreeOperation, RawNode, Root, }; mod internal; diff --git a/core/lib/merkle_tree/tests/integration/domain.rs b/core/lib/merkle_tree/tests/integration/domain.rs index abd3dbbcd3f3..fa7ec4cfde30 100644 --- a/core/lib/merkle_tree/tests/integration/domain.rs +++ b/core/lib/merkle_tree/tests/integration/domain.rs @@ -68,6 +68,31 @@ fn basic_workflow() { tree.verify_consistency(L1BatchNumber(0)).unwrap(); assert_eq!(tree.root_hash(), expected_root_hash); assert_eq!(tree.next_l1_batch_number(), L1BatchNumber(1)); + + let keys = ["0:", "0:0"].map(|key| key.parse().unwrap()); + let raw_nodes = tree.reader().raw_nodes(&keys); + assert_eq!(raw_nodes.len(), 2); + let raw_root = raw_nodes[0].as_ref().unwrap(); + assert!(!raw_root.raw.is_empty()); + assert!(raw_root.internal.is_some()); + assert!(raw_root.leaf.is_none()); + + let raw_node = raw_nodes[1].as_ref().unwrap(); + assert!(!raw_node.raw.is_empty()); + assert!(raw_node.leaf.is_none()); + let raw_node = raw_node.internal.as_ref().unwrap(); + + let (nibble, _) = raw_node + .children() + .find(|(_, child_ref)| child_ref.is_leaf) + .unwrap(); + let leaf_key = format!("0:0{nibble:x}").parse().unwrap(); + let raw_nodes = tree.reader().raw_nodes(&[leaf_key]); + assert_eq!(raw_nodes.len(), 1); + let raw_leaf = raw_nodes.into_iter().next().unwrap().expect("no leaf"); + assert!(!raw_leaf.raw.is_empty()); + assert!(raw_leaf.leaf.is_some()); + assert!(raw_leaf.internal.is_none()); } #[test] diff --git a/core/node/metadata_calculator/src/api_server/metrics.rs b/core/node/metadata_calculator/src/api_server/metrics.rs index d185861d07c6..92f948e09702 100644 --- a/core/node/metadata_calculator/src/api_server/metrics.rs +++ b/core/node/metadata_calculator/src/api_server/metrics.rs @@ -9,6 +9,8 @@ use vise::{Buckets, EncodeLabelSet, EncodeLabelValue, Family, Histogram, Metrics pub(super) enum MerkleTreeApiMethod { Info, GetProofs, + GetNodes, + GetStaleKeys, } /// Metrics for Merkle tree API. 
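To illustrate the string format parsed above, here is a minimal self-contained sketch of the hex-nibble packing (a `Vec<u8>` stands in for the fixed-size `NibblesBytes` array, and `pack_nibbles` is an illustrative helper rather than the production API):

```rust
// Sketch of the hex-nibble packing used by the `FromStr` impl for `Nibbles` above.
fn pack_nibbles(s: &str) -> Result<(usize, Vec<u8>), String> {
    let mut bytes = vec![0_u8; (s.len() + 1) / 2];
    for (i, ch) in s.bytes().enumerate() {
        let nibble = match ch {
            b'0'..=b'9' => ch - b'0',
            b'a'..=b'f' => ch - b'a' + 10,
            b'A'..=b'F' => ch - b'A' + 10,
            _ => return Err(format!("unexpected nibble: {ch:?}")),
        };
        if i % 2 == 0 {
            bytes[i / 2] = nibble * 16; // even position: high half-byte
        } else {
            bytes[i / 2] += nibble; // odd position: low half-byte
        }
    }
    Ok((s.len(), bytes))
}

fn main() {
    // "deadb" has 5 nibbles and packs into [0xde, 0xad, 0xb0]; a node key such as
    // "3:deadbeef0" is this format prefixed with a version and a `:` delimiter.
    assert_eq!(pack_nibbles("deadb").unwrap(), (5, vec![0xde, 0xad, 0xb0]));
}
```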
diff --git a/core/node/metadata_calculator/src/api_server/mod.rs b/core/node/metadata_calculator/src/api_server/mod.rs index 6f46e8aeea81..4612d859a3dd 100644 --- a/core/node/metadata_calculator/src/api_server/mod.rs +++ b/core/node/metadata_calculator/src/api_server/mod.rs @@ -1,6 +1,6 @@ //! Primitive Merkle tree API used internally to fetch proofs. -use std::{fmt, future::Future, net::SocketAddr, pin::Pin}; +use std::{collections::HashMap, fmt, future::Future, net::SocketAddr, pin::Pin}; use anyhow::Context as _; use async_trait::async_trait; @@ -10,12 +10,16 @@ use axum::{ response::{IntoResponse, Response}, routing, Json, Router, }; -use serde::{Deserialize, Serialize}; +use serde::{de, Deserialize, Deserializer, Serialize, Serializer}; use tokio::sync::watch; use zksync_crypto_primitives::hasher::blake2::Blake2Hasher; use zksync_health_check::{CheckHealth, Health, HealthStatus}; -use zksync_merkle_tree::NoVersionError; -use zksync_types::{L1BatchNumber, H256, U256}; +use zksync_merkle_tree::{ + unstable::{NodeKey, RawNode}, + NoVersionError, ValueHash, +}; +use zksync_types::{web3, L1BatchNumber, H256, U256}; +use zksync_utils::u256_to_h256; use self::metrics::{MerkleTreeApiMethod, API_METRICS}; use crate::{AsyncTreeReader, LazyAsyncTreeReader, MerkleTreeInfo}; @@ -77,6 +81,117 @@ impl TreeEntryWithProof { } } +#[derive(Debug, PartialEq, Eq, Hash)] +struct HexNodeKey(NodeKey); + +impl Serialize for HexNodeKey { + fn serialize(&self, serializer: S) -> Result { + serializer.serialize_str(&self.0.to_string()) + } +} + +impl<'de> Deserialize<'de> for HexNodeKey { + fn deserialize>(deserializer: D) -> Result { + struct HexNodeKeyVisitor; + + impl de::Visitor<'_> for HexNodeKeyVisitor { + type Value = HexNodeKey; + + fn expecting(&self, formatter: &mut fmt::Formatter<'_>) -> fmt::Result { + formatter.write_str("hex-encoded versioned key like `123:c0ffee`") + } + + fn visit_str(self, v: &str) -> Result { + v.parse().map(HexNodeKey).map_err(de::Error::custom) + } + } + + deserializer.deserialize_str(HexNodeKeyVisitor) + } +} + +#[derive(Debug, Serialize)] +struct ApiLeafNode { + full_key: H256, + value_hash: H256, + leaf_index: u64, +} + +#[derive(Debug, Serialize)] +struct ApiChildRef { + hash: ValueHash, + version: u64, + is_leaf: bool, +} + +#[derive(Debug, Serialize)] +#[serde(transparent)] +struct ApiInternalNode(HashMap); + +#[derive(Debug, Serialize)] +struct ApiRawNode { + raw: web3::Bytes, + #[serde(skip_serializing_if = "Option::is_none")] + leaf: Option, + #[serde(skip_serializing_if = "Option::is_none")] + internal: Option, +} + +impl From for ApiRawNode { + fn from(node: RawNode) -> Self { + Self { + raw: web3::Bytes(node.raw), + leaf: node.leaf.map(|leaf| ApiLeafNode { + full_key: u256_to_h256(leaf.full_key), + value_hash: leaf.value_hash, + leaf_index: leaf.leaf_index, + }), + internal: node.internal.map(|internal| { + ApiInternalNode( + internal + .children() + .map(|(nibble, child_ref)| { + let nibble = if nibble < 10 { + b'0' + nibble + } else { + b'a' + nibble - 10 + }; + ( + char::from(nibble), + ApiChildRef { + hash: child_ref.hash, + version: child_ref.version, + is_leaf: child_ref.is_leaf, + }, + ) + }) + .collect(), + ) + }), + } + } +} + +#[derive(Debug, Deserialize)] +struct TreeNodesRequest { + keys: Vec, +} + +#[derive(Debug, Serialize)] +struct TreeNodesResponse { + nodes: HashMap, +} + +#[derive(Debug, Deserialize)] +struct StaleKeysRequest { + l1_batch_number: L1BatchNumber, +} + +#[derive(Debug, Serialize)] +struct StaleKeysResponse { + stale_keys: Vec, +} + 
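+// Request shapes for the debug routes wired up below (cf. the integration test):
+// `/debug/nodes` takes `{"keys": ["0:", "0:0"]}` and `/debug/stale-keys` takes
+// `{"l1_batch_number": 1}`; keys use the `123:c0ffee`-style format described above.
+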
/// Server-side tree API error. #[derive(Debug)] enum TreeApiServerError { @@ -343,6 +458,35 @@ impl AsyncTreeReader { Ok(Json(response)) } + async fn get_nodes_handler( + State(this): State, + Json(request): Json, + ) -> Json { + let latency = API_METRICS.latency[&MerkleTreeApiMethod::GetNodes].start(); + let keys: Vec<_> = request.keys.iter().map(|key| key.0).collect(); + let nodes = this.clone().raw_nodes(keys).await; + let nodes = request + .keys + .into_iter() + .zip(nodes) + .filter_map(|(key, node)| Some((key, node?.into()))) + .collect(); + let response = TreeNodesResponse { nodes }; + latency.observe(); + Json(response) + } + + async fn get_stale_keys_handler( + State(this): State, + Json(request): Json, + ) -> Json { + let latency = API_METRICS.latency[&MerkleTreeApiMethod::GetStaleKeys].start(); + let stale_keys = this.clone().raw_stale_keys(request.l1_batch_number).await; + let stale_keys = stale_keys.into_iter().map(HexNodeKey).collect(); + latency.observe(); + Json(StaleKeysResponse { stale_keys }) + } + async fn create_api_server( self, bind_address: &SocketAddr, @@ -353,6 +497,11 @@ impl AsyncTreeReader { let app = Router::new() .route("/", routing::get(Self::info_handler)) .route("/proofs", routing::post(Self::get_proofs_handler)) + .route("/debug/nodes", routing::post(Self::get_nodes_handler)) + .route( + "/debug/stale-keys", + routing::post(Self::get_stale_keys_handler), + ) .with_state(self); let listener = tokio::net::TcpListener::bind(bind_address) @@ -369,8 +518,8 @@ impl AsyncTreeReader { } tracing::info!("Stop signal received, Merkle tree API server is shutting down"); }) - .await - .context("Merkle tree API server failed")?; + .await + .context("Merkle tree API server failed")?; tracing::info!("Merkle tree API server shut down"); Ok(()) diff --git a/core/node/metadata_calculator/src/api_server/tests.rs b/core/node/metadata_calculator/src/api_server/tests.rs index 42a3152e6b53..815522a4cd8e 100644 --- a/core/node/metadata_calculator/src/api_server/tests.rs +++ b/core/node/metadata_calculator/src/api_server/tests.rs @@ -72,11 +72,67 @@ async fn merkle_tree_api() { assert_eq!(err.version_count, 6); assert_eq!(err.missing_version, 10); + let raw_nodes_response = api_client + .inner + .post(format!("http://{local_addr}/debug/nodes")) + .json(&serde_json::json!({ "keys": ["0:", "0:0"] })) + .send() + .await + .unwrap() + .error_for_status() + .unwrap(); + let raw_nodes_response: serde_json::Value = raw_nodes_response.json().await.unwrap(); + assert_raw_nodes_response(&raw_nodes_response); + + let raw_stale_keys_response = api_client + .inner + .post(format!("http://{local_addr}/debug/stale-keys")) + .json(&serde_json::json!({ "l1_batch_number": 1 })) + .send() + .await + .unwrap() + .error_for_status() + .unwrap(); + let raw_stale_keys_response: serde_json::Value = raw_stale_keys_response.json().await.unwrap(); + assert_raw_stale_keys_response(&raw_stale_keys_response); + // Stop the calculator and the tree API server. 
stop_sender.send_replace(true); api_server_task.await.unwrap().unwrap(); } +fn assert_raw_nodes_response(response: &serde_json::Value) { + let response = response.as_object().expect("not an object"); + let response = response["nodes"].as_object().expect("not an object"); + let root = response["0:"].as_object().expect("not an object"); + assert!( + root.len() == 2 && root.contains_key("internal") && root.contains_key("raw"), + "{root:#?}" + ); + let root = root["internal"].as_object().expect("not an object"); + for key in root.keys() { + assert_eq!(key.len(), 1, "{key}"); + let key = key.as_bytes()[0]; + assert_matches!(key, b'0'..=b'9' | b'a'..=b'f'); + } + + let node = response["0:0"].as_object().expect("not an object"); + assert!( + node.len() == 2 && node.contains_key("internal") && node.contains_key("raw"), + "{node:#?}" + ); +} + +fn assert_raw_stale_keys_response(response: &serde_json::Value) { + let response = response.as_object().expect("not an object"); + let stale_keys = response["stale_keys"].as_array().expect("not an array"); + assert!(!stale_keys.is_empty()); // At least the root is always obsoleted + for stale_key in stale_keys { + let stale_key = stale_key.as_str().expect("not a string"); + stale_key.parse::().unwrap(); + } +} + #[tokio::test] async fn api_client_connection_error() { // Use an address that will definitely fail on a timeout. diff --git a/core/node/metadata_calculator/src/helpers.rs b/core/node/metadata_calculator/src/helpers.rs index b6989afb179f..3f370afaf77e 100644 --- a/core/node/metadata_calculator/src/helpers.rs +++ b/core/node/metadata_calculator/src/helpers.rs @@ -22,6 +22,7 @@ use zksync_health_check::{CheckHealth, Health, HealthStatus, ReactiveHealthCheck use zksync_merkle_tree::{ domain::{TreeMetadata, ZkSyncTree, ZkSyncTreeReader}, recovery::{MerkleTreeRecovery, PersistenceThreadHandle}, + unstable::{NodeKey, RawNode}, Database, Key, MerkleTreeColumnFamily, NoVersionError, RocksDBWrapper, TreeEntry, TreeEntryWithProof, TreeInstruction, }; @@ -35,7 +36,7 @@ use zksync_types::{ use super::{ metrics::{LoadChangesStage, TreeUpdateStage, METRICS}, pruning::PruningHandles, - MetadataCalculatorConfig, MetadataCalculatorRecoveryConfig, + MerkleTreeReaderConfig, MetadataCalculatorConfig, MetadataCalculatorRecoveryConfig, }; /// General information about the Merkle tree. @@ -176,6 +177,40 @@ fn create_db_sync(config: &MetadataCalculatorConfig) -> anyhow::Result anyhow::Result { + tokio::task::spawn_blocking(move || { + let MerkleTreeReaderConfig { + db_path, + max_open_files, + multi_get_chunk_size, + block_cache_capacity, + include_indices_and_filters_in_block_cache, + } = config; + + tracing::info!( + "Initializing Merkle tree database at `{db_path}` (max open files: {max_open_files:?}) with {multi_get_chunk_size} multi-get chunk size, \ + {block_cache_capacity}B block cache (indices & filters included: {include_indices_and_filters_in_block_cache:?})" + ); + let mut db = RocksDB::with_options( + db_path.as_ref(), + RocksDBOptions { + block_cache_capacity: Some(block_cache_capacity), + include_indices_and_filters_in_block_cache, + max_open_files, + ..RocksDBOptions::default() + } + )?; + if cfg!(test) { + db = db.with_sync_writes(); + } + Ok(RocksDBWrapper::from(db)) + }) + .await + .context("panicked creating Merkle tree RocksDB")? +} + /// Wrapper around the "main" tree implementation used by [`MetadataCalculator`]. /// /// Async methods provided by this wrapper are not cancel-safe! 
This is probably not an issue; @@ -307,6 +342,13 @@ pub struct AsyncTreeReader { } impl AsyncTreeReader { + pub(super) fn new(db: RocksDBWrapper, mode: MerkleTreeMode) -> anyhow::Result { + Ok(Self { + inner: ZkSyncTreeReader::new(db)?, + mode, + }) + } + fn downgrade(&self) -> WeakAsyncTreeReader { WeakAsyncTreeReader { db: self.inner.db().clone().into_inner().downgrade(), @@ -366,6 +408,18 @@ impl AsyncTreeReader { .await .unwrap() } + + pub(crate) async fn raw_nodes(self, keys: Vec) -> Vec> { + tokio::task::spawn_blocking(move || self.inner.raw_nodes(&keys)) + .await + .unwrap() + } + + pub(crate) async fn raw_stale_keys(self, l1_batch_number: L1BatchNumber) -> Vec { + tokio::task::spawn_blocking(move || self.inner.raw_stale_keys(l1_batch_number)) + .await + .unwrap() + } } /// Version of async tree reader that holds a weak reference to RocksDB. Used in [`MerkleTreeHealthCheck`]. diff --git a/core/node/metadata_calculator/src/lib.rs b/core/node/metadata_calculator/src/lib.rs index 451090694b2c..5c64330a0e7d 100644 --- a/core/node/metadata_calculator/src/lib.rs +++ b/core/node/metadata_calculator/src/lib.rs @@ -27,6 +27,7 @@ pub use self::{ helpers::{AsyncTreeReader, LazyAsyncTreeReader, MerkleTreeInfo}, pruning::MerkleTreePruningTask, }; +use crate::helpers::create_readonly_db; pub mod api_server; mod helpers; @@ -264,3 +265,55 @@ impl MetadataCalculator { .await } } + +/// Configuration of [`TreeReaderTask`]. +#[derive(Debug, Clone)] +pub struct MerkleTreeReaderConfig { + /// Filesystem path to the RocksDB instance that stores the tree. + pub db_path: String, + /// Maximum number of files concurrently opened by RocksDB. Useful to fit into OS limits; can be used + /// as a rudimentary way to control RAM usage of the tree. + pub max_open_files: Option, + /// Chunk size for multi-get operations. Can speed up loading data for the Merkle tree on some environments, + /// but the effects vary wildly depending on the setup (e.g., the filesystem used). + pub multi_get_chunk_size: usize, + /// Capacity of RocksDB block cache in bytes. Reasonable values range from ~100 MiB to several GB. + pub block_cache_capacity: usize, + /// If specified, RocksDB indices and Bloom filters will be managed by the block cache, rather than + /// being loaded entirely into RAM on the RocksDB initialization. The block cache capacity should be increased + /// correspondingly; otherwise, RocksDB performance can significantly degrade. + pub include_indices_and_filters_in_block_cache: bool, +} + +/// Alternative to [`MetadataCalculator`] that provides readonly access to the Merkle tree. +#[derive(Debug)] +pub struct TreeReaderTask { + config: MerkleTreeReaderConfig, + tree_reader: watch::Sender>, +} + +impl TreeReaderTask { + /// Creates a new task with the provided configuration. + pub fn new(config: MerkleTreeReaderConfig) -> Self { + Self { + config, + tree_reader: watch::channel(None).0, + } + } + + /// Returns a reference to the tree reader. + pub fn tree_reader(&self) -> LazyAsyncTreeReader { + LazyAsyncTreeReader(self.tree_reader.subscribe()) + } + + /// Runs this task. The task exits on error, or when the tree reader is successfully initialized. + pub async fn run(self, mut stop_receiver: watch::Receiver) -> anyhow::Result<()> { + let db = tokio::select! 
{ + db_result = create_readonly_db(self.config) => db_result?, + _ = stop_receiver.changed() => return Ok(()), + }; + let reader = AsyncTreeReader::new(db, MerkleTreeMode::Lightweight)?; + self.tree_reader.send_replace(Some(reader)); + Ok(()) + } +} diff --git a/core/node/node_framework/src/implementations/layers/metadata_calculator.rs b/core/node/node_framework/src/implementations/layers/metadata_calculator.rs index 827ec69d9427..4092ee6dcd56 100644 --- a/core/node/node_framework/src/implementations/layers/metadata_calculator.rs +++ b/core/node/node_framework/src/implementations/layers/metadata_calculator.rs @@ -7,7 +7,8 @@ use std::{ use anyhow::Context as _; use zksync_config::configs::{api::MerkleTreeApiConfig, database::MerkleTreeMode}; use zksync_metadata_calculator::{ - LazyAsyncTreeReader, MerkleTreePruningTask, MetadataCalculator, MetadataCalculatorConfig, + LazyAsyncTreeReader, MerkleTreePruningTask, MerkleTreeReaderConfig, MetadataCalculator, + MetadataCalculatorConfig, TreeReaderTask, }; use zksync_storage::RocksDB; @@ -19,7 +20,7 @@ use crate::{ web3_api::TreeApiClientResource, }, service::{ShutdownHook, StopReceiver}, - task::{Task, TaskId}, + task::{Task, TaskId, TaskKind}, wiring_layer::{WiringError, WiringLayer}, FromContext, IntoContext, }; @@ -205,3 +206,65 @@ impl Task for MerkleTreePruningTask { (*self).run(stop_receiver.0).await } } + +/// Mutually exclusive with [`MetadataCalculatorLayer`]. +#[derive(Debug)] +pub struct TreeApiServerLayer { + config: MerkleTreeReaderConfig, + api_config: MerkleTreeApiConfig, +} + +impl TreeApiServerLayer { + pub fn new(config: MerkleTreeReaderConfig, api_config: MerkleTreeApiConfig) -> Self { + Self { config, api_config } + } +} + +#[derive(Debug, IntoContext)] +#[context(crate = crate)] +pub struct TreeApiServerOutput { + tree_api_client: TreeApiClientResource, + #[context(task)] + tree_reader_task: TreeReaderTask, + #[context(task)] + tree_api_task: TreeApiTask, +} + +#[async_trait::async_trait] +impl WiringLayer for TreeApiServerLayer { + type Input = (); + type Output = TreeApiServerOutput; + + fn layer_name(&self) -> &'static str { + "tree_api_server" + } + + async fn wire(self, (): Self::Input) -> Result { + let tree_reader_task = TreeReaderTask::new(self.config); + let bind_addr = (Ipv4Addr::UNSPECIFIED, self.api_config.port).into(); + let tree_api_task = TreeApiTask { + bind_addr, + tree_reader: tree_reader_task.tree_reader(), + }; + Ok(TreeApiServerOutput { + tree_api_client: TreeApiClientResource(Arc::new(tree_reader_task.tree_reader())), + tree_api_task, + tree_reader_task, + }) + } +} + +#[async_trait::async_trait] +impl Task for TreeReaderTask { + fn kind(&self) -> TaskKind { + TaskKind::OneshotTask + } + + fn id(&self) -> TaskId { + "merkle_tree_reader_task".into() + } + + async fn run(self: Box, stop_receiver: StopReceiver) -> anyhow::Result<()> { + (*self).run(stop_receiver.0).await + } +} From c76da16efc769243a02c6e859376182d95ab941d Mon Sep 17 00:00:00 2001 From: Lyova Potyomkin Date: Fri, 25 Oct 2024 13:43:56 +0300 Subject: [PATCH 13/32] fix: extend allowed storage slots for validation as per EIP-7562 (#3166) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit ## What ❔ [EIP-7562](https://eips.ethereum.org/EIPS/eip-7562#validation-rules) allows reading `keccak256(address || x) + n` where `x` is `bytes32` and `n` is `0..128`. This PR adds support for the `+ n` as we didn't have it before. 
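For illustration, a minimal self-contained sketch of the window check this rule implies (using `u128` in place of `U256` and an illustrative `is_allowed_slot` helper; the actual change below keeps the trusted slots in a `BTreeSet` and queries the `[key - 127, key]` range):

```rust
use std::collections::BTreeSet;

const MAX_ALLOWED_SLOT_OFFSET: u128 = 127;

/// A slot `key` is allowed if some trusted slot `keccak256(address || x)` lies in
/// `[key - 127, key]`, i.e. `key = trusted + n` with `n` in `0..128`.
fn is_allowed_slot(trusted: &BTreeSet<u128>, key: u128) -> bool {
    let from = key.saturating_sub(MAX_ALLOWED_SLOT_OFFSET);
    trusted.range(from..=key).next().is_some()
}

fn main() {
    let trusted: BTreeSet<u128> = [1_000].into_iter().collect();
    assert!(is_allowed_slot(&trusted, 1_000)); // n = 0
    assert!(is_allowed_slot(&trusted, 1_127)); // n = 127
    assert!(!is_allowed_slot(&trusted, 1_128)); // n = 128 is out of range
}
```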
## Why ❔ To support reading larger-than-1-slot structs from mappings, during validation ## Checklist - [ ] PR title corresponds to the body of PR (we generate changelog entries from PRs). - [ ] Tests for the changes have been added / updated. - [ ] Documentation comments have been added / updated. - [ ] Code has been formatted via `zkstack dev fmt` and `zkstack dev lint`. --------- Co-authored-by: Vlad Bochok <41153528+vladbochok@users.noreply.github.com> --- core/lib/multivm/src/tracers/validator/mod.rs | 22 ++++++++++++++----- etc/env/base/chain.toml | 4 ++-- etc/env/base/contracts.toml | 4 ++-- 3 files changed, 21 insertions(+), 9 deletions(-) diff --git a/core/lib/multivm/src/tracers/validator/mod.rs b/core/lib/multivm/src/tracers/validator/mod.rs index a1573f24c668..057551a9efeb 100644 --- a/core/lib/multivm/src/tracers/validator/mod.rs +++ b/core/lib/multivm/src/tracers/validator/mod.rs @@ -1,4 +1,8 @@ -use std::{collections::HashSet, marker::PhantomData, sync::Arc}; +use std::{ + collections::{BTreeSet, HashSet}, + marker::PhantomData, + sync::Arc, +}; use once_cell::sync::OnceCell; use zksync_system_constants::{ @@ -8,7 +12,7 @@ use zksync_system_constants::{ use zksync_types::{ vm::VmVersion, web3::keccak256, AccountTreeId, Address, StorageKey, H256, U256, }; -use zksync_utils::{be_bytes_to_safe_address, u256_to_account_address, u256_to_h256}; +use zksync_utils::{address_to_u256, be_bytes_to_safe_address, u256_to_h256}; use self::types::{NewTrustedValidationItems, ValidationTracerMode}; use crate::{ @@ -32,7 +36,7 @@ mod vm_virtual_blocks; #[derive(Debug, Clone)] pub struct ValidationTracer { validation_mode: ValidationTracerMode, - auxilary_allowed_slots: HashSet, + auxilary_allowed_slots: BTreeSet, user_address: Address, #[allow(dead_code)] @@ -51,6 +55,8 @@ pub struct ValidationTracer { type ValidationRoundResult = Result; impl ValidationTracer { + const MAX_ALLOWED_SLOT_OFFSET: u32 = 127; + pub fn new( params: ValidationParams, vm_version: VmVersion, @@ -131,9 +137,15 @@ impl ValidationTracer { } // The user is allowed to touch its own slots or slots semantically related to him. 
+ let from = u256_to_h256(key.saturating_sub(Self::MAX_ALLOWED_SLOT_OFFSET.into())); + let to = u256_to_h256(key); let valid_users_slot = address == self.user_address - || u256_to_account_address(&key) == self.user_address - || self.auxilary_allowed_slots.contains(&u256_to_h256(key)); + || key == address_to_u256(&self.user_address) + || self + .auxilary_allowed_slots + .range(from..=to) + .next() + .is_some(); if valid_users_slot { return true; } diff --git a/etc/env/base/chain.toml b/etc/env/base/chain.toml index 903696e3a819..6d1fdae53cee 100644 --- a/etc/env/base/chain.toml +++ b/etc/env/base/chain.toml @@ -90,8 +90,8 @@ fee_model_version = "V2" validation_computational_gas_limit = 300000 save_call_traces = true -bootloader_hash = "0x010008c37ecadea8b003884eb9d81fdfb7161b3b309504e5318f15da19c500d8" -default_aa_hash = "0x0100055da70d970f98ca4677a4b2fcecef5354f345cc5c6d13a78339e5fd87a9" +bootloader_hash = "0x010008c3be57ae5800e077b6c2056d9d75ad1a7b4f0ce583407961cc6fe0b678" +default_aa_hash = "0x0100055dba11508480be023137563caec69debc85f826cb3a4b68246a7cabe30" protective_reads_persistence_enabled = false diff --git a/etc/env/base/contracts.toml b/etc/env/base/contracts.toml index dbadbbc2c776..735da993058b 100644 --- a/etc/env/base/contracts.toml +++ b/etc/env/base/contracts.toml @@ -26,8 +26,8 @@ RECURSION_NODE_LEVEL_VK_HASH = "0x1186ec268d49f1905f8d9c1e9d39fc33e98c74f91d91a2 RECURSION_LEAF_LEVEL_VK_HASH = "0x101e08b00193e529145ee09823378ef51a3bc8966504064f1f6ba3f1ba863210" RECURSION_CIRCUITS_SET_VKS_HASH = "0x18c1639094f58177409186e8c48d9f577c9410901d2f1d486b3e7d6cf553ae4c" GENESIS_TX_HASH = "0xb99ebfea46cbe05a21cd80fe5597d97b204befc52a16303f579c607dc1ac2e2e" -GENESIS_ROOT = "0x28a7e67393021f957572495f8fdadc2c477ae3f4f413ae18c16cff6ee65680e2" -GENESIS_BATCH_COMMITMENT = "0xc57085380434970021d87774b377ce1bb12f5b6064af11595e70011965747def" +GENESIS_ROOT = "0x7275936e5a0063b159d5d22734931fea07871e8d57e564d61ef56e4a6ee23e5c" +GENESIS_BATCH_COMMITMENT = "0xf5f9a5abe62e8a6e0cb2d34d27435c3e5a8fbd7e2e54ca1d108fc58cb86c708a" PRIORITY_TX_MAX_GAS_LIMIT = 72000000 DEPLOY_L2_BRIDGE_COUNTERPART_GAS_LIMIT = 10000000 GENESIS_ROLLUP_LEAF_INDEX = "54" From 8e75d4b812b21bc26e2c38ceeb711a8a530d7bc2 Mon Sep 17 00:00:00 2001 From: Alex Ostrovski Date: Fri, 25 Oct 2024 16:31:15 +0300 Subject: [PATCH 14/32] feat(api): Integrate new VM into API server (no tracers) (#3033) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit ## What ❔ Integrates the new VM into API server for 3 use cases: gas estimation, calls, and tx execution (without the validation stage). ## Why ❔ These use cases do not require tracers and could benefit from faster VM execution (particularly gas estimation, which runs the VM multiple times). ## Checklist - [x] PR title corresponds to the body of PR (we generate changelog entries from PRs). - [x] Tests for the changes have been added / updated. - [x] Documentation comments have been added / updated. - [x] Code has been formatted via `zk_supervisor fmt` and `zk_supervisor lint`. 
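For reference, a condensed sketch of the fallback rule this introduces (the enum and `select_fast_vm_mode` signature here are simplified stand-ins for the real method on `MainOneshotExecutor`): the configured fast mode is only used when call tracing is off and the protocol version is supported by the fast VM; otherwise the legacy VM handles the request.

```rust
// Simplified model of the VM mode selection for oneshot execution.
#[derive(Clone, Copy, Debug, PartialEq, Eq)]
enum FastVmMode {
    Old,
    New,
    Shadow,
}

fn select_fast_vm_mode(
    configured: FastVmMode,
    trace_calls: bool,
    version_supported: bool,
) -> FastVmMode {
    if trace_calls || !version_supported {
        // Call tracing and old protocol versions are not supported by the fast VM.
        FastVmMode::Old
    } else {
        configured
    }
}

fn main() {
    assert_eq!(select_fast_vm_mode(FastVmMode::New, false, true), FastVmMode::New);
    assert_eq!(select_fast_vm_mode(FastVmMode::New, true, true), FastVmMode::Old);
    assert_eq!(select_fast_vm_mode(FastVmMode::Shadow, false, false), FastVmMode::Old);
}
```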
--------- Co-authored-by: Joonatan Saarhelo --- .github/workflows/ci-core-reusable.yml | 10 +- Cargo.lock | 1 + core/bin/zksync_server/src/node_builder.rs | 8 +- core/lib/config/src/configs/experimental.rs | 5 + core/lib/config/src/testonly.rs | 1 + core/lib/env_config/src/vm_runner.rs | 2 + .../src/versions/testonly/l1_tx_execution.rs | 49 ++- .../versions/vm_fast/tests/l1_tx_execution.rs | 8 +- core/lib/multivm/src/versions/vm_fast/vm.rs | 27 +- .../vm_latest/tests/l1_tx_execution.rs | 8 +- core/lib/multivm/src/vm_instance.rs | 2 +- core/lib/protobuf_config/src/experimental.rs | 17 +- .../src/proto/config/experimental.proto | 1 + core/lib/vm_executor/Cargo.toml | 1 + core/lib/vm_executor/src/oneshot/metrics.rs | 16 +- core/lib/vm_executor/src/oneshot/mod.rs | 331 ++++++++++++------ core/lib/vm_executor/src/oneshot/tests.rs | 107 ++++++ core/lib/vm_executor/src/testonly.rs | 32 +- core/lib/vm_interface/src/storage/mod.rs | 2 + .../lib/vm_interface/src/storage/overrides.rs | 70 ++++ core/lib/vm_interface/src/types/inputs/mod.rs | 2 +- .../src/execution_sandbox/execute.rs | 18 +- .../src/execution_sandbox/storage.rs | 148 +++----- .../src/execution_sandbox/validate.rs | 7 +- core/node/api_server/src/testonly.rs | 29 +- core/node/api_server/src/tx_sender/mod.rs | 8 + .../src/tx_sender/tests/gas_estimation.rs | 38 +- .../api_server/src/tx_sender/tests/mod.rs | 3 +- core/node/api_server/src/web3/tests/vm.rs | 5 +- core/node/consensus/src/vm.rs | 5 +- .../layers/web3_api/tx_sender.rs | 13 +- .../overrides/tests/integration.yaml | 4 + .../overrides/tests/loadtest-new.yaml | 4 + .../overrides/tests/loadtest-old.yaml | 1 + 34 files changed, 721 insertions(+), 262 deletions(-) create mode 100644 core/lib/vm_executor/src/oneshot/tests.rs create mode 100644 core/lib/vm_interface/src/storage/overrides.rs create mode 100644 etc/env/file_based/overrides/tests/integration.yaml diff --git a/.github/workflows/ci-core-reusable.yml b/.github/workflows/ci-core-reusable.yml index c79e34315763..fb43133868b0 100644 --- a/.github/workflows/ci-core-reusable.yml +++ b/.github/workflows/ci-core-reusable.yml @@ -105,7 +105,7 @@ jobs: - name: Loadtest configuration run: | - echo EXPECTED_TX_COUNT=${{ matrix.vm_mode == 'NEW' && 21000 || 16000 }} >> .env + echo EXPECTED_TX_COUNT=${{ matrix.vm_mode == 'NEW' && 30000 || 16000 }} >> .env echo ACCOUNTS_AMOUNT="100" >> .env echo MAX_INFLIGHT_TXS="10" >> .env echo SYNC_API_REQUESTS_LIMIT="15" >> .env @@ -360,12 +360,16 @@ jobs: - name: Run servers run: | + # Override config for part of chains to test the default config as well + ci_run zkstack dev config-writer --path etc/env/file_based/overrides/tests/integration.yaml --chain era + ci_run zkstack dev config-writer --path etc/env/file_based/overrides/tests/integration.yaml --chain validium + ci_run zkstack server --ignore-prerequisites --chain era &> ${{ env.SERVER_LOGS_DIR }}/rollup.log & ci_run zkstack server --ignore-prerequisites --chain validium &> ${{ env.SERVER_LOGS_DIR }}/validium.log & ci_run zkstack server --ignore-prerequisites --chain custom_token &> ${{ env.SERVER_LOGS_DIR }}/custom_token.log & ci_run zkstack server --ignore-prerequisites --chain consensus \ - --components=api,tree,eth,state_keeper,housekeeper,commitment_generator,vm_runner_protective_reads,vm_runner_bwip,vm_playground,da_dispatcher,consensus \ - &> ${{ env.SERVER_LOGS_DIR }}/consensus.log & + --components=api,tree,eth,state_keeper,housekeeper,commitment_generator,vm_runner_protective_reads,vm_runner_bwip,vm_playground,da_dispatcher,consensus \ + 
&> ${{ env.SERVER_LOGS_DIR }}/consensus.log & ci_run sleep 5 diff --git a/Cargo.lock b/Cargo.lock index 64ae0a9a12f4..de2c2d6c9b22 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -11351,6 +11351,7 @@ dependencies = [ "assert_matches", "async-trait", "once_cell", + "test-casing", "tokio", "tracing", "vise", diff --git a/core/bin/zksync_server/src/node_builder.rs b/core/bin/zksync_server/src/node_builder.rs index e2bd487f22b6..19edef6e4eec 100644 --- a/core/bin/zksync_server/src/node_builder.rs +++ b/core/bin/zksync_server/src/node_builder.rs @@ -309,10 +309,12 @@ impl MainNodeBuilder { latest_values_cache_size: rpc_config.latest_values_cache_size() as u64, latest_values_max_block_lag: rpc_config.latest_values_max_block_lag(), }; + let vm_config = try_load_config!(self.configs.experimental_vm_config); // On main node we always use master pool sink. self.node.add_layer(MasterPoolSinkLayer); - self.node.add_layer(TxSenderLayer::new( + + let layer = TxSenderLayer::new( TxSenderConfig::new( &sk_config, &rpc_config, @@ -323,7 +325,9 @@ impl MainNodeBuilder { ), postgres_storage_caches_config, rpc_config.vm_concurrency_limit(), - )); + ); + let layer = layer.with_vm_mode(vm_config.api_fast_vm_mode); + self.node.add_layer(layer); Ok(self) } diff --git a/core/lib/config/src/configs/experimental.rs b/core/lib/config/src/configs/experimental.rs index 618cfd3d388c..a87a221ef222 100644 --- a/core/lib/config/src/configs/experimental.rs +++ b/core/lib/config/src/configs/experimental.rs @@ -106,4 +106,9 @@ pub struct ExperimentalVmConfig { /// the new VM doesn't produce call traces and can diverge from the old VM! #[serde(default)] pub state_keeper_fast_vm_mode: FastVmMode, + + /// Fast VM mode to use in the API server. Currently, some operations are not supported by the fast VM (e.g., `debug_traceCall` + /// or transaction validation), so the legacy VM will always be used for them. 
+ #[serde(default)] + pub api_fast_vm_mode: FastVmMode, } diff --git a/core/lib/config/src/testonly.rs b/core/lib/config/src/testonly.rs index 3bf4609bb700..f8e53e33042b 100644 --- a/core/lib/config/src/testonly.rs +++ b/core/lib/config/src/testonly.rs @@ -336,6 +336,7 @@ impl Distribution for EncodeDist { configs::ExperimentalVmConfig { playground: self.sample(rng), state_keeper_fast_vm_mode: gen_fast_vm_mode(rng), + api_fast_vm_mode: gen_fast_vm_mode(rng), } } } diff --git a/core/lib/env_config/src/vm_runner.rs b/core/lib/env_config/src/vm_runner.rs index 730a79dd340a..0a29d1256bd2 100644 --- a/core/lib/env_config/src/vm_runner.rs +++ b/core/lib/env_config/src/vm_runner.rs @@ -55,6 +55,7 @@ mod tests { let mut lock = MUTEX.lock(); let config = r#" EXPERIMENTAL_VM_STATE_KEEPER_FAST_VM_MODE=new + EXPERIMENTAL_VM_API_FAST_VM_MODE=shadow EXPERIMENTAL_VM_PLAYGROUND_FAST_VM_MODE=shadow EXPERIMENTAL_VM_PLAYGROUND_DB_PATH=/db/vm_playground EXPERIMENTAL_VM_PLAYGROUND_FIRST_PROCESSED_BATCH=123 @@ -64,6 +65,7 @@ mod tests { let config = ExperimentalVmConfig::from_env().unwrap(); assert_eq!(config.state_keeper_fast_vm_mode, FastVmMode::New); + assert_eq!(config.api_fast_vm_mode, FastVmMode::Shadow); assert_eq!(config.playground.fast_vm_mode, FastVmMode::Shadow); assert_eq!(config.playground.db_path.unwrap(), "/db/vm_playground"); assert_eq!(config.playground.first_processed_batch, L1BatchNumber(123)); diff --git a/core/lib/multivm/src/versions/testonly/l1_tx_execution.rs b/core/lib/multivm/src/versions/testonly/l1_tx_execution.rs index e98a8385f020..37a2bf2bec20 100644 --- a/core/lib/multivm/src/versions/testonly/l1_tx_execution.rs +++ b/core/lib/multivm/src/versions/testonly/l1_tx_execution.rs @@ -1,3 +1,4 @@ +use assert_matches::assert_matches; use ethabi::Token; use zksync_contracts::l1_messenger_contract; use zksync_system_constants::{BOOTLOADER_ADDRESS, L1_MESSENGER_ADDRESS}; @@ -5,13 +6,17 @@ use zksync_test_account::TxType; use zksync_types::{ get_code_key, get_known_code_key, l2_to_l1_log::{L2ToL1Log, UserL2ToL1Log}, - Execute, ExecuteTransactionCommon, U256, + Address, Execute, ExecuteTransactionCommon, U256, }; use zksync_utils::{h256_to_u256, u256_to_h256}; -use super::{read_test_contract, tester::VmTesterBuilder, TestedVm, BASE_SYSTEM_CONTRACTS}; +use super::{ + read_test_contract, tester::VmTesterBuilder, ContractToDeploy, TestedVm, BASE_SYSTEM_CONTRACTS, +}; use crate::{ - interface::{InspectExecutionMode, TxExecutionMode, VmInterfaceExt}, + interface::{ + ExecutionResult, InspectExecutionMode, TxExecutionMode, VmInterfaceExt, VmRevertReason, + }, utils::StorageWritesDeduplicator, }; @@ -180,3 +185,41 @@ pub(crate) fn test_l1_tx_execution_high_gas_limit() { assert!(res.result.is_failed(), "The transaction should've failed"); } + +pub(crate) fn test_l1_tx_execution_gas_estimation_with_low_gas() { + let counter_contract = read_test_contract(); + let counter_address = Address::repeat_byte(0x11); + let mut vm = VmTesterBuilder::new() + .with_empty_in_memory_storage() + .with_base_system_smart_contracts(BASE_SYSTEM_CONTRACTS.clone()) + .with_execution_mode(TxExecutionMode::EstimateFee) + .with_custom_contracts(vec![ContractToDeploy::new( + counter_contract, + counter_address, + )]) + .with_rich_accounts(1) + .build::(); + + let account = &mut vm.rich_accounts[0]; + let mut tx = account.get_test_contract_transaction( + counter_address, + false, + None, + false, + TxType::L1 { serial_id: 0 }, + ); + let ExecuteTransactionCommon::L1(data) = &mut tx.common_data else { + unreachable!(); + }; + 
// This gas limit is chosen so that transaction starts getting executed by the bootloader, but then runs out of gas + // before its execution result is posted. + data.gas_limit = 15_000.into(); + + vm.vm.push_transaction(tx); + let res = vm.vm.execute(InspectExecutionMode::OneTx); + assert_matches!( + &res.result, + ExecutionResult::Revert { output: VmRevertReason::General { msg, .. } } + if msg.contains("reverted with empty reason") + ); +} diff --git a/core/lib/multivm/src/versions/vm_fast/tests/l1_tx_execution.rs b/core/lib/multivm/src/versions/vm_fast/tests/l1_tx_execution.rs index 0174eeffd7e3..f02957020178 100644 --- a/core/lib/multivm/src/versions/vm_fast/tests/l1_tx_execution.rs +++ b/core/lib/multivm/src/versions/vm_fast/tests/l1_tx_execution.rs @@ -1,6 +1,7 @@ use crate::{ versions::testonly::l1_tx_execution::{ - test_l1_tx_execution, test_l1_tx_execution_high_gas_limit, + test_l1_tx_execution, test_l1_tx_execution_gas_estimation_with_low_gas, + test_l1_tx_execution_high_gas_limit, }, vm_fast::Vm, }; @@ -14,3 +15,8 @@ fn l1_tx_execution() { fn l1_tx_execution_high_gas_limit() { test_l1_tx_execution_high_gas_limit::>(); } + +#[test] +fn l1_tx_execution_gas_estimation_with_low_gas() { + test_l1_tx_execution_gas_estimation_with_low_gas::>(); +} diff --git a/core/lib/multivm/src/versions/vm_fast/vm.rs b/core/lib/multivm/src/versions/vm_fast/vm.rs index a2114a339481..6ebc4b9c5716 100644 --- a/core/lib/multivm/src/versions/vm_fast/vm.rs +++ b/core/lib/multivm/src/versions/vm_fast/vm.rs @@ -51,8 +51,8 @@ use crate::{ }, vm_latest::{ constants::{ - get_vm_hook_params_start_position, get_vm_hook_position, OPERATOR_REFUNDS_OFFSET, - TX_GAS_LIMIT_OFFSET, VM_HOOK_PARAMS_COUNT, + get_result_success_first_slot, get_vm_hook_params_start_position, get_vm_hook_position, + OPERATOR_REFUNDS_OFFSET, TX_GAS_LIMIT_OFFSET, VM_HOOK_PARAMS_COUNT, }, MultiVMSubversion, }, @@ -213,7 +213,22 @@ impl Vm { } Hook::TxHasEnded => { if let VmExecutionMode::OneTx = execution_mode { - break (last_tx_result.take().unwrap(), false); + // The bootloader may invoke `TxHasEnded` hook without posting a tx result previously. One case when this can happen + // is estimating gas for L1 transactions, if a transaction runs out of gas during execution. + let tx_result = last_tx_result.take().unwrap_or_else(|| { + let tx_has_failed = self.get_tx_result().is_zero(); + if tx_has_failed { + let output = VmRevertReason::General { + msg: "Transaction reverted with empty reason. 
Possibly out of gas" + .to_string(), + data: vec![], + }; + ExecutionResult::Revert { output } + } else { + ExecutionResult::Success { output: vec![] } + } + }); + break (tx_result, false); } } Hook::AskOperatorForRefund => { @@ -361,6 +376,12 @@ impl Vm { .unwrap() } + fn get_tx_result(&self) -> U256 { + let tx_idx = self.bootloader_state.current_tx(); + let slot = get_result_success_first_slot(VM_VERSION) as usize + tx_idx; + self.read_word_from_bootloader_heap(slot) + } + fn get_debug_log(&self) -> (String, String) { let hook_params = self.get_hook_params(); let mut msg = u256_to_h256(hook_params[0]).as_bytes().to_vec(); diff --git a/core/lib/multivm/src/versions/vm_latest/tests/l1_tx_execution.rs b/core/lib/multivm/src/versions/vm_latest/tests/l1_tx_execution.rs index 4b7429c28296..3b8a01dbc80f 100644 --- a/core/lib/multivm/src/versions/vm_latest/tests/l1_tx_execution.rs +++ b/core/lib/multivm/src/versions/vm_latest/tests/l1_tx_execution.rs @@ -1,6 +1,7 @@ use crate::{ versions::testonly::l1_tx_execution::{ - test_l1_tx_execution, test_l1_tx_execution_high_gas_limit, + test_l1_tx_execution, test_l1_tx_execution_gas_estimation_with_low_gas, + test_l1_tx_execution_high_gas_limit, }, vm_latest::{HistoryEnabled, Vm}, }; @@ -14,3 +15,8 @@ fn l1_tx_execution() { fn l1_tx_execution_high_gas_limit() { test_l1_tx_execution_high_gas_limit::>(); } + +#[test] +fn l1_tx_execution_gas_estimation_with_low_gas() { + test_l1_tx_execution_gas_estimation_with_low_gas::>(); +} diff --git a/core/lib/multivm/src/vm_instance.rs b/core/lib/multivm/src/vm_instance.rs index 5ff27046377a..e2f72bd24113 100644 --- a/core/lib/multivm/src/vm_instance.rs +++ b/core/lib/multivm/src/vm_instance.rs @@ -234,7 +234,7 @@ pub type ShadowedFastVm = ShadowVm< /// Fast VM variants. #[derive(Debug)] -pub enum FastVmInstance { +pub enum FastVmInstance { /// Fast VM running in isolation. Fast(crate::vm_fast::Vm, Tr>), /// Fast VM shadowed by the latest legacy VM. diff --git a/core/lib/protobuf_config/src/experimental.rs b/core/lib/protobuf_config/src/experimental.rs index 63fa0ca51eb5..750dc7b04f01 100644 --- a/core/lib/protobuf_config/src/experimental.rs +++ b/core/lib/protobuf_config/src/experimental.rs @@ -7,6 +7,14 @@ use zksync_protobuf::{repr::ProtoRepr, required}; use crate::{proto::experimental as proto, read_optional_repr}; +fn parse_vm_mode(raw: Option) -> anyhow::Result { + Ok(raw + .map(proto::FastVmMode::try_from) + .transpose() + .context("fast_vm_mode")? + .map_or_else(FastVmMode::default, |mode| mode.parse())) +} + impl ProtoRepr for proto::Db { type Type = configs::ExperimentalDBConfig; @@ -105,12 +113,8 @@ impl ProtoRepr for proto::Vm { fn read(&self) -> anyhow::Result { Ok(Self::Type { playground: read_optional_repr(&self.playground).unwrap_or_default(), - state_keeper_fast_vm_mode: self - .state_keeper_fast_vm_mode - .map(proto::FastVmMode::try_from) - .transpose() - .context("fast_vm_mode")? 
- .map_or_else(FastVmMode::default, |mode| mode.parse()), + state_keeper_fast_vm_mode: parse_vm_mode(self.state_keeper_fast_vm_mode)?, + api_fast_vm_mode: parse_vm_mode(self.api_fast_vm_mode)?, }) } @@ -120,6 +124,7 @@ impl ProtoRepr for proto::Vm { state_keeper_fast_vm_mode: Some( proto::FastVmMode::new(this.state_keeper_fast_vm_mode).into(), ), + api_fast_vm_mode: Some(proto::FastVmMode::new(this.api_fast_vm_mode).into()), } } } diff --git a/core/lib/protobuf_config/src/proto/config/experimental.proto b/core/lib/protobuf_config/src/proto/config/experimental.proto index 5e1d045ca670..87af8d3835c6 100644 --- a/core/lib/protobuf_config/src/proto/config/experimental.proto +++ b/core/lib/protobuf_config/src/proto/config/experimental.proto @@ -37,4 +37,5 @@ message VmPlayground { message Vm { optional VmPlayground playground = 1; // optional optional FastVmMode state_keeper_fast_vm_mode = 2; // optional; if not set, fast VM is not used + optional FastVmMode api_fast_vm_mode = 3; // optional; if not set, fast VM is not used } diff --git a/core/lib/vm_executor/Cargo.toml b/core/lib/vm_executor/Cargo.toml index a967aaa969ad..06a531252c54 100644 --- a/core/lib/vm_executor/Cargo.toml +++ b/core/lib/vm_executor/Cargo.toml @@ -26,3 +26,4 @@ vise.workspace = true [dev-dependencies] assert_matches.workspace = true +test-casing.workspace = true diff --git a/core/lib/vm_executor/src/oneshot/metrics.rs b/core/lib/vm_executor/src/oneshot/metrics.rs index 475463300f16..13a832ee3c89 100644 --- a/core/lib/vm_executor/src/oneshot/metrics.rs +++ b/core/lib/vm_executor/src/oneshot/metrics.rs @@ -50,7 +50,7 @@ pub(super) fn report_vm_memory_metrics( tx_id: &str, memory_metrics: &VmMemoryMetrics, vm_execution_took: Duration, - storage_metrics: &StorageViewStats, + storage_stats: &StorageViewStats, ) { MEMORY_METRICS.event_sink_size[&SizeType::Inner].observe(memory_metrics.event_sink_inner); MEMORY_METRICS.event_sink_size[&SizeType::History].observe(memory_metrics.event_sink_history); @@ -65,10 +65,18 @@ pub(super) fn report_vm_memory_metrics( MEMORY_METRICS .storage_view_cache_size - .observe(storage_metrics.cache_size); + .observe(storage_stats.cache_size); MEMORY_METRICS .full - .observe(memory_metrics.full_size() + storage_metrics.cache_size); + .observe(memory_metrics.full_size() + storage_stats.cache_size); - STORAGE_METRICS.observe(&format!("Tx {tx_id}"), vm_execution_took, storage_metrics); + report_vm_storage_metrics(tx_id, vm_execution_took, storage_stats); +} + +pub(super) fn report_vm_storage_metrics( + tx_id: &str, + vm_execution_took: Duration, + storage_stats: &StorageViewStats, +) { + STORAGE_METRICS.observe(&format!("Tx {tx_id}"), vm_execution_took, storage_stats); } diff --git a/core/lib/vm_executor/src/oneshot/mod.rs b/core/lib/vm_executor/src/oneshot/mod.rs index 5f9e4dd3c6f4..154c838f824f 100644 --- a/core/lib/vm_executor/src/oneshot/mod.rs +++ b/core/lib/vm_executor/src/oneshot/mod.rs @@ -17,23 +17,26 @@ use once_cell::sync::OnceCell; use zksync_multivm::{ interface::{ executor::{OneshotExecutor, TransactionValidator}, - storage::{ReadStorage, StoragePtr, StorageView, WriteStorage}, + storage::{ReadStorage, StorageView, StorageWithOverrides}, tracer::{ValidationError, ValidationParams}, - ExecutionResult, InspectExecutionMode, OneshotEnv, OneshotTracingParams, + utils::{DivergenceHandler, ShadowVm}, + Call, ExecutionResult, InspectExecutionMode, OneshotEnv, OneshotTracingParams, OneshotTransactionExecutionResult, StoredL2BlockEnv, TxExecutionArgs, TxExecutionMode, - VmInterface, + VmFactory, 
VmInterface, }, - tracers::{CallTracer, StorageInvocations, ValidationTracer}, + is_supported_by_fast_vm, + tracers::{CallTracer, StorageInvocations, TracerDispatcher, ValidationTracer}, utils::adjust_pubdata_price_for_tx, - vm_latest::HistoryDisabled, + vm_latest::{HistoryDisabled, HistoryEnabled}, zk_evm_latest::ethereum_types::U256, - LegacyVmInstance, MultiVMTracer, + FastVmInstance, HistoryMode, LegacyVmInstance, MultiVMTracer, }; use zksync_types::{ block::pack_block_info, get_nonce_key, l2::L2Tx, utils::{decompose_full_nonce, nonces_to_full_nonce, storage_key_for_eth_balance}, + vm::FastVmMode, AccountTreeId, Nonce, StorageKey, Transaction, SYSTEM_CONTEXT_ADDRESS, SYSTEM_CONTEXT_CURRENT_L2_BLOCK_INFO_POSITION, SYSTEM_CONTEXT_CURRENT_TX_ROLLING_HASH_POSITION, }; @@ -54,10 +57,14 @@ mod contracts; mod env; mod metrics; mod mock; +#[cfg(test)] +mod tests; /// Main [`OneshotExecutor`] implementation used by the API server. -#[derive(Debug, Default)] +#[derive(Debug)] pub struct MainOneshotExecutor { + fast_vm_mode: FastVmMode, + panic_on_divergence: bool, missed_storage_invocation_limit: usize, execution_latency_histogram: Option<&'static vise::Histogram>, } @@ -67,11 +74,28 @@ impl MainOneshotExecutor { /// The limit is applied for calls and gas estimations, but not during transaction validation. pub fn new(missed_storage_invocation_limit: usize) -> Self { Self { + fast_vm_mode: FastVmMode::Old, + panic_on_divergence: false, missed_storage_invocation_limit, execution_latency_histogram: None, } } + /// Sets the fast VM mode used by this executor. + pub fn set_fast_vm_mode(&mut self, fast_vm_mode: FastVmMode) { + if !matches!(fast_vm_mode, FastVmMode::Old) { + tracing::warn!( + "Running new VM with modes {fast_vm_mode:?}; this can lead to incorrect node behavior" + ); + } + self.fast_vm_mode = fast_vm_mode; + } + + /// Causes the VM to panic on divergence whenever it executes in the shadow mode. By default, a divergence is logged on `ERROR` level. + pub fn panic_on_divergence(&mut self) { + self.panic_on_divergence = true; + } + /// Sets a histogram for measuring VM execution latency. 
pub fn set_execution_latency_histogram( &mut self, @@ -79,19 +103,31 @@ impl MainOneshotExecutor { ) { self.execution_latency_histogram = Some(histogram); } + + fn select_fast_vm_mode( + &self, + env: &OneshotEnv, + tracing_params: &OneshotTracingParams, + ) -> FastVmMode { + if tracing_params.trace_calls || !is_supported_by_fast_vm(env.system.version) { + FastVmMode::Old // the fast VM doesn't support call tracing or old protocol versions + } else { + self.fast_vm_mode + } + } } #[async_trait] -impl OneshotExecutor for MainOneshotExecutor +impl OneshotExecutor> for MainOneshotExecutor where S: ReadStorage + Send + 'static, { async fn inspect_transaction_with_bytecode_compression( &self, - storage: S, + storage: StorageWithOverrides, env: OneshotEnv, args: TxExecutionArgs, - params: OneshotTracingParams, + tracing_params: OneshotTracingParams, ) -> anyhow::Result { let missed_storage_invocation_limit = match env.system.execution_mode { // storage accesses are not limited for tx validation @@ -100,35 +136,24 @@ where self.missed_storage_invocation_limit } }; - let execution_latency_histogram = self.execution_latency_histogram; + let sandbox = VmSandbox { + fast_vm_mode: self.select_fast_vm_mode(&env, &tracing_params), + panic_on_divergence: self.panic_on_divergence, + storage, + env, + execution_args: args, + execution_latency_histogram: self.execution_latency_histogram, + }; tokio::task::spawn_blocking(move || { - let mut tracers = vec![]; - let mut calls_result = Arc::>::default(); - if params.trace_calls { - tracers.push(CallTracer::new(calls_result.clone()).into_tracer_pointer()); - } - tracers.push( - StorageInvocations::new(missed_storage_invocation_limit).into_tracer_pointer(), - ); - - let executor = VmSandbox::new(storage, env, args, execution_latency_histogram); - let mut result = executor.apply(|vm, transaction| { - let (compression_result, tx_result) = vm - .inspect_transaction_with_bytecode_compression( - &mut tracers.into(), - transaction, - true, - ); - OneshotTransactionExecutionResult { - tx_result: Box::new(tx_result), - compression_result: compression_result.map(drop), - call_traces: vec![], - } - }); - - result.call_traces = Arc::make_mut(&mut calls_result).take().unwrap_or_default(); - result + sandbox.execute_in_vm(|vm, transaction| { + vm.inspect_transaction_with_bytecode_compression( + missed_storage_invocation_limit, + tracing_params, + transaction, + true, + ) + }) }) .await .context("VM execution panicked") @@ -136,13 +161,13 @@ where } #[async_trait] -impl TransactionValidator for MainOneshotExecutor +impl TransactionValidator> for MainOneshotExecutor where S: ReadStorage + Send + 'static, { async fn validate_transaction( &self, - storage: S, + storage: StorageWithOverrides, env: OneshotEnv, tx: L2Tx, validation_params: ValidationParams, @@ -152,23 +177,28 @@ where "Unexpected execution mode for tx validation: {:?} (expected `VerifyExecute`)", env.system.execution_mode ); - let execution_latency_histogram = self.execution_latency_histogram; + + let sandbox = VmSandbox { + fast_vm_mode: FastVmMode::Old, + panic_on_divergence: self.panic_on_divergence, + storage, + env, + execution_args: TxExecutionArgs::for_validation(tx), + execution_latency_histogram: self.execution_latency_histogram, + }; tokio::task::spawn_blocking(move || { let (validation_tracer, mut validation_result) = ValidationTracer::::new( validation_params, - env.system.version.into(), + sandbox.env.system.version.into(), ); let tracers = vec![validation_tracer.into_tracer_pointer()]; - let executor = 
VmSandbox::new( - storage, - env, - TxExecutionArgs::for_validation(tx), - execution_latency_histogram, - ); - let exec_result = executor.apply(|vm, transaction| { + let exec_result = sandbox.execute_in_vm(|vm, transaction| { + let Vm::Legacy(vm) = vm else { + unreachable!("Fast VM is never used for validation yet"); + }; vm.push_transaction(transaction); vm.inspect(&mut tracers.into(), InspectExecutionMode::OneTx) }); @@ -188,70 +218,99 @@ where } #[derive(Debug)] -struct VmSandbox { - vm: Box>, - storage_view: StoragePtr>, - transaction: Transaction, - execution_latency_histogram: Option<&'static vise::Histogram>, +enum Vm { + Legacy(LegacyVmInstance), + Fast(FastVmInstance), } -impl VmSandbox { - /// This method is blocking. - fn new( - storage: S, - mut env: OneshotEnv, - execution_args: TxExecutionArgs, - execution_latency_histogram: Option<&'static vise::Histogram>, - ) -> Self { - let mut storage_view = StorageView::new(storage); - Self::setup_storage_view(&mut storage_view, &execution_args, env.current_block); - - let protocol_version = env.system.version; - if execution_args.adjust_pubdata_price { - env.l1_batch.fee_input = adjust_pubdata_price_for_tx( - env.l1_batch.fee_input, - execution_args.transaction.gas_per_pubdata_byte_limit(), - env.l1_batch.enforced_base_fee.map(U256::from), - protocol_version.into(), - ); +impl Vm { + fn inspect_transaction_with_bytecode_compression( + &mut self, + missed_storage_invocation_limit: usize, + params: OneshotTracingParams, + tx: Transaction, + with_compression: bool, + ) -> OneshotTransactionExecutionResult { + let mut calls_result = Arc::>::default(); + let (compression_result, tx_result) = match self { + Self::Legacy(vm) => { + let mut tracers = Self::create_legacy_tracers( + missed_storage_invocation_limit, + params.trace_calls.then(|| calls_result.clone()), + ); + vm.inspect_transaction_with_bytecode_compression(&mut tracers, tx, with_compression) + } + Self::Fast(vm) => { + assert!( + !params.trace_calls, + "Call tracing is not supported by fast VM yet" + ); + let legacy_tracers = Self::create_legacy_tracers::( + missed_storage_invocation_limit, + None, + ); + let mut full_tracer = (legacy_tracers.into(), ()); + vm.inspect_transaction_with_bytecode_compression( + &mut full_tracer, + tx, + with_compression, + ) + } }; - let storage_view = storage_view.to_rc_ptr(); - let vm = Box::new(LegacyVmInstance::new_with_specific_version( - env.l1_batch, - env.system, - storage_view.clone(), - protocol_version.into_api_vm_version(), - )); + OneshotTransactionExecutionResult { + tx_result: Box::new(tx_result), + compression_result: compression_result.map(drop), + call_traces: Arc::make_mut(&mut calls_result).take().unwrap_or_default(), + } + } - Self { - vm, - storage_view, - transaction: execution_args.transaction, - execution_latency_histogram, + fn create_legacy_tracers( + missed_storage_invocation_limit: usize, + calls_result: Option>>>, + ) -> TracerDispatcher, H> { + let mut tracers = vec![]; + if let Some(calls_result) = calls_result { + tracers.push(CallTracer::new(calls_result).into_tracer_pointer()); } + tracers + .push(StorageInvocations::new(missed_storage_invocation_limit).into_tracer_pointer()); + tracers.into() } +} +/// Full parameters necessary to instantiate a VM for oneshot execution. 
+#[derive(Debug)] +struct VmSandbox { + fast_vm_mode: FastVmMode, + panic_on_divergence: bool, + storage: StorageWithOverrides, + env: OneshotEnv, + execution_args: TxExecutionArgs, + execution_latency_histogram: Option<&'static vise::Histogram>, +} + +impl VmSandbox { /// This method is blocking. - fn setup_storage_view( - storage_view: &mut StorageView, + fn setup_storage( + storage: &mut StorageWithOverrides, execution_args: &TxExecutionArgs, current_block: Option, ) { let storage_view_setup_started_at = Instant::now(); if let Some(nonce) = execution_args.enforced_nonce { let nonce_key = get_nonce_key(&execution_args.transaction.initiator_account()); - let full_nonce = storage_view.read_value(&nonce_key); + let full_nonce = storage.read_value(&nonce_key); let (_, deployment_nonce) = decompose_full_nonce(h256_to_u256(full_nonce)); let enforced_full_nonce = nonces_to_full_nonce(U256::from(nonce.0), deployment_nonce); - storage_view.set_value(nonce_key, u256_to_h256(enforced_full_nonce)); + storage.set_value(nonce_key, u256_to_h256(enforced_full_nonce)); } let payer = execution_args.transaction.payer(); let balance_key = storage_key_for_eth_balance(&payer); - let mut current_balance = h256_to_u256(storage_view.read_value(&balance_key)); + let mut current_balance = h256_to_u256(storage.read_value(&balance_key)); current_balance += execution_args.added_balance; - storage_view.set_value(balance_key, u256_to_h256(current_balance)); + storage.set_value(balance_key, u256_to_h256(current_balance)); // Reset L2 block info if necessary. if let Some(current_block) = current_block { @@ -261,13 +320,13 @@ impl VmSandbox { ); let l2_block_info = pack_block_info(current_block.number.into(), current_block.timestamp); - storage_view.set_value(l2_block_info_key, u256_to_h256(l2_block_info)); + storage.set_value(l2_block_info_key, u256_to_h256(l2_block_info)); let l2_block_txs_rolling_hash_key = StorageKey::new( AccountTreeId::new(SYSTEM_CONTEXT_ADDRESS), SYSTEM_CONTEXT_CURRENT_TX_ROLLING_HASH_POSITION, ); - storage_view.set_value( + storage.set_value( l2_block_txs_rolling_hash_key, current_block.txs_rolling_hash, ); @@ -280,30 +339,90 @@ impl VmSandbox { } } - pub(super) fn apply(mut self, apply_fn: F) -> T - where - F: FnOnce(&mut LegacyVmInstance, Transaction) -> T, - { + /// This method is blocking. 
+ fn execute_in_vm( + mut self, + action: impl FnOnce(&mut Vm>, Transaction) -> T, + ) -> T { + Self::setup_storage( + &mut self.storage, + &self.execution_args, + self.env.current_block, + ); + + let protocol_version = self.env.system.version; + let mode = self.env.system.execution_mode; + if self.execution_args.adjust_pubdata_price { + self.env.l1_batch.fee_input = adjust_pubdata_price_for_tx( + self.env.l1_batch.fee_input, + self.execution_args.transaction.gas_per_pubdata_byte_limit(), + self.env.l1_batch.enforced_base_fee.map(U256::from), + protocol_version.into(), + ); + }; + + let transaction = self.execution_args.transaction; let tx_id = format!( "{:?}-{}", - self.transaction.initiator_account(), - self.transaction.nonce().unwrap_or(Nonce(0)) + transaction.initiator_account(), + transaction.nonce().unwrap_or(Nonce(0)) ); + let storage_view = StorageView::new(self.storage).to_rc_ptr(); + let mut vm = match self.fast_vm_mode { + FastVmMode::Old => Vm::Legacy(LegacyVmInstance::new_with_specific_version( + self.env.l1_batch, + self.env.system, + storage_view.clone(), + protocol_version.into_api_vm_version(), + )), + FastVmMode::New => Vm::Fast(FastVmInstance::fast( + self.env.l1_batch, + self.env.system, + storage_view.clone(), + )), + FastVmMode::Shadow => { + let mut vm = + ShadowVm::new(self.env.l1_batch, self.env.system, storage_view.clone()); + if !self.panic_on_divergence { + let transaction = format!("{:?}", transaction); + let handler = DivergenceHandler::new(move |errors, _| { + tracing::error!(transaction, ?mode, "{errors}"); + }); + vm.set_divergence_handler(handler); + } + Vm::Fast(FastVmInstance::Shadowed(vm)) + } + }; + let started_at = Instant::now(); - let result = apply_fn(&mut *self.vm, self.transaction); + let result = action(&mut vm, transaction); let vm_execution_took = started_at.elapsed(); if let Some(histogram) = self.execution_latency_histogram { histogram.observe(vm_execution_took); } - let memory_metrics = self.vm.record_vm_memory_metrics(); - metrics::report_vm_memory_metrics( - &tx_id, - &memory_metrics, - vm_execution_took, - &self.storage_view.borrow().stats(), - ); + + match &vm { + Vm::Legacy(vm) => { + let memory_metrics = vm.record_vm_memory_metrics(); + metrics::report_vm_memory_metrics( + &tx_id, + &memory_metrics, + vm_execution_took, + &storage_view.borrow().stats(), + ); + } + Vm::Fast(_) => { + // The new VM implementation doesn't have the same memory model as old ones, so it doesn't report memory metrics, + // only storage-related ones. + metrics::report_vm_storage_metrics( + &format!("Tx {tx_id}"), + vm_execution_took, + &storage_view.borrow().stats(), + ); + } + } result } } diff --git a/core/lib/vm_executor/src/oneshot/tests.rs b/core/lib/vm_executor/src/oneshot/tests.rs new file mode 100644 index 000000000000..65d2ff3727c0 --- /dev/null +++ b/core/lib/vm_executor/src/oneshot/tests.rs @@ -0,0 +1,107 @@ +//! Oneshot executor tests. 
+ +use assert_matches::assert_matches; +use test_casing::{test_casing, Product}; +use zksync_multivm::interface::storage::InMemoryStorage; +use zksync_types::{ProtocolVersionId, H256}; +use zksync_utils::bytecode::hash_bytecode; + +use super::*; +use crate::testonly::{ + create_l2_transaction, default_l1_batch_env, default_system_env, FAST_VM_MODES, +}; + +const EXEC_MODES: [TxExecutionMode; 3] = [ + TxExecutionMode::EstimateFee, + TxExecutionMode::EthCall, + TxExecutionMode::VerifyExecute, +]; + +#[test] +fn selecting_vm_for_execution() { + let mut executor = MainOneshotExecutor::new(usize::MAX); + executor.set_fast_vm_mode(FastVmMode::New); + + for exec_mode in EXEC_MODES { + let env = OneshotEnv { + system: default_system_env(exec_mode), + l1_batch: default_l1_batch_env(1), + current_block: None, + }; + let mode = executor.select_fast_vm_mode(&env, &OneshotTracingParams::default()); + assert_matches!(mode, FastVmMode::New); + + // Tracing calls is not supported by the new VM. + let mode = executor.select_fast_vm_mode(&env, &OneshotTracingParams { trace_calls: true }); + assert_matches!(mode, FastVmMode::Old); + + // Old protocol versions are not supported either. + let mut old_env = env.clone(); + old_env.system.version = ProtocolVersionId::Version22; + let mode = executor.select_fast_vm_mode(&old_env, &OneshotTracingParams::default()); + assert_matches!(mode, FastVmMode::Old); + } +} + +#[test] +fn setting_up_nonce_and_balance_in_storage() { + let mut storage = StorageWithOverrides::new(InMemoryStorage::default()); + let tx = create_l2_transaction(1_000_000_000.into(), Nonce(1)); + let execution_args = TxExecutionArgs::for_gas_estimate(tx.clone().into()); + VmSandbox::setup_storage(&mut storage, &execution_args, None); + + // Check the overridden nonce and balance. 
+ let nonce_key = get_nonce_key(&tx.initiator_account()); + assert_eq!(storage.read_value(&nonce_key), H256::from_low_u64_be(1)); + let balance_key = storage_key_for_eth_balance(&tx.initiator_account()); + let expected_added_balance = tx.common_data.fee.gas_limit * tx.common_data.fee.max_fee_per_gas; + assert_eq!( + storage.read_value(&balance_key), + u256_to_h256(expected_added_balance) + ); + + let mut storage = InMemoryStorage::default(); + storage.set_value(balance_key, H256::from_low_u64_be(2_000_000_000)); + let mut storage = StorageWithOverrides::new(storage); + VmSandbox::setup_storage(&mut storage, &execution_args, None); + + assert_eq!( + storage.read_value(&balance_key), + u256_to_h256(expected_added_balance + U256::from(2_000_000_000)) + ); +} + +#[test_casing(9, Product((EXEC_MODES, FAST_VM_MODES)))] +#[tokio::test] +async fn inspecting_transfer(exec_mode: TxExecutionMode, fast_vm_mode: FastVmMode) { + let tx = create_l2_transaction(1_000_000_000.into(), Nonce(0)); + let mut storage = InMemoryStorage::with_system_contracts(hash_bytecode); + storage.set_value( + storage_key_for_eth_balance(&tx.initiator_account()), + u256_to_h256(u64::MAX.into()), + ); + let storage = StorageWithOverrides::new(storage); + + let l1_batch = default_l1_batch_env(1); + let env = OneshotEnv { + system: default_system_env(exec_mode), + current_block: Some(StoredL2BlockEnv { + number: l1_batch.first_l2_block.number - 1, + timestamp: l1_batch.first_l2_block.timestamp - 1, + txs_rolling_hash: H256::zero(), + }), + l1_batch, + }; + let args = TxExecutionArgs::for_gas_estimate(tx.into()); + let tracing = OneshotTracingParams::default(); + + let mut executor = MainOneshotExecutor::new(usize::MAX); + executor.set_fast_vm_mode(fast_vm_mode); + let result = executor + .inspect_transaction_with_bytecode_compression(storage, env, args, tracing) + .await + .unwrap(); + result.compression_result.unwrap(); + let exec_result = result.tx_result.result; + assert!(!exec_result.is_failed(), "{exec_result:?}"); +} diff --git a/core/lib/vm_executor/src/testonly.rs b/core/lib/vm_executor/src/testonly.rs index 5bcd604a4324..2fa7f075db71 100644 --- a/core/lib/vm_executor/src/testonly.rs +++ b/core/lib/vm_executor/src/testonly.rs @@ -2,11 +2,14 @@ use once_cell::sync::Lazy; use zksync_contracts::BaseSystemContracts; use zksync_multivm::{ interface::{L1BatchEnv, L2BlockEnv, SystemEnv, TxExecutionMode}, + utils::derive_base_fee_and_gas_per_pubdata, vm_latest::constants::BATCH_COMPUTATIONAL_GAS_LIMIT, + zk_evm_latest::ethereum_types::U256, }; use zksync_types::{ - block::L2BlockHasher, fee_model::BatchFeeInput, vm::FastVmMode, Address, L1BatchNumber, - L2BlockNumber, L2ChainId, ProtocolVersionId, H256, ZKPORTER_IS_AVAILABLE, + block::L2BlockHasher, fee::Fee, fee_model::BatchFeeInput, l2::L2Tx, + transaction_request::PaymasterParams, vm::FastVmMode, Address, K256PrivateKey, L1BatchNumber, + L2BlockNumber, L2ChainId, Nonce, ProtocolVersionId, H256, ZKPORTER_IS_AVAILABLE, }; static BASE_SYSTEM_CONTRACTS: Lazy = @@ -43,3 +46,28 @@ pub(crate) fn default_l1_batch_env(number: u32) -> L1BatchEnv { fee_input: BatchFeeInput::sensible_l1_pegged_default(), } } + +pub(crate) fn create_l2_transaction(value: U256, nonce: Nonce) -> L2Tx { + let (max_fee_per_gas, gas_per_pubdata_limit) = derive_base_fee_and_gas_per_pubdata( + BatchFeeInput::sensible_l1_pegged_default(), + ProtocolVersionId::latest().into(), + ); + let fee = Fee { + gas_limit: 10_000_000.into(), + max_fee_per_gas: max_fee_per_gas.into(), + max_priority_fee_per_gas: 0_u64.into(), 
+ gas_per_pubdata_limit: gas_per_pubdata_limit.into(), + }; + L2Tx::new_signed( + Some(Address::random()), + vec![], + nonce, + fee, + value, + L2ChainId::default(), + &K256PrivateKey::random(), + vec![], + PaymasterParams::default(), + ) + .unwrap() +} diff --git a/core/lib/vm_interface/src/storage/mod.rs b/core/lib/vm_interface/src/storage/mod.rs index 6cdcd33db682..aade56ca5d96 100644 --- a/core/lib/vm_interface/src/storage/mod.rs +++ b/core/lib/vm_interface/src/storage/mod.rs @@ -5,11 +5,13 @@ use zksync_types::{get_known_code_key, StorageKey, StorageValue, H256}; pub use self::{ // Note, that `test_infra` of the bootloader tests relies on this value to be exposed in_memory::{InMemoryStorage, IN_MEMORY_STORAGE_DEFAULT_NETWORK_ID}, + overrides::StorageWithOverrides, snapshot::{StorageSnapshot, StorageWithSnapshot}, view::{ImmutableStorageView, StorageView, StorageViewCache, StorageViewStats}, }; mod in_memory; +mod overrides; mod snapshot; mod view; diff --git a/core/lib/vm_interface/src/storage/overrides.rs b/core/lib/vm_interface/src/storage/overrides.rs new file mode 100644 index 000000000000..ad5a3d8624f1 --- /dev/null +++ b/core/lib/vm_interface/src/storage/overrides.rs @@ -0,0 +1,70 @@ +//! VM storage functionality specifically used in the VM sandbox. + +use std::{ + collections::{HashMap, HashSet}, + fmt, +}; + +use zksync_types::{AccountTreeId, StorageKey, StorageValue, H256}; + +use super::ReadStorage; + +/// A storage view that allows to override some of the storage values. +#[derive(Debug)] +pub struct StorageWithOverrides { + storage_handle: S, + overridden_slots: HashMap, + overridden_factory_deps: HashMap>, + empty_accounts: HashSet, +} + +impl StorageWithOverrides { + /// Creates a new storage view based on the underlying storage. + pub fn new(storage: S) -> Self { + Self { + storage_handle: storage, + overridden_slots: HashMap::new(), + overridden_factory_deps: HashMap::new(), + empty_accounts: HashSet::new(), + } + } + + pub fn set_value(&mut self, key: StorageKey, value: StorageValue) { + self.overridden_slots.insert(key, value); + } + + pub fn store_factory_dep(&mut self, hash: H256, code: Vec) { + self.overridden_factory_deps.insert(hash, code); + } + + pub fn insert_erased_account(&mut self, account: AccountTreeId) { + self.empty_accounts.insert(account); + } +} + +impl ReadStorage for StorageWithOverrides { + fn read_value(&mut self, key: &StorageKey) -> StorageValue { + if let Some(value) = self.overridden_slots.get(key) { + return *value; + } + if self.empty_accounts.contains(key.account()) { + return H256::zero(); + } + self.storage_handle.read_value(key) + } + + fn is_write_initial(&mut self, key: &StorageKey) -> bool { + self.storage_handle.is_write_initial(key) + } + + fn load_factory_dep(&mut self, hash: H256) -> Option> { + self.overridden_factory_deps + .get(&hash) + .cloned() + .or_else(|| self.storage_handle.load_factory_dep(hash)) + } + + fn get_enumeration_index(&mut self, key: &StorageKey) -> Option { + self.storage_handle.get_enumeration_index(key) + } +} diff --git a/core/lib/vm_interface/src/types/inputs/mod.rs b/core/lib/vm_interface/src/types/inputs/mod.rs index cb80ba7c1386..83f87f0fe1dd 100644 --- a/core/lib/vm_interface/src/types/inputs/mod.rs +++ b/core/lib/vm_interface/src/types/inputs/mod.rs @@ -15,7 +15,7 @@ mod l2_block; mod system_env; /// Full environment for oneshot transaction / call execution. -#[derive(Debug)] +#[derive(Debug, Clone)] pub struct OneshotEnv { /// System environment. 
pub system: SystemEnv, diff --git a/core/node/api_server/src/execution_sandbox/execute.rs b/core/node/api_server/src/execution_sandbox/execute.rs index bdd574625888..7958b5ed3c12 100644 --- a/core/node/api_server/src/execution_sandbox/execute.rs +++ b/core/node/api_server/src/execution_sandbox/execute.rs @@ -8,7 +8,7 @@ use tokio::runtime::Handle; use zksync_dal::{Connection, Core}; use zksync_multivm::interface::{ executor::{OneshotExecutor, TransactionValidator}, - storage::ReadStorage, + storage::{ReadStorage, StorageWithOverrides}, tracer::{ValidationError, ValidationParams}, Call, OneshotEnv, OneshotTracingParams, OneshotTransactionExecutionResult, TransactionExecutionMetrics, TxExecutionArgs, VmExecutionResultAndLogs, @@ -20,11 +20,10 @@ use zksync_types::{ use zksync_vm_executor::oneshot::{MainOneshotExecutor, MockOneshotExecutor}; use super::{ - storage::StorageWithOverrides, vm_metrics::{self, SandboxStage}, BlockArgs, VmPermit, SANDBOX_METRICS, }; -use crate::tx_sender::SandboxExecutorOptions; +use crate::{execution_sandbox::storage::apply_state_override, tx_sender::SandboxExecutorOptions}; /// Action that can be executed by [`SandboxExecutor`]. #[derive(Debug)] @@ -109,6 +108,9 @@ impl SandboxExecutor { missed_storage_invocation_limit: usize, ) -> Self { let mut executor = MainOneshotExecutor::new(missed_storage_invocation_limit); + executor.set_fast_vm_mode(options.fast_vm_mode); + #[cfg(test)] + executor.panic_on_divergence(); executor .set_execution_latency_histogram(&SANDBOX_METRICS.sandbox[&SandboxStage::Execution]); Self { @@ -151,7 +153,7 @@ impl SandboxExecutor { .await?; let state_override = state_override.unwrap_or_default(); - let storage = StorageWithOverrides::new(storage, &state_override); + let storage = apply_state_override(storage, &state_override); let (execution_args, tracing_params) = action.into_parts(); let result = self .inspect_transaction_with_bytecode_compression( @@ -246,13 +248,13 @@ impl SandboxExecutor { } #[async_trait] -impl OneshotExecutor for SandboxExecutor +impl OneshotExecutor> for SandboxExecutor where S: ReadStorage + Send + 'static, { async fn inspect_transaction_with_bytecode_compression( &self, - storage: S, + storage: StorageWithOverrides, env: OneshotEnv, args: TxExecutionArgs, tracing_params: OneshotTracingParams, @@ -283,13 +285,13 @@ where } #[async_trait] -impl TransactionValidator for SandboxExecutor +impl TransactionValidator> for SandboxExecutor where S: ReadStorage + Send + 'static, { async fn validate_transaction( &self, - storage: S, + storage: StorageWithOverrides, env: OneshotEnv, tx: L2Tx, validation_params: ValidationParams, diff --git a/core/node/api_server/src/execution_sandbox/storage.rs b/core/node/api_server/src/execution_sandbox/storage.rs index bf775d484906..c80356f6e36e 100644 --- a/core/node/api_server/src/execution_sandbox/storage.rs +++ b/core/node/api_server/src/execution_sandbox/storage.rs @@ -1,127 +1,67 @@ //! VM storage functionality specifically used in the VM sandbox. 
-use std::{ - collections::{HashMap, HashSet}, - fmt, -}; - -use zksync_multivm::interface::storage::ReadStorage; +use zksync_multivm::interface::storage::{ReadStorage, StorageWithOverrides}; use zksync_types::{ api::state_override::{OverrideState, StateOverride}, get_code_key, get_known_code_key, get_nonce_key, utils::{decompose_full_nonce, nonces_to_full_nonce, storage_key_for_eth_balance}, - AccountTreeId, StorageKey, StorageValue, H256, + AccountTreeId, StorageKey, H256, }; use zksync_utils::{h256_to_u256, u256_to_h256}; -/// A storage view that allows to override some of the storage values. -#[derive(Debug)] -pub(super) struct StorageWithOverrides { - storage_handle: S, - overridden_slots: HashMap, - overridden_factory_deps: HashMap>, - overridden_accounts: HashSet, -} - -impl StorageWithOverrides { - /// Creates a new storage view based on the underlying storage. - pub(super) fn new(storage: S, state_override: &StateOverride) -> Self { - let mut this = Self { - storage_handle: storage, - overridden_slots: HashMap::new(), - overridden_factory_deps: HashMap::new(), - overridden_accounts: HashSet::new(), - }; - this.apply_state_override(state_override); - this - } - - fn apply_state_override(&mut self, state_override: &StateOverride) { - for (account, overrides) in state_override.iter() { - if let Some(balance) = overrides.balance { - let balance_key = storage_key_for_eth_balance(account); - self.overridden_slots - .insert(balance_key, u256_to_h256(balance)); - } +/// This method is blocking. +pub(super) fn apply_state_override( + storage: S, + state_override: &StateOverride, +) -> StorageWithOverrides { + let mut storage = StorageWithOverrides::new(storage); + for (account, overrides) in state_override.iter() { + if let Some(balance) = overrides.balance { + let balance_key = storage_key_for_eth_balance(account); + storage.set_value(balance_key, u256_to_h256(balance)); + } - if let Some(nonce) = overrides.nonce { - let nonce_key = get_nonce_key(account); - let full_nonce = self.read_value(&nonce_key); - let (_, deployment_nonce) = decompose_full_nonce(h256_to_u256(full_nonce)); - let new_full_nonce = u256_to_h256(nonces_to_full_nonce(nonce, deployment_nonce)); - self.overridden_slots.insert(nonce_key, new_full_nonce); - } + if let Some(nonce) = overrides.nonce { + let nonce_key = get_nonce_key(account); + let full_nonce = storage.read_value(&nonce_key); + let (_, deployment_nonce) = decompose_full_nonce(h256_to_u256(full_nonce)); + let new_full_nonce = u256_to_h256(nonces_to_full_nonce(nonce, deployment_nonce)); + storage.set_value(nonce_key, new_full_nonce); + } - if let Some(code) = &overrides.code { - let code_key = get_code_key(account); - let code_hash = code.hash(); - self.overridden_slots.insert(code_key, code_hash); - let known_code_key = get_known_code_key(&code_hash); - self.overridden_slots - .insert(known_code_key, H256::from_low_u64_be(1)); - self.store_factory_dep(code_hash, code.clone().into_bytes()); - } + if let Some(code) = &overrides.code { + let code_key = get_code_key(account); + let code_hash = code.hash(); + storage.set_value(code_key, code_hash); + let known_code_key = get_known_code_key(&code_hash); + storage.set_value(known_code_key, H256::from_low_u64_be(1)); + storage.store_factory_dep(code_hash, code.clone().into_bytes()); + } - match &overrides.state { - Some(OverrideState::State(state)) => { - let account = AccountTreeId::new(*account); - self.override_account_state_diff(account, state); - self.overridden_accounts.insert(account); + match &overrides.state 
{ + Some(OverrideState::State(state)) => { + let account = AccountTreeId::new(*account); + for (&key, &value) in state { + storage.set_value(StorageKey::new(account, key), value); } - Some(OverrideState::StateDiff(state_diff)) => { - let account = AccountTreeId::new(*account); - self.override_account_state_diff(account, state_diff); + storage.insert_erased_account(account); + } + Some(OverrideState::StateDiff(state_diff)) => { + let account = AccountTreeId::new(*account); + for (&key, &value) in state_diff { + storage.set_value(StorageKey::new(account, key), value); } - None => { /* do nothing */ } } + None => { /* do nothing */ } } } - - fn store_factory_dep(&mut self, hash: H256, code: Vec) { - self.overridden_factory_deps.insert(hash, code); - } - - fn override_account_state_diff( - &mut self, - account: AccountTreeId, - state_diff: &HashMap, - ) { - let account_slots = state_diff - .iter() - .map(|(&slot, &value)| (StorageKey::new(account, slot), value)); - self.overridden_slots.extend(account_slots); - } -} - -impl ReadStorage for StorageWithOverrides { - fn read_value(&mut self, key: &StorageKey) -> StorageValue { - if let Some(value) = self.overridden_slots.get(key) { - return *value; - } - if self.overridden_accounts.contains(key.account()) { - return H256::zero(); - } - self.storage_handle.read_value(key) - } - - fn is_write_initial(&mut self, key: &StorageKey) -> bool { - self.storage_handle.is_write_initial(key) - } - - fn load_factory_dep(&mut self, hash: H256) -> Option> { - self.overridden_factory_deps - .get(&hash) - .cloned() - .or_else(|| self.storage_handle.load_factory_dep(hash)) - } - - fn get_enumeration_index(&mut self, key: &StorageKey) -> Option { - self.storage_handle.get_enumeration_index(key) - } + storage } #[cfg(test)] mod tests { + use std::collections::HashMap; + use zksync_multivm::interface::storage::InMemoryStorage; use zksync_types::{ api::state_override::{Bytecode, OverrideAccount}, @@ -184,7 +124,7 @@ mod tests { storage.set_value(retained_key, H256::repeat_byte(0xfe)); let erased_key = StorageKey::new(AccountTreeId::new(Address::repeat_byte(5)), H256::zero()); storage.set_value(erased_key, H256::repeat_byte(1)); - let mut storage = StorageWithOverrides::new(storage, &overrides); + let mut storage = apply_state_override(storage, &overrides); let balance = storage.read_value(&storage_key_for_eth_balance(&Address::repeat_byte(1))); assert_eq!(balance, H256::from_low_u64_be(1)); diff --git a/core/node/api_server/src/execution_sandbox/validate.rs b/core/node/api_server/src/execution_sandbox/validate.rs index 9a3c88f8bf0c..758547abbd6e 100644 --- a/core/node/api_server/src/execution_sandbox/validate.rs +++ b/core/node/api_server/src/execution_sandbox/validate.rs @@ -5,16 +5,15 @@ use tracing::Instrument; use zksync_dal::{Connection, Core, CoreDal}; use zksync_multivm::interface::{ executor::TransactionValidator, + storage::StorageWithOverrides, tracer::{ValidationError as RawValidationError, ValidationParams}, }; use zksync_types::{ - api::state_override::StateOverride, fee_model::BatchFeeInput, l2::L2Tx, Address, - TRUSTED_ADDRESS_SLOTS, TRUSTED_TOKEN_SLOTS, + fee_model::BatchFeeInput, l2::L2Tx, Address, TRUSTED_ADDRESS_SLOTS, TRUSTED_TOKEN_SLOTS, }; use super::{ execute::{SandboxAction, SandboxExecutor}, - storage::StorageWithOverrides, vm_metrics::{SandboxStage, EXECUTION_METRICS, SANDBOX_METRICS}, BlockArgs, VmPermit, }; @@ -57,7 +56,7 @@ impl SandboxExecutor { let SandboxAction::Execution { tx, .. 
} = action else { unreachable!(); // by construction }; - let storage = StorageWithOverrides::new(storage, &StateOverride::default()); + let storage = StorageWithOverrides::new(storage); let stage_latency = SANDBOX_METRICS.sandbox[&SandboxStage::Validation].start(); let validation_result = self diff --git a/core/node/api_server/src/testonly.rs b/core/node/api_server/src/testonly.rs index 6da8e333495f..3add9c2f165c 100644 --- a/core/node/api_server/src/testonly.rs +++ b/core/node/api_server/src/testonly.rs @@ -10,7 +10,7 @@ use zksync_contracts::{ }; use zksync_dal::{Connection, Core, CoreDal}; use zksync_multivm::utils::derive_base_fee_and_gas_per_pubdata; -use zksync_system_constants::L2_BASE_TOKEN_ADDRESS; +use zksync_system_constants::{L2_BASE_TOKEN_ADDRESS, REQUIRED_L1_TO_L2_GAS_PER_PUBDATA_BYTE}; use zksync_types::{ api::state_override::{Bytecode, OverrideAccount, OverrideState, StateOverride}, ethabi, @@ -18,11 +18,12 @@ use zksync_types::{ fee::Fee, fee_model::FeeParams, get_code_key, get_known_code_key, + l1::L1Tx, l2::L2Tx, - transaction_request::{CallRequest, PaymasterParams}, + transaction_request::{CallRequest, Eip712Meta, PaymasterParams}, utils::storage_key_for_eth_balance, AccountTreeId, Address, K256PrivateKey, L2BlockNumber, L2ChainId, Nonce, ProtocolVersionId, - StorageKey, StorageLog, H256, U256, + StorageKey, StorageLog, EIP_712_TX_TYPE, H256, U256, }; use zksync_utils::{address_to_u256, u256_to_h256}; @@ -343,6 +344,8 @@ pub(crate) trait TestAccount { fn create_counter_tx(&self, increment: U256, revert: bool) -> L2Tx; + fn create_l1_counter_tx(&self, increment: U256, revert: bool) -> L1Tx; + fn query_counter_value(&self) -> CallRequest; fn create_infinite_loop_tx(&self) -> L2Tx; @@ -482,6 +485,26 @@ impl TestAccount for K256PrivateKey { .unwrap() } + fn create_l1_counter_tx(&self, increment: U256, revert: bool) -> L1Tx { + let calldata = load_contract(COUNTER_CONTRACT_PATH) + .function("incrementWithRevert") + .expect("no `incrementWithRevert` function") + .encode_input(&[Token::Uint(increment), Token::Bool(revert)]) + .expect("failed encoding `incrementWithRevert` input"); + let request = CallRequest { + data: Some(calldata.into()), + from: Some(self.address()), + to: Some(StateBuilder::COUNTER_CONTRACT_ADDRESS), + transaction_type: Some(EIP_712_TX_TYPE.into()), + eip712_meta: Some(Eip712Meta { + gas_per_pubdata: REQUIRED_L1_TO_L2_GAS_PER_PUBDATA_BYTE.into(), + ..Eip712Meta::default() + }), + ..CallRequest::default() + }; + L1Tx::from_request(request, false).unwrap() + } + fn query_counter_value(&self) -> CallRequest { let calldata = load_contract(COUNTER_CONTRACT_PATH) .function("get") diff --git a/core/node/api_server/src/tx_sender/mod.rs b/core/node/api_server/src/tx_sender/mod.rs index 38794fe71371..75cc1ad602f8 100644 --- a/core/node/api_server/src/tx_sender/mod.rs +++ b/core/node/api_server/src/tx_sender/mod.rs @@ -25,6 +25,7 @@ use zksync_types::{ l2::{error::TxCheckError::TxDuplication, L2Tx}, transaction_request::CallOverrides, utils::storage_key_for_eth_balance, + vm::FastVmMode, AccountTreeId, Address, L2ChainId, Nonce, ProtocolVersionId, Transaction, H160, H256, MAX_NEW_FACTORY_DEPS, U256, }; @@ -89,6 +90,7 @@ pub async fn build_tx_sender( /// Oneshot executor options used by the API server sandbox. #[derive(Debug)] pub struct SandboxExecutorOptions { + pub(crate) fast_vm_mode: FastVmMode, /// Env parameters to be used when estimating gas. pub(crate) estimate_gas: OneshotEnvParameters, /// Env parameters to be used when performing `eth_call` requests. 
@@ -114,6 +116,7 @@ impl SandboxExecutorOptions { .context("failed loading base contracts for calls / tx execution")?; Ok(Self { + fast_vm_mode: FastVmMode::Old, estimate_gas: OneshotEnvParameters::new( Arc::new(estimate_gas_contracts), chain_id, @@ -129,6 +132,11 @@ impl SandboxExecutorOptions { }) } + /// Sets the fast VM mode used by this executor. + pub fn set_fast_vm_mode(&mut self, fast_vm_mode: FastVmMode) { + self.fast_vm_mode = fast_vm_mode; + } + pub(crate) async fn mock() -> Self { Self::new(L2ChainId::default(), AccountTreeId::default(), u32::MAX) .await diff --git a/core/node/api_server/src/tx_sender/tests/gas_estimation.rs b/core/node/api_server/src/tx_sender/tests/gas_estimation.rs index 4528d9cda12f..7db1b8339314 100644 --- a/core/node/api_server/src/tx_sender/tests/gas_estimation.rs +++ b/core/node/api_server/src/tx_sender/tests/gas_estimation.rs @@ -74,6 +74,28 @@ async fn initial_estimate_for_load_test_transaction(tx_params: LoadnextContractE test_initial_estimate(state_override, tx, DEFAULT_MULTIPLIER).await; } +#[tokio::test] +async fn initial_gas_estimate_for_l1_transaction() { + let alice = K256PrivateKey::random(); + let state_override = StateBuilder::default().with_counter_contract(0).build(); + let tx = alice.create_l1_counter_tx(1.into(), false); + + let pool = ConnectionPool::::constrained_test_pool(1).await; + let tx_sender = create_real_tx_sender(pool).await; + let block_args = pending_block_args(&tx_sender).await; + let mut estimator = GasEstimator::new(&tx_sender, tx.into(), block_args, Some(state_override)) + .await + .unwrap(); + estimator.adjust_transaction_fee(); + let initial_estimate = estimator.initialize().await.unwrap(); + assert!(initial_estimate.total_gas_charged.is_none()); + + let (vm_result, _) = estimator.unadjusted_step(15_000).await.unwrap(); + assert!(vm_result.result.is_failed(), "{:?}", vm_result.result); + let (vm_result, _) = estimator.unadjusted_step(1_000_000).await.unwrap(); + assert!(!vm_result.result.is_failed(), "{:?}", vm_result.result); +} + #[test_casing(2, [false, true])] #[tokio::test] async fn initial_estimate_for_deep_recursion(with_reads: bool) { @@ -322,9 +344,10 @@ async fn insufficient_funds_error_for_transfer() { async fn test_estimating_gas( state_override: StateOverride, - tx: L2Tx, + tx: impl Into, acceptable_overestimation: u64, ) { + let tx = tx.into(); let pool = ConnectionPool::::constrained_test_pool(1).await; let tx_sender = create_real_tx_sender(pool).await; let block_args = pending_block_args(&tx_sender).await; @@ -332,7 +355,7 @@ async fn test_estimating_gas( let fee_scale_factor = 1.0; let fee = tx_sender .get_txs_fee_in_wei( - tx.clone().into(), + tx.clone(), block_args.clone(), fee_scale_factor, acceptable_overestimation, @@ -350,7 +373,7 @@ async fn test_estimating_gas( let fee = tx_sender .get_txs_fee_in_wei( - tx.into(), + tx, block_args, fee_scale_factor, acceptable_overestimation, @@ -383,6 +406,15 @@ async fn estimating_gas_for_transfer(acceptable_overestimation: u64) { test_estimating_gas(state_override, tx, acceptable_overestimation).await; } +#[tokio::test] +async fn estimating_gas_for_l1_transaction() { + let alice = K256PrivateKey::random(); + let state_override = StateBuilder::default().with_counter_contract(0).build(); + let tx = alice.create_l1_counter_tx(1.into(), false); + + test_estimating_gas(state_override, tx, 0).await; +} + #[test_casing(10, Product((LOAD_TEST_CASES, [0, 100])))] #[tokio::test] async fn estimating_gas_for_load_test_tx( diff --git 
a/core/node/api_server/src/tx_sender/tests/mod.rs b/core/node/api_server/src/tx_sender/tests/mod.rs index cacd616202d2..ea3f77fbcd82 100644 --- a/core/node/api_server/src/tx_sender/tests/mod.rs +++ b/core/node/api_server/src/tx_sender/tests/mod.rs @@ -145,13 +145,14 @@ async fn create_real_tx_sender(pool: ConnectionPool) -> TxSender { drop(storage); let genesis_config = genesis_params.config(); - let executor_options = SandboxExecutorOptions::new( + let mut executor_options = SandboxExecutorOptions::new( genesis_config.l2_chain_id, AccountTreeId::new(genesis_config.fee_account), u32::MAX, ) .await .unwrap(); + executor_options.set_fast_vm_mode(FastVmMode::Shadow); let pg_caches = PostgresStorageCaches::new(1, 1); let tx_executor = SandboxExecutor::real(executor_options, pg_caches, usize::MAX); diff --git a/core/node/api_server/src/web3/tests/vm.rs b/core/node/api_server/src/web3/tests/vm.rs index 45128f579cda..7dd0164198a1 100644 --- a/core/node/api_server/src/web3/tests/vm.rs +++ b/core/node/api_server/src/web3/tests/vm.rs @@ -16,8 +16,8 @@ use zksync_multivm::interface::{ }; use zksync_types::{ api::ApiStorageLog, fee_model::BatchFeeInput, get_intrinsic_constants, - transaction_request::CallRequest, K256PrivateKey, L2ChainId, PackedEthSignature, - StorageLogKind, StorageLogWithPreviousValue, Transaction, U256, + transaction_request::CallRequest, vm::FastVmMode, K256PrivateKey, L2ChainId, + PackedEthSignature, StorageLogKind, StorageLogWithPreviousValue, Transaction, U256, }; use zksync_utils::u256_to_h256; use zksync_vm_executor::oneshot::{ @@ -92,6 +92,7 @@ impl BaseSystemContractsProvider for BaseContractsWithMockE fn executor_options_with_evm_emulator() -> SandboxExecutorOptions { let base_contracts = Arc::::default(); SandboxExecutorOptions { + fast_vm_mode: FastVmMode::Old, estimate_gas: OneshotEnvParameters::new( base_contracts.clone(), L2ChainId::default(), diff --git a/core/node/consensus/src/vm.rs b/core/node/consensus/src/vm.rs index 46b84c34061d..cbd4918dcee1 100644 --- a/core/node/consensus/src/vm.rs +++ b/core/node/consensus/src/vm.rs @@ -11,7 +11,8 @@ use zksync_vm_executor::oneshot::{ CallOrExecute, MainOneshotExecutor, MultiVMBaseSystemContracts, OneshotEnvParameters, }; use zksync_vm_interface::{ - executor::OneshotExecutor, ExecutionResult, OneshotTracingParams, TxExecutionArgs, + executor::OneshotExecutor, storage::StorageWithOverrides, ExecutionResult, + OneshotTracingParams, TxExecutionArgs, }; use crate::{abi, storage::ConnectionPool}; @@ -89,7 +90,7 @@ impl VM { let output = ctx .wait(self.executor.inspect_transaction_with_bytecode_compression( - storage, + StorageWithOverrides::new(storage), env, TxExecutionArgs::for_eth_call(tx), OneshotTracingParams::default(), diff --git a/core/node/node_framework/src/implementations/layers/web3_api/tx_sender.rs b/core/node/node_framework/src/implementations/layers/web3_api/tx_sender.rs index ba1a69e23bb6..023ef1059c79 100644 --- a/core/node/node_framework/src/implementations/layers/web3_api/tx_sender.rs +++ b/core/node/node_framework/src/implementations/layers/web3_api/tx_sender.rs @@ -6,7 +6,7 @@ use zksync_node_api_server::{ tx_sender::{SandboxExecutorOptions, TxSenderBuilder, TxSenderConfig}, }; use zksync_state::{PostgresStorageCaches, PostgresStorageCachesTask}; -use zksync_types::{AccountTreeId, Address}; +use zksync_types::{vm::FastVmMode, AccountTreeId, Address}; use zksync_web3_decl::{ client::{DynClient, L2}, jsonrpsee, @@ -60,6 +60,7 @@ pub struct TxSenderLayer { postgres_storage_caches_config: 
PostgresStorageCachesConfig, max_vm_concurrency: usize, whitelisted_tokens_for_aa_cache: bool, + vm_mode: FastVmMode, } #[derive(Debug, FromContext)] @@ -95,6 +96,7 @@ impl TxSenderLayer { postgres_storage_caches_config, max_vm_concurrency, whitelisted_tokens_for_aa_cache: false, + vm_mode: FastVmMode::Old, } } @@ -106,6 +108,12 @@ impl TxSenderLayer { self.whitelisted_tokens_for_aa_cache = value; self } + + /// Sets the fast VM modes used for all supported operations. + pub fn with_vm_mode(mut self, mode: FastVmMode) -> Self { + self.vm_mode = mode; + self + } } #[async_trait::async_trait] @@ -151,12 +159,13 @@ impl WiringLayer for TxSenderLayer { // TODO (BFT-138): Allow to dynamically reload API contracts let config = self.tx_sender_config; - let executor_options = SandboxExecutorOptions::new( + let mut executor_options = SandboxExecutorOptions::new( config.chain_id, AccountTreeId::new(config.fee_account_addr), config.validation_computational_gas_limit, ) .await?; + executor_options.set_fast_vm_mode(self.vm_mode); // Build `TxSender`. let mut tx_sender = TxSenderBuilder::new(config, replica_pool, tx_sink); diff --git a/etc/env/file_based/overrides/tests/integration.yaml b/etc/env/file_based/overrides/tests/integration.yaml new file mode 100644 index 000000000000..6ad031e29458 --- /dev/null +++ b/etc/env/file_based/overrides/tests/integration.yaml @@ -0,0 +1,4 @@ +experimental_vm: + # Use the shadow VM mode everywhere to catch divergences as early as possible + state_keeper_fast_vm_mode: SHADOW + api_fast_vm_mode: SHADOW diff --git a/etc/env/file_based/overrides/tests/loadtest-new.yaml b/etc/env/file_based/overrides/tests/loadtest-new.yaml index 2167f7347e09..e66625636b1f 100644 --- a/etc/env/file_based/overrides/tests/loadtest-new.yaml +++ b/etc/env/file_based/overrides/tests/loadtest-new.yaml @@ -1,7 +1,11 @@ db: merkle_tree: mode: LIGHTWEIGHT +api: + web3_json_rpc: + estimate_gas_optimize_search: true experimental_vm: state_keeper_fast_vm_mode: NEW + api_fast_vm_mode: NEW mempool: delay_interval: 50 diff --git a/etc/env/file_based/overrides/tests/loadtest-old.yaml b/etc/env/file_based/overrides/tests/loadtest-old.yaml index a2d66d1cf4a7..7b1a35870187 100644 --- a/etc/env/file_based/overrides/tests/loadtest-old.yaml +++ b/etc/env/file_based/overrides/tests/loadtest-old.yaml @@ -3,5 +3,6 @@ db: mode: LIGHTWEIGHT experimental_vm: state_keeper_fast_vm_mode: OLD + api_fast_vm_mode: OLD mempool: delay_interval: 50 From 91ec341b8aab19da7cfff125f0d94490df65cd06 Mon Sep 17 00:00:00 2001 From: Alexander Melnikov Date: Fri, 25 Oct 2024 07:35:05 -0600 Subject: [PATCH 15/32] chore(configs): Adjust file based configs (#3171) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit ## What ❔ * Add fields to mainnet.yaml so that `zkstack` can create mainnet ecosystem * Add comment about typo in `max_acceptable_priority_fee_in_gwei` (caused problems before) * Add `l1_batch_min_age_before_execute_seconds` to avoid sending Execute tx too early ## Why ❔ * To make things work ## Checklist - [ ] PR title corresponds to the body of PR (we generate changelog entries from PRs). - [ ] Tests for the changes have been added / updated. - [ ] Documentation comments have been added / updated. - [ ] Code has been formatted via `zkstack dev fmt` and `zkstack dev lint`. 
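For quick reference, a minimal sketch of the new execute-delay knob added by this change (illustrative fragment only — the key names and values come from the override diffs that follow, but the surrounding nesting is abbreviated here):

```yaml
# Illustrative fragment of an eth-sender override; see the mainnet/testnet diffs below for the real files.
aggregated_block_execute_deadline: 300           # flush the Execute aggregation after at most 5 minutes...
l1_batch_min_age_before_execute_seconds: 76000   # ...but never send Execute for a batch younger than 76000 s ≈ 21 h (mainnet)
# The testnet override uses 1500 s (25 min) instead.
```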
--- etc/env/ecosystems/mainnet.yaml | 5 +++++ etc/env/file_based/general.yaml | 2 +- etc/env/file_based/overrides/mainnet.yaml | 1 + etc/env/file_based/overrides/testnet.yaml | 1 + 4 files changed, 8 insertions(+), 1 deletion(-) diff --git a/etc/env/ecosystems/mainnet.yaml b/etc/env/ecosystems/mainnet.yaml index 7d4266e8b761..f7b09150793b 100644 --- a/etc/env/ecosystems/mainnet.yaml +++ b/etc/env/ecosystems/mainnet.yaml @@ -1,3 +1,5 @@ +create2_factory_addr: 0xce0042b868300000d44a59004da54a005ffdcf9f +create2_factory_salt: '0x0000000000000000000000000000000000000000000000000000000000000000' ecosystem_contracts: bridgehub_proxy_addr: 0x303a465B659cBB0ab36eE643eA362c509EEb5213 state_transition_proxy_addr: 0xc2eE6b6af7d616f6e27ce7F4A451Aedc2b0F5f5C @@ -17,3 +19,6 @@ l1: verifier_addr: 0x70F3FBf8a427155185Ec90BED8a3434203de9604 validator_timelock_addr: 0x5D8ba173Dc6C3c90C8f7C04C9288BeF5FDbAd06E base_token_addr: '0x0000000000000000000000000000000000000000' +l2: + testnet_paymaster_addr: '0x0000000000000000000000000000000000000000' + default_l2_upgrader: '0x0000000000000000000000000000000000000000' diff --git a/etc/env/file_based/general.yaml b/etc/env/file_based/general.yaml index 587ba4614a59..8758d38186f7 100644 --- a/etc/env/file_based/general.yaml +++ b/etc/env/file_based/general.yaml @@ -106,7 +106,7 @@ eth: max_eth_tx_data_size: 120000 aggregated_proof_sizes: [ 1 ] max_aggregated_tx_gas: 15000000 - max_acceptable_priority_fee_in_gwei: 100000000000 + max_acceptable_priority_fee_in_gwei: 100000000000 # typo: value is in wei (100 gwei) pubdata_sending_mode: BLOBS gas_adjuster: default_priority_fee_per_gas: 1000000000 diff --git a/etc/env/file_based/overrides/mainnet.yaml b/etc/env/file_based/overrides/mainnet.yaml index 7565aac869ae..847f9ae98aa6 100644 --- a/etc/env/file_based/overrides/mainnet.yaml +++ b/etc/env/file_based/overrides/mainnet.yaml @@ -11,6 +11,7 @@ eth: aggregated_block_prove_deadline: 300 aggregated_block_execute_deadline: 300 timestamp_criteria_max_allowed_lag: 104000 # 29h + l1_batch_min_age_before_execute_seconds: 76000 # 21h wait_confirmations: null gas_adjuster: pricing_formula_parameter_a: 1.06 diff --git a/etc/env/file_based/overrides/testnet.yaml b/etc/env/file_based/overrides/testnet.yaml index d36cf9fc7bc0..4643a963ed7f 100644 --- a/etc/env/file_based/overrides/testnet.yaml +++ b/etc/env/file_based/overrides/testnet.yaml @@ -11,6 +11,7 @@ eth: aggregated_block_prove_deadline: 300 aggregated_block_execute_deadline: 300 timestamp_criteria_max_allowed_lag: 104000 # 29h + l1_batch_min_age_before_execute_seconds: 1500 # 25m wait_confirmations: null gas_adjuster: pricing_formula_parameter_a: 1.1 From f3724a71c7466451d380981b05d68d8afd70cdca Mon Sep 17 00:00:00 2001 From: Patrick Date: Fri, 25 Oct 2024 16:01:02 +0200 Subject: [PATCH 16/32] feat(proof-data-handler): add tee_proof_generation_timeout_in_secs param (#3128) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit ## What ❔ Add `tee_proof_generation_timeout_in_secs` parameter to the `proof-data-handler` configuration to avoid sharing the same `proof_generation_timeout_in_secs` timeout with the ZK prover. This timeout is for retrying TEE proof generation if it fails. Retries continue indefinitely until successful. ## Why ❔ The TEE prover is much faster than the ZK prover, so some of the ZK timeouts are too long to be shared with the TEE-specific code. ## Checklist - [x] PR title corresponds to the body of PR (we generate changelog entries from PRs). 
- [ ] Tests for the changes have been added / updated. - [ ] Documentation comments have been added / updated. - [x] Code has been formatted via `zkstack dev fmt` and `zkstack dev lint`. --- core/lib/config/src/configs/proof_data_handler.rs | 13 +++++++++++++ core/lib/config/src/testonly.rs | 1 + core/lib/env_config/src/proof_data_handler.rs | 2 ++ core/lib/protobuf_config/src/proof_data_handler.rs | 9 +++++++++ .../protobuf_config/src/proto/config/prover.proto | 1 + .../proof_data_handler/src/tee_request_processor.rs | 2 +- core/node/proof_data_handler/src/tests.rs | 2 ++ etc/env/base/proof_data_handler.toml | 1 + etc/env/file_based/general.yaml | 1 + 9 files changed, 31 insertions(+), 1 deletion(-) diff --git a/core/lib/config/src/configs/proof_data_handler.rs b/core/lib/config/src/configs/proof_data_handler.rs index 1094b1bb1801..1d8703df51aa 100644 --- a/core/lib/config/src/configs/proof_data_handler.rs +++ b/core/lib/config/src/configs/proof_data_handler.rs @@ -9,6 +9,9 @@ pub struct TeeConfig { pub tee_support: bool, /// All batches before this one are considered to be processed. pub first_tee_processed_batch: L1BatchNumber, + /// Timeout in seconds for retrying TEE proof generation if it fails. Retries continue + /// indefinitely until successful. + pub tee_proof_generation_timeout_in_secs: u16, } impl Default for TeeConfig { @@ -16,6 +19,8 @@ impl Default for TeeConfig { TeeConfig { tee_support: Self::default_tee_support(), first_tee_processed_batch: Self::default_first_tee_processed_batch(), + tee_proof_generation_timeout_in_secs: + Self::default_tee_proof_generation_timeout_in_secs(), } } } @@ -28,6 +33,14 @@ impl TeeConfig { pub fn default_first_tee_processed_batch() -> L1BatchNumber { L1BatchNumber(0) } + + pub fn default_tee_proof_generation_timeout_in_secs() -> u16 { + 600 + } + + pub fn tee_proof_generation_timeout(&self) -> Duration { + Duration::from_secs(self.tee_proof_generation_timeout_in_secs.into()) + } } #[derive(Debug, Deserialize, Clone, PartialEq)] diff --git a/core/lib/config/src/testonly.rs b/core/lib/config/src/testonly.rs index f8e53e33042b..21ff9e2351b6 100644 --- a/core/lib/config/src/testonly.rs +++ b/core/lib/config/src/testonly.rs @@ -681,6 +681,7 @@ impl Distribution for EncodeDist { tee_config: configs::TeeConfig { tee_support: self.sample(rng), first_tee_processed_batch: L1BatchNumber(rng.gen()), + tee_proof_generation_timeout_in_secs: self.sample(rng), }, } } diff --git a/core/lib/env_config/src/proof_data_handler.rs b/core/lib/env_config/src/proof_data_handler.rs index b5bfda4544e7..47848585e769 100644 --- a/core/lib/env_config/src/proof_data_handler.rs +++ b/core/lib/env_config/src/proof_data_handler.rs @@ -28,6 +28,7 @@ mod tests { tee_config: TeeConfig { tee_support: true, first_tee_processed_batch: L1BatchNumber(1337), + tee_proof_generation_timeout_in_secs: 600, }, } } @@ -39,6 +40,7 @@ mod tests { PROOF_DATA_HANDLER_HTTP_PORT="3320" PROOF_DATA_HANDLER_TEE_SUPPORT="true" PROOF_DATA_HANDLER_FIRST_TEE_PROCESSED_BATCH="1337" + PROOF_DATA_HANDLER_TEE_PROOF_GENERATION_TIMEOUT_IN_SECS="600" "#; let mut lock = MUTEX.lock(); lock.set_env(config); diff --git a/core/lib/protobuf_config/src/proof_data_handler.rs b/core/lib/protobuf_config/src/proof_data_handler.rs index a587c702633f..c01e163bd771 100644 --- a/core/lib/protobuf_config/src/proof_data_handler.rs +++ b/core/lib/protobuf_config/src/proof_data_handler.rs @@ -23,6 +23,12 @@ impl ProtoRepr for proto::ProofDataHandler { .first_tee_processed_batch .map(|x| L1BatchNumber(x as u32)) 
.unwrap_or_else(configs::TeeConfig::default_first_tee_processed_batch), + tee_proof_generation_timeout_in_secs: self + .tee_proof_generation_timeout_in_secs + .map(|x| x as u16) + .unwrap_or_else( + configs::TeeConfig::default_tee_proof_generation_timeout_in_secs, + ), }, }) } @@ -33,6 +39,9 @@ impl ProtoRepr for proto::ProofDataHandler { proof_generation_timeout_in_secs: Some(this.proof_generation_timeout_in_secs.into()), tee_support: Some(this.tee_config.tee_support), first_tee_processed_batch: Some(this.tee_config.first_tee_processed_batch.0 as u64), + tee_proof_generation_timeout_in_secs: Some( + this.tee_config.tee_proof_generation_timeout_in_secs.into(), + ), } } } diff --git a/core/lib/protobuf_config/src/proto/config/prover.proto b/core/lib/protobuf_config/src/proto/config/prover.proto index 92ba770a7560..392834d25f3d 100644 --- a/core/lib/protobuf_config/src/proto/config/prover.proto +++ b/core/lib/protobuf_config/src/proto/config/prover.proto @@ -109,4 +109,5 @@ message ProofDataHandler { optional uint32 proof_generation_timeout_in_secs = 2; // required; s optional bool tee_support = 3; // optional optional uint64 first_tee_processed_batch = 4; // optional + optional uint32 tee_proof_generation_timeout_in_secs = 5; // optional } diff --git a/core/node/proof_data_handler/src/tee_request_processor.rs b/core/node/proof_data_handler/src/tee_request_processor.rs index 8e06d0c26bc9..b265b94d4d74 100644 --- a/core/node/proof_data_handler/src/tee_request_processor.rs +++ b/core/node/proof_data_handler/src/tee_request_processor.rs @@ -164,7 +164,7 @@ impl TeeRequestProcessor { .tee_proof_generation_dal() .lock_batch_for_proving( tee_type, - self.config.proof_generation_timeout(), + self.config.tee_config.tee_proof_generation_timeout(), min_batch_number, ) .await diff --git a/core/node/proof_data_handler/src/tests.rs b/core/node/proof_data_handler/src/tests.rs index 63ea087a81c4..87c6bff8a1f4 100644 --- a/core/node/proof_data_handler/src/tests.rs +++ b/core/node/proof_data_handler/src/tests.rs @@ -28,6 +28,7 @@ async fn request_tee_proof_inputs() { tee_config: TeeConfig { tee_support: true, first_tee_processed_batch: L1BatchNumber(0), + tee_proof_generation_timeout_in_secs: 600, }, }, L1BatchCommitmentMode::Rollup, @@ -86,6 +87,7 @@ async fn submit_tee_proof() { tee_config: TeeConfig { tee_support: true, first_tee_processed_batch: L1BatchNumber(0), + tee_proof_generation_timeout_in_secs: 600, }, }, L1BatchCommitmentMode::Rollup, diff --git a/etc/env/base/proof_data_handler.toml b/etc/env/base/proof_data_handler.toml index 7a1999a03c31..b56ac26fb177 100644 --- a/etc/env/base/proof_data_handler.toml +++ b/etc/env/base/proof_data_handler.toml @@ -1,4 +1,5 @@ [proof_data_handler] http_port = 3320 proof_generation_timeout_in_secs = 18000 +tee_proof_generation_timeout_in_secs = 600 tee_support = true diff --git a/etc/env/file_based/general.yaml b/etc/env/file_based/general.yaml index 8758d38186f7..5abee904765b 100644 --- a/etc/env/file_based/general.yaml +++ b/etc/env/file_based/general.yaml @@ -169,6 +169,7 @@ witness_vector_generator: data_handler: http_port: 3320 proof_generation_timeout_in_secs: 18000 + tee_proof_generation_timeout_in_secs: 600 tee_support: true prover_gateway: api_url: http://127.0.0.1:3320 From dd166f887b11a8dfb039a0030dda923c481f67af Mon Sep 17 00:00:00 2001 From: Yury Akudovich Date: Fri, 25 Oct 2024 16:55:26 +0200 Subject: [PATCH 17/32] feat(prover): Add scale failure events watching and pods eviction. 
(#3175) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit ## What ❔ Add scale failure events watching. Add pending pods eviction to different cluster if there are `FailedScaleUp` events. Keep watching k8s if an error occurred. ## Why ❔ To failover to different cluster faster if there is no capacity in the current one. ## Checklist - [x] PR title corresponds to the body of PR (we generate changelog entries from PRs). - [ ] Tests for the changes have been added / updated. - [ ] Documentation comments have been added / updated. - [x] Code has been formatted via `zkstack dev fmt` and `zkstack dev lint`. ref ZKD-1855 --- prover/Cargo.lock | 141 +++++++++++------ prover/Cargo.toml | 2 +- .../prover_autoscaler/src/cluster_types.rs | 7 + .../prover_autoscaler/src/global/scaler.rs | 31 +++- .../bin/prover_autoscaler/src/k8s/watcher.rs | 144 +++++++++++------- 5 files changed, 222 insertions(+), 103 deletions(-) diff --git a/prover/Cargo.lock b/prover/Cargo.lock index dbc3b3425e49..747d3df987e9 100644 --- a/prover/Cargo.lock +++ b/prover/Cargo.lock @@ -326,7 +326,7 @@ dependencies = [ "http 1.1.0", "http-body 1.0.0", "http-body-util", - "hyper 1.3.1", + "hyper 1.5.0", "hyper-util", "itoa", "matchit", @@ -341,7 +341,7 @@ dependencies = [ "serde_urlencoded", "sync_wrapper 1.0.1", "tokio", - "tower", + "tower 0.4.13", "tower-layer", "tower-service", "tracing", @@ -1605,6 +1605,18 @@ dependencies = [ "zeroize", ] +[[package]] +name = "educe" +version = "0.6.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "1d7bc049e1bd8cdeb31b68bbd586a9464ecf9f3944af3958a7a9d0f8b9799417" +dependencies = [ + "enum-ordinalize", + "proc-macro2 1.0.85", + "quote 1.0.36", + "syn 2.0.66", +] + [[package]] name = "either" version = "1.12.0" @@ -1678,6 +1690,26 @@ dependencies = [ "cfg-if", ] +[[package]] +name = "enum-ordinalize" +version = "4.3.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "fea0dcfa4e54eeb516fe454635a95753ddd39acda650ce703031c6973e315dd5" +dependencies = [ + "enum-ordinalize-derive", +] + +[[package]] +name = "enum-ordinalize-derive" +version = "4.3.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "0d28318a75d4aead5c4db25382e8ef717932d0346600cacae6357eb5941bc5ff" +dependencies = [ + "proc-macro2 1.0.85", + "quote 1.0.36", + "syn 2.0.66", +] + [[package]] name = "enum_dispatch" version = "0.3.13" @@ -2610,9 +2642,9 @@ dependencies = [ [[package]] name = "hyper" -version = "1.3.1" +version = "1.5.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "fe575dd17d0862a9a33781c8c4696a55c320909004a67a00fb286ba8b1bc496d" +checksum = "bbbff0a806a4728c99295b254c8838933b5b082d75e3cb70c8dab21fdfbcfa9a" dependencies = [ "bytes", "futures-channel", @@ -2639,7 +2671,7 @@ dependencies = [ "futures-util", "headers", "http 1.1.0", - "hyper 1.3.1", + "hyper 1.5.0", "hyper-rustls", "hyper-util", "pin-project-lite", @@ -2657,7 +2689,7 @@ checksum = "5ee4be2c948921a1a5320b629c4193916ed787a7f7f293fd3f7f5a6c9de74155" dependencies = [ "futures-util", "http 1.1.0", - "hyper 1.3.1", + "hyper 1.5.0", "hyper-util", "log", "rustls", @@ -2674,7 +2706,7 @@ version = "0.5.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "3203a961e5c83b6f5498933e78b6b263e208c197b63e9c6c53cc82ffd3f63793" dependencies = [ - "hyper 1.3.1", + "hyper 1.5.0", "hyper-util", "pin-project-lite", "tokio", @@ -2702,7 +2734,7 @@ checksum = 
"70206fc6890eaca9fde8a0bf71caa2ddfc9fe045ac9e5c70df101a7dbde866e0" dependencies = [ "bytes", "http-body-util", - "hyper 1.3.1", + "hyper 1.5.0", "hyper-util", "native-tls", "tokio", @@ -2712,20 +2744,19 @@ dependencies = [ [[package]] name = "hyper-util" -version = "0.1.5" +version = "0.1.9" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7b875924a60b96e5d7b9ae7b066540b1dd1cbd90d1828f54c92e02a283351c56" +checksum = "41296eb09f183ac68eec06e03cdbea2e759633d4067b2f6552fc2e009bcad08b" dependencies = [ "bytes", "futures-channel", "futures-util", "http 1.1.0", "http-body 1.0.0", - "hyper 1.3.1", + "hyper 1.5.0", "pin-project-lite", "socket2", "tokio", - "tower", "tower-service", "tracing", ] @@ -3075,7 +3106,7 @@ dependencies = [ "async-trait", "base64 0.22.1", "http-body 1.0.0", - "hyper 1.3.1", + "hyper 1.5.0", "hyper-rustls", "hyper-util", "jsonrpsee-core", @@ -3086,7 +3117,7 @@ dependencies = [ "serde_json", "thiserror", "tokio", - "tower", + "tower 0.4.13", "tracing", "url", ] @@ -3206,9 +3237,9 @@ dependencies = [ [[package]] name = "kube" -version = "0.95.0" +version = "0.96.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "fa21063c854820a77c5d7f8deeb7ffa55246d8304e4bcd8cce2956752c6604f8" +checksum = "efffeb3df0bd4ef3e5d65044573499c0e4889b988070b08c50b25b1329289a1f" dependencies = [ "k8s-openapi", "kube-client", @@ -3219,9 +3250,9 @@ dependencies = [ [[package]] name = "kube-client" -version = "0.95.0" +version = "0.96.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "31c2355f5c9d8a11900e71a6fe1e47abd5ec45bf971eb4b162ffe97b46db9bb7" +checksum = "8bf471ece8ff8d24735ce78dac4d091e9fcb8d74811aeb6b75de4d1c3f5de0f1" dependencies = [ "base64 0.22.1", "bytes", @@ -3232,7 +3263,7 @@ dependencies = [ "http 1.1.0", "http-body 1.0.0", "http-body-util", - "hyper 1.3.1", + "hyper 1.5.0", "hyper-http-proxy", "hyper-rustls", "hyper-timeout", @@ -3243,23 +3274,23 @@ dependencies = [ "pem", "rustls", "rustls-pemfile 2.1.2", - "secrecy", + "secrecy 0.10.3", "serde", "serde_json", "serde_yaml", "thiserror", "tokio", "tokio-util", - "tower", + "tower 0.5.1", "tower-http", "tracing", ] [[package]] name = "kube-core" -version = "0.95.0" +version = "0.96.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f3030bd91c9db544a50247e7d48d7db9cf633c172732dce13351854526b1e666" +checksum = "f42346d30bb34d1d7adc5c549b691bce7aa3a1e60254e68fab7e2d7b26fe3d77" dependencies = [ "chrono", "form_urlencoded", @@ -3275,9 +3306,9 @@ dependencies = [ [[package]] name = "kube-derive" -version = "0.95.0" +version = "0.96.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "fa98be978eddd70a773aa8e86346075365bfb7eb48783410852dbf7cb57f0c27" +checksum = "f9364e04cc5e0482136c6ee8b7fb7551812da25802249f35b3def7aaa31e82ad" dependencies = [ "darling 0.20.10", "proc-macro2 1.0.85", @@ -3288,16 +3319,16 @@ dependencies = [ [[package]] name = "kube-runtime" -version = "0.95.0" +version = "0.96.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5895cb8aa641ac922408f128b935652b34c2995f16ad7db0984f6caa50217914" +checksum = "d3fbf1f6ffa98e65f1d2a9a69338bb60605d46be7edf00237784b89e62c9bd44" dependencies = [ "ahash 0.8.11", "async-broadcast", "async-stream", "async-trait", "backoff", - "derivative", + "educe", "futures 0.3.30", "hashbrown 0.14.5", "json-patch", @@ -4876,7 +4907,7 @@ dependencies = [ "http 1.1.0", "http-body 1.0.0", "http-body-util", - "hyper 1.3.1", + "hyper 
1.5.0", "hyper-rustls", "hyper-tls 0.6.0", "hyper-util", @@ -5321,7 +5352,15 @@ version = "0.8.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "9bd1c54ea06cfd2f6b63219704de0b9b4f72dcc2b8fdef820be6cd799780e91e" dependencies = [ - "serde", + "zeroize", +] + +[[package]] +name = "secrecy" +version = "0.10.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "e891af845473308773346dc847b2c23ee78fe442e0472ac50e22a18a93d3ae5a" +dependencies = [ "zeroize", ] @@ -6542,7 +6581,7 @@ dependencies = [ "http 1.1.0", "http-body 1.0.0", "http-body-util", - "hyper 1.3.1", + "hyper 1.5.0", "hyper-timeout", "hyper-util", "percent-encoding", @@ -6551,7 +6590,7 @@ dependencies = [ "socket2", "tokio", "tokio-stream", - "tower", + "tower 0.4.13", "tower-layer", "tower-service", "tracing", @@ -6577,18 +6616,34 @@ dependencies = [ "tracing", ] +[[package]] +name = "tower" +version = "0.5.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "2873938d487c3cfb9aed7546dc9f2711d867c9f90c46b889989a2cb84eba6b4f" +dependencies = [ + "futures-core", + "futures-util", + "pin-project-lite", + "sync_wrapper 0.1.2", + "tokio", + "tokio-util", + "tower-layer", + "tower-service", + "tracing", +] + [[package]] name = "tower-http" -version = "0.5.2" +version = "0.6.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1e9cd434a998747dd2c4276bc96ee2e0c7a2eadf3cae88e52be55a05fa9053f5" +checksum = "8437150ab6bbc8c5f0f519e3d5ed4aa883a83dd4cdd3d1b21f9482936046cb97" dependencies = [ - "base64 0.21.7", + "base64 0.22.1", "bitflags 2.6.0", "bytes", "http 1.1.0", "http-body 1.0.0", - "http-body-util", "mime", "pin-project-lite", "tower-layer", @@ -6598,15 +6653,15 @@ dependencies = [ [[package]] name = "tower-layer" -version = "0.3.2" +version = "0.3.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c20c8dbed6283a09604c3e69b4b7eeb54e298b8a600d4d5ecb5ad39de609f1d0" +checksum = "121c2a6cda46980bb0fcd1647ffaf6cd3fc79a013de288782836f6df9c48780e" [[package]] name = "tower-service" -version = "0.3.2" +version = "0.3.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b6bc1c9ce2b5135ac7f93c72918fc37feb872bdc6a5533a8b85eb4b86bfdae52" +checksum = "8df9b6e13f2d32c91b9bd719c00d1958837bc7dec474d94952798cc8e69eeec3" [[package]] name = "tracing" @@ -7745,7 +7800,7 @@ dependencies = [ "ethabi", "hex", "num_enum 0.7.2", - "secrecy", + "secrecy 0.8.0", "serde", "serde_json", "serde_with", @@ -7830,7 +7885,7 @@ version = "0.1.0" dependencies = [ "anyhow", "rand 0.8.5", - "secrecy", + "secrecy 0.8.0", "serde", "strum", "strum_macros", @@ -8283,7 +8338,7 @@ dependencies = [ "hex", "prost 0.12.6", "rand 0.8.5", - "secrecy", + "secrecy 0.8.0", "serde_json", "serde_yaml", "time", diff --git a/prover/Cargo.toml b/prover/Cargo.toml index af022e691c1f..31c663590eff 100644 --- a/prover/Cargo.toml +++ b/prover/Cargo.toml @@ -32,7 +32,7 @@ indicatif = "0.16" itertools = "0.10.5" jemallocator = "0.5" k8s-openapi = { version = "0.23.0", features = ["v1_30"] } -kube = { version = "0.95.0", features = ["runtime", "derive"] } +kube = { version = "0.96.0", features = ["runtime", "derive"] } local-ip-address = "0.5.0" log = "0.4.20" md5 = "0.7.0" diff --git a/prover/crates/bin/prover_autoscaler/src/cluster_types.rs b/prover/crates/bin/prover_autoscaler/src/cluster_types.rs index b800b86f3c28..e3e4c9b4df0d 100644 --- a/prover/crates/bin/prover_autoscaler/src/cluster_types.rs +++ 
b/prover/crates/bin/prover_autoscaler/src/cluster_types.rs @@ -29,11 +29,18 @@ where ordered.serialize(serializer) } +#[derive(Debug, Default, Clone, Serialize, Deserialize)] +pub struct ScaleEvent { + pub name: String, + pub time: DateTime, +} + #[derive(Debug, Default, Clone, Serialize, Deserialize)] pub struct Namespace { #[serde(serialize_with = "ordered_map")] pub deployments: HashMap, pub pods: HashMap, + pub scale_errors: Vec, } #[derive(Debug, Clone, Default, Serialize, Deserialize)] diff --git a/prover/crates/bin/prover_autoscaler/src/global/scaler.rs b/prover/crates/bin/prover_autoscaler/src/global/scaler.rs index 884174562a10..eb4249d071fe 100644 --- a/prover/crates/bin/prover_autoscaler/src/global/scaler.rs +++ b/prover/crates/bin/prover_autoscaler/src/global/scaler.rs @@ -21,7 +21,7 @@ struct GPUPool { name: String, gpu: Gpu, provers: HashMap, // TODO: consider using i64 everywhere to avoid type casts. - preemtions: u64, + scale_errors: usize, max_pool_size: u32, } @@ -140,6 +140,11 @@ impl Scaler { .and_then(|inner_map| inner_map.get(&gpu)) .copied() .unwrap_or(0), + scale_errors: namespace_value + .scale_errors + .iter() + .filter(|v| v.time < Utc::now() - chrono::Duration::hours(1)) // TODO Move the duration into config. + .count(), ..Default::default() }); @@ -147,6 +152,12 @@ impl Scaler { e.provers.insert(PodStatus::Running, 0); } + let recent_scale_errors = namespace_value + .scale_errors + .iter() + .filter(|v| v.time < Utc::now() - chrono::Duration::minutes(4)) // TODO Move the duration into config. This should be at least x2 or run interval. + .count(); + for ppg in namespace_value .pods .iter() @@ -158,10 +169,12 @@ impl Scaler { ..Default::default() }); let mut status = PodStatus::from_str(&ppg.pod.status).unwrap_or_default(); - if status == PodStatus::Pending - && ppg.pod.changed < Utc::now() - self.long_pending_duration - { - status = PodStatus::LongPending; + if status == PodStatus::Pending { + if ppg.pod.changed < Utc::now() - self.long_pending_duration { + status = PodStatus::LongPending; + } else if recent_scale_errors > 0 { + status = PodStatus::NeedToMove; + } } tracing::info!( "pod {}: status: {}, real status: {}", @@ -172,7 +185,7 @@ impl Scaler { e.provers.entry(status).and_modify(|n| *n += 1).or_insert(1); } - tracing::info!("From pods {:?}", gp_map.sorted_debug()); + tracing::debug!("From pods {:?}", gp_map.sorted_debug()); gp_map.into_values().collect() } @@ -195,7 +208,7 @@ impl Scaler { a.sum_by_pod_status(PodStatus::LongPending) .cmp(&b.sum_by_pod_status(PodStatus::LongPending)), ) // Sort by long Pending pods. - .then(a.preemtions.cmp(&b.preemtions)) // Sort by preemtions in the cluster. + .then(a.scale_errors.cmp(&b.scale_errors)) // Sort by scale_errors in the cluster. 
.then( self.cluster_priorities .get(&a.name) @@ -455,6 +468,7 @@ mod tests { }, )] .into(), + ..Default::default() }, )] .into(), @@ -521,6 +535,7 @@ mod tests { }, )] .into(), + ..Default::default() }, )] .into(), @@ -681,6 +696,7 @@ mod tests { ) ] .into(), + ..Default::default() }, )] .into(), @@ -718,6 +734,7 @@ mod tests { ) ] .into(), + ..Default::default() }, )] .into(), diff --git a/prover/crates/bin/prover_autoscaler/src/k8s/watcher.rs b/prover/crates/bin/prover_autoscaler/src/k8s/watcher.rs index f94dfc3704fb..5384db082bc7 100644 --- a/prover/crates/bin/prover_autoscaler/src/k8s/watcher.rs +++ b/prover/crates/bin/prover_autoscaler/src/k8s/watcher.rs @@ -1,6 +1,6 @@ use std::{collections::HashMap, sync::Arc}; -use chrono::Utc; +use chrono::{DateTime, Utc}; use futures::{stream, StreamExt, TryStreamExt}; use k8s_openapi::api; use kube::{ @@ -9,7 +9,7 @@ use kube::{ }; use tokio::sync::Mutex; -use crate::cluster_types::{Cluster, Deployment, Namespace, Pod}; +use crate::cluster_types::{Cluster, Deployment, Namespace, Pod, ScaleEvent}; #[derive(Clone)] pub struct Watcher { @@ -62,6 +62,15 @@ impl Watcher { .map_ok(Watched::Pod) .boxed(), ); + + let events: Api = Api::namespaced(self.client.clone(), namespace); + watchers.push( + watcher(events, watcher::Config::default()) + .default_backoff() + .applied_objects() + .map_ok(Watched::Event) + .boxed(), + ); } // select on applied events from all watchers let mut combo_stream = stream::select_all(watchers); @@ -70,61 +79,92 @@ impl Watcher { enum Watched { Deploy(api::apps::v1::Deployment), Pod(api::core::v1::Pod), + Event(api::core::v1::Event), } - while let Some(o) = combo_stream.try_next().await? { + while let Some(o) = combo_stream.next().await { match o { - Watched::Deploy(d) => { - let namespace = match d.namespace() { - Some(n) => n.to_string(), - None => continue, - }; - let mut cluster = self.cluster.lock().await; - let v = cluster.namespaces.get_mut(&namespace).unwrap(); - let dep = v - .deployments - .entry(d.name_any()) - .or_insert(Deployment::default()); - let nums = d.status.clone().unwrap_or_default(); - dep.running = nums.available_replicas.unwrap_or_default(); - dep.desired = nums.replicas.unwrap_or_default(); + Ok(o) => match o { + Watched::Deploy(d) => { + let namespace = match d.namespace() { + Some(n) => n.to_string(), + None => continue, + }; + let mut cluster = self.cluster.lock().await; + let v = cluster.namespaces.get_mut(&namespace).unwrap(); + let dep = v + .deployments + .entry(d.name_any()) + .or_insert(Deployment::default()); + let nums = d.status.clone().unwrap_or_default(); + dep.running = nums.available_replicas.unwrap_or_default(); + dep.desired = nums.replicas.unwrap_or_default(); - tracing::info!( - "Got deployment: {}, size: {}/{} un {}", - d.name_any(), - nums.available_replicas.unwrap_or_default(), - nums.replicas.unwrap_or_default(), - nums.unavailable_replicas.unwrap_or_default(), - ) - } - Watched::Pod(p) => { - let namespace = match p.namespace() { - Some(n) => n.to_string(), - None => continue, - }; - let mut cluster = self.cluster.lock().await; - let v = cluster.namespaces.get_mut(&namespace).unwrap(); - let pod = v.pods.entry(p.name_any()).or_insert(Pod::default()); - pod.owner = p - .owner_references() - .iter() - .map(|x| format!("{}/{}", x.kind.clone(), x.name.clone())) - .collect::>() - .join(":"); - // TODO: Collect replica sets to match deployments and pods. 
- let phase = p - .status - .clone() - .unwrap_or_default() - .phase - .unwrap_or_default(); - if phase != pod.status { - // TODO: try to get an idea how to set correct value on restart. - pod.changed = Utc::now(); + tracing::info!( + "Got deployment: {}, size: {}/{} un {}", + d.name_any(), + nums.available_replicas.unwrap_or_default(), + nums.replicas.unwrap_or_default(), + nums.unavailable_replicas.unwrap_or_default(), + ) } - pod.status = phase; + Watched::Pod(p) => { + let namespace = match p.namespace() { + Some(n) => n.to_string(), + None => continue, + }; + let mut cluster = self.cluster.lock().await; + let v = cluster.namespaces.get_mut(&namespace).unwrap(); + let pod = v.pods.entry(p.name_any()).or_insert(Pod::default()); + pod.owner = p + .owner_references() + .iter() + .map(|x| format!("{}/{}", x.kind.clone(), x.name.clone())) + .collect::>() + .join(":"); + // TODO: Collect replica sets to match deployments and pods. + let phase = p + .status + .clone() + .unwrap_or_default() + .phase + .unwrap_or_default(); + if phase != pod.status { + // TODO: try to get an idea how to set correct value on restart. + pod.changed = Utc::now(); + } + pod.status = phase; - tracing::info!("Got pod: {}", p.name_any()) - } + tracing::info!("Got pod: {}", p.name_any()) + } + Watched::Event(e) => { + let namespace: String = match e.namespace() { + Some(n) => n, + None => "".into(), + }; + let name = e.name_any(); + let reason = e.reason.unwrap_or_default(); + if reason != "FailedScaleUp" { + // Ignore all events which are not scale issues. + continue; + } + let time: DateTime = match e.last_timestamp { + Some(t) => t.0, + None => Utc::now(), + }; + tracing::debug!( + "Got event: {}/{}, message: {:?}; action: {:?}, reason: {:?}", + namespace, + name, + e.message.unwrap_or_default(), + e.action.unwrap_or_default(), + reason + ); + let mut cluster = self.cluster.lock().await; + let v = cluster.namespaces.get_mut(&namespace).unwrap(); + v.scale_errors.push(ScaleEvent { name, time }) + } + }, + Err(err) => tracing::warn!("Error during watch: {err:?}"), } } From 89eadd353c4fb84bb815ae56b29f4ff3467b80f3 Mon Sep 17 00:00:00 2001 From: EmilLuta Date: Sat, 26 Oct 2024 10:06:16 +0200 Subject: [PATCH 18/32] fix: allow compilation under current toolchain (#3176) Nightly breaks regex crate. The option is to either remove the nightly feature we used that now breaks, or pin nightly to something prior to 2024-10-17. The first option was picked, which requires updates to downstream dependencies. This PR updates said dependencies. 
--- Cargo.lock | 58 +++++++++++----------- Cargo.toml | 6 +-- prover/Cargo.lock | 122 +++++++++++++++++++++++----------------------- prover/Cargo.toml | 10 ++-- 4 files changed, 98 insertions(+), 98 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index de2c2d6c9b22..2e3a8a1c3e3a 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -1327,14 +1327,14 @@ dependencies = [ [[package]] name = "circuit_encodings" -version = "0.150.6" +version = "0.150.7" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f5128d4b8fbb27ac453f573a95601058e74487bdafd22a3168cded66bf340c28" +checksum = "2501cc688ef391013019495ae7035cfd54f86987e36d10f73976ce4c5d413c5a" dependencies = [ "derivative", "serde", - "zk_evm 0.150.6", - "zkevm_circuits 0.150.6", + "zk_evm 0.150.7", + "zkevm_circuits 0.150.7", ] [[package]] @@ -1394,11 +1394,11 @@ dependencies = [ [[package]] name = "circuit_sequencer_api" -version = "0.150.6" +version = "0.150.7" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "093d0c2c0b39144ddb4e1e88d73d95067ce34ec7750808b2eed01edbb510b88e" +checksum = "917d27db531fdd98a51e42ea465bc097f48cc849e7fad68d7856087d15125be1" dependencies = [ - "circuit_encodings 0.150.6", + "circuit_encodings 0.150.7", "derivative", "rayon", "serde", @@ -9360,9 +9360,9 @@ dependencies = [ [[package]] name = "zk_evm" -version = "0.150.6" +version = "0.150.7" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c14bda6c101389145cd01fac900f1392876bc0284d98faf7f376237baa2cb19d" +checksum = "3cc74fbe2b45fd19e95c59ea792c795feebdb616ebaa463f0ac567f495f47387" dependencies = [ "anyhow", "lazy_static", @@ -9370,7 +9370,7 @@ dependencies = [ "serde", "serde_json", "static_assertions", - "zk_evm_abstractions 0.150.6", + "zk_evm_abstractions 0.150.7", ] [[package]] @@ -9401,15 +9401,15 @@ dependencies = [ [[package]] name = "zk_evm_abstractions" -version = "0.150.6" +version = "0.150.7" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a008f2442fc6a508bdd1f902380242cb6ff11b8b27acdac2677c6d9f75cbb004" +checksum = "37f333a3b059899df09e40deb041af881bc03e496fda5eec618ffb5e854ee7df" dependencies = [ "anyhow", "num_enum 0.6.1", "serde", "static_assertions", - "zkevm_opcode_defs 0.150.6", + "zkevm_opcode_defs 0.150.7", ] [[package]] @@ -9458,9 +9458,9 @@ dependencies = [ [[package]] name = "zkevm_circuits" -version = "0.150.6" +version = "0.150.7" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1f68518aedd5358b17224771bb78bacd912cf66011aeda98b1f887cfb9e0972f" +checksum = "d06fb35b00699d25175a2ad447f86a9088af8b0bc698bb57086fb04c13e52eab" dependencies = [ "arrayvec 0.7.6", "boojum", @@ -9472,7 +9472,7 @@ dependencies = [ "seq-macro", "serde", "smallvec", - "zkevm_opcode_defs 0.150.6", + "zkevm_opcode_defs 0.150.7", "zksync_cs_derive", ] @@ -9520,9 +9520,9 @@ dependencies = [ [[package]] name = "zkevm_opcode_defs" -version = "0.150.6" +version = "0.150.7" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "762b5f1c1b283c5388995a85d40a05aef1c14f50eb904998b7e9364739f5b899" +checksum = "b83f3b279248af4ca86dec20a54127f02110b45570f3f6c1d13df49ba75c28a5" dependencies = [ "bitflags 2.6.0", "blake2 0.10.6", @@ -9646,7 +9646,7 @@ dependencies = [ "anyhow", "circuit_sequencer_api 0.140.3", "circuit_sequencer_api 0.141.2", - "circuit_sequencer_api 0.150.6", + "circuit_sequencer_api 0.150.7", "futures 0.3.30", "itertools 0.10.5", "num_cpus", @@ -9658,7 +9658,7 @@ dependencies = [ "vise", "zk_evm 0.133.0", 
"zk_evm 0.141.0", - "zk_evm 0.150.6", + "zk_evm 0.150.7", "zksync_contracts", "zksync_dal", "zksync_eth_client", @@ -10377,9 +10377,9 @@ dependencies = [ [[package]] name = "zksync_kzg" -version = "0.150.6" +version = "0.150.7" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6c006b6b7a27cc50ff0c515b6d0b197dbb907bbf65d1d2ea42fc3ed21b315642" +checksum = "dc58af8e4e4ad1a851ffd2275e6a44ead0f15a7eaac9dc9d60a56b3b9c9b08e8" dependencies = [ "boojum", "derivative", @@ -10389,7 +10389,7 @@ dependencies = [ "serde", "serde_json", "serde_with", - "zkevm_circuits 0.150.6", + "zkevm_circuits 0.150.7", ] [[package]] @@ -10516,7 +10516,7 @@ dependencies = [ "circuit_sequencer_api 0.140.3", "circuit_sequencer_api 0.141.2", "circuit_sequencer_api 0.142.2", - "circuit_sequencer_api 0.150.6", + "circuit_sequencer_api 0.150.7", "ethabi", "hex", "itertools 0.10.5", @@ -10530,7 +10530,7 @@ dependencies = [ "zk_evm 0.133.0", "zk_evm 0.140.0", "zk_evm 0.141.0", - "zk_evm 0.150.6", + "zk_evm 0.150.7", "zksync_contracts", "zksync_eth_signer", "zksync_mini_merkle_tree", @@ -10572,7 +10572,7 @@ dependencies = [ "tower-http", "tracing", "vise", - "zk_evm 0.150.6", + "zk_evm 0.150.7", "zksync_config", "zksync_consensus_roles", "zksync_contracts", @@ -10968,7 +10968,7 @@ version = "0.1.0" dependencies = [ "bincode", "chrono", - "circuit_sequencer_api 0.150.6", + "circuit_sequencer_api 0.150.7", "serde", "serde_json", "serde_with", @@ -11330,8 +11330,8 @@ source = "git+https://github.com/matter-labs/vm2.git?rev=df5bec3d04d64d434f9b0cc dependencies = [ "enum_dispatch", "primitive-types", - "zk_evm_abstractions 0.150.6", - "zkevm_opcode_defs 0.150.6", + "zk_evm_abstractions 0.150.7", + "zkevm_opcode_defs 0.150.7", "zksync_vm2_interface", ] diff --git a/Cargo.toml b/Cargo.toml index 0f8e6ba77ae6..dc6fdf1727e3 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -219,15 +219,15 @@ circuit_sequencer_api_1_3_3 = { package = "circuit_sequencer_api", version = "0. 
circuit_sequencer_api_1_4_0 = { package = "circuit_sequencer_api", version = "0.140" } circuit_sequencer_api_1_4_1 = { package = "circuit_sequencer_api", version = "0.141" } circuit_sequencer_api_1_4_2 = { package = "circuit_sequencer_api", version = "0.142" } -circuit_sequencer_api_1_5_0 = { package = "circuit_sequencer_api", version = "=0.150.6" } +circuit_sequencer_api_1_5_0 = { package = "circuit_sequencer_api", version = "=0.150.7" } crypto_codegen = { package = "zksync_solidity_vk_codegen", version = "=0.30.1" } -kzg = { package = "zksync_kzg", version = "=0.150.6" } +kzg = { package = "zksync_kzg", version = "=0.150.7" } zk_evm = { version = "=0.133.0" } zk_evm_1_3_1 = { package = "zk_evm", version = "0.131.0-rc.2" } zk_evm_1_3_3 = { package = "zk_evm", version = "0.133" } zk_evm_1_4_0 = { package = "zk_evm", version = "0.140" } zk_evm_1_4_1 = { package = "zk_evm", version = "0.141" } -zk_evm_1_5_0 = { package = "zk_evm", version = "=0.150.6" } +zk_evm_1_5_0 = { package = "zk_evm", version = "=0.150.7" } # New VM; pinned to a specific commit because of instability zksync_vm2 = { git = "https://github.com/matter-labs/vm2.git", rev = "df5bec3d04d64d434f9b0ccb285ba4681008f7b3" } diff --git a/prover/Cargo.lock b/prover/Cargo.lock index 747d3df987e9..d68ef368a4aa 100644 --- a/prover/Cargo.lock +++ b/prover/Cargo.lock @@ -651,9 +651,9 @@ dependencies = [ [[package]] name = "boojum-cuda" -version = "0.151.0" +version = "0.151.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "98c681a3f867afe40bcc188e5cb5260bbf5699531823affa3cbe28f7ca9b7bc9" +checksum = "4b63a717789f92f16fd566c78655d64017c690be59e473c3e769080c975a1f9e" dependencies = [ "boojum", "cmake", @@ -694,7 +694,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "05efc5cfd9110c8416e471df0e96702d58690178e206e61b7173706673c93706" dependencies = [ "memchr", - "regex-automata 0.4.6", + "regex-automata 0.4.8", "serde", ] @@ -799,11 +799,11 @@ dependencies = [ [[package]] name = "circuit_definitions" -version = "0.150.6" +version = "0.150.7" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "492404ea63c934d8e894325f0a741723bf91cd035cb34a92fddd8617c4a00fd3" +checksum = "76be9ee6e75f1f948d175ab9820ecc7189f72154c95ca503a1974012356f5363" dependencies = [ - "circuit_encodings 0.150.6", + "circuit_encodings 0.150.7", "crossbeam", "derivative", "seq-macro", @@ -849,14 +849,14 @@ dependencies = [ [[package]] name = "circuit_encodings" -version = "0.150.6" +version = "0.150.7" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f5128d4b8fbb27ac453f573a95601058e74487bdafd22a3168cded66bf340c28" +checksum = "2501cc688ef391013019495ae7035cfd54f86987e36d10f73976ce4c5d413c5a" dependencies = [ "derivative", "serde", - "zk_evm 0.150.6", - "zkevm_circuits 0.150.6", + "zk_evm 0.150.7", + "zkevm_circuits 0.150.7", ] [[package]] @@ -916,11 +916,11 @@ dependencies = [ [[package]] name = "circuit_sequencer_api" -version = "0.150.6" +version = "0.150.7" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "093d0c2c0b39144ddb4e1e88d73d95067ce34ec7750808b2eed01edbb510b88e" +checksum = "917d27db531fdd98a51e42ea465bc097f48cc849e7fad68d7856087d15125be1" dependencies = [ - "circuit_encodings 0.150.6", + "circuit_encodings 0.150.7", "derivative", "rayon", "serde", @@ -1773,9 +1773,9 @@ checksum = "5443807d6dff69373d433ab9ef5378ad8df50ca6298caf15de6e52e24aaf54d5" [[package]] name = "era_cudart" -version = "0.151.0" +version = "0.151.1" 
source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c1e1990fee6e9d25b40524ce53ca7977a211155a17bc7277f4dd354633e4fc22" +checksum = "ad950752eeb44f8938be405b95a1630f82e903f4a7adda355d92aad135fcd382" dependencies = [ "bitflags 2.6.0", "era_cudart_sys", @@ -1784,9 +1784,9 @@ dependencies = [ [[package]] name = "era_cudart_sys" -version = "0.151.0" +version = "0.151.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d84e8d300c28cd91ceb56340f66da8607409f44a45f5e694e23723630db8c852" +checksum = "c38607d52509b5db97cc4447c8644d6c5ca84f22ff8a9254f984669b1eb82ed4" dependencies = [ "serde_json", ] @@ -4454,7 +4454,7 @@ dependencies = [ "rand 0.8.5", "rand_chacha", "rand_xorshift", - "regex-syntax 0.8.3", + "regex-syntax 0.8.5", "rusty-fork", "tempfile", "unarray", @@ -4800,14 +4800,14 @@ dependencies = [ [[package]] name = "regex" -version = "1.10.6" +version = "1.11.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4219d74c6b67a3654a9fbebc4b419e22126d13d2f3c4a07ee0cb61ff79a79619" +checksum = "b544ef1b4eac5dc2db33ea63606ae9ffcfac26c1416a2806ae0bf5f56b201191" dependencies = [ "aho-corasick", "memchr", - "regex-automata 0.4.6", - "regex-syntax 0.8.3", + "regex-automata 0.4.8", + "regex-syntax 0.8.5", ] [[package]] @@ -4821,13 +4821,13 @@ dependencies = [ [[package]] name = "regex-automata" -version = "0.4.6" +version = "0.4.8" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "86b83b8b9847f9bf95ef68afb0b8e6cdb80f498442f5179a29fad448fcc1eaea" +checksum = "368758f23274712b504848e9d5a6f010445cc8b87a7cdb4d7cbee666c1288da3" dependencies = [ "aho-corasick", "memchr", - "regex-syntax 0.8.3", + "regex-syntax 0.8.5", ] [[package]] @@ -4838,9 +4838,9 @@ checksum = "f162c6dd7b008981e4d40210aca20b4bd0f9b60ca9271061b07f78537722f2e1" [[package]] name = "regex-syntax" -version = "0.8.3" +version = "0.8.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "adad44e29e4c806119491a7f06f03de4d1af22c3a680dd47f1e6e179439d1f56" +checksum = "2b15c43186be67a4fd63bee50d0303afffcef381492ebe2c5d87f324e1b8815c" [[package]] name = "rend" @@ -5721,9 +5721,9 @@ checksum = "24188a676b6ae68c3b2cb3a01be17fbf7240ce009799bb56d5b1409051e78fde" [[package]] name = "shivini" -version = "0.151.0" +version = "0.151.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "92776ca824f49c255a7417939706d759e0fd3dd4217420d01da68beae04f0bd6" +checksum = "9d2ac4440b6c23005c43a81cf064b9aa123fbeb992ac91cd04c7d485abb1fbea" dependencies = [ "bincode", "blake2 0.10.6", @@ -7526,9 +7526,9 @@ dependencies = [ [[package]] name = "zk_evm" -version = "0.150.6" +version = "0.150.7" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c14bda6c101389145cd01fac900f1392876bc0284d98faf7f376237baa2cb19d" +checksum = "3cc74fbe2b45fd19e95c59ea792c795feebdb616ebaa463f0ac567f495f47387" dependencies = [ "anyhow", "lazy_static", @@ -7536,7 +7536,7 @@ dependencies = [ "serde", "serde_json", "static_assertions", - "zk_evm_abstractions 0.150.6", + "zk_evm_abstractions 0.150.7", ] [[package]] @@ -7567,22 +7567,22 @@ dependencies = [ [[package]] name = "zk_evm_abstractions" -version = "0.150.6" +version = "0.150.7" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a008f2442fc6a508bdd1f902380242cb6ff11b8b27acdac2677c6d9f75cbb004" +checksum = "37f333a3b059899df09e40deb041af881bc03e496fda5eec618ffb5e854ee7df" dependencies = [ "anyhow", "num_enum 0.6.1", "serde", 
"static_assertions", - "zkevm_opcode_defs 0.150.6", + "zkevm_opcode_defs 0.150.7", ] [[package]] name = "zkevm-assembly" -version = "0.150.6" +version = "0.150.7" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5dc743ac7b0d618536dc3ace798fd4b8af78b057884afda5785c7970e15d62d0" +checksum = "cf011a0c83cbfb175f1e60811f0e0cd56551c9e35df596a762556662c638deb9" dependencies = [ "env_logger 0.9.3", "hex", @@ -7595,7 +7595,7 @@ dependencies = [ "smallvec", "structopt", "thiserror", - "zkevm_opcode_defs 0.150.6", + "zkevm_opcode_defs 0.150.7", ] [[package]] @@ -7644,9 +7644,9 @@ dependencies = [ [[package]] name = "zkevm_circuits" -version = "0.150.6" +version = "0.150.7" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1f68518aedd5358b17224771bb78bacd912cf66011aeda98b1f887cfb9e0972f" +checksum = "d06fb35b00699d25175a2ad447f86a9088af8b0bc698bb57086fb04c13e52eab" dependencies = [ "arrayvec 0.7.4", "boojum", @@ -7658,7 +7658,7 @@ dependencies = [ "seq-macro", "serde", "smallvec", - "zkevm_opcode_defs 0.150.6", + "zkevm_opcode_defs 0.150.7", "zksync_cs_derive", ] @@ -7706,9 +7706,9 @@ dependencies = [ [[package]] name = "zkevm_opcode_defs" -version = "0.150.6" +version = "0.150.7" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "762b5f1c1b283c5388995a85d40a05aef1c14f50eb904998b7e9364739f5b899" +checksum = "b83f3b279248af4ca86dec20a54127f02110b45570f3f6c1d13df49ba75c28a5" dependencies = [ "bitflags 2.6.0", "blake2 0.10.6", @@ -7723,13 +7723,13 @@ dependencies = [ [[package]] name = "zkevm_test_harness" -version = "0.150.6" +version = "0.150.7" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "73ad3e73d290a38a35dd245fd68cb6f498a8a8da4a52f846e88da3d3c31a34fd" +checksum = "d9c801aa17e9009699aacf654588d6adfaeeb8a490b2d9121847c201e2766803" dependencies = [ "bincode", "circuit_definitions", - "circuit_sequencer_api 0.150.6", + "circuit_sequencer_api 0.150.7", "codegen", "crossbeam", "derivative", @@ -7750,9 +7750,9 @@ dependencies = [ [[package]] name = "zksync-gpu-ffi" -version = "0.151.0" +version = "0.151.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d555e24b853359c5b076c52f9ff9e0ed62a7edc8c2f82f93517c524410c21ecb" +checksum = "5688dc060456f6c1e790d589f3abd6d9e9a11eb393d7383fbeb23b55961951e0" dependencies = [ "cmake", "crossbeam", @@ -7765,9 +7765,9 @@ dependencies = [ [[package]] name = "zksync-gpu-prover" -version = "0.151.0" +version = "0.151.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "615dad34e5fe678ec3b3e029af3f19313bebb1b771a8ce963c9ab9a8cc3879d3" +checksum = "5714848e6f8361820346483246dd68b4e7fb05ec41dd6610a8b53fb5c3ca7f3a" dependencies = [ "bit-vec", "cfg-if", @@ -7782,9 +7782,9 @@ dependencies = [ [[package]] name = "zksync-wrapper-prover" -version = "0.151.0" +version = "0.151.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "80721b2da2643bd43f664ac65673ee078e6973c0a88d75b73bfaeac8e1bf5432" +checksum = "52a6a1863818d939d445c53af57e53c222f11c2c94b9a94c3612dd938a3d983c" dependencies = [ "circuit_definitions", "zkevm_test_harness", @@ -8152,9 +8152,9 @@ dependencies = [ [[package]] name = "zksync_kzg" -version = "0.150.6" +version = "0.150.7" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6c006b6b7a27cc50ff0c515b6d0b197dbb907bbf65d1d2ea42fc3ed21b315642" +checksum = "dc58af8e4e4ad1a851ffd2275e6a44ead0f15a7eaac9dc9d60a56b3b9c9b08e8" dependencies = [ 
"boojum", "derivative", @@ -8164,7 +8164,7 @@ dependencies = [ "serde", "serde_json", "serde_with", - "zkevm_circuits 0.150.6", + "zkevm_circuits 0.150.7", ] [[package]] @@ -8200,7 +8200,7 @@ dependencies = [ "circuit_sequencer_api 0.140.3", "circuit_sequencer_api 0.141.2", "circuit_sequencer_api 0.142.2", - "circuit_sequencer_api 0.150.6", + "circuit_sequencer_api 0.150.7", "ethabi", "hex", "itertools 0.10.5", @@ -8212,7 +8212,7 @@ dependencies = [ "zk_evm 0.133.0", "zk_evm 0.140.0", "zk_evm 0.141.0", - "zk_evm 0.150.6", + "zk_evm 0.150.7", "zksync_contracts", "zksync_mini_merkle_tree", "zksync_system_constants", @@ -8265,7 +8265,7 @@ dependencies = [ "anyhow", "async-trait", "bincode", - "circuit_sequencer_api 0.150.6", + "circuit_sequencer_api 0.150.7", "clap 4.5.4", "ctrlc", "futures 0.3.30", @@ -8492,7 +8492,7 @@ name = "zksync_prover_interface" version = "0.1.0" dependencies = [ "chrono", - "circuit_sequencer_api 0.150.6", + "circuit_sequencer_api 0.150.7", "serde", "serde_with", "strum", @@ -8689,8 +8689,8 @@ source = "git+https://github.com/matter-labs/vm2.git?rev=df5bec3d04d64d434f9b0cc dependencies = [ "enum_dispatch", "primitive-types", - "zk_evm_abstractions 0.150.6", - "zkevm_opcode_defs 0.150.6", + "zk_evm_abstractions 0.150.7", + "zkevm_opcode_defs 0.150.7", "zksync_vm2_interface", ] diff --git a/prover/Cargo.toml b/prover/Cargo.toml index 31c663590eff..32c3185f64c3 100644 --- a/prover/Cargo.toml +++ b/prover/Cargo.toml @@ -63,13 +63,13 @@ url = "2.5.2" vise = "0.2.0" # Proving dependencies -circuit_definitions = "=0.150.6" -circuit_sequencer_api = "=0.150.6" -zkevm_test_harness = "=0.150.6" +circuit_definitions = "=0.150.7" +circuit_sequencer_api = "=0.150.7" +zkevm_test_harness = "=0.150.7" # GPU proving dependencies -wrapper_prover = { package = "zksync-wrapper-prover", version = "=0.151.0" } -shivini = "=0.151.0" +wrapper_prover = { package = "zksync-wrapper-prover", version = "=0.151.1" } +shivini = "=0.151.1" # Core workspace dependencies zksync_multivm = { path = "../core/lib/multivm", version = "0.1.0" } From b5490a04d3b73e520de9bdae0d132fa35a885665 Mon Sep 17 00:00:00 2001 From: Alex Ostrovski Date: Mon, 28 Oct 2024 18:31:35 +0200 Subject: [PATCH 19/32] test(vm): Improve instruction-counting VM benchmark (#3105) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit ## What ❔ Replaces `iai` with an alternative; brushes up instruction counting in general. ## Why ❔ - The library currently used for the benchmark (`iai`) is unmaintained. - It doesn't work with newer valgrind versions. - It doesn't allow measuring parts of program execution, only the entire program run. ## Checklist - [x] PR title corresponds to the body of PR (we generate changelog entries from PRs). - [x] Tests for the changes have been added / updated. - [x] Documentation comments have been added / updated. - [x] Code has been formatted via `zkstack dev fmt` and `zkstack dev lint`. 
--- .github/workflows/vm-perf-comparison.yml | 33 ++- .github/workflows/vm-perf-to-prometheus.yml | 4 +- Cargo.lock | 57 +++-- Cargo.toml | 4 +- core/lib/multivm/src/versions/vm_fast/mod.rs | 2 +- core/lib/vm_executor/src/batch/factory.rs | 2 +- core/tests/vm-benchmark/Cargo.toml | 4 +- core/tests/vm-benchmark/benches/iai.rs | 35 --- .../vm-benchmark/benches/instructions.rs | 206 ++++++++++++++++++ core/tests/vm-benchmark/src/bin/common/mod.rs | 54 ----- .../src/bin/compare_iai_results.rs | 108 --------- .../src/bin/iai_results_to_prometheus.rs | 52 ----- .../src/bin/instruction_counts.rs | 106 ++++++++- core/tests/vm-benchmark/src/criterion.rs | 6 +- core/tests/vm-benchmark/src/lib.rs | 2 +- core/tests/vm-benchmark/src/vm.rs | 114 ++++++---- 16 files changed, 446 insertions(+), 343 deletions(-) delete mode 100644 core/tests/vm-benchmark/benches/iai.rs create mode 100644 core/tests/vm-benchmark/benches/instructions.rs delete mode 100644 core/tests/vm-benchmark/src/bin/common/mod.rs delete mode 100644 core/tests/vm-benchmark/src/bin/compare_iai_results.rs delete mode 100644 core/tests/vm-benchmark/src/bin/iai_results_to_prometheus.rs diff --git a/.github/workflows/vm-perf-comparison.yml b/.github/workflows/vm-perf-comparison.yml index 49830a30cc1e..3520419f1337 100644 --- a/.github/workflows/vm-perf-comparison.yml +++ b/.github/workflows/vm-perf-comparison.yml @@ -40,6 +40,8 @@ jobs: echo "SCCACHE_GCS_SERVICE_ACCOUNT=gha-ci-runners@matterlabs-infra.iam.gserviceaccount.com" >> .env echo "SCCACHE_GCS_RW_MODE=READ_WRITE" >> .env echo "RUSTC_WRAPPER=sccache" >> .env + # Set the minimum reported instruction count difference to reduce noise + echo "BENCHMARK_DIFF_THRESHOLD_PERCENT=2" >> .env - name: init run: | @@ -51,8 +53,8 @@ jobs: run: | ci_run zkstackup -g --local ci_run zkstack dev contracts --system-contracts - ci_run cargo bench --package vm-benchmark --bench iai | tee base-iai - ci_run cargo run --package vm-benchmark --release --bin instruction_counts | tee base-opcodes || touch base-opcodes + ci_run cargo bench --package vm-benchmark --bench instructions -- --verbose || echo "Instructions benchmark is missing" + ci_run cargo run --package vm-benchmark --release --bin instruction_counts | tee base-opcodes - name: checkout PR run: | @@ -60,24 +62,39 @@ jobs: - name: run benchmarks on PR shell: bash + id: comparison run: | ci_run zkstackup -g --local ci_run zkstack dev contracts --system-contracts - ci_run cargo bench --package vm-benchmark --bench iai | tee pr-iai - ci_run cargo run --package vm-benchmark --release --bin instruction_counts | tee pr-opcodes || touch pr-opcodes + ci_run cargo bench --package vm-benchmark --bench instructions -- --verbose + ci_run cargo bench --package vm-benchmark --bench instructions -- --print > instructions.log 2>/dev/null + # Output all lines from the benchmark result starting from the "## ..." comparison header. + # Since the output spans multiple lines, we use a heredoc declaration. 
EOF=$(dd if=/dev/urandom bs=15 count=1 status=none | base64) echo "speedup<<$EOF" >> $GITHUB_OUTPUT - ci_run cargo run --package vm-benchmark --release --bin compare_iai_results base-iai pr-iai base-opcodes pr-opcodes >> $GITHUB_OUTPUT + sed -n '/^## /,$p' instructions.log >> $GITHUB_OUTPUT + echo "$EOF" >> $GITHUB_OUTPUT + + ci_run cargo run --package vm-benchmark --release --bin instruction_counts -- --diff base-opcodes > opcodes.log + echo "opcodes<<$EOF" >> $GITHUB_OUTPUT + sed -n '/^## /,$p' opcodes.log >> $GITHUB_OUTPUT echo "$EOF" >> $GITHUB_OUTPUT - id: comparison - name: Comment on PR uses: thollander/actions-comment-pull-request@fabd468d3a1a0b97feee5f6b9e499eab0dd903f6 # v2.5.0 + if: steps.comparison.outputs.speedup != '' || steps.comparison.outputs.opcodes != '' with: message: | - ${{ steps.comparison.outputs.speedup == '' && '## No performance difference detected (anymore)' || '## Detected VM performance changes' }} ${{ steps.comparison.outputs.speedup }} + ${{ steps.comparison.outputs.opcodes }} comment_tag: vm-performance-changes mode: recreate - create_if_not_exists: ${{ steps.comparison.outputs.speedup != '' }} + create_if_not_exists: true + - name: Remove PR comment + uses: thollander/actions-comment-pull-request@fabd468d3a1a0b97feee5f6b9e499eab0dd903f6 # v2.5.0 + if: steps.comparison.outputs.speedup == '' && steps.comparison.outputs.opcodes == '' + with: + comment_tag: vm-performance-changes + message: 'No performance difference detected (anymore)' + mode: delete diff --git a/.github/workflows/vm-perf-to-prometheus.yml b/.github/workflows/vm-perf-to-prometheus.yml index d336a1472e4a..93d33116794f 100644 --- a/.github/workflows/vm-perf-to-prometheus.yml +++ b/.github/workflows/vm-perf-to-prometheus.yml @@ -48,5 +48,5 @@ jobs: ci_run cargo bench --package vm-benchmark --bench oneshot # Run only benches with 1,000 transactions per batch to not spend too much time ci_run cargo bench --package vm-benchmark --bench batch '/1000$' - ci_run cargo bench --package vm-benchmark --bench iai | tee iai-result - ci_run cargo run --package vm-benchmark --bin iai_results_to_prometheus --release < iai-result + ci_run cargo bench --package vm-benchmark --bench instructions -- --verbose + ci_run cargo bench --package vm-benchmark --bench instructions -- --print diff --git a/Cargo.lock b/Cargo.lock index 2e3a8a1c3e3a..597da3c1b31b 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -959,7 +959,7 @@ name = "block_reverter" version = "0.1.0" dependencies = [ "anyhow", - "clap 4.5.18", + "clap 4.5.20", "serde_json", "tokio", "zksync_block_reverter", @@ -1445,9 +1445,9 @@ dependencies = [ [[package]] name = "clap" -version = "4.5.18" +version = "4.5.20" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b0956a43b323ac1afaffc053ed5c4b7c1f1800bacd1683c353aabbb752515dd3" +checksum = "b97f376d85a664d5837dbae44bf546e6477a679ff6610010f17276f686d867e8" dependencies = [ "clap_builder", "clap_derive", @@ -1455,14 +1455,15 @@ dependencies = [ [[package]] name = "clap_builder" -version = "4.5.18" +version = "4.5.20" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4d72166dd41634086d5803a47eb71ae740e61d84709c36f3c34110173db3961b" +checksum = "19bc80abd44e4bed93ca373a0704ccbd1b710dc5749406201bb018272808dc54" dependencies = [ "anstream", "anstyle", "clap_lex 0.7.2", "strsim 0.11.1", + "terminal_size", ] [[package]] @@ -2796,7 +2797,7 @@ name = "genesis_generator" version = "0.1.0" dependencies = [ "anyhow", - "clap 4.5.18", + "clap 4.5.20", "futures 0.3.30", 
"serde", "serde_json", @@ -3472,12 +3473,6 @@ dependencies = [ "tracing", ] -[[package]] -name = "iai" -version = "0.1.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "71a816c97c42258aa5834d07590b718b4c9a598944cd39a52dc25b351185d678" - [[package]] name = "iana-time-zone" version = "0.1.61" @@ -4122,7 +4117,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "4979f22fdb869068da03c9f7528f8297c6fd2606bc3a4affe42e6a823fdb8da4" dependencies = [ "cfg-if", - "windows-targets 0.52.6", + "windows-targets 0.48.5", ] [[package]] @@ -4392,7 +4387,7 @@ name = "merkle_tree_consistency_checker" version = "0.1.0" dependencies = [ "anyhow", - "clap 4.5.18", + "clap 4.5.20", "tracing", "zksync_config", "zksync_env_config", @@ -5498,7 +5493,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "22505a5c94da8e3b7c2996394d1c933236c4d743e81a410bcca4e6989fc066a4" dependencies = [ "bytes", - "heck 0.5.0", + "heck 0.4.1", "itertools 0.12.1", "log", "multimap", @@ -6583,7 +6578,7 @@ name = "selector_generator" version = "0.1.0" dependencies = [ "anyhow", - "clap 4.5.18", + "clap 4.5.20", "ethabi", "glob", "hex", @@ -7907,6 +7902,16 @@ dependencies = [ "winapi-util", ] +[[package]] +name = "terminal_size" +version = "0.4.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "4f599bd7ca042cfdf8f4512b277c02ba102247820f9d9d4a9f521f496751a6ef" +dependencies = [ + "rustix", + "windows-sys 0.59.0", +] + [[package]] name = "test-casing" version = "0.1.3" @@ -8751,11 +8756,11 @@ version = "0.1.0" dependencies = [ "assert_matches", "criterion", - "iai", "once_cell", "rand 0.8.5", "tokio", "vise", + "yab", "zksync_contracts", "zksync_multivm", "zksync_types", @@ -9239,6 +9244,18 @@ dependencies = [ "zeroize", ] +[[package]] +name = "yab" +version = "0.1.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "4b06cc62d4cec617d3c259537be0fcaa8a5bcf72ddf2983823d9528605f36ed3" +dependencies = [ + "anes", + "clap 4.5.20", + "num_cpus", + "thiserror", +] + [[package]] name = "yansi" version = "1.0.1" @@ -10229,7 +10246,7 @@ dependencies = [ "anyhow", "assert_matches", "async-trait", - "clap 4.5.18", + "clap 4.5.20", "envy", "futures 0.3.30", "rustc_version", @@ -10437,7 +10454,7 @@ version = "0.1.0" dependencies = [ "anyhow", "assert_matches", - "clap 4.5.18", + "clap 4.5.20", "insta", "leb128", "once_cell", @@ -11017,7 +11034,7 @@ name = "zksync_server" version = "0.1.0" dependencies = [ "anyhow", - "clap 4.5.18", + "clap 4.5.20", "futures 0.3.30", "serde_json", "tikv-jemallocator", diff --git a/Cargo.toml b/Cargo.toml index dc6fdf1727e3..6d51e5060aa8 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -122,6 +122,7 @@ derive_more = "1.0.0" envy = "0.4" ethabi = "18.0.0" flate2 = "1.0.28" +fraction = "0.15.3" futures = "0.3" glob = "0.3" google-cloud-auth = "0.16.0" @@ -131,7 +132,6 @@ hex = "0.4" http = "1.1" httpmock = "0.7.0" hyper = "1.3" -iai = "0.1" insta = "1.29.0" itertools = "0.10" jsonrpsee = { version = "0.23", default-features = false } @@ -190,7 +190,7 @@ tracing-opentelemetry = "0.25.0" time = "0.3.36" # Has to be same as used by `tracing-subscriber` url = "2" web3 = "0.19.0" -fraction = "0.15.3" +yab = "0.1.0" # Proc-macro syn = "2.0" diff --git a/core/lib/multivm/src/versions/vm_fast/mod.rs b/core/lib/multivm/src/versions/vm_fast/mod.rs index bb5a342bff28..35789c6cdc9a 100644 --- a/core/lib/multivm/src/versions/vm_fast/mod.rs +++ b/core/lib/multivm/src/versions/vm_fast/mod.rs @@ -1,4 
+1,4 @@ -pub use zksync_vm2::interface::Tracer; +pub use zksync_vm2::interface; pub use self::{circuits_tracer::CircuitsTracer, vm::Vm}; diff --git a/core/lib/vm_executor/src/batch/factory.rs b/core/lib/vm_executor/src/batch/factory.rs index f974d17f4a75..de0db5f0bf75 100644 --- a/core/lib/vm_executor/src/batch/factory.rs +++ b/core/lib/vm_executor/src/batch/factory.rs @@ -37,7 +37,7 @@ pub trait BatchTracer: fmt::Debug + 'static + Send + Sealed { const TRACE_CALLS: bool; /// Tracer for the fast VM. #[doc(hidden)] - type Fast: vm_fast::Tracer + Default + 'static; + type Fast: vm_fast::interface::Tracer + Default + 'static; } impl Sealed for () {} diff --git a/core/tests/vm-benchmark/Cargo.toml b/core/tests/vm-benchmark/Cargo.toml index 59c1e21493b4..892bcf1c1051 100644 --- a/core/tests/vm-benchmark/Cargo.toml +++ b/core/tests/vm-benchmark/Cargo.toml @@ -21,7 +21,7 @@ tokio.workspace = true [dev-dependencies] assert_matches.workspace = true -iai.workspace = true +yab.workspace = true [[bench]] name = "oneshot" @@ -32,5 +32,5 @@ name = "batch" harness = false [[bench]] -name = "iai" +name = "instructions" harness = false diff --git a/core/tests/vm-benchmark/benches/iai.rs b/core/tests/vm-benchmark/benches/iai.rs deleted file mode 100644 index 8cbb9f10dd83..000000000000 --- a/core/tests/vm-benchmark/benches/iai.rs +++ /dev/null @@ -1,35 +0,0 @@ -use iai::black_box; -use vm_benchmark::{BenchmarkingVm, BenchmarkingVmFactory, Bytecode, Fast, Legacy}; - -fn run_bytecode(name: &str) { - let tx = Bytecode::get(name).deploy_tx(); - black_box(BenchmarkingVm::::default().run_transaction(&tx)); -} - -macro_rules! make_functions_and_main { - ($($file:ident => $legacy_name:ident,)+) => { - $( - fn $file() { - run_bytecode::(stringify!($file)); - } - - fn $legacy_name() { - run_bytecode::(stringify!($file)); - } - )+ - - iai::main!($($file, $legacy_name,)+); - }; -} - -make_functions_and_main!( - access_memory => access_memory_legacy, - call_far => call_far_legacy, - decode_shl_sub => decode_shl_sub_legacy, - deploy_simple_contract => deploy_simple_contract_legacy, - finish_eventful_frames => finish_eventful_frames_legacy, - write_and_decode => write_and_decode_legacy, - event_spam => event_spam_legacy, - slot_hash_collision => slot_hash_collision_legacy, - heap_read_write => heap_read_write_legacy, -); diff --git a/core/tests/vm-benchmark/benches/instructions.rs b/core/tests/vm-benchmark/benches/instructions.rs new file mode 100644 index 000000000000..654dfef71b29 --- /dev/null +++ b/core/tests/vm-benchmark/benches/instructions.rs @@ -0,0 +1,206 @@ +//! Measures the number of host instructions required to run the benchmark bytecodes. + +use std::{env, sync::mpsc}; + +use vise::{Gauge, LabeledFamily, Metrics}; +use vm_benchmark::{ + criterion::PrometheusRuntime, BenchmarkingVm, BenchmarkingVmFactory, Fast, Legacy, BYTECODES, +}; +use yab::{ + reporter::{BenchmarkOutput, BenchmarkReporter, Reporter}, + AccessSummary, BenchMode, Bencher, BenchmarkId, +}; + +fn benchmarks_for_vm(bencher: &mut Bencher) { + bencher.bench( + BenchmarkId::new("init", VM::LABEL.as_str()), + BenchmarkingVm::::default, + ); + + for bytecode in BYTECODES { + bencher.bench_with_capture( + BenchmarkId::new(bytecode.name, VM::LABEL.as_str()), + |capture| { + let mut vm = yab::black_box(BenchmarkingVm::::default()); + let tx = yab::black_box(bytecode.deploy_tx()); + capture.measure(|| vm.run_transaction(&tx)); + }, + ); + } +} + +/// Reporter that pushes cachegrind metrics to Prometheus. 
+#[derive(Debug)] +struct MetricsReporter { + _runtime: Option, +} + +impl Default for MetricsReporter { + fn default() -> Self { + Self { + _runtime: PrometheusRuntime::new(), + } + } +} + +impl Reporter for MetricsReporter { + fn new_benchmark(&mut self, id: &BenchmarkId) -> Box { + Box::new(MetricsBenchmarkReporter(id.clone())) + } +} + +#[derive(Debug)] +struct MetricsBenchmarkReporter(BenchmarkId); + +impl BenchmarkReporter for MetricsBenchmarkReporter { + fn ok(self: Box, output: &BenchmarkOutput) { + #[derive(Debug, Metrics)] + #[metrics(prefix = "vm_cachegrind")] + struct VmCachegrindMetrics { + #[metrics(labels = ["benchmark"])] + instructions: LabeledFamily>, + #[metrics(labels = ["benchmark"])] + l1_accesses: LabeledFamily>, + #[metrics(labels = ["benchmark"])] + l2_accesses: LabeledFamily>, + #[metrics(labels = ["benchmark"])] + ram_accesses: LabeledFamily>, + #[metrics(labels = ["benchmark"])] + cycles: LabeledFamily>, + } + + #[vise::register] + static VM_CACHEGRIND_METRICS: vise::Global = vise::Global::new(); + + let id = self.0.to_string(); + VM_CACHEGRIND_METRICS.instructions[&id].set(output.stats.total_instructions()); + if let Some(&full) = output.stats.as_full() { + let summary = AccessSummary::from(full); + VM_CACHEGRIND_METRICS.l1_accesses[&id].set(summary.l1_hits); + VM_CACHEGRIND_METRICS.l2_accesses[&id].set(summary.l3_hits); + VM_CACHEGRIND_METRICS.ram_accesses[&id].set(summary.ram_accesses); + VM_CACHEGRIND_METRICS.cycles[&id].set(summary.estimated_cycles()); + } + } +} + +#[derive(Debug, Clone, Copy)] +struct Comparison { + current_cycles: u64, + prev_cycles: Option, +} + +impl Comparison { + fn percent_difference(a: u64, b: u64) -> f64 { + ((b as i64) - (a as i64)) as f64 / (a as f64) * 100.0 + } + + fn new(output: &BenchmarkOutput) -> Option { + let current_cycles = AccessSummary::from(*output.stats.as_full()?).estimated_cycles(); + let prev_cycles = if let Some(prev_stats) = &output.prev_stats { + Some(AccessSummary::from(*prev_stats.as_full()?).estimated_cycles()) + } else { + None + }; + + Some(Self { + current_cycles, + prev_cycles, + }) + } + + fn cycles_diff(&self) -> Option { + self.prev_cycles + .map(|prev_cycles| Self::percent_difference(prev_cycles, self.current_cycles)) + } +} + +/// Reporter that outputs diffs in a Markdown table to stdout after all benchmarks are completed. +/// +/// Significant diff level can be changed via `BENCHMARK_DIFF_THRESHOLD_PERCENT` env var; it is set to 1% by default. +#[derive(Debug)] +struct ComparisonReporter { + comparisons_sender: mpsc::Sender<(String, Comparison)>, + comparisons_receiver: mpsc::Receiver<(String, Comparison)>, +} + +impl Default for ComparisonReporter { + fn default() -> Self { + let (comparisons_sender, comparisons_receiver) = mpsc::channel(); + Self { + comparisons_sender, + comparisons_receiver, + } + } +} + +impl Reporter for ComparisonReporter { + fn new_benchmark(&mut self, id: &BenchmarkId) -> Box { + Box::new(BenchmarkComparison { + comparisons_sender: self.comparisons_sender.clone(), + id: id.clone(), + }) + } + + fn ok(self: Box) { + const ENV_VAR: &str = "BENCHMARK_DIFF_THRESHOLD_PERCENT"; + + let diff_threshold = env::var(ENV_VAR).unwrap_or_else(|_| "1.0".into()); + let diff_threshold: f64 = diff_threshold.parse().unwrap_or_else(|err| { + panic!("incorrect `{ENV_VAR}` value: {err}"); + }); + + // Drop the sender to not hang on the iteration below. 
+ drop(self.comparisons_sender); + let mut comparisons: Vec<_> = self.comparisons_receiver.iter().collect(); + comparisons.retain(|(_, diff)| { + // Output all stats if `diff_threshold <= 0.0` since this is what the user expects + diff.cycles_diff().unwrap_or(0.0) >= diff_threshold + }); + if comparisons.is_empty() { + return; + } + + comparisons.sort_unstable_by(|(name, _), (other_name, _)| name.cmp(other_name)); + + println!("\n## Detected VM performance changes"); + println!("Benchmark name | Est. cycles | Change in est. cycles |"); + println!("|:---|---:|---:|"); + for (name, comparison) in &comparisons { + let diff = comparison + .cycles_diff() + .map_or_else(|| "N/A".to_string(), |diff| format!("{diff:+.1}%")); + println!("| {name} | {} | {diff} |", comparison.current_cycles); + } + } +} + +#[derive(Debug)] +struct BenchmarkComparison { + comparisons_sender: mpsc::Sender<(String, Comparison)>, + id: BenchmarkId, +} + +impl BenchmarkReporter for BenchmarkComparison { + fn ok(self: Box, output: &BenchmarkOutput) { + if let Some(diff) = Comparison::new(output) { + self.comparisons_sender + .send((self.id.to_string(), diff)) + .ok(); + } + } +} + +fn benchmarks(bencher: &mut Bencher) { + if bencher.mode() == BenchMode::PrintResults { + // Only customize reporting if outputting previously collected benchmark result in order to prevent + // reporters influencing cachegrind stats. + bencher + .add_reporter(MetricsReporter::default()) + .add_reporter(ComparisonReporter::default()); + } + benchmarks_for_vm::(bencher); + benchmarks_for_vm::(bencher); +} + +yab::main!(benchmarks); diff --git a/core/tests/vm-benchmark/src/bin/common/mod.rs b/core/tests/vm-benchmark/src/bin/common/mod.rs deleted file mode 100644 index a92c9d5f710c..000000000000 --- a/core/tests/vm-benchmark/src/bin/common/mod.rs +++ /dev/null @@ -1,54 +0,0 @@ -use std::io::BufRead; - -#[derive(Debug)] -pub struct IaiResult { - pub name: String, - pub instructions: u64, - pub l1_accesses: u64, - pub l2_accesses: u64, - pub ram_accesses: u64, - pub cycles: u64, -} - -pub fn parse_iai(iai_output: R) -> impl Iterator { - IaiResultParser { - lines: iai_output.lines().map(|x| x.unwrap()), - } -} - -struct IaiResultParser> { - lines: I, -} - -impl> Iterator for IaiResultParser { - type Item = IaiResult; - - fn next(&mut self) -> Option { - self.lines.next().map(|name| { - let result = IaiResult { - name, - instructions: self.parse_stat(), - l1_accesses: self.parse_stat(), - l2_accesses: self.parse_stat(), - ram_accesses: self.parse_stat(), - cycles: self.parse_stat(), - }; - self.lines.next(); - result - }) - } -} - -impl> IaiResultParser { - fn parse_stat(&mut self) -> u64 { - let line = self.lines.next().unwrap(); - let number = line - .split(':') - .nth(1) - .unwrap() - .split_whitespace() - .next() - .unwrap(); - number.parse().unwrap() - } -} diff --git a/core/tests/vm-benchmark/src/bin/compare_iai_results.rs b/core/tests/vm-benchmark/src/bin/compare_iai_results.rs deleted file mode 100644 index c274b039c9bd..000000000000 --- a/core/tests/vm-benchmark/src/bin/compare_iai_results.rs +++ /dev/null @@ -1,108 +0,0 @@ -use std::{ - collections::{HashMap, HashSet}, - fs::File, - io::{BufRead, BufReader}, -}; - -pub use crate::common::parse_iai; - -mod common; - -fn main() { - let [iai_before, iai_after, opcodes_before, opcodes_after] = std::env::args() - .skip(1) - .take(4) - .collect::>() - .try_into() - .expect("expected four arguments"); - - let iai_before = get_name_to_cycles(&iai_before); - let iai_after = 
get_name_to_cycles(&iai_after); - let opcodes_before = get_name_to_opcodes(&opcodes_before); - let opcodes_after = get_name_to_opcodes(&opcodes_after); - - let perf_changes = iai_before - .keys() - .collect::>() - .intersection(&iai_after.keys().collect()) - .map(|&name| (name, percent_difference(iai_before[name], iai_after[name]))) - .collect::>(); - - let duration_changes = opcodes_before - .keys() - .collect::>() - .intersection(&opcodes_after.keys().collect()) - .map(|&name| { - let opcodes_abs_diff = (opcodes_after[name] as i64) - (opcodes_before[name] as i64); - (name, opcodes_abs_diff) - }) - .collect::>(); - - let mut nonzero_diff = false; - - for name in perf_changes - .iter() - .filter_map(|(key, value)| (value.abs() > 2.).then_some(key)) - .collect::>() - .union( - &duration_changes - .iter() - .filter_map(|(key, value)| (*value != 0).then_some(key)) - .collect(), - ) - { - // write the header before writing the first line of diff - if !nonzero_diff { - println!("Benchmark name | change in estimated runtime | change in number of opcodes executed \n--- | --- | ---"); - nonzero_diff = true; - } - - let n_a = "N/A".to_string(); - println!( - "{} | {} | {}", - name, - perf_changes - .get(**name) - .map(|percent| format!("{:+.1}%", percent)) - .unwrap_or(n_a.clone()), - duration_changes - .get(**name) - .map(|abs_diff| format!( - "{:+} ({:+.1}%)", - abs_diff, - percent_difference(opcodes_before[**name], opcodes_after[**name]) - )) - .unwrap_or(n_a), - ); - } - - if nonzero_diff { - println!("\n Changes in number of opcodes executed indicate that the gas price of the benchmark has changed, which causes it run out of gas at a different time. Or that it is behaving completely differently."); - } -} - -fn percent_difference(a: u64, b: u64) -> f64 { - ((b as f64) - (a as f64)) / (a as f64) * 100.0 -} - -fn get_name_to_cycles(filename: &str) -> HashMap { - parse_iai(BufReader::new( - File::open(filename).expect("failed to open file"), - )) - .map(|x| (x.name, x.cycles)) - .collect() -} - -fn get_name_to_opcodes(filename: &str) -> HashMap { - BufReader::new(File::open(filename).expect("failed to open file")) - .lines() - .map(|line| { - let line = line.unwrap(); - let mut it = line.split_whitespace(); - ( - it.next().unwrap().to_string(), - it.next().unwrap().parse().unwrap(), - ) - }) - .collect() -} diff --git a/core/tests/vm-benchmark/src/bin/iai_results_to_prometheus.rs b/core/tests/vm-benchmark/src/bin/iai_results_to_prometheus.rs deleted file mode 100644 index 3b3aa05bf69c..000000000000 --- a/core/tests/vm-benchmark/src/bin/iai_results_to_prometheus.rs +++ /dev/null @@ -1,52 +0,0 @@ -use std::{env, io::BufReader, time::Duration}; - -use tokio::sync::watch; -use vise::{Gauge, LabeledFamily, Metrics}; -use zksync_vlog::prometheus::PrometheusExporterConfig; - -use crate::common::{parse_iai, IaiResult}; - -mod common; - -#[derive(Debug, Metrics)] -#[metrics(prefix = "vm_cachegrind")] -pub(crate) struct VmCachegrindMetrics { - #[metrics(labels = ["benchmark"])] - pub instructions: LabeledFamily>, - #[metrics(labels = ["benchmark"])] - pub l1_accesses: LabeledFamily>, - #[metrics(labels = ["benchmark"])] - pub l2_accesses: LabeledFamily>, - #[metrics(labels = ["benchmark"])] - pub ram_accesses: LabeledFamily>, - #[metrics(labels = ["benchmark"])] - pub cycles: LabeledFamily>, -} - -#[vise::register] -pub(crate) static VM_CACHEGRIND_METRICS: vise::Global = vise::Global::new(); - -#[tokio::main] -async fn main() { - let results: Vec = parse_iai(BufReader::new(std::io::stdin())).collect(); - - 
let endpoint = env::var("BENCHMARK_PROMETHEUS_PUSHGATEWAY_URL") - .expect("`BENCHMARK_PROMETHEUS_PUSHGATEWAY_URL` env var is not set"); - let (stop_sender, stop_receiver) = watch::channel(false); - let prometheus_config = - PrometheusExporterConfig::push(endpoint.to_owned(), Duration::from_millis(100)); - tokio::spawn(prometheus_config.run(stop_receiver)); - - for result in results { - let name = result.name; - VM_CACHEGRIND_METRICS.instructions[&name.clone()].set(result.instructions); - VM_CACHEGRIND_METRICS.l1_accesses[&name.clone()].set(result.l1_accesses); - VM_CACHEGRIND_METRICS.l2_accesses[&name.clone()].set(result.l2_accesses); - VM_CACHEGRIND_METRICS.ram_accesses[&name.clone()].set(result.ram_accesses); - VM_CACHEGRIND_METRICS.cycles[&name].set(result.cycles); - } - - println!("Waiting for push to happen..."); - tokio::time::sleep(Duration::from_secs(1)).await; - stop_sender.send_replace(true); -} diff --git a/core/tests/vm-benchmark/src/bin/instruction_counts.rs b/core/tests/vm-benchmark/src/bin/instruction_counts.rs index 96208007fd97..ece30a66cee3 100644 --- a/core/tests/vm-benchmark/src/bin/instruction_counts.rs +++ b/core/tests/vm-benchmark/src/bin/instruction_counts.rs @@ -1,16 +1,100 @@ //! Runs all benchmarks and prints out the number of zkEVM opcodes each one executed. -use vm_benchmark::{BenchmarkingVmFactory, Fast, Legacy, BYTECODES}; +use std::{collections::BTreeMap, env, fs, io, path::PathBuf}; -fn main() { - for bytecode in BYTECODES { - let tx = bytecode.deploy_tx(); - let name = bytecode.name; - println!("{name} {}", Fast::<()>::count_instructions(&tx)); - println!( - "{} {}", - name.to_string() + "_legacy", - Legacy::count_instructions(&tx) - ); +use vm_benchmark::{CountInstructions, Fast, Legacy, BYTECODES}; + +#[derive(Debug)] +enum Command { + Print, + Diff { old: PathBuf }, +} + +impl Command { + fn from_env() -> Self { + let mut args = env::args().skip(1); + let Some(first) = args.next() else { + return Self::Print; + }; + assert_eq!(first, "--diff", "Unsupported command-line arg"); + let old = args.next().expect("`--diff` requires a path to old file"); + Self::Diff { old: old.into() } } + + fn print_instructions(counts: &BTreeMap<&str, usize>) { + for (bytecode_name, count) in counts { + println!("{bytecode_name} {count}"); + } + } + + fn parse_counts(reader: impl io::BufRead) -> BTreeMap { + let mut counts = BTreeMap::new(); + for line in reader.lines() { + let line = line.unwrap(); + if line.is_empty() { + continue; + } + let (name, count) = line.split_once(' ').expect("invalid output format"); + let count = count.parse().unwrap_or_else(|err| { + panic!("invalid count for `{name}`: {err}"); + }); + counts.insert(name.to_owned(), count); + } + counts + } + + fn run(self) { + let counts: BTreeMap<_, _> = BYTECODES + .iter() + .map(|bytecode| { + let tx = bytecode.deploy_tx(); + // We have a unit test comparing stats, but do it here as well just in case. 
+ let fast_count = Fast::count_instructions(&tx); + let legacy_count = Legacy::count_instructions(&tx); + assert_eq!( + fast_count, legacy_count, + "mismatch on number of instructions on bytecode `{}`", + bytecode.name + ); + + (bytecode.name, fast_count) + }) + .collect(); + + match self { + Self::Print => Self::print_instructions(&counts), + Self::Diff { old } => { + let file = fs::File::open(&old).unwrap_or_else(|err| { + panic!("failed opening `{}`: {err}", old.display()); + }); + let reader = io::BufReader::new(file); + let old_counts = Self::parse_counts(reader); + + let differing_counts: Vec<_> = counts + .iter() + .filter_map(|(&name, &new_count)| { + let old_count = *old_counts.get(name)?; + (old_count != new_count).then_some((name, old_count, new_count)) + }) + .collect(); + + if !differing_counts.is_empty() { + println!("## ⚠ Detected differing instruction counts"); + println!("| Benchmark | Old count | New count |"); + println!("|-----------|----------:|----------:|"); + for (name, old_count, new_count) in differing_counts { + println!("| {name} | {old_count} | {new_count} |"); + } + println!( + "\nChanges in number of opcodes executed indicate that the gas price of the benchmark has changed, \ + which causes it to run out of gas at a different time." + ); + } + } + } + } +} + +fn main() { + Command::from_env().run(); } diff --git a/core/tests/vm-benchmark/src/criterion.rs b/core/tests/vm-benchmark/src/criterion.rs index 9515ac4ef988..024ccf14139f 100644 --- a/core/tests/vm-benchmark/src/criterion.rs +++ b/core/tests/vm-benchmark/src/criterion.rs @@ -57,7 +57,7 @@ struct VmBenchmarkMetrics { static METRICS: vise::Global = vise::Global::new(); #[derive(Debug)] -struct PrometheusRuntime { +pub struct PrometheusRuntime { stop_sender: watch::Sender, _runtime: tokio::runtime::Runtime, } @@ -72,7 +72,7 @@ impl Drop for PrometheusRuntime { } impl PrometheusRuntime { - fn new() -> Option { + pub fn new() -> Option { const PUSH_INTERVAL: Duration = Duration::from_millis(100); let gateway_url = env::var("BENCHMARK_PROMETHEUS_PUSHGATEWAY_URL").ok()?; @@ -164,7 +164,7 @@ thread_local! { static BIN_NAME: SyncOnceCell<&'static str> = SyncOnceCell::new(); -/// Measurement for criterion that exports . +/// Measurement for criterion that exports timing-related metrics. #[derive(Debug)] pub struct MeteredTime { _prometheus: Option, diff --git a/core/tests/vm-benchmark/src/lib.rs b/core/tests/vm-benchmark/src/lib.rs index 4bd008d33196..9c4f547c1de2 100644 --- a/core/tests/vm-benchmark/src/lib.rs +++ b/core/tests/vm-benchmark/src/lib.rs @@ -6,7 +6,7 @@ pub use crate::{ get_load_test_deploy_tx, get_load_test_tx, get_realistic_load_test_tx, get_transfer_tx, LoadTestParams, }, - vm::{BenchmarkingVm, BenchmarkingVmFactory, Fast, Legacy, VmLabel}, + vm::{BenchmarkingVm, BenchmarkingVmFactory, CountInstructions, Fast, Legacy, VmLabel}, }; pub mod criterion; diff --git a/core/tests/vm-benchmark/src/vm.rs b/core/tests/vm-benchmark/src/vm.rs index e198be9ea6b2..bf969e0de5c0 100644 --- a/core/tests/vm-benchmark/src/vm.rs +++ b/core/tests/vm-benchmark/src/vm.rs @@ -72,19 +72,21 @@ pub trait BenchmarkingVmFactory { system_env: SystemEnv, storage: &'static InMemoryStorage, ) -> Self::Instance; +} +pub trait CountInstructions { /// Counts instructions executed by the VM while processing the transaction. fn count_instructions(tx: &Transaction) -> usize; } /// Factory for the new / fast VM. 
#[derive(Debug)] -pub struct Fast(Tr); +pub struct Fast; -impl BenchmarkingVmFactory for Fast { +impl BenchmarkingVmFactory for Fast { const LABEL: VmLabel = VmLabel::Fast; - type Instance = vm_fast::Vm<&'static InMemoryStorage, Tr>; + type Instance = vm_fast::Vm<&'static InMemoryStorage>; fn create( batch_env: L1BatchEnv, @@ -93,27 +95,30 @@ impl BenchmarkingVmFactory for Fast ) -> Self::Instance { vm_fast::Vm::custom(batch_env, system_env, storage) } +} +impl CountInstructions for Fast { fn count_instructions(tx: &Transaction) -> usize { - let mut vm = BenchmarkingVm::>::default(); - vm.0.push_transaction(tx.clone()); + use vm_fast::interface as vm2; #[derive(Default)] struct InstructionCount(usize); - impl vm_fast::Tracer for InstructionCount { - fn before_instruction< - OP: zksync_vm2::interface::OpcodeType, - S: zksync_vm2::interface::GlobalStateInterface, - >( + + impl vm2::Tracer for InstructionCount { + fn before_instruction( &mut self, _: &mut S, ) { self.0 += 1; } } - let mut tracer = InstructionCount(0); - vm.0.inspect(&mut tracer, InspectExecutionMode::OneTx); + let (system_env, l1_batch_env) = test_env(); + let mut vm = + vm_fast::Vm::<_, InstructionCount>::custom(l1_batch_env, system_env, &*STORAGE); + vm.push_transaction(tx.clone()); + let mut tracer = InstructionCount(0); + vm.inspect(&mut tracer, InspectExecutionMode::OneTx); tracer.0 } } @@ -135,7 +140,9 @@ impl BenchmarkingVmFactory for Legacy { let storage = StorageView::new(storage).to_rc_ptr(); vm_latest::Vm::new(batch_env, system_env, storage) } +} +impl CountInstructions for Legacy { fn count_instructions(tx: &Transaction) -> usize { let mut vm = BenchmarkingVm::::default(); vm.0.push_transaction(tx.clone()); @@ -150,41 +157,44 @@ impl BenchmarkingVmFactory for Legacy { } } +fn test_env() -> (SystemEnv, L1BatchEnv) { + let timestamp = unix_timestamp_ms(); + let system_env = SystemEnv { + zk_porter_available: false, + version: ProtocolVersionId::latest(), + base_system_smart_contracts: SYSTEM_CONTRACTS.clone(), + bootloader_gas_limit: BATCH_COMPUTATIONAL_GAS_LIMIT, + execution_mode: TxExecutionMode::VerifyExecute, + default_validation_computational_gas_limit: BATCH_COMPUTATIONAL_GAS_LIMIT, + chain_id: L2ChainId::from(270), + }; + let l1_batch_env = L1BatchEnv { + previous_batch_hash: None, + number: L1BatchNumber(1), + timestamp, + fee_input: BatchFeeInput::l1_pegged( + 50_000_000_000, // 50 gwei + 250_000_000, // 0.25 gwei + ), + fee_account: Address::random(), + enforced_base_fee: None, + first_l2_block: L2BlockEnv { + number: 1, + timestamp, + prev_block_hash: L2BlockHasher::legacy_hash(L2BlockNumber(0)), + max_virtual_blocks_to_create: 100, + }, + }; + (system_env, l1_batch_env) +} + #[derive(Debug)] pub struct BenchmarkingVm(VM::Instance); impl Default for BenchmarkingVm { fn default() -> Self { - let timestamp = unix_timestamp_ms(); - Self(VM::create( - L1BatchEnv { - previous_batch_hash: None, - number: L1BatchNumber(1), - timestamp, - fee_input: BatchFeeInput::l1_pegged( - 50_000_000_000, // 50 gwei - 250_000_000, // 0.25 gwei - ), - fee_account: Address::random(), - enforced_base_fee: None, - first_l2_block: L2BlockEnv { - number: 1, - timestamp, - prev_block_hash: L2BlockHasher::legacy_hash(L2BlockNumber(0)), - max_virtual_blocks_to_create: 100, - }, - }, - SystemEnv { - zk_porter_available: false, - version: ProtocolVersionId::latest(), - base_system_smart_contracts: SYSTEM_CONTRACTS.clone(), - bootloader_gas_limit: BATCH_COMPUTATIONAL_GAS_LIMIT, - execution_mode: TxExecutionMode::VerifyExecute, - 
default_validation_computational_gas_limit: BATCH_COMPUTATIONAL_GAS_LIMIT, - chain_id: L2ChainId::from(270), - }, - &STORAGE, - )) + let (system_env, l1_batch_env) = test_env(); + Self(VM::create(l1_batch_env, system_env, &STORAGE)) } } @@ -231,7 +241,7 @@ mod tests { use super::*; use crate::{ get_deploy_tx, get_heavy_load_test_tx, get_load_test_deploy_tx, get_load_test_tx, - get_realistic_load_test_tx, get_transfer_tx, LoadTestParams, + get_realistic_load_test_tx, get_transfer_tx, LoadTestParams, BYTECODES, }; #[test] @@ -282,4 +292,22 @@ mod tests { let res = vm.run_transaction(&get_heavy_load_test_tx(1)); assert_matches!(res.result, ExecutionResult::Success { .. }); } + + #[test] + fn instruction_count_matches_on_both_vms_for_transfer() { + let tx = get_transfer_tx(0); + let legacy_count = Legacy::count_instructions(&tx); + let fast_count = Fast::count_instructions(&tx); + assert_eq!(legacy_count, fast_count); + } + + #[test] + fn instruction_count_matches_on_both_vms_for_benchmark_bytecodes() { + for bytecode in BYTECODES { + let tx = bytecode.deploy_tx(); + let legacy_count = Legacy::count_instructions(&tx); + let fast_count = Fast::count_instructions(&tx); + assert_eq!(legacy_count, fast_count, "bytecode: {}", bytecode.name); + } + } } From 6ee9f1f431f95514d58db87a4562e09df9d09f86 Mon Sep 17 00:00:00 2001 From: Grzegorz Prusak Date: Mon, 28 Oct 2024 17:58:15 +0100 Subject: [PATCH 20/32] fix(consensus): made attestation controller non-critical (#3180) The attestation logic in consensus component is experimental, while p2p synchronization is critical. I've made the attestation controller non-critical, i.e. if attestation controller fails, an error is logged, but the consensus component keeps working (on both main node and external node). This should prevent situations like in https://www.notion.so/matterlabs/mainnet2-p2p-synchronization-downtime-12aa48363f2380e6b8e0c8e1c3728201?pvs=4 --- core/node/consensus/src/en.rs | 25 +++-- core/node/consensus/src/mn.rs | 201 ++++++++++++++++++---------------- 2 files changed, 123 insertions(+), 103 deletions(-) diff --git a/core/node/consensus/src/en.rs b/core/node/consensus/src/en.rs index 5e9aadc8f37f..6e3619f57e2e 100644 --- a/core/node/consensus/src/en.rs +++ b/core/node/consensus/src/en.rs @@ -127,7 +127,7 @@ impl EN { ) .await .wrap("Store::new()")?; - s.spawn_bg(async { Ok(runner.run(ctx).await?) }); + s.spawn_bg(async { Ok(runner.run(ctx).await.context("Store::runner()")?) }); // Run the temporary fetcher until the certificates are backfilled. // Temporary fetcher should be removed once json RPC syncing is fully deprecated. @@ -146,14 +146,25 @@ impl EN { let (block_store, runner) = BlockStore::new(ctx, Box::new(store.clone())) .await .wrap("BlockStore::new()")?; - s.spawn_bg(async { Ok(runner.run(ctx).await?) }); + s.spawn_bg(async { Ok(runner.run(ctx).await.context("BlockStore::run()")?) }); let attestation = Arc::new(attestation::Controller::new(attester)); - s.spawn_bg(self.run_attestation_controller( - ctx, - global_config.clone(), - attestation.clone(), - )); + s.spawn_bg({ + let global_config = global_config.clone(); + let attestation = attestation.clone(); + async { + let res = self + .run_attestation_controller(ctx, global_config, attestation) + .await + .wrap("run_attestation_controller()"); + // Attestation currently is not critical for the node to function. + // If it fails, we just log the error and continue. 
+ if let Err(err) = res { + tracing::error!("attestation controller failed: {err:#}"); + } + Ok(()) + } + }); let executor = executor::Executor { config: config::executor(&cfg, &secrets, &global_config, build_version)?, diff --git a/core/node/consensus/src/mn.rs b/core/node/consensus/src/mn.rs index 2a280b2f1616..a392acfbe5f0 100644 --- a/core/node/consensus/src/mn.rs +++ b/core/node/consensus/src/mn.rs @@ -30,7 +30,7 @@ pub async fn run_main_node( tracing::debug!(is_attester = attester.is_some(), "main node attester mode"); - scope::run!(&ctx, |ctx, s| async { + let res: ctx::Result<()> = scope::run!(&ctx, |ctx, s| async { if let Some(spec) = &cfg.genesis_spec { let spec = config::GenesisSpec::parse(spec).context("GenesisSpec::parse()")?; @@ -46,7 +46,7 @@ pub async fn run_main_node( let (store, runner) = Store::new(ctx, pool.clone(), None, None) .await .wrap("Store::new()")?; - s.spawn_bg(runner.run(ctx)); + s.spawn_bg(async { Ok(runner.run(ctx).await.context("Store::runner()")?) }); let global_config = pool .connection(ctx) @@ -56,25 +56,36 @@ pub async fn run_main_node( .await .wrap("global_config()")? .context("global_config() disappeared")?; - anyhow::ensure!( - global_config.genesis.leader_selection - == validator::LeaderSelectionMode::Sticky(validator_key.public()), - "unsupported leader selection mode - main node has to be the leader" - ); + if global_config.genesis.leader_selection + != validator::LeaderSelectionMode::Sticky(validator_key.public()) + { + return Err(anyhow::format_err!( + "unsupported leader selection mode - main node has to be the leader" + ) + .into()); + } let (block_store, runner) = BlockStore::new(ctx, Box::new(store.clone())) .await .wrap("BlockStore::new()")?; - s.spawn_bg(runner.run(ctx)); + s.spawn_bg(async { Ok(runner.run(ctx).await.context("BlockStore::run()")?) }); let attestation = Arc::new(attestation::Controller::new(attester)); - s.spawn_bg(run_attestation_controller( - ctx, - &pool, - global_config.clone(), - attestation.clone(), - )); - + s.spawn_bg({ + let global_config = global_config.clone(); + let attestation = attestation.clone(); + async { + let res = run_attestation_controller(ctx, &pool, global_config, attestation) + .await + .wrap("run_attestation_controller()"); + // Attestation currently is not critical for the node to function. + // If it fails, we just log the error and continue. 
+ if let Err(err) = res { + tracing::error!("attestation controller failed: {err:#}"); + } + Ok(()) + } + }); let executor = executor::Executor { config: config::executor(&cfg, &secrets, &global_config, None)?, block_store, @@ -87,9 +98,14 @@ pub async fn run_main_node( }; tracing::info!("running the main node executor"); - executor.run(ctx).await + executor.run(ctx).await.context("executor")?; + Ok(()) }) - .await + .await; + match res { + Ok(()) | Err(ctx::Error::Canceled(_)) => Ok(()), + Err(ctx::Error::Internal(err)) => Err(err), + } } /// Manages attestation state by configuring the @@ -100,91 +116,84 @@ async fn run_attestation_controller( pool: &ConnectionPool, cfg: consensus_dal::GlobalConfig, attestation: Arc, -) -> anyhow::Result<()> { +) -> ctx::Result<()> { const POLL_INTERVAL: time::Duration = time::Duration::seconds(5); let registry = registry::Registry::new(cfg.genesis, pool.clone()).await; let registry_addr = cfg.registry_address.map(registry::Address::new); let mut next = attester::BatchNumber(0); - let res = async { - loop { - // After regenesis it might happen that the batch number for the first block - // is not immediately known (the first block was not produced yet), - // therefore we need to wait for it. - let status = loop { - match pool - .connection(ctx) - .await - .wrap("connection()")? - .attestation_status(ctx) - .await - .wrap("attestation_status()")? - { - Some(status) if status.next_batch_to_attest >= next => break status, - _ => {} - } - ctx.sleep(POLL_INTERVAL).await?; - }; - next = status.next_batch_to_attest.next(); - tracing::info!( - "waiting for hash of batch {:?}", - status.next_batch_to_attest - ); - let info = pool - .wait_for_batch_info(ctx, status.next_batch_to_attest, POLL_INTERVAL) - .await?; - let hash = consensus_dal::batch_hash(&info); - let Some(committee) = registry - .attester_committee_for(ctx, registry_addr, status.next_batch_to_attest) - .await - .wrap("attester_committee_for()")? - else { - tracing::info!("attestation not required"); - continue; - }; - let committee = Arc::new(committee); - // Persist the derived committee. - pool.connection(ctx) - .await - .wrap("connection")? - .upsert_attester_committee(ctx, status.next_batch_to_attest, &committee) - .await - .wrap("upsert_attester_committee()")?; - tracing::info!( - "attesting batch {:?} with hash {hash:?}", - status.next_batch_to_attest - ); - attestation - .start_attestation(Arc::new(attestation::Info { - batch_to_attest: attester::Batch { - hash, - number: status.next_batch_to_attest, - genesis: status.genesis, - }, - committee, - })) - .await - .context("start_attestation()")?; - // Main node is the only node which can update the global AttestationStatus, - // therefore we can synchronously wait for the certificate. - let qc = attestation - .wait_for_cert(ctx, status.next_batch_to_attest) - .await? - .context("attestation config has changed unexpectedly")?; - tracing::info!( - "collected certificate for batch {:?}", - status.next_batch_to_attest - ); - pool.connection(ctx) + loop { + // After regenesis it might happen that the batch number for the first block + // is not immediately known (the first block was not produced yet), + // therefore we need to wait for it. + let status = loop { + match pool + .connection(ctx) .await .wrap("connection()")? 
- .insert_batch_certificate(ctx, &qc) + .attestation_status(ctx) .await - .wrap("insert_batch_certificate()")?; - } - } - .await; - match res { - Ok(()) | Err(ctx::Error::Canceled(_)) => Ok(()), - Err(ctx::Error::Internal(err)) => Err(err), + .wrap("attestation_status()")? + { + Some(status) if status.next_batch_to_attest >= next => break status, + _ => {} + } + ctx.sleep(POLL_INTERVAL).await?; + }; + next = status.next_batch_to_attest.next(); + tracing::info!( + "waiting for hash of batch {:?}", + status.next_batch_to_attest + ); + let info = pool + .wait_for_batch_info(ctx, status.next_batch_to_attest, POLL_INTERVAL) + .await?; + let hash = consensus_dal::batch_hash(&info); + let Some(committee) = registry + .attester_committee_for(ctx, registry_addr, status.next_batch_to_attest) + .await + .wrap("attester_committee_for()")? + else { + tracing::info!("attestation not required"); + continue; + }; + let committee = Arc::new(committee); + // Persist the derived committee. + pool.connection(ctx) + .await + .wrap("connection")? + .upsert_attester_committee(ctx, status.next_batch_to_attest, &committee) + .await + .wrap("upsert_attester_committee()")?; + tracing::info!( + "attesting batch {:?} with hash {hash:?}", + status.next_batch_to_attest + ); + attestation + .start_attestation(Arc::new(attestation::Info { + batch_to_attest: attester::Batch { + hash, + number: status.next_batch_to_attest, + genesis: status.genesis, + }, + committee, + })) + .await + .context("start_attestation()")?; + // Main node is the only node which can update the global AttestationStatus, + // therefore we can synchronously wait for the certificate. + let qc = attestation + .wait_for_cert(ctx, status.next_batch_to_attest) + .await? + .context("attestation config has changed unexpectedly")?; + tracing::info!( + "collected certificate for batch {:?}", + status.next_batch_to_attest + ); + pool.connection(ctx) + .await + .wrap("connection()")? + .insert_batch_certificate(ctx, &qc) + .await + .wrap("insert_batch_certificate()")?; } } From 6c034f6e180cc92e99766f14c8840c90efa56cec Mon Sep 17 00:00:00 2001 From: Daniyar Itegulov Date: Tue, 29 Oct 2024 20:33:57 +1100 Subject: [PATCH 21/32] feat(api): get rid of tx receipt root (#3187) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit ## What ❔ Transaction receipt `root` was replaced by `status` in [EIP658](https://github.com/ethereum/EIPs/blob/master/EIPS/eip-658.md). Since we already return `status` there is no need in having `root` too. Fixes #3188 ## Why ❔ It is actively harmful as it confuses some ETH tooling that considers having both `status` and `root` at the same time an invalid receipt. ## Checklist - [ ] PR title corresponds to the body of PR (we generate changelog entries from PRs). - [ ] Tests for the changes have been added / updated. - [ ] Documentation comments have been added / updated. - [ ] Code has been formatted via `zkstack dev fmt` and `zkstack dev lint`. 
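For reference, a minimal sketch of the post-change receipt shape (the `MinimalReceipt` struct below is purely illustrative and is not the actual `zksync_types` definition, which lives in `core/lib/types/src/api/mod.rs`): per EIP-658 the receipt exposes `status`, and after this patch the serialized JSON no longer carries a `root` field at all.

```rust
use serde::Serialize;
use serde_json::json;

/// Hypothetical stand-in for the real receipt type, trimmed to the two
/// fields relevant here.
#[derive(Serialize)]
#[serde(rename_all = "camelCase")]
struct MinimalReceipt {
    /// Either 1 (success) or 0 (failure), per EIP-658.
    status: u64,
    logs_bloom: String,
}

fn main() {
    let receipt = MinimalReceipt {
        status: 1,
        logs_bloom: "0x0".into(),
    };
    let value = serde_json::to_value(&receipt).unwrap();
    // Post-change, a serialized receipt carries `status` but never `root`.
    assert_eq!(value, json!({ "status": 1, "logsBloom": "0x0" }));
    assert!(value.get("root").is_none());
}
```

Returning only one of the two fields matters because some tooling infers the receipt format from which field is present; a receipt carrying both looks malformed to it, which is exactly the confusion described above.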
--- core/lib/dal/src/models/storage_transaction.rs | 1 - core/lib/types/src/api/mod.rs | 2 -- 2 files changed, 3 deletions(-) diff --git a/core/lib/dal/src/models/storage_transaction.rs b/core/lib/dal/src/models/storage_transaction.rs index 78daaebb335e..dbd4fa947520 100644 --- a/core/lib/dal/src/models/storage_transaction.rs +++ b/core/lib/dal/src/models/storage_transaction.rs @@ -392,7 +392,6 @@ impl From for TransactionReceipt { logs: vec![], l2_to_l1_logs: vec![], status, - root: block_hash, logs_bloom: Default::default(), // Even though the Rust SDK recommends us to supply "None" for legacy transactions // we always supply some number anyway to have the same behavior as most popular RPCs diff --git a/core/lib/types/src/api/mod.rs b/core/lib/types/src/api/mod.rs index a4eb64605534..409dc3727570 100644 --- a/core/lib/types/src/api/mod.rs +++ b/core/lib/types/src/api/mod.rs @@ -257,8 +257,6 @@ pub struct TransactionReceipt { pub l2_to_l1_logs: Vec, /// Status: either 1 (success) or 0 (failure). pub status: U64, - /// State root. - pub root: H256, /// Logs bloom #[serde(rename = "logsBloom")] pub logs_bloom: Bloom, From d88b875464ec5ac7e54aba0cc7c0a68c01969782 Mon Sep 17 00:00:00 2001 From: Dima Zhornyk <55756184+dimazhornyk@users.noreply.github.com> Date: Tue, 29 Oct 2024 12:39:38 +0100 Subject: [PATCH 22/32] feat(da-clients): add Celestia client (#2983) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit ## What ❔ This PR adds a Celestia DA client. The main complexity of this PR comes from our goal to lower the operational load and not run the Celestia light node (which is a default way of interacting with Celestia blockchain). This was done by adapting Astria's Celestia client implementation to our codebase and removing unneeded logical components. Note that Celestia's main communication protocol is gRPC, which means we have to import or maintain the proto definitions. I decided to reuse the generated `.rs` files from Astria's repo to remove the need to maintain the `.proto` files in our repo (not the cleanest way, but consider it a rather temporary solution). There is a [celestia-proto](https://github.com/eigerco/lumina/tree/main/proto) crate that has all the codegen that we need, but they don't generate the gRPC client definitions, only the types, so we can't use them atm. I will try to ask the team maintaining it to add such an option, then we would be able to remove all the codegen from our repo, and simply import it from celestia-proto. Example config: ``` da_client: celestia: api_node_url: http://grpc-mocha.pops.one:9090 namespace: 000000000000000000000000000000000000ca1de12a5e2d5beb9ba9 chain_id: mocha-4 timeout_ms: 10000 ``` secrets: ``` da: celestia: private_key: PRIVATE_KEY_WITHOUT_0x_PREFIX ``` ## Why ❔ To enable Celestia DA in ZK stack ## Checklist - [ ] PR title corresponds to the body of PR (we generate changelog entries from PRs). - [ ] Tests for the changes have been added / updated. - [ ] Documentation comments have been added / updated. - [ ] Code has been formatted via `zk fmt` and `zk lint`. 
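As a side note on the example config: the `namespace` value is a plain hex string (no `0x` prefix) that decodes to a zero-padded Celestia namespace. A minimal, illustrative decode is sketched below; `decode_hex` is a hypothetical helper written for this note, not the client's actual parsing code.

```rust
/// Hypothetical helper: decode a hex string such as the `namespace` value
/// from the example config above into raw bytes.
fn decode_hex(s: &str) -> Result<Vec<u8>, String> {
    if s.len() % 2 != 0 {
        return Err("hex string must have an even number of characters".into());
    }
    s.as_bytes()
        .chunks(2)
        .map(|pair| {
            let pair = std::str::from_utf8(pair).map_err(|e| e.to_string())?;
            u8::from_str_radix(pair, 16).map_err(|e| e.to_string())
        })
        .collect()
}

fn main() {
    let ns = decode_hex("000000000000000000000000000000000000ca1de12a5e2d5beb9ba9")
        .expect("namespace from the example config should be valid hex");
    // The decoded value is a run of leading zero bytes followed by the
    // chain-specific suffix; Celestia namespaces are fixed-size and
    // zero-padded, so operators only choose the trailing bytes.
    assert!(ns.starts_with(&[0, 0, 0, 0]));
    assert_eq!(*ns.last().unwrap(), 0xa9);
    println!("namespace is {} bytes long", ns.len());
}
```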
--- Cargo.lock | 1767 ++++++++++++----- Cargo.toml | 9 +- core/bin/zksync_server/src/main.rs | 11 +- core/bin/zksync_server/src/node_builder.rs | 11 +- core/lib/basic_types/src/api_key.rs | 20 - core/lib/basic_types/src/lib.rs | 3 +- core/lib/basic_types/src/secrets.rs | 54 + core/lib/basic_types/src/seed_phrase.rs | 20 - .../lib/config/src/configs/da_client/avail.rs | 4 +- .../config/src/configs/da_client/celestia.rs | 15 + core/lib/config/src/configs/da_client/mod.rs | 5 +- core/lib/config/src/configs/mod.rs | 4 +- core/lib/config/src/configs/secrets.rs | 6 +- core/lib/config/src/lib.rs | 4 +- core/lib/config/src/testonly.rs | 5 +- core/lib/env_config/src/da_client.rs | 94 +- core/lib/env_config/src/database.rs | 19 +- core/lib/env_config/src/utils.rs | 17 + core/lib/protobuf_config/src/da_client.rs | 74 +- .../src/proto/config/da_client.proto | 12 +- .../src/proto/config/secrets.proto | 5 + core/lib/protobuf_config/src/secrets.rs | 24 +- core/node/da_clients/Cargo.toml | 15 +- core/node/da_clients/src/avail/client.rs | 21 +- core/node/da_clients/src/avail/sdk.rs | 2 +- core/node/da_clients/src/celestia/README.md | 19 + core/node/da_clients/src/celestia/client.rs | 109 + .../celestia/generated/celestia.blob.v1.rs | 200 ++ .../celestia/generated/cosmos.auth.v1beta1.rs | 257 +++ .../generated/cosmos.base.abci.v1beta1.rs | 125 ++ .../generated/cosmos.base.node.v1beta1.rs | 146 ++ .../celestia/generated/cosmos.base.v1beta1.rs | 19 + .../cosmos.crypto.multisig.v1beta1.rs | 40 + .../generated/cosmos.crypto.secp256k1.rs | 21 + .../generated/cosmos.tx.signing.v1beta1.rs | 72 + .../celestia/generated/cosmos.tx.v1beta1.rs | 553 ++++++ .../src/celestia/generated/tendermint.abci.rs | 42 + .../celestia/generated/tendermint.types.rs | 48 + core/node/da_clients/src/celestia/mod.rs | 58 + core/node/da_clients/src/celestia/sdk.rs | 602 ++++++ core/node/da_clients/src/lib.rs | 2 + core/node/da_clients/src/utils.rs | 15 + .../layers/da_clients/celestia.rs | 46 + .../implementations/layers/da_clients/mod.rs | 1 + 44 files changed, 4004 insertions(+), 592 deletions(-) delete mode 100644 core/lib/basic_types/src/api_key.rs create mode 100644 core/lib/basic_types/src/secrets.rs delete mode 100644 core/lib/basic_types/src/seed_phrase.rs create mode 100644 core/lib/config/src/configs/da_client/celestia.rs create mode 100644 core/node/da_clients/src/celestia/README.md create mode 100644 core/node/da_clients/src/celestia/client.rs create mode 100644 core/node/da_clients/src/celestia/generated/celestia.blob.v1.rs create mode 100644 core/node/da_clients/src/celestia/generated/cosmos.auth.v1beta1.rs create mode 100644 core/node/da_clients/src/celestia/generated/cosmos.base.abci.v1beta1.rs create mode 100644 core/node/da_clients/src/celestia/generated/cosmos.base.node.v1beta1.rs create mode 100644 core/node/da_clients/src/celestia/generated/cosmos.base.v1beta1.rs create mode 100644 core/node/da_clients/src/celestia/generated/cosmos.crypto.multisig.v1beta1.rs create mode 100644 core/node/da_clients/src/celestia/generated/cosmos.crypto.secp256k1.rs create mode 100644 core/node/da_clients/src/celestia/generated/cosmos.tx.signing.v1beta1.rs create mode 100644 core/node/da_clients/src/celestia/generated/cosmos.tx.v1beta1.rs create mode 100644 core/node/da_clients/src/celestia/generated/tendermint.abci.rs create mode 100644 core/node/da_clients/src/celestia/generated/tendermint.types.rs create mode 100644 core/node/da_clients/src/celestia/mod.rs create mode 100644 core/node/da_clients/src/celestia/sdk.rs create mode 100644 
core/node/da_clients/src/utils.rs create mode 100644 core/node/node_framework/src/implementations/layers/da_clients/celestia.rs diff --git a/Cargo.lock b/Cargo.lock index 597da3c1b31b..0554982e157a 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -15,9 +15,9 @@ dependencies = [ [[package]] name = "addr2line" -version = "0.24.1" +version = "0.24.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f5fb1d8e4442bd405fdfd1dacb42792696b0cf9cb15882e5d097b742a676d375" +checksum = "dfbe277e56a376000877090da837660b4427aad530e3028d44e0bffe4f89a1c1" dependencies = [ "gimli", ] @@ -101,6 +101,16 @@ version = "0.2.18" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "5c6cb57a04249c6480766f7f7cef5467412af1490f8d1e243141daddada3264f" +[[package]] +name = "alloy-rlp" +version = "0.3.9" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "da0822426598f95e45dd1ea32a738dac057529a709ee645fcc516ffa4cbde08f" +dependencies = [ + "arrayvec 0.7.6", + "bytes", +] + [[package]] name = "android-tzdata" version = "0.1.1" @@ -133,9 +143,9 @@ dependencies = [ [[package]] name = "anstream" -version = "0.6.15" +version = "0.6.17" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "64e15c1ab1f89faffbf04a634d5e1962e9074f2741eef6d97f3c4e322426d526" +checksum = "23a1e53f0f5d86382dafe1cf314783b2044280f406e7e1506368220ad11b1338" dependencies = [ "anstyle", "anstyle-parse", @@ -148,43 +158,167 @@ dependencies = [ [[package]] name = "anstyle" -version = "1.0.8" +version = "1.0.9" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1bec1de6f59aedf83baf9ff929c98f2ad654b97c9510f4e70cf6f661d49fd5b1" +checksum = "8365de52b16c035ff4fcafe0092ba9390540e3e352870ac09933bebcaa2c8c56" [[package]] name = "anstyle-parse" -version = "0.2.5" +version = "0.2.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "eb47de1e80c2b463c735db5b217a0ddc39d612e7ac9e2e96a5aed1f57616c1cb" +checksum = "3b2d16507662817a6a20a9ea92df6652ee4f94f914589377d69f3b21bc5798a9" dependencies = [ "utf8parse", ] [[package]] name = "anstyle-query" -version = "1.1.1" +version = "1.1.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6d36fc52c7f6c869915e99412912f22093507da8d9e942ceaf66fe4b7c14422a" +checksum = "79947af37f4177cfead1110013d678905c37501914fba0efea834c3fe9a8d60c" dependencies = [ - "windows-sys 0.52.0", + "windows-sys 0.59.0", ] [[package]] name = "anstyle-wincon" -version = "3.0.4" +version = "3.0.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5bf74e1b6e971609db8ca7a9ce79fd5768ab6ae46441c572e46cf596f59e57f8" +checksum = "2109dbce0e72be3ec00bed26e6a7479ca384ad226efdd66db8fa2e3a38c83125" dependencies = [ "anstyle", - "windows-sys 0.52.0", + "windows-sys 0.59.0", ] [[package]] name = "anyhow" -version = "1.0.89" +version = "1.0.91" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "c042108f3ed77fd83760a5fd79b53be043192bb3b9dba91d8c574c0ada7850c8" + +[[package]] +name = "ark-ff" +version = "0.3.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "6b3235cc41ee7a12aaaf2c575a2ad7b46713a8a50bda2fc3b003a04845c05dd6" +dependencies = [ + "ark-ff-asm 0.3.0", + "ark-ff-macros 0.3.0", + "ark-serialize 0.3.0", + "ark-std 0.3.0", + "derivative", + "num-bigint 0.4.6", + "num-traits", + "paste", + "rustc_version 0.3.3", + "zeroize", +] + +[[package]] +name = "ark-ff" +version = "0.4.2" +source = 
"registry+https://github.com/rust-lang/crates.io-index" +checksum = "ec847af850f44ad29048935519032c33da8aa03340876d351dfab5660d2966ba" +dependencies = [ + "ark-ff-asm 0.4.2", + "ark-ff-macros 0.4.2", + "ark-serialize 0.4.2", + "ark-std 0.4.0", + "derivative", + "digest 0.10.7", + "itertools 0.10.5", + "num-bigint 0.4.6", + "num-traits", + "paste", + "rustc_version 0.4.1", + "zeroize", +] + +[[package]] +name = "ark-ff-asm" +version = "0.3.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "db02d390bf6643fb404d3d22d31aee1c4bc4459600aef9113833d17e786c6e44" +dependencies = [ + "quote 1.0.37", + "syn 1.0.109", +] + +[[package]] +name = "ark-ff-asm" +version = "0.4.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "3ed4aa4fe255d0bc6d79373f7e31d2ea147bcf486cba1be5ba7ea85abdb92348" +dependencies = [ + "quote 1.0.37", + "syn 1.0.109", +] + +[[package]] +name = "ark-ff-macros" +version = "0.3.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "86fdf8605db99b54d3cd748a44c6d04df638eb5dafb219b135d0149bd0db01f6" +checksum = "db2fd794a08ccb318058009eefdf15bcaaaaf6f8161eb3345f907222bac38b20" +dependencies = [ + "num-bigint 0.4.6", + "num-traits", + "quote 1.0.37", + "syn 1.0.109", +] + +[[package]] +name = "ark-ff-macros" +version = "0.4.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "7abe79b0e4288889c4574159ab790824d0033b9fdcb2a112a3182fac2e514565" +dependencies = [ + "num-bigint 0.4.6", + "num-traits", + "proc-macro2 1.0.89", + "quote 1.0.37", + "syn 1.0.109", +] + +[[package]] +name = "ark-serialize" +version = "0.3.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "1d6c2b318ee6e10f8c2853e73a83adc0ccb88995aa978d8a3408d492ab2ee671" +dependencies = [ + "ark-std 0.3.0", + "digest 0.9.0", +] + +[[package]] +name = "ark-serialize" +version = "0.4.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "adb7b85a02b83d2f22f89bd5cac66c9c89474240cb6207cb1efc16d098e822a5" +dependencies = [ + "ark-std 0.4.0", + "digest 0.10.7", + "num-bigint 0.4.6", +] + +[[package]] +name = "ark-std" +version = "0.3.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "1df2c09229cbc5a028b1d70e00fdb2acee28b1055dfb5ca73eea49c5a25c4e7c" +dependencies = [ + "num-traits", + "rand 0.8.5", +] + +[[package]] +name = "ark-std" +version = "0.4.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "94893f1e0c6eeab764ade8dc4c0db24caf4fe7cbbaafc0eba0a9030f447b5185" +dependencies = [ + "num-traits", + "rand 0.8.5", +] [[package]] name = "arr_macro" @@ -294,9 +428,9 @@ dependencies = [ [[package]] name = "async-compression" -version = "0.4.11" +version = "0.4.17" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "cd066d0b4ef8ecb03a55319dc13aa6910616d0f44008a045bb1835af830abff5" +checksum = "0cb8f1d480b0ea3783ab015936d2a55c87e219676f0c0b7dec61494043f21857" dependencies = [ "futures-core", "memchr", @@ -420,9 +554,9 @@ version = "1.1.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "3b43422f69d8ff38f95f1b2bb76517c91589a924d1559a0e935d7c8ce0274c11" dependencies = [ - "proc-macro2 1.0.86", + "proc-macro2 1.0.89", "quote 1.0.37", - "syn 2.0.77", + "syn 2.0.85", ] [[package]] @@ -473,9 +607,9 @@ dependencies = [ [[package]] name = "async-stream" -version = "0.3.5" +version = "0.3.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"cd56dd203fef61ac097dd65721a419ddccb106b2d2b70ba60a6b529f03961a51" +checksum = "0b5a71a6f37880a80d1d7f19efd781e4b5de42c88f0722cc13bcb6cc2cfe8476" dependencies = [ "async-stream-impl", "futures-core", @@ -484,13 +618,13 @@ dependencies = [ [[package]] name = "async-stream-impl" -version = "0.3.5" +version = "0.3.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "16e62a023e7c117e27523144c5d2459f4397fcc3cab0085af8e2224f643a0193" +checksum = "c7c24de15d275a1ecfd47a380fb4d5ec9bfe0933f309ed5e705b775596a3574d" dependencies = [ - "proc-macro2 1.0.86", + "proc-macro2 1.0.89", "quote 1.0.37", - "syn 2.0.77", + "syn 2.0.85", ] [[package]] @@ -505,9 +639,9 @@ version = "0.1.83" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "721cae7de5c34fbb2acd27e21e6d2cf7b886dce0c27388d46c4e6c47ea4318dd" dependencies = [ - "proc-macro2 1.0.86", + "proc-macro2 1.0.89", "quote 1.0.37", - "syn 2.0.77", + "syn 2.0.85", ] [[package]] @@ -542,17 +676,28 @@ dependencies = [ "winapi", ] +[[package]] +name = "auto_impl" +version = "1.2.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "3c87f3f15e7794432337fc718554eaa4dc8f04c9677a950ffe366f20a162ae42" +dependencies = [ + "proc-macro2 1.0.89", + "quote 1.0.37", + "syn 2.0.85", +] + [[package]] name = "autocfg" -version = "1.3.0" +version = "1.4.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0c4b4d0bd25bd0b74681c0ad21497610ce1b7c91b1022cd21c80c6fbdd9476b0" +checksum = "ace50bade8e6234aa140d9a2f552bbee1db4d353f69b8217bc503490fc1a9f26" [[package]] name = "aws-lc-rs" -version = "1.9.0" +version = "1.10.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2f95446d919226d587817a7d21379e6eb099b97b45110a7f272a444ca5c54070" +checksum = "cdd82dba44d209fddb11c190e0a94b78651f95299598e472215667417a03ff1d" dependencies = [ "aws-lc-sys", "mirai-annotations", @@ -562,11 +707,11 @@ dependencies = [ [[package]] name = "aws-lc-sys" -version = "0.21.2" +version = "0.22.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b3ddc4a5b231dd6958b140ff3151b6412b3f4321fab354f399eec8f14b06df62" +checksum = "df7a4168111d7eb622a31b214057b8509c0a7e1794f44c546d742330dc793972" dependencies = [ - "bindgen 0.69.4", + "bindgen 0.69.5", "cc", "cmake", "dunce", @@ -577,18 +722,46 @@ dependencies = [ [[package]] name = "axum" -version = "0.7.6" +version = "0.6.20" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8f43644eed690f5374f1af436ecd6aea01cd201f6fbdf0178adaf6907afb2cec" +checksum = "3b829e4e32b91e643de6eafe82b1d90675f5874230191a4ffbc1b336dec4d6bf" dependencies = [ "async-trait", - "axum-core", + "axum-core 0.3.4", + "bitflags 1.3.2", + "bytes", + "futures-util", + "http 0.2.12", + "http-body 0.4.6", + "hyper 0.14.31", + "itoa", + "matchit", + "memchr", + "mime", + "percent-encoding", + "pin-project-lite", + "rustversion", + "serde", + "sync_wrapper 0.1.2", + "tower 0.4.13", + "tower-layer", + "tower-service", +] + +[[package]] +name = "axum" +version = "0.7.7" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "504e3947307ac8326a5437504c517c4b56716c9d98fac0028c2acc7ca47d70ae" +dependencies = [ + "async-trait", + "axum-core 0.4.5", "bytes", "futures-util", "http 1.1.0", "http-body 1.0.1", "http-body-util", - "hyper 1.4.1", + "hyper 1.5.0", "hyper-util", "itoa", "matchit", @@ -612,9 +785,26 @@ dependencies = [ [[package]] name = "axum-core" -version = "0.4.4" +version = "0.3.4" 
+source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "759fa577a247914fd3f7f76d62972792636412fbfd634cd452f6a385a74d2d2c" +dependencies = [ + "async-trait", + "bytes", + "futures-util", + "http 0.2.12", + "http-body 0.4.6", + "mime", + "rustversion", + "tower-layer", + "tower-service", +] + +[[package]] +name = "axum-core" +version = "0.4.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5e6b8ba012a258d63c9adfa28b9ddcf66149da6f986c5b5452e629d5ee64bf00" +checksum = "09f2bd6146b97ae3359fa0cc6d6b376d9539582c7b4220f041a33ec24c226199" dependencies = [ "async-trait", "bytes", @@ -658,6 +848,12 @@ dependencies = [ "windows-targets 0.52.6", ] +[[package]] +name = "base-x" +version = "0.2.11" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "4cbbc9d0964165b47557570cce6c952866c2678457aca742aafc9fb771d30270" + [[package]] name = "base16ct" version = "0.1.1" @@ -711,6 +907,12 @@ dependencies = [ "regex", ] +[[package]] +name = "bech32" +version = "0.11.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "d965446196e3b7decd44aa7ee49e31d630118f90ef12f97900f262eb915c951d" + [[package]] name = "beef" version = "0.5.2" @@ -722,9 +924,9 @@ dependencies = [ [[package]] name = "bigdecimal" -version = "0.4.5" +version = "0.4.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "51d712318a27c7150326677b321a5fa91b55f6d9034ffd67f20319e147d40cee" +checksum = "8f850665a0385e070b64c38d2354e6c104c8479c59868d1e48a0c13ee2c7a1c1" dependencies = [ "autocfg", "libm", @@ -755,19 +957,19 @@ dependencies = [ "lazycell", "peeking_take_while", "prettyplease", - "proc-macro2 1.0.86", + "proc-macro2 1.0.89", "quote 1.0.37", "regex", "rustc-hash", "shlex", - "syn 2.0.77", + "syn 2.0.85", ] [[package]] name = "bindgen" -version = "0.69.4" +version = "0.69.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a00dc851838a2120612785d195287475a3ac45514741da670b735818822129a0" +checksum = "271383c67ccabffb7381723dea0672a673f292304fcb45c01cc648c7a8d58088" dependencies = [ "bitflags 2.6.0", "cexpr", @@ -777,20 +979,20 @@ dependencies = [ "lazycell", "log", "prettyplease", - "proc-macro2 1.0.86", + "proc-macro2 1.0.89", "quote 1.0.37", "regex", "rustc-hash", "shlex", - "syn 2.0.77", + "syn 2.0.85", "which", ] [[package]] name = "bip39" -version = "2.0.0" +version = "2.1.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "93f2635620bf0b9d4576eb7bb9a38a55df78bd1205d26fa994b25911a69f212f" +checksum = "33415e24172c1b7d6066f6d999545375ab8e1d95421d6784bdfff9496f292387" dependencies = [ "bitcoin_hashes", "serde", @@ -815,11 +1017,21 @@ dependencies = [ "serde", ] +[[package]] +name = "bitcoin-internals" +version = "0.2.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "9425c3bf7089c983facbae04de54513cce73b41c7f9ff8c845b54e7bc64ebbfb" + [[package]] name = "bitcoin_hashes" -version = "0.11.0" +version = "0.13.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "90064b8dee6815a6470d60bad07bbbaee885c0e12d04177138fa3291a01b7bc4" +checksum = "1930a4dabfebb8d7d9992db18ebe3ae2876f0a305fab206fd168df931ede293b" +dependencies = [ + "bitcoin-internals", + "hex-conservative", +] [[package]] name = "bitflags" @@ -986,6 +1198,18 @@ dependencies = [ "piper", ] +[[package]] +name = "blockstore" +version = "0.7.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = 
"7679095248a6dc7555fae81154ed1baef264383c16621ef881a219576c72a9be" +dependencies = [ + "cid", + "dashmap 6.1.0", + "multihash", + "thiserror", +] + [[package]] name = "blst" version = "0.3.13" @@ -1047,9 +1271,9 @@ checksum = "c3ef8005764f53cd4dca619f5bf64cafd4664dada50ece25e4d81de54c80cc0b" dependencies = [ "once_cell", "proc-macro-crate 3.2.0", - "proc-macro2 1.0.86", + "proc-macro2 1.0.89", "quote 1.0.37", - "syn 2.0.77", + "syn 2.0.85", "syn_derive", ] @@ -1097,7 +1321,7 @@ version = "0.6.12" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "3db406d29fbcd95542e92559bed4d8ad92636d1ca8b3b72ede10b4bcc010e659" dependencies = [ - "proc-macro2 1.0.86", + "proc-macro2 1.0.89", "quote 1.0.37", "syn 1.0.109", ] @@ -1116,9 +1340,12 @@ checksum = "1fd0f2584146f6f2ef48085050886acf353beff7305ebd1ae69500e27c67f64b" [[package]] name = "bytes" -version = "1.7.2" +version = "1.8.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "428d9aa8fbc0670b7b8d6030a7fadd0f86151cae55e4dbbece15f3780a3dfaf3" +checksum = "9ac0150caa2ae65ca5bd83f25c7de183dea78d4d366469f148435e2acfbad0da" +dependencies = [ + "serde", +] [[package]] name = "bytesize" @@ -1163,7 +1390,7 @@ checksum = "4acbb09d9ee8e23699b9634375c72795d095bf268439da88562cf9b501f181fa" dependencies = [ "camino", "cargo-platform", - "semver", + "semver 1.0.23", "serde", "serde_json", ] @@ -1176,15 +1403,105 @@ checksum = "37b2a672a2cb129a2e41c10b1224bb368f9f37a2b16b612598138befd7b37eb5" [[package]] name = "cc" -version = "1.1.21" +version = "1.1.31" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "07b1695e2c7e8fc85310cde85aeaab7e3097f593c91d209d3f9df76c928100f0" +checksum = "c2e7962b54006dcfcc61cb72735f4d89bb97061dd6a7ed882ec6b8ee53714c6f" dependencies = [ "jobserver", "libc", "shlex", ] +[[package]] +name = "celestia-proto" +version = "0.4.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "c6eb26c852e42015f85f3aed5c3d1472c751b143e2199d0401ebac2f4500b20d" +dependencies = [ + "celestia-tendermint-proto", + "prost 0.12.6", + "prost-build", + "prost-types", + "protox 0.6.1", + "serde", +] + +[[package]] +name = "celestia-tendermint" +version = "0.32.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ce8c92a01145f79a0f3ac7c44a43a9b5ee58e8a4c716b56d98833a3848db1afd" +dependencies = [ + "bytes", + "celestia-tendermint-proto", + "digest 0.10.7", + "ed25519", + "ed25519-consensus", + "flex-error", + "futures 0.3.31", + "num-traits", + "once_cell", + "prost 0.12.6", + "prost-types", + "serde", + "serde_bytes", + "serde_json", + "serde_repr", + "sha2 0.10.8", + "signature 2.2.0", + "subtle", + "subtle-encoding", + "time", + "zeroize", +] + +[[package]] +name = "celestia-tendermint-proto" +version = "0.32.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "9a95746c5221a74d7b913a415fdbb9e7c90e1b4d818dbbff59bddc034cfce2ec" +dependencies = [ + "bytes", + "flex-error", + "num-derive 0.3.3", + "num-traits", + "prost 0.12.6", + "prost-types", + "serde", + "serde_bytes", + "subtle-encoding", + "time", +] + +[[package]] +name = "celestia-types" +version = "0.6.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "caf52cc4b4cdf73fc07d9eeaea6d27bb39eed81f4bf8c89f01df86ace4e6da10" +dependencies = [ + "base64 0.22.1", + "bech32", + "blockstore", + "bytes", + "celestia-proto", + "celestia-tendermint", + "celestia-tendermint-proto", + "cid", + "const_format", + "enum_dispatch", + 
"leopard-codec", + "libp2p-identity", + "multiaddr", + "multihash", + "nmt-rs", + "ruint", + "serde", + "serde_repr", + "sha2 0.10.8", + "thiserror", + "time", +] + [[package]] name = "cesu8" version = "1.1.0" @@ -1278,6 +1595,18 @@ dependencies = [ "half", ] +[[package]] +name = "cid" +version = "0.11.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "3147d8272e8fa0ccd29ce51194dd98f79ddfb8191ba9e3409884e751798acf3a" +dependencies = [ + "core2", + "multibase", + "multihash", + "unsigned-varint", +] + [[package]] name = "cipher" version = "0.4.4" @@ -1473,9 +1802,9 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "4ac6a0c7b1a9e9a5186361f67dfa1b88213572f427fb9ab038efb2bd8c582dab" dependencies = [ "heck 0.5.0", - "proc-macro2 1.0.86", + "proc-macro2 1.0.89", "quote 1.0.37", - "syn 2.0.77", + "syn 2.0.85", ] [[package]] @@ -1513,9 +1842,9 @@ dependencies = [ [[package]] name = "colorchoice" -version = "1.0.2" +version = "1.0.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d3fd119d74b830634cea2a0f58bbd0d54540518a14397557951e79340abc28c0" +checksum = "5b63caa9aa9397e2d9480a9b13673856c78d8ac123288526c37d7839f2a86990" [[package]] name = "combine" @@ -1584,7 +1913,7 @@ version = "0.2.33" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "eff1a44b93f47b1bac19a27932f5c591e43d1ba357ee4f61526c8a25603f0eb1" dependencies = [ - "proc-macro2 1.0.86", + "proc-macro2 1.0.89", "quote 1.0.37", "unicode-xid 0.2.6", ] @@ -1632,6 +1961,15 @@ version = "0.8.7" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "773648b94d0e5d620f64f280777445740e61fe701025087ec8b57f45c791888b" +[[package]] +name = "core2" +version = "0.4.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "b49ba7ef1ad6107f8824dbe97de947cbaac53c44e7f9756a1fba0d37c1eec505" +dependencies = [ + "memchr", +] + [[package]] name = "cpufeatures" version = "0.2.14" @@ -1815,7 +2153,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "edb49164822f3ee45b17acd4a208cfc1251410cf0cad9a833234c9890774dd9f" dependencies = [ "quote 1.0.37", - "syn 2.0.77", + "syn 2.0.85", ] [[package]] @@ -1848,7 +2186,7 @@ dependencies = [ "curve25519-dalek-derive", "digest 0.10.7", "fiat-crypto", - "rustc_version", + "rustc_version 0.4.1", "subtle", "zeroize", ] @@ -1859,9 +2197,22 @@ version = "0.1.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "f46882e17999c6cc590af592290432be3bce0428cb0d5f8b6715e4dc7b383eb3" dependencies = [ - "proc-macro2 1.0.86", + "proc-macro2 1.0.89", "quote 1.0.37", - "syn 2.0.77", + "syn 2.0.85", +] + +[[package]] +name = "curve25519-dalek-ng" +version = "4.1.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "1c359b7249347e46fb28804470d071c921156ad62b3eef5d34e2ba867533dec8" +dependencies = [ + "byteorder", + "digest 0.9.0", + "rand_core 0.6.4", + "subtle-ng", + "zeroize", ] [[package]] @@ -1902,7 +2253,7 @@ checksum = "859d65a907b6852c9361e3185c862aae7fafd2887876799fa55f5f99dc40d610" dependencies = [ "fnv", "ident_case", - "proc-macro2 1.0.86", + "proc-macro2 1.0.89", "quote 1.0.37", "strsim 0.10.0", "syn 1.0.109", @@ -1916,7 +2267,7 @@ checksum = "109c1ca6e6b7f82cc233a97004ea8ed7ca123a9af07a8230878fcfda9b158bf0" dependencies = [ "fnv", "ident_case", - "proc-macro2 1.0.86", + "proc-macro2 1.0.89", "quote 1.0.37", "strsim 0.10.0", "syn 1.0.109", @@ -1930,10 +2281,10 @@ checksum = 
"95133861a8032aaea082871032f5815eb9e98cef03fa916ab4500513994df9e5" dependencies = [ "fnv", "ident_case", - "proc-macro2 1.0.86", + "proc-macro2 1.0.89", "quote 1.0.37", "strsim 0.11.1", - "syn 2.0.77", + "syn 2.0.85", ] [[package]] @@ -1966,7 +2317,7 @@ checksum = "d336a2a514f6ccccaa3e09b02d41d35330c07ddf03a62165fcec10bb561c7806" dependencies = [ "darling_core 0.20.10", "quote 1.0.37", - "syn 2.0.77", + "syn 2.0.85", ] [[package]] @@ -1982,6 +2333,46 @@ dependencies = [ "parking_lot_core", ] +[[package]] +name = "dashmap" +version = "6.1.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "5041cc499144891f3790297212f32a74fb938e5136a14943f338ef9e0ae276cf" +dependencies = [ + "cfg-if", + "crossbeam-utils", + "hashbrown 0.14.5", + "lock_api", + "once_cell", + "parking_lot_core", +] + +[[package]] +name = "data-encoding" +version = "2.6.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "e8566979429cf69b49a5c740c60791108e86440e8be149bbea4fe54d2c32d6e2" + +[[package]] +name = "data-encoding-macro" +version = "0.1.15" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f1559b6cba622276d6d63706db152618eeb15b89b3e4041446b05876e352e639" +dependencies = [ + "data-encoding", + "data-encoding-macro-internal", +] + +[[package]] +name = "data-encoding-macro-internal" +version = "0.1.13" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "332d754c0af53bc87c108fed664d121ecf59207ec4196041f04d6ab9002ad33f" +dependencies = [ + "data-encoding", + "syn 1.0.109", +] + [[package]] name = "debugid" version = "0.8.0" @@ -2029,7 +2420,7 @@ version = "2.2.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "fcc3dd5e9e9c0b295d6e1e4d811fb6f157d5ffd784b8d202fc62eac8035a770b" dependencies = [ - "proc-macro2 1.0.86", + "proc-macro2 1.0.89", "quote 1.0.37", "syn 1.0.109", ] @@ -2041,10 +2432,10 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "5f33878137e4dafd7fa914ad4e259e18a4e8e532b9617a2d0150262bf53abfce" dependencies = [ "convert_case 0.4.0", - "proc-macro2 1.0.86", + "proc-macro2 1.0.89", "quote 1.0.37", - "rustc_version", - "syn 2.0.77", + "rustc_version 0.4.1", + "syn 2.0.85", ] [[package]] @@ -2062,9 +2453,9 @@ version = "1.0.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "cb7330aeadfbe296029522e6c40f315320aba36fc43a5b3632f3795348f3bd22" dependencies = [ - "proc-macro2 1.0.86", + "proc-macro2 1.0.89", "quote 1.0.37", - "syn 2.0.77", + "syn 2.0.85", "unicode-xid 0.2.6", ] @@ -2176,6 +2567,19 @@ dependencies = [ "signature 2.2.0", ] +[[package]] +name = "ed25519-consensus" +version = "2.1.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "3c8465edc8ee7436ffea81d21a019b16676ee3db267aa8d5a8d729581ecf998b" +dependencies = [ + "curve25519-dalek-ng", + "hex", + "rand_core 0.6.4", + "sha2 0.9.9", + "zeroize", +] + [[package]] name = "ed25519-dalek" version = "2.1.1" @@ -2281,9 +2685,9 @@ checksum = "a357d28ed41a50f9c765dbfe56cbc04a64e53e5fc58ba79fbc34c10ef3df831f" [[package]] name = "encoding_rs" -version = "0.8.34" +version = "0.8.35" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b45de904aa0b010bce2ab45264d0631681847fa7b6f2eaa7dab7619943bc4f59" +checksum = "75030f3c4f45dafd7586dd6780965a8c7e8e285a5ecb86713e63a79c5b2766f3" dependencies = [ "cfg-if", ] @@ -2295,9 +2699,9 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = 
"aa18ce2bc66555b3218614519ac839ddb759a7d6720732f979ef8d13be147ecd" dependencies = [ "once_cell", - "proc-macro2 1.0.86", + "proc-macro2 1.0.89", "quote 1.0.37", - "syn 2.0.77", + "syn 2.0.85", ] [[package]] @@ -2447,12 +2851,33 @@ dependencies = [ "pin-project-lite", ] +[[package]] +name = "eyre" +version = "0.6.12" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "7cd915d99f24784cdc19fd37ef22b97e3ff0ae756c7e492e9fbfe897d61e2aec" +dependencies = [ + "indenter", + "once_cell", +] + [[package]] name = "fastrand" version = "2.1.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "e8c02a5121d4ea3eb16a80748c74f5549a5665e4c21333c6098f283870fbdea6" +[[package]] +name = "fastrlp" +version = "0.3.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "139834ddba373bbdd213dffe02c8d110508dcf1726c2be27e8d1f7d7e1856418" +dependencies = [ + "arrayvec 0.7.6", + "auto_impl", + "bytes", +] + [[package]] name = "ff" version = "0.12.1" @@ -2525,11 +2950,21 @@ dependencies = [ "miniz_oxide", ] +[[package]] +name = "flex-error" +version = "0.4.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "c606d892c9de11507fa0dcffc116434f94e105d0bbdc4e405b61519464c49d7b" +dependencies = [ + "eyre", + "paste", +] + [[package]] name = "flume" -version = "0.11.0" +version = "0.11.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "55ac459de2512911e4b674ce33cf20befaba382d05b62b008afc1c8b57cbf181" +checksum = "da0e4dd2a88388a1f4ccc7c9ce104604dab68d9f408dc34cd45823d5a9069095" dependencies = [ "futures-core", "futures-sink", @@ -2542,6 +2977,12 @@ version = "1.0.7" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "3f9eec918d3f24069decb9af1554cad7c880e2da24a9afd88aca000531ab82c1" +[[package]] +name = "foldhash" +version = "0.1.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f81ec6369c545a7d40e4589b5597581fa1c441fe1cce96dd1de43159910a36a2" + [[package]] name = "foreign-types" version = "0.3.2" @@ -2619,7 +3060,7 @@ dependencies = [ "itertools 0.10.5", "lazy_static", "num-bigint 0.4.6", - "num-derive", + "num-derive 0.2.5", "num-integer", "num-traits", "rand 0.4.6", @@ -2658,9 +3099,9 @@ checksum = "3a471a38ef8ed83cd6e40aa59c1ffe17db6855c18e3604d9c4ed8c08ebc28678" [[package]] name = "futures" -version = "0.3.30" +version = "0.3.31" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "645c6916888f6cb6350d2550b80fb63e734897a8498abe35cfb732b6487804b0" +checksum = "65bc07b1a8bc7c85c5f2e110c476c7389b4554ba72af57d8445ea63a576b0876" dependencies = [ "futures-channel", "futures-core", @@ -2673,9 +3114,9 @@ dependencies = [ [[package]] name = "futures-channel" -version = "0.3.30" +version = "0.3.31" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "eac8f7d7865dcb88bd4373ab671c8cf4508703796caa2b1985a9ca867b3fcb78" +checksum = "2dff15bf788c671c1934e366d07e30c1814a8ef514e1af724a602e8a2fbe1b10" dependencies = [ "futures-core", "futures-sink", @@ -2683,15 +3124,15 @@ dependencies = [ [[package]] name = "futures-core" -version = "0.3.30" +version = "0.3.31" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "dfc6580bb841c5a68e9ef15c77ccc837b40a7504914d52e47b8b0e9bbda25a1d" +checksum = "05f29059c0c2090612e8d742178b0580d2dc940c837851ad723096f87af6663e" [[package]] name = "futures-executor" -version = "0.3.30" +version = "0.3.31" source = 
"registry+https://github.com/rust-lang/crates.io-index" -checksum = "a576fc72ae164fca6b9db127eaa9a9dda0d61316034f33a0a0d4eda41f02b01d" +checksum = "1e28d1d997f585e54aebc3f97d39e72338912123a67330d723fdbb564d646c9f" dependencies = [ "futures-core", "futures-task", @@ -2712,9 +3153,9 @@ dependencies = [ [[package]] name = "futures-io" -version = "0.3.30" +version = "0.3.31" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a44623e20b9681a318efdd71c299b6b222ed6f231972bfe2f224ebad6311f0c1" +checksum = "9e5c1b78ca4aae1ac06c48a526a655760685149f0d465d21f37abfe57ce075c6" [[package]] name = "futures-lite" @@ -2731,26 +3172,26 @@ dependencies = [ [[package]] name = "futures-macro" -version = "0.3.30" +version = "0.3.31" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "87750cf4b7a4c0625b1529e4c543c2182106e4dedc60a2a6455e00d212c489ac" +checksum = "162ee34ebcb7c64a8abebc059ce0fee27c2262618d7b60ed8faf72fef13c3650" dependencies = [ - "proc-macro2 1.0.86", + "proc-macro2 1.0.89", "quote 1.0.37", - "syn 2.0.77", + "syn 2.0.85", ] [[package]] name = "futures-sink" -version = "0.3.30" +version = "0.3.31" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9fb8e00e87438d937621c1c6269e53f536c14d3fbd6a042bb24879e57d474fb5" +checksum = "e575fab7d1e0dcb8d0c7bcf9a63ee213816ab51902e6d244a95819acacf1d4f7" [[package]] name = "futures-task" -version = "0.3.30" +version = "0.3.31" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "38d84fa142264698cdce1a9f9172cf383a0c82de1bddcf3092901442c4097004" +checksum = "f90f7dce0722e95104fcb095585910c0977252f286e354b5e3bd38902cd99988" [[package]] name = "futures-timer" @@ -2764,9 +3205,9 @@ dependencies = [ [[package]] name = "futures-util" -version = "0.3.30" +version = "0.3.31" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3d6401deb83407ab3da39eba7e33987a73c3df0c82b4bb5813ee871c19c41d48" +checksum = "9fa08315bb612088cc391249efdc3bc77536f16c91f6cf495e6fbe85b20a4a81" dependencies = [ "futures 0.1.31", "futures-channel", @@ -2798,7 +3239,7 @@ version = "0.1.0" dependencies = [ "anyhow", "clap 4.5.20", - "futures 0.3.30", + "futures 0.3.31", "serde", "serde_json", "serde_yaml", @@ -2851,9 +3292,9 @@ dependencies = [ [[package]] name = "gimli" -version = "0.31.0" +version = "0.31.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "32085ea23f3234fc7846555e85283ba4de91e21016dc0455a16286d87a292d64" +checksum = "07e28edb80900c19c28f1072f2e8aeca7fa06b23cd4169cefe1af5aa3260783f" [[package]] name = "glob" @@ -2931,7 +3372,7 @@ dependencies = [ "google-cloud-token", "home", "jsonwebtoken", - "reqwest 0.12.7", + "reqwest 0.12.9", "serde", "serde_json", "thiserror", @@ -2947,7 +3388,7 @@ version = "0.5.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "04f945a208886a13d07636f38fb978da371d0abc3e34bad338124b9f8c135a8f" dependencies = [ - "reqwest 0.12.7", + "reqwest 0.12.9", "thiserror", "tokio", ] @@ -2972,7 +3413,7 @@ dependencies = [ "percent-encoding", "pkcs8 0.10.2", "regex", - "reqwest 0.12.7", + "reqwest 0.12.9", "reqwest-middleware", "ring", "serde", @@ -3000,8 +3441,8 @@ version = "0.4.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "19775995ee20209163239355bc3ad2f33f83da35d9ef72dea26e5af753552c87" dependencies = [ - "dashmap", - "futures 0.3.30", + "dashmap 5.5.3", + "futures 0.3.31", "futures-timer", "no-std-compat", "nonzero_ext", @@ -3045,7 +3486,7 @@ 
dependencies = [ "futures-sink", "futures-util", "http 0.2.12", - "indexmap 2.5.0", + "indexmap 2.6.0", "slab", "tokio", "tokio-util", @@ -3064,7 +3505,7 @@ dependencies = [ "futures-core", "futures-sink", "http 1.1.0", - "indexmap 2.5.0", + "indexmap 2.6.0", "slab", "tokio", "tokio-util", @@ -3115,6 +3556,17 @@ dependencies = [ "serde", ] +[[package]] +name = "hashbrown" +version = "0.15.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "1e087f84d4f86bf4b218b927129862374b72199ae7d8657835f1e89000eea4fb" +dependencies = [ + "allocator-api2", + "equivalent", + "foldhash", +] + [[package]] name = "hashlink" version = "0.9.1" @@ -3172,6 +3624,12 @@ version = "0.4.3" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "7f24254aa9a54b5c858eaee2f5bccdb46aaf0e486a595ed5fd8f86ba55232a70" +[[package]] +name = "hex-conservative" +version = "0.1.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "212ab92002354b4819390025006c897e8140934349e8635c9b077f47b4dcbd20" + [[package]] name = "hkdf" version = "0.12.4" @@ -3289,9 +3747,9 @@ dependencies = [ [[package]] name = "httparse" -version = "1.9.4" +version = "1.9.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0fcc0b4a115bf80b728eb8ea024ad5bd707b615bfed49e0665b6e0f86fd082d9" +checksum = "7d71d3574edd2771538b901e6549113b4006ece66150fb69c0fb6d9a2adae946" [[package]] name = "httpdate" @@ -3314,7 +3772,7 @@ dependencies = [ "crossbeam-utils", "form_urlencoded", "futures-util", - "hyper 0.14.30", + "hyper 0.14.31", "lazy_static", "levenshtein", "log", @@ -3335,9 +3793,9 @@ checksum = "f58b778a5761513caf593693f8951c97a5b610841e754788400f32102eefdff1" [[package]] name = "hyper" -version = "0.14.30" +version = "0.14.31" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a152ddd61dfaec7273fe8419ab357f33aee0d914c5f4efbf0d96fa749eea5ec9" +checksum = "8c08302e8fa335b151b788c775ff56e7a03ae64ff85c548ee820fecb70356e85" dependencies = [ "bytes", "futures-channel", @@ -3359,9 +3817,9 @@ dependencies = [ [[package]] name = "hyper" -version = "1.4.1" +version = "1.5.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "50dfd22e0e76d0f662d429a5f80fcaf3855009297eab6a0a9f8543834744ba05" +checksum = "bbbff0a806a4728c99295b254c8838933b5b082d75e3cb70c8dab21fdfbcfa9a" dependencies = [ "bytes", "futures-channel", @@ -3386,7 +3844,7 @@ checksum = "ec3efd23720e2049821a693cbc7e65ea87c72f1c58ff2f9522ff332b1491e590" dependencies = [ "futures-util", "http 0.2.12", - "hyper 0.14.30", + "hyper 0.14.31", "log", "rustls 0.21.12", "rustls-native-certs 0.6.3", @@ -3402,23 +3860,35 @@ checksum = "08afdbb5c31130e3034af566421053ab03787c640246a446327f550d11bcb333" dependencies = [ "futures-util", "http 1.1.0", - "hyper 1.4.1", + "hyper 1.5.0", "hyper-util", "log", - "rustls 0.23.13", + "rustls 0.23.16", "rustls-pki-types", "tokio", "tokio-rustls 0.26.0", "tower-service", ] +[[package]] +name = "hyper-timeout" +version = "0.4.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "bbb958482e8c7be4bc3cf272a766a2b0bf1a6755e7a6ae777f017a31d11b13b1" +dependencies = [ + "hyper 0.14.31", + "pin-project-lite", + "tokio", + "tokio-io-timeout", +] + [[package]] name = "hyper-timeout" version = "0.5.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "3203a961e5c83b6f5498933e78b6b263e208c197b63e9c6c53cc82ffd3f63793" dependencies = [ - "hyper 1.4.1", + "hyper 1.5.0", "hyper-util", 
"pin-project-lite", "tokio", @@ -3432,7 +3902,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "d6183ddfa99b85da61a140bea0efc93fdf56ceaa041b37d553518030827f9905" dependencies = [ "bytes", - "hyper 0.14.30", + "hyper 0.14.31", "native-tls", "tokio", "tokio-native-tls", @@ -3446,7 +3916,7 @@ checksum = "70206fc6890eaca9fde8a0bf71caa2ddfc9fe045ac9e5c70df101a7dbde866e0" dependencies = [ "bytes", "http-body-util", - "hyper 1.4.1", + "hyper 1.5.0", "hyper-util", "native-tls", "tokio", @@ -3456,16 +3926,16 @@ dependencies = [ [[package]] name = "hyper-util" -version = "0.1.9" +version = "0.1.10" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "41296eb09f183ac68eec06e03cdbea2e759633d4067b2f6552fc2e009bcad08b" +checksum = "df2dcfbe0677734ab2f3ffa7fa7bfd4706bfdc1ef393f2ee30184aed67e631b4" dependencies = [ "bytes", "futures-channel", "futures-util", "http 1.1.0", "http-body 1.0.1", - "hyper 1.4.1", + "hyper 1.5.0", "pin-project-lite", "socket2", "tokio", @@ -3559,11 +4029,17 @@ version = "0.2.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "11d7a9f6330b71fea57921c9b61c47ee6e84f72d394754eff6163ae67e7395eb" dependencies = [ - "proc-macro2 1.0.86", + "proc-macro2 1.0.89", "quote 1.0.37", "syn 1.0.109", ] +[[package]] +name = "indenter" +version = "0.3.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ce23b50ad8242c51a442f3ff322d56b02f08852c77e4c0b4d3fd684abc89c683" + [[package]] name = "indexmap" version = "1.9.3" @@ -3576,12 +4052,12 @@ dependencies = [ [[package]] name = "indexmap" -version = "2.5.0" +version = "2.6.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "68b900aa2f7301e21c36462b170ee99994de34dff39a4a6a528e80e7376d07e5" +checksum = "707907fe3c25f5424cce2cb7e1cbcafee6bdbe735ca90ef77c29e84591e5b9da" dependencies = [ "equivalent", - "hashbrown 0.14.5", + "hashbrown 0.15.0", ] [[package]] @@ -3601,9 +4077,9 @@ dependencies = [ [[package]] name = "insta" -version = "1.40.0" +version = "1.41.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6593a41c7a73841868772495db7dc1e8ecab43bb5c0b6da2059246c4b506ab60" +checksum = "a1f72d3e19488cf7d8ea52d2fc0f8754fc933398b337cd3cbdb28aaeb35159ef" dependencies = [ "console", "lazy_static", @@ -3623,9 +4099,9 @@ dependencies = [ [[package]] name = "ipnet" -version = "2.10.0" +version = "2.10.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "187674a687eed5fe42285b40c6291f9a01517d415fad1c3cbc6a9f778af7fcd4" +checksum = "ddc24109865250148c2e0f3d25d4f0f479571723792d3802153c60922a4fb708" [[package]] name = "ipnetwork" @@ -3715,9 +4191,9 @@ dependencies = [ [[package]] name = "js-sys" -version = "0.3.70" +version = "0.3.72" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1868808506b929d7b0cfa8f75951347aa71bb21144b7791bae35d9bccfcfe37a" +checksum = "6a88f1bda2bd75b0452a14784937d796722fdebfe50df998aeb3f0b7603019a9" dependencies = [ "wasm-bindgen", ] @@ -3786,7 +4262,7 @@ dependencies = [ "http 1.1.0", "jsonrpsee-core 0.23.2", "pin-project", - "rustls 0.23.13", + "rustls 0.23.16", "rustls-pki-types", "rustls-platform-verifier", "soketto 0.8.0", @@ -3810,7 +4286,7 @@ dependencies = [ "beef", "futures-timer", "futures-util", - "hyper 0.14.30", + "hyper 0.14.31", "jsonrpsee-types 0.21.0", "pin-project", "rustc-hash", @@ -3858,7 +4334,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = 
"78b7de9f3219d95985eb77fd03194d7c1b56c19bce1abfcc9d07462574b15572" dependencies = [ "async-trait", - "hyper 0.14.30", + "hyper 0.14.31", "hyper-rustls 0.24.2", "jsonrpsee-core 0.21.0", "jsonrpsee-types 0.21.0", @@ -3880,12 +4356,12 @@ dependencies = [ "async-trait", "base64 0.22.1", "http-body 1.0.1", - "hyper 1.4.1", + "hyper 1.5.0", "hyper-rustls 0.27.3", "hyper-util", "jsonrpsee-core 0.23.2", "jsonrpsee-types 0.23.2", - "rustls 0.23.13", + "rustls 0.23.16", "rustls-platform-verifier", "serde", "serde_json", @@ -3904,9 +4380,9 @@ checksum = "7895f186d5921065d96e16bd795e5ca89ac8356ec423fafc6e3d7cf8ec11aee4" dependencies = [ "heck 0.5.0", "proc-macro-crate 3.2.0", - "proc-macro2 1.0.86", + "proc-macro2 1.0.89", "quote 1.0.37", - "syn 2.0.77", + "syn 2.0.85", ] [[package]] @@ -3920,7 +4396,7 @@ dependencies = [ "http 1.1.0", "http-body 1.0.1", "http-body-util", - "hyper 1.4.1", + "hyper 1.5.0", "hyper-util", "jsonrpsee-core 0.23.2", "jsonrpsee-types 0.23.2", @@ -4060,7 +4536,7 @@ dependencies = [ "petgraph", "pico-args", "regex", - "regex-syntax 0.8.4", + "regex-syntax 0.8.5", "string_cache", "term", "tiny-keccak 2.0.2", @@ -4074,7 +4550,7 @@ version = "0.20.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "507460a910eb7b32ee961886ff48539633b788a36b65692b95f225b844c82553" dependencies = [ - "regex-automata 0.4.7", + "regex-automata 0.4.8", ] [[package]] @@ -4098,6 +4574,17 @@ version = "0.2.5" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "884e2677b40cc8c339eaefcb701c32ef1fd2493d71118dc0ca4b6a736c93bd67" +[[package]] +name = "leopard-codec" +version = "0.1.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ee58dbc414bd23885d7da915e0457618b36d1fc950a6169ef2cb29829d1b1a1d" +dependencies = [ + "bytes", + "lazy_static", + "thiserror", +] + [[package]] name = "levenshtein" version = "1.0.5" @@ -4106,9 +4593,9 @@ checksum = "db13adb97ab515a3691f56e4dbab09283d0b86cb45abd991d8634a9d6f501760" [[package]] name = "libc" -version = "0.2.159" +version = "0.2.161" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "561d97a539a36e26a9a5fad1ea11a3039a67714694aaa379433e580854bc3dc5" +checksum = "8e9489c2807c139ffd9c1794f4af0ebe86a828db53ecdc7fea2111d0fed085d1" [[package]] name = "libloading" @@ -4117,14 +4604,29 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "4979f22fdb869068da03c9f7528f8297c6fd2606bc3a4affe42e6a823fdb8da4" dependencies = [ "cfg-if", - "windows-targets 0.48.5", + "windows-targets 0.52.6", ] [[package]] name = "libm" -version = "0.2.8" +version = "0.2.11" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "8355be11b20d696c8f18f6cc018c4e372165b1fa8126cef092399c9951984ffa" + +[[package]] +name = "libp2p-identity" +version = "0.2.9" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4ec2a862134d2a7d32d7983ddcdd1c4923530833c9f2ea1a44fc5fa473989058" +checksum = "55cca1eb2bc1fd29f099f3daaab7effd01e1a54b7c577d0ed082521034d912e8" +dependencies = [ + "bs58", + "hkdf", + "multihash", + "quick-protobuf", + "sha2 0.10.8", + "thiserror", + "tracing", +] [[package]] name = "libredox" @@ -4241,13 +4743,13 @@ dependencies = [ "anyhow", "async-trait", "envy", - "futures 0.3.30", + "futures 0.3.31", "hex", "num", "once_cell", "rand 0.8.5", "regex", - "reqwest 0.12.7", + "reqwest 0.12.9", "serde", "serde_json", "static_assertions", @@ -4291,7 +4793,16 @@ version = "0.13.0" source = 
"registry+https://github.com/rust-lang/crates.io-index" checksum = "c000ca4d908ff18ac99b93a062cb8958d331c3220719c52e77cb19cc6ac5d2c1" dependencies = [ - "logos-derive", + "logos-derive 0.13.0", +] + +[[package]] +name = "logos" +version = "0.14.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "1c6b6e02facda28ca5fb8dbe4b152496ba3b1bd5a4b40bb2b1b2d8ad74e0f39b" +dependencies = [ + "logos-derive 0.14.2", ] [[package]] @@ -4302,10 +4813,25 @@ checksum = "dc487311295e0002e452025d6b580b77bb17286de87b57138f3b5db711cded68" dependencies = [ "beef", "fnv", - "proc-macro2 1.0.86", + "proc-macro2 1.0.89", "quote 1.0.37", "regex-syntax 0.6.29", - "syn 2.0.77", + "syn 2.0.85", +] + +[[package]] +name = "logos-codegen" +version = "0.14.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "b32eb6b5f26efacd015b000bfc562186472cd9b34bdba3f6b264e2a052676d10" +dependencies = [ + "beef", + "fnv", + "lazy_static", + "proc-macro2 1.0.89", + "quote 1.0.37", + "regex-syntax 0.8.5", + "syn 2.0.85", ] [[package]] @@ -4314,16 +4840,25 @@ version = "0.13.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "dbfc0d229f1f42d790440136d941afd806bc9e949e2bcb8faa813b0f00d1267e" dependencies = [ - "logos-codegen", + "logos-codegen 0.13.0", +] + +[[package]] +name = "logos-derive" +version = "0.14.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "3e5d0c5463c911ef55624739fc353238b4e310f0144be1f875dc42fec6bfd5ec" +dependencies = [ + "logos-codegen 0.14.2", ] [[package]] name = "lru" -version = "0.12.4" +version = "0.12.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "37ee39891760e7d94734f6f63fedc29a2e4a152f836120753a72503f09fcf904" +checksum = "234cf4f4a04dc1f57e24b96cc0cd600cf2af460d4161ac5ecdd0af8e1f3b2a38" dependencies = [ - "hashbrown 0.14.5", + "hashbrown 0.15.0", ] [[package]] @@ -4415,21 +4950,44 @@ version = "5.10.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "59bb584eaeeab6bd0226ccf3509a69d7936d148cf3d036ad350abe35e8c6856e" dependencies = [ - "miette-derive", + "miette-derive 5.10.0", "once_cell", "thiserror", "unicode-width", ] +[[package]] +name = "miette" +version = "7.2.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "4edc8853320c2a0dab800fbda86253c8938f6ea88510dc92c5f1ed20e794afc1" +dependencies = [ + "cfg-if", + "miette-derive 7.2.0", + "thiserror", + "unicode-width", +] + [[package]] name = "miette-derive" version = "5.10.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "49e7bc1560b95a3c4a25d03de42fe76ca718ab92d1a22a55b9b4cf67b3ae635c" dependencies = [ - "proc-macro2 1.0.86", + "proc-macro2 1.0.89", "quote 1.0.37", - "syn 2.0.77", + "syn 2.0.85", +] + +[[package]] +name = "miette-derive" +version = "7.2.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "dcf09caffaac8068c346b6df2a7fc27a177fd20b39421a39ce0a211bde679a6c" +dependencies = [ + "proc-macro2 1.0.89", + "quote 1.0.37", + "syn 2.0.85", ] [[package]] @@ -4456,7 +5014,7 @@ checksum = "c325dfab65f261f386debee8b0969da215b3fa0037e74c8a1234db7ba986d803" dependencies = [ "crossbeam-channel", "crossbeam-utils", - "dashmap", + "dashmap 5.5.3", "skeptic", "smallvec", "tagptr", @@ -4497,20 +5055,60 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "c9be0862c1b3f26a88803c4a49de6889c10e608b3ee9344e6ef5b45fb37ad3d1" [[package]] -name = "multer" -version = "3.1.0" +name = 
"multer" +version = "3.1.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "83e87776546dc87511aa5ee218730c92b666d7264ab6ed41f9d215af9cd5224b" +dependencies = [ + "bytes", + "encoding_rs", + "futures-util", + "http 1.1.0", + "httparse", + "memchr", + "mime", + "spin", + "version_check", +] + +[[package]] +name = "multiaddr" +version = "0.18.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "fe6351f60b488e04c1d21bc69e56b89cb3f5e8f5d22557d6e8031bdfd79b6961" +dependencies = [ + "arrayref", + "byteorder", + "data-encoding", + "libp2p-identity", + "multibase", + "multihash", + "percent-encoding", + "serde", + "static_assertions", + "unsigned-varint", + "url", +] + +[[package]] +name = "multibase" +version = "0.9.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "9b3539ec3c1f04ac9748a260728e855f261b4977f5c3406612c884564f329404" +dependencies = [ + "base-x", + "data-encoding", + "data-encoding-macro", +] + +[[package]] +name = "multihash" +version = "0.19.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "83e87776546dc87511aa5ee218730c92b666d7264ab6ed41f9d215af9cd5224b" +checksum = "cc41f430805af9d1cf4adae4ed2149c759b877b01d909a1f40256188d09345d2" dependencies = [ - "bytes", - "encoding_rs", - "futures-util", - "http 1.1.0", - "httparse", - "memchr", - "mime", - "spin", - "version_check", + "core2", + "unsigned-varint", ] [[package]] @@ -4554,6 +5152,18 @@ dependencies = [ "libc", ] +[[package]] +name = "nmt-rs" +version = "0.2.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "e408e823bdc9b4bb525a61b44e846239833a8f9bd86c03a43e4ca314a5497582" +dependencies = [ + "borsh", + "bytes", + "serde", + "sha2 0.10.8", +] + [[package]] name = "no-std-compat" version = "0.4.1" @@ -4678,6 +5288,17 @@ dependencies = [ "syn 0.15.44", ] +[[package]] +name = "num-derive" +version = "0.3.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "876a53fff98e03a936a674b29568b0e605f06b29372c2489ff4de23f1949743d" +dependencies = [ + "proc-macro2 1.0.89", + "quote 1.0.37", + "syn 1.0.109", +] + [[package]] name = "num-integer" version = "0.1.46" @@ -4765,9 +5386,9 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "96667db765a921f7b295ffee8b60472b686a51d4f21c2ee4ffdb94c7013b65a6" dependencies = [ "proc-macro-crate 1.3.1", - "proc-macro2 1.0.86", + "proc-macro2 1.0.89", "quote 1.0.37", - "syn 2.0.77", + "syn 2.0.85", ] [[package]] @@ -4777,25 +5398,25 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "af1844ef2428cc3e1cb900be36181049ef3d3193c63e43026cfe202983b27a56" dependencies = [ "proc-macro-crate 3.2.0", - "proc-macro2 1.0.86", + "proc-macro2 1.0.89", "quote 1.0.37", - "syn 2.0.77", + "syn 2.0.85", ] [[package]] name = "object" -version = "0.36.4" +version = "0.36.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "084f1a5821ac4c651660a94a7153d27ac9d8a53736203f58b31945ded098070a" +checksum = "aedf0a2d09c573ed1d8d85b30c119153926a2b36dce0ab28322c09a117a4683e" dependencies = [ "memchr", ] [[package]] name = "once_cell" -version = "1.19.0" +version = "1.20.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3fdb12b2476b595f9358c5161aa467c2438859caa136dec86c26fdd2efe17b92" +checksum = "1261fe7e33c73b354eab43b1273a57c8f967d0391e80353e51f764ac02cf6775" [[package]] name = "oorandom" @@ -4811,9 +5432,9 @@ checksum = 
"c08d65885ee38876c4f86fa503fb49d7b507c2b62552df7c70b2fce627e06381" [[package]] name = "openssl" -version = "0.10.66" +version = "0.10.68" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9529f4786b70a3e8c61e11179af17ab6188ad8d0ded78c5529441ed39d4bd9c1" +checksum = "6174bc48f102d208783c2c84bf931bb75927a617866870de8a4ea85597f871f5" dependencies = [ "bitflags 2.6.0", "cfg-if", @@ -4830,9 +5451,9 @@ version = "0.1.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "a948666b637a0f465e8564c73e89d4dde00d72d4d473cc972f390fc3dcee7d9c" dependencies = [ - "proc-macro2 1.0.86", + "proc-macro2 1.0.89", "quote 1.0.37", - "syn 2.0.77", + "syn 2.0.85", ] [[package]] @@ -4843,9 +5464,9 @@ checksum = "ff011a302c396a5197692431fc1948019154afc178baf7d8e37367442a4601cf" [[package]] name = "openssl-sys" -version = "0.9.103" +version = "0.9.104" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7f9e8deee91df40a943c71b917e5874b951d32a802526c85721ce3b776c929d6" +checksum = "45abf306cbf99debc8195b66b7346498d7b10c210de50418b5ccd7ceba08c741" dependencies = [ "cc", "libc", @@ -4889,7 +5510,7 @@ dependencies = [ "bytes", "http 1.1.0", "opentelemetry", - "reqwest 0.12.7", + "reqwest 0.12.9", ] [[package]] @@ -4906,10 +5527,10 @@ dependencies = [ "opentelemetry-proto", "opentelemetry_sdk", "prost 0.13.3", - "reqwest 0.12.7", + "reqwest 0.12.9", "thiserror", "tokio", - "tonic", + "tonic 0.12.3", ] [[package]] @@ -4921,7 +5542,7 @@ dependencies = [ "opentelemetry", "opentelemetry_sdk", "prost 0.13.3", - "tonic", + "tonic 0.12.3", ] [[package]] @@ -5016,7 +5637,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "d830939c76d294956402033aee57a6da7b438f2294eb94864c37b0569053a42c" dependencies = [ "proc-macro-crate 3.2.0", - "proc-macro2 1.0.86", + "proc-macro2 1.0.89", "quote 1.0.37", "syn 1.0.109", ] @@ -5056,6 +5677,43 @@ version = "1.0.15" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "57c0d7b74b563b49d38dae00a0c37d4d6de9b432382b2892f0574ddcae73fd0a" +[[package]] +name = "pbjson" +version = "0.6.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "1030c719b0ec2a2d25a5df729d6cff1acf3cc230bf766f4f97833591f7577b90" +dependencies = [ + "base64 0.21.7", + "serde", +] + +[[package]] +name = "pbjson-build" +version = "0.6.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "2580e33f2292d34be285c5bc3dba5259542b083cfad6037b6d70345f24dcb735" +dependencies = [ + "heck 0.4.1", + "itertools 0.11.0", + "prost 0.12.6", + "prost-types", +] + +[[package]] +name = "pbjson-types" +version = "0.6.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "18f596653ba4ac51bdecbb4ef6773bc7f56042dc13927910de1684ad3d32aa12" +dependencies = [ + "bytes", + "chrono", + "pbjson", + "pbjson-build", + "prost 0.12.6", + "prost-build", + "serde", +] + [[package]] name = "pbkdf2" version = "0.12.2" @@ -5098,9 +5756,9 @@ checksum = "e3148f5046208a5d56bcfc03053e3ca6334e51da8dfb19b6cdc8b306fae3283e" [[package]] name = "pest" -version = "2.7.13" +version = "2.7.14" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "fdbef9d1d47087a895abd220ed25eb4ad973a5e26f6a4367b038c25e28dfc2d9" +checksum = "879952a81a83930934cbf1786752d6dedc3b1f29e8f8fb2ad1d0a36f377cf442" dependencies = [ "memchr", "thiserror", @@ -5109,9 +5767,9 @@ dependencies = [ [[package]] name = "pest_derive" -version = "2.7.13" +version = "2.7.14" source 
= "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4d3a6e3394ec80feb3b6393c725571754c6188490265c61aaf260810d6b95aa0" +checksum = "d214365f632b123a47fd913301e14c946c61d1c183ee245fa76eb752e59a02dd" dependencies = [ "pest", "pest_generator", @@ -5119,22 +5777,22 @@ dependencies = [ [[package]] name = "pest_generator" -version = "2.7.13" +version = "2.7.14" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "94429506bde1ca69d1b5601962c73f4172ab4726571a59ea95931218cb0e930e" +checksum = "eb55586734301717aea2ac313f50b2eb8f60d2fc3dc01d190eefa2e625f60c4e" dependencies = [ "pest", "pest_meta", - "proc-macro2 1.0.86", + "proc-macro2 1.0.89", "quote 1.0.37", - "syn 2.0.77", + "syn 2.0.85", ] [[package]] name = "pest_meta" -version = "2.7.13" +version = "2.7.14" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ac8a071862e93690b6e34e9a5fb8e33ff3734473ac0245b27232222c4906a33f" +checksum = "b75da2a70cf4d9cb76833c990ac9cd3923c9a8905a8929789ce347c84564d03d" dependencies = [ "once_cell", "pest", @@ -5148,7 +5806,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "b4c5cc86750666a3ed20bdaf5ca2a0344f9c67674cae0515bec2da16fbaa47db" dependencies = [ "fixedbitset", - "indexmap 2.5.0", + "indexmap 2.6.0", ] [[package]] @@ -5168,29 +5826,29 @@ checksum = "5be167a7af36ee22fe3115051bc51f6e6c7054c9348e28deb4f49bd6f705a315" [[package]] name = "pin-project" -version = "1.1.5" +version = "1.1.7" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b6bf43b791c5b9e34c3d182969b4abb522f9343702850a2e57f460d00d09b4b3" +checksum = "be57f64e946e500c8ee36ef6331845d40a93055567ec57e8fae13efd33759b95" dependencies = [ "pin-project-internal", ] [[package]] name = "pin-project-internal" -version = "1.1.5" +version = "1.1.7" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2f38a4412a78282e09a2cf38d195ea5420d15ba0602cb375210efbc877243965" +checksum = "3c0f5fad0874fc7abcd4d750e76917eaebbecaa2c20bde22e1dbeeba8beb758c" dependencies = [ - "proc-macro2 1.0.86", + "proc-macro2 1.0.89", "quote 1.0.37", - "syn 2.0.77", + "syn 2.0.85", ] [[package]] name = "pin-project-lite" -version = "0.2.14" +version = "0.2.15" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "bda66fc9667c18cb2758a2ac84d1167245054bcf85d5d1aaa6923f45801bdd02" +checksum = "915a1e146535de9163f3987b8944ed8cf49a18bb0056bcebcdcece385cece4ff" [[package]] name = "pin-utils" @@ -5345,12 +6003,12 @@ dependencies = [ [[package]] name = "prettyplease" -version = "0.2.22" +version = "0.2.25" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "479cf940fbbb3426c32c5d5176f62ad57549a0bb84773423ba8be9d089f5faba" +checksum = "64d1ec885c64d0457d564db4ec299b2dae3f9c02808b8ad9c3a089c591b18033" dependencies = [ - "proc-macro2 1.0.86", - "syn 2.0.77", + "proc-macro2 1.0.89", + "syn 2.0.85", ] [[package]] @@ -5402,7 +6060,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "da25490ff9892aab3fcf7c36f08cfb902dd3e71ca0f9f9517bea02a73a5ce38c" dependencies = [ "proc-macro-error-attr", - "proc-macro2 1.0.86", + "proc-macro2 1.0.89", "quote 1.0.37", "syn 1.0.109", "version_check", @@ -5414,7 +6072,7 @@ version = "1.0.4" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "a1be40180e52ecc98ad80b184934baf3d0d29f979574e439af5a55274b35f869" dependencies = [ - "proc-macro2 1.0.86", + "proc-macro2 1.0.89", "quote 1.0.37", "version_check", ] @@ -5436,9 
+6094,9 @@ dependencies = [ [[package]] name = "proc-macro2" -version = "1.0.86" +version = "1.0.89" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5e719e8df665df0d1c8fbfd238015744736151d4445ec0836b8e628aae103b77" +checksum = "f139b0662de085916d1fb67d2b4169d1addddda1919e696f3252b740b629986e" dependencies = [ "unicode-ident", ] @@ -5461,9 +6119,25 @@ version = "0.4.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "440f724eba9f6996b75d63681b0a92b06947f1457076d503a4d2e2c8f56442b8" dependencies = [ - "proc-macro2 1.0.86", + "proc-macro2 1.0.89", "quote 1.0.37", - "syn 2.0.77", + "syn 2.0.85", +] + +[[package]] +name = "proptest" +version = "1.5.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "b4c2511913b88df1637da85cc8d96ec8e43a3f8bb8ccb71ee1ac240d6f3df58d" +dependencies = [ + "bitflags 2.6.0", + "lazy_static", + "num-traits", + "rand 0.8.5", + "rand_chacha", + "rand_xorshift", + "regex-syntax 0.8.5", + "unarray", ] [[package]] @@ -5493,7 +6167,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "22505a5c94da8e3b7c2996394d1c933236c4d743e81a410bcca4e6989fc066a4" dependencies = [ "bytes", - "heck 0.4.1", + "heck 0.5.0", "itertools 0.12.1", "log", "multimap", @@ -5503,7 +6177,7 @@ dependencies = [ "prost 0.12.6", "prost-types", "regex", - "syn 2.0.77", + "syn 2.0.85", "tempfile", ] @@ -5515,9 +6189,9 @@ checksum = "81bddcdb20abf9501610992b6759a4c888aef7d1a7247ef75e2404275ac24af1" dependencies = [ "anyhow", "itertools 0.12.1", - "proc-macro2 1.0.86", + "proc-macro2 1.0.89", "quote 1.0.37", - "syn 2.0.77", + "syn 2.0.85", ] [[package]] @@ -5528,9 +6202,9 @@ checksum = "e9552f850d5f0964a4e4d0bf306459ac29323ddfbae05e35a7c0d35cb0803cc5" dependencies = [ "anyhow", "itertools 0.13.0", - "proc-macro2 1.0.86", + "proc-macro2 1.0.89", "quote 1.0.37", - "syn 2.0.77", + "syn 2.0.85", ] [[package]] @@ -5540,8 +6214,8 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "057237efdb71cf4b3f9396302a3d6599a92fa94063ba537b66130980ea9909f3" dependencies = [ "base64 0.21.7", - "logos", - "miette", + "logos 0.13.0", + "miette 5.10.0", "once_cell", "prost 0.12.6", "prost-types", @@ -5549,6 +6223,19 @@ dependencies = [ "serde-value", ] +[[package]] +name = "prost-reflect" +version = "0.13.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "6f5eec97d5d34bdd17ad2db2219aabf46b054c6c41bd5529767c9ce55be5898f" +dependencies = [ + "logos 0.14.2", + "miette 7.2.0", + "once_cell", + "prost 0.12.6", + "prost-types", +] + [[package]] name = "prost-types" version = "0.12.6" @@ -5565,11 +6252,26 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "00bb76c5f6221de491fe2c8f39b106330bbd9762c6511119c07940e10eb9ff11" dependencies = [ "bytes", - "miette", + "miette 5.10.0", + "prost 0.12.6", + "prost-reflect 0.12.0", + "prost-types", + "protox-parse 0.5.0", + "thiserror", +] + +[[package]] +name = "protox" +version = "0.6.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ac532509cee918d40f38c3e12f8ef9230f215f017d54de7dd975015538a42ce7" +dependencies = [ + "bytes", + "miette 7.2.0", "prost 0.12.6", - "prost-reflect", + "prost-reflect 0.13.1", "prost-types", - "protox-parse", + "protox-parse 0.6.1", "thiserror", ] @@ -5579,8 +6281,20 @@ version = "0.5.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "7b4581f441c58863525a3e6bec7b8de98188cf75239a56c725a3e7288450a33f" 
dependencies = [ - "logos", - "miette", + "logos 0.13.0", + "miette 5.10.0", + "prost-types", + "thiserror", +] + +[[package]] +name = "protox-parse" +version = "0.6.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "7f6c33f43516fe397e2f930779d720ca12cd057f7da4cd6326a0ef78d69dee96" +dependencies = [ + "logos 0.14.2", + "miette 7.2.0", "prost-types", "thiserror", ] @@ -5600,7 +6314,7 @@ version = "0.1.4" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "16b845dbfca988fa33db069c0e230574d15a3088f147a87b64c7589eb662c9ac" dependencies = [ - "proc-macro2 1.0.86", + "proc-macro2 1.0.89", "quote 1.0.37", "syn 1.0.109", ] @@ -5662,7 +6376,7 @@ version = "1.0.37" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "b5b9d34b8991d19d98081b46eacdd8eb58c6f2b201139f7c5f643cc155a633af" dependencies = [ - "proc-macro2 1.0.86", + "proc-macro2 1.0.89", ] [[package]] @@ -5729,6 +6443,15 @@ dependencies = [ "getrandom", ] +[[package]] +name = "rand_xorshift" +version = "0.3.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "d25bf25ec5ae4a3f1b92f929810509a2f53d7dca2f50b794ff57e3face536c8f" +dependencies = [ + "rand_core 0.6.4", +] + [[package]] name = "rand_xoshiro" version = "0.6.0" @@ -5778,9 +6501,9 @@ dependencies = [ [[package]] name = "redox_syscall" -version = "0.5.6" +version = "0.5.7" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "355ae415ccd3a04315d3f8246e86d67689ea74d88d915576e1589a351062a13b" +checksum = "9b6dfecf2c74bce2466cabf93f6664d6998a69eb21e39f4207930065b27b771f" dependencies = [ "bitflags 2.6.0", ] @@ -5798,14 +6521,14 @@ dependencies = [ [[package]] name = "regex" -version = "1.10.6" +version = "1.11.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4219d74c6b67a3654a9fbebc4b419e22126d13d2f3c4a07ee0cb61ff79a79619" +checksum = "b544ef1b4eac5dc2db33ea63606ae9ffcfac26c1416a2806ae0bf5f56b201191" dependencies = [ "aho-corasick", "memchr", - "regex-automata 0.4.7", - "regex-syntax 0.8.4", + "regex-automata 0.4.8", + "regex-syntax 0.8.5", ] [[package]] @@ -5819,13 +6542,13 @@ dependencies = [ [[package]] name = "regex-automata" -version = "0.4.7" +version = "0.4.8" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "38caf58cc5ef2fed281f89292ef23f6365465ed9a41b7a7754eb4e26496c92df" +checksum = "368758f23274712b504848e9d5a6f010445cc8b87a7cdb4d7cbee666c1288da3" dependencies = [ "aho-corasick", "memchr", - "regex-syntax 0.8.4", + "regex-syntax 0.8.5", ] [[package]] @@ -5836,9 +6559,9 @@ checksum = "f162c6dd7b008981e4d40210aca20b4bd0f9b60ca9271061b07f78537722f2e1" [[package]] name = "regex-syntax" -version = "0.8.4" +version = "0.8.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7a66a03ae7c801facd77a29370b4faec201768915ac14a721ba36f20bc9c209b" +checksum = "2b15c43186be67a4fd63bee50d0303afffcef381492ebe2c5d87f324e1b8815c" [[package]] name = "rend" @@ -5863,7 +6586,7 @@ dependencies = [ "h2 0.3.26", "http 0.2.12", "http-body 0.4.6", - "hyper 0.14.30", + "hyper 0.14.31", "hyper-tls 0.5.0", "ipnet", "js-sys", @@ -5891,9 +6614,9 @@ dependencies = [ [[package]] name = "reqwest" -version = "0.12.7" +version = "0.12.9" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f8f4955649ef5c38cc7f9e8aa41761d48fb9677197daea9984dc54f56aad5e63" +checksum = "a77c62af46e79de0a562e1a9849205ffcb7fc1238876e9bd743357570e04046f" dependencies = [ "async-compression", 
"base64 0.22.1", @@ -5906,7 +6629,7 @@ dependencies = [ "http 1.1.0", "http-body 1.0.1", "http-body-util", - "hyper 1.4.1", + "hyper 1.5.0", "hyper-rustls 0.27.3", "hyper-tls 0.6.0", "hyper-util", @@ -5919,7 +6642,7 @@ dependencies = [ "once_cell", "percent-encoding", "pin-project-lite", - "rustls-pemfile 2.1.3", + "rustls-pemfile 2.2.0", "serde", "serde_json", "serde_urlencoded", @@ -5946,7 +6669,7 @@ dependencies = [ "anyhow", "async-trait", "http 1.1.0", - "reqwest 0.12.7", + "reqwest 0.12.9", "serde", "thiserror", "tower-service", @@ -6013,6 +6736,15 @@ dependencies = [ "windows-sys 0.52.0", ] +[[package]] +name = "ripemd" +version = "0.1.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "bd124222d17ad93a644ed9d011a40f4fb64aa54275c08cc216524a9ea82fb09f" +dependencies = [ + "digest 0.10.7", +] + [[package]] name = "rkyv" version = "0.7.45" @@ -6037,7 +6769,7 @@ version = "0.7.45" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "503d1d27590a2b0a3a4ca4c94755aa2875657196ecbf401a42eff41d7de532c0" dependencies = [ - "proc-macro2 1.0.86", + "proc-macro2 1.0.89", "quote 1.0.37", "syn 1.0.109", ] @@ -6088,6 +6820,36 @@ dependencies = [ "zeroize", ] +[[package]] +name = "ruint" +version = "1.12.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "2c3cc4c2511671f327125da14133d0c5c5d137f006a1017a16f557bc85b16286" +dependencies = [ + "alloy-rlp", + "ark-ff 0.3.0", + "ark-ff 0.4.2", + "bytes", + "fastrlp", + "num-bigint 0.4.6", + "num-traits", + "parity-scale-codec", + "primitive-types", + "proptest", + "rand 0.8.5", + "rlp", + "ruint-macro", + "serde", + "valuable", + "zeroize", +] + +[[package]] +name = "ruint-macro" +version = "1.2.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "48fd7bd8a6377e15ad9d42a8ec25371b94ddc67abe7c8b9127bec79bebaaae18" + [[package]] name = "rust_decimal" version = "1.36.0" @@ -6122,20 +6884,29 @@ version = "2.1.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "3e75f6a532d0fd9f7f13144f392b6ad56a32696bfcd9c78f797f16bbb6f072d6" +[[package]] +name = "rustc_version" +version = "0.3.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f0dfe2087c51c460008730de8b57e6a320782fbfb312e1f4d520e6c6fae155ee" +dependencies = [ + "semver 0.11.0", +] + [[package]] name = "rustc_version" version = "0.4.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "cfcb3a22ef46e85b45de6ee7e79d063319ebb6594faafcf1c225ea92ab6e9b92" dependencies = [ - "semver", + "semver 1.0.23", ] [[package]] name = "rustix" -version = "0.38.37" +version = "0.38.38" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8acb788b847c24f28525660c4d7758620a7210875711f79e7f663cc152726811" +checksum = "aa260229e6538e52293eeb577aabd09945a09d6d9cc0fc550ed7529056c2e32a" dependencies = [ "bitflags 2.6.0", "errno", @@ -6172,9 +6943,9 @@ dependencies = [ [[package]] name = "rustls" -version = "0.23.13" +version = "0.23.16" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f2dabaac7466917e566adb06783a81ca48944c6898a1b08b9374106dd671f4c8" +checksum = "eee87ff5d9b36712a58574e12e9f0ea80f915a5b0ac518d322b24a465617925e" dependencies = [ "aws-lc-rs", "log", @@ -6205,7 +6976,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "e5bfb394eeed242e909609f56089eecfe5fda225042e8b171791b9c95f5931e5" dependencies = [ "openssl-probe", - "rustls-pemfile 2.1.3", + 
"rustls-pemfile 2.2.0", "rustls-pki-types", "schannel", "security-framework", @@ -6222,19 +6993,18 @@ dependencies = [ [[package]] name = "rustls-pemfile" -version = "2.1.3" +version = "2.2.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "196fe16b00e106300d3e45ecfcb764fa292a535d7326a29a5875c579c7417425" +checksum = "dce314e5fee3f39953d46bb63bb8a46d40c2f8fb7cc5a3b6cab2bde9721d6e50" dependencies = [ - "base64 0.22.1", "rustls-pki-types", ] [[package]] name = "rustls-pki-types" -version = "1.8.0" +version = "1.10.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "fc0a2ce646f8655401bb81e7927b812614bd5d91dbc968696be50603510fcaf0" +checksum = "16f1201b3c9a7ee8039bcadc17b7e605e2945b27eee7631788c1bd2b0643674b" [[package]] name = "rustls-platform-verifier" @@ -6247,7 +7017,7 @@ dependencies = [ "jni", "log", "once_cell", - "rustls 0.23.13", + "rustls 0.23.16", "rustls-native-certs 0.7.3", "rustls-platform-verifier-android", "rustls-webpki 0.102.8", @@ -6287,9 +7057,9 @@ dependencies = [ [[package]] name = "rustversion" -version = "1.0.17" +version = "1.0.18" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "955d28af4278de8121b7ebeb796b6a45735dc01436d898801014aced2773a3d6" +checksum = "0e819f2bc632f285be6d7cd36e25940d45b2391dd6d9b939e79de557f7014248" [[package]] name = "ruzstd" @@ -6351,7 +7121,7 @@ checksum = "d3475108a1b62c7efd1b5c65974f30109a598b2f45f23c9ae030acb9686966db" dependencies = [ "darling 0.14.4", "proc-macro-crate 1.3.1", - "proc-macro2 1.0.86", + "proc-macro2 1.0.89", "quote 1.0.37", "syn 1.0.109", ] @@ -6379,20 +7149,20 @@ checksum = "995491f110efdc6bea96d6a746140e32bfceb4ea47510750a5467295a4707a25" dependencies = [ "darling 0.14.4", "proc-macro-crate 1.3.1", - "proc-macro2 1.0.86", + "proc-macro2 1.0.89", "quote 1.0.37", "syn 1.0.109", ] [[package]] name = "scale-info" -version = "2.11.3" +version = "2.11.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "eca070c12893629e2cc820a9761bedf6ce1dcddc9852984d1dc734b8bd9bd024" +checksum = "1aa7ffc1c0ef49b0452c6e2986abf2b07743320641ffd5fc63d552458e3b779b" dependencies = [ "bitvec", "cfg-if", - "derive_more 0.99.18", + "derive_more 1.0.0", "parity-scale-codec", "scale-info-derive", "serde", @@ -6400,14 +7170,14 @@ dependencies = [ [[package]] name = "scale-info-derive" -version = "2.11.3" +version = "2.11.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2d35494501194174bda522a32605929eefc9ecf7e0a326c26db1fdd85881eb62" +checksum = "46385cc24172cf615450267463f937c10072516359b3ff1cb24228a4a08bf951" dependencies = [ "proc-macro-crate 3.2.0", - "proc-macro2 1.0.86", + "proc-macro2 1.0.89", "quote 1.0.37", - "syn 1.0.109", + "syn 2.0.85", ] [[package]] @@ -6416,10 +7186,10 @@ version = "0.1.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "00860983481ac590ac87972062909bef0d6a658013b592ccc0f2feb272feab11" dependencies = [ - "proc-macro2 1.0.86", + "proc-macro2 1.0.89", "quote 1.0.37", "scale-info", - "syn 2.0.77", + "syn 2.0.85", "thiserror", ] @@ -6445,9 +7215,9 @@ dependencies = [ [[package]] name = "schannel" -version = "0.1.24" +version = "0.1.26" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e9aaafd5a2b6e3d657ff009d82fbd630b6bd54dd4eb06f21693925cdf80f9b8b" +checksum = "01227be5826fa0690321a2ba6c5cd57a19cf3f6a09e76973b58e61de6ab9d1c1" dependencies = [ "windows-sys 0.59.0", ] @@ -6587,6 +7357,15 @@ dependencies = [ "tokio", ] 
+[[package]] +name = "semver" +version = "0.11.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f301af10236f6df4160f7c3f04eec6dbc70ace82d23326abad5edee88801c6b6" +dependencies = [ + "semver-parser", +] + [[package]] name = "semver" version = "1.0.23" @@ -6596,6 +7375,15 @@ dependencies = [ "serde", ] +[[package]] +name = "semver-parser" +version = "0.10.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "00b0bef5b7f9e0df16536d3961cfb6e84331c065b4066afb39768d0e319411f7" +dependencies = [ + "pest", +] + [[package]] name = "send_wrapper" version = "0.4.0" @@ -6642,7 +7430,7 @@ dependencies = [ "hostname", "libc", "os_info", - "rustc_version", + "rustc_version 0.4.1", "sentry-core", "uname", ] @@ -6718,9 +7506,9 @@ checksum = "a3f0bf26fd526d2a95683cd0f87bf103b8539e2ca1ef48ce002d67aad59aa0b4" [[package]] name = "serde" -version = "1.0.210" +version = "1.0.214" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c8e3592472072e6e22e0a54d5904d9febf8508f65fb8552499a1abc7d1078c3a" +checksum = "f55c3193aca71c12ad7890f1785d2b73e1b9f63a0bbc353c08ef26fe03fc56b5" dependencies = [ "serde_derive", ] @@ -6746,20 +7534,20 @@ dependencies = [ [[package]] name = "serde_derive" -version = "1.0.210" +version = "1.0.214" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "243902eda00fad750862fc144cea25caca5e20d615af0a81bee94ca738f1df1f" +checksum = "de523f781f095e28fa605cdce0f8307e451cc0fd14e2eb4cd2e98a355b147766" dependencies = [ - "proc-macro2 1.0.86", + "proc-macro2 1.0.89", "quote 1.0.37", - "syn 2.0.77", + "syn 2.0.85", ] [[package]] name = "serde_json" -version = "1.0.128" +version = "1.0.132" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6ff5456707a1de34e7e37f2a6fd3d3f808c318259cbd01ab6377795054b483d8" +checksum = "d726bfaff4b320266d395898905d0eba0345aae23b54aee3a737e260fd46db03" dependencies = [ "itoa", "memchr", @@ -6787,6 +7575,17 @@ dependencies = [ "serde", ] +[[package]] +name = "serde_repr" +version = "0.1.19" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "6c64451ba24fc7a6a2d60fc75dd9c83c90903b19028d4eff35e88fc1e86564e9" +dependencies = [ + "proc-macro2 1.0.89", + "quote 1.0.37", + "syn 2.0.85", +] + [[package]] name = "serde_spanned" version = "0.6.8" @@ -6827,7 +7626,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "e182d6ec6f05393cc0e5ed1bf81ad6db3a8feedf8ee515ecdd369809bcce8082" dependencies = [ "darling 0.13.4", - "proc-macro2 1.0.86", + "proc-macro2 1.0.89", "quote 1.0.37", "syn 1.0.109", ] @@ -6838,7 +7637,7 @@ version = "0.9.34+deprecated" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "6a8b1a1a2ebf674015cc02edccce75287f1a0130d394307b36743c2f5d504b47" dependencies = [ - "indexmap 2.5.0", + "indexmap 2.6.0", "itoa", "ryu", "serde", @@ -7172,7 +7971,7 @@ name = "snapshots_creator" version = "0.1.0" dependencies = [ "anyhow", - "futures 0.3.30", + "futures 0.3.31", "rand 0.8.5", "structopt", "test-casing", @@ -7199,7 +7998,7 @@ dependencies = [ "chacha20poly1305", "curve25519-dalek", "rand_core 0.6.4", - "rustc_version", + "rustc_version 0.4.1", "sha2 0.10.8", "subtle", ] @@ -7222,7 +8021,7 @@ checksum = "41d1c5305e39e09653383c2c7244f2f78b3bcae37cf50c64cb4789c9f5096ec2" dependencies = [ "base64 0.13.1", "bytes", - "futures 0.3.30", + "futures 0.3.31", "httparse", "log", "rand 0.8.5", @@ -7237,7 +8036,7 @@ checksum = 
"37468c595637c10857701c990f93a40ce0e357cedb0953d1c26c8d8027f9bb53" dependencies = [ "base64 0.22.1", "bytes", - "futures 0.3.30", + "futures 0.3.31", "http 1.1.0", "httparse", "log", @@ -7340,7 +8139,7 @@ dependencies = [ "hashbrown 0.14.5", "hashlink", "hex", - "indexmap 2.5.0", + "indexmap 2.6.0", "ipnetwork", "log", "memchr", @@ -7367,11 +8166,11 @@ version = "0.8.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "cac0692bcc9de3b073e8d747391827297e075c7710ff6276d9f7a1f3d58c6657" dependencies = [ - "proc-macro2 1.0.86", + "proc-macro2 1.0.89", "quote 1.0.37", "sqlx-core", "sqlx-macros-core", - "syn 2.0.77", + "syn 2.0.85", ] [[package]] @@ -7385,7 +8184,7 @@ dependencies = [ "heck 0.5.0", "hex", "once_cell", - "proc-macro2 1.0.86", + "proc-macro2 1.0.89", "quote 1.0.37", "serde", "serde_json", @@ -7394,7 +8193,7 @@ dependencies = [ "sqlx-mysql", "sqlx-postgres", "sqlx-sqlite", - "syn 2.0.77", + "syn 2.0.85", "tempfile", "tokio", "url", @@ -7585,7 +8384,7 @@ checksum = "dcb5ae327f9cc13b68763b5749770cb9e048a99bd9dfdfa58d0cf05d5f64afe0" dependencies = [ "heck 0.3.3", "proc-macro-error", - "proc-macro2 1.0.86", + "proc-macro2 1.0.89", "quote 1.0.37", "syn 1.0.109", ] @@ -7606,10 +8405,10 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "4c6bee85a5a24955dc440386795aa378cd9cf82acd5f764469152d2270e581be" dependencies = [ "heck 0.5.0", - "proc-macro2 1.0.86", + "proc-macro2 1.0.89", "quote 1.0.37", "rustversion", - "syn 2.0.77", + "syn 2.0.85", ] [[package]] @@ -7618,6 +8417,21 @@ version = "2.6.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "13c2bddecc57b384dee18652358fb23172facb8a2c51ccc10d74c157bdea3292" +[[package]] +name = "subtle-encoding" +version = "0.5.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "7dcb1ed7b8330c5eed5441052651dd7a12c75e2ed88f2ec024ae1fa3a5e59945" +dependencies = [ + "zeroize", +] + +[[package]] +name = "subtle-ng" +version = "2.5.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "734676eb262c623cec13c3155096e08d1f8f29adce39ba17948b18dad1e54142" + [[package]] name = "subxt" version = "0.34.0" @@ -7630,7 +8444,7 @@ dependencies = [ "derivative", "either", "frame-metadata 16.0.0", - "futures 0.3.30", + "futures 0.3.31", "hex", "impl-serde", "instant", @@ -7665,12 +8479,12 @@ dependencies = [ "hex", "jsonrpsee 0.21.0", "parity-scale-codec", - "proc-macro2 1.0.86", + "proc-macro2 1.0.89", "quote 1.0.37", "scale-info", "scale-typegen", "subxt-metadata", - "syn 2.0.77", + "syn 2.0.85", "thiserror", "tokio", ] @@ -7681,7 +8495,7 @@ version = "0.34.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "ecec7066ba7bc0c3608fcd1d0c7d9584390990cd06095b6ae4f114f74c4b8550" dependencies = [ - "futures 0.3.30", + "futures 0.3.31", "futures-util", "serde", "serde_json", @@ -7704,7 +8518,7 @@ dependencies = [ "quote 1.0.37", "scale-typegen", "subxt-codegen", - "syn 2.0.77", + "syn 2.0.85", ] [[package]] @@ -7758,18 +8572,18 @@ version = "1.0.109" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "72b64191b275b66ffe2469e8af2c1cfe3bafa67b529ead792a6d0160888b4237" dependencies = [ - "proc-macro2 1.0.86", + "proc-macro2 1.0.89", "quote 1.0.37", "unicode-ident", ] [[package]] name = "syn" -version = "2.0.77" +version = "2.0.85" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9f35bcdf61fd8e7be6caf75f429fdca8beb3ed76584befb503b1569faee373ed" +checksum = 
"5023162dfcd14ef8f32034d8bcd4cc5ddc61ef7a247c024a33e24e1f24d21b56" dependencies = [ - "proc-macro2 1.0.86", + "proc-macro2 1.0.89", "quote 1.0.37", "unicode-ident", ] @@ -7781,9 +8595,9 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "1329189c02ff984e9736652b1631330da25eaa6bc639089ed4915d25446cbe7b" dependencies = [ "proc-macro-error", - "proc-macro2 1.0.86", + "proc-macro2 1.0.89", "quote 1.0.37", - "syn 2.0.77", + "syn 2.0.85", ] [[package]] @@ -7869,11 +8683,17 @@ version = "1.0.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "55937e1799185b12863d447f42597ed69d9928686b8d88a1df17376a097d8369" +[[package]] +name = "target-triple" +version = "0.1.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "42a4d50cdb458045afc8131fd91b64904da29548bcb63c7236e0844936c13078" + [[package]] name = "tempfile" -version = "3.12.0" +version = "3.13.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "04cbcdd0c794ebb0d4cf35e88edd2f7d2c4c3e9a5a6dab322839b321c6a87a64" +checksum = "f0f2c9fc62d0beef6951ccffd757e241266a2c833136efbe35af6cd2567dca5b" dependencies = [ "cfg-if", "fastrand", @@ -7927,9 +8747,9 @@ version = "0.1.3" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "f9b53c7124dd88026d5d98a1eb1fd062a578b7d783017c9298825526c7fb6427" dependencies = [ - "proc-macro2 1.0.86", + "proc-macro2 1.0.89", "quote 1.0.37", - "syn 2.0.77", + "syn 2.0.85", ] [[package]] @@ -7949,9 +8769,9 @@ version = "0.2.16" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "5999e24eaa32083191ba4e425deb75cdf25efefabe5aaccb7446dd0d4122a3f5" dependencies = [ - "proc-macro2 1.0.86", + "proc-macro2 1.0.89", "quote 1.0.37", - "syn 2.0.77", + "syn 2.0.85", ] [[package]] @@ -7971,22 +8791,22 @@ checksum = "23d434d3f8967a09480fb04132ebe0a3e088c173e6d0ee7897abbdf4eab0f8b9" [[package]] name = "thiserror" -version = "1.0.64" +version = "1.0.65" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d50af8abc119fb8bb6dbabcfa89656f46f84aa0ac7688088608076ad2b459a84" +checksum = "5d11abd9594d9b38965ef50805c5e469ca9cc6f197f883f717e0269a3057b3d5" dependencies = [ "thiserror-impl", ] [[package]] name = "thiserror-impl" -version = "1.0.64" +version = "1.0.65" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "08904e7672f5eb876eaaf87e0ce17857500934f4981c4a0ab2b4aa98baac7fc3" +checksum = "ae71770322cbd277e69d762a16c444af02aa0575ac0d174f0b9562d3b37f8602" dependencies = [ - "proc-macro2 1.0.86", + "proc-macro2 1.0.89", "quote 1.0.37", - "syn 2.0.77", + "syn 2.0.85", ] [[package]] @@ -8117,9 +8937,9 @@ dependencies = [ [[package]] name = "tokio" -version = "1.40.0" +version = "1.41.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e2b070231665d27ad9ec9b8df639893f46727666c6767db40317fbe920a5d998" +checksum = "145f3413504347a2be84393cc8a7d2fb4d863b375909ea59f2158261aa258bbb" dependencies = [ "backtrace", "bytes", @@ -8133,15 +8953,25 @@ dependencies = [ "windows-sys 0.52.0", ] +[[package]] +name = "tokio-io-timeout" +version = "1.2.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "30b74022ada614a1b4834de765f9bb43877f910cc8ce4be40e89042c9223a8bf" +dependencies = [ + "pin-project-lite", + "tokio", +] + [[package]] name = "tokio-macros" version = "2.4.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = 
"693d596312e88961bc67d7f1f97af8a70227d9f90c31bba5806eec004978d752" dependencies = [ - "proc-macro2 1.0.86", + "proc-macro2 1.0.89", "quote 1.0.37", - "syn 2.0.77", + "syn 2.0.85", ] [[package]] @@ -8181,7 +9011,7 @@ version = "0.26.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "0c7bc40d0e5a97695bb96e27995cd3a08538541b0a846f65bba7a359f36700d4" dependencies = [ - "rustls 0.23.13", + "rustls 0.23.16", "rustls-pki-types", "tokio", ] @@ -8239,7 +9069,7 @@ version = "0.19.15" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "1b5bb770da30e5cbfde35a2d7b9b8a2c4b8ef89548a7a6aeab5c9a576e3e7421" dependencies = [ - "indexmap 2.5.0", + "indexmap 2.6.0", "toml_datetime", "winnow 0.5.40", ] @@ -8250,13 +9080,40 @@ version = "0.22.22" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "4ae48d6208a266e853d946088ed816055e556cc6028c5e8e2b84d9fa5dd7c7f5" dependencies = [ - "indexmap 2.5.0", + "indexmap 2.6.0", "serde", "serde_spanned", "toml_datetime", "winnow 0.6.20", ] +[[package]] +name = "tonic" +version = "0.11.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "76c4eb7a4e9ef9d4763600161f12f5070b92a578e1b634db88a6887844c91a13" +dependencies = [ + "async-stream", + "async-trait", + "axum 0.6.20", + "base64 0.21.7", + "bytes", + "h2 0.3.26", + "http 0.2.12", + "http-body 0.4.6", + "hyper 0.14.31", + "hyper-timeout 0.4.1", + "percent-encoding", + "pin-project", + "prost 0.12.6", + "tokio", + "tokio-stream", + "tower 0.4.13", + "tower-layer", + "tower-service", + "tracing", +] + [[package]] name = "tonic" version = "0.12.3" @@ -8265,15 +9122,15 @@ checksum = "877c5b330756d856ffcc4553ab34a5684481ade925ecc54bcd1bf02b1d0d4d52" dependencies = [ "async-stream", "async-trait", - "axum", + "axum 0.7.7", "base64 0.22.1", "bytes", "h2 0.4.6", "http 1.1.0", "http-body 1.0.1", "http-body-util", - "hyper 1.4.1", - "hyper-timeout", + "hyper 1.5.0", + "hyper-timeout 0.5.1", "hyper-util", "percent-encoding", "pin-project", @@ -8373,9 +9230,9 @@ version = "0.1.27" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "34704c8d6ebcbc939824180af020566b01a7c01f80641264eba0999f6c2b6be7" dependencies = [ - "proc-macro2 1.0.86", + "proc-macro2 1.0.89", "quote 1.0.37", - "syn 2.0.77", + "syn 2.0.85", ] [[package]] @@ -8451,9 +9308,9 @@ dependencies = [ [[package]] name = "triomphe" -version = "0.1.13" +version = "0.1.14" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e6631e42e10b40c0690bf92f404ebcfe6e1fdb480391d15f17cc8e96eeed5369" +checksum = "ef8f7726da4807b58ea5c96fdc122f80702030edc33b35aff9190a51148ccc85" [[package]] name = "try-lock" @@ -8463,14 +9320,15 @@ checksum = "e421abadd41a4225275504ea4d6566923418b7f05506fbc9c0fe86ba7396114b" [[package]] name = "trybuild" -version = "1.0.99" +version = "1.0.101" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "207aa50d36c4be8d8c6ea829478be44a372c6a77669937bb39c698e52f1491e8" +checksum = "8dcd332a5496c026f1e14b7f3d2b7bd98e509660c04239c58b0ba38a12daded4" dependencies = [ "glob", "serde", "serde_derive", "serde_json", + "target-triple", "termcolor", "toml", ] @@ -8500,9 +9358,9 @@ checksum = "42ff0bf0c66b8238c6f3b578df37d0b7848e55df8577b3f74f92a69acceeb825" [[package]] name = "ucd-trie" -version = "0.1.6" +version = "0.1.7" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ed646292ffc8188ef8ea4d1e0e0150fb15a5c2e12ad9b8fc191ae7a8a7f3c4b9" +checksum = 
"2896d95c02a80c6d6a5d6e953d479f5ddf2dfdb6a244441010e373ac0fb88971" [[package]] name = "uint" @@ -8525,20 +9383,23 @@ dependencies = [ "libc", ] +[[package]] +name = "unarray" +version = "0.1.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "eaea85b334db583fe3274d12b4cd1880032beab409c0d774be044d4480ab9a94" + [[package]] name = "unicase" -version = "2.7.0" +version = "2.8.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f7d2d4dafb69621809a81864c9c1b864479e1235c0dd4e199924b9742439ed89" -dependencies = [ - "version_check", -] +checksum = "7e51b68083f157f853b6379db119d1c1be0e6e4dec98101079dec41f6f5cf6df" [[package]] name = "unicode-bidi" -version = "0.3.15" +version = "0.3.17" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "08f95100a766bf4f8f28f90d77e0a5461bbdb219042e7679bebe79004fed8d75" +checksum = "5ab17db44d7388991a428b2ee655ce0c212e862eff1768a455c58f9aad6e7893" [[package]] name = "unicode-ident" @@ -8557,9 +9418,9 @@ dependencies = [ [[package]] name = "unicode-properties" -version = "0.1.2" +version = "0.1.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "52ea75f83c0137a9b98608359a5f1af8144876eb67bcb1ce837368e906a9f524" +checksum = "e70f2a8b45122e719eb623c01822704c4e0907e7e426a05927e1a1cfff5b75d0" [[package]] name = "unicode-segmentation" @@ -8617,6 +9478,12 @@ version = "0.2.11" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "673aac59facbab8a9007c7f6108d11f63b603f7cabff99fabf650fea5c32b861" +[[package]] +name = "unsigned-varint" +version = "0.8.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "eb066959b24b5196ae73cb057f45598450d2c5f71460e98c49b738086eff9c06" + [[package]] name = "untrusted" version = "0.9.0" @@ -8662,9 +9529,9 @@ checksum = "06abde3611657adf66d383f00b093d7faecc7fa57071cce2578660c9f1010821" [[package]] name = "uuid" -version = "1.10.0" +version = "1.11.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "81dfa00651efa65069b0b6b651f4aaa31ba9e3c3ce0137aaad053604ee7e0314" +checksum = "f8c5f0a0af699448548ad1a2fbf920fb4bee257eae39953ba95cb84891a0446a" dependencies = [ "serde", ] @@ -8677,9 +9544,9 @@ checksum = "830b7e5d4d90034032940e4ace0d9a9a057e7a45cd94e6c007832e39edb82f6d" [[package]] name = "value-bag" -version = "1.9.0" +version = "1.10.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5a84c137d37ab0142f0f2ddfe332651fdbf252e7b7dbb4e67b6c1f1b2e925101" +checksum = "3ef4c4aa54d5d05a279399bfa921ec387b7aba77caf7a682ae8d86785b8fdad2" [[package]] name = "vcpkg" @@ -8732,7 +9599,7 @@ version = "0.2.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "671d3b894d5d0849f0a597f56bf071f42d4f2a1cbcf2f78ca21f870ab7c0cc2b" dependencies = [ - "hyper 0.14.30", + "hyper 0.14.31", "once_cell", "tokio", "tracing", @@ -8745,9 +9612,9 @@ version = "0.2.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "6a511871dc5de990a3b2a0e715facfbc5da848c0c0395597a1415029fb7c250a" dependencies = [ - "proc-macro2 1.0.86", + "proc-macro2 1.0.89", "quote 1.0.37", - "syn 2.0.77", + "syn 2.0.85", ] [[package]] @@ -8808,9 +9675,9 @@ checksum = "b8dad83b4f25e74f184f64c43b150b91efe7647395b42289f38e50566d82855b" [[package]] name = "wasm-bindgen" -version = "0.2.93" +version = "0.2.95" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"a82edfc16a6c469f5f44dc7b571814045d60404b55a0ee849f9bcfa2e63dd9b5" +checksum = "128d1e363af62632b8eb57219c8fd7877144af57558fb2ef0368d0087bddeb2e" dependencies = [ "cfg-if", "once_cell", @@ -8819,24 +9686,24 @@ dependencies = [ [[package]] name = "wasm-bindgen-backend" -version = "0.2.93" +version = "0.2.95" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9de396da306523044d3302746f1208fa71d7532227f15e347e2d93e4145dd77b" +checksum = "cb6dd4d3ca0ddffd1dd1c9c04f94b868c37ff5fac97c30b97cff2d74fce3a358" dependencies = [ "bumpalo", "log", "once_cell", - "proc-macro2 1.0.86", + "proc-macro2 1.0.89", "quote 1.0.37", - "syn 2.0.77", + "syn 2.0.85", "wasm-bindgen-shared", ] [[package]] name = "wasm-bindgen-futures" -version = "0.4.43" +version = "0.4.45" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "61e9300f63a621e96ed275155c108eb6f843b6a26d053f122ab69724559dc8ed" +checksum = "cc7ec4f8827a71586374db3e87abdb5a2bb3a15afed140221307c3ec06b1f63b" dependencies = [ "cfg-if", "js-sys", @@ -8846,9 +9713,9 @@ dependencies = [ [[package]] name = "wasm-bindgen-macro" -version = "0.2.93" +version = "0.2.95" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "585c4c91a46b072c92e908d99cb1dcdf95c5218eeb6f3bf1efa991ee7a68cccf" +checksum = "e79384be7f8f5a9dd5d7167216f022090cf1f9ec128e6e6a482a2cb5c5422c56" dependencies = [ "quote 1.0.37", "wasm-bindgen-macro-support", @@ -8856,28 +9723,28 @@ dependencies = [ [[package]] name = "wasm-bindgen-macro-support" -version = "0.2.93" +version = "0.2.95" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "afc340c74d9005395cf9dd098506f7f44e38f2b4a21c6aaacf9a105ea5e1e836" +checksum = "26c6ab57572f7a24a4985830b120de1594465e5d500f24afe89e16b4e833ef68" dependencies = [ - "proc-macro2 1.0.86", + "proc-macro2 1.0.89", "quote 1.0.37", - "syn 2.0.77", + "syn 2.0.85", "wasm-bindgen-backend", "wasm-bindgen-shared", ] [[package]] name = "wasm-bindgen-shared" -version = "0.2.93" +version = "0.2.95" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c62a0a307cb4a311d3a07867860911ca130c3494e8c2719593806c08bc5d0484" +checksum = "65fc09f10666a9f147042251e0dda9c18f166ff7de300607007e96bdebc1068d" [[package]] name = "wasm-streams" -version = "0.4.0" +version = "0.4.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b65dc4c90b63b118468cf747d8bf3566c1913ef60be765b5730ead9e0a3ba129" +checksum = "15053d8d85c7eccdbefef60f06769760a563c7f0a9d6902a13d35c7800b0ad65" dependencies = [ "futures-util", "js-sys", @@ -8928,9 +9795,9 @@ dependencies = [ [[package]] name = "web-sys" -version = "0.3.70" +version = "0.3.72" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "26fdeaafd9bd129f65e7c031593c24d62186301e0c72c8978fa1678be7d532c0" +checksum = "f6488b90108c040df0fe62fa815cbdee25124641df01814dd7282749234c6112" dependencies = [ "js-sys", "wasm-bindgen", @@ -9284,9 +10151,9 @@ version = "0.7.35" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "fa4f8080344d4671fb4e831a13ad1e68092748387dfc4f55e356242fae12ce3e" dependencies = [ - "proc-macro2 1.0.86", + "proc-macro2 1.0.89", "quote 1.0.37", - "syn 2.0.77", + "syn 2.0.85", ] [[package]] @@ -9304,9 +10171,9 @@ version = "1.4.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "ce36e65b0d2999d2aafac989fb249189a141aee1f53c612c1f37d72631959f69" dependencies = [ - "proc-macro2 1.0.86", + "proc-macro2 1.0.89", "quote 1.0.37", 
- "syn 2.0.77", + "syn 2.0.85", ] [[package]] @@ -9607,7 +10474,7 @@ dependencies = [ "byteorder", "cfg-if", "crossbeam", - "futures 0.3.30", + "futures 0.3.31", "hex", "lazy_static", "num_cpus", @@ -9625,7 +10492,7 @@ dependencies = [ "anyhow", "assert_matches", "async-trait", - "futures 0.3.30", + "futures 0.3.31", "serde", "tempfile", "test-casing", @@ -9664,7 +10531,7 @@ dependencies = [ "circuit_sequencer_api 0.140.3", "circuit_sequencer_api 0.141.2", "circuit_sequencer_api 0.150.7", - "futures 0.3.30", + "futures 0.3.31", "itertools 0.10.5", "num_cpus", "rand 0.8.5", @@ -9783,7 +10650,7 @@ dependencies = [ "anyhow", "async-trait", "rand 0.8.5", - "semver", + "semver 1.0.23", "tracing", "vise", "zksync_concurrency", @@ -9809,14 +10676,14 @@ dependencies = [ "bytesize", "http-body-util", "human-repr", - "hyper 1.4.1", + "hyper 1.5.0", "hyper-util", "im", "once_cell", "pin-project", "prost 0.12.6", "rand 0.8.5", - "semver", + "semver 1.0.23", "snow", "thiserror", "tls-listener", @@ -9917,7 +10784,7 @@ name = "zksync_contract_verification_server" version = "0.1.0" dependencies = [ "anyhow", - "axum", + "axum 0.7.7", "serde", "serde_json", "tokio", @@ -9935,7 +10802,7 @@ version = "0.1.0" dependencies = [ "anyhow", "ctrlc", - "futures 0.3.30", + "futures 0.3.31", "structopt", "tokio", "tracing", @@ -9959,7 +10826,7 @@ dependencies = [ "hex", "lazy_static", "regex", - "semver", + "semver 1.0.23", "serde", "serde_json", "tempfile", @@ -10026,7 +10893,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "5939e2df4288c263c706ff18ac718e984149223ad4289d6d957d767dcfc04c81" dependencies = [ "proc-macro-error", - "proc-macro2 1.0.86", + "proc-macro2 1.0.89", "quote 1.0.37", "syn 1.0.109", ] @@ -10048,21 +10915,31 @@ dependencies = [ "async-trait", "backon", "base58", + "bech32", + "bincode", "blake2 0.10.6", "blake2b_simd", "bytes", + "celestia-types", "flate2", - "futures 0.3.30", + "futures 0.3.31", "hex", + "http 1.1.0", "jsonrpsee 0.23.2", "parity-scale-codec", - "reqwest 0.12.7", + "pbjson-types", + "prost 0.12.6", + "reqwest 0.12.9", + "ripemd", "scale-encode", + "secp256k1", "serde", "serde_json", + "sha2 0.10.8", "subxt-metadata", "subxt-signer", "tokio", + "tonic 0.11.0", "tracing", "zksync_config", "zksync_da_client", @@ -10077,7 +10954,7 @@ version = "0.1.0" dependencies = [ "anyhow", "chrono", - "futures 0.3.30", + "futures 0.3.31", "rand 0.8.5", "tokio", "tracing", @@ -10248,8 +11125,8 @@ dependencies = [ "async-trait", "clap 4.5.20", "envy", - "futures 0.3.30", - "rustc_version", + "futures 0.3.31", + "rustc_version 0.4.1", "serde", "serde_json", "tempfile", @@ -10306,7 +11183,7 @@ dependencies = [ "fraction", "httpmock", "rand 0.8.5", - "reqwest 0.12.7", + "reqwest 0.12.9", "serde", "serde_json", "tokio", @@ -10322,7 +11199,7 @@ version = "0.1.0" dependencies = [ "anyhow", "async-trait", - "axum", + "axum 0.7.7", "bincode", "thiserror", "tokio", @@ -10356,7 +11233,7 @@ dependencies = [ "num-bigint 0.4.6", "num-integer", "num-traits", - "proc-macro2 1.0.86", + "proc-macro2 1.0.89", "quote 1.0.37", "serde", "syn 1.0.109", @@ -10368,7 +11245,7 @@ version = "0.1.0" dependencies = [ "assert_matches", "async-trait", - "futures 0.3.30", + "futures 0.3.31", "serde", "serde_json", "thiserror", @@ -10485,11 +11362,11 @@ dependencies = [ "anyhow", "assert_matches", "async-trait", - "axum", - "futures 0.3.30", + "axum 0.7.7", + "futures 0.3.31", "itertools 0.10.5", "once_cell", - "reqwest 0.12.7", + "reqwest 0.12.9", "serde", "serde_json", "tempfile", @@ -10566,10 
+11443,10 @@ dependencies = [ "anyhow", "assert_matches", "async-trait", - "axum", + "axum 0.7.7", "chrono", "const-decoder", - "futures 0.3.30", + "futures 0.3.31", "governor", "hex", "http 1.1.0", @@ -10621,7 +11498,7 @@ dependencies = [ "async-trait", "rand 0.8.5", "secrecy", - "semver", + "semver 1.0.23", "tempfile", "test-casing", "thiserror", @@ -10704,9 +11581,9 @@ dependencies = [ "assert_matches", "async-trait", "ctrlc", - "futures 0.3.30", + "futures 0.3.31", "pin-project-lite", - "semver", + "semver 1.0.23", "thiserror", "tokio", "tracing", @@ -10760,9 +11637,9 @@ dependencies = [ name = "zksync_node_framework_derive" version = "0.1.0" dependencies = [ - "proc-macro2 1.0.86", + "proc-macro2 1.0.89", "quote 1.0.37", - "syn 2.0.77", + "syn 2.0.85", ] [[package]] @@ -10817,7 +11694,7 @@ dependencies = [ "async-trait", "backon", "chrono", - "futures 0.3.30", + "futures 0.3.31", "once_cell", "serde", "serde_json", @@ -10871,7 +11748,7 @@ dependencies = [ "http 1.1.0", "prost 0.12.6", "rand 0.8.5", - "reqwest 0.12.7", + "reqwest 0.12.9", "serde_json", "tempfile", "tokio", @@ -10900,9 +11777,9 @@ name = "zksync_proof_data_handler" version = "0.1.0" dependencies = [ "anyhow", - "axum", + "axum 0.7.7", "chrono", - "hyper 1.4.1", + "hyper 1.5.0", "serde_json", "tokio", "tower 0.4.13", @@ -10931,7 +11808,7 @@ dependencies = [ "bit-vec", "once_cell", "prost 0.12.6", - "prost-reflect", + "prost-reflect 0.12.0", "quick-protobuf", "rand 0.8.5", "serde", @@ -10951,12 +11828,12 @@ dependencies = [ "anyhow", "heck 0.5.0", "prettyplease", - "proc-macro2 1.0.86", + "proc-macro2 1.0.89", "prost-build", - "prost-reflect", - "protox", + "prost-reflect 0.12.0", + "protox 0.5.1", "quote 1.0.37", - "syn 2.0.77", + "syn 2.0.85", ] [[package]] @@ -11035,7 +11912,7 @@ version = "0.1.0" dependencies = [ "anyhow", "clap 4.5.20", - "futures 0.3.30", + "futures 0.3.31", "serde_json", "tikv-jemallocator", "tokio", @@ -11064,7 +11941,7 @@ dependencies = [ name = "zksync_shared_metrics" version = "0.1.0" dependencies = [ - "rustc_version", + "rustc_version 0.4.1", "tracing", "vise", "zksync_dal", @@ -11078,7 +11955,7 @@ dependencies = [ "anyhow", "assert_matches", "async-trait", - "futures 0.3.30", + "futures 0.3.31", "serde", "test-casing", "thiserror", @@ -11144,7 +12021,7 @@ dependencies = [ "anyhow", "assert_matches", "async-trait", - "futures 0.3.30", + "futures 0.3.31", "hex", "itertools 0.10.5", "once_cell", @@ -11205,7 +12082,7 @@ dependencies = [ "anyhow", "async-trait", "envy", - "reqwest 0.12.7", + "reqwest 0.12.9", "secp256k1", "serde", "thiserror", @@ -11299,12 +12176,12 @@ dependencies = [ "assert_matches", "bigdecimal", "bincode", - "futures 0.3.30", + "futures 0.3.31", "hex", "num", "once_cell", "rand 0.8.5", - "reqwest 0.12.7", + "reqwest 0.12.9", "serde", "serde_json", "thiserror", @@ -11405,8 +12282,8 @@ dependencies = [ "assert_matches", "async-trait", "backon", - "dashmap", - "futures 0.3.30", + "dashmap 5.5.3", + "futures 0.3.31", "once_cell", "rand 0.8.5", "serde", @@ -11439,12 +12316,12 @@ dependencies = [ "anyhow", "assert_matches", "async-trait", - "futures 0.3.30", + "futures 0.3.31", "jsonrpsee 0.23.2", "pin-project-lite", "rand 0.8.5", "rlp", - "rustls 0.23.13", + "rustls 0.23.16", "serde", "serde_json", "test-casing", diff --git a/Cargo.toml b/Cargo.toml index 6d51e5060aa8..5da7612171f9 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -150,7 +150,7 @@ opentelemetry-semantic-conventions = "0.16.0" opentelemetry-appender-tracing = "0.5" pin-project-lite = "0.2.13" pretty_assertions = "1" 
-prost = "0.12.1" +prost = "0.12.6" rand = "0.8" rayon = "1.3.1" regex = "1" @@ -211,6 +211,13 @@ subxt-metadata = "0.34.0" parity-scale-codec = { version = "3.6.9", default-features = false } subxt-signer = { version = "0.34", default-features = false } +# Celestia +celestia-types = "0.6.1" +bech32 = "0.11.0" +ripemd = "0.1.3" +tonic = "0.11.0" +pbjson-types = "0.6.0" + # Here and below: # We *always* pin the latest version of protocol to disallow accidental changes in the execution logic. # However, for the historical version of protocol crates, we have lax requirements. Otherwise, diff --git a/core/bin/zksync_server/src/main.rs b/core/bin/zksync_server/src/main.rs index 9e1a1b5948c7..855f50df1419 100644 --- a/core/bin/zksync_server/src/main.rs +++ b/core/bin/zksync_server/src/main.rs @@ -11,12 +11,11 @@ use zksync_config::{ }, fri_prover_group::FriProverGroupConfig, house_keeper::HouseKeeperConfig, - secrets::DataAvailabilitySecrets, - BasicWitnessInputProducerConfig, ContractsConfig, DatabaseSecrets, ExperimentalVmConfig, - ExternalPriceApiClientConfig, FriProofCompressorConfig, FriProverConfig, - FriProverGatewayConfig, FriWitnessGeneratorConfig, FriWitnessVectorGeneratorConfig, - L1Secrets, ObservabilityConfig, PrometheusConfig, ProofDataHandlerConfig, - ProtectiveReadsWriterConfig, Secrets, + BasicWitnessInputProducerConfig, ContractsConfig, DataAvailabilitySecrets, DatabaseSecrets, + ExperimentalVmConfig, ExternalPriceApiClientConfig, FriProofCompressorConfig, + FriProverConfig, FriProverGatewayConfig, FriWitnessGeneratorConfig, + FriWitnessVectorGeneratorConfig, L1Secrets, ObservabilityConfig, PrometheusConfig, + ProofDataHandlerConfig, ProtectiveReadsWriterConfig, Secrets, }, ApiConfig, BaseTokenAdjusterConfig, ContractVerifierConfig, DAClientConfig, DADispatcherConfig, DBConfig, EthConfig, EthWatchConfig, ExternalProofIntegrationApiConfig, GasAdjusterConfig, diff --git a/core/bin/zksync_server/src/node_builder.rs b/core/bin/zksync_server/src/node_builder.rs index 19edef6e4eec..c9d99cc0783f 100644 --- a/core/bin/zksync_server/src/node_builder.rs +++ b/core/bin/zksync_server/src/node_builder.rs @@ -1,7 +1,7 @@ //! This module provides a "builder" for the main node, //! as well as an interface to run the node with the specified components. 
-use anyhow::Context; +use anyhow::{bail, Context}; use zksync_config::{ configs::{ da_client::DAClientConfig, secrets::DataAvailabilitySecrets, wallets::Wallets, @@ -26,7 +26,7 @@ use zksync_node_framework::{ consensus::MainNodeConsensusLayer, contract_verification_api::ContractVerificationApiLayer, da_clients::{ - avail::AvailWiringLayer, no_da::NoDAClientWiringLayer, + avail::AvailWiringLayer, celestia::CelestiaWiringLayer, no_da::NoDAClientWiringLayer, object_store::ObjectStorageClientWiringLayer, }, da_dispatcher::DataAvailabilityDispatcherLayer, @@ -507,16 +507,21 @@ impl MainNodeBuilder { }; let secrets = try_load_config!(self.secrets.data_availability); - match (da_client_config, secrets) { (DAClientConfig::Avail(config), DataAvailabilitySecrets::Avail(secret)) => { self.node.add_layer(AvailWiringLayer::new(config, secret)); } + (DAClientConfig::Celestia(config), DataAvailabilitySecrets::Celestia(secret)) => { + self.node + .add_layer(CelestiaWiringLayer::new(config, secret)); + } + (DAClientConfig::ObjectStore(config), _) => { self.node .add_layer(ObjectStorageClientWiringLayer::new(config)); } + _ => bail!("invalid pair of da_client and da_secrets"), } Ok(self) diff --git a/core/lib/basic_types/src/api_key.rs b/core/lib/basic_types/src/api_key.rs deleted file mode 100644 index eadf4e9051b5..000000000000 --- a/core/lib/basic_types/src/api_key.rs +++ /dev/null @@ -1,20 +0,0 @@ -use std::str::FromStr; - -use secrecy::{ExposeSecret, Secret}; - -#[derive(Debug, Clone)] -pub struct APIKey(pub Secret); - -impl PartialEq for APIKey { - fn eq(&self, other: &Self) -> bool { - self.0.expose_secret().eq(other.0.expose_secret()) - } -} - -impl FromStr for APIKey { - type Err = anyhow::Error; - - fn from_str(s: &str) -> Result { - Ok(APIKey(s.parse()?)) - } -} diff --git a/core/lib/basic_types/src/lib.rs b/core/lib/basic_types/src/lib.rs index 7953f362fd42..1b462fdf77d1 100644 --- a/core/lib/basic_types/src/lib.rs +++ b/core/lib/basic_types/src/lib.rs @@ -24,14 +24,13 @@ use serde::{de, Deserialize, Deserializer, Serialize}; #[macro_use] mod macros; -pub mod api_key; pub mod basic_fri_types; pub mod commitment; pub mod network; pub mod protocol_version; pub mod prover_dal; pub mod pubdata_da; -pub mod seed_phrase; +pub mod secrets; pub mod settlement; pub mod tee_types; pub mod url; diff --git a/core/lib/basic_types/src/secrets.rs b/core/lib/basic_types/src/secrets.rs new file mode 100644 index 000000000000..b3627470660c --- /dev/null +++ b/core/lib/basic_types/src/secrets.rs @@ -0,0 +1,54 @@ +use std::str::FromStr; + +use secrecy::{ExposeSecret, Secret}; + +#[derive(Debug, Clone)] +pub struct SeedPhrase(pub Secret); + +impl PartialEq for SeedPhrase { + fn eq(&self, other: &Self) -> bool { + self.0.expose_secret().eq(other.0.expose_secret()) + } +} + +impl FromStr for SeedPhrase { + type Err = anyhow::Error; + + fn from_str(s: &str) -> Result { + Ok(SeedPhrase(s.parse()?)) + } +} + +#[derive(Debug, Clone)] +pub struct PrivateKey(pub Secret); + +impl PartialEq for PrivateKey { + fn eq(&self, other: &Self) -> bool { + self.0.expose_secret().eq(other.0.expose_secret()) + } +} + +impl FromStr for PrivateKey { + type Err = anyhow::Error; + + fn from_str(s: &str) -> Result { + Ok(PrivateKey(s.parse()?)) + } +} + +#[derive(Debug, Clone)] +pub struct APIKey(pub Secret); + +impl PartialEq for APIKey { + fn eq(&self, other: &Self) -> bool { + self.0.expose_secret().eq(other.0.expose_secret()) + } +} + +impl FromStr for APIKey { + type Err = anyhow::Error; + + fn from_str(s: &str) -> Result { + 
Ok(APIKey(s.parse()?)) + } +} diff --git a/core/lib/basic_types/src/seed_phrase.rs b/core/lib/basic_types/src/seed_phrase.rs deleted file mode 100644 index 332bfd585945..000000000000 --- a/core/lib/basic_types/src/seed_phrase.rs +++ /dev/null @@ -1,20 +0,0 @@ -use std::str::FromStr; - -use secrecy::{ExposeSecret, Secret}; - -#[derive(Debug, Clone)] -pub struct SeedPhrase(pub Secret); - -impl PartialEq for SeedPhrase { - fn eq(&self, other: &Self) -> bool { - self.0.expose_secret().eq(other.0.expose_secret()) - } -} - -impl FromStr for SeedPhrase { - type Err = anyhow::Error; - - fn from_str(s: &str) -> Result { - Ok(SeedPhrase(s.parse()?)) - } -} diff --git a/core/lib/config/src/configs/da_client/avail.rs b/core/lib/config/src/configs/da_client/avail.rs index b8e9db0f3937..3993656d667a 100644 --- a/core/lib/config/src/configs/da_client/avail.rs +++ b/core/lib/config/src/configs/da_client/avail.rs @@ -1,5 +1,5 @@ use serde::Deserialize; -use zksync_basic_types::{api_key::APIKey, seed_phrase::SeedPhrase}; +use zksync_basic_types::secrets::{APIKey, SeedPhrase}; pub const AVAIL_GAS_RELAY_CLIENT_NAME: &str = "GasRelay"; pub const AVAIL_FULL_CLIENT_NAME: &str = "FullClient"; @@ -14,7 +14,7 @@ pub enum AvailClientConfig { #[derive(Clone, Debug, PartialEq, Deserialize)] pub struct AvailConfig { pub bridge_api_url: String, - pub timeout: usize, + pub timeout_ms: usize, #[serde(flatten)] pub config: AvailClientConfig, } diff --git a/core/lib/config/src/configs/da_client/celestia.rs b/core/lib/config/src/configs/da_client/celestia.rs new file mode 100644 index 000000000000..45810e0381e8 --- /dev/null +++ b/core/lib/config/src/configs/da_client/celestia.rs @@ -0,0 +1,15 @@ +use serde::Deserialize; +use zksync_basic_types::secrets::PrivateKey; + +#[derive(Clone, Debug, Default, PartialEq, Deserialize)] +pub struct CelestiaConfig { + pub api_node_url: String, + pub namespace: String, + pub chain_id: String, + pub timeout_ms: u64, +} + +#[derive(Clone, Debug, PartialEq)] +pub struct CelestiaSecrets { + pub private_key: PrivateKey, +} diff --git a/core/lib/config/src/configs/da_client/mod.rs b/core/lib/config/src/configs/da_client/mod.rs index 406305a77b16..4806d7ed0996 100644 --- a/core/lib/config/src/configs/da_client/mod.rs +++ b/core/lib/config/src/configs/da_client/mod.rs @@ -1,12 +1,15 @@ -use crate::{AvailConfig, ObjectStoreConfig}; +use crate::{AvailConfig, CelestiaConfig, ObjectStoreConfig}; pub mod avail; +pub mod celestia; pub const AVAIL_CLIENT_CONFIG_NAME: &str = "Avail"; +pub const CELESTIA_CLIENT_CONFIG_NAME: &str = "Celestia"; pub const OBJECT_STORE_CLIENT_CONFIG_NAME: &str = "ObjectStore"; #[derive(Debug, Clone, PartialEq)] pub enum DAClientConfig { Avail(AvailConfig), + Celestia(CelestiaConfig), ObjectStore(ObjectStoreConfig), } diff --git a/core/lib/config/src/configs/mod.rs b/core/lib/config/src/configs/mod.rs index b3a7c2913437..0c756ad95647 100644 --- a/core/lib/config/src/configs/mod.rs +++ b/core/lib/config/src/configs/mod.rs @@ -5,7 +5,7 @@ pub use self::{ commitment_generator::CommitmentGeneratorConfig, contract_verifier::ContractVerifierConfig, contracts::{ContractsConfig, EcosystemContracts}, - da_client::{avail::AvailConfig, DAClientConfig}, + da_client::{avail::AvailConfig, celestia::CelestiaConfig, DAClientConfig}, da_dispatcher::DADispatcherConfig, database::{DBConfig, PostgresConfig}, eth_sender::{EthConfig, GasAdjusterConfig}, @@ -25,7 +25,7 @@ pub use self::{ proof_data_handler::{ProofDataHandlerConfig, TeeConfig}, prover_job_monitor::ProverJobMonitorConfig, 
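Editor's aside (not part of the patch): the `timeout` → `timeout_ms` rename above changes the unit the Avail config carries, and the new `CelestiaConfig` follows the same millisecond convention. A minimal sketch of how such a field is meant to be consumed, using a simplified stand-in struct (only mirroring the fields shown above) and an illustrative endpoint value; the real call sites in this patch convert with `Duration::from_millis` in the same way:

```rust
use std::time::Duration;

/// Simplified stand-in for the `CelestiaConfig` added above (illustrative only).
#[allow(dead_code)]
struct CelestiaConfig {
    api_node_url: String,
    namespace: String,
    chain_id: String,
    timeout_ms: u64,
}

/// The field carries milliseconds, so consumers convert with
/// `Duration::from_millis` rather than `Duration::from_secs`.
fn request_timeout(config: &CelestiaConfig) -> Duration {
    Duration::from_millis(config.timeout_ms)
}

fn main() {
    let config = CelestiaConfig {
        api_node_url: "http://localhost:9090".to_string(), // hypothetical endpoint
        namespace: "0x1234567890abcdef".to_string(),
        chain_id: "mocha-4".to_string(),
        timeout_ms: 7_000,
    };
    assert_eq!(request_timeout(&config), Duration::from_secs(7));
}
```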
pruning::PruningConfig, - secrets::{DatabaseSecrets, L1Secrets, Secrets}, + secrets::{DataAvailabilitySecrets, DatabaseSecrets, L1Secrets, Secrets}, snapshot_recovery::SnapshotRecoveryConfig, snapshots_creator::SnapshotsCreatorConfig, utils::PrometheusConfig, diff --git a/core/lib/config/src/configs/secrets.rs b/core/lib/config/src/configs/secrets.rs index 779bad370659..4d95ae4d1ede 100644 --- a/core/lib/config/src/configs/secrets.rs +++ b/core/lib/config/src/configs/secrets.rs @@ -1,7 +1,10 @@ use anyhow::Context; use zksync_basic_types::url::SensitiveUrl; -use crate::configs::{consensus::ConsensusSecrets, da_client::avail::AvailSecrets}; +use crate::configs::{ + consensus::ConsensusSecrets, + da_client::{avail::AvailSecrets, celestia::CelestiaSecrets}, +}; #[derive(Debug, Clone, PartialEq)] pub struct DatabaseSecrets { @@ -18,6 +21,7 @@ pub struct L1Secrets { #[derive(Debug, Clone, PartialEq)] pub enum DataAvailabilitySecrets { Avail(AvailSecrets), + Celestia(CelestiaSecrets), } #[derive(Debug, Clone, PartialEq)] diff --git a/core/lib/config/src/lib.rs b/core/lib/config/src/lib.rs index 9191edc39822..c02f3e531b34 100644 --- a/core/lib/config/src/lib.rs +++ b/core/lib/config/src/lib.rs @@ -1,8 +1,8 @@ #![allow(clippy::upper_case_acronyms, clippy::derive_partial_eq_without_eq)] pub use crate::configs::{ - ApiConfig, AvailConfig, BaseTokenAdjusterConfig, ContractVerifierConfig, ContractsConfig, - DAClientConfig, DADispatcherConfig, DBConfig, EthConfig, EthWatchConfig, + ApiConfig, AvailConfig, BaseTokenAdjusterConfig, CelestiaConfig, ContractVerifierConfig, + ContractsConfig, DAClientConfig, DADispatcherConfig, DBConfig, EthConfig, EthWatchConfig, ExternalProofIntegrationApiConfig, GasAdjusterConfig, GenesisConfig, ObjectStoreConfig, PostgresConfig, SnapshotsCreatorConfig, }; diff --git a/core/lib/config/src/testonly.rs b/core/lib/config/src/testonly.rs index 21ff9e2351b6..45c776242630 100644 --- a/core/lib/config/src/testonly.rs +++ b/core/lib/config/src/testonly.rs @@ -3,13 +3,12 @@ use std::num::NonZeroUsize; use rand::{distributions::Distribution, Rng}; use secrecy::Secret; use zksync_basic_types::{ - api_key::APIKey, basic_fri_types::CircuitIdRoundTuple, commitment::L1BatchCommitmentMode, network::Network, protocol_version::{ProtocolSemanticVersion, ProtocolVersionId, VersionPatch}, pubdata_da::PubdataSendingMode, - seed_phrase::SeedPhrase, + secrets::{APIKey, SeedPhrase}, vm::FastVmMode, L1BatchNumber, L1ChainId, L2ChainId, }; @@ -949,7 +948,7 @@ impl Distribution for EncodeDist { fn sample(&self, rng: &mut R) -> configs::da_client::DAClientConfig { Avail(AvailConfig { bridge_api_url: self.sample(rng), - timeout: self.sample(rng), + timeout_ms: self.sample(rng), config: AvailClientConfig::FullClient(AvailDefaultConfig { api_node_url: self.sample(rng), app_id: self.sample(rng), diff --git a/core/lib/env_config/src/da_client.rs b/core/lib/env_config/src/da_client.rs index 1043786fc1eb..70819a706427 100644 --- a/core/lib/env_config/src/da_client.rs +++ b/core/lib/env_config/src/da_client.rs @@ -5,7 +5,9 @@ use zksync_config::configs::{ avail::{ AvailClientConfig, AvailSecrets, AVAIL_FULL_CLIENT_NAME, AVAIL_GAS_RELAY_CLIENT_NAME, }, - DAClientConfig, AVAIL_CLIENT_CONFIG_NAME, OBJECT_STORE_CLIENT_CONFIG_NAME, + celestia::CelestiaSecrets, + DAClientConfig, AVAIL_CLIENT_CONFIG_NAME, CELESTIA_CLIENT_CONFIG_NAME, + OBJECT_STORE_CLIENT_CONFIG_NAME, }, secrets::DataAvailabilitySecrets, AvailConfig, @@ -19,7 +21,7 @@ impl FromEnv for DAClientConfig { let config = match client_tag.as_str() { 
AVAIL_CLIENT_CONFIG_NAME => Self::Avail(AvailConfig { bridge_api_url: env::var("DA_BRIDGE_API_URL").ok().unwrap(), - timeout: env::var("DA_TIMEOUT")?.parse()?, + timeout_ms: env::var("DA_TIMEOUT_MS")?.parse()?, config: match env::var("DA_AVAIL_CLIENT_TYPE")?.as_str() { AVAIL_FULL_CLIENT_NAME => { AvailClientConfig::FullClient(envy_load("da_avail_full_client", "DA_")?) @@ -30,6 +32,7 @@ impl FromEnv for DAClientConfig { _ => anyhow::bail!("Unknown Avail DA client type"), }, }), + CELESTIA_CLIENT_CONFIG_NAME => Self::Celestia(envy_load("da_celestia_config", "DA_")?), OBJECT_STORE_CLIENT_CONFIG_NAME => { Self::ObjectStore(envy_load("da_object_store", "DA_")?) } @@ -45,11 +48,11 @@ impl FromEnv for DataAvailabilitySecrets { let client_tag = std::env::var("DA_CLIENT")?; let secrets = match client_tag.as_str() { AVAIL_CLIENT_CONFIG_NAME => { - let seed_phrase: Option = + let seed_phrase: Option = env::var("DA_SECRETS_SEED_PHRASE") .ok() .map(|s| s.parse().unwrap()); - let gas_relay_api_key: Option = + let gas_relay_api_key: Option = env::var("DA_SECRETS_GAS_RELAY_API_KEY") .ok() .map(|s| s.parse().unwrap()); @@ -61,6 +64,14 @@ impl FromEnv for DataAvailabilitySecrets { gas_relay_api_key, }) } + CELESTIA_CLIENT_CONFIG_NAME => { + let private_key = env::var("DA_SECRETS_PRIVATE_KEY") + .map_err(|e| anyhow::format_err!("private key not found: {}", e))? + .parse() + .map_err(|e| anyhow::format_err!("failed to parse the auth token: {}", e))?; + Self::Celestia(CelestiaSecrets { private_key }) + } + _ => anyhow::bail!("Unknown DA client name: {}", client_tag), }; @@ -78,7 +89,7 @@ mod tests { }, object_store::ObjectStoreMode::GCS, }, - AvailConfig, ObjectStoreConfig, + AvailConfig, CelestiaConfig, ObjectStoreConfig, }; use super::*; @@ -118,11 +129,11 @@ mod tests { api_node_url: &str, bridge_api_url: &str, app_id: u32, - timeout: usize, + timeout_ms: usize, ) -> DAClientConfig { DAClientConfig::Avail(AvailConfig { bridge_api_url: bridge_api_url.to_string(), - timeout, + timeout_ms, config: AvailClientConfig::FullClient(AvailDefaultConfig { api_node_url: api_node_url.to_string(), app_id, @@ -138,7 +149,7 @@ mod tests { DA_AVAIL_CLIENT_TYPE="FullClient" DA_BRIDGE_API_URL="localhost:54321" - DA_TIMEOUT="2" + DA_TIMEOUT_MS="2000" DA_API_NODE_URL="localhost:12345" DA_APP_ID="1" @@ -153,7 +164,7 @@ mod tests { "localhost:12345", "localhost:54321", "1".parse::().unwrap(), - "2".parse::().unwrap(), + "2000".parse::().unwrap(), ) ); } @@ -170,8 +181,10 @@ mod tests { let (actual_seed, actual_key) = match DataAvailabilitySecrets::from_env().unwrap() { DataAvailabilitySecrets::Avail(avail) => (avail.seed_phrase, avail.gas_relay_api_key), + _ => { + panic!("Avail config expected") + } }; - assert_eq!( (actual_seed.unwrap(), actual_key), ( @@ -182,4 +195,65 @@ mod tests { ) ); } + + fn expected_celestia_da_layer_config( + api_node_url: &str, + namespace: &str, + chain_id: &str, + timeout_ms: u64, + ) -> DAClientConfig { + DAClientConfig::Celestia(CelestiaConfig { + api_node_url: api_node_url.to_string(), + namespace: namespace.to_string(), + chain_id: chain_id.to_string(), + timeout_ms, + }) + } + + #[test] + fn from_env_celestia_client() { + let mut lock = MUTEX.lock(); + let config = r#" + DA_CLIENT="Celestia" + DA_API_NODE_URL="localhost:12345" + DA_NAMESPACE="0x1234567890abcdef" + DA_CHAIN_ID="mocha-4" + DA_TIMEOUT_MS="7000" + "#; + lock.set_env(config); + + let actual = DAClientConfig::from_env().unwrap(); + assert_eq!( + actual, + expected_celestia_da_layer_config( + "localhost:12345", + "0x1234567890abcdef", 
+ "mocha-4", + 7000 + ) + ); + } + + #[test] + fn from_env_celestia_secrets() { + let mut lock = MUTEX.lock(); + let config = r#" + DA_CLIENT="Celestia" + DA_SECRETS_PRIVATE_KEY="f55baf7c0e4e33b1d78fbf52f069c426bc36cff1aceb9bc8f45d14c07f034d73" + "#; + + lock.set_env(config); + + let DataAvailabilitySecrets::Celestia(actual) = + DataAvailabilitySecrets::from_env().unwrap() + else { + panic!("expected Celestia config") + }; + assert_eq!( + actual.private_key, + "f55baf7c0e4e33b1d78fbf52f069c426bc36cff1aceb9bc8f45d14c07f034d73" + .parse() + .unwrap() + ); + } } diff --git a/core/lib/env_config/src/database.rs b/core/lib/env_config/src/database.rs index c067c96de73e..119d64b7738c 100644 --- a/core/lib/env_config/src/database.rs +++ b/core/lib/env_config/src/database.rs @@ -1,23 +1,8 @@ -use std::{env, error, str::FromStr}; +use std::env; -use anyhow::Context as _; use zksync_config::{configs::DatabaseSecrets, DBConfig, PostgresConfig}; -use crate::{envy_load, FromEnv}; - -fn parse_optional_var(name: &str) -> anyhow::Result> -where - T: FromStr, - T::Err: 'static + error::Error + Send + Sync, -{ - env::var(name) - .ok() - .map(|val| { - val.parse() - .with_context(|| format!("failed to parse env variable {name}")) - }) - .transpose() -} +use crate::{envy_load, utils::parse_optional_var, FromEnv}; impl FromEnv for DBConfig { fn from_env() -> anyhow::Result { diff --git a/core/lib/env_config/src/utils.rs b/core/lib/env_config/src/utils.rs index 211e73ae2b17..9f363777bf69 100644 --- a/core/lib/env_config/src/utils.rs +++ b/core/lib/env_config/src/utils.rs @@ -1,3 +1,6 @@ +use std::{env, error, str::FromStr}; + +use anyhow::Context; use zksync_config::configs::PrometheusConfig; use crate::{envy_load, FromEnv}; @@ -7,3 +10,17 @@ impl FromEnv for PrometheusConfig { envy_load("prometheus", "API_PROMETHEUS_") } } + +pub fn parse_optional_var(name: &str) -> anyhow::Result> +where + T: FromStr, + T::Err: 'static + error::Error + Send + Sync, +{ + env::var(name) + .ok() + .map(|val| { + val.parse() + .with_context(|| format!("failed to parse env variable {name}")) + }) + .transpose() +} diff --git a/core/lib/protobuf_config/src/da_client.rs b/core/lib/protobuf_config/src/da_client.rs index a17a8711a27b..e175a671c3ce 100644 --- a/core/lib/protobuf_config/src/da_client.rs +++ b/core/lib/protobuf_config/src/da_client.rs @@ -3,7 +3,8 @@ use zksync_config::configs::{ self, da_client::{ avail::{AvailClientConfig, AvailConfig, AvailDefaultConfig, AvailGasRelayConfig}, - DAClientConfig::{Avail, ObjectStore}, + celestia::CelestiaConfig, + DAClientConfig::{Avail, Celestia, ObjectStore}, }, }; use zksync_protobuf::{required, ProtoRepr}; @@ -21,7 +22,7 @@ impl ProtoRepr for proto::DataAvailabilityClient { bridge_api_url: required(&conf.bridge_api_url) .context("bridge_api_url")? .clone(), - timeout: *required(&conf.timeout).context("timeout")? as usize, + timeout_ms: *required(&conf.timeout_ms).context("timeout_ms")? 
as usize, config: match conf.config.as_ref() { Some(proto::avail_config::Config::FullClient(full_client_conf)) => { AvailClientConfig::FullClient(AvailDefaultConfig { @@ -44,6 +45,12 @@ impl ProtoRepr for proto::DataAvailabilityClient { None => return Err(anyhow::anyhow!("Invalid Avail DA configuration")), }, }), + proto::data_availability_client::Config::Celestia(conf) => Celestia(CelestiaConfig { + api_node_url: required(&conf.api_node_url).context("namespace")?.clone(), + namespace: required(&conf.namespace).context("namespace")?.clone(), + chain_id: required(&conf.chain_id).context("chain_id")?.clone(), + timeout_ms: *required(&conf.timeout_ms).context("timeout_ms")?, + }), proto::data_availability_client::Config::ObjectStore(conf) => { ObjectStore(object_store_proto::ObjectStore::read(conf)?) } @@ -53,34 +60,41 @@ impl ProtoRepr for proto::DataAvailabilityClient { } fn build(this: &Self::Type) -> Self { - match &this { - Avail(config) => Self { - config: Some(proto::data_availability_client::Config::Avail( - proto::AvailConfig { - bridge_api_url: Some(config.bridge_api_url.clone()), - timeout: Some(config.timeout as u64), - config: match &config.config { - AvailClientConfig::FullClient(conf) => Some( - proto::avail_config::Config::FullClient(proto::AvailClientConfig { - api_node_url: Some(conf.api_node_url.clone()), - app_id: Some(conf.app_id), - }), - ), - AvailClientConfig::GasRelay(conf) => Some( - proto::avail_config::Config::GasRelay(proto::AvailGasRelayConfig { - gas_relay_api_url: Some(conf.gas_relay_api_url.clone()), - max_retries: Some(conf.max_retries as u64), - }), - ), - }, - }, - )), - }, - ObjectStore(config) => Self { - config: Some(proto::data_availability_client::Config::ObjectStore( - object_store_proto::ObjectStore::build(config), - )), - }, + let config = match &this { + Avail(config) => proto::data_availability_client::Config::Avail(proto::AvailConfig { + bridge_api_url: Some(config.bridge_api_url.clone()), + timeout_ms: Some(config.timeout_ms as u64), + config: match &config.config { + AvailClientConfig::FullClient(conf) => Some( + proto::avail_config::Config::FullClient(proto::AvailClientConfig { + api_node_url: Some(conf.api_node_url.clone()), + app_id: Some(conf.app_id), + }), + ), + AvailClientConfig::GasRelay(conf) => Some( + proto::avail_config::Config::GasRelay(proto::AvailGasRelayConfig { + gas_relay_api_url: Some(conf.gas_relay_api_url.clone()), + max_retries: Some(conf.max_retries as u64), + }), + ), + }, + }), + + Celestia(config) => { + proto::data_availability_client::Config::Celestia(proto::CelestiaConfig { + api_node_url: Some(config.api_node_url.clone()), + namespace: Some(config.namespace.clone()), + chain_id: Some(config.chain_id.clone()), + timeout_ms: Some(config.timeout_ms), + }) + } + ObjectStore(config) => proto::data_availability_client::Config::ObjectStore( + object_store_proto::ObjectStore::build(config), + ), + }; + + Self { + config: Some(config), } } } diff --git a/core/lib/protobuf_config/src/proto/config/da_client.proto b/core/lib/protobuf_config/src/proto/config/da_client.proto index 73fa2435996f..206b1d05c04e 100644 --- a/core/lib/protobuf_config/src/proto/config/da_client.proto +++ b/core/lib/protobuf_config/src/proto/config/da_client.proto @@ -6,14 +6,16 @@ import "zksync/config/object_store.proto"; message AvailConfig { optional string bridge_api_url = 2; - optional uint64 timeout = 5; oneof config { AvailClientConfig full_client = 7; AvailGasRelayConfig gas_relay = 8; } + optional uint64 timeout_ms = 9; + reserved 1; reserved 
"api_node_url"; reserved 3; reserved "seed"; reserved 4; reserved "app_id"; + reserved 5; reserved "timeout"; reserved 6; reserved "max_retries"; } @@ -27,10 +29,18 @@ message AvailGasRelayConfig { optional uint64 max_retries = 2; } +message CelestiaConfig { + optional string api_node_url = 1; + optional string namespace = 2; + optional string chain_id = 3; + optional uint64 timeout_ms = 4; +} + message DataAvailabilityClient { // oneof in protobuf allows for None oneof config { AvailConfig avail = 1; object_store.ObjectStore object_store = 2; + CelestiaConfig celestia = 3; } } diff --git a/core/lib/protobuf_config/src/proto/config/secrets.proto b/core/lib/protobuf_config/src/proto/config/secrets.proto index 43c4542783c7..145a8cf0c45f 100644 --- a/core/lib/protobuf_config/src/proto/config/secrets.proto +++ b/core/lib/protobuf_config/src/proto/config/secrets.proto @@ -24,9 +24,14 @@ message AvailSecret { optional string gas_relay_api_key = 2; } +message CelestiaSecret { + optional string private_key = 1; +} + message DataAvailabilitySecrets { oneof da_secrets { AvailSecret avail = 1; + CelestiaSecret celestia = 2; } } diff --git a/core/lib/protobuf_config/src/secrets.rs b/core/lib/protobuf_config/src/secrets.rs index 07ab340c2313..d9cdf3384899 100644 --- a/core/lib/protobuf_config/src/secrets.rs +++ b/core/lib/protobuf_config/src/secrets.rs @@ -2,20 +2,20 @@ use std::str::FromStr; use anyhow::Context; use secrecy::ExposeSecret; -use zksync_basic_types::{api_key::APIKey, seed_phrase::SeedPhrase, url::SensitiveUrl}; +use zksync_basic_types::{ + secrets::{APIKey, PrivateKey, SeedPhrase}, + url::SensitiveUrl, +}; use zksync_config::configs::{ consensus::{AttesterSecretKey, ConsensusSecrets, NodeSecretKey, ValidatorSecretKey}, - da_client::avail::AvailSecrets, + da_client::{avail::AvailSecrets, celestia::CelestiaSecrets}, secrets::{DataAvailabilitySecrets, Secrets}, DatabaseSecrets, L1Secrets, }; use zksync_protobuf::{required, ProtoRepr}; use crate::{ - proto::{ - secrets as proto, - secrets::{data_availability_secrets::DaSecrets, AvailSecret}, - }, + proto::{secrets as proto, secrets::data_availability_secrets::DaSecrets}, read_optional_repr, }; @@ -128,6 +128,11 @@ impl ProtoRepr for proto::DataAvailabilitySecrets { gas_relay_api_key, }) } + DaSecrets::Celestia(celestia) => DataAvailabilitySecrets::Celestia(CelestiaSecrets { + private_key: PrivateKey::from_str( + required(&celestia.private_key).context("private_key")?, + )?, + }), }; Ok(client) @@ -164,11 +169,16 @@ impl ProtoRepr for proto::DataAvailabilitySecrets { None }; - Some(DaSecrets::Avail(AvailSecret { + Some(DaSecrets::Avail(proto::AvailSecret { seed_phrase, gas_relay_api_key, })) } + DataAvailabilitySecrets::Celestia(config) => { + Some(DaSecrets::Celestia(proto::CelestiaSecret { + private_key: Some(config.private_key.0.expose_secret().to_string()), + })) + } }; Self { diff --git a/core/node/da_clients/Cargo.toml b/core/node/da_clients/Cargo.toml index fa2f15920bd0..da5cd4effa68 100644 --- a/core/node/da_clients/Cargo.toml +++ b/core/node/da_clients/Cargo.toml @@ -33,10 +33,21 @@ base58.workspace = true serde_json.workspace = true hex.workspace = true blake2b_simd.workspace = true - -jsonrpsee = { workspace = true, features = ["ws-client"] } parity-scale-codec = { workspace = true, features = ["derive"] } subxt-signer = { workspace = true, features = ["sr25519", "native"] } +jsonrpsee = { workspace = true, features = ["ws-client"] } reqwest = { workspace = true } bytes = { workspace = true } backon.workspace = true + +# Celestia 
dependencies +http.workspace = true +bincode.workspace = true +celestia-types.workspace = true +secp256k1.workspace = true +sha2.workspace = true +prost.workspace = true +bech32.workspace = true +ripemd.workspace = true +tonic.workspace = true +pbjson-types.workspace = true diff --git a/core/node/da_clients/src/avail/client.rs b/core/node/da_clients/src/avail/client.rs index 46d652d57137..c0ead429d91a 100644 --- a/core/node/da_clients/src/avail/client.rs +++ b/core/node/da_clients/src/avail/client.rs @@ -16,7 +16,10 @@ use zksync_types::{ H256, U256, }; -use crate::avail::sdk::{GasRelayClient, RawAvailClient}; +use crate::{ + avail::sdk::{GasRelayClient, RawAvailClient}, + utils::{to_non_retriable_da_error, to_retriable_da_error}, +}; #[derive(Debug, Clone)] enum AvailClientMode { @@ -192,7 +195,7 @@ impl DataAvailabilityClient for AvailClient { let response = self .api_client .get(&url) - .timeout(Duration::from_secs(self.config.timeout as u64)) + .timeout(Duration::from_millis(self.config.timeout_ms as u64)) .send() .await .map_err(to_retriable_da_error)?; @@ -225,17 +228,3 @@ impl DataAvailabilityClient for AvailClient { Some(RawAvailClient::MAX_BLOB_SIZE) } } - -pub fn to_non_retriable_da_error(error: impl Into) -> DAError { - DAError { - error: error.into(), - is_retriable: false, - } -} - -pub fn to_retriable_da_error(error: impl Into) -> DAError { - DAError { - error: error.into(), - is_retriable: true, - } -} diff --git a/core/node/da_clients/src/avail/sdk.rs b/core/node/da_clients/src/avail/sdk.rs index f693280ba4a9..19309dc3cbf3 100644 --- a/core/node/da_clients/src/avail/sdk.rs +++ b/core/node/da_clients/src/avail/sdk.rs @@ -18,7 +18,7 @@ use subxt_signer::{ }; use zksync_types::H256; -use crate::avail::client::to_non_retriable_da_error; +use crate::utils::to_non_retriable_da_error; const PROTOCOL_VERSION: u8 = 4; diff --git a/core/node/da_clients/src/celestia/README.md b/core/node/da_clients/src/celestia/README.md new file mode 100644 index 000000000000..a3142a7d7615 --- /dev/null +++ b/core/node/da_clients/src/celestia/README.md @@ -0,0 +1,19 @@ +# Celestia client + +--- + +This is an implementation of the Celestia client capable of sending the blobs to DA layer. Normally, the light client is +required to send the blobs to Celestia, but this implementation is capable of sending the blobs to DA layer directly. + +This is a simplified and adapted version of astria's code, look +[here](https://github.com/astriaorg/astria/tree/main/crates/astria-sequencer-relayer) for original implementation. + +The generated files are copied from +[here](https://github.com/astriaorg/astria/tree/main/crates/astria-core/src/generated), which is not perfect, but allows +us to use them without adding the proto files and the infrastructure to generate the `.rs`. + +While moving the files, the `#[cfg(feature = "client")]` annotations were removed for simplicity, so client code is +available by default. + +If there is a need to generate the files from the proto files, the `tools/protobuf-compiler` from astria's repo can be +used. 
diff --git a/core/node/da_clients/src/celestia/client.rs b/core/node/da_clients/src/celestia/client.rs new file mode 100644 index 000000000000..df0735d4e1e4 --- /dev/null +++ b/core/node/da_clients/src/celestia/client.rs @@ -0,0 +1,109 @@ +use std::{ + fmt::{Debug, Formatter}, + str::FromStr, + sync::Arc, + time, +}; + +use async_trait::async_trait; +use celestia_types::{blob::Commitment, nmt::Namespace, Blob}; +use serde::{Deserialize, Serialize}; +use subxt_signer::ExposeSecret; +use tonic::transport::Endpoint; +use zksync_config::configs::da_client::celestia::{CelestiaConfig, CelestiaSecrets}; +use zksync_da_client::{ + types::{DAError, DispatchResponse, InclusionData}, + DataAvailabilityClient, +}; + +use crate::{ + celestia::sdk::{BlobTxHash, RawCelestiaClient}, + utils::to_non_retriable_da_error, +}; + +/// An implementation of the `DataAvailabilityClient` trait that interacts with the Avail network. +#[derive(Clone)] +pub struct CelestiaClient { + config: CelestiaConfig, + client: Arc, +} + +impl CelestiaClient { + pub async fn new(config: CelestiaConfig, secrets: CelestiaSecrets) -> anyhow::Result { + let grpc_channel = Endpoint::from_str(config.api_node_url.clone().as_str())? + .timeout(time::Duration::from_millis(config.timeout_ms)) + .connect() + .await?; + + let private_key = secrets.private_key.0.expose_secret().to_string(); + let client = RawCelestiaClient::new(grpc_channel, private_key, config.chain_id.clone()) + .expect("could not create Celestia client"); + + Ok(Self { + config, + client: Arc::new(client), + }) + } +} +#[derive(Serialize, Deserialize)] +pub struct BlobId { + pub commitment: Commitment, + pub height: u64, +} + +#[async_trait] +impl DataAvailabilityClient for CelestiaClient { + async fn dispatch_blob( + &self, + _: u32, // batch number + data: Vec, + ) -> Result { + let namespace_bytes = + hex::decode(&self.config.namespace).map_err(to_non_retriable_da_error)?; + let namespace = + Namespace::new_v0(namespace_bytes.as_slice()).map_err(to_non_retriable_da_error)?; + let blob = Blob::new(namespace, data).map_err(to_non_retriable_da_error)?; + + let commitment = blob.commitment; + let blob_tx = self + .client + .prepare(vec![blob]) + .await + .map_err(to_non_retriable_da_error)?; + + let blob_tx_hash = BlobTxHash::compute(&blob_tx); + let height = self + .client + .submit(blob_tx_hash, blob_tx) + .await + .map_err(to_non_retriable_da_error)?; + + let blob_id = BlobId { commitment, height }; + let blob_bytes = bincode::serialize(&blob_id).map_err(to_non_retriable_da_error)?; + + Ok(DispatchResponse { + blob_id: hex::encode(&blob_bytes), + }) + } + + async fn get_inclusion_data(&self, _: &str) -> Result, DAError> { + Ok(Some(InclusionData { data: vec![] })) + } + + fn clone_boxed(&self) -> Box { + Box::new(self.clone()) + } + + fn blob_size_limit(&self) -> Option { + Some(1973786) // almost 2MB + } +} + +impl Debug for CelestiaClient { + fn fmt(&self, f: &mut Formatter<'_>) -> std::fmt::Result { + f.debug_struct("CelestiaClient") + .field("config.api_node_url", &self.config.api_node_url) + .field("config.namespace", &self.config.namespace) + .finish() + } +} diff --git a/core/node/da_clients/src/celestia/generated/celestia.blob.v1.rs b/core/node/da_clients/src/celestia/generated/celestia.blob.v1.rs new file mode 100644 index 000000000000..ee6ed85655e2 --- /dev/null +++ b/core/node/da_clients/src/celestia/generated/celestia.blob.v1.rs @@ -0,0 +1,200 @@ +// This file is @generated by prost-build. +/// Params defines the parameters for the module. 
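Editor's aside (not part of the patch): the `dispatch_blob` implementation above returns the blob identifier as hex-encoded bincode. A minimal sketch of that encoding and how a consumer could reverse it, using a plain 32-byte array as a stand-in for `celestia_types::blob::Commitment`; types and values here are illustrative only:

```rust
use serde::{Deserialize, Serialize};

/// Simplified stand-in for the `BlobId` defined in `client.rs` above;
/// the real type stores a `celestia_types::blob::Commitment`.
#[derive(Serialize, Deserialize, Debug, PartialEq)]
struct BlobId {
    commitment: [u8; 32],
    height: u64,
}

fn main() -> anyhow::Result<()> {
    let blob_id = BlobId {
        commitment: [0x11; 32],
        height: 42,
    };

    // Same shape as the client: bincode-serialize the id, then hex-encode it.
    let dispatched = hex::encode(bincode::serialize(&blob_id)?);

    // A consumer of the dispatch response can reverse both steps.
    let recovered: BlobId = bincode::deserialize(&hex::decode(&dispatched)?)?;
    assert_eq!(recovered, blob_id);
    Ok(())
}
```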
+#[derive(Clone, Copy, PartialEq, ::prost::Message)] +pub struct Params { + #[prost(uint32, tag = "1")] + pub gas_per_blob_byte: u32, + #[prost(uint64, tag = "2")] + pub gov_max_square_size: u64, +} +impl ::prost::Name for Params { + const NAME: &'static str = "Params"; + const PACKAGE: &'static str = "celestia.blob.v1"; + fn full_name() -> ::prost::alloc::string::String { + "celestia.blob.v1.Params".into() + } + fn type_url() -> ::prost::alloc::string::String { + "/celestia.blob.v1.Params".into() + } +} +/// QueryParamsRequest is the request type for the Query/Params RPC method. +#[derive(Clone, Copy, PartialEq, ::prost::Message)] +pub struct QueryParamsRequest {} +impl ::prost::Name for QueryParamsRequest { + const NAME: &'static str = "QueryParamsRequest"; + const PACKAGE: &'static str = "celestia.blob.v1"; + fn full_name() -> ::prost::alloc::string::String { + "celestia.blob.v1.QueryParamsRequest".into() + } + fn type_url() -> ::prost::alloc::string::String { + "/celestia.blob.v1.QueryParamsRequest".into() + } +} +/// QueryParamsResponse is the response type for the Query/Params RPC method. +#[derive(Clone, Copy, PartialEq, ::prost::Message)] +pub struct QueryParamsResponse { + #[prost(message, optional, tag = "1")] + pub params: ::core::option::Option, +} +impl ::prost::Name for QueryParamsResponse { + const NAME: &'static str = "QueryParamsResponse"; + const PACKAGE: &'static str = "celestia.blob.v1"; + fn full_name() -> ::prost::alloc::string::String { + "celestia.blob.v1.QueryParamsResponse".into() + } + fn type_url() -> ::prost::alloc::string::String { + "/celestia.blob.v1.QueryParamsResponse".into() + } +} +/// Generated client implementations. +pub mod query_client { + #![allow( + unused_variables, + dead_code, + missing_docs, + clippy::wildcard_imports, + clippy::let_unit_value, + )] + use tonic::codegen::*; + use tonic::codegen::http::Uri; + /// Query defines the gRPC query service. + #[derive(Debug, Clone)] + pub struct QueryClient { + inner: tonic::client::Grpc, + } + impl QueryClient { + /// Attempt to create a new client by connecting to a given endpoint. + pub async fn connect(dst: D) -> Result + where + D: TryInto, + D::Error: Into, + { + let conn = tonic::transport::Endpoint::new(dst)?.connect().await?; + Ok(Self::new(conn)) + } + } + impl QueryClient + where + T: tonic::client::GrpcService, + T::Error: Into, + T::ResponseBody: Body + std::marker::Send + 'static, + ::Error: Into + std::marker::Send, + { + pub fn new(inner: T) -> Self { + let inner = tonic::client::Grpc::new(inner); + Self { inner } + } + pub fn with_origin(inner: T, origin: Uri) -> Self { + let inner = tonic::client::Grpc::with_origin(inner, origin); + Self { inner } + } + pub fn with_interceptor( + inner: T, + interceptor: F, + ) -> QueryClient> + where + F: tonic::service::Interceptor, + T::ResponseBody: Default, + T: tonic::codegen::Service< + http::Request, + Response = http::Response< + >::ResponseBody, + >, + >, + , + >>::Error: Into + std::marker::Send + std::marker::Sync, + { + QueryClient::new(InterceptedService::new(inner, interceptor)) + } + /// Compress requests with the given encoding. + /// + /// This requires the server to support it otherwise it might respond with an + /// error. + #[must_use] + pub fn send_compressed(mut self, encoding: CompressionEncoding) -> Self { + self.inner = self.inner.send_compressed(encoding); + self + } + /// Enable decompressing responses. 
+ #[must_use] + pub fn accept_compressed(mut self, encoding: CompressionEncoding) -> Self { + self.inner = self.inner.accept_compressed(encoding); + self + } + /// Limits the maximum size of a decoded message. + /// + /// Default: `4MB` + #[must_use] + pub fn max_decoding_message_size(mut self, limit: usize) -> Self { + self.inner = self.inner.max_decoding_message_size(limit); + self + } + /// Limits the maximum size of an encoded message. + /// + /// Default: `usize::MAX` + #[must_use] + pub fn max_encoding_message_size(mut self, limit: usize) -> Self { + self.inner = self.inner.max_encoding_message_size(limit); + self + } + /// Params queries the parameters of the module. + pub async fn params( + &mut self, + request: impl tonic::IntoRequest, + ) -> std::result::Result< + tonic::Response, + tonic::Status, + > { + self.inner + .ready() + .await + .map_err(|e| { + tonic::Status::unknown( + format!("Service was not ready: {}", e.into()), + ) + })?; + let codec = tonic::codec::ProstCodec::default(); + let path = http::uri::PathAndQuery::from_static( + "/celestia.blob.v1.Query/Params", + ); + let mut req = request.into_request(); + req.extensions_mut() + .insert(GrpcMethod::new("celestia.blob.v1.Query", "Params")); + self.inner.unary(req, path, codec).await + } + } +} + +/// MsgPayForBlobs pays for the inclusion of a blob in the block. +#[derive(Clone, PartialEq, ::prost::Message)] +pub struct MsgPayForBlobs { + #[prost(string, tag = "1")] + pub signer: ::prost::alloc::string::String, + /// namespaces is a list of namespaces that the blobs are associated with. A + /// namespace is a byte slice of length 29 where the first byte is the + /// namespaceVersion and the subsequent 28 bytes are the namespaceId. + #[prost(bytes = "bytes", repeated, tag = "2")] + pub namespaces: ::prost::alloc::vec::Vec<::prost::bytes::Bytes>, + #[prost(uint32, repeated, tag = "3")] + pub blob_sizes: ::prost::alloc::vec::Vec, + /// share_commitments is a list of share commitments (one per blob). + #[prost(bytes = "bytes", repeated, tag = "4")] + pub share_commitments: ::prost::alloc::vec::Vec<::prost::bytes::Bytes>, + /// share_versions are the versions of the share format that the blobs + /// associated with this message should use when included in a block. The + /// share_versions specified must match the share_versions used to generate the + /// share_commitment in this message. + #[prost(uint32, repeated, tag = "8")] + pub share_versions: ::prost::alloc::vec::Vec, +} +impl ::prost::Name for MsgPayForBlobs { + const NAME: &'static str = "MsgPayForBlobs"; + const PACKAGE: &'static str = "celestia.blob.v1"; + fn full_name() -> ::prost::alloc::string::String { + "celestia.blob.v1.MsgPayForBlobs".into() + } + fn type_url() -> ::prost::alloc::string::String { + "/celestia.blob.v1.MsgPayForBlobs".into() + } +} diff --git a/core/node/da_clients/src/celestia/generated/cosmos.auth.v1beta1.rs b/core/node/da_clients/src/celestia/generated/cosmos.auth.v1beta1.rs new file mode 100644 index 000000000000..98314985a8e6 --- /dev/null +++ b/core/node/da_clients/src/celestia/generated/cosmos.auth.v1beta1.rs @@ -0,0 +1,257 @@ +// This file is @generated by prost-build. +/// BaseAccount defines a base account type. It contains all the necessary fields +/// for basic account functionality. Any custom account type should extend this +/// type for additional functionality (e.g. vesting). 
+#[derive(Clone, PartialEq, ::prost::Message)] +pub struct BaseAccount { + #[prost(string, tag = "1")] + pub address: ::prost::alloc::string::String, + #[prost(message, optional, tag = "2")] + pub pub_key: ::core::option::Option<::pbjson_types::Any>, + #[prost(uint64, tag = "3")] + pub account_number: u64, + #[prost(uint64, tag = "4")] + pub sequence: u64, +} +impl ::prost::Name for BaseAccount { + const NAME: &'static str = "BaseAccount"; + const PACKAGE: &'static str = "cosmos.auth.v1beta1"; + fn full_name() -> ::prost::alloc::string::String { + "cosmos.auth.v1beta1.BaseAccount".into() + } + fn type_url() -> ::prost::alloc::string::String { + "/cosmos.auth.v1beta1.BaseAccount".into() + } +} +/// Params defines the parameters for the auth module. +#[derive(Clone, Copy, PartialEq, ::prost::Message)] +pub struct Params { + #[prost(uint64, tag = "1")] + pub max_memo_characters: u64, + #[prost(uint64, tag = "2")] + pub tx_sig_limit: u64, + #[prost(uint64, tag = "3")] + pub tx_size_cost_per_byte: u64, + #[prost(uint64, tag = "4")] + pub sig_verify_cost_ed25519: u64, + #[prost(uint64, tag = "5")] + pub sig_verify_cost_secp256k1: u64, +} +impl ::prost::Name for Params { + const NAME: &'static str = "Params"; + const PACKAGE: &'static str = "cosmos.auth.v1beta1"; + fn full_name() -> ::prost::alloc::string::String { + "cosmos.auth.v1beta1.Params".into() + } + fn type_url() -> ::prost::alloc::string::String { + "/cosmos.auth.v1beta1.Params".into() + } +} +/// QueryAccountRequest is the request type for the Query/Account RPC method. +#[derive(Clone, PartialEq, ::prost::Message)] +pub struct QueryAccountRequest { + /// address defines the address to query for. + #[prost(string, tag = "1")] + pub address: ::prost::alloc::string::String, +} +impl ::prost::Name for QueryAccountRequest { + const NAME: &'static str = "QueryAccountRequest"; + const PACKAGE: &'static str = "cosmos.auth.v1beta1"; + fn full_name() -> ::prost::alloc::string::String { + "cosmos.auth.v1beta1.QueryAccountRequest".into() + } + fn type_url() -> ::prost::alloc::string::String { + "/cosmos.auth.v1beta1.QueryAccountRequest".into() + } +} +/// QueryAccountResponse is the response type for the Query/Account RPC method. +#[derive(Clone, PartialEq, ::prost::Message)] +pub struct QueryAccountResponse { + /// account defines the account of the corresponding address. + #[prost(message, optional, tag = "1")] + pub account: ::core::option::Option<::pbjson_types::Any>, +} +impl ::prost::Name for QueryAccountResponse { + const NAME: &'static str = "QueryAccountResponse"; + const PACKAGE: &'static str = "cosmos.auth.v1beta1"; + fn full_name() -> ::prost::alloc::string::String { + "cosmos.auth.v1beta1.QueryAccountResponse".into() + } + fn type_url() -> ::prost::alloc::string::String { + "/cosmos.auth.v1beta1.QueryAccountResponse".into() + } +} +/// QueryParamsRequest is the request type for the Query/Params RPC method. +#[derive(Clone, Copy, PartialEq, ::prost::Message)] +pub struct QueryParamsRequest {} +impl ::prost::Name for QueryParamsRequest { + const NAME: &'static str = "QueryParamsRequest"; + const PACKAGE: &'static str = "cosmos.auth.v1beta1"; + fn full_name() -> ::prost::alloc::string::String { + "cosmos.auth.v1beta1.QueryParamsRequest".into() + } + fn type_url() -> ::prost::alloc::string::String { + "/cosmos.auth.v1beta1.QueryParamsRequest".into() + } +} +/// QueryParamsResponse is the response type for the Query/Params RPC method. 
+#[derive(Clone, Copy, PartialEq, ::prost::Message)] +pub struct QueryParamsResponse { + /// params defines the parameters of the module. + #[prost(message, optional, tag = "1")] + pub params: ::core::option::Option, +} +impl ::prost::Name for QueryParamsResponse { + const NAME: &'static str = "QueryParamsResponse"; + const PACKAGE: &'static str = "cosmos.auth.v1beta1"; + fn full_name() -> ::prost::alloc::string::String { + "cosmos.auth.v1beta1.QueryParamsResponse".into() + } + fn type_url() -> ::prost::alloc::string::String { + "/cosmos.auth.v1beta1.QueryParamsResponse".into() + } +} +/// Generated client implementations. +pub mod query_client { + #![allow( + unused_variables, + dead_code, + missing_docs, + clippy::wildcard_imports, + clippy::let_unit_value, + )] + use tonic::codegen::*; + use tonic::codegen::http::Uri; + /// Query defines the gRPC querier service. + #[derive(Debug, Clone)] + pub struct QueryClient { + inner: tonic::client::Grpc, + } + impl QueryClient { + /// Attempt to create a new client by connecting to a given endpoint. + pub async fn connect(dst: D) -> Result + where + D: TryInto, + D::Error: Into, + { + let conn = tonic::transport::Endpoint::new(dst)?.connect().await?; + Ok(Self::new(conn)) + } + } + impl QueryClient + where + T: tonic::client::GrpcService, + T::Error: Into, + T::ResponseBody: Body + std::marker::Send + 'static, + ::Error: Into + std::marker::Send, + { + pub fn new(inner: T) -> Self { + let inner = tonic::client::Grpc::new(inner); + Self { inner } + } + pub fn with_origin(inner: T, origin: Uri) -> Self { + let inner = tonic::client::Grpc::with_origin(inner, origin); + Self { inner } + } + pub fn with_interceptor( + inner: T, + interceptor: F, + ) -> QueryClient> + where + F: tonic::service::Interceptor, + T::ResponseBody: Default, + T: tonic::codegen::Service< + http::Request, + Response = http::Response< + >::ResponseBody, + >, + >, + , + >>::Error: Into + std::marker::Send + std::marker::Sync, + { + QueryClient::new(InterceptedService::new(inner, interceptor)) + } + /// Compress requests with the given encoding. + /// + /// This requires the server to support it otherwise it might respond with an + /// error. + #[must_use] + pub fn send_compressed(mut self, encoding: CompressionEncoding) -> Self { + self.inner = self.inner.send_compressed(encoding); + self + } + /// Enable decompressing responses. + #[must_use] + pub fn accept_compressed(mut self, encoding: CompressionEncoding) -> Self { + self.inner = self.inner.accept_compressed(encoding); + self + } + /// Limits the maximum size of a decoded message. + /// + /// Default: `4MB` + #[must_use] + pub fn max_decoding_message_size(mut self, limit: usize) -> Self { + self.inner = self.inner.max_decoding_message_size(limit); + self + } + /// Limits the maximum size of an encoded message. + /// + /// Default: `usize::MAX` + #[must_use] + pub fn max_encoding_message_size(mut self, limit: usize) -> Self { + self.inner = self.inner.max_encoding_message_size(limit); + self + } + /// Account returns account details based on address. 
+ pub async fn account( + &mut self, + request: impl tonic::IntoRequest, + ) -> std::result::Result< + tonic::Response, + tonic::Status, + > { + self.inner + .ready() + .await + .map_err(|e| { + tonic::Status::unknown( + format!("Service was not ready: {}", e.into()), + ) + })?; + let codec = tonic::codec::ProstCodec::default(); + let path = http::uri::PathAndQuery::from_static( + "/cosmos.auth.v1beta1.Query/Account", + ); + let mut req = request.into_request(); + req.extensions_mut() + .insert(GrpcMethod::new("cosmos.auth.v1beta1.Query", "Account")); + self.inner.unary(req, path, codec).await + } + /// Params queries all parameters. + pub async fn params( + &mut self, + request: impl tonic::IntoRequest, + ) -> std::result::Result< + tonic::Response, + tonic::Status, + > { + self.inner + .ready() + .await + .map_err(|e| { + tonic::Status::unknown( + format!("Service was not ready: {}", e.into()), + ) + })?; + let codec = tonic::codec::ProstCodec::default(); + let path = http::uri::PathAndQuery::from_static( + "/cosmos.auth.v1beta1.Query/Params", + ); + let mut req = request.into_request(); + req.extensions_mut() + .insert(GrpcMethod::new("cosmos.auth.v1beta1.Query", "Params")); + self.inner.unary(req, path, codec).await + } + } +} diff --git a/core/node/da_clients/src/celestia/generated/cosmos.base.abci.v1beta1.rs b/core/node/da_clients/src/celestia/generated/cosmos.base.abci.v1beta1.rs new file mode 100644 index 000000000000..6b0f9fc1956d --- /dev/null +++ b/core/node/da_clients/src/celestia/generated/cosmos.base.abci.v1beta1.rs @@ -0,0 +1,125 @@ +// This file is @generated by prost-build. +/// TxResponse defines a structure containing relevant tx data and metadata. The +/// tags are stringified and the log is JSON decoded. +#[derive(Clone, PartialEq, ::prost::Message)] +pub struct TxResponse { + /// The block height + #[prost(int64, tag = "1")] + pub height: i64, + /// The transaction hash. + #[prost(string, tag = "2")] + pub txhash: ::prost::alloc::string::String, + /// Namespace for the Code + #[prost(string, tag = "3")] + pub codespace: ::prost::alloc::string::String, + /// Response code. + #[prost(uint32, tag = "4")] + pub code: u32, + /// Result bytes, if any. + #[prost(string, tag = "5")] + pub data: ::prost::alloc::string::String, + /// The output of the application's logger (raw string). May be + /// non-deterministic. + #[prost(string, tag = "6")] + pub raw_log: ::prost::alloc::string::String, + /// The output of the application's logger (typed). May be non-deterministic. + #[prost(message, repeated, tag = "7")] + pub logs: ::prost::alloc::vec::Vec, + /// Additional information. May be non-deterministic. + #[prost(string, tag = "8")] + pub info: ::prost::alloc::string::String, + /// Amount of gas requested for transaction. + #[prost(int64, tag = "9")] + pub gas_wanted: i64, + /// Amount of gas consumed by transaction. + #[prost(int64, tag = "10")] + pub gas_used: i64, + /// The request transaction bytes. + #[prost(message, optional, tag = "11")] + pub tx: ::core::option::Option<::pbjson_types::Any>, + /// Time of the previous block. For heights > 1, it's the weighted median of + /// the timestamps of the valid votes in the block.LastCommit. For height == 1, + /// it's genesis time. + #[prost(string, tag = "12")] + pub timestamp: ::prost::alloc::string::String, + /// Events defines all the events emitted by processing a transaction. Note, + /// these events include those emitted by processing all the messages and those + /// emitted from the ante. 
Whereas Logs contains the events, with + /// additional metadata, emitted only by processing the messages. + /// + /// Since: cosmos-sdk 0.42.11, 0.44.5, 0.45 + #[prost(message, repeated, tag = "13")] + pub events: ::prost::alloc::vec::Vec< + super::super::super::tendermint::abci::Event, + >, +} +impl ::prost::Name for TxResponse { + const NAME: &'static str = "TxResponse"; + const PACKAGE: &'static str = "cosmos.base.abci.v1beta1"; + fn full_name() -> ::prost::alloc::string::String { + "cosmos.base.abci.v1beta1.TxResponse".into() + } + fn type_url() -> ::prost::alloc::string::String { + "/cosmos.base.abci.v1beta1.TxResponse".into() + } +} +/// ABCIMessageLog defines a structure containing an indexed tx ABCI message log. +#[derive(Clone, PartialEq, ::prost::Message)] +pub struct AbciMessageLog { + #[prost(uint32, tag = "1")] + pub msg_index: u32, + #[prost(string, tag = "2")] + pub log: ::prost::alloc::string::String, + /// Events contains a slice of Event objects that were emitted during some + /// execution. + #[prost(message, repeated, tag = "3")] + pub events: ::prost::alloc::vec::Vec, +} +impl ::prost::Name for AbciMessageLog { + const NAME: &'static str = "ABCIMessageLog"; + const PACKAGE: &'static str = "cosmos.base.abci.v1beta1"; + fn full_name() -> ::prost::alloc::string::String { + "cosmos.base.abci.v1beta1.ABCIMessageLog".into() + } + fn type_url() -> ::prost::alloc::string::String { + "/cosmos.base.abci.v1beta1.ABCIMessageLog".into() + } +} +/// StringEvent defines en Event object wrapper where all the attributes +/// contain key/value pairs that are strings instead of raw bytes. +#[derive(Clone, PartialEq, ::prost::Message)] +pub struct StringEvent { + #[prost(string, tag = "1")] + pub r#type: ::prost::alloc::string::String, + #[prost(message, repeated, tag = "2")] + pub attributes: ::prost::alloc::vec::Vec, +} +impl ::prost::Name for StringEvent { + const NAME: &'static str = "StringEvent"; + const PACKAGE: &'static str = "cosmos.base.abci.v1beta1"; + fn full_name() -> ::prost::alloc::string::String { + "cosmos.base.abci.v1beta1.StringEvent".into() + } + fn type_url() -> ::prost::alloc::string::String { + "/cosmos.base.abci.v1beta1.StringEvent".into() + } +} +/// Attribute defines an attribute wrapper where the key and value are +/// strings instead of raw bytes. +#[derive(Clone, PartialEq, ::prost::Message)] +pub struct Attribute { + #[prost(string, tag = "1")] + pub key: ::prost::alloc::string::String, + #[prost(string, tag = "2")] + pub value: ::prost::alloc::string::String, +} +impl ::prost::Name for Attribute { + const NAME: &'static str = "Attribute"; + const PACKAGE: &'static str = "cosmos.base.abci.v1beta1"; + fn full_name() -> ::prost::alloc::string::String { + "cosmos.base.abci.v1beta1.Attribute".into() + } + fn type_url() -> ::prost::alloc::string::String { + "/cosmos.base.abci.v1beta1.Attribute".into() + } +} diff --git a/core/node/da_clients/src/celestia/generated/cosmos.base.node.v1beta1.rs b/core/node/da_clients/src/celestia/generated/cosmos.base.node.v1beta1.rs new file mode 100644 index 000000000000..89bb519bd810 --- /dev/null +++ b/core/node/da_clients/src/celestia/generated/cosmos.base.node.v1beta1.rs @@ -0,0 +1,146 @@ +// This file is @generated by prost-build. +/// ConfigRequest defines the request structure for the Config gRPC query. 
+#[derive(Clone, Copy, PartialEq, ::prost::Message)] +pub struct ConfigRequest {} +impl ::prost::Name for ConfigRequest { + const NAME: &'static str = "ConfigRequest"; + const PACKAGE: &'static str = "cosmos.base.node.v1beta1"; + fn full_name() -> ::prost::alloc::string::String { + "cosmos.base.node.v1beta1.ConfigRequest".into() + } + fn type_url() -> ::prost::alloc::string::String { + "/cosmos.base.node.v1beta1.ConfigRequest".into() + } +} +/// ConfigResponse defines the response structure for the Config gRPC query. +#[derive(Clone, PartialEq, ::prost::Message)] +pub struct ConfigResponse { + #[prost(string, tag = "1")] + pub minimum_gas_price: ::prost::alloc::string::String, +} +impl ::prost::Name for ConfigResponse { + const NAME: &'static str = "ConfigResponse"; + const PACKAGE: &'static str = "cosmos.base.node.v1beta1"; + fn full_name() -> ::prost::alloc::string::String { + "cosmos.base.node.v1beta1.ConfigResponse".into() + } + fn type_url() -> ::prost::alloc::string::String { + "/cosmos.base.node.v1beta1.ConfigResponse".into() + } +} +/// Generated client implementations. +pub mod service_client { + #![allow( + unused_variables, + dead_code, + missing_docs, + clippy::wildcard_imports, + clippy::let_unit_value, + )] + use tonic::codegen::*; + use tonic::codegen::http::Uri; + /// Service defines the gRPC querier service for node related queries. + #[derive(Debug, Clone)] + pub struct ServiceClient { + inner: tonic::client::Grpc, + } + impl ServiceClient { + /// Attempt to create a new client by connecting to a given endpoint. + pub async fn connect(dst: D) -> Result + where + D: TryInto, + D::Error: Into, + { + let conn = tonic::transport::Endpoint::new(dst)?.connect().await?; + Ok(Self::new(conn)) + } + } + impl ServiceClient + where + T: tonic::client::GrpcService, + T::Error: Into, + T::ResponseBody: Body + std::marker::Send + 'static, + ::Error: Into + std::marker::Send, + { + pub fn new(inner: T) -> Self { + let inner = tonic::client::Grpc::new(inner); + Self { inner } + } + pub fn with_origin(inner: T, origin: Uri) -> Self { + let inner = tonic::client::Grpc::with_origin(inner, origin); + Self { inner } + } + pub fn with_interceptor( + inner: T, + interceptor: F, + ) -> ServiceClient> + where + F: tonic::service::Interceptor, + T::ResponseBody: Default, + T: tonic::codegen::Service< + http::Request, + Response = http::Response< + >::ResponseBody, + >, + >, + , + >>::Error: Into + std::marker::Send + std::marker::Sync, + { + ServiceClient::new(InterceptedService::new(inner, interceptor)) + } + /// Compress requests with the given encoding. + /// + /// This requires the server to support it otherwise it might respond with an + /// error. + #[must_use] + pub fn send_compressed(mut self, encoding: CompressionEncoding) -> Self { + self.inner = self.inner.send_compressed(encoding); + self + } + /// Enable decompressing responses. + #[must_use] + pub fn accept_compressed(mut self, encoding: CompressionEncoding) -> Self { + self.inner = self.inner.accept_compressed(encoding); + self + } + /// Limits the maximum size of a decoded message. + /// + /// Default: `4MB` + #[must_use] + pub fn max_decoding_message_size(mut self, limit: usize) -> Self { + self.inner = self.inner.max_decoding_message_size(limit); + self + } + /// Limits the maximum size of an encoded message. 
+ /// + /// Default: `usize::MAX` + #[must_use] + pub fn max_encoding_message_size(mut self, limit: usize) -> Self { + self.inner = self.inner.max_encoding_message_size(limit); + self + } + /// Config queries for the operator configuration. + pub async fn config( + &mut self, + request: impl tonic::IntoRequest, + ) -> std::result::Result, tonic::Status> { + self.inner + .ready() + .await + .map_err(|e| { + tonic::Status::unknown( + format!("Service was not ready: {}", e.into()), + ) + })?; + let codec = tonic::codec::ProstCodec::default(); + let path = http::uri::PathAndQuery::from_static( + "/cosmos.base.node.v1beta1.Service/Config", + ); + let mut req = request.into_request(); + req.extensions_mut() + .insert(GrpcMethod::new("cosmos.base.node.v1beta1.Service", "Config")); + self.inner.unary(req, path, codec).await + } + } +} diff --git a/core/node/da_clients/src/celestia/generated/cosmos.base.v1beta1.rs b/core/node/da_clients/src/celestia/generated/cosmos.base.v1beta1.rs new file mode 100644 index 000000000000..d13fb784d97a --- /dev/null +++ b/core/node/da_clients/src/celestia/generated/cosmos.base.v1beta1.rs @@ -0,0 +1,19 @@ +// This file is @generated by prost-build. +/// Coin defines a token with a denomination and an amount. +#[derive(Clone, PartialEq, ::prost::Message)] +pub struct Coin { + #[prost(string, tag = "1")] + pub denom: ::prost::alloc::string::String, + #[prost(string, tag = "2")] + pub amount: ::prost::alloc::string::String, +} +impl ::prost::Name for Coin { + const NAME: &'static str = "Coin"; + const PACKAGE: &'static str = "cosmos.base.v1beta1"; + fn full_name() -> ::prost::alloc::string::String { + "cosmos.base.v1beta1.Coin".into() + } + fn type_url() -> ::prost::alloc::string::String { + "/cosmos.base.v1beta1.Coin".into() + } +} diff --git a/core/node/da_clients/src/celestia/generated/cosmos.crypto.multisig.v1beta1.rs b/core/node/da_clients/src/celestia/generated/cosmos.crypto.multisig.v1beta1.rs new file mode 100644 index 000000000000..c514b3739b21 --- /dev/null +++ b/core/node/da_clients/src/celestia/generated/cosmos.crypto.multisig.v1beta1.rs @@ -0,0 +1,40 @@ +// This file is @generated by prost-build. +/// MultiSignature wraps the signatures from a multisig.LegacyAminoPubKey. +/// See cosmos.tx.v1betata1.ModeInfo.Multi for how to specify which signers +/// signed and with which modes. +#[derive(Clone, PartialEq, ::prost::Message)] +pub struct MultiSignature { + #[prost(bytes = "bytes", repeated, tag = "1")] + pub signatures: ::prost::alloc::vec::Vec<::prost::bytes::Bytes>, +} +impl ::prost::Name for MultiSignature { + const NAME: &'static str = "MultiSignature"; + const PACKAGE: &'static str = "cosmos.crypto.multisig.v1beta1"; + fn full_name() -> ::prost::alloc::string::String { + "cosmos.crypto.multisig.v1beta1.MultiSignature".into() + } + fn type_url() -> ::prost::alloc::string::String { + "/cosmos.crypto.multisig.v1beta1.MultiSignature".into() + } +} +/// CompactBitArray is an implementation of a space efficient bit array. +/// This is used to ensure that the encoded data takes up a minimal amount of +/// space after proto encoding. +/// This is not thread safe, and is not intended for concurrent usage. 
+#[derive(Clone, PartialEq, ::prost::Message)] +pub struct CompactBitArray { + #[prost(uint32, tag = "1")] + pub extra_bits_stored: u32, + #[prost(bytes = "bytes", tag = "2")] + pub elems: ::prost::bytes::Bytes, +} +impl ::prost::Name for CompactBitArray { + const NAME: &'static str = "CompactBitArray"; + const PACKAGE: &'static str = "cosmos.crypto.multisig.v1beta1"; + fn full_name() -> ::prost::alloc::string::String { + "cosmos.crypto.multisig.v1beta1.CompactBitArray".into() + } + fn type_url() -> ::prost::alloc::string::String { + "/cosmos.crypto.multisig.v1beta1.CompactBitArray".into() + } +} diff --git a/core/node/da_clients/src/celestia/generated/cosmos.crypto.secp256k1.rs b/core/node/da_clients/src/celestia/generated/cosmos.crypto.secp256k1.rs new file mode 100644 index 000000000000..081aec09682b --- /dev/null +++ b/core/node/da_clients/src/celestia/generated/cosmos.crypto.secp256k1.rs @@ -0,0 +1,21 @@ +// This file is @generated by prost-build. +/// PubKey defines a secp256k1 public key +/// Key is the compressed form of the pubkey. The first byte depends is a 0x02 byte +/// if the y-coordinate is the lexicographically largest of the two associated with +/// the x-coordinate. Otherwise the first byte is a 0x03. +/// This prefix is followed with the x-coordinate. +#[derive(Clone, PartialEq, ::prost::Message)] +pub struct PubKey { + #[prost(bytes = "bytes", tag = "1")] + pub key: ::prost::bytes::Bytes, +} +impl ::prost::Name for PubKey { + const NAME: &'static str = "PubKey"; + const PACKAGE: &'static str = "cosmos.crypto.secp256k1"; + fn full_name() -> ::prost::alloc::string::String { + "cosmos.crypto.secp256k1.PubKey".into() + } + fn type_url() -> ::prost::alloc::string::String { + "/cosmos.crypto.secp256k1.PubKey".into() + } +} diff --git a/core/node/da_clients/src/celestia/generated/cosmos.tx.signing.v1beta1.rs b/core/node/da_clients/src/celestia/generated/cosmos.tx.signing.v1beta1.rs new file mode 100644 index 000000000000..54f3fa9d00d7 --- /dev/null +++ b/core/node/da_clients/src/celestia/generated/cosmos.tx.signing.v1beta1.rs @@ -0,0 +1,72 @@ +// This file is @generated by prost-build. +/// SignMode represents a signing mode with its own security guarantees. +/// +/// This enum should be considered a registry of all known sign modes +/// in the Cosmos ecosystem. Apps are not expected to support all known +/// sign modes. Apps that would like to support custom sign modes are +/// encouraged to open a small PR against this file to add a new case +/// to this SignMode enum describing their sign mode so that different +/// apps have a consistent version of this enum. +#[derive(Clone, Copy, Debug, PartialEq, Eq, Hash, PartialOrd, Ord, ::prost::Enumeration)] +#[repr(i32)] +pub enum SignMode { + /// SIGN_MODE_UNSPECIFIED specifies an unknown signing mode and will be + /// rejected. + Unspecified = 0, + /// SIGN_MODE_DIRECT specifies a signing mode which uses SignDoc and is + /// verified with raw bytes from Tx. + Direct = 1, + /// SIGN_MODE_TEXTUAL is a future signing mode that will verify some + /// human-readable textual representation on top of the binary representation + /// from SIGN_MODE_DIRECT. It is currently not supported. + Textual = 2, + /// SIGN_MODE_DIRECT_AUX specifies a signing mode which uses + /// SignDocDirectAux. As opposed to SIGN_MODE_DIRECT, this sign mode does not + /// require signers signing over other signers' `signer_info`. It also allows + /// for adding Tips in transactions. 
+ /// + /// Since: cosmos-sdk 0.46 + DirectAux = 3, + /// SIGN_MODE_LEGACY_AMINO_JSON is a backwards compatibility mode which uses + /// Amino JSON and will be removed in the future. + LegacyAminoJson = 127, + /// SIGN_MODE_EIP_191 specifies the sign mode for EIP 191 signing on the Cosmos + /// SDK. Ref: + /// + /// Currently, SIGN_MODE_EIP_191 is registered as a SignMode enum variant, + /// but is not implemented on the SDK by default. To enable EIP-191, you need + /// to pass a custom `TxConfig` that has an implementation of + /// `SignModeHandler` for EIP-191. The SDK may decide to fully support + /// EIP-191 in the future. + /// + /// Since: cosmos-sdk 0.45.2 + Eip191 = 191, +} +impl SignMode { + /// String value of the enum field names used in the ProtoBuf definition. + /// + /// The values are not transformed in any way and thus are considered stable + /// (if the ProtoBuf definition does not change) and safe for programmatic use. + pub fn as_str_name(&self) -> &'static str { + match self { + Self::Unspecified => "SIGN_MODE_UNSPECIFIED", + Self::Direct => "SIGN_MODE_DIRECT", + Self::Textual => "SIGN_MODE_TEXTUAL", + Self::DirectAux => "SIGN_MODE_DIRECT_AUX", + Self::LegacyAminoJson => "SIGN_MODE_LEGACY_AMINO_JSON", + Self::Eip191 => "SIGN_MODE_EIP_191", + } + } + /// Creates an enum from field names used in the ProtoBuf definition. + pub fn from_str_name(value: &str) -> ::core::option::Option { + match value { + "SIGN_MODE_UNSPECIFIED" => Some(Self::Unspecified), + "SIGN_MODE_DIRECT" => Some(Self::Direct), + "SIGN_MODE_TEXTUAL" => Some(Self::Textual), + "SIGN_MODE_DIRECT_AUX" => Some(Self::DirectAux), + "SIGN_MODE_LEGACY_AMINO_JSON" => Some(Self::LegacyAminoJson), + "SIGN_MODE_EIP_191" => Some(Self::Eip191), + _ => None, + } + } +} diff --git a/core/node/da_clients/src/celestia/generated/cosmos.tx.v1beta1.rs b/core/node/da_clients/src/celestia/generated/cosmos.tx.v1beta1.rs new file mode 100644 index 000000000000..7783eabcdbac --- /dev/null +++ b/core/node/da_clients/src/celestia/generated/cosmos.tx.v1beta1.rs @@ -0,0 +1,553 @@ +// This file is @generated by prost-build. +/// Tx is the standard type used for broadcasting transactions. +#[derive(Clone, PartialEq, ::prost::Message)] +pub struct Tx { + /// body is the processable content of the transaction + #[prost(message, optional, tag = "1")] + pub body: ::core::option::Option, + /// auth_info is the authorization related content of the transaction, + /// specifically signers, signer modes and fee + #[prost(message, optional, tag = "2")] + pub auth_info: ::core::option::Option, + /// signatures is a list of signatures that matches the length and order of + /// AuthInfo's signer_infos to allow connecting signature meta information like + /// public key and signing mode by position. + #[prost(bytes = "bytes", repeated, tag = "3")] + pub signatures: ::prost::alloc::vec::Vec<::prost::bytes::Bytes>, +} +impl ::prost::Name for Tx { + const NAME: &'static str = "Tx"; + const PACKAGE: &'static str = "cosmos.tx.v1beta1"; + fn full_name() -> ::prost::alloc::string::String { + "cosmos.tx.v1beta1.Tx".into() + } + fn type_url() -> ::prost::alloc::string::String { + "/cosmos.tx.v1beta1.Tx".into() + } +} +/// SignDoc is the type used for generating sign bytes for SIGN_MODE_DIRECT. +#[derive(Clone, PartialEq, ::prost::Message)] +pub struct SignDoc { + /// body_bytes is protobuf serialization of a TxBody that matches the + /// representation in TxRaw. 
+ #[prost(bytes = "bytes", tag = "1")] + pub body_bytes: ::prost::bytes::Bytes, + /// auth_info_bytes is a protobuf serialization of an AuthInfo that matches the + /// representation in TxRaw. + #[prost(bytes = "bytes", tag = "2")] + pub auth_info_bytes: ::prost::bytes::Bytes, + /// chain_id is the unique identifier of the chain this transaction targets. + /// It prevents signed transactions from being used on another chain by an + /// attacker + #[prost(string, tag = "3")] + pub chain_id: ::prost::alloc::string::String, + /// account_number is the account number of the account in state + #[prost(uint64, tag = "4")] + pub account_number: u64, +} +impl ::prost::Name for SignDoc { + const NAME: &'static str = "SignDoc"; + const PACKAGE: &'static str = "cosmos.tx.v1beta1"; + fn full_name() -> ::prost::alloc::string::String { + "cosmos.tx.v1beta1.SignDoc".into() + } + fn type_url() -> ::prost::alloc::string::String { + "/cosmos.tx.v1beta1.SignDoc".into() + } +} +/// TxBody is the body of a transaction that all signers sign over. +#[derive(Clone, PartialEq, ::prost::Message)] +pub struct TxBody { + /// messages is a list of messages to be executed. The required signers of + /// those messages define the number and order of elements in AuthInfo's + /// signer_infos and Tx's signatures. Each required signer address is added to + /// the list only the first time it occurs. + /// By convention, the first required signer (usually from the first message) + /// is referred to as the primary signer and pays the fee for the whole + /// transaction. + #[prost(message, repeated, tag = "1")] + pub messages: ::prost::alloc::vec::Vec<::pbjson_types::Any>, + /// memo is any arbitrary note/comment to be added to the transaction. + /// WARNING: in clients, any publicly exposed text should not be called memo, + /// but should be called `note` instead (see ). + #[prost(string, tag = "2")] + pub memo: ::prost::alloc::string::String, + /// timeout is the block height after which this transaction will not + /// be processed by the chain + #[prost(uint64, tag = "3")] + pub timeout_height: u64, + /// extension_options are arbitrary options that can be added by chains + /// when the default options are not sufficient. If any of these are present + /// and can't be handled, the transaction will be rejected + #[prost(message, repeated, tag = "1023")] + pub extension_options: ::prost::alloc::vec::Vec<::pbjson_types::Any>, + /// extension_options are arbitrary options that can be added by chains + /// when the default options are not sufficient. If any of these are present + /// and can't be handled, they will be ignored + #[prost(message, repeated, tag = "2047")] + pub non_critical_extension_options: ::prost::alloc::vec::Vec<::pbjson_types::Any>, +} +impl ::prost::Name for TxBody { + const NAME: &'static str = "TxBody"; + const PACKAGE: &'static str = "cosmos.tx.v1beta1"; + fn full_name() -> ::prost::alloc::string::String { + "cosmos.tx.v1beta1.TxBody".into() + } + fn type_url() -> ::prost::alloc::string::String { + "/cosmos.tx.v1beta1.TxBody".into() + } +} +/// AuthInfo describes the fee and signer modes that are used to sign a +/// transaction. +#[derive(Clone, PartialEq, ::prost::Message)] +pub struct AuthInfo { + /// signer_infos defines the signing modes for the required signers. The number + /// and order of elements must match the required signers from TxBody's + /// messages. The first element is the primary signer and the one which pays + /// the fee. 
+ #[prost(message, repeated, tag = "1")] + pub signer_infos: ::prost::alloc::vec::Vec, + /// Fee is the fee and gas limit for the transaction. The first signer is the + /// primary signer and the one which pays the fee. The fee can be calculated + /// based on the cost of evaluating the body and doing signature verification + /// of the signers. This can be estimated via simulation. + #[prost(message, optional, tag = "2")] + pub fee: ::core::option::Option, + /// Tip is the optional tip used for transactions fees paid in another denom. + /// + /// This field is ignored if the chain didn't enable tips, i.e. didn't add the + /// `TipDecorator` in its posthandler. + /// + /// Since: cosmos-sdk 0.46 + #[prost(message, optional, tag = "3")] + pub tip: ::core::option::Option, +} +impl ::prost::Name for AuthInfo { + const NAME: &'static str = "AuthInfo"; + const PACKAGE: &'static str = "cosmos.tx.v1beta1"; + fn full_name() -> ::prost::alloc::string::String { + "cosmos.tx.v1beta1.AuthInfo".into() + } + fn type_url() -> ::prost::alloc::string::String { + "/cosmos.tx.v1beta1.AuthInfo".into() + } +} +/// SignerInfo describes the public key and signing mode of a single top-level +/// signer. +#[derive(Clone, PartialEq, ::prost::Message)] +pub struct SignerInfo { + /// public_key is the public key of the signer. It is optional for accounts + /// that already exist in state. If unset, the verifier can use the required \ + /// signer address for this position and lookup the public key. + #[prost(message, optional, tag = "1")] + pub public_key: ::core::option::Option<::pbjson_types::Any>, + /// mode_info describes the signing mode of the signer and is a nested + /// structure to support nested multisig pubkey's + #[prost(message, optional, tag = "2")] + pub mode_info: ::core::option::Option, + /// sequence is the sequence of the account, which describes the + /// number of committed transactions signed by a given address. It is used to + /// prevent replay attacks. + #[prost(uint64, tag = "3")] + pub sequence: u64, +} +impl ::prost::Name for SignerInfo { + const NAME: &'static str = "SignerInfo"; + const PACKAGE: &'static str = "cosmos.tx.v1beta1"; + fn full_name() -> ::prost::alloc::string::String { + "cosmos.tx.v1beta1.SignerInfo".into() + } + fn type_url() -> ::prost::alloc::string::String { + "/cosmos.tx.v1beta1.SignerInfo".into() + } +} +/// ModeInfo describes the signing mode of a single or nested multisig signer. +#[derive(Clone, PartialEq, ::prost::Message)] +pub struct ModeInfo { + /// sum is the oneof that specifies whether this represents a single or nested + /// multisig signer + #[prost(oneof = "mode_info::Sum", tags = "1, 2")] + pub sum: ::core::option::Option, +} +/// Nested message and enum types in `ModeInfo`. +pub mod mode_info { + /// Single is the mode info for a single signer. 
It is structured as a message + /// to allow for additional fields such as locale for SIGN_MODE_TEXTUAL in the + /// future + #[derive(Clone, Copy, PartialEq, ::prost::Message)] + pub struct Single { + /// mode is the signing mode of the single signer + #[prost(enumeration = "super::super::signing::SignMode", tag = "1")] + pub mode: i32, + } + impl ::prost::Name for Single { + const NAME: &'static str = "Single"; + const PACKAGE: &'static str = "cosmos.tx.v1beta1"; + fn full_name() -> ::prost::alloc::string::String { + "cosmos.tx.v1beta1.ModeInfo.Single".into() + } + fn type_url() -> ::prost::alloc::string::String { + "/cosmos.tx.v1beta1.ModeInfo.Single".into() + } + } + /// Multi is the mode info for a multisig public key + #[derive(Clone, PartialEq, ::prost::Message)] + pub struct Multi { + /// bitarray specifies which keys within the multisig are signing + #[prost(message, optional, tag = "1")] + pub bitarray: ::core::option::Option< + super::super::super::crypto::multisig::CompactBitArray, + >, + /// mode_infos is the corresponding modes of the signers of the multisig + /// which could include nested multisig public keys + #[prost(message, repeated, tag = "2")] + pub mode_infos: ::prost::alloc::vec::Vec, + } + impl ::prost::Name for Multi { + const NAME: &'static str = "Multi"; + const PACKAGE: &'static str = "cosmos.tx.v1beta1"; + fn full_name() -> ::prost::alloc::string::String { + "cosmos.tx.v1beta1.ModeInfo.Multi".into() + } + fn type_url() -> ::prost::alloc::string::String { + "/cosmos.tx.v1beta1.ModeInfo.Multi".into() + } + } + /// sum is the oneof that specifies whether this represents a single or nested + /// multisig signer + #[derive(Clone, PartialEq, ::prost::Oneof)] + pub enum Sum { + /// single represents a single signer + #[prost(message, tag = "1")] + Single(Single), + /// multi represents a nested multisig signer + #[prost(message, tag = "2")] + Multi(Multi), + } +} +impl ::prost::Name for ModeInfo { + const NAME: &'static str = "ModeInfo"; + const PACKAGE: &'static str = "cosmos.tx.v1beta1"; + fn full_name() -> ::prost::alloc::string::String { + "cosmos.tx.v1beta1.ModeInfo".into() + } + fn type_url() -> ::prost::alloc::string::String { + "/cosmos.tx.v1beta1.ModeInfo".into() + } +} +/// Fee includes the amount of coins paid in fees and the maximum +/// gas to be used by the transaction. The ratio yields an effective "gasprice", +/// which must be above some miminum to be accepted into the mempool. +#[derive(Clone, PartialEq, ::prost::Message)] +pub struct Fee { + /// amount is the amount of coins to be paid as a fee + #[prost(message, repeated, tag = "1")] + pub amount: ::prost::alloc::vec::Vec, + /// gas_limit is the maximum gas that can be used in transaction processing + /// before an out of gas error occurs + #[prost(uint64, tag = "2")] + pub gas_limit: u64, + /// if unset, the first signer is responsible for paying the fees. If set, the specified account must pay the fees. + /// the payer must be a tx signer (and thus have signed this field in AuthInfo). + /// setting this field does *not* change the ordering of required signers for the transaction. + #[prost(string, tag = "3")] + pub payer: ::prost::alloc::string::String, + /// if set, the fee payer (either the first signer or the value of the payer field) requests that a fee grant be used + /// to pay fees instead of the fee payer's own balance. 
If an appropriate fee grant does not exist or the chain does + /// not support fee grants, this will fail + #[prost(string, tag = "4")] + pub granter: ::prost::alloc::string::String, +} +impl ::prost::Name for Fee { + const NAME: &'static str = "Fee"; + const PACKAGE: &'static str = "cosmos.tx.v1beta1"; + fn full_name() -> ::prost::alloc::string::String { + "cosmos.tx.v1beta1.Fee".into() + } + fn type_url() -> ::prost::alloc::string::String { + "/cosmos.tx.v1beta1.Fee".into() + } +} +/// Tip is the tip used for meta-transactions. +/// +/// Since: cosmos-sdk 0.46 +#[derive(Clone, PartialEq, ::prost::Message)] +pub struct Tip { + /// amount is the amount of the tip + #[prost(message, repeated, tag = "1")] + pub amount: ::prost::alloc::vec::Vec, + /// tipper is the address of the account paying for the tip + #[prost(string, tag = "2")] + pub tipper: ::prost::alloc::string::String, +} +impl ::prost::Name for Tip { + const NAME: &'static str = "Tip"; + const PACKAGE: &'static str = "cosmos.tx.v1beta1"; + fn full_name() -> ::prost::alloc::string::String { + "cosmos.tx.v1beta1.Tip".into() + } + fn type_url() -> ::prost::alloc::string::String { + "/cosmos.tx.v1beta1.Tip".into() + } +} +/// BroadcastTxRequest is the request type for the Service.BroadcastTxRequest +/// RPC method. +#[derive(Clone, PartialEq, ::prost::Message)] +pub struct BroadcastTxRequest { + /// tx_bytes is the raw transaction. + #[prost(bytes = "bytes", tag = "1")] + pub tx_bytes: ::prost::bytes::Bytes, + #[prost(enumeration = "BroadcastMode", tag = "2")] + pub mode: i32, +} +impl ::prost::Name for BroadcastTxRequest { + const NAME: &'static str = "BroadcastTxRequest"; + const PACKAGE: &'static str = "cosmos.tx.v1beta1"; + fn full_name() -> ::prost::alloc::string::String { + "cosmos.tx.v1beta1.BroadcastTxRequest".into() + } + fn type_url() -> ::prost::alloc::string::String { + "/cosmos.tx.v1beta1.BroadcastTxRequest".into() + } +} +/// BroadcastTxResponse is the response type for the +/// Service.BroadcastTx method. +#[derive(Clone, PartialEq, ::prost::Message)] +pub struct BroadcastTxResponse { + /// tx_response is the queried TxResponses. + #[prost(message, optional, tag = "1")] + pub tx_response: ::core::option::Option< + super::super::base::abci::TxResponse, + >, +} +impl ::prost::Name for BroadcastTxResponse { + const NAME: &'static str = "BroadcastTxResponse"; + const PACKAGE: &'static str = "cosmos.tx.v1beta1"; + fn full_name() -> ::prost::alloc::string::String { + "cosmos.tx.v1beta1.BroadcastTxResponse".into() + } + fn type_url() -> ::prost::alloc::string::String { + "/cosmos.tx.v1beta1.BroadcastTxResponse".into() + } +} +/// GetTxRequest is the request type for the Service.GetTx +/// RPC method. +#[derive(Clone, PartialEq, ::prost::Message)] +pub struct GetTxRequest { + /// hash is the tx hash to query, encoded as a hex string. + #[prost(string, tag = "1")] + pub hash: ::prost::alloc::string::String, +} +impl ::prost::Name for GetTxRequest { + const NAME: &'static str = "GetTxRequest"; + const PACKAGE: &'static str = "cosmos.tx.v1beta1"; + fn full_name() -> ::prost::alloc::string::String { + "cosmos.tx.v1beta1.GetTxRequest".into() + } + fn type_url() -> ::prost::alloc::string::String { + "/cosmos.tx.v1beta1.GetTxRequest".into() + } +} +/// GetTxResponse is the response type for the Service.GetTx method. +#[derive(Clone, PartialEq, ::prost::Message)] +pub struct GetTxResponse { + /// tx is the queried transaction. 
+ #[prost(message, optional, tag = "1")] + pub tx: ::core::option::Option, + /// tx_response is the queried TxResponses. + #[prost(message, optional, tag = "2")] + pub tx_response: ::core::option::Option< + super::super::base::abci::TxResponse, + >, +} +impl ::prost::Name for GetTxResponse { + const NAME: &'static str = "GetTxResponse"; + const PACKAGE: &'static str = "cosmos.tx.v1beta1"; + fn full_name() -> ::prost::alloc::string::String { + "cosmos.tx.v1beta1.GetTxResponse".into() + } + fn type_url() -> ::prost::alloc::string::String { + "/cosmos.tx.v1beta1.GetTxResponse".into() + } +} +/// BroadcastMode specifies the broadcast mode for the TxService.Broadcast RPC method. +#[derive(Clone, Copy, Debug, PartialEq, Eq, Hash, PartialOrd, Ord, ::prost::Enumeration)] +#[repr(i32)] +pub enum BroadcastMode { + /// zero-value for mode ordering + Unspecified = 0, + /// BROADCAST_MODE_BLOCK defines a tx broadcasting mode where the client waits for + /// the tx to be committed in a block. + Block = 1, + /// BROADCAST_MODE_SYNC defines a tx broadcasting mode where the client waits for + /// a CheckTx execution response only. + Sync = 2, + /// BROADCAST_MODE_ASYNC defines a tx broadcasting mode where the client returns + /// immediately. + Async = 3, +} +impl BroadcastMode { + /// String value of the enum field names used in the ProtoBuf definition. + /// + /// The values are not transformed in any way and thus are considered stable + /// (if the ProtoBuf definition does not change) and safe for programmatic use. + pub fn as_str_name(&self) -> &'static str { + match self { + Self::Unspecified => "BROADCAST_MODE_UNSPECIFIED", + Self::Block => "BROADCAST_MODE_BLOCK", + Self::Sync => "BROADCAST_MODE_SYNC", + Self::Async => "BROADCAST_MODE_ASYNC", + } + } + /// Creates an enum from field names used in the ProtoBuf definition. + pub fn from_str_name(value: &str) -> ::core::option::Option { + match value { + "BROADCAST_MODE_UNSPECIFIED" => Some(Self::Unspecified), + "BROADCAST_MODE_BLOCK" => Some(Self::Block), + "BROADCAST_MODE_SYNC" => Some(Self::Sync), + "BROADCAST_MODE_ASYNC" => Some(Self::Async), + _ => None, + } + } +} +/// Generated client implementations. +pub mod service_client { + #![allow( + unused_variables, + dead_code, + missing_docs, + clippy::wildcard_imports, + clippy::let_unit_value, + )] + use tonic::codegen::*; + use tonic::codegen::http::Uri; + /// Service defines a gRPC service for interacting with transactions. + #[derive(Debug, Clone)] + pub struct ServiceClient { + inner: tonic::client::Grpc, + } + impl ServiceClient { + /// Attempt to create a new client by connecting to a given endpoint. 
+ pub async fn connect(dst: D) -> Result + where + D: TryInto, + D::Error: Into, + { + let conn = tonic::transport::Endpoint::new(dst)?.connect().await?; + Ok(Self::new(conn)) + } + } + impl ServiceClient + where + T: tonic::client::GrpcService, + T::Error: Into, + T::ResponseBody: Body + std::marker::Send + 'static, + ::Error: Into + std::marker::Send, + { + pub fn new(inner: T) -> Self { + let inner = tonic::client::Grpc::new(inner); + Self { inner } + } + pub fn with_origin(inner: T, origin: Uri) -> Self { + let inner = tonic::client::Grpc::with_origin(inner, origin); + Self { inner } + } + pub fn with_interceptor( + inner: T, + interceptor: F, + ) -> ServiceClient> + where + F: tonic::service::Interceptor, + T::ResponseBody: Default, + T: tonic::codegen::Service< + http::Request, + Response = http::Response< + >::ResponseBody, + >, + >, + , + >>::Error: Into + std::marker::Send + std::marker::Sync, + { + ServiceClient::new(InterceptedService::new(inner, interceptor)) + } + /// Compress requests with the given encoding. + /// + /// This requires the server to support it otherwise it might respond with an + /// error. + #[must_use] + pub fn send_compressed(mut self, encoding: CompressionEncoding) -> Self { + self.inner = self.inner.send_compressed(encoding); + self + } + /// Enable decompressing responses. + #[must_use] + pub fn accept_compressed(mut self, encoding: CompressionEncoding) -> Self { + self.inner = self.inner.accept_compressed(encoding); + self + } + /// Limits the maximum size of a decoded message. + /// + /// Default: `4MB` + #[must_use] + pub fn max_decoding_message_size(mut self, limit: usize) -> Self { + self.inner = self.inner.max_decoding_message_size(limit); + self + } + /// Limits the maximum size of an encoded message. + /// + /// Default: `usize::MAX` + #[must_use] + pub fn max_encoding_message_size(mut self, limit: usize) -> Self { + self.inner = self.inner.max_encoding_message_size(limit); + self + } + /// GetTx fetches a tx by hash. + pub async fn get_tx( + &mut self, + request: impl tonic::IntoRequest, + ) -> std::result::Result, tonic::Status> { + self.inner + .ready() + .await + .map_err(|e| { + tonic::Status::unknown( + format!("Service was not ready: {}", e.into()), + ) + })?; + let codec = tonic::codec::ProstCodec::default(); + let path = http::uri::PathAndQuery::from_static( + "/cosmos.tx.v1beta1.Service/GetTx", + ); + let mut req = request.into_request(); + req.extensions_mut() + .insert(GrpcMethod::new("cosmos.tx.v1beta1.Service", "GetTx")); + self.inner.unary(req, path, codec).await + } + /// BroadcastTx broadcast transaction. 
+ pub async fn broadcast_tx( + &mut self, + request: impl tonic::IntoRequest, + ) -> std::result::Result< + tonic::Response, + tonic::Status, + > { + self.inner + .ready() + .await + .map_err(|e| { + tonic::Status::unknown( + format!("Service was not ready: {}", e.into()), + ) + })?; + let codec = tonic::codec::ProstCodec::default(); + let path = http::uri::PathAndQuery::from_static( + "/cosmos.tx.v1beta1.Service/BroadcastTx", + ); + let mut req = request.into_request(); + req.extensions_mut() + .insert(GrpcMethod::new("cosmos.tx.v1beta1.Service", "BroadcastTx")); + self.inner.unary(req, path, codec).await + } + } +} diff --git a/core/node/da_clients/src/celestia/generated/tendermint.abci.rs b/core/node/da_clients/src/celestia/generated/tendermint.abci.rs new file mode 100644 index 000000000000..ab3bbeb946f6 --- /dev/null +++ b/core/node/da_clients/src/celestia/generated/tendermint.abci.rs @@ -0,0 +1,42 @@ +// This file is @generated by prost-build. +/// Event allows application developers to attach additional information to +/// ResponseBeginBlock, ResponseEndBlock, ResponseCheckTx and ResponseDeliverTx. +/// Later, transactions may be queried using these events. +#[derive(Clone, PartialEq, ::prost::Message)] +pub struct Event { + #[prost(string, tag = "1")] + pub r#type: ::prost::alloc::string::String, + #[prost(message, repeated, tag = "2")] + pub attributes: ::prost::alloc::vec::Vec, +} +impl ::prost::Name for Event { + const NAME: &'static str = "Event"; + const PACKAGE: &'static str = "tendermint.abci"; + fn full_name() -> ::prost::alloc::string::String { + "tendermint.abci.Event".into() + } + fn type_url() -> ::prost::alloc::string::String { + "/tendermint.abci.Event".into() + } +} +/// EventAttribute is a single key-value pair, associated with an event. +#[derive(Clone, PartialEq, ::prost::Message)] +pub struct EventAttribute { + #[prost(bytes = "bytes", tag = "1")] + pub key: ::prost::bytes::Bytes, + #[prost(bytes = "bytes", tag = "2")] + pub value: ::prost::bytes::Bytes, + /// nondeterministic + #[prost(bool, tag = "3")] + pub index: bool, +} +impl ::prost::Name for EventAttribute { + const NAME: &'static str = "EventAttribute"; + const PACKAGE: &'static str = "tendermint.abci"; + fn full_name() -> ::prost::alloc::string::String { + "tendermint.abci.EventAttribute".into() + } + fn type_url() -> ::prost::alloc::string::String { + "/tendermint.abci.EventAttribute".into() + } +} diff --git a/core/node/da_clients/src/celestia/generated/tendermint.types.rs b/core/node/da_clients/src/celestia/generated/tendermint.types.rs new file mode 100644 index 000000000000..000e3f2c1fbc --- /dev/null +++ b/core/node/da_clients/src/celestia/generated/tendermint.types.rs @@ -0,0 +1,48 @@ +// This file is @generated by prost-build. +/// Blob (named after binary large object) is a chunk of data submitted by a user +/// to be published to the Celestia blockchain. The data of a Blob is published +/// to a namespace and is encoded into shares based on the format specified by +/// share_version. 
+#[derive(Clone, PartialEq, ::prost::Message)] +pub struct Blob { + #[prost(bytes = "bytes", tag = "1")] + pub namespace_id: ::prost::bytes::Bytes, + #[prost(bytes = "bytes", tag = "2")] + pub data: ::prost::bytes::Bytes, + #[prost(uint32, tag = "3")] + pub share_version: u32, + #[prost(uint32, tag = "4")] + pub namespace_version: u32, +} +impl ::prost::Name for Blob { + const NAME: &'static str = "Blob"; + const PACKAGE: &'static str = "tendermint.types"; + fn full_name() -> ::prost::alloc::string::String { + "tendermint.types.Blob".into() + } + fn type_url() -> ::prost::alloc::string::String { + "/tendermint.types.Blob".into() + } +} +/// BlobTx wraps an encoded sdk.Tx with a second field to contain blobs of data. +/// The raw bytes of the blobs are not signed over, instead we verify each blob +/// using the relevant MsgPayForBlobs that is signed over in the encoded sdk.Tx. +#[derive(Clone, PartialEq, ::prost::Message)] +pub struct BlobTx { + #[prost(bytes = "bytes", tag = "1")] + pub tx: ::prost::bytes::Bytes, + #[prost(message, repeated, tag = "2")] + pub blobs: ::prost::alloc::vec::Vec, + #[prost(string, tag = "3")] + pub type_id: ::prost::alloc::string::String, +} +impl ::prost::Name for BlobTx { + const NAME: &'static str = "BlobTx"; + const PACKAGE: &'static str = "tendermint.types"; + fn full_name() -> ::prost::alloc::string::String { + "tendermint.types.BlobTx".into() + } + fn type_url() -> ::prost::alloc::string::String { + "/tendermint.types.BlobTx".into() + } +} diff --git a/core/node/da_clients/src/celestia/mod.rs b/core/node/da_clients/src/celestia/mod.rs new file mode 100644 index 000000000000..ce648531f282 --- /dev/null +++ b/core/node/da_clients/src/celestia/mod.rs @@ -0,0 +1,58 @@ +mod client; +mod sdk; + +pub use self::client::CelestiaClient; + +pub mod celestia_proto { + include!("generated/celestia.blob.v1.rs"); +} + +pub mod cosmos { + pub mod auth { + include!("generated/cosmos.auth.v1beta1.rs"); + } + + pub mod base { + pub mod abci { + include!("generated/cosmos.base.abci.v1beta1.rs"); + } + + pub mod node { + include!("generated/cosmos.base.node.v1beta1.rs"); + } + + pub mod v1beta1 { + include!("generated/cosmos.base.v1beta1.rs"); + } + } + + pub mod tx { + pub mod signing { + include!("generated/cosmos.tx.signing.v1beta1.rs"); + } + + pub mod v1beta1 { + include!("generated/cosmos.tx.v1beta1.rs"); + } + } + + pub mod crypto { + pub mod multisig { + include!("generated/cosmos.crypto.multisig.v1beta1.rs"); + } + + pub mod secp256k1 { + include!("generated/cosmos.crypto.secp256k1.rs"); + } + } +} + +pub mod tendermint { + pub mod abci { + include!("generated/tendermint.abci.rs"); + } + + pub mod types { + include!("generated/tendermint.types.rs"); + } +} diff --git a/core/node/da_clients/src/celestia/sdk.rs b/core/node/da_clients/src/celestia/sdk.rs new file mode 100644 index 000000000000..5fd9aea79f07 --- /dev/null +++ b/core/node/da_clients/src/celestia/sdk.rs @@ -0,0 +1,602 @@ +use std::{ + fmt::{Display, Formatter, Result}, + str::FromStr, + time::{Duration, Instant}, +}; + +use celestia_types::Blob; +use prost::{bytes::Bytes, Message, Name}; +use secp256k1::{PublicKey, Secp256k1, SecretKey}; +use sha2::Digest; +use tonic::transport::Channel; + +use super::{ + celestia_proto::{ + query_client::QueryClient as BlobQueryClient, MsgPayForBlobs, + QueryParamsRequest as QueryBlobParamsRequest, + }, + cosmos::{ + auth::{ + query_client::QueryClient as AuthQueryClient, BaseAccount, QueryAccountRequest, + QueryParamsRequest as QueryAuthParamsRequest, + }, + base::{ + 
            node::{
+                service_client::ServiceClient as MinGasPriceClient,
+                ConfigRequest as MinGasPriceRequest,
+            },
+            v1beta1::Coin,
+        },
+        crypto::secp256k1 as ec_proto,
+        tx::v1beta1::{
+            mode_info::{Single, Sum},
+            service_client::ServiceClient as TxClient,
+            AuthInfo, BroadcastMode, BroadcastTxRequest, Fee, GetTxRequest, ModeInfo, SignDoc,
+            SignerInfo, Tx, TxBody,
+        },
+    },
+    tendermint::types::{Blob as PbBlob, BlobTx},
+};
+
+const UNITS_SUFFIX: &str = "utia";
+pub const ADDRESS_LENGTH: usize = 20;
+const ACCOUNT_ADDRESS_PREFIX: bech32::Hrp = bech32::Hrp::parse_unchecked("celestia");
+const BLOB_TX_TYPE_ID: &str = "BLOB";
+
+#[derive(Clone)]
+pub(crate) struct RawCelestiaClient {
+    grpc_channel: Channel,
+    address: String,
+    chain_id: String,
+    signing_key: SecretKey,
+}
+
+impl RawCelestiaClient {
+    pub(crate) fn new(
+        grpc_channel: Channel,
+        private_key: String,
+        chain_id: String,
+    ) -> anyhow::Result<Self> {
+        let signing_key = SecretKey::from_str(&private_key)
+            .map_err(|e| anyhow::anyhow!("Failed to parse private key: {}", e))?;
+        let address = get_address(signing_key.public_key(&Secp256k1::new()))?;
+
+        Ok(Self {
+            grpc_channel,
+            address,
+            chain_id,
+            signing_key,
+        })
+    }
+
+    /// Prepares a blob transaction for the given blobs.
+    pub(crate) async fn prepare(&self, blobs: Vec<Blob>) -> anyhow::Result<BlobTx> {
+        let (gas_per_blob_byte, tx_size_cost_per_byte, min_gas_price, base_account) = tokio::try_join!(
+            self.get_gas_per_blob_byte(),
+            self.fetch_tx_size_cost_per_byte(),
+            self.fetch_min_gas_price(),
+            self.fetch_account(),
+        )?;
+
+        let msg_pay_for_blobs = new_msg_pay_for_blobs(blobs.as_slice(), self.address.clone())?;
+
+        let gas_limit = estimate_gas(
+            &msg_pay_for_blobs.blob_sizes,
+            gas_per_blob_byte,
+            tx_size_cost_per_byte,
+        );
+        let fee = calculate_fee(min_gas_price, gas_limit);
+
+        let signed_tx = new_signed_tx(
+            &msg_pay_for_blobs,
+            &base_account,
+            gas_limit,
+            fee,
+            self.chain_id.clone(),
+            &self.signing_key,
+        );
+
+        Ok(new_blob_tx(&signed_tx, blobs.iter()))
+    }
+
+    /// Submits the blob transaction to the node and returns the height of the block in which it was included.
+    pub(super) async fn submit(
+        &self,
+        blob_tx_hash: BlobTxHash,
+        blob_tx: BlobTx,
+    ) -> anyhow::Result<u64> {
+        let mut client: TxClient<Channel> = TxClient::new(self.grpc_channel.clone());
+        let hex_encoded_tx_hash = self.broadcast_tx(&mut client, blob_tx).await?;
+        if hex_encoded_tx_hash != blob_tx_hash.clone().hex() {
+            tracing::error!(
+                "tx hash {} returned from celestia app is not the same as \
+                 the locally calculated one {}; submission file has invalid data",
+                hex_encoded_tx_hash,
+                blob_tx_hash
+            );
+        }
+        tracing::info!(tx_hash = %hex_encoded_tx_hash, "broadcast blob transaction succeeded");
+
+        let height = self
+            .confirm_submission(&mut client, hex_encoded_tx_hash)
+            .await;
+        Ok(height)
+    }
+
+    /// Fetches the gas cost per byte for blobs from the node.
+    async fn get_gas_per_blob_byte(&self) -> anyhow::Result<u32> {
+        let mut blob_query_client = BlobQueryClient::new(self.grpc_channel.clone());
+        let response = blob_query_client.params(QueryBlobParamsRequest {}).await;
+
+        let params = response
+            .map_err(|status| {
+                anyhow::format_err!(
+                    "failed to get blob params, code: {}, message: {}",
+                    status.code(),
+                    status.message()
+                )
+            })?
+            .into_inner()
+            .params
+            .ok_or_else(|| anyhow::anyhow!("EmptyBlobParams"))?;
+
+        Ok(params.gas_per_blob_byte)
+    }
+
+    /// Fetches the transaction size cost per byte from the node.
+    async fn fetch_tx_size_cost_per_byte(&self) -> anyhow::Result<u64> {
+        let mut auth_query_client = AuthQueryClient::new(self.grpc_channel.clone());
+        let response = auth_query_client.params(QueryAuthParamsRequest {}).await;
+
+        let params = response
+            .map_err(|status| {
+                anyhow::format_err!(
+                    "failed to get auth params, code: {}, message: {}",
+                    status.code(),
+                    status.message()
+                )
+            })?
+            .into_inner()
+            .params
+            .ok_or_else(|| anyhow::anyhow!("EmptyAuthParams"))?;
+
+        Ok(params.tx_size_cost_per_byte)
+    }
+
+    /// Fetches the minimum gas price from the node.
+    async fn fetch_min_gas_price(&self) -> anyhow::Result<f64> {
+        let mut min_gas_price_client = MinGasPriceClient::new(self.grpc_channel.clone());
+        let response = min_gas_price_client.config(MinGasPriceRequest {}).await;
+
+        let min_gas_price_with_suffix = response
+            .map_err(|status| {
+                anyhow::format_err!(
+                    "failed to get price params, code: {}, message: {}",
+                    status.code(),
+                    status.message()
+                )
+            })?
+            .into_inner()
+            .minimum_gas_price;
+
+        let min_gas_price_str = min_gas_price_with_suffix
+            .strip_suffix(UNITS_SUFFIX)
+            .ok_or_else(|| {
+                anyhow::anyhow!(
+                    "MinGasPrice bad suffix, min_gas_price: {}, expected_suffix: {}",
+                    min_gas_price_with_suffix.clone(),
+                    UNITS_SUFFIX
+                )
+            })?;
+
+        min_gas_price_str.parse::<f64>().map_err(|source| {
+            anyhow::anyhow!(
+                "Failed to parse min gas price, min_gas_price: {}, err: {}",
+                min_gas_price_str,
+                source,
+            )
+        })
+    }
+
+    /// Fetches the account info for the current address.
+    async fn fetch_account(&self) -> anyhow::Result<BaseAccount> {
+        let mut auth_query_client = AuthQueryClient::new(self.grpc_channel.clone());
+        let request = QueryAccountRequest {
+            address: self.address.clone(),
+        };
+
+        let account_info = auth_query_client.account(request).await.map_err(|status| {
+            anyhow::anyhow!(
+                "failed to get account info, code: {}, message: {}",
+                status.code(),
+                status.message()
+            )
+        })?;
+
+        let account_as_any = account_info
+            .into_inner()
+            .account
+            .ok_or_else(|| anyhow::anyhow!("empty account info"))?;
+        let expected_type_url = BaseAccount::type_url();
+
+        if expected_type_url == account_as_any.type_url {
+            return BaseAccount::decode(&*account_as_any.value)
+                .map_err(|error| anyhow::anyhow!("failed to decode account info: {}", error));
+        }
+
+        Err(anyhow::anyhow!(
+            "unexpected account type, expected: {}, got: {}",
+            expected_type_url,
+            account_as_any.type_url
+        ))
+    }
+
+    /// Broadcasts the transaction and returns the transaction hash.
+    async fn broadcast_tx(
+        &self,
+        client: &mut TxClient<Channel>,
+        blob_tx: BlobTx,
+    ) -> anyhow::Result<String> {
+        let request = BroadcastTxRequest {
+            tx_bytes: Bytes::from(blob_tx.encode_to_vec()),
+            mode: i32::from(BroadcastMode::Sync),
+        };
+
+        let mut tx_response = client
+            .broadcast_tx(request)
+            .await
+            .map_err(|status| {
+                anyhow::anyhow!(
+                    "failed to broadcast the tx, code: {}, message: {}",
+                    status.code(),
+                    status.message()
+                )
+            })?
+            .into_inner()
+            .tx_response
+            .ok_or_else(|| anyhow::anyhow!("empty broadcast tx response"))?;
+
+        if tx_response.code != 0 {
+            return Err(anyhow::format_err!(
+                "failed to broadcast the tx, tx_hash: {}, code: {}, namespace: {}, log: {}",
+                tx_response.txhash,
+                tx_response.code,
+                tx_response.codespace,
+                tx_response.raw_log,
+            ));
+        }
+
+        tx_response.txhash.make_ascii_lowercase();
+        Ok(tx_response.txhash)
+    }
+
+    /// Waits for the transaction to be included in a block and returns the height of that block.
+    async fn confirm_submission(
+        &self,
+        client: &mut TxClient<Channel>,
+        hex_encoded_tx_hash: String,
+    ) -> u64 {
+        // The min seconds to sleep after receiving a GetTx response and sending the next request.
+        const MIN_POLL_INTERVAL_SECS: u64 = 1;
+        // The max seconds to sleep after receiving a GetTx response and sending the next request.
+        const MAX_POLL_INTERVAL_SECS: u64 = 12;
+        // How long to wait after starting `confirm_submission` before starting to log errors.
+        const START_LOGGING_DELAY: Duration = Duration::from_secs(12);
+        // The minimum duration between logging errors.
+        const LOG_ERROR_INTERVAL: Duration = Duration::from_secs(5);
+
+        let start = Instant::now();
+        let mut logged_at = start;
+
+        let mut log_if_due = |maybe_error: Option<anyhow::Error>| {
+            if start.elapsed() <= START_LOGGING_DELAY || logged_at.elapsed() <= LOG_ERROR_INTERVAL {
+                return;
+            }
+            let reason = maybe_error
+                .map_or(anyhow::anyhow!("transaction still pending"), |error| {
+                    anyhow::anyhow!("transaction still pending, error: {}", error)
+                });
+            tracing::warn!(
+                %reason,
+                tx_hash = %hex_encoded_tx_hash,
+                elapsed_seconds = start.elapsed().as_secs_f32(),
+                "waiting to confirm blob submission"
+            );
+            logged_at = Instant::now();
+        };
+
+        let mut sleep_secs = MIN_POLL_INTERVAL_SECS;
+        loop {
+            tokio::time::sleep(Duration::from_secs(sleep_secs)).await;
+            let res = self
+                .clone()
+                .get_tx(client, hex_encoded_tx_hash.clone())
+                .await;
+            match res {
+                Ok(Some(height)) => return height,
+                Ok(None) => {
+                    sleep_secs = MIN_POLL_INTERVAL_SECS;
+                    log_if_due(None);
+                }
+                Err(error) => {
+                    sleep_secs =
+                        std::cmp::min(sleep_secs.saturating_mul(2), MAX_POLL_INTERVAL_SECS);
+                    log_if_due(Some(error));
+                }
+            }
+        }
+    }
+
+    /// Returns the height of the block in which the transaction was included (if it was).
+    async fn get_tx(
+        self,
+        client: &mut TxClient<Channel>,
+        hex_encoded_tx_hash: String,
+    ) -> anyhow::Result<Option<u64>> {
+        let request = GetTxRequest {
+            hash: hex_encoded_tx_hash,
+        };
+        let response = client.get_tx(request).await;
+
+        let ok_response = match response {
+            Ok(resp) => resp,
+            Err(status) => {
+                if status.code() == tonic::Code::NotFound {
+                    tracing::trace!(msg = status.message(), "transaction still pending");
+                    return Ok(None);
+                }
+                return Err(anyhow::anyhow!(
+                    "failed to get tx, code: {}, message: {}",
+                    status.code(),
+                    status.message()
+                ));
+            }
+        };
+        let tx_response = ok_response
+            .into_inner()
+            .tx_response
+            .ok_or_else(|| anyhow::anyhow!("Empty get tx response"))?;
+        if tx_response.code != 0 {
+            return Err(anyhow::anyhow!(
+                "failed to get tx, tx_hash: {}, code: {}, namespace: {}, log: {}",
+                tx_response.txhash,
+                tx_response.code,
+                tx_response.codespace,
+                tx_response.raw_log,
+            ));
+        }
+        if tx_response.height == 0 {
+            tracing::trace!(tx_hash = %tx_response.txhash, "transaction still pending");
+            return Ok(None);
+        }
+
+        let height = u64::try_from(tx_response.height).map_err(|_| {
+            anyhow::anyhow!("GetTxResponseNegativeBlockHeight: {}", tx_response.height)
+        })?;
+
+        tracing::debug!(tx_hash = %tx_response.txhash, height, "transaction succeeded");
+        Ok(Some(height))
+    }
+}
+
+/// Returns a `BlobTx` for the given signed tx and blobs.
+fn new_blob_tx<'a>(signed_tx: &Tx, blobs: impl Iterator) -> BlobTx { + let blobs = blobs + .map(|blob| PbBlob { + namespace_id: Bytes::from(blob.namespace.id().to_vec()), + namespace_version: u32::from(blob.namespace.version()), + data: Bytes::from(blob.data.clone()), + share_version: u32::from(blob.share_version), + }) + .collect(); + BlobTx { + tx: Bytes::from(signed_tx.encode_to_vec()), + blobs, + type_id: BLOB_TX_TYPE_ID.to_string(), + } +} + +/// Returns a signed tx for the given message, account and metadata. +fn new_signed_tx( + msg_pay_for_blobs: &MsgPayForBlobs, + base_account: &BaseAccount, + gas_limit: u64, + fee: u64, + chain_id: String, + signing_key: &SecretKey, +) -> Tx { + const SIGNING_MODE_INFO: Option = Some(ModeInfo { + sum: Some(Sum::Single(Single { mode: 1 })), + }); + + let fee_coin = Coin { + denom: UNITS_SUFFIX.to_string(), + amount: fee.to_string(), + }; + let fee = Fee { + amount: vec![fee_coin], + gas_limit, + ..Fee::default() + }; + + let public_key = ec_proto::PubKey { + key: Bytes::from( + signing_key + .public_key(&Secp256k1::new()) + .serialize() + .to_vec(), + ), + }; + let public_key_as_any = pbjson_types::Any { + type_url: ec_proto::PubKey::type_url(), + value: public_key.encode_to_vec().into(), + }; + let auth_info = AuthInfo { + signer_infos: vec![SignerInfo { + public_key: Some(public_key_as_any), + mode_info: SIGNING_MODE_INFO, + sequence: base_account.sequence, + }], + fee: Some(fee), + tip: None, + }; + + let msg = pbjson_types::Any { + type_url: MsgPayForBlobs::type_url(), + value: msg_pay_for_blobs.encode_to_vec().into(), + }; + let tx_body = TxBody { + messages: vec![msg], + ..TxBody::default() + }; + + let bytes_to_sign = SignDoc { + body_bytes: Bytes::from(tx_body.encode_to_vec()), + auth_info_bytes: Bytes::from(auth_info.encode_to_vec()), + chain_id, + account_number: base_account.account_number, + } + .encode_to_vec(); + let hashed_bytes: [u8; 32] = sha2::Sha256::digest(bytes_to_sign).into(); + let signature = secp256k1::Secp256k1::new().sign_ecdsa( + &secp256k1::Message::from_slice(&hashed_bytes[..]).unwrap(), // unwrap is safe here because we know the length of the hashed bytes + signing_key, + ); + Tx { + body: Some(tx_body), + auth_info: Some(auth_info), + signatures: vec![Bytes::from(signature.serialize_compact().to_vec())], + } +} + +/// Returns the fee for the signed tx. 
+fn calculate_fee(min_gas_price: f64, gas_limit: u64) -> u64 { + let calculated_fee = (min_gas_price * gas_limit as f64).ceil() as u64; + tracing::info!( + "calculated fee: {}, min_gas_price: {}, gas_limit: {}", + calculated_fee, + min_gas_price, + gas_limit + ); + + calculated_fee +} + +fn estimate_gas(blob_sizes: &[u32], gas_per_blob_byte: u32, tx_size_cost_per_byte: u64) -> u64 { + // From https://github.com/celestiaorg/celestia-app/blob/v1.4.0/pkg/appconsts/global_consts.go#L28 + const SHARE_SIZE: u64 = 512; + // From https://github.com/celestiaorg/celestia-app/blob/v1.4.0/pkg/appconsts/global_consts.go#L55 + const CONTINUATION_COMPACT_SHARE_CONTENT_SIZE: u32 = 482; + // From https://github.com/celestiaorg/celestia-app/blob/v1.4.0/pkg/appconsts/global_consts.go#L59 + const FIRST_SPARSE_SHARE_CONTENT_SIZE: u32 = 478; + // From https://github.com/celestiaorg/celestia-app/blob/v1.4.0/x/blob/types/payforblob.go#L40 + const PFB_GAS_FIXED_COST: u64 = 75_000; + // From https://github.com/celestiaorg/celestia-app/blob/v1.4.0/x/blob/types/payforblob.go#L44 + const BYTES_PER_BLOB_INFO: u64 = 70; + + // From https://github.com/celestiaorg/celestia-app/blob/v1.4.0/pkg/shares/share_sequence.go#L126 + // + // `blob_len` is the size in bytes of one blob's `data` field. + fn sparse_shares_needed(blob_len: u32) -> u64 { + if blob_len == 0 { + return 0; + } + + if blob_len < FIRST_SPARSE_SHARE_CONTENT_SIZE { + return 1; + } + + // Use `u64` here to avoid overflow while adding below. + let mut bytes_available = u64::from(FIRST_SPARSE_SHARE_CONTENT_SIZE); + let mut shares_needed = 1_u64; + while bytes_available < u64::from(blob_len) { + bytes_available = bytes_available + .checked_add(u64::from(CONTINUATION_COMPACT_SHARE_CONTENT_SIZE)) + .expect( + "this can't overflow, as on each iteration `bytes_available < u32::MAX`, and \ + we're adding at most `u32::MAX` to it", + ); + shares_needed = shares_needed.checked_add(1).expect( + "this can't overflow, as the loop cannot execute for `u64::MAX` iterations", + ); + } + shares_needed + } + + let total_shares_used: u64 = blob_sizes.iter().copied().map(sparse_shares_needed).sum(); + let blob_count = blob_sizes.len().try_into().unwrap_or(u64::MAX); + + let shares_gas = total_shares_used + .saturating_mul(SHARE_SIZE) + .saturating_mul(u64::from(gas_per_blob_byte)); + let blob_info_gas = tx_size_cost_per_byte + .saturating_mul(BYTES_PER_BLOB_INFO) + .saturating_mul(blob_count); + + shares_gas + .saturating_add(blob_info_gas) + .saturating_add(PFB_GAS_FIXED_COST) +} + +/// Prepares a `MsgPayForBlobs` message for the given blobs. 
+fn new_msg_pay_for_blobs(blobs: &[Blob], signer: String) -> anyhow::Result { + let mut blob_sizes = Vec::with_capacity(blobs.len()); + let mut namespaces = Vec::with_capacity(blobs.len()); + let mut share_commitments = Vec::with_capacity(blobs.len()); + let mut share_versions = Vec::with_capacity(blobs.len()); + for blob in blobs { + blob_sizes.push(blob.data.len()); + namespaces.push(Bytes::from(blob.namespace.as_bytes().to_vec())); + share_commitments.push(Bytes::from(blob.commitment.0.to_vec())); + share_versions.push(u32::from(blob.share_version)); + } + + let blob_sizes = blob_sizes + .into_iter() + .map(|blob_size| { + u32::try_from(blob_size) + .map_err(|_| anyhow::anyhow!("blob too large, size: {}", blob_size)) + }) + .collect::>()?; + + Ok(MsgPayForBlobs { + signer, + namespaces, + blob_sizes, + share_commitments, + share_versions, + }) +} + +fn get_address(public_key: PublicKey) -> anyhow::Result { + use ripemd::{Digest, Ripemd160}; + + let sha_digest = sha2::Sha256::digest(public_key.serialize()); + let ripemd_digest = Ripemd160::digest(&sha_digest[..]); + let mut bytes = [0u8; ADDRESS_LENGTH]; + bytes.copy_from_slice(&ripemd_digest[..ADDRESS_LENGTH]); + + Ok(bech32::encode::( + ACCOUNT_ADDRESS_PREFIX, + bytes.as_slice(), + )?) +} + +#[derive(Clone, Debug)] +pub(super) struct BlobTxHash([u8; 32]); + +impl BlobTxHash { + pub(super) fn compute(blob_tx: &BlobTx) -> Self { + Self(sha2::Sha256::digest(&blob_tx.tx).into()) + } + + pub(super) fn hex(self) -> String { + hex::encode(self.0) + } +} + +impl Display for BlobTxHash { + fn fmt(&self, formatter: &mut Formatter<'_>) -> Result { + write!(formatter, "{}", hex::encode(self.0)) + } +} diff --git a/core/node/da_clients/src/lib.rs b/core/node/da_clients/src/lib.rs index 48311ce4c3f2..8515c128ff3f 100644 --- a/core/node/da_clients/src/lib.rs +++ b/core/node/da_clients/src/lib.rs @@ -1,3 +1,5 @@ pub mod avail; +pub mod celestia; pub mod no_da; pub mod object_store; +mod utils; diff --git a/core/node/da_clients/src/utils.rs b/core/node/da_clients/src/utils.rs new file mode 100644 index 000000000000..d717d41f0e03 --- /dev/null +++ b/core/node/da_clients/src/utils.rs @@ -0,0 +1,15 @@ +use zksync_da_client::types::DAError; + +pub fn to_non_retriable_da_error(error: impl Into) -> DAError { + DAError { + error: error.into(), + is_retriable: false, + } +} + +pub fn to_retriable_da_error(error: impl Into) -> DAError { + DAError { + error: error.into(), + is_retriable: true, + } +} diff --git a/core/node/node_framework/src/implementations/layers/da_clients/celestia.rs b/core/node/node_framework/src/implementations/layers/da_clients/celestia.rs new file mode 100644 index 000000000000..69f5553d4da8 --- /dev/null +++ b/core/node/node_framework/src/implementations/layers/da_clients/celestia.rs @@ -0,0 +1,46 @@ +use zksync_config::{configs::da_client::celestia::CelestiaSecrets, CelestiaConfig}; +use zksync_da_client::DataAvailabilityClient; +use zksync_da_clients::celestia::CelestiaClient; + +use crate::{ + implementations::resources::da_client::DAClientResource, + wiring_layer::{WiringError, WiringLayer}, + IntoContext, +}; + +#[derive(Debug)] +pub struct CelestiaWiringLayer { + config: CelestiaConfig, + secrets: CelestiaSecrets, +} + +impl CelestiaWiringLayer { + pub fn new(config: CelestiaConfig, secrets: CelestiaSecrets) -> Self { + Self { config, secrets } + } +} + +#[derive(Debug, IntoContext)] +#[context(crate = crate)] +pub struct Output { + pub client: DAClientResource, +} + +#[async_trait::async_trait] +impl WiringLayer for 
CelestiaWiringLayer { + type Input = (); + type Output = Output; + + fn layer_name(&self) -> &'static str { + "celestia_client_layer" + } + + async fn wire(self, _input: Self::Input) -> Result { + let client: Box = + Box::new(CelestiaClient::new(self.config, self.secrets).await?); + + Ok(Self::Output { + client: DAClientResource(client), + }) + } +} diff --git a/core/node/node_framework/src/implementations/layers/da_clients/mod.rs b/core/node/node_framework/src/implementations/layers/da_clients/mod.rs index 48311ce4c3f2..6bb6ce4fb877 100644 --- a/core/node/node_framework/src/implementations/layers/da_clients/mod.rs +++ b/core/node/node_framework/src/implementations/layers/da_clients/mod.rs @@ -1,3 +1,4 @@ pub mod avail; +pub mod celestia; pub mod no_da; pub mod object_store; From c41db9ecec1c21b80969604f703ac6990f6f3434 Mon Sep 17 00:00:00 2001 From: Yury Akudovich Date: Tue, 29 Oct 2024 15:59:04 +0100 Subject: [PATCH 23/32] feat(prover): Add support for scaling WGs and compressor (#3179) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit ## What ❔ Add support for scaling WGs and compressor. Cleanup unneeded pods in Agents. Include `in_progress` into the queue. ## Why ❔ ## Checklist - [x] PR title corresponds to the body of PR (we generate changelog entries from PRs). - [x] Tests for the changes have been added / updated. - [ ] Documentation comments have been added / updated. - [x] Code has been formatted via `zkstack dev fmt` and `zkstack dev lint`. ref ZKD-1855 --- core/lib/basic_types/src/prover_dal.rs | 19 +- .../config/src/configs/prover_autoscaler.rs | 43 ++ .../src/proto/config/prover_autoscaler.proto | 15 +- .../protobuf_config/src/prover_autoscaler.rs | 61 +++ .../prover_autoscaler/src/cluster_types.rs | 2 + .../prover_autoscaler/src/global/queuer.rs | 49 +- .../prover_autoscaler/src/global/scaler.rs | 432 ++++++++++++++++-- .../bin/prover_autoscaler/src/k8s/watcher.rs | 5 + .../bin/prover_autoscaler/src/metrics.rs | 5 +- 9 files changed, 575 insertions(+), 56 deletions(-) diff --git a/core/lib/basic_types/src/prover_dal.rs b/core/lib/basic_types/src/prover_dal.rs index bec5a55ced1f..d86f79ba77aa 100644 --- a/core/lib/basic_types/src/prover_dal.rs +++ b/core/lib/basic_types/src/prover_dal.rs @@ -28,12 +28,6 @@ pub struct ExtendedJobCountStatistics { pub successful: usize, } -#[derive(Debug, Clone, Copy, Default, Serialize, Deserialize)] -pub struct JobCountStatistics { - pub queued: usize, - pub in_progress: usize, -} - impl Add for ExtendedJobCountStatistics { type Output = ExtendedJobCountStatistics; @@ -47,6 +41,19 @@ impl Add for ExtendedJobCountStatistics { } } +#[derive(Debug, Clone, Copy, Default, Serialize, Deserialize)] +pub struct JobCountStatistics { + pub queued: usize, + pub in_progress: usize, +} + +impl JobCountStatistics { + /// all returns sum of queued and in_progress. + pub fn all(&self) -> usize { + self.queued + self.in_progress + } +} + #[derive(Debug)] pub struct StuckJobs { pub id: u64, diff --git a/core/lib/config/src/configs/prover_autoscaler.rs b/core/lib/config/src/configs/prover_autoscaler.rs index b24a1a26651f..d345b53e6f31 100644 --- a/core/lib/config/src/configs/prover_autoscaler.rs +++ b/core/lib/config/src/configs/prover_autoscaler.rs @@ -61,6 +61,8 @@ pub struct ProverAutoscalerScalerConfig { /// Duration after which pending pod considered long pending. 
#[serde(default = "ProverAutoscalerScalerConfig::default_long_pending_duration")] pub long_pending_duration: Duration, + /// List of simple autoscaler targets. + pub scaler_targets: Vec, } #[derive( @@ -93,6 +95,41 @@ pub enum Gpu { A100, } +// TODO: generate this enum by QueueReport from https://github.com/matter-labs/zksync-era/blob/main/prover/crates/bin/prover_job_monitor/src/autoscaler_queue_reporter.rs#L23 +// and remove allowing of non_camel_case_types by generating field name parser. +#[derive(Debug, Display, PartialEq, Eq, Hash, Clone, Deserialize, EnumString, Default)] +#[allow(non_camel_case_types)] +pub enum QueueReportFields { + #[strum(ascii_case_insensitive)] + basic_witness_jobs, + #[strum(ascii_case_insensitive)] + leaf_witness_jobs, + #[strum(ascii_case_insensitive)] + node_witness_jobs, + #[strum(ascii_case_insensitive)] + recursion_tip_witness_jobs, + #[strum(ascii_case_insensitive)] + scheduler_witness_jobs, + #[strum(ascii_case_insensitive)] + proof_compressor_jobs, + #[default] + #[strum(ascii_case_insensitive)] + prover_jobs, +} + +/// ScalerTarget can be configured to autoscale any of services for which queue is reported by +/// prover-job-monitor, except of provers. Provers need special treatment due to GPU requirement. +#[derive(Debug, Clone, PartialEq, Deserialize, Default)] +pub struct ScalerTarget { + pub queue_report_field: QueueReportFields, + pub pod_name_prefix: String, + /// Max replicas per cluster. + pub max_replicas: HashMap, + /// The queue will be divided by the speed and rounded up to get number of replicas. + #[serde(default = "ScalerTarget::default_speed")] + pub speed: usize, +} + impl ProverAutoscalerConfig { /// Default graceful shutdown timeout -- 5 seconds pub fn default_graceful_shutdown_timeout() -> Duration { @@ -126,3 +163,9 @@ impl ProverAutoscalerScalerConfig { Duration::minutes(10) } } + +impl ScalerTarget { + pub fn default_speed() -> usize { + 1 + } +} diff --git a/core/lib/protobuf_config/src/proto/config/prover_autoscaler.proto b/core/lib/protobuf_config/src/proto/config/prover_autoscaler.proto index 9b7f201e9b77..0f723e22a93f 100644 --- a/core/lib/protobuf_config/src/proto/config/prover_autoscaler.proto +++ b/core/lib/protobuf_config/src/proto/config/prover_autoscaler.proto @@ -45,15 +45,28 @@ message MinProver { optional uint32 min = 2; // required } +message MaxReplica { + optional string cluster = 1; // required + optional uint64 max = 2; // required +} + +message ScalerTarget { + optional string queue_report_field = 1; // required + optional string pod_name_prefix = 2; // required + repeated MaxReplica max_replicas = 3; // required at least one + optional uint64 speed = 4; // optional +} + message ProverAutoscalerScalerConfig { optional uint32 prometheus_port = 1; // required optional std.Duration scaler_run_interval = 2; // optional optional string prover_job_monitor_url = 3; // required repeated string agents = 4; // required at least one - repeated ProtocolVersion protocol_versions = 5; // repeated at least one + repeated ProtocolVersion protocol_versions = 5; // required at least one repeated ClusterPriority cluster_priorities = 6; // optional repeated ProverSpeed prover_speed = 7; // optional optional uint32 long_pending_duration_s = 8; // optional repeated MaxProver max_provers = 9; // optional repeated MinProver min_provers = 10; // optional + repeated ScalerTarget scaler_targets = 11; // optional } diff --git a/core/lib/protobuf_config/src/prover_autoscaler.rs b/core/lib/protobuf_config/src/prover_autoscaler.rs index 
51f1b162d4cf..c3e7c9719f13 100644 --- a/core/lib/protobuf_config/src/prover_autoscaler.rs +++ b/core/lib/protobuf_config/src/prover_autoscaler.rs @@ -112,6 +112,12 @@ impl ProtoRepr for proto::ProverAutoscalerScalerConfig { .map(|(i, e)| e.read().context(i)) .collect::>() .context("min_provers")?, + scaler_targets: self + .scaler_targets + .iter() + .enumerate() + .map(|(i, x)| x.read().context(i).unwrap()) + .collect::>(), }) } @@ -151,6 +157,7 @@ impl ProtoRepr for proto::ProverAutoscalerScalerConfig { .iter() .map(|(k, v)| proto::MinProver::build(&(k.clone(), *v))) .collect(), + scaler_targets: this.scaler_targets.iter().map(ProtoRepr::build).collect(), } } } @@ -238,3 +245,57 @@ impl ProtoRepr for proto::MinProver { } } } + +impl ProtoRepr for proto::MaxReplica { + type Type = (String, usize); + fn read(&self) -> anyhow::Result { + Ok(( + required(&self.cluster).context("cluster")?.parse()?, + *required(&self.max).context("max")? as usize, + )) + } + fn build(this: &Self::Type) -> Self { + Self { + cluster: Some(this.0.to_string()), + max: Some(this.1 as u64), + } + } +} + +impl ProtoRepr for proto::ScalerTarget { + type Type = configs::prover_autoscaler::ScalerTarget; + fn read(&self) -> anyhow::Result { + Ok(Self::Type { + queue_report_field: required(&self.queue_report_field) + .and_then(|x| Ok((*x).parse()?)) + .context("queue_report_field")?, + pod_name_prefix: required(&self.pod_name_prefix) + .context("pod_name_prefix")? + .clone(), + max_replicas: self + .max_replicas + .iter() + .enumerate() + .map(|(i, e)| e.read().context(i)) + .collect::>() + .context("max_replicas")?, + speed: match self.speed { + Some(x) => x as usize, + None => Self::Type::default_speed(), + }, + }) + } + + fn build(this: &Self::Type) -> Self { + Self { + queue_report_field: Some(this.queue_report_field.to_string()), + pod_name_prefix: Some(this.pod_name_prefix.clone()), + max_replicas: this + .max_replicas + .iter() + .map(|(k, v)| proto::MaxReplica::build(&(k.clone(), *v))) + .collect(), + speed: Some(this.speed as u64), + } + } +} diff --git a/prover/crates/bin/prover_autoscaler/src/cluster_types.rs b/prover/crates/bin/prover_autoscaler/src/cluster_types.rs index e3e4c9b4df0d..db215e570ef8 100644 --- a/prover/crates/bin/prover_autoscaler/src/cluster_types.rs +++ b/prover/crates/bin/prover_autoscaler/src/cluster_types.rs @@ -40,6 +40,7 @@ pub struct Namespace { #[serde(serialize_with = "ordered_map")] pub deployments: HashMap, pub pods: HashMap, + #[serde(default)] pub scale_errors: Vec, } @@ -64,4 +65,5 @@ pub enum PodStatus { Pending, LongPending, NeedToMove, + Failed, } diff --git a/prover/crates/bin/prover_autoscaler/src/global/queuer.rs b/prover/crates/bin/prover_autoscaler/src/global/queuer.rs index 32610ebf3c3d..e2cd1c6a4fb2 100644 --- a/prover/crates/bin/prover_autoscaler/src/global/queuer.rs +++ b/prover/crates/bin/prover_autoscaler/src/global/queuer.rs @@ -1,17 +1,22 @@ -use std::collections::HashMap; +use std::{collections::HashMap, ops::Deref}; use anyhow::{Context, Ok}; use reqwest::Method; -use zksync_prover_job_monitor::autoscaler_queue_reporter::VersionedQueueReport; +use zksync_config::configs::prover_autoscaler::QueueReportFields; +use zksync_prover_job_monitor::autoscaler_queue_reporter::{QueueReport, VersionedQueueReport}; use zksync_utils::http_with_retries::send_request_with_retries; use crate::metrics::{AUTOSCALER_METRICS, DEFAULT_ERROR_CODE}; const MAX_RETRIES: usize = 5; -#[derive(Debug)] -pub struct Queue { - pub queue: HashMap, +pub struct Queue(HashMap<(String, 
QueueReportFields), u64>); + +impl Deref for Queue { + type Target = HashMap<(String, QueueReportFields), u64>; + fn deref(&self) -> &Self::Target { + &self.0 + } } #[derive(Default)] @@ -19,6 +24,19 @@ pub struct Queuer { pub prover_job_monitor_url: String, } +fn target_to_queue(target: &QueueReportFields, report: &QueueReport) -> u64 { + let res = match target { + QueueReportFields::basic_witness_jobs => report.basic_witness_jobs.all(), + QueueReportFields::leaf_witness_jobs => report.leaf_witness_jobs.all(), + QueueReportFields::node_witness_jobs => report.node_witness_jobs.all(), + QueueReportFields::recursion_tip_witness_jobs => report.recursion_tip_witness_jobs.all(), + QueueReportFields::scheduler_witness_jobs => report.scheduler_witness_jobs.all(), + QueueReportFields::proof_compressor_jobs => report.proof_compressor_jobs.all(), + QueueReportFields::prover_jobs => report.prover_jobs.all(), + }; + res as u64 +} + impl Queuer { pub fn new(pjm_url: String) -> Self { Self { @@ -26,12 +44,14 @@ impl Queuer { } } - pub async fn get_queue(&self) -> anyhow::Result { + /// Requests queue report from prover-job-monitor and parse it into Queue HashMap for provided + /// list of jobs. + pub async fn get_queue(&self, jobs: &[QueueReportFields]) -> anyhow::Result { let url = &self.prover_job_monitor_url; let response = send_request_with_retries(url, MAX_RETRIES, Method::GET, None, None).await; let response = response.map_err(|err| { AUTOSCALER_METRICS.calls[&(url.clone(), DEFAULT_ERROR_CODE)].inc(); - anyhow::anyhow!("Failed fetching queue from url: {url}: {err:?}") + anyhow::anyhow!("Failed fetching queue from URL: {url}: {err:?}") })?; AUTOSCALER_METRICS.calls[&(url.clone(), response.status().as_u16())].inc(); @@ -39,11 +59,18 @@ impl Queuer { .json::>() .await .context("Failed to read response as json")?; - Ok(Queue { - queue: response + Ok(Queue( + response .iter() - .map(|x| (x.version.to_string(), x.report.prover_jobs.queued as u64)) + .flat_map(|versioned_report| { + jobs.iter().map(move |j| { + ( + (versioned_report.version.to_string(), j.clone()), + target_to_queue(j, &versioned_report.report), + ) + }) + }) .collect::>(), - }) + )) } } diff --git a/prover/crates/bin/prover_autoscaler/src/global/scaler.rs b/prover/crates/bin/prover_autoscaler/src/global/scaler.rs index eb4249d071fe..1bdd2b251040 100644 --- a/prover/crates/bin/prover_autoscaler/src/global/scaler.rs +++ b/prover/crates/bin/prover_autoscaler/src/global/scaler.rs @@ -4,7 +4,9 @@ use chrono::Utc; use debug_map_sorted::SortedOutputExt; use once_cell::sync::Lazy; use regex::Regex; -use zksync_config::configs::prover_autoscaler::{Gpu, ProverAutoscalerScalerConfig}; +use zksync_config::configs::prover_autoscaler::{ + Gpu, ProverAutoscalerScalerConfig, QueueReportFields, ScalerTarget, +}; use super::{queuer, watcher}; use crate::{ @@ -65,6 +67,12 @@ pub struct Scaler { watcher: watcher::Watcher, queuer: queuer::Queuer, + jobs: Vec, + prover_scaler: GpuScaler, + simple_scalers: Vec, +} + +pub struct GpuScaler { /// Which cluster to use first. cluster_priorities: HashMap, min_provers: HashMap, @@ -73,6 +81,16 @@ pub struct Scaler { long_pending_duration: chrono::Duration, } +pub struct SimpleScaler { + queue_report_field: QueueReportFields, + pod_name_prefix: String, + /// Which cluster to use first. 
+ cluster_priorities: HashMap, + max_replicas: HashMap, + speed: usize, + long_pending_duration: chrono::Duration, +} + struct ProverPodGpu<'a> { name: &'a str, pod: &'a Pod, @@ -102,10 +120,31 @@ impl Scaler { AUTOSCALER_METRICS.prover_protocol_version[&(namespace.clone(), version.clone())] .set(1); }); + + let mut simple_scalers = Vec::default(); + let mut jobs = vec![QueueReportFields::prover_jobs]; + for c in &config.scaler_targets { + jobs.push(c.queue_report_field.clone()); + simple_scalers.push(SimpleScaler::new( + c, + config.cluster_priorities.clone(), + chrono::Duration::seconds(config.long_pending_duration.whole_seconds()), + )) + } Self { - namespaces: config.protocol_versions, + namespaces: config.protocol_versions.clone(), watcher, queuer, + jobs, + prover_scaler: GpuScaler::new(config), + simple_scalers, + } + } +} + +impl GpuScaler { + pub fn new(config: ProverAutoscalerScalerConfig) -> Self { + Self { cluster_priorities: config.cluster_priorities, min_provers: config.min_provers, max_provers: config.max_provers, @@ -116,6 +155,7 @@ impl Scaler { } } + /// Converts a single cluster into vec of GPUPools, one for each GPU. fn convert_to_gpu_pool(&self, namespace: &String, cluster: &Cluster) -> Vec { let mut gp_map = HashMap::new(); // let Some(namespace_value) = &cluster.namespaces.get(namespace) else { @@ -218,6 +258,10 @@ impl Scaler { .then(b.max_pool_size.cmp(&a.max_pool_size)) // Reverse sort by cluster size. }); + gpu_pools.iter().for_each(|p| { + AUTOSCALER_METRICS.scale_errors[&p.name.clone()].set(p.scale_errors as u64); + }); + gpu_pools } @@ -323,6 +367,192 @@ impl Scaler { } } +#[derive(Default, Debug, PartialEq, Eq)] +struct Pool { + name: String, + pods: HashMap, + scale_errors: usize, + max_pool_size: usize, +} + +impl Pool { + fn sum_by_pod_status(&self, ps: PodStatus) -> usize { + self.pods.get(&ps).cloned().unwrap_or(0) + } +} + +impl SimpleScaler { + pub fn new( + config: &ScalerTarget, + cluster_priorities: HashMap, + long_pending_duration: chrono::Duration, + ) -> Self { + Self { + queue_report_field: config.queue_report_field.clone(), + pod_name_prefix: config.pod_name_prefix.clone(), + cluster_priorities, + max_replicas: config.max_replicas.clone(), + speed: config.speed, + long_pending_duration, + } + } + + fn convert_to_pool(&self, namespace: &String, cluster: &Cluster) -> Option { + let Some(namespace_value) = &cluster.namespaces.get(namespace) else { + // No namespace in config, ignoring. + return None; + }; + + // TODO: Check if related deployment exists. + let mut pool = Pool { + name: cluster.name.clone(), + max_pool_size: self.max_replicas.get(&cluster.name).copied().unwrap_or(0), + scale_errors: namespace_value + .scale_errors + .iter() + .filter(|v| v.time < Utc::now() - chrono::Duration::hours(1)) // TODO Move the duration into config. + .count(), + ..Default::default() + }; + + // Initialize pool only if we have ready deployments. 
+ pool.pods.insert(PodStatus::Running, 0); + + let pod_re = Regex::new(&format!("^{}-", self.pod_name_prefix)).unwrap(); + for (_, pod) in namespace_value + .pods + .iter() + .filter(|(name, _)| pod_re.is_match(name)) + { + let mut status = PodStatus::from_str(&pod.status).unwrap_or_default(); + if status == PodStatus::Pending && pod.changed < Utc::now() - self.long_pending_duration + { + status = PodStatus::LongPending; + } + pool.pods.entry(status).and_modify(|n| *n += 1).or_insert(1); + } + + tracing::debug!("Pool pods {:?}", pool); + + Some(pool) + } + + fn sorted_clusters(&self, namespace: &String, clusters: &Clusters) -> Vec { + let mut pools: Vec = clusters + .clusters + .values() + .flat_map(|c| self.convert_to_pool(namespace, c)) + .collect(); + + pools.sort_by(|a, b| { + a.sum_by_pod_status(PodStatus::NeedToMove) + .cmp(&b.sum_by_pod_status(PodStatus::NeedToMove)) // Sort by need to evict. + .then( + a.sum_by_pod_status(PodStatus::LongPending) + .cmp(&b.sum_by_pod_status(PodStatus::LongPending)), + ) // Sort by long Pending pods. + .then(a.scale_errors.cmp(&b.scale_errors)) // Sort by scale_errors in the cluster. + .then( + self.cluster_priorities + .get(&a.name) + .unwrap_or(&1000) + .cmp(self.cluster_priorities.get(&b.name).unwrap_or(&1000)), + ) // Sort by priority. + .then(b.max_pool_size.cmp(&a.max_pool_size)) // Reverse sort by cluster size. + }); + + pools + } + + fn pods_to_speed(&self, n: usize) -> u64 { + (self.speed * n) as u64 + } + + fn normalize_queue(&self, queue: u64) -> u64 { + let speed = self.speed as u64; + // Divide and round up if there's any remainder. + (queue + speed - 1) / speed * speed + } + + fn run(&self, namespace: &String, queue: u64, clusters: &Clusters) -> HashMap { + let sorted_clusters = self.sorted_clusters(namespace, clusters); + tracing::debug!( + "Sorted clusters for namespace {}: {:?}", + namespace, + &sorted_clusters + ); + + let mut total: i64 = 0; + let mut pods: HashMap = HashMap::new(); + for cluster in &sorted_clusters { + for (status, replicas) in &cluster.pods { + match status { + PodStatus::Running | PodStatus::Pending => { + total += self.pods_to_speed(*replicas) as i64; + pods.entry(cluster.name.clone()) + .and_modify(|x| *x += replicas) + .or_insert(*replicas); + } + _ => (), // Ignore LongPending as not running here. + } + } + } + + // Remove unneeded pods. + if (total as u64) > self.normalize_queue(queue) { + for cluster in sorted_clusters.iter().rev() { + let mut excess_queue = total as u64 - self.normalize_queue(queue); + let mut excess_pods = excess_queue as usize / self.speed; + let replicas = pods.entry(cluster.name.clone()).or_default(); + if *replicas < excess_pods { + excess_pods = *replicas; + excess_queue = *replicas as u64 * self.speed as u64; + } + *replicas -= excess_pods; + total -= excess_queue as i64; + if total <= 0 { + break; + }; + } + } + + // Reduce load in over capacity pools. + for cluster in &sorted_clusters { + let replicas = pods.entry(cluster.name.clone()).or_default(); + if cluster.max_pool_size < *replicas { + let excess = *replicas - cluster.max_pool_size; + total -= (excess * self.speed) as i64; + *replicas -= excess; + } + } + + tracing::debug!("Queue covered with provers: {}", total); + // Add required pods. 
+ if (total as u64) < queue { + for cluster in &sorted_clusters { + let mut required_queue = queue - total as u64; + let mut required_pods = self.normalize_queue(required_queue) as usize / self.speed; + let replicas = pods.entry(cluster.name.clone()).or_default(); + if *replicas + required_pods > cluster.max_pool_size { + required_pods = cluster.max_pool_size - *replicas; + required_queue = (required_pods * self.speed) as u64; + } + *replicas += required_pods; + total += required_queue as i64; + } + } + + tracing::debug!( + "run result for namespace {}: provers {:?}, total: {}", + namespace, + &pods, + total + ); + + pods + } +} + fn diff( namespace: &str, provers: HashMap, @@ -383,7 +613,7 @@ fn is_namespace_running(namespace: &str, clusters: &Clusters) -> bool { #[async_trait::async_trait] impl Task for Scaler { async fn invoke(&self) -> anyhow::Result<()> { - let queue = self.queuer.get_queue().await.unwrap(); + let queue = self.queuer.get_queue(&self.jobs).await.unwrap(); let mut scale_requests: HashMap = HashMap::new(); { @@ -396,16 +626,38 @@ impl Task for Scaler { } for (ns, ppv) in &self.namespaces { - let q = queue.queue.get(ppv).cloned().unwrap_or(0); + // Prover + let q = queue + .get(&(ppv.to_string(), QueueReportFields::prover_jobs)) + .cloned() + .unwrap_or(0); tracing::debug!("Running eval for namespace {ns} and PPV {ppv} found queue {q}"); if q > 0 || is_namespace_running(ns, &guard.clusters) { - let provers = self.run(ns, q, &guard.clusters); + let provers = self.prover_scaler.run(ns, q, &guard.clusters); for (k, num) in &provers { AUTOSCALER_METRICS.provers[&(k.cluster.clone(), ns.clone(), k.gpu)] .set(*num as u64); } diff(ns, provers, &guard.clusters, &mut scale_requests); } + + // Simple Scalers. + for scaler in &self.simple_scalers { + let q = queue + .get(&(ppv.to_string(), scaler.queue_report_field.clone())) + .cloned() + .unwrap_or(0); + tracing::debug!("Running eval for namespace {ns}, PPV {ppv}, simple scaler {} found queue {q}", scaler.pod_name_prefix); + if q > 0 || is_namespace_running(ns, &guard.clusters) { + let pods = scaler.run(ns, q, &guard.clusters); + for (k, num) in &pods { + AUTOSCALER_METRICS.jobs + [&(scaler.pod_name_prefix.clone(), k.clone(), ns.clone())] + .set(*num as u64); + } + // TODO: diff and add into scale_requests. + } + } } } // Unlock self.watcher.data. 
@@ -420,28 +672,21 @@ impl Task for Scaler { #[cfg(test)] mod tests { use super::*; - use crate::{ - cluster_types::{Deployment, Namespace, Pod}, - global::{queuer, watcher}, - }; + use crate::cluster_types::{Deployment, Namespace, Pod, ScaleEvent}; #[tracing_test::traced_test] #[test] fn test_run() { - let scaler = Scaler::new( - watcher::Watcher::default(), - queuer::Queuer::default(), - ProverAutoscalerScalerConfig { - cluster_priorities: [("foo".into(), 0), ("bar".into(), 10)].into(), - min_provers: [("prover-other".into(), 2)].into(), - max_provers: [ - ("foo".into(), [(Gpu::L4, 100)].into()), - ("bar".into(), [(Gpu::L4, 100)].into()), - ] - .into(), - ..Default::default() - }, - ); + let scaler = GpuScaler::new(ProverAutoscalerScalerConfig { + cluster_priorities: [("foo".into(), 0), ("bar".into(), 10)].into(), + min_provers: [("prover-other".into(), 2)].into(), + max_provers: [ + ("foo".into(), [(Gpu::L4, 100)].into()), + ("bar".into(), [(Gpu::L4, 100)].into()), + ] + .into(), + ..Default::default() + }); assert_eq!( scaler.run( @@ -570,20 +815,16 @@ mod tests { #[tracing_test::traced_test] #[test] fn test_run_min_provers() { - let scaler = Scaler::new( - watcher::Watcher::default(), - queuer::Queuer::default(), - ProverAutoscalerScalerConfig { - cluster_priorities: [("foo".into(), 0), ("bar".into(), 10)].into(), - min_provers: [("prover".into(), 2)].into(), - max_provers: [ - ("foo".into(), [(Gpu::L4, 100)].into()), - ("bar".into(), [(Gpu::L4, 100)].into()), - ] - .into(), - ..Default::default() - }, - ); + let scaler = GpuScaler::new(ProverAutoscalerScalerConfig { + cluster_priorities: [("foo".into(), 0), ("bar".into(), 10)].into(), + min_provers: [("prover".into(), 2)].into(), + max_provers: [ + ("foo".into(), [(Gpu::L4, 100)].into()), + ("bar".into(), [(Gpu::L4, 100)].into()), + ] + .into(), + ..Default::default() + }); assert_eq!( scaler.run( @@ -765,4 +1006,121 @@ mod tests { "Min 2 provers, 5 running" ); } + + #[tracing_test::traced_test] + #[test] + fn test_run_need_move() { + let scaler = GpuScaler::new(ProverAutoscalerScalerConfig { + cluster_priorities: [("foo".into(), 0), ("bar".into(), 10)].into(), + min_provers: [("prover".into(), 2)].into(), + max_provers: [ + ("foo".into(), [(Gpu::L4, 100)].into()), + ("bar".into(), [(Gpu::L4, 100)].into()), + ] + .into(), + long_pending_duration: ProverAutoscalerScalerConfig::default_long_pending_duration(), + ..Default::default() + }); + + assert_eq!( + scaler.run( + &"prover".into(), + 1400, + &Clusters { + clusters: [ + ( + "foo".into(), + Cluster { + name: "foo".into(), + namespaces: [( + "prover".into(), + Namespace { + deployments: [( + "circuit-prover-gpu".into(), + Deployment { + running: 3, + desired: 3, + }, + )] + .into(), + pods: [ + ( + "circuit-prover-gpu-7c5f8fc747-gmtcr".into(), + Pod { + status: "Running".into(), + changed: Utc::now(), + ..Default::default() + }, + ), + ( + "circuit-prover-gpu-7c5f8fc747-gmtc2".into(), + Pod { + status: "Pending".into(), + changed: Utc::now(), + ..Default::default() + }, + ), + ( + "circuit-prover-gpu-7c5f8fc747-gmtc3".into(), + Pod { + status: "Running".into(), + changed: Utc::now(), + ..Default::default() + }, + ) + ] + .into(), + scale_errors: vec![ScaleEvent { + name: "circuit-prover-gpu-7c5f8fc747-gmtc2.123456" + .into(), + time: Utc::now() - chrono::Duration::hours(1) + }], + }, + )] + .into(), + }, + ), + ( + "bar".into(), + Cluster { + name: "bar".into(), + namespaces: [( + "prover".into(), + Namespace { + deployments: [( + "circuit-prover-gpu".into(), + 
Deployment::default(), + )] + .into(), + ..Default::default() + }, + )] + .into(), + }, + ) + ] + .into(), + ..Default::default() + }, + ), + [ + ( + GPUPoolKey { + cluster: "foo".into(), + gpu: Gpu::L4, + }, + 2, + ), + ( + GPUPoolKey { + cluster: "bar".into(), + gpu: Gpu::L4, + }, + 1, + ) + ] + .into(), + "Move 1 prover to bar" + ); + } } diff --git a/prover/crates/bin/prover_autoscaler/src/k8s/watcher.rs b/prover/crates/bin/prover_autoscaler/src/k8s/watcher.rs index 5384db082bc7..707ff04f1836 100644 --- a/prover/crates/bin/prover_autoscaler/src/k8s/watcher.rs +++ b/prover/crates/bin/prover_autoscaler/src/k8s/watcher.rs @@ -134,6 +134,11 @@ impl Watcher { } pod.status = phase; + if pod.status == "Succeeded" || pod.status == "Failed" { + // Cleaning up list of pods. + v.pods.remove(&p.name_any()); + } + tracing::info!("Got pod: {}", p.name_any()) } Watched::Event(e) => { diff --git a/prover/crates/bin/prover_autoscaler/src/metrics.rs b/prover/crates/bin/prover_autoscaler/src/metrics.rs index d94ac8b97e97..853e3db000f1 100644 --- a/prover/crates/bin/prover_autoscaler/src/metrics.rs +++ b/prover/crates/bin/prover_autoscaler/src/metrics.rs @@ -10,10 +10,13 @@ pub(crate) struct AutoscalerMetrics { pub prover_protocol_version: LabeledFamily<(String, String), Gauge, 2>, #[metrics(labels = ["target_cluster", "target_namespace", "gpu"])] pub provers: LabeledFamily<(String, String, Gpu), Gauge, 3>, + #[metrics(labels = ["job", "target_cluster", "target_namespace"])] + pub jobs: LabeledFamily<(String, String, String), Gauge, 3>, pub clusters_not_ready: Counter, #[metrics(labels = ["target", "status"])] pub calls: LabeledFamily<(String, u16), Counter, 2>, - // TODO: count of command send succes/fail + #[metrics(labels = ["target_cluster"])] + pub scale_errors: LabeledFamily, 1>, } #[vise::register] From 5161eeda5905d33f4d038a2a04ced3e06f39d593 Mon Sep 17 00:00:00 2001 From: Dima Zhornyk <55756184+dimazhornyk@users.noreply.github.com> Date: Tue, 29 Oct 2024 16:08:19 +0100 Subject: [PATCH 24/32] feat(da-clients): add EigenDA client (#3155) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit ## What ❔ This PR adds an EigenDA client. The implementation uses the gRPC streams to send the authenticated requests to dispatch the blob. The protogen situation is very similar to Celestia, we use the generated files as a temporary solution until there is a separate crate that provides those. This kind of function can be used to generate them in the future: ```rust pub fn compile_protos() { let fds = protox::compile( [ "proto/common.proto", "proto/disperser.proto", ], ["."], ) .expect("protox failed to build"); tonic_build::configure() .build_client(true) .build_server(false) .skip_protoc_run() .out_dir("generated") .compile_fds(fds) .unwrap(); } ``` Example config: ``` da_client: eigen: rpc_node_url: https://disperser-holesky.eigenda.xyz:443 inclusion_polling_interval_ms: 10000 ``` secrets: ``` da: eigen: private_key: PRIVATE_KEY_WITHOUT_0x_PREFIX ``` ## Why ❔ To enable EigenDA in ZK stack ## Checklist - [ ] PR title corresponds to the body of PR (we generate changelog entries from PRs). - [ ] Tests for the changes have been added / updated. - [ ] Documentation comments have been added / updated. - [ ] Code has been formatted via `zkstack dev fmt` and `zkstack dev lint`. 
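
For reviewers: a minimal usage sketch of the new client behind the shared `DataAvailabilityClient` trait. Crate/module paths (`zksync_da_clients`) and the error/`Debug` conversions are assumptions here, and the config/secret values simply mirror the examples above; the real wiring goes through the `EigenWiringLayer` added in the node framework.

```rust
// Hedged sketch: crate paths and Debug impls are assumptions, not the authoritative API;
// see `EigenClient` / `RawEigenClient` introduced in this PR for the actual implementation.
use zksync_config::{configs::da_client::eigen::EigenSecrets, EigenConfig};
use zksync_da_client::DataAvailabilityClient;
use zksync_da_clients::eigen::EigenClient;

async fn dispatch_example() -> anyhow::Result<()> {
    let config = EigenConfig {
        rpc_node_url: "https://disperser-holesky.eigenda.xyz:443".to_string(),
        inclusion_polling_interval_ms: 10_000,
    };
    let secrets = EigenSecrets {
        // Hex-encoded secp256k1 key without the 0x prefix, as in the secrets example above.
        private_key: "PRIVATE_KEY_WITHOUT_0x_PREFIX".parse()?,
    };
    let client = EigenClient::new(config, secrets).await?;
    // The batch number argument is currently unused by the client; the blob is sent over
    // the authenticated gRPC stream and a blob id is returned once the request is accepted.
    let dispatch_result = client.dispatch_blob(1, b"pubdata".to_vec()).await;
    println!("dispatch result: {dispatch_result:?}");
    Ok(())
}
```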
--- Cargo.lock | 5 + Cargo.toml | 5 +- core/bin/zksync_server/src/node_builder.rs | 8 +- .../lib/config/src/configs/da_client/eigen.rs | 13 + core/lib/config/src/configs/da_client/mod.rs | 5 +- core/lib/config/src/configs/mod.rs | 2 +- core/lib/config/src/configs/secrets.rs | 3 +- core/lib/config/src/lib.rs | 6 +- core/lib/env_config/src/da_client.rs | 37 +- core/lib/protobuf_config/src/da_client.rs | 15 +- .../src/proto/config/da_client.proto | 6 + .../src/proto/config/secrets.proto | 5 + core/lib/protobuf_config/src/secrets.rs | 10 +- core/node/da_clients/Cargo.toml | 6 +- core/node/da_clients/README.md | 2 + core/node/da_clients/src/eigen/README.md | 35 ++ core/node/da_clients/src/eigen/client.rs | 65 +++ .../da_clients/src/eigen/generated/common.rs | 63 +++ .../src/eigen/generated/disperser.rs | 517 ++++++++++++++++++ core/node/da_clients/src/eigen/mod.rs | 14 + core/node/da_clients/src/eigen/sdk.rs | 217 ++++++++ core/node/da_clients/src/lib.rs | 1 + core/node/da_clients/src/no_da.rs | 2 +- .../layers/da_clients/eigen.rs | 46 ++ .../implementations/layers/da_clients/mod.rs | 1 + 25 files changed, 1071 insertions(+), 18 deletions(-) create mode 100644 core/lib/config/src/configs/da_client/eigen.rs create mode 100644 core/node/da_clients/src/eigen/README.md create mode 100644 core/node/da_clients/src/eigen/client.rs create mode 100644 core/node/da_clients/src/eigen/generated/common.rs create mode 100644 core/node/da_clients/src/eigen/generated/disperser.rs create mode 100644 core/node/da_clients/src/eigen/mod.rs create mode 100644 core/node/da_clients/src/eigen/sdk.rs create mode 100644 core/node/node_framework/src/implementations/layers/da_clients/eigen.rs diff --git a/Cargo.lock b/Cargo.lock index 0554982e157a..9f94faea781c 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -9106,7 +9106,10 @@ dependencies = [ "percent-encoding", "pin-project", "prost 0.12.6", + "rustls-pemfile 2.2.0", + "rustls-pki-types", "tokio", + "tokio-rustls 0.25.0", "tokio-stream", "tower 0.4.13", "tower-layer", @@ -10939,8 +10942,10 @@ dependencies = [ "subxt-metadata", "subxt-signer", "tokio", + "tokio-stream", "tonic 0.11.0", "tracing", + "zksync_basic_types", "zksync_config", "zksync_da_client", "zksync_env_config", diff --git a/Cargo.toml b/Cargo.toml index 5da7612171f9..e7cce4c4c421 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -215,9 +215,12 @@ subxt-signer = { version = "0.34", default-features = false } celestia-types = "0.6.1" bech32 = "0.11.0" ripemd = "0.1.3" -tonic = "0.11.0" +tonic = { version = "0.11.0", default-features = false } pbjson-types = "0.6.0" +# Eigen +tokio-stream = "0.1.16" + # Here and below: # We *always* pin the latest version of protocol to disallow accidental changes in the execution logic. # However, for the historical version of protocol crates, we have lax requirements. 
Otherwise, diff --git a/core/bin/zksync_server/src/node_builder.rs b/core/bin/zksync_server/src/node_builder.rs index c9d99cc0783f..e7a3dca77f15 100644 --- a/core/bin/zksync_server/src/node_builder.rs +++ b/core/bin/zksync_server/src/node_builder.rs @@ -26,8 +26,8 @@ use zksync_node_framework::{ consensus::MainNodeConsensusLayer, contract_verification_api::ContractVerificationApiLayer, da_clients::{ - avail::AvailWiringLayer, celestia::CelestiaWiringLayer, no_da::NoDAClientWiringLayer, - object_store::ObjectStorageClientWiringLayer, + avail::AvailWiringLayer, celestia::CelestiaWiringLayer, eigen::EigenWiringLayer, + no_da::NoDAClientWiringLayer, object_store::ObjectStorageClientWiringLayer, }, da_dispatcher::DataAvailabilityDispatcherLayer, eth_sender::{EthTxAggregatorLayer, EthTxManagerLayer}, @@ -517,6 +517,10 @@ impl MainNodeBuilder { .add_layer(CelestiaWiringLayer::new(config, secret)); } + (DAClientConfig::Eigen(config), DataAvailabilitySecrets::Eigen(secret)) => { + self.node.add_layer(EigenWiringLayer::new(config, secret)); + } + (DAClientConfig::ObjectStore(config), _) => { self.node .add_layer(ObjectStorageClientWiringLayer::new(config)); diff --git a/core/lib/config/src/configs/da_client/eigen.rs b/core/lib/config/src/configs/da_client/eigen.rs new file mode 100644 index 000000000000..f2c05a0f61ef --- /dev/null +++ b/core/lib/config/src/configs/da_client/eigen.rs @@ -0,0 +1,13 @@ +use serde::Deserialize; +use zksync_basic_types::secrets::PrivateKey; + +#[derive(Clone, Debug, Default, PartialEq, Deserialize)] +pub struct EigenConfig { + pub rpc_node_url: String, + pub inclusion_polling_interval_ms: u64, +} + +#[derive(Clone, Debug, PartialEq)] +pub struct EigenSecrets { + pub private_key: PrivateKey, +} diff --git a/core/lib/config/src/configs/da_client/mod.rs b/core/lib/config/src/configs/da_client/mod.rs index 4806d7ed0996..322c4a20aac8 100644 --- a/core/lib/config/src/configs/da_client/mod.rs +++ b/core/lib/config/src/configs/da_client/mod.rs @@ -1,15 +1,18 @@ -use crate::{AvailConfig, CelestiaConfig, ObjectStoreConfig}; +use crate::{AvailConfig, CelestiaConfig, EigenConfig, ObjectStoreConfig}; pub mod avail; pub mod celestia; +pub mod eigen; pub const AVAIL_CLIENT_CONFIG_NAME: &str = "Avail"; pub const CELESTIA_CLIENT_CONFIG_NAME: &str = "Celestia"; +pub const EIGEN_CLIENT_CONFIG_NAME: &str = "Eigen"; pub const OBJECT_STORE_CLIENT_CONFIG_NAME: &str = "ObjectStore"; #[derive(Debug, Clone, PartialEq)] pub enum DAClientConfig { Avail(AvailConfig), Celestia(CelestiaConfig), + Eigen(EigenConfig), ObjectStore(ObjectStoreConfig), } diff --git a/core/lib/config/src/configs/mod.rs b/core/lib/config/src/configs/mod.rs index 0c756ad95647..2b848030d719 100644 --- a/core/lib/config/src/configs/mod.rs +++ b/core/lib/config/src/configs/mod.rs @@ -5,7 +5,7 @@ pub use self::{ commitment_generator::CommitmentGeneratorConfig, contract_verifier::ContractVerifierConfig, contracts::{ContractsConfig, EcosystemContracts}, - da_client::{avail::AvailConfig, celestia::CelestiaConfig, DAClientConfig}, + da_client::{avail::AvailConfig, celestia::CelestiaConfig, eigen::EigenConfig, DAClientConfig}, da_dispatcher::DADispatcherConfig, database::{DBConfig, PostgresConfig}, eth_sender::{EthConfig, GasAdjusterConfig}, diff --git a/core/lib/config/src/configs/secrets.rs b/core/lib/config/src/configs/secrets.rs index 4d95ae4d1ede..75ff067c2473 100644 --- a/core/lib/config/src/configs/secrets.rs +++ b/core/lib/config/src/configs/secrets.rs @@ -3,7 +3,7 @@ use zksync_basic_types::url::SensitiveUrl; use 
crate::configs::{ consensus::ConsensusSecrets, - da_client::{avail::AvailSecrets, celestia::CelestiaSecrets}, + da_client::{avail::AvailSecrets, celestia::CelestiaSecrets, eigen::EigenSecrets}, }; #[derive(Debug, Clone, PartialEq)] @@ -22,6 +22,7 @@ pub struct L1Secrets { pub enum DataAvailabilitySecrets { Avail(AvailSecrets), Celestia(CelestiaSecrets), + Eigen(EigenSecrets), } #[derive(Debug, Clone, PartialEq)] diff --git a/core/lib/config/src/lib.rs b/core/lib/config/src/lib.rs index c02f3e531b34..f77a8ceb39ad 100644 --- a/core/lib/config/src/lib.rs +++ b/core/lib/config/src/lib.rs @@ -2,9 +2,9 @@ pub use crate::configs::{ ApiConfig, AvailConfig, BaseTokenAdjusterConfig, CelestiaConfig, ContractVerifierConfig, - ContractsConfig, DAClientConfig, DADispatcherConfig, DBConfig, EthConfig, EthWatchConfig, - ExternalProofIntegrationApiConfig, GasAdjusterConfig, GenesisConfig, ObjectStoreConfig, - PostgresConfig, SnapshotsCreatorConfig, + ContractsConfig, DAClientConfig, DADispatcherConfig, DBConfig, EigenConfig, EthConfig, + EthWatchConfig, ExternalProofIntegrationApiConfig, GasAdjusterConfig, GenesisConfig, + ObjectStoreConfig, PostgresConfig, SnapshotsCreatorConfig, }; pub mod configs; diff --git a/core/lib/env_config/src/da_client.rs b/core/lib/env_config/src/da_client.rs index 70819a706427..8ceeb215faf4 100644 --- a/core/lib/env_config/src/da_client.rs +++ b/core/lib/env_config/src/da_client.rs @@ -6,8 +6,9 @@ use zksync_config::configs::{ AvailClientConfig, AvailSecrets, AVAIL_FULL_CLIENT_NAME, AVAIL_GAS_RELAY_CLIENT_NAME, }, celestia::CelestiaSecrets, + eigen::EigenSecrets, DAClientConfig, AVAIL_CLIENT_CONFIG_NAME, CELESTIA_CLIENT_CONFIG_NAME, - OBJECT_STORE_CLIENT_CONFIG_NAME, + EIGEN_CLIENT_CONFIG_NAME, OBJECT_STORE_CLIENT_CONFIG_NAME, }, secrets::DataAvailabilitySecrets, AvailConfig, @@ -33,6 +34,7 @@ impl FromEnv for DAClientConfig { }, }), CELESTIA_CLIENT_CONFIG_NAME => Self::Celestia(envy_load("da_celestia_config", "DA_")?), + EIGEN_CLIENT_CONFIG_NAME => Self::Eigen(envy_load("da_eigen_config", "DA_")?), OBJECT_STORE_CLIENT_CONFIG_NAME => { Self::ObjectStore(envy_load("da_object_store", "DA_")?) } @@ -66,11 +68,18 @@ impl FromEnv for DataAvailabilitySecrets { } CELESTIA_CLIENT_CONFIG_NAME => { let private_key = env::var("DA_SECRETS_PRIVATE_KEY") - .map_err(|e| anyhow::format_err!("private key not found: {}", e))? + .map_err(|e| anyhow::format_err!("Celestia private key not found: {}", e))? .parse() - .map_err(|e| anyhow::format_err!("failed to parse the auth token: {}", e))?; + .map_err(|e| anyhow::format_err!("failed to parse the private key: {}", e))?; Self::Celestia(CelestiaSecrets { private_key }) } + EIGEN_CLIENT_CONFIG_NAME => { + let private_key = env::var("DA_SECRETS_PRIVATE_KEY") + .map_err(|e| anyhow::format_err!("Eigen private key not found: {}", e))? 
+ .parse() + .map_err(|e| anyhow::format_err!("failed to parse the private key: {}", e))?; + Self::Eigen(EigenSecrets { private_key }) + } _ => anyhow::bail!("Unknown DA client name: {}", client_tag), }; @@ -89,7 +98,7 @@ mod tests { }, object_store::ObjectStoreMode::GCS, }, - AvailConfig, CelestiaConfig, ObjectStoreConfig, + AvailConfig, CelestiaConfig, EigenConfig, ObjectStoreConfig, }; use super::*; @@ -234,6 +243,26 @@ mod tests { ); } + #[test] + fn from_env_eigen_client() { + let mut lock = MUTEX.lock(); + let config = r#" + DA_CLIENT="Eigen" + DA_RPC_NODE_URL="localhost:12345" + DA_INCLUSION_POLLING_INTERVAL_MS="1000" + "#; + lock.set_env(config); + + let actual = DAClientConfig::from_env().unwrap(); + assert_eq!( + actual, + DAClientConfig::Eigen(EigenConfig { + rpc_node_url: "localhost:12345".to_string(), + inclusion_polling_interval_ms: 1000, + }) + ); + } + #[test] fn from_env_celestia_secrets() { let mut lock = MUTEX.lock(); diff --git a/core/lib/protobuf_config/src/da_client.rs b/core/lib/protobuf_config/src/da_client.rs index e175a671c3ce..341a6a9e4f43 100644 --- a/core/lib/protobuf_config/src/da_client.rs +++ b/core/lib/protobuf_config/src/da_client.rs @@ -4,7 +4,8 @@ use zksync_config::configs::{ da_client::{ avail::{AvailClientConfig, AvailConfig, AvailDefaultConfig, AvailGasRelayConfig}, celestia::CelestiaConfig, - DAClientConfig::{Avail, Celestia, ObjectStore}, + eigen::EigenConfig, + DAClientConfig::{Avail, Celestia, Eigen, ObjectStore}, }, }; use zksync_protobuf::{required, ProtoRepr}; @@ -51,6 +52,13 @@ impl ProtoRepr for proto::DataAvailabilityClient { chain_id: required(&conf.chain_id).context("chain_id")?.clone(), timeout_ms: *required(&conf.timeout_ms).context("timeout_ms")?, }), + proto::data_availability_client::Config::Eigen(conf) => Eigen(EigenConfig { + rpc_node_url: required(&conf.rpc_node_url) + .context("rpc_node_url")? + .clone(), + inclusion_polling_interval_ms: *required(&conf.inclusion_polling_interval_ms) + .context("inclusion_polling_interval_ms")?, + }), proto::data_availability_client::Config::ObjectStore(conf) => { ObjectStore(object_store_proto::ObjectStore::read(conf)?) 
} @@ -79,7 +87,6 @@ impl ProtoRepr for proto::DataAvailabilityClient { ), }, }), - Celestia(config) => { proto::data_availability_client::Config::Celestia(proto::CelestiaConfig { api_node_url: Some(config.api_node_url.clone()), @@ -88,6 +95,10 @@ impl ProtoRepr for proto::DataAvailabilityClient { timeout_ms: Some(config.timeout_ms), }) } + Eigen(config) => proto::data_availability_client::Config::Eigen(proto::EigenConfig { + rpc_node_url: Some(config.rpc_node_url.clone()), + inclusion_polling_interval_ms: Some(config.inclusion_polling_interval_ms), + }), ObjectStore(config) => proto::data_availability_client::Config::ObjectStore( object_store_proto::ObjectStore::build(config), ), diff --git a/core/lib/protobuf_config/src/proto/config/da_client.proto b/core/lib/protobuf_config/src/proto/config/da_client.proto index 206b1d05c04e..0a302120d775 100644 --- a/core/lib/protobuf_config/src/proto/config/da_client.proto +++ b/core/lib/protobuf_config/src/proto/config/da_client.proto @@ -36,11 +36,17 @@ message CelestiaConfig { optional uint64 timeout_ms = 4; } +message EigenConfig { + optional string rpc_node_url = 1; + optional uint64 inclusion_polling_interval_ms = 2; +} + message DataAvailabilityClient { // oneof in protobuf allows for None oneof config { AvailConfig avail = 1; object_store.ObjectStore object_store = 2; CelestiaConfig celestia = 3; + EigenConfig eigen = 4; } } diff --git a/core/lib/protobuf_config/src/proto/config/secrets.proto b/core/lib/protobuf_config/src/proto/config/secrets.proto index 145a8cf0c45f..7c9d0f928237 100644 --- a/core/lib/protobuf_config/src/proto/config/secrets.proto +++ b/core/lib/protobuf_config/src/proto/config/secrets.proto @@ -28,10 +28,15 @@ message CelestiaSecret { optional string private_key = 1; } +message EigenSecret { + optional string private_key = 1; +} + message DataAvailabilitySecrets { oneof da_secrets { AvailSecret avail = 1; CelestiaSecret celestia = 2; + EigenSecret eigen = 3; } } diff --git a/core/lib/protobuf_config/src/secrets.rs b/core/lib/protobuf_config/src/secrets.rs index d9cdf3384899..f5bc10a3e340 100644 --- a/core/lib/protobuf_config/src/secrets.rs +++ b/core/lib/protobuf_config/src/secrets.rs @@ -8,7 +8,7 @@ use zksync_basic_types::{ }; use zksync_config::configs::{ consensus::{AttesterSecretKey, ConsensusSecrets, NodeSecretKey, ValidatorSecretKey}, - da_client::{avail::AvailSecrets, celestia::CelestiaSecrets}, + da_client::{avail::AvailSecrets, celestia::CelestiaSecrets, eigen::EigenSecrets}, secrets::{DataAvailabilitySecrets, Secrets}, DatabaseSecrets, L1Secrets, }; @@ -133,6 +133,11 @@ impl ProtoRepr for proto::DataAvailabilitySecrets { required(&celestia.private_key).context("private_key")?, )?, }), + DaSecrets::Eigen(eigen) => DataAvailabilitySecrets::Eigen(EigenSecrets { + private_key: PrivateKey::from_str( + required(&eigen.private_key).context("private_key")?, + )?, + }), }; Ok(client) @@ -179,6 +184,9 @@ impl ProtoRepr for proto::DataAvailabilitySecrets { private_key: Some(config.private_key.0.expose_secret().to_string()), })) } + DataAvailabilitySecrets::Eigen(config) => Some(DaSecrets::Eigen(proto::EigenSecret { + private_key: Some(config.private_key.0.expose_secret().to_string()), + })), }; Self { diff --git a/core/node/da_clients/Cargo.toml b/core/node/da_clients/Cargo.toml index da5cd4effa68..bde71ce3ec5a 100644 --- a/core/node/da_clients/Cargo.toml +++ b/core/node/da_clients/Cargo.toml @@ -23,6 +23,7 @@ zksync_types.workspace = true zksync_object_store.workspace = true zksync_da_client.workspace = true 
zksync_env_config.workspace = true +zksync_basic_types.workspace = true futures.workspace = true # Avail dependencies @@ -49,5 +50,8 @@ sha2.workspace = true prost.workspace = true bech32.workspace = true ripemd.workspace = true -tonic.workspace = true +tonic = { workspace = true, features = ["tls", "default"] } pbjson-types.workspace = true + +# Eigen dependencies +tokio-stream.workspace = true diff --git a/core/node/da_clients/README.md b/core/node/da_clients/README.md index df06cef24197..1b22e5198a68 100644 --- a/core/node/da_clients/README.md +++ b/core/node/da_clients/README.md @@ -8,3 +8,5 @@ Currently, the following DataAvailability clients are implemented: utilizing the DA framework. - `Object Store client` that stores the pubdata in the Object Store(GCS). - `Avail` that sends the pubdata to the Avail DA layer. +- `Celestia` that sends the pubdata to the Celestia DA layer. +- `Eigen` that sends the pubdata to the Eigen DA layer. diff --git a/core/node/da_clients/src/eigen/README.md b/core/node/da_clients/src/eigen/README.md new file mode 100644 index 000000000000..634b4eb58780 --- /dev/null +++ b/core/node/da_clients/src/eigen/README.md @@ -0,0 +1,35 @@ +# EigenDA client + +--- + +This is an implementation of the EigenDA client capable of sending the blobs to DA layer. It uses authenticated +requests, though the auth headers are kind of mocked in the current API implementation. + +The generated files are received by compiling the `.proto` files from EigenDA repo using the following function: + +```rust +pub fn compile_protos() { + let fds = protox::compile( + [ + "proto/common.proto", + "proto/disperser.proto", + ], + ["."], + ) + .expect("protox failed to build"); + + tonic_build::configure() + .build_client(true) + .build_server(false) + .skip_protoc_run() + .out_dir("generated") + .compile_fds(fds) + .unwrap(); +} +``` + +proto files are not included here to not create confusion in case they are not updated in time, so the EigenDA +[repo](https://github.com/Layr-Labs/eigenda/tree/master/api/proto) has to be a source of truth for the proto files. + +The generated folder here is considered a temporary solution until the EigenDA has a library with either a protogen, or +preferably a full Rust client implementation. 
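
As a quick orientation for how these generated files are used, here is a minimal connection sketch. It mirrors `RawEigenClient::new` in `sdk.rs`; the endpoint URL is just the example Holesky disperser, and the module path assumes the `include!`-based layout from `mod.rs`.

```rust
// Hedged sketch of constructing the generated client over TLS; mirrors RawEigenClient::new.
use std::str::FromStr;

use tonic::transport::{Channel, ClientTlsConfig, Endpoint};

use crate::eigen::disperser::disperser_client::DisperserClient;

async fn connect_example() -> anyhow::Result<DisperserClient<Channel>> {
    // The public disperser endpoint is served over TLS, hence the explicit tls_config.
    let endpoint = Endpoint::from_str("https://disperser-holesky.eigenda.xyz:443")?
        .tls_config(ClientTlsConfig::new())?;
    Ok(DisperserClient::connect(endpoint).await?)
}
```

All further calls (`disperse_blob_authenticated`, `get_blob_status`, `retrieve_blob`) go through this client, as shown in `sdk.rs` below.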
diff --git a/core/node/da_clients/src/eigen/client.rs b/core/node/da_clients/src/eigen/client.rs new file mode 100644 index 000000000000..d977620526aa --- /dev/null +++ b/core/node/da_clients/src/eigen/client.rs @@ -0,0 +1,65 @@ +use std::{str::FromStr, sync::Arc}; + +use async_trait::async_trait; +use secp256k1::SecretKey; +use subxt_signer::ExposeSecret; +use zksync_config::{configs::da_client::eigen::EigenSecrets, EigenConfig}; +use zksync_da_client::{ + types::{DAError, DispatchResponse, InclusionData}, + DataAvailabilityClient, +}; + +use super::sdk::RawEigenClient; +use crate::utils::to_non_retriable_da_error; + +#[derive(Debug, Clone)] +pub struct EigenClient { + client: Arc, +} + +impl EigenClient { + pub async fn new(config: EigenConfig, secrets: EigenSecrets) -> anyhow::Result { + let private_key = SecretKey::from_str(secrets.private_key.0.expose_secret().as_str()) + .map_err(|e| anyhow::anyhow!("Failed to parse private key: {}", e))?; + + Ok(EigenClient { + client: Arc::new( + RawEigenClient::new( + config.rpc_node_url, + config.inclusion_polling_interval_ms, + private_key, + ) + .await?, + ), + }) + } +} + +#[async_trait] +impl DataAvailabilityClient for EigenClient { + async fn dispatch_blob( + &self, + _: u32, // batch number + data: Vec, + ) -> Result { + let blob_id = self + .client + .dispatch_blob(data) + .await + .map_err(to_non_retriable_da_error)?; + + Ok(DispatchResponse::from(blob_id)) + } + + async fn get_inclusion_data(&self, _: &str) -> Result, DAError> { + Ok(Some(InclusionData { data: vec![] })) + } + + fn clone_boxed(&self) -> Box { + Box::new(self.clone()) + } + + fn blob_size_limit(&self) -> Option { + Some(1920 * 1024) // 2mb - 128kb as a buffer + } +} diff --git a/core/node/da_clients/src/eigen/generated/common.rs b/core/node/da_clients/src/eigen/generated/common.rs new file mode 100644 index 000000000000..0599b9af4127 --- /dev/null +++ b/core/node/da_clients/src/eigen/generated/common.rs @@ -0,0 +1,63 @@ +// This file is @generated by prost-build. +#[allow(clippy::derive_partial_eq_without_eq)] +#[derive(Clone, PartialEq, ::prost::Message)] +pub struct G1Commitment { + /// The X coordinate of the KZG commitment. This is the raw byte representation of the field element. + #[prost(bytes = "vec", tag = "1")] + pub x: ::prost::alloc::vec::Vec, + /// The Y coordinate of the KZG commitment. This is the raw byte representation of the field element. + #[prost(bytes = "vec", tag = "2")] + pub y: ::prost::alloc::vec::Vec, +} +#[allow(clippy::derive_partial_eq_without_eq)] +#[derive(Clone, PartialEq, ::prost::Message)] +pub struct G2Commitment { + /// The A0 element of the X coordinate of G2 point. + #[prost(bytes = "vec", tag = "1")] + pub x_a0: ::prost::alloc::vec::Vec, + /// The A1 element of the X coordinate of G2 point. + #[prost(bytes = "vec", tag = "2")] + pub x_a1: ::prost::alloc::vec::Vec, + /// The A0 element of the Y coordinate of G2 point. + #[prost(bytes = "vec", tag = "3")] + pub y_a0: ::prost::alloc::vec::Vec, + /// The A1 element of the Y coordinate of G2 point. + #[prost(bytes = "vec", tag = "4")] + pub y_a1: ::prost::alloc::vec::Vec, +} +/// BlobCommitment represents commitment of a specific blob, containing its +/// KZG commitment, degree proof, the actual degree, and data length in number of symbols. 
+#[allow(clippy::derive_partial_eq_without_eq)] +#[derive(Clone, PartialEq, ::prost::Message)] +pub struct BlobCommitment { + #[prost(message, optional, tag = "1")] + pub commitment: ::core::option::Option, + #[prost(message, optional, tag = "2")] + pub length_commitment: ::core::option::Option, + #[prost(message, optional, tag = "3")] + pub length_proof: ::core::option::Option, + #[prost(uint32, tag = "4")] + pub data_length: u32, +} +/// BlobCertificate is what gets attested by the network +#[allow(clippy::derive_partial_eq_without_eq)] +#[derive(Clone, PartialEq, ::prost::Message)] +pub struct BlobCertificate { + #[prost(uint32, tag = "1")] + pub version: u32, + #[prost(bytes = "vec", tag = "2")] + pub blob_key: ::prost::alloc::vec::Vec, + #[prost(message, optional, tag = "3")] + pub blob_commitment: ::core::option::Option, + #[prost(uint32, repeated, tag = "4")] + pub quorum_numbers: ::prost::alloc::vec::Vec, + #[prost(uint32, tag = "5")] + pub reference_block_number: u32, +} +/// A chunk of a blob. +#[allow(clippy::derive_partial_eq_without_eq)] +#[derive(Clone, PartialEq, ::prost::Message)] +pub struct ChunkData { + #[prost(bytes = "vec", tag = "1")] + pub data: ::prost::alloc::vec::Vec, +} diff --git a/core/node/da_clients/src/eigen/generated/disperser.rs b/core/node/da_clients/src/eigen/generated/disperser.rs new file mode 100644 index 000000000000..7e94d910ecb7 --- /dev/null +++ b/core/node/da_clients/src/eigen/generated/disperser.rs @@ -0,0 +1,517 @@ +// This file is @generated by prost-build. +#[allow(clippy::derive_partial_eq_without_eq)] +#[derive(Clone, PartialEq, ::prost::Message)] +pub struct AuthenticatedRequest { + #[prost(oneof = "authenticated_request::Payload", tags = "1, 2")] + pub payload: ::core::option::Option, +} +/// Nested message and enum types in `AuthenticatedRequest`. +pub mod authenticated_request { + #[allow(clippy::derive_partial_eq_without_eq)] + #[derive(Clone, PartialEq, ::prost::Oneof)] + pub enum Payload { + #[prost(message, tag = "1")] + DisperseRequest(super::DisperseBlobRequest), + #[prost(message, tag = "2")] + AuthenticationData(super::AuthenticationData), + } +} +#[allow(clippy::derive_partial_eq_without_eq)] +#[derive(Clone, PartialEq, ::prost::Message)] +pub struct AuthenticatedReply { + #[prost(oneof = "authenticated_reply::Payload", tags = "1, 2")] + pub payload: ::core::option::Option, +} +/// Nested message and enum types in `AuthenticatedReply`. +pub mod authenticated_reply { + #[allow(clippy::derive_partial_eq_without_eq)] + #[derive(Clone, PartialEq, ::prost::Oneof)] + pub enum Payload { + #[prost(message, tag = "1")] + BlobAuthHeader(super::BlobAuthHeader), + #[prost(message, tag = "2")] + DisperseReply(super::DisperseBlobReply), + } +} +/// BlobAuthHeader contains information about the blob for the client to verify and sign. +/// - Once payments are enabled, the BlobAuthHeader will contain the KZG commitment to the blob, which the client +/// will verify and sign. Having the client verify the KZG commitment instead of calculating it avoids +/// the need for the client to have the KZG structured reference string (SRS), which can be large. +/// The signed KZG commitment prevents the disperser from sending a different blob to the DA Nodes +/// than the one the client sent. +/// - In the meantime, the BlobAuthHeader contains a simple challenge parameter is used to prevent +/// replay attacks in the event that a signature is leaked. 
+#[allow(clippy::derive_partial_eq_without_eq)] +#[derive(Clone, PartialEq, ::prost::Message)] +pub struct BlobAuthHeader { + #[prost(uint32, tag = "1")] + pub challenge_parameter: u32, +} +/// AuthenticationData contains the signature of the BlobAuthHeader. +#[allow(clippy::derive_partial_eq_without_eq)] +#[derive(Clone, PartialEq, ::prost::Message)] +pub struct AuthenticationData { + #[prost(bytes = "vec", tag = "1")] + pub authentication_data: ::prost::alloc::vec::Vec, +} +#[allow(clippy::derive_partial_eq_without_eq)] +#[derive(Clone, PartialEq, ::prost::Message)] +pub struct DisperseBlobRequest { + /// The data to be dispersed. + /// The size of data must be <= 2MiB. Every 32 bytes of data chunk is interpreted as an integer in big endian format + /// where the lower address has more significant bits. The integer must stay in the valid range to be interpreted + /// as a field element on the bn254 curve. The valid range is + /// 0 <= x < 21888242871839275222246405745257275088548364400416034343698204186575808495617 + /// containing slightly less than 254 bits and more than 253 bits. If any one of the 32 bytes chunk is outside the range, + /// the whole request is deemed as invalid, and rejected. + #[prost(bytes = "vec", tag = "1")] + pub data: ::prost::alloc::vec::Vec, + /// The quorums to which the blob will be sent, in addition to the required quorums which are configured + /// on the EigenDA smart contract. If required quorums are included here, an error will be returned. + /// The disperser will ensure that the encoded blobs for each quorum are all processed + /// within the same batch. + #[prost(uint32, repeated, tag = "2")] + pub custom_quorum_numbers: ::prost::alloc::vec::Vec, + /// The account ID of the client. This should be a hex-encoded string of the ECSDA public key + /// corresponding to the key used by the client to sign the BlobAuthHeader. + #[prost(string, tag = "3")] + pub account_id: ::prost::alloc::string::String, +} +#[allow(clippy::derive_partial_eq_without_eq)] +#[derive(Clone, PartialEq, ::prost::Message)] +pub struct DisperseBlobReply { + /// The status of the blob associated with the request_id. + #[prost(enumeration = "BlobStatus", tag = "1")] + pub result: i32, + /// The request ID generated by the disperser. + /// Once a request is accepted (although not processed), a unique request ID will be + /// generated. + /// Two different DisperseBlobRequests (determined by the hash of the DisperseBlobRequest) + /// will have different IDs, and the same DisperseBlobRequest sent repeatedly at different + /// times will also have different IDs. + /// The client should use this ID to query the processing status of the request (via + /// the GetBlobStatus API). + #[prost(bytes = "vec", tag = "2")] + pub request_id: ::prost::alloc::vec::Vec, +} +/// BlobStatusRequest is used to query the status of a blob. +#[allow(clippy::derive_partial_eq_without_eq)] +#[derive(Clone, PartialEq, ::prost::Message)] +pub struct BlobStatusRequest { + #[prost(bytes = "vec", tag = "1")] + pub request_id: ::prost::alloc::vec::Vec, +} +#[allow(clippy::derive_partial_eq_without_eq)] +#[derive(Clone, PartialEq, ::prost::Message)] +pub struct BlobStatusReply { + /// The status of the blob. + #[prost(enumeration = "BlobStatus", tag = "1")] + pub status: i32, + /// The blob info needed for clients to confirm the blob against the EigenDA contracts. + #[prost(message, optional, tag = "2")] + pub info: ::core::option::Option, +} +/// RetrieveBlobRequest contains parameters to retrieve the blob. 
+#[allow(clippy::derive_partial_eq_without_eq)] +#[derive(Clone, PartialEq, ::prost::Message)] +pub struct RetrieveBlobRequest { + #[prost(bytes = "vec", tag = "1")] + pub batch_header_hash: ::prost::alloc::vec::Vec, + #[prost(uint32, tag = "2")] + pub blob_index: u32, +} +/// RetrieveBlobReply contains the retrieved blob data +#[allow(clippy::derive_partial_eq_without_eq)] +#[derive(Clone, PartialEq, ::prost::Message)] +pub struct RetrieveBlobReply { + #[prost(bytes = "vec", tag = "1")] + pub data: ::prost::alloc::vec::Vec, +} +/// BlobInfo contains information needed to confirm the blob against the EigenDA contracts +#[allow(clippy::derive_partial_eq_without_eq)] +#[derive(Clone, PartialEq, ::prost::Message)] +pub struct BlobInfo { + #[prost(message, optional, tag = "1")] + pub blob_header: ::core::option::Option, + #[prost(message, optional, tag = "2")] + pub blob_verification_proof: ::core::option::Option, +} +#[allow(clippy::derive_partial_eq_without_eq)] +#[derive(Clone, PartialEq, ::prost::Message)] +pub struct BlobHeader { + /// KZG commitment of the blob. + #[prost(message, optional, tag = "1")] + pub commitment: ::core::option::Option, + /// The length of the blob in symbols (each symbol is 32 bytes). + #[prost(uint32, tag = "2")] + pub data_length: u32, + /// The params of the quorums that this blob participates in. + #[prost(message, repeated, tag = "3")] + pub blob_quorum_params: ::prost::alloc::vec::Vec, +} +#[allow(clippy::derive_partial_eq_without_eq)] +#[derive(Clone, PartialEq, ::prost::Message)] +pub struct BlobQuorumParam { + /// The ID of the quorum. + #[prost(uint32, tag = "1")] + pub quorum_number: u32, + /// The max percentage of stake within the quorum that can be held by or delegated + /// to adversarial operators. Currently, this and the next parameter are standardized + /// across the quorum using values read from the EigenDA contracts. + #[prost(uint32, tag = "2")] + pub adversary_threshold_percentage: u32, + /// The min percentage of stake that must attest in order to consider + /// the dispersal is successful. + #[prost(uint32, tag = "3")] + pub confirmation_threshold_percentage: u32, + /// The length of each chunk. + #[prost(uint32, tag = "4")] + pub chunk_length: u32, +} +#[allow(clippy::derive_partial_eq_without_eq)] +#[derive(Clone, PartialEq, ::prost::Message)] +pub struct BlobVerificationProof { + /// batch_id is an incremental ID assigned to a batch by EigenDAServiceManager + #[prost(uint32, tag = "1")] + pub batch_id: u32, + /// The index of the blob in the batch (which is logically an ordered list of blobs). + #[prost(uint32, tag = "2")] + pub blob_index: u32, + #[prost(message, optional, tag = "3")] + pub batch_metadata: ::core::option::Option, + /// inclusion_proof is a merkle proof for a blob header's inclusion in a batch + #[prost(bytes = "vec", tag = "4")] + pub inclusion_proof: ::prost::alloc::vec::Vec, + /// indexes of quorums in BatchHeader.quorum_numbers that match the quorums in BlobHeader.blob_quorum_params + /// Ex. BlobHeader.blob_quorum_params = [ + /// { + /// quorum_number = 0, + /// ... + /// }, + /// { + /// quorum_number = 3, + /// ... + /// }, + /// { + /// quorum_number = 5, + /// ... 
+ /// }, + /// ] + /// BatchHeader.quorum_numbers = \[0, 5, 3\] => 0x000503 + /// Then, quorum_indexes = \[0, 2, 1\] => 0x000201 + #[prost(bytes = "vec", tag = "5")] + pub quorum_indexes: ::prost::alloc::vec::Vec, +} +#[allow(clippy::derive_partial_eq_without_eq)] +#[derive(Clone, PartialEq, ::prost::Message)] +pub struct BatchMetadata { + #[prost(message, optional, tag = "1")] + pub batch_header: ::core::option::Option, + /// The hash of all public keys of the operators that did not sign the batch. + #[prost(bytes = "vec", tag = "2")] + pub signatory_record_hash: ::prost::alloc::vec::Vec, + /// The fee payment paid by users for dispersing this batch. It's the bytes + /// representation of a big.Int value. + #[prost(bytes = "vec", tag = "3")] + pub fee: ::prost::alloc::vec::Vec, + /// The Ethereum block number at which the batch is confirmed onchain. + #[prost(uint32, tag = "4")] + pub confirmation_block_number: u32, + /// This is the hash of the ReducedBatchHeader defined onchain, see: + /// + /// The is the message that the operators will sign their signatures on. + #[prost(bytes = "vec", tag = "5")] + pub batch_header_hash: ::prost::alloc::vec::Vec, +} +#[allow(clippy::derive_partial_eq_without_eq)] +#[derive(Clone, PartialEq, ::prost::Message)] +pub struct BatchHeader { + /// The root of the merkle tree with the hashes of blob headers as leaves. + #[prost(bytes = "vec", tag = "1")] + pub batch_root: ::prost::alloc::vec::Vec, + /// All quorums associated with blobs in this batch. Sorted in ascending order. + /// Ex. \[0, 2, 1\] => 0x000102 + #[prost(bytes = "vec", tag = "2")] + pub quorum_numbers: ::prost::alloc::vec::Vec, + /// The percentage of stake that has signed for this batch. + /// The quorum_signed_percentages\[i\] is percentage for the quorum_numbers\[i\]. + #[prost(bytes = "vec", tag = "3")] + pub quorum_signed_percentages: ::prost::alloc::vec::Vec, + /// The Ethereum block number at which the batch was created. + /// The Disperser will encode and disperse the blobs based on the onchain info + /// (e.g. operator stakes) at this block number. + #[prost(uint32, tag = "4")] + pub reference_block_number: u32, +} +/// BlobStatus represents the status of a blob. +/// The status of a blob is updated as the blob is processed by the disperser. +/// The status of a blob can be queried by the client using the GetBlobStatus API. +/// Intermediate states are states that the blob can be in while being processed, and it can be updated to a differet state: +/// - PROCESSING +/// - DISPERSING +/// - CONFIRMED +/// Terminal states are states that will not be updated to a different state: +/// - FAILED +/// - FINALIZED +/// - INSUFFICIENT_SIGNATURES +#[derive(Clone, Copy, Debug, PartialEq, Eq, Hash, PartialOrd, Ord, ::prost::Enumeration)] +#[repr(i32)] +pub enum BlobStatus { + Unknown = 0, + /// PROCESSING means that the blob is currently being processed by the disperser + Processing = 1, + /// CONFIRMED means that the blob has been dispersed to DA Nodes and the dispersed + /// batch containing the blob has been confirmed onchain + Confirmed = 2, + /// FAILED means that the blob has failed permanently (for reasons other than insufficient + /// signatures, which is a separate state) + Failed = 3, + /// FINALIZED means that the block containing the blob's confirmation transaction has been finalized on Ethereum + Finalized = 4, + /// INSUFFICIENT_SIGNATURES means that the confirmation threshold for the blob was not met + /// for at least one quorum. 
+ InsufficientSignatures = 5, + /// DISPERSING means that the blob is currently being dispersed to DA Nodes and being confirmed onchain + Dispersing = 6, +} +impl BlobStatus { + /// String value of the enum field names used in the ProtoBuf definition. + /// + /// The values are not transformed in any way and thus are considered stable + /// (if the ProtoBuf definition does not change) and safe for programmatic use. + pub fn as_str_name(&self) -> &'static str { + match self { + BlobStatus::Unknown => "UNKNOWN", + BlobStatus::Processing => "PROCESSING", + BlobStatus::Confirmed => "CONFIRMED", + BlobStatus::Failed => "FAILED", + BlobStatus::Finalized => "FINALIZED", + BlobStatus::InsufficientSignatures => "INSUFFICIENT_SIGNATURES", + BlobStatus::Dispersing => "DISPERSING", + } + } + /// Creates an enum from field names used in the ProtoBuf definition. + pub fn from_str_name(value: &str) -> ::core::option::Option { + match value { + "UNKNOWN" => Some(Self::Unknown), + "PROCESSING" => Some(Self::Processing), + "CONFIRMED" => Some(Self::Confirmed), + "FAILED" => Some(Self::Failed), + "FINALIZED" => Some(Self::Finalized), + "INSUFFICIENT_SIGNATURES" => Some(Self::InsufficientSignatures), + "DISPERSING" => Some(Self::Dispersing), + _ => None, + } + } +} +/// Generated client implementations. +pub mod disperser_client { + #![allow(unused_variables, dead_code, missing_docs, clippy::let_unit_value)] + use tonic::codegen::*; + use tonic::codegen::http::Uri; + /// Disperser defines the public APIs for dispersing blobs. + #[derive(Debug, Clone)] + pub struct DisperserClient { + inner: tonic::client::Grpc, + } + impl DisperserClient { + /// Attempt to create a new client by connecting to a given endpoint. + pub async fn connect(dst: D) -> Result + where + D: TryInto, + D::Error: Into, + { + let conn = tonic::transport::Endpoint::new(dst)?.connect().await?; + Ok(Self::new(conn)) + } + } + impl DisperserClient + where + T: tonic::client::GrpcService, + T::Error: Into, + T::ResponseBody: Body + Send + 'static, + ::Error: Into + Send, + { + pub fn new(inner: T) -> Self { + let inner = tonic::client::Grpc::new(inner); + Self { inner } + } + pub fn with_origin(inner: T, origin: Uri) -> Self { + let inner = tonic::client::Grpc::with_origin(inner, origin); + Self { inner } + } + pub fn with_interceptor( + inner: T, + interceptor: F, + ) -> DisperserClient> + where + F: tonic::service::Interceptor, + T::ResponseBody: Default, + T: tonic::codegen::Service< + http::Request, + Response = http::Response< + >::ResponseBody, + >, + >, + , + >>::Error: Into + Send + Sync, + { + DisperserClient::new(InterceptedService::new(inner, interceptor)) + } + /// Compress requests with the given encoding. + /// + /// This requires the server to support it otherwise it might respond with an + /// error. + #[must_use] + pub fn send_compressed(mut self, encoding: CompressionEncoding) -> Self { + self.inner = self.inner.send_compressed(encoding); + self + } + /// Enable decompressing responses. + #[must_use] + pub fn accept_compressed(mut self, encoding: CompressionEncoding) -> Self { + self.inner = self.inner.accept_compressed(encoding); + self + } + /// Limits the maximum size of a decoded message. + /// + /// Default: `4MB` + #[must_use] + pub fn max_decoding_message_size(mut self, limit: usize) -> Self { + self.inner = self.inner.max_decoding_message_size(limit); + self + } + /// Limits the maximum size of an encoded message. 
+ /// + /// Default: `usize::MAX` + #[must_use] + pub fn max_encoding_message_size(mut self, limit: usize) -> Self { + self.inner = self.inner.max_encoding_message_size(limit); + self + } + /// This API accepts blob to disperse from clients. + /// This executes the dispersal async, i.e. it returns once the request + /// is accepted. The client could use GetBlobStatus() API to poll the the + /// processing status of the blob. + pub async fn disperse_blob( + &mut self, + request: impl tonic::IntoRequest, + ) -> std::result::Result< + tonic::Response, + tonic::Status, + > { + self.inner + .ready() + .await + .map_err(|e| { + tonic::Status::new( + tonic::Code::Unknown, + format!("Service was not ready: {}", e.into()), + ) + })?; + let codec = tonic::codec::ProstCodec::default(); + let path = http::uri::PathAndQuery::from_static( + "/disperser.Disperser/DisperseBlob", + ); + let mut req = request.into_request(); + req.extensions_mut() + .insert(GrpcMethod::new("disperser.Disperser", "DisperseBlob")); + self.inner.unary(req, path, codec).await + } + /// DisperseBlobAuthenticated is similar to DisperseBlob, except that it requires the + /// client to authenticate itself via the AuthenticationData message. The protoco is as follows: + /// 1. The client sends a DisperseBlobAuthenticated request with the DisperseBlobRequest message + /// 2. The Disperser sends back a BlobAuthHeader message containing information for the client to + /// verify and sign. + /// 3. The client verifies the BlobAuthHeader and sends back the signed BlobAuthHeader in an + /// AuthenticationData message. + /// 4. The Disperser verifies the signature and returns a DisperseBlobReply message. + pub async fn disperse_blob_authenticated( + &mut self, + request: impl tonic::IntoStreamingRequest< + Message = super::AuthenticatedRequest, + >, + ) -> std::result::Result< + tonic::Response>, + tonic::Status, + > { + self.inner + .ready() + .await + .map_err(|e| { + tonic::Status::new( + tonic::Code::Unknown, + format!("Service was not ready: {}", e.into()), + ) + })?; + let codec = tonic::codec::ProstCodec::default(); + let path = http::uri::PathAndQuery::from_static( + "/disperser.Disperser/DisperseBlobAuthenticated", + ); + let mut req = request.into_streaming_request(); + req.extensions_mut() + .insert( + GrpcMethod::new("disperser.Disperser", "DisperseBlobAuthenticated"), + ); + self.inner.streaming(req, path, codec).await + } + /// This API is meant to be polled for the blob status. + pub async fn get_blob_status( + &mut self, + request: impl tonic::IntoRequest, + ) -> std::result::Result< + tonic::Response, + tonic::Status, + > { + self.inner + .ready() + .await + .map_err(|e| { + tonic::Status::new( + tonic::Code::Unknown, + format!("Service was not ready: {}", e.into()), + ) + })?; + let codec = tonic::codec::ProstCodec::default(); + let path = http::uri::PathAndQuery::from_static( + "/disperser.Disperser/GetBlobStatus", + ); + let mut req = request.into_request(); + req.extensions_mut() + .insert(GrpcMethod::new("disperser.Disperser", "GetBlobStatus")); + self.inner.unary(req, path, codec).await + } + /// This retrieves the requested blob from the Disperser's backend. + /// This is a more efficient way to retrieve blobs than directly retrieving + /// from the DA Nodes (see detail about this approach in + /// api/proto/retriever/retriever.proto). + /// The blob should have been initially dispersed via this Disperser service + /// for this API to work. 
+        pub async fn retrieve_blob(
+            &mut self,
+            request: impl tonic::IntoRequest<super::RetrieveBlobRequest>,
+        ) -> std::result::Result<
+            tonic::Response<super::RetrieveBlobReply>,
+            tonic::Status,
+        > {
+            self.inner
+                .ready()
+                .await
+                .map_err(|e| {
+                    tonic::Status::new(
+                        tonic::Code::Unknown,
+                        format!("Service was not ready: {}", e.into()),
+                    )
+                })?;
+            let codec = tonic::codec::ProstCodec::default();
+            let path = http::uri::PathAndQuery::from_static(
+                "/disperser.Disperser/RetrieveBlob",
+            );
+            let mut req = request.into_request();
+            req.extensions_mut()
+                .insert(GrpcMethod::new("disperser.Disperser", "RetrieveBlob"));
+            self.inner.unary(req, path, codec).await
+        }
+    }
+}
diff --git a/core/node/da_clients/src/eigen/mod.rs b/core/node/da_clients/src/eigen/mod.rs
new file mode 100644
index 000000000000..699eae894246
--- /dev/null
+++ b/core/node/da_clients/src/eigen/mod.rs
@@ -0,0 +1,14 @@
+mod client;
+mod sdk;
+
+pub use self::client::EigenClient;
+
+#[allow(clippy::all)]
+pub(crate) mod disperser {
+    include!("generated/disperser.rs");
+}
+
+#[allow(clippy::all)]
+pub(crate) mod common {
+    include!("generated/common.rs");
+}
diff --git a/core/node/da_clients/src/eigen/sdk.rs b/core/node/da_clients/src/eigen/sdk.rs
new file mode 100644
index 000000000000..4013cafea298
--- /dev/null
+++ b/core/node/da_clients/src/eigen/sdk.rs
@@ -0,0 +1,217 @@
+use std::{str::FromStr, time::Duration};
+
+use secp256k1::{ecdsa::RecoverableSignature, SecretKey};
+use tokio::sync::mpsc;
+use tokio_stream::{wrappers::ReceiverStream, StreamExt};
+use tonic::{
+    transport::{Channel, ClientTlsConfig, Endpoint},
+    Streaming,
+};
+
+use crate::eigen::{
+    disperser,
+    disperser::{
+        authenticated_request::Payload::{AuthenticationData, DisperseRequest},
+        disperser_client::DisperserClient,
+        AuthenticatedReply, BlobAuthHeader, BlobVerificationProof, DisperseBlobReply,
+    },
+};
+
+#[derive(Debug, Clone)]
+pub struct RawEigenClient {
+    client: DisperserClient<Channel>,
+    polling_interval: Duration,
+    private_key: SecretKey,
+    account_id: String,
+}
+
+impl RawEigenClient {
+    pub(crate) const BUFFER_SIZE: usize = 1000;
+
+    pub async fn new(
+        rpc_node_url: String,
+        inclusion_polling_interval_ms: u64,
+        private_key: SecretKey,
+    ) -> anyhow::Result<Self> {
+        let endpoint =
+            Endpoint::from_str(rpc_node_url.as_str())?.tls_config(ClientTlsConfig::new())?;
+        let client = DisperserClient::connect(endpoint)
+            .await
+            .map_err(|e| anyhow::anyhow!("Failed to connect to Disperser server: {}", e))?;
+        let polling_interval = Duration::from_millis(inclusion_polling_interval_ms);
+
+        let account_id = get_account_id(&private_key);
+
+        Ok(RawEigenClient {
+            client,
+            polling_interval,
+            private_key,
+            account_id,
+        })
+    }
+
+    pub async fn dispatch_blob(&self, data: Vec<u8>) -> anyhow::Result<String> {
+        let mut client_clone = self.client.clone();
+        let (tx, rx) = mpsc::channel(Self::BUFFER_SIZE);
+
+        let response_stream = client_clone.disperse_blob_authenticated(ReceiverStream::new(rx));
+
+        // 1. send DisperseBlobRequest
+        self.disperse_data(data, &tx).await?;
+
+        // this await is blocked until the first response on the stream, so we only await after sending the `DisperseBlobRequest`
+        let mut response_stream = response_stream.await?.into_inner();
+
+        // 2. receive BlobAuthHeader
+        let blob_auth_header = self.receive_blob_auth_header(&mut response_stream).await?;
+
+        // 3. sign and send BlobAuthHeader
+        self.submit_authentication_data(blob_auth_header.clone(), &tx)
+            .await?;
+
+        // 4. receive DisperseBlobReply
+        let reply = response_stream
+            .next()
+            .await
+            .ok_or_else(|| anyhow::anyhow!("No response from server"))?
+            .unwrap()
+            .payload
+            .ok_or_else(|| anyhow::anyhow!("No payload in response"))?;
+
+        let disperser::authenticated_reply::Payload::DisperseReply(disperse_reply) = reply else {
+            return Err(anyhow::anyhow!("Unexpected response from server"));
+        };
+
+        // 5. poll for blob status until it reaches the Confirmed state
+        let verification_proof = self
+            .await_for_inclusion(client_clone, disperse_reply)
+            .await?;
+        let blob_id = format!(
+            "{}:{}",
+            verification_proof.batch_id, verification_proof.blob_index
+        );
+        tracing::info!("Blob dispatch confirmed, blob id: {}", blob_id);
+
+        Ok(blob_id)
+    }
+
+    async fn disperse_data(
+        &self,
+        data: Vec<u8>,
+        tx: &mpsc::Sender<disperser::AuthenticatedRequest>,
+    ) -> anyhow::Result<()> {
+        let req = disperser::AuthenticatedRequest {
+            payload: Some(DisperseRequest(disperser::DisperseBlobRequest {
+                data,
+                custom_quorum_numbers: vec![],
+                account_id: self.account_id.clone(),
+            })),
+        };
+
+        tx.send(req)
+            .await
+            .map_err(|e| anyhow::anyhow!("Failed to send DisperseBlobRequest: {}", e))
+    }
+
+    async fn submit_authentication_data(
+        &self,
+        blob_auth_header: BlobAuthHeader,
+        tx: &mpsc::Sender<disperser::AuthenticatedRequest>,
+    ) -> anyhow::Result<()> {
+        // TODO: replace challenge_parameter with actual auth header when it is available
+        let digest = zksync_basic_types::web3::keccak256(
+            &blob_auth_header.challenge_parameter.to_be_bytes(),
+        );
+        let signature: RecoverableSignature = secp256k1::Secp256k1::signing_only()
+            .sign_ecdsa_recoverable(
+                &secp256k1::Message::from_slice(&digest[..])?,
+                &self.private_key,
+            );
+        let (recovery_id, sig) = signature.serialize_compact();
+
+        let mut signature = Vec::with_capacity(65);
+        signature.extend_from_slice(&sig);
+        signature.push(recovery_id.to_i32() as u8);
+
+        let req = disperser::AuthenticatedRequest {
+            payload: Some(AuthenticationData(disperser::AuthenticationData {
+                authentication_data: signature,
+            })),
+        };
+
+        tx.send(req)
+            .await
+            .map_err(|e| anyhow::anyhow!("Failed to send AuthenticationData: {}", e))
+    }
+
+    async fn receive_blob_auth_header(
+        &self,
+        response_stream: &mut Streaming<AuthenticatedReply>,
+    ) -> anyhow::Result<BlobAuthHeader> {
+        let reply = response_stream
+            .next()
+            .await
+            .ok_or_else(|| anyhow::anyhow!("No response from server"))?;
+
+        let Ok(reply) = reply else {
+            return Err(anyhow::anyhow!("Err from server: {:?}", reply));
+        };
+
+        let reply = reply
+            .payload
+            .ok_or_else(|| anyhow::anyhow!("No payload in response"))?;
+
+        if let disperser::authenticated_reply::Payload::BlobAuthHeader(blob_auth_header) = reply {
+            Ok(blob_auth_header)
+        } else {
+            Err(anyhow::anyhow!("Unexpected response from server"))
+        }
+    }
+
+    async fn await_for_inclusion(
+        &self,
+        mut client: DisperserClient<Channel>,
+        disperse_blob_reply: DisperseBlobReply,
+    ) -> anyhow::Result<BlobVerificationProof> {
+        let polling_request = disperser::BlobStatusRequest {
+            request_id: disperse_blob_reply.request_id,
+        };
+
+        loop {
+            tokio::time::sleep(self.polling_interval).await;
+            let resp = client
+                .get_blob_status(polling_request.clone())
+                .await?
+                .into_inner();
+
+            match disperser::BlobStatus::try_from(resp.status)? 
{ + disperser::BlobStatus::Processing | disperser::BlobStatus::Dispersing => {} + disperser::BlobStatus::Failed => { + return Err(anyhow::anyhow!("Blob dispatch failed")) + } + disperser::BlobStatus::InsufficientSignatures => { + return Err(anyhow::anyhow!("Insufficient signatures")) + } + disperser::BlobStatus::Confirmed | disperser::BlobStatus::Finalized => { + let verification_proof = resp + .info + .ok_or_else(|| anyhow::anyhow!("No blob header in response"))? + .blob_verification_proof + .ok_or_else(|| anyhow::anyhow!("No blob verification proof in response"))?; + + return Ok(verification_proof); + } + + _ => return Err(anyhow::anyhow!("Received unknown blob status")), + } + } + } +} + +fn get_account_id(secret_key: &SecretKey) -> String { + let public_key = + secp256k1::PublicKey::from_secret_key(&secp256k1::Secp256k1::new(), secret_key); + let hex = hex::encode(public_key.serialize_uncompressed()); + + format!("0x{}", hex) +} diff --git a/core/node/da_clients/src/lib.rs b/core/node/da_clients/src/lib.rs index 8515c128ff3f..8a4c565a650a 100644 --- a/core/node/da_clients/src/lib.rs +++ b/core/node/da_clients/src/lib.rs @@ -1,5 +1,6 @@ pub mod avail; pub mod celestia; +pub mod eigen; pub mod no_da; pub mod object_store; mod utils; diff --git a/core/node/da_clients/src/no_da.rs b/core/node/da_clients/src/no_da.rs index 2710c9ce9d9b..db0557510ed2 100644 --- a/core/node/da_clients/src/no_da.rs +++ b/core/node/da_clients/src/no_da.rs @@ -15,7 +15,7 @@ impl DataAvailabilityClient for NoDAClient { } async fn get_inclusion_data(&self, _: &str) -> Result, DAError> { - return Ok(Some(InclusionData::default())); + Ok(Some(InclusionData::default())) } fn clone_boxed(&self) -> Box { diff --git a/core/node/node_framework/src/implementations/layers/da_clients/eigen.rs b/core/node/node_framework/src/implementations/layers/da_clients/eigen.rs new file mode 100644 index 000000000000..d5391ee433f9 --- /dev/null +++ b/core/node/node_framework/src/implementations/layers/da_clients/eigen.rs @@ -0,0 +1,46 @@ +use zksync_config::{configs::da_client::eigen::EigenSecrets, EigenConfig}; +use zksync_da_client::DataAvailabilityClient; +use zksync_da_clients::eigen::EigenClient; + +use crate::{ + implementations::resources::da_client::DAClientResource, + wiring_layer::{WiringError, WiringLayer}, + IntoContext, +}; + +#[derive(Debug)] +pub struct EigenWiringLayer { + config: EigenConfig, + secrets: EigenSecrets, +} + +impl EigenWiringLayer { + pub fn new(config: EigenConfig, secrets: EigenSecrets) -> Self { + Self { config, secrets } + } +} + +#[derive(Debug, IntoContext)] +#[context(crate = crate)] +pub struct Output { + pub client: DAClientResource, +} + +#[async_trait::async_trait] +impl WiringLayer for EigenWiringLayer { + type Input = (); + type Output = Output; + + fn layer_name(&self) -> &'static str { + "eigen_client_layer" + } + + async fn wire(self, _input: Self::Input) -> Result { + let client: Box = + Box::new(EigenClient::new(self.config, self.secrets).await?); + + Ok(Self::Output { + client: DAClientResource(client), + }) + } +} diff --git a/core/node/node_framework/src/implementations/layers/da_clients/mod.rs b/core/node/node_framework/src/implementations/layers/da_clients/mod.rs index 6bb6ce4fb877..c7865c74f3b1 100644 --- a/core/node/node_framework/src/implementations/layers/da_clients/mod.rs +++ b/core/node/node_framework/src/implementations/layers/da_clients/mod.rs @@ -1,4 +1,5 @@ pub mod avail; pub mod celestia; +pub mod eigen; pub mod no_da; pub mod object_store; From 
6adb2249ff0946ec6d02f25437c9f71b1079ad79 Mon Sep 17 00:00:00 2001 From: Grzegorz Prusak Date: Tue, 29 Oct 2024 16:38:14 +0100 Subject: [PATCH 25/32] feat(consensus): enabled syncing pregenesis blocks over p2p (#3192) It was already implemented, just not enabled, because the blockMetadata RPC has to be whitelisted first. --- core/node/consensus/src/en.rs | 22 +------- core/node/consensus/src/era.rs | 14 +---- core/node/consensus/src/testonly.rs | 11 +--- core/node/consensus/src/tests/attestation.rs | 10 ++-- core/node/consensus/src/tests/mod.rs | 56 +++++++++----------- 5 files changed, 34 insertions(+), 79 deletions(-) diff --git a/core/node/consensus/src/en.rs b/core/node/consensus/src/en.rs index 6e3619f57e2e..ec8d3c19b54a 100644 --- a/core/node/consensus/src/en.rs +++ b/core/node/consensus/src/en.rs @@ -35,14 +35,6 @@ pub(super) struct EN { impl EN { /// Task running a consensus node for the external node. /// It may be a validator, but it cannot be a leader (cannot propose blocks). - /// - /// If `enable_pregenesis` is false, - /// before starting the consensus node it fetches all the blocks - /// older than consensus genesis from the main node using json RPC. - /// NOTE: currently `enable_pregenesis` is hardcoded to `false` in `era.rs`. - /// True is used only in tests. Once the `block_metadata` RPC is enabled everywhere - /// this flag should be removed and fetching pregenesis blocks will always be done - /// over the gossip network. pub async fn run( self, ctx: &ctx::Ctx, @@ -50,7 +42,6 @@ impl EN { cfg: ConsensusConfig, secrets: ConsensusSecrets, build_version: Option, - enable_pregenesis: bool, ) -> anyhow::Result<()> { let attester = config::attester_key(&secrets).context("attester_key")?; @@ -74,24 +65,13 @@ impl EN { .await .wrap("try_update_global_config()")?; - let mut payload_queue = conn + let payload_queue = conn .new_payload_queue(ctx, actions, self.sync_state.clone()) .await .wrap("new_payload_queue()")?; drop(conn); - // Fetch blocks before the genesis. - if !enable_pregenesis { - self.fetch_blocks( - ctx, - &mut payload_queue, - Some(global_config.genesis.first_block), - ) - .await - .wrap("fetch_blocks()")?; - } - // Monitor the genesis of the main node. // If it changes, it means that a hard fork occurred and we need to reset the consensus state. s.spawn_bg::<()>({ diff --git a/core/node/consensus/src/era.rs b/core/node/consensus/src/era.rs index 916b7cdd89a5..3150f839680e 100644 --- a/core/node/consensus/src/era.rs +++ b/core/node/consensus/src/era.rs @@ -59,18 +59,8 @@ pub async fn run_external_node( is_validator = secrets.validator_key.is_some(), "running external node" ); - // We will enable it once the main node on all envs supports - // `block_metadata()` JSON RPC method. 
- let enable_pregenesis = false; - en.run( - ctx, - actions, - cfg, - secrets, - Some(build_version), - enable_pregenesis, - ) - .await + en.run(ctx, actions, cfg, secrets, Some(build_version)) + .await } None => { tracing::info!("running fetcher"); diff --git a/core/node/consensus/src/testonly.rs b/core/node/consensus/src/testonly.rs index db433665e570..ef4226c915f0 100644 --- a/core/node/consensus/src/testonly.rs +++ b/core/node/consensus/src/testonly.rs @@ -73,7 +73,6 @@ pub(super) struct ConfigSet { net: network::Config, pub(super) config: config::ConsensusConfig, pub(super) secrets: config::ConsensusSecrets, - pub(super) enable_pregenesis: bool, } impl ConfigSet { @@ -83,17 +82,11 @@ impl ConfigSet { config: make_config(&net, None), secrets: make_secrets(&net, None), net, - enable_pregenesis: self.enable_pregenesis, } } } -pub(super) fn new_configs( - rng: &mut impl Rng, - setup: &Setup, - seed_peers: usize, - pregenesis: bool, -) -> Vec { +pub(super) fn new_configs(rng: &mut impl Rng, setup: &Setup, seed_peers: usize) -> Vec { let net_cfgs = network::testonly::new_configs(rng, setup, 0); let genesis_spec = config::GenesisSpec { chain_id: setup.genesis.chain_id.0.try_into().unwrap(), @@ -133,7 +126,6 @@ pub(super) fn new_configs( config: make_config(&net, Some(genesis_spec.clone())), secrets: make_secrets(&net, setup.attester_keys.get(i).cloned()), net, - enable_pregenesis: pregenesis, }) .collect() } @@ -473,7 +465,6 @@ impl StateKeeper { cfgs.config, cfgs.secrets, cfgs.net.build_version, - cfgs.enable_pregenesis, ) .await } diff --git a/core/node/consensus/src/tests/attestation.rs b/core/node/consensus/src/tests/attestation.rs index 2701a986e9e9..5ee17d5e2eda 100644 --- a/core/node/consensus/src/tests/attestation.rs +++ b/core/node/consensus/src/tests/attestation.rs @@ -1,6 +1,6 @@ use anyhow::Context as _; use rand::Rng as _; -use test_casing::{test_casing, Product}; +use test_casing::test_casing; use tracing::Instrument as _; use zksync_concurrency::{ctx, error::Wrap, scope}; use zksync_consensus_roles::{ @@ -12,7 +12,7 @@ use zksync_test_account::Account; use zksync_types::ProtocolVersionId; use zksync_web3_decl::namespaces::EnNamespaceClient as _; -use super::{POLL_INTERVAL, PREGENESIS, VERSIONS}; +use super::{POLL_INTERVAL, VERSIONS}; use crate::{ mn::run_main_node, registry::{testonly, Registry}, @@ -126,9 +126,9 @@ async fn test_attestation_status_api(version: ProtocolVersionId) { // Test running a couple of attesters (which are also validators). // Main node is expected to collect all certificates. // External nodes are expected to just vote for the batch. 
-#[test_casing(4, Product((VERSIONS,PREGENESIS)))] +#[test_casing(2, VERSIONS)] #[tokio::test] -async fn test_multiple_attesters(version: ProtocolVersionId, pregenesis: bool) { +async fn test_multiple_attesters(version: ProtocolVersionId) { const NODES: usize = 4; zksync_concurrency::testonly::abort_on_panic(); @@ -137,7 +137,7 @@ async fn test_multiple_attesters(version: ProtocolVersionId, pregenesis: bool) { let account = &mut Account::random(); let to_fund = &[account.address]; let setup = Setup::new(rng, 4); - let mut cfgs = new_configs(rng, &setup, NODES, pregenesis); + let mut cfgs = new_configs(rng, &setup, NODES); scope::run!(ctx, |ctx, s| async { let validator_pool = ConnectionPool::test(false, version).await; let (mut validator, runner) = StateKeeper::new(ctx, validator_pool.clone()).await?; diff --git a/core/node/consensus/src/tests/mod.rs b/core/node/consensus/src/tests/mod.rs index 8da17cfba8ac..663ccab49904 100644 --- a/core/node/consensus/src/tests/mod.rs +++ b/core/node/consensus/src/tests/mod.rs @@ -26,7 +26,6 @@ mod attestation; const VERSIONS: [ProtocolVersionId; 2] = [ProtocolVersionId::latest(), ProtocolVersionId::next()]; const FROM_SNAPSHOT: [bool; 2] = [true, false]; -const PREGENESIS: [bool; 2] = [true, false]; const POLL_INTERVAL: time::Duration = time::Duration::milliseconds(500); #[test_casing(2, VERSIONS)] @@ -190,14 +189,14 @@ async fn test_validator_block_store(version: ProtocolVersionId) { // In the current implementation, consensus certificates are created asynchronously // for the L2 blocks constructed by the StateKeeper. This means that consensus actor // is effectively just back filling the consensus certificates for the L2 blocks in storage. -#[test_casing(8, Product((FROM_SNAPSHOT,VERSIONS,PREGENESIS)))] +#[test_casing(4, Product((FROM_SNAPSHOT,VERSIONS)))] #[tokio::test] -async fn test_validator(from_snapshot: bool, version: ProtocolVersionId, pregenesis: bool) { +async fn test_validator(from_snapshot: bool, version: ProtocolVersionId) { zksync_concurrency::testonly::abort_on_panic(); let ctx = &ctx::test_root(&ctx::AffineClock::new(10.)); let rng = &mut ctx.rng(); let setup = Setup::new(rng, 1); - let cfg = testonly::new_configs(rng, &setup, 0, pregenesis)[0].clone(); + let cfg = testonly::new_configs(rng, &setup, 0)[0].clone(); let account = &mut Account::random(); scope::run!(ctx, |ctx, s| async { @@ -254,14 +253,14 @@ async fn test_validator(from_snapshot: bool, version: ProtocolVersionId, pregene } // Test running a validator node and 2 full nodes recovered from different snapshots. 
-#[test_casing(4, Product((VERSIONS,PREGENESIS)))] +#[test_casing(2, VERSIONS)] #[tokio::test] -async fn test_nodes_from_various_snapshots(version: ProtocolVersionId, pregenesis: bool) { +async fn test_nodes_from_various_snapshots(version: ProtocolVersionId) { zksync_concurrency::testonly::abort_on_panic(); let ctx = &ctx::test_root(&ctx::AffineClock::new(10.)); let rng = &mut ctx.rng(); let setup = Setup::new(rng, 1); - let validator_cfg = testonly::new_configs(rng, &setup, 0, pregenesis)[0].clone(); + let validator_cfg = testonly::new_configs(rng, &setup, 0)[0].clone(); let account = &mut Account::random(); scope::run!(ctx, |ctx, s| async { @@ -335,14 +334,14 @@ async fn test_nodes_from_various_snapshots(version: ProtocolVersionId, pregenesi .unwrap(); } -#[test_casing(8, Product((FROM_SNAPSHOT,VERSIONS,PREGENESIS)))] +#[test_casing(4, Product((FROM_SNAPSHOT,VERSIONS)))] #[tokio::test] -async fn test_config_change(from_snapshot: bool, version: ProtocolVersionId, pregenesis: bool) { +async fn test_config_change(from_snapshot: bool, version: ProtocolVersionId) { zksync_concurrency::testonly::abort_on_panic(); let ctx = &ctx::test_root(&ctx::AffineClock::new(10.)); let rng = &mut ctx.rng(); let setup = Setup::new(rng, 1); - let mut validator_cfg = testonly::new_configs(rng, &setup, 0, pregenesis)[0].clone(); + let mut validator_cfg = testonly::new_configs(rng, &setup, 0)[0].clone(); let node_cfg = validator_cfg.new_fullnode(rng); let account = &mut Account::random(); @@ -412,16 +411,16 @@ async fn test_config_change(from_snapshot: bool, version: ProtocolVersionId, pre // Test running a validator node and a couple of full nodes. // Validator is producing signed blocks and fetchers are expected to fetch // them directly or indirectly. -#[test_casing(8, Product((FROM_SNAPSHOT,VERSIONS,PREGENESIS)))] +#[test_casing(4, Product((FROM_SNAPSHOT,VERSIONS)))] #[tokio::test] -async fn test_full_nodes(from_snapshot: bool, version: ProtocolVersionId, pregenesis: bool) { +async fn test_full_nodes(from_snapshot: bool, version: ProtocolVersionId) { const NODES: usize = 2; zksync_concurrency::testonly::abort_on_panic(); let ctx = &ctx::test_root(&ctx::AffineClock::new(10.)); let rng = &mut ctx.rng(); let setup = Setup::new(rng, 1); - let validator_cfg = testonly::new_configs(rng, &setup, 0, pregenesis)[0].clone(); + let validator_cfg = testonly::new_configs(rng, &setup, 0)[0].clone(); let account = &mut Account::random(); // topology: @@ -500,16 +499,16 @@ async fn test_full_nodes(from_snapshot: bool, version: ProtocolVersionId, pregen } // Test running external node (non-leader) validators. -#[test_casing(8, Product((FROM_SNAPSHOT,VERSIONS,PREGENESIS)))] +#[test_casing(4, Product((FROM_SNAPSHOT,VERSIONS)))] #[tokio::test] -async fn test_en_validators(from_snapshot: bool, version: ProtocolVersionId, pregenesis: bool) { +async fn test_en_validators(from_snapshot: bool, version: ProtocolVersionId) { const NODES: usize = 3; zksync_concurrency::testonly::abort_on_panic(); let ctx = &ctx::test_root(&ctx::AffineClock::new(10.)); let rng = &mut ctx.rng(); let setup = Setup::new(rng, NODES); - let cfgs = testonly::new_configs(rng, &setup, 1, pregenesis); + let cfgs = testonly::new_configs(rng, &setup, 1); let account = &mut Account::random(); // Run all nodes in parallel. @@ -583,18 +582,14 @@ async fn test_en_validators(from_snapshot: bool, version: ProtocolVersionId, pre } // Test fetcher back filling missing certs. 
-#[test_casing(8, Product((FROM_SNAPSHOT,VERSIONS,PREGENESIS)))] +#[test_casing(4, Product((FROM_SNAPSHOT,VERSIONS)))] #[tokio::test] -async fn test_p2p_fetcher_backfill_certs( - from_snapshot: bool, - version: ProtocolVersionId, - pregenesis: bool, -) { +async fn test_p2p_fetcher_backfill_certs(from_snapshot: bool, version: ProtocolVersionId) { zksync_concurrency::testonly::abort_on_panic(); let ctx = &ctx::test_root(&ctx::AffineClock::new(10.)); let rng = &mut ctx.rng(); let setup = Setup::new(rng, 1); - let validator_cfg = testonly::new_configs(rng, &setup, 0, pregenesis)[0].clone(); + let validator_cfg = testonly::new_configs(rng, &setup, 0)[0].clone(); let node_cfg = validator_cfg.new_fullnode(rng); let account = &mut Account::random(); @@ -668,16 +663,16 @@ async fn test_p2p_fetcher_backfill_certs( } // Test temporary fetcher fetching blocks if a lot of certs are missing. -#[test_casing(8, Product((FROM_SNAPSHOT,VERSIONS,PREGENESIS)))] +#[test_casing(4, Product((FROM_SNAPSHOT,VERSIONS)))] #[tokio::test] -async fn test_temporary_fetcher(from_snapshot: bool, version: ProtocolVersionId, pregenesis: bool) { +async fn test_temporary_fetcher(from_snapshot: bool, version: ProtocolVersionId) { zksync_concurrency::testonly::abort_on_panic(); let ctx = &ctx::test_root(&ctx::AffineClock::new(10.)); let rng = &mut ctx.rng(); // We force certs to be missing on EN by having 1 of the validators permanently offline. // This way no blocks will be finalized at all, so no one will have certs. let setup = Setup::new(rng, 2); - let validator_cfg = testonly::new_configs(rng, &setup, 0, pregenesis)[0].clone(); + let validator_cfg = testonly::new_configs(rng, &setup, 0)[0].clone(); let node_cfg = validator_cfg.new_fullnode(rng); let account = &mut Account::random(); @@ -749,8 +744,7 @@ async fn test_temporary_fetcher_termination(from_snapshot: bool, version: Protoc let ctx = &ctx::test_root(&ctx::AffineClock::new(10.)); let rng = &mut ctx.rng(); let setup = Setup::new(rng, 1); - let pregenesis = true; - let validator_cfg = testonly::new_configs(rng, &setup, 0, pregenesis)[0].clone(); + let validator_cfg = testonly::new_configs(rng, &setup, 0)[0].clone(); let node_cfg = validator_cfg.new_fullnode(rng); let account = &mut Account::random(); @@ -797,14 +791,14 @@ async fn test_temporary_fetcher_termination(from_snapshot: bool, version: Protoc .unwrap(); } -#[test_casing(4, Product((VERSIONS,PREGENESIS)))] +#[test_casing(2, VERSIONS)] #[tokio::test] -async fn test_with_pruning(version: ProtocolVersionId, pregenesis: bool) { +async fn test_with_pruning(version: ProtocolVersionId) { zksync_concurrency::testonly::abort_on_panic(); let ctx = &ctx::test_root(&ctx::RealClock); let rng = &mut ctx.rng(); let setup = Setup::new(rng, 1); - let validator_cfg = testonly::new_configs(rng, &setup, 0, pregenesis)[0].clone(); + let validator_cfg = testonly::new_configs(rng, &setup, 0)[0].clone(); let node_cfg = validator_cfg.new_fullnode(rng); let account = &mut Account::random(); From 1f8ad26c7c9757ffa13de1a2fd045fa9e16de5f6 Mon Sep 17 00:00:00 2001 From: Yury Akudovich Date: Wed, 30 Oct 2024 12:20:32 +0100 Subject: [PATCH 26/32] fix: Run `zkstack dev lint -t autocompletion` to make CI happy. (#3196) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit ## What ❔ ## Why ❔ ## Checklist - [x] PR title corresponds to the body of PR (we generate changelog entries from PRs). - [ ] Tests for the changes have been added / updated. - [ ] Documentation comments have been added / updated. 
- [ ] Code has been formatted via `zkstack dev fmt` and `zkstack dev lint`. --------- Co-authored-by: Manuel Mauro --- .../crates/zkstack/completion/_zkstack.zsh | 472 +++++++++--------- 1 file changed, 236 insertions(+), 236 deletions(-) diff --git a/zkstack_cli/crates/zkstack/completion/_zkstack.zsh b/zkstack_cli/crates/zkstack/completion/_zkstack.zsh index 4df431754c84..f1cfc9946731 100644 --- a/zkstack_cli/crates/zkstack/completion/_zkstack.zsh +++ b/zkstack_cli/crates/zkstack/completion/_zkstack.zsh @@ -15,7 +15,7 @@ _zkstack() { local context curcontext="$curcontext" state line _arguments "${_arguments_options[@]}" : \ -'--chain=[Chain to use]:CHAIN: ' \ +'--chain=[Chain to use]:CHAIN:_default' \ '-v[Verbose mode]' \ '--verbose[Verbose mode]' \ '--ignore-prerequisites[Ignores prerequisites checks]' \ @@ -37,7 +37,7 @@ _arguments "${_arguments_options[@]}" : \ '--generate=[The shell to generate the autocomplete script for]:GENERATOR:(bash elvish fish powershell zsh)' \ '-o+[The out directory to write the autocomplete script to]:OUT:_files' \ '--out=[The out directory to write the autocomplete script to]:OUT:_files' \ -'--chain=[Chain to use]:CHAIN: ' \ +'--chain=[Chain to use]:CHAIN:_default' \ '-v[Verbose mode]' \ '--verbose[Verbose mode]' \ '--ignore-prerequisites[Ignores prerequisites checks]' \ @@ -47,7 +47,7 @@ _arguments "${_arguments_options[@]}" : \ ;; (ecosystem) _arguments "${_arguments_options[@]}" : \ -'--chain=[Chain to use]:CHAIN: ' \ +'--chain=[Chain to use]:CHAIN:_default' \ '-v[Verbose mode]' \ '--verbose[Verbose mode]' \ '--ignore-prerequisites[Ignores prerequisites checks]' \ @@ -65,11 +65,11 @@ _arguments "${_arguments_options[@]}" : \ case $line[1] in (create) _arguments "${_arguments_options[@]}" : \ -'--ecosystem-name=[]:ECOSYSTEM_NAME: ' \ +'--ecosystem-name=[]:ECOSYSTEM_NAME:_default' \ '--l1-network=[L1 Network]:L1_NETWORK:(localhost sepolia holesky mainnet)' \ '--link-to-code=[Code link]:LINK_TO_CODE:_files -/' \ -'--chain-name=[]:CHAIN_NAME: ' \ -'--chain-id=[Chain ID]:CHAIN_ID: ' \ +'--chain-name=[]:CHAIN_NAME:_default' \ +'--chain-id=[Chain ID]:CHAIN_ID:_default' \ '--prover-mode=[Prover options]:PROVER_MODE:(no-proofs gpu)' \ '--wallet-creation=[Wallet options]:WALLET_CREATION:((localhost\:"Load wallets from localhost mnemonic, they are funded for localhost env" random\:"Generate random wallets" @@ -77,13 +77,13 @@ empty\:"Generate placeholder wallets" in-file\:"Specify file with wallets"))' \ '--wallet-path=[Wallet path]:WALLET_PATH:_files' \ '--l1-batch-commit-data-generator-mode=[Commit data generation mode]:L1_BATCH_COMMIT_DATA_GENERATOR_MODE:(rollup validium)' \ -'--base-token-address=[Base token address]:BASE_TOKEN_ADDRESS: ' \ -'--base-token-price-nominator=[Base token nominator]:BASE_TOKEN_PRICE_NOMINATOR: ' \ -'--base-token-price-denominator=[Base token denominator]:BASE_TOKEN_PRICE_DENOMINATOR: ' \ +'--base-token-address=[Base token address]:BASE_TOKEN_ADDRESS:_default' \ +'--base-token-price-nominator=[Base token nominator]:BASE_TOKEN_PRICE_NOMINATOR:_default' \ +'--base-token-price-denominator=[Base token denominator]:BASE_TOKEN_PRICE_DENOMINATOR:_default' \ '--set-as-default=[Set as default chain]' \ '--evm-emulator=[Enable EVM emulator]' \ '--start-containers=[Start reth and postgres containers after creation]' \ -'--chain=[Chain to use]:CHAIN: ' \ +'--chain=[Chain to use]:CHAIN:_default' \ '--legacy-bridge[]' \ '-v[Verbose mode]' \ '--verbose[Verbose mode]' \ @@ -94,17 +94,17 @@ in-file\:"Specify file with wallets"))' \ ;; 
(build-transactions) _arguments "${_arguments_options[@]}" : \ -'--sender=[Address of the transaction sender]:SENDER: ' \ -'--l1-rpc-url=[L1 RPC URL]:L1_RPC_URL: ' \ +'--sender=[Address of the transaction sender]:SENDER:_default' \ +'--l1-rpc-url=[L1 RPC URL]:L1_RPC_URL:_default' \ '-o+[Output directory for the generated files]:OUT:_files' \ '--out=[Output directory for the generated files]:OUT:_files' \ '--verify=[Verify deployed contracts]' \ '--verifier=[Verifier to use]:VERIFIER:(etherscan sourcify blockscout oklink)' \ -'--verifier-url=[Verifier URL, if using a custom provider]:VERIFIER_URL: ' \ -'--verifier-api-key=[Verifier API key]:VERIFIER_API_KEY: ' \ -'*-a+[List of additional arguments that can be passed through the CLI]:ADDITIONAL_ARGS: ' \ -'*--additional-args=[List of additional arguments that can be passed through the CLI]:ADDITIONAL_ARGS: ' \ -'--chain=[Chain to use]:CHAIN: ' \ +'--verifier-url=[Verifier URL, if using a custom provider]:VERIFIER_URL:_default' \ +'--verifier-api-key=[Verifier API key]:VERIFIER_API_KEY:_default' \ +'*-a+[List of additional arguments that can be passed through the CLI]:ADDITIONAL_ARGS:_default' \ +'*--additional-args=[List of additional arguments that can be passed through the CLI]:ADDITIONAL_ARGS:_default' \ +'--chain=[Chain to use]:CHAIN:_default' \ '--resume[]' \ '-v[Verbose mode]' \ '--verbose[Verbose mode]' \ @@ -118,19 +118,19 @@ _arguments "${_arguments_options[@]}" : \ '--deploy-erc20=[Deploy ERC20 contracts]' \ '--deploy-ecosystem=[Deploy ecosystem contracts]' \ '--ecosystem-contracts-path=[Path to ecosystem contracts]:ECOSYSTEM_CONTRACTS_PATH:_files' \ -'--l1-rpc-url=[L1 RPC URL]:L1_RPC_URL: ' \ +'--l1-rpc-url=[L1 RPC URL]:L1_RPC_URL:_default' \ '--verify=[Verify deployed contracts]' \ '--verifier=[Verifier to use]:VERIFIER:(etherscan sourcify blockscout oklink)' \ -'--verifier-url=[Verifier URL, if using a custom provider]:VERIFIER_URL: ' \ -'--verifier-api-key=[Verifier API key]:VERIFIER_API_KEY: ' \ -'*-a+[List of additional arguments that can be passed through the CLI]:ADDITIONAL_ARGS: ' \ -'*--additional-args=[List of additional arguments that can be passed through the CLI]:ADDITIONAL_ARGS: ' \ +'--verifier-url=[Verifier URL, if using a custom provider]:VERIFIER_URL:_default' \ +'--verifier-api-key=[Verifier API key]:VERIFIER_API_KEY:_default' \ +'*-a+[List of additional arguments that can be passed through the CLI]:ADDITIONAL_ARGS:_default' \ +'*--additional-args=[List of additional arguments that can be passed through the CLI]:ADDITIONAL_ARGS:_default' \ '--deploy-paymaster=[Deploy Paymaster contract]' \ -'--server-db-url=[Server database url without database name]:SERVER_DB_URL: ' \ -'--server-db-name=[Server database name]:SERVER_DB_NAME: ' \ +'--server-db-url=[Server database url without database name]:SERVER_DB_URL:_default' \ +'--server-db-name=[Server database name]:SERVER_DB_NAME:_default' \ '-o+[Enable Grafana]' \ '--observability=[Enable Grafana]' \ -'--chain=[Chain to use]:CHAIN: ' \ +'--chain=[Chain to use]:CHAIN:_default' \ '--resume[]' \ '-d[]' \ '--dont-drop[]' \ @@ -146,18 +146,18 @@ _arguments "${_arguments_options[@]}" : \ ;; (change-default-chain) _arguments "${_arguments_options[@]}" : \ -'--chain=[Chain to use]:CHAIN: ' \ +'--chain=[Chain to use]:CHAIN:_default' \ '-v[Verbose mode]' \ '--verbose[Verbose mode]' \ '--ignore-prerequisites[Ignores prerequisites checks]' \ '-h[Print help]' \ '--help[Print help]' \ -'::name:' \ +'::name:_default' \ && ret=0 ;; (setup-observability) _arguments 
"${_arguments_options[@]}" : \ -'--chain=[Chain to use]:CHAIN: ' \ +'--chain=[Chain to use]:CHAIN:_default' \ '-v[Verbose mode]' \ '--verbose[Verbose mode]' \ '--ignore-prerequisites[Ignores prerequisites checks]' \ @@ -211,7 +211,7 @@ esac ;; (chain) _arguments "${_arguments_options[@]}" : \ -'--chain=[Chain to use]:CHAIN: ' \ +'--chain=[Chain to use]:CHAIN:_default' \ '-v[Verbose mode]' \ '--verbose[Verbose mode]' \ '--ignore-prerequisites[Ignores prerequisites checks]' \ @@ -229,8 +229,8 @@ _arguments "${_arguments_options[@]}" : \ case $line[1] in (create) _arguments "${_arguments_options[@]}" : \ -'--chain-name=[]:CHAIN_NAME: ' \ -'--chain-id=[Chain ID]:CHAIN_ID: ' \ +'--chain-name=[]:CHAIN_NAME:_default' \ +'--chain-id=[Chain ID]:CHAIN_ID:_default' \ '--prover-mode=[Prover options]:PROVER_MODE:(no-proofs gpu)' \ '--wallet-creation=[Wallet options]:WALLET_CREATION:((localhost\:"Load wallets from localhost mnemonic, they are funded for localhost env" random\:"Generate random wallets" @@ -238,12 +238,12 @@ empty\:"Generate placeholder wallets" in-file\:"Specify file with wallets"))' \ '--wallet-path=[Wallet path]:WALLET_PATH:_files' \ '--l1-batch-commit-data-generator-mode=[Commit data generation mode]:L1_BATCH_COMMIT_DATA_GENERATOR_MODE:(rollup validium)' \ -'--base-token-address=[Base token address]:BASE_TOKEN_ADDRESS: ' \ -'--base-token-price-nominator=[Base token nominator]:BASE_TOKEN_PRICE_NOMINATOR: ' \ -'--base-token-price-denominator=[Base token denominator]:BASE_TOKEN_PRICE_DENOMINATOR: ' \ +'--base-token-address=[Base token address]:BASE_TOKEN_ADDRESS:_default' \ +'--base-token-price-nominator=[Base token nominator]:BASE_TOKEN_PRICE_NOMINATOR:_default' \ +'--base-token-price-denominator=[Base token denominator]:BASE_TOKEN_PRICE_DENOMINATOR:_default' \ '--set-as-default=[Set as default chain]' \ '--evm-emulator=[Enable EVM emulator]' \ -'--chain=[Chain to use]:CHAIN: ' \ +'--chain=[Chain to use]:CHAIN:_default' \ '--legacy-bridge[]' \ '-v[Verbose mode]' \ '--verbose[Verbose mode]' \ @@ -258,12 +258,12 @@ _arguments "${_arguments_options[@]}" : \ '--out=[Output directory for the generated files]:OUT:_files' \ '--verify=[Verify deployed contracts]' \ '--verifier=[Verifier to use]:VERIFIER:(etherscan sourcify blockscout oklink)' \ -'--verifier-url=[Verifier URL, if using a custom provider]:VERIFIER_URL: ' \ -'--verifier-api-key=[Verifier API key]:VERIFIER_API_KEY: ' \ -'*-a+[List of additional arguments that can be passed through the CLI]:ADDITIONAL_ARGS: ' \ -'*--additional-args=[List of additional arguments that can be passed through the CLI]:ADDITIONAL_ARGS: ' \ -'--l1-rpc-url=[L1 RPC URL]:L1_RPC_URL: ' \ -'--chain=[Chain to use]:CHAIN: ' \ +'--verifier-url=[Verifier URL, if using a custom provider]:VERIFIER_URL:_default' \ +'--verifier-api-key=[Verifier API key]:VERIFIER_API_KEY:_default' \ +'*-a+[List of additional arguments that can be passed through the CLI]:ADDITIONAL_ARGS:_default' \ +'*--additional-args=[List of additional arguments that can be passed through the CLI]:ADDITIONAL_ARGS:_default' \ +'--l1-rpc-url=[L1 RPC URL]:L1_RPC_URL:_default' \ +'--chain=[Chain to use]:CHAIN:_default' \ '--resume[]' \ '-v[Verbose mode]' \ '--verbose[Verbose mode]' \ @@ -276,15 +276,15 @@ _arguments "${_arguments_options[@]}" : \ _arguments "${_arguments_options[@]}" : \ '--verify=[Verify deployed contracts]' \ '--verifier=[Verifier to use]:VERIFIER:(etherscan sourcify blockscout oklink)' \ -'--verifier-url=[Verifier URL, if using a custom provider]:VERIFIER_URL: ' \ 
-'--verifier-api-key=[Verifier API key]:VERIFIER_API_KEY: ' \ -'*-a+[List of additional arguments that can be passed through the CLI]:ADDITIONAL_ARGS: ' \ -'*--additional-args=[List of additional arguments that can be passed through the CLI]:ADDITIONAL_ARGS: ' \ -'--server-db-url=[Server database url without database name]:SERVER_DB_URL: ' \ -'--server-db-name=[Server database name]:SERVER_DB_NAME: ' \ +'--verifier-url=[Verifier URL, if using a custom provider]:VERIFIER_URL:_default' \ +'--verifier-api-key=[Verifier API key]:VERIFIER_API_KEY:_default' \ +'*-a+[List of additional arguments that can be passed through the CLI]:ADDITIONAL_ARGS:_default' \ +'*--additional-args=[List of additional arguments that can be passed through the CLI]:ADDITIONAL_ARGS:_default' \ +'--server-db-url=[Server database url without database name]:SERVER_DB_URL:_default' \ +'--server-db-name=[Server database name]:SERVER_DB_NAME:_default' \ '--deploy-paymaster=[]' \ -'--l1-rpc-url=[L1 RPC URL]:L1_RPC_URL: ' \ -'--chain=[Chain to use]:CHAIN: ' \ +'--l1-rpc-url=[L1 RPC URL]:L1_RPC_URL:_default' \ +'--chain=[Chain to use]:CHAIN:_default' \ '--resume[]' \ '-d[]' \ '--dont-drop[]' \ @@ -307,10 +307,10 @@ _arguments "${_arguments_options[@]}" : \ case $line[1] in (configs) _arguments "${_arguments_options[@]}" : \ -'--server-db-url=[Server database url without database name]:SERVER_DB_URL: ' \ -'--server-db-name=[Server database name]:SERVER_DB_NAME: ' \ -'--l1-rpc-url=[L1 RPC URL]:L1_RPC_URL: ' \ -'--chain=[Chain to use]:CHAIN: ' \ +'--server-db-url=[Server database url without database name]:SERVER_DB_URL:_default' \ +'--server-db-name=[Server database name]:SERVER_DB_NAME:_default' \ +'--l1-rpc-url=[L1 RPC URL]:L1_RPC_URL:_default' \ +'--chain=[Chain to use]:CHAIN:_default' \ '-d[Use default database urls and names]' \ '--dev[Use default database urls and names]' \ '-d[]' \ @@ -353,9 +353,9 @@ esac ;; (genesis) _arguments "${_arguments_options[@]}" : \ -'--server-db-url=[Server database url without database name]:SERVER_DB_URL: ' \ -'--server-db-name=[Server database name]:SERVER_DB_NAME: ' \ -'--chain=[Chain to use]:CHAIN: ' \ +'--server-db-url=[Server database url without database name]:SERVER_DB_URL:_default' \ +'--server-db-name=[Server database name]:SERVER_DB_NAME:_default' \ +'--chain=[Chain to use]:CHAIN:_default' \ '-d[Use default database urls and names]' \ '--dev[Use default database urls and names]' \ '-d[]' \ @@ -377,9 +377,9 @@ _arguments "${_arguments_options[@]}" : \ case $line[1] in (init-database) _arguments "${_arguments_options[@]}" : \ -'--server-db-url=[Server database url without database name]:SERVER_DB_URL: ' \ -'--server-db-name=[Server database name]:SERVER_DB_NAME: ' \ -'--chain=[Chain to use]:CHAIN: ' \ +'--server-db-url=[Server database url without database name]:SERVER_DB_URL:_default' \ +'--server-db-name=[Server database name]:SERVER_DB_NAME:_default' \ +'--chain=[Chain to use]:CHAIN:_default' \ '-d[Use default database urls and names]' \ '--dev[Use default database urls and names]' \ '-d[]' \ @@ -393,7 +393,7 @@ _arguments "${_arguments_options[@]}" : \ ;; (server) _arguments "${_arguments_options[@]}" : \ -'--chain=[Chain to use]:CHAIN: ' \ +'--chain=[Chain to use]:CHAIN:_default' \ '-v[Verbose mode]' \ '--verbose[Verbose mode]' \ '--ignore-prerequisites[Ignores prerequisites checks]' \ @@ -437,11 +437,11 @@ esac _arguments "${_arguments_options[@]}" : \ '--verify=[Verify deployed contracts]' \ '--verifier=[Verifier to use]:VERIFIER:(etherscan sourcify blockscout oklink)' \ 
-'--verifier-url=[Verifier URL, if using a custom provider]:VERIFIER_URL: ' \ -'--verifier-api-key=[Verifier API key]:VERIFIER_API_KEY: ' \ -'*-a+[List of additional arguments that can be passed through the CLI]:ADDITIONAL_ARGS: ' \ -'*--additional-args=[List of additional arguments that can be passed through the CLI]:ADDITIONAL_ARGS: ' \ -'--chain=[Chain to use]:CHAIN: ' \ +'--verifier-url=[Verifier URL, if using a custom provider]:VERIFIER_URL:_default' \ +'--verifier-api-key=[Verifier API key]:VERIFIER_API_KEY:_default' \ +'*-a+[List of additional arguments that can be passed through the CLI]:ADDITIONAL_ARGS:_default' \ +'*--additional-args=[List of additional arguments that can be passed through the CLI]:ADDITIONAL_ARGS:_default' \ +'--chain=[Chain to use]:CHAIN:_default' \ '--resume[]' \ '-v[Verbose mode]' \ '--verbose[Verbose mode]' \ @@ -454,11 +454,11 @@ _arguments "${_arguments_options[@]}" : \ _arguments "${_arguments_options[@]}" : \ '--verify=[Verify deployed contracts]' \ '--verifier=[Verifier to use]:VERIFIER:(etherscan sourcify blockscout oklink)' \ -'--verifier-url=[Verifier URL, if using a custom provider]:VERIFIER_URL: ' \ -'--verifier-api-key=[Verifier API key]:VERIFIER_API_KEY: ' \ -'*-a+[List of additional arguments that can be passed through the CLI]:ADDITIONAL_ARGS: ' \ -'*--additional-args=[List of additional arguments that can be passed through the CLI]:ADDITIONAL_ARGS: ' \ -'--chain=[Chain to use]:CHAIN: ' \ +'--verifier-url=[Verifier URL, if using a custom provider]:VERIFIER_URL:_default' \ +'--verifier-api-key=[Verifier API key]:VERIFIER_API_KEY:_default' \ +'*-a+[List of additional arguments that can be passed through the CLI]:ADDITIONAL_ARGS:_default' \ +'*--additional-args=[List of additional arguments that can be passed through the CLI]:ADDITIONAL_ARGS:_default' \ +'--chain=[Chain to use]:CHAIN:_default' \ '--resume[]' \ '-v[Verbose mode]' \ '--verbose[Verbose mode]' \ @@ -471,11 +471,11 @@ _arguments "${_arguments_options[@]}" : \ _arguments "${_arguments_options[@]}" : \ '--verify=[Verify deployed contracts]' \ '--verifier=[Verifier to use]:VERIFIER:(etherscan sourcify blockscout oklink)' \ -'--verifier-url=[Verifier URL, if using a custom provider]:VERIFIER_URL: ' \ -'--verifier-api-key=[Verifier API key]:VERIFIER_API_KEY: ' \ -'*-a+[List of additional arguments that can be passed through the CLI]:ADDITIONAL_ARGS: ' \ -'*--additional-args=[List of additional arguments that can be passed through the CLI]:ADDITIONAL_ARGS: ' \ -'--chain=[Chain to use]:CHAIN: ' \ +'--verifier-url=[Verifier URL, if using a custom provider]:VERIFIER_URL:_default' \ +'--verifier-api-key=[Verifier API key]:VERIFIER_API_KEY:_default' \ +'*-a+[List of additional arguments that can be passed through the CLI]:ADDITIONAL_ARGS:_default' \ +'*--additional-args=[List of additional arguments that can be passed through the CLI]:ADDITIONAL_ARGS:_default' \ +'--chain=[Chain to use]:CHAIN:_default' \ '--resume[]' \ '-v[Verbose mode]' \ '--verbose[Verbose mode]' \ @@ -488,11 +488,11 @@ _arguments "${_arguments_options[@]}" : \ _arguments "${_arguments_options[@]}" : \ '--verify=[Verify deployed contracts]' \ '--verifier=[Verifier to use]:VERIFIER:(etherscan sourcify blockscout oklink)' \ -'--verifier-url=[Verifier URL, if using a custom provider]:VERIFIER_URL: ' \ -'--verifier-api-key=[Verifier API key]:VERIFIER_API_KEY: ' \ -'*-a+[List of additional arguments that can be passed through the CLI]:ADDITIONAL_ARGS: ' \ -'*--additional-args=[List of additional arguments that can be passed through 
the CLI]:ADDITIONAL_ARGS: ' \ -'--chain=[Chain to use]:CHAIN: ' \ +'--verifier-url=[Verifier URL, if using a custom provider]:VERIFIER_URL:_default' \ +'--verifier-api-key=[Verifier API key]:VERIFIER_API_KEY:_default' \ +'*-a+[List of additional arguments that can be passed through the CLI]:ADDITIONAL_ARGS:_default' \ +'*--additional-args=[List of additional arguments that can be passed through the CLI]:ADDITIONAL_ARGS:_default' \ +'--chain=[Chain to use]:CHAIN:_default' \ '--resume[]' \ '-v[Verbose mode]' \ '--verbose[Verbose mode]' \ @@ -505,11 +505,11 @@ _arguments "${_arguments_options[@]}" : \ _arguments "${_arguments_options[@]}" : \ '--verify=[Verify deployed contracts]' \ '--verifier=[Verifier to use]:VERIFIER:(etherscan sourcify blockscout oklink)' \ -'--verifier-url=[Verifier URL, if using a custom provider]:VERIFIER_URL: ' \ -'--verifier-api-key=[Verifier API key]:VERIFIER_API_KEY: ' \ -'*-a+[List of additional arguments that can be passed through the CLI]:ADDITIONAL_ARGS: ' \ -'*--additional-args=[List of additional arguments that can be passed through the CLI]:ADDITIONAL_ARGS: ' \ -'--chain=[Chain to use]:CHAIN: ' \ +'--verifier-url=[Verifier URL, if using a custom provider]:VERIFIER_URL:_default' \ +'--verifier-api-key=[Verifier API key]:VERIFIER_API_KEY:_default' \ +'*-a+[List of additional arguments that can be passed through the CLI]:ADDITIONAL_ARGS:_default' \ +'*--additional-args=[List of additional arguments that can be passed through the CLI]:ADDITIONAL_ARGS:_default' \ +'--chain=[Chain to use]:CHAIN:_default' \ '--resume[]' \ '-v[Verbose mode]' \ '--verbose[Verbose mode]' \ @@ -522,11 +522,11 @@ _arguments "${_arguments_options[@]}" : \ _arguments "${_arguments_options[@]}" : \ '--verify=[Verify deployed contracts]' \ '--verifier=[Verifier to use]:VERIFIER:(etherscan sourcify blockscout oklink)' \ -'--verifier-url=[Verifier URL, if using a custom provider]:VERIFIER_URL: ' \ -'--verifier-api-key=[Verifier API key]:VERIFIER_API_KEY: ' \ -'*-a+[List of additional arguments that can be passed through the CLI]:ADDITIONAL_ARGS: ' \ -'*--additional-args=[List of additional arguments that can be passed through the CLI]:ADDITIONAL_ARGS: ' \ -'--chain=[Chain to use]:CHAIN: ' \ +'--verifier-url=[Verifier URL, if using a custom provider]:VERIFIER_URL:_default' \ +'--verifier-api-key=[Verifier API key]:VERIFIER_API_KEY:_default' \ +'*-a+[List of additional arguments that can be passed through the CLI]:ADDITIONAL_ARGS:_default' \ +'*--additional-args=[List of additional arguments that can be passed through the CLI]:ADDITIONAL_ARGS:_default' \ +'--chain=[Chain to use]:CHAIN:_default' \ '--resume[]' \ '-v[Verbose mode]' \ '--verbose[Verbose mode]' \ @@ -539,11 +539,11 @@ _arguments "${_arguments_options[@]}" : \ _arguments "${_arguments_options[@]}" : \ '--verify=[Verify deployed contracts]' \ '--verifier=[Verifier to use]:VERIFIER:(etherscan sourcify blockscout oklink)' \ -'--verifier-url=[Verifier URL, if using a custom provider]:VERIFIER_URL: ' \ -'--verifier-api-key=[Verifier API key]:VERIFIER_API_KEY: ' \ -'*-a+[List of additional arguments that can be passed through the CLI]:ADDITIONAL_ARGS: ' \ -'*--additional-args=[List of additional arguments that can be passed through the CLI]:ADDITIONAL_ARGS: ' \ -'--chain=[Chain to use]:CHAIN: ' \ +'--verifier-url=[Verifier URL, if using a custom provider]:VERIFIER_URL:_default' \ +'--verifier-api-key=[Verifier API key]:VERIFIER_API_KEY:_default' \ +'*-a+[List of additional arguments that can be passed through the 
CLI]:ADDITIONAL_ARGS:_default' \ +'*--additional-args=[List of additional arguments that can be passed through the CLI]:ADDITIONAL_ARGS:_default' \ +'--chain=[Chain to use]:CHAIN:_default' \ '--resume[]' \ '-v[Verbose mode]' \ '--verbose[Verbose mode]' \ @@ -556,11 +556,11 @@ _arguments "${_arguments_options[@]}" : \ _arguments "${_arguments_options[@]}" : \ '--verify=[Verify deployed contracts]' \ '--verifier=[Verifier to use]:VERIFIER:(etherscan sourcify blockscout oklink)' \ -'--verifier-url=[Verifier URL, if using a custom provider]:VERIFIER_URL: ' \ -'--verifier-api-key=[Verifier API key]:VERIFIER_API_KEY: ' \ -'*-a+[List of additional arguments that can be passed through the CLI]:ADDITIONAL_ARGS: ' \ -'*--additional-args=[List of additional arguments that can be passed through the CLI]:ADDITIONAL_ARGS: ' \ -'--chain=[Chain to use]:CHAIN: ' \ +'--verifier-url=[Verifier URL, if using a custom provider]:VERIFIER_URL:_default' \ +'--verifier-api-key=[Verifier API key]:VERIFIER_API_KEY:_default' \ +'*-a+[List of additional arguments that can be passed through the CLI]:ADDITIONAL_ARGS:_default' \ +'*--additional-args=[List of additional arguments that can be passed through the CLI]:ADDITIONAL_ARGS:_default' \ +'--chain=[Chain to use]:CHAIN:_default' \ '--resume[]' \ '-v[Verbose mode]' \ '--verbose[Verbose mode]' \ @@ -573,11 +573,11 @@ _arguments "${_arguments_options[@]}" : \ _arguments "${_arguments_options[@]}" : \ '--verify=[Verify deployed contracts]' \ '--verifier=[Verifier to use]:VERIFIER:(etherscan sourcify blockscout oklink)' \ -'--verifier-url=[Verifier URL, if using a custom provider]:VERIFIER_URL: ' \ -'--verifier-api-key=[Verifier API key]:VERIFIER_API_KEY: ' \ -'*-a+[List of additional arguments that can be passed through the CLI]:ADDITIONAL_ARGS: ' \ -'*--additional-args=[List of additional arguments that can be passed through the CLI]:ADDITIONAL_ARGS: ' \ -'--chain=[Chain to use]:CHAIN: ' \ +'--verifier-url=[Verifier URL, if using a custom provider]:VERIFIER_URL:_default' \ +'--verifier-api-key=[Verifier API key]:VERIFIER_API_KEY:_default' \ +'*-a+[List of additional arguments that can be passed through the CLI]:ADDITIONAL_ARGS:_default' \ +'*--additional-args=[List of additional arguments that can be passed through the CLI]:ADDITIONAL_ARGS:_default' \ +'--chain=[Chain to use]:CHAIN:_default' \ '--resume[]' \ '-v[Verbose mode]' \ '--verbose[Verbose mode]' \ @@ -700,7 +700,7 @@ esac ;; (dev) _arguments "${_arguments_options[@]}" : \ -'--chain=[Chain to use]:CHAIN: ' \ +'--chain=[Chain to use]:CHAIN:_default' \ '-v[Verbose mode]' \ '--verbose[Verbose mode]' \ '--ignore-prerequisites[Ignores prerequisites checks]' \ @@ -718,7 +718,7 @@ _arguments "${_arguments_options[@]}" : \ case $line[1] in (database) _arguments "${_arguments_options[@]}" : \ -'--chain=[Chain to use]:CHAIN: ' \ +'--chain=[Chain to use]:CHAIN:_default' \ '-v[Verbose mode]' \ '--verbose[Verbose mode]' \ '--ignore-prerequisites[Ignores prerequisites checks]' \ @@ -738,11 +738,11 @@ _arguments "${_arguments_options[@]}" : \ _arguments "${_arguments_options[@]}" : \ '-p+[Prover database]' \ '--prover=[Prover database]' \ -'--prover-url=[URL of the Prover database. If not specified, it is used from the current chain'\''s secrets]:PROVER_URL: ' \ +'--prover-url=[URL of the Prover database. If not specified, it is used from the current chain'\''s secrets]:PROVER_URL:_default' \ '-c+[Core database]' \ '--core=[Core database]' \ -'--core-url=[URL of the Core database. 
If not specified, it is used from the current chain'\''s secrets.]:CORE_URL: ' \ -'--chain=[Chain to use]:CHAIN: ' \ +'--core-url=[URL of the Core database. If not specified, it is used from the current chain'\''s secrets.]:CORE_URL:_default' \ +'--chain=[Chain to use]:CHAIN:_default' \ '-v[Verbose mode]' \ '--verbose[Verbose mode]' \ '--ignore-prerequisites[Ignores prerequisites checks]' \ @@ -754,11 +754,11 @@ _arguments "${_arguments_options[@]}" : \ _arguments "${_arguments_options[@]}" : \ '-p+[Prover database]' \ '--prover=[Prover database]' \ -'--prover-url=[URL of the Prover database. If not specified, it is used from the current chain'\''s secrets]:PROVER_URL: ' \ +'--prover-url=[URL of the Prover database. If not specified, it is used from the current chain'\''s secrets]:PROVER_URL:_default' \ '-c+[Core database]' \ '--core=[Core database]' \ -'--core-url=[URL of the Core database. If not specified, it is used from the current chain'\''s secrets.]:CORE_URL: ' \ -'--chain=[Chain to use]:CHAIN: ' \ +'--core-url=[URL of the Core database. If not specified, it is used from the current chain'\''s secrets.]:CORE_URL:_default' \ +'--chain=[Chain to use]:CHAIN:_default' \ '-v[Verbose mode]' \ '--verbose[Verbose mode]' \ '--ignore-prerequisites[Ignores prerequisites checks]' \ @@ -770,11 +770,11 @@ _arguments "${_arguments_options[@]}" : \ _arguments "${_arguments_options[@]}" : \ '-p+[Prover database]' \ '--prover=[Prover database]' \ -'--prover-url=[URL of the Prover database. If not specified, it is used from the current chain'\''s secrets]:PROVER_URL: ' \ +'--prover-url=[URL of the Prover database. If not specified, it is used from the current chain'\''s secrets]:PROVER_URL:_default' \ '-c+[Core database]' \ '--core=[Core database]' \ -'--core-url=[URL of the Core database. If not specified, it is used from the current chain'\''s secrets.]:CORE_URL: ' \ -'--chain=[Chain to use]:CHAIN: ' \ +'--core-url=[URL of the Core database. If not specified, it is used from the current chain'\''s secrets.]:CORE_URL:_default' \ +'--chain=[Chain to use]:CHAIN:_default' \ '-v[Verbose mode]' \ '--verbose[Verbose mode]' \ '--ignore-prerequisites[Ignores prerequisites checks]' \ @@ -785,8 +785,8 @@ _arguments "${_arguments_options[@]}" : \ (new-migration) _arguments "${_arguments_options[@]}" : \ '--database=[Database to create new migration for]:DATABASE:(prover core)' \ -'--name=[Migration name]:NAME: ' \ -'--chain=[Chain to use]:CHAIN: ' \ +'--name=[Migration name]:NAME:_default' \ +'--chain=[Chain to use]:CHAIN:_default' \ '-v[Verbose mode]' \ '--verbose[Verbose mode]' \ '--ignore-prerequisites[Ignores prerequisites checks]' \ @@ -798,11 +798,11 @@ _arguments "${_arguments_options[@]}" : \ _arguments "${_arguments_options[@]}" : \ '-p+[Prover database]' \ '--prover=[Prover database]' \ -'--prover-url=[URL of the Prover database. If not specified, it is used from the current chain'\''s secrets]:PROVER_URL: ' \ +'--prover-url=[URL of the Prover database. If not specified, it is used from the current chain'\''s secrets]:PROVER_URL:_default' \ '-c+[Core database]' \ '--core=[Core database]' \ -'--core-url=[URL of the Core database. If not specified, it is used from the current chain'\''s secrets.]:CORE_URL: ' \ -'--chain=[Chain to use]:CHAIN: ' \ +'--core-url=[URL of the Core database. 
If not specified, it is used from the current chain'\''s secrets.]:CORE_URL:_default' \ +'--chain=[Chain to use]:CHAIN:_default' \ '-v[Verbose mode]' \ '--verbose[Verbose mode]' \ '--ignore-prerequisites[Ignores prerequisites checks]' \ @@ -814,11 +814,11 @@ _arguments "${_arguments_options[@]}" : \ _arguments "${_arguments_options[@]}" : \ '-p+[Prover database]' \ '--prover=[Prover database]' \ -'--prover-url=[URL of the Prover database. If not specified, it is used from the current chain'\''s secrets]:PROVER_URL: ' \ +'--prover-url=[URL of the Prover database. If not specified, it is used from the current chain'\''s secrets]:PROVER_URL:_default' \ '-c+[Core database]' \ '--core=[Core database]' \ -'--core-url=[URL of the Core database. If not specified, it is used from the current chain'\''s secrets.]:CORE_URL: ' \ -'--chain=[Chain to use]:CHAIN: ' \ +'--core-url=[URL of the Core database. If not specified, it is used from the current chain'\''s secrets.]:CORE_URL:_default' \ +'--chain=[Chain to use]:CHAIN:_default' \ '-v[Verbose mode]' \ '--verbose[Verbose mode]' \ '--ignore-prerequisites[Ignores prerequisites checks]' \ @@ -830,11 +830,11 @@ _arguments "${_arguments_options[@]}" : \ _arguments "${_arguments_options[@]}" : \ '-p+[Prover database]' \ '--prover=[Prover database]' \ -'--prover-url=[URL of the Prover database. If not specified, it is used from the current chain'\''s secrets]:PROVER_URL: ' \ +'--prover-url=[URL of the Prover database. If not specified, it is used from the current chain'\''s secrets]:PROVER_URL:_default' \ '-c+[Core database]' \ '--core=[Core database]' \ -'--core-url=[URL of the Core database. If not specified, it is used from the current chain'\''s secrets.]:CORE_URL: ' \ -'--chain=[Chain to use]:CHAIN: ' \ +'--core-url=[URL of the Core database. If not specified, it is used from the current chain'\''s secrets.]:CORE_URL:_default' \ +'--chain=[Chain to use]:CHAIN:_default' \ '-v[Verbose mode]' \ '--verbose[Verbose mode]' \ '--ignore-prerequisites[Ignores prerequisites checks]' \ @@ -896,7 +896,7 @@ esac ;; (test) _arguments "${_arguments_options[@]}" : \ -'--chain=[Chain to use]:CHAIN: ' \ +'--chain=[Chain to use]:CHAIN:_default' \ '-v[Verbose mode]' \ '--verbose[Verbose mode]' \ '--ignore-prerequisites[Ignores prerequisites checks]' \ @@ -914,9 +914,9 @@ _arguments "${_arguments_options[@]}" : \ case $line[1] in (integration) _arguments "${_arguments_options[@]}" : \ -'-t+[Run just the tests matching a pattern. Same as the -t flag on jest.]:TEST_PATTERN: ' \ -'--test-pattern=[Run just the tests matching a pattern. Same as the -t flag on jest.]:TEST_PATTERN: ' \ -'--chain=[Chain to use]:CHAIN: ' \ +'-t+[Run just the tests matching a pattern. Same as the -t flag on jest.]:TEST_PATTERN:_default' \ +'--test-pattern=[Run just the tests matching a pattern. 
Same as the -t flag on jest.]:TEST_PATTERN:_default' \ +'--chain=[Chain to use]:CHAIN:_default' \ '-e[Run tests for external node]' \ '--external-node[Run tests for external node]' \ '-n[Do not install or build dependencies]' \ @@ -930,7 +930,7 @@ _arguments "${_arguments_options[@]}" : \ ;; (fees) _arguments "${_arguments_options[@]}" : \ -'--chain=[Chain to use]:CHAIN: ' \ +'--chain=[Chain to use]:CHAIN:_default' \ '-n[Do not install or build dependencies]' \ '--no-deps[Do not install or build dependencies]' \ '--no-kill[The test will not kill all the nodes during execution]' \ @@ -943,7 +943,7 @@ _arguments "${_arguments_options[@]}" : \ ;; (revert) _arguments "${_arguments_options[@]}" : \ -'--chain=[Chain to use]:CHAIN: ' \ +'--chain=[Chain to use]:CHAIN:_default' \ '--enable-consensus[Enable consensus]' \ '-e[Run tests for external node]' \ '--external-node[Run tests for external node]' \ @@ -959,7 +959,7 @@ _arguments "${_arguments_options[@]}" : \ ;; (recovery) _arguments "${_arguments_options[@]}" : \ -'--chain=[Chain to use]:CHAIN: ' \ +'--chain=[Chain to use]:CHAIN:_default' \ '-s[Run recovery from a snapshot instead of genesis]' \ '--snapshot[Run recovery from a snapshot instead of genesis]' \ '-n[Do not install or build dependencies]' \ @@ -974,7 +974,7 @@ _arguments "${_arguments_options[@]}" : \ ;; (upgrade) _arguments "${_arguments_options[@]}" : \ -'--chain=[Chain to use]:CHAIN: ' \ +'--chain=[Chain to use]:CHAIN:_default' \ '-n[Do not install or build dependencies]' \ '--no-deps[Do not install or build dependencies]' \ '-v[Verbose mode]' \ @@ -986,7 +986,7 @@ _arguments "${_arguments_options[@]}" : \ ;; (build) _arguments "${_arguments_options[@]}" : \ -'--chain=[Chain to use]:CHAIN: ' \ +'--chain=[Chain to use]:CHAIN:_default' \ '-v[Verbose mode]' \ '--verbose[Verbose mode]' \ '--ignore-prerequisites[Ignores prerequisites checks]' \ @@ -996,8 +996,8 @@ _arguments "${_arguments_options[@]}" : \ ;; (rust) _arguments "${_arguments_options[@]}" : \ -'--options=[Cargo test flags]:OPTIONS: ' \ -'--chain=[Chain to use]:CHAIN: ' \ +'--options=[Cargo test flags]:OPTIONS:_default' \ +'--chain=[Chain to use]:CHAIN:_default' \ '-v[Verbose mode]' \ '--verbose[Verbose mode]' \ '--ignore-prerequisites[Ignores prerequisites checks]' \ @@ -1007,7 +1007,7 @@ _arguments "${_arguments_options[@]}" : \ ;; (l1-contracts) _arguments "${_arguments_options[@]}" : \ -'--chain=[Chain to use]:CHAIN: ' \ +'--chain=[Chain to use]:CHAIN:_default' \ '-v[Verbose mode]' \ '--verbose[Verbose mode]' \ '--ignore-prerequisites[Ignores prerequisites checks]' \ @@ -1017,7 +1017,7 @@ _arguments "${_arguments_options[@]}" : \ ;; (prover) _arguments "${_arguments_options[@]}" : \ -'--chain=[Chain to use]:CHAIN: ' \ +'--chain=[Chain to use]:CHAIN:_default' \ '-v[Verbose mode]' \ '--verbose[Verbose mode]' \ '--ignore-prerequisites[Ignores prerequisites checks]' \ @@ -1027,7 +1027,7 @@ _arguments "${_arguments_options[@]}" : \ ;; (wallet) _arguments "${_arguments_options[@]}" : \ -'--chain=[Chain to use]:CHAIN: ' \ +'--chain=[Chain to use]:CHAIN:_default' \ '-v[Verbose mode]' \ '--verbose[Verbose mode]' \ '--ignore-prerequisites[Ignores prerequisites checks]' \ @@ -1037,7 +1037,7 @@ _arguments "${_arguments_options[@]}" : \ ;; (loadtest) _arguments "${_arguments_options[@]}" : \ -'--chain=[Chain to use]:CHAIN: ' \ +'--chain=[Chain to use]:CHAIN:_default' \ '-v[Verbose mode]' \ '--verbose[Verbose mode]' \ '--ignore-prerequisites[Ignores prerequisites checks]' \ @@ -1115,7 +1115,7 @@ esac ;; (clean) _arguments 
"${_arguments_options[@]}" : \ -'--chain=[Chain to use]:CHAIN: ' \ +'--chain=[Chain to use]:CHAIN:_default' \ '-v[Verbose mode]' \ '--verbose[Verbose mode]' \ '--ignore-prerequisites[Ignores prerequisites checks]' \ @@ -1133,7 +1133,7 @@ _arguments "${_arguments_options[@]}" : \ case $line[1] in (all) _arguments "${_arguments_options[@]}" : \ -'--chain=[Chain to use]:CHAIN: ' \ +'--chain=[Chain to use]:CHAIN:_default' \ '-v[Verbose mode]' \ '--verbose[Verbose mode]' \ '--ignore-prerequisites[Ignores prerequisites checks]' \ @@ -1143,7 +1143,7 @@ _arguments "${_arguments_options[@]}" : \ ;; (containers) _arguments "${_arguments_options[@]}" : \ -'--chain=[Chain to use]:CHAIN: ' \ +'--chain=[Chain to use]:CHAIN:_default' \ '-v[Verbose mode]' \ '--verbose[Verbose mode]' \ '--ignore-prerequisites[Ignores prerequisites checks]' \ @@ -1153,7 +1153,7 @@ _arguments "${_arguments_options[@]}" : \ ;; (contracts-cache) _arguments "${_arguments_options[@]}" : \ -'--chain=[Chain to use]:CHAIN: ' \ +'--chain=[Chain to use]:CHAIN:_default' \ '-v[Verbose mode]' \ '--verbose[Verbose mode]' \ '--ignore-prerequisites[Ignores prerequisites checks]' \ @@ -1199,7 +1199,7 @@ esac ;; (snapshot) _arguments "${_arguments_options[@]}" : \ -'--chain=[Chain to use]:CHAIN: ' \ +'--chain=[Chain to use]:CHAIN:_default' \ '-v[Verbose mode]' \ '--verbose[Verbose mode]' \ '--ignore-prerequisites[Ignores prerequisites checks]' \ @@ -1217,7 +1217,7 @@ _arguments "${_arguments_options[@]}" : \ case $line[1] in (create) _arguments "${_arguments_options[@]}" : \ -'--chain=[Chain to use]:CHAIN: ' \ +'--chain=[Chain to use]:CHAIN:_default' \ '-v[Verbose mode]' \ '--verbose[Verbose mode]' \ '--ignore-prerequisites[Ignores prerequisites checks]' \ @@ -1257,7 +1257,7 @@ esac _arguments "${_arguments_options[@]}" : \ '*-t+[]:TARGETS:(md sol js ts rs contracts autocompletion)' \ '*--targets=[]:TARGETS:(md sol js ts rs contracts autocompletion)' \ -'--chain=[Chain to use]:CHAIN: ' \ +'--chain=[Chain to use]:CHAIN:_default' \ '-c[]' \ '--check[]' \ '-v[Verbose mode]' \ @@ -1269,7 +1269,7 @@ _arguments "${_arguments_options[@]}" : \ ;; (fmt) _arguments "${_arguments_options[@]}" : \ -'--chain=[Chain to use]:CHAIN: ' \ +'--chain=[Chain to use]:CHAIN:_default' \ '-c[]' \ '--check[]' \ '-v[Verbose mode]' \ @@ -1289,7 +1289,7 @@ _arguments "${_arguments_options[@]}" : \ case $line[1] in (rustfmt) _arguments "${_arguments_options[@]}" : \ -'--chain=[Chain to use]:CHAIN: ' \ +'--chain=[Chain to use]:CHAIN:_default' \ '-v[Verbose mode]' \ '--verbose[Verbose mode]' \ '--ignore-prerequisites[Ignores prerequisites checks]' \ @@ -1299,7 +1299,7 @@ _arguments "${_arguments_options[@]}" : \ ;; (contract) _arguments "${_arguments_options[@]}" : \ -'--chain=[Chain to use]:CHAIN: ' \ +'--chain=[Chain to use]:CHAIN:_default' \ '-v[Verbose mode]' \ '--verbose[Verbose mode]' \ '--ignore-prerequisites[Ignores prerequisites checks]' \ @@ -1311,7 +1311,7 @@ _arguments "${_arguments_options[@]}" : \ _arguments "${_arguments_options[@]}" : \ '*-t+[]:TARGETS:(md sol js ts rs contracts autocompletion)' \ '*--targets=[]:TARGETS:(md sol js ts rs contracts autocompletion)' \ -'--chain=[Chain to use]:CHAIN: ' \ +'--chain=[Chain to use]:CHAIN:_default' \ '-v[Verbose mode]' \ '--verbose[Verbose mode]' \ '--ignore-prerequisites[Ignores prerequisites checks]' \ @@ -1357,7 +1357,7 @@ esac ;; (prover) _arguments "${_arguments_options[@]}" : \ -'--chain=[Chain to use]:CHAIN: ' \ +'--chain=[Chain to use]:CHAIN:_default' \ '-v[Verbose mode]' \ '--verbose[Verbose mode]' \ 
'--ignore-prerequisites[Ignores prerequisites checks]' \ @@ -1375,7 +1375,7 @@ _arguments "${_arguments_options[@]}" : \ case $line[1] in (info) _arguments "${_arguments_options[@]}" : \ -'--chain=[Chain to use]:CHAIN: ' \ +'--chain=[Chain to use]:CHAIN:_default' \ '-v[Verbose mode]' \ '--verbose[Verbose mode]' \ '--ignore-prerequisites[Ignores prerequisites checks]' \ @@ -1385,9 +1385,9 @@ _arguments "${_arguments_options[@]}" : \ ;; (insert-batch) _arguments "${_arguments_options[@]}" : \ -'--number=[]:NUMBER: ' \ -'--version=[]:VERSION: ' \ -'--chain=[Chain to use]:CHAIN: ' \ +'--number=[]:NUMBER:_default' \ +'--version=[]:VERSION:_default' \ +'--chain=[Chain to use]:CHAIN:_default' \ '--default[]' \ '-v[Verbose mode]' \ '--verbose[Verbose mode]' \ @@ -1398,9 +1398,9 @@ _arguments "${_arguments_options[@]}" : \ ;; (insert-version) _arguments "${_arguments_options[@]}" : \ -'--version=[]:VERSION: ' \ -'--snark-wrapper=[]:SNARK_WRAPPER: ' \ -'--chain=[Chain to use]:CHAIN: ' \ +'--version=[]:VERSION:_default' \ +'--snark-wrapper=[]:SNARK_WRAPPER:_default' \ +'--chain=[Chain to use]:CHAIN:_default' \ '--default[]' \ '-v[Verbose mode]' \ '--verbose[Verbose mode]' \ @@ -1451,7 +1451,7 @@ _arguments "${_arguments_options[@]}" : \ '--l2-contracts=[Build L2 contracts]' \ '--system-contracts=[Build system contracts]' \ '--test-contracts=[Build test contracts]' \ -'--chain=[Chain to use]:CHAIN: ' \ +'--chain=[Chain to use]:CHAIN:_default' \ '-v[Verbose mode]' \ '--verbose[Verbose mode]' \ '--ignore-prerequisites[Ignores prerequisites checks]' \ @@ -1461,9 +1461,9 @@ _arguments "${_arguments_options[@]}" : \ ;; (config-writer) _arguments "${_arguments_options[@]}" : \ -'-p+[Path to the config file to override]:PATH: ' \ -'--path=[Path to the config file to override]:PATH: ' \ -'--chain=[Chain to use]:CHAIN: ' \ +'-p+[Path to the config file to override]:PATH:_default' \ +'--path=[Path to the config file to override]:PATH:_default' \ +'--chain=[Chain to use]:CHAIN:_default' \ '-v[Verbose mode]' \ '--verbose[Verbose mode]' \ '--ignore-prerequisites[Ignores prerequisites checks]' \ @@ -1474,10 +1474,10 @@ _arguments "${_arguments_options[@]}" : \ (send-transactions) _arguments "${_arguments_options[@]}" : \ '--file=[]:FILE:_files' \ -'--private-key=[]:PRIVATE_KEY: ' \ -'--l1-rpc-url=[]:L1_RPC_URL: ' \ -'--confirmations=[]:CONFIRMATIONS: ' \ -'--chain=[Chain to use]:CHAIN: ' \ +'--private-key=[]:PRIVATE_KEY:_default' \ +'--l1-rpc-url=[]:L1_RPC_URL:_default' \ +'--confirmations=[]:CONFIRMATIONS:_default' \ +'--chain=[Chain to use]:CHAIN:_default' \ '-v[Verbose mode]' \ '--verbose[Verbose mode]' \ '--ignore-prerequisites[Ignores prerequisites checks]' \ @@ -1487,9 +1487,9 @@ _arguments "${_arguments_options[@]}" : \ ;; (status) _arguments "${_arguments_options[@]}" : \ -'-u+[URL of the health check endpoint]:URL: ' \ -'--url=[URL of the health check endpoint]:URL: ' \ -'--chain=[Chain to use]:CHAIN: ' \ +'-u+[URL of the health check endpoint]:URL:_default' \ +'--url=[URL of the health check endpoint]:URL:_default' \ +'--chain=[Chain to use]:CHAIN:_default' \ '-v[Verbose mode]' \ '--verbose[Verbose mode]' \ '--ignore-prerequisites[Ignores prerequisites checks]' \ @@ -1507,7 +1507,7 @@ _arguments "${_arguments_options[@]}" : \ case $line[1] in (ports) _arguments "${_arguments_options[@]}" : \ -'--chain=[Chain to use]:CHAIN: ' \ +'--chain=[Chain to use]:CHAIN:_default' \ '-v[Verbose mode]' \ '--verbose[Verbose mode]' \ '--ignore-prerequisites[Ignores prerequisites checks]' \ @@ -1545,7 +1545,7 @@ esac ;; 
(generate-genesis) _arguments "${_arguments_options[@]}" : \ -'--chain=[Chain to use]:CHAIN: ' \ +'--chain=[Chain to use]:CHAIN:_default' \ '-v[Verbose mode]' \ '--verbose[Verbose mode]' \ '--ignore-prerequisites[Ignores prerequisites checks]' \ @@ -1827,7 +1827,7 @@ esac ;; (prover) _arguments "${_arguments_options[@]}" : \ -'--chain=[Chain to use]:CHAIN: ' \ +'--chain=[Chain to use]:CHAIN:_default' \ '-v[Verbose mode]' \ '--verbose[Verbose mode]' \ '--ignore-prerequisites[Ignores prerequisites checks]' \ @@ -1845,35 +1845,35 @@ _arguments "${_arguments_options[@]}" : \ case $line[1] in (init) _arguments "${_arguments_options[@]}" : \ -'--proof-store-dir=[]:PROOF_STORE_DIR: ' \ -'--bucket-base-url=[]:BUCKET_BASE_URL: ' \ -'--credentials-file=[]:CREDENTIALS_FILE: ' \ -'--bucket-name=[]:BUCKET_NAME: ' \ -'--location=[]:LOCATION: ' \ -'--project-id=[]:PROJECT_ID: ' \ +'--proof-store-dir=[]:PROOF_STORE_DIR:_default' \ +'--bucket-base-url=[]:BUCKET_BASE_URL:_default' \ +'--credentials-file=[]:CREDENTIALS_FILE:_default' \ +'--bucket-name=[]:BUCKET_NAME:_default' \ +'--location=[]:LOCATION:_default' \ +'--project-id=[]:PROJECT_ID:_default' \ '--shall-save-to-public-bucket=[]:SHALL_SAVE_TO_PUBLIC_BUCKET:(true false)' \ -'--public-store-dir=[]:PUBLIC_STORE_DIR: ' \ -'--public-bucket-base-url=[]:PUBLIC_BUCKET_BASE_URL: ' \ -'--public-credentials-file=[]:PUBLIC_CREDENTIALS_FILE: ' \ -'--public-bucket-name=[]:PUBLIC_BUCKET_NAME: ' \ -'--public-location=[]:PUBLIC_LOCATION: ' \ -'--public-project-id=[]:PUBLIC_PROJECT_ID: ' \ -'(--clone)--bellman-cuda-dir=[]:BELLMAN_CUDA_DIR: ' \ +'--public-store-dir=[]:PUBLIC_STORE_DIR:_default' \ +'--public-bucket-base-url=[]:PUBLIC_BUCKET_BASE_URL:_default' \ +'--public-credentials-file=[]:PUBLIC_CREDENTIALS_FILE:_default' \ +'--public-bucket-name=[]:PUBLIC_BUCKET_NAME:_default' \ +'--public-location=[]:PUBLIC_LOCATION:_default' \ +'--public-project-id=[]:PUBLIC_PROJECT_ID:_default' \ +'(--clone)--bellman-cuda-dir=[]:BELLMAN_CUDA_DIR:_default' \ '--bellman-cuda=[]' \ '--setup-compressor-key=[]' \ -'--path=[]:PATH: ' \ +'--path=[]:PATH:_default' \ '--region=[]:REGION:(us europe asia)' \ '--mode=[]:MODE:(download generate)' \ '--setup-keys=[]' \ '--setup-database=[]:SETUP_DATABASE:(true false)' \ -'--prover-db-url=[Prover database url without database name]:PROVER_DB_URL: ' \ -'--prover-db-name=[Prover database name]:PROVER_DB_NAME: ' \ +'--prover-db-url=[Prover database url without database name]:PROVER_DB_URL:_default' \ +'--prover-db-name=[Prover database name]:PROVER_DB_NAME:_default' \ '-u+[Use default database urls and names]:USE_DEFAULT:(true false)' \ '--use-default=[Use default database urls and names]:USE_DEFAULT:(true false)' \ '-d+[]:DONT_DROP:(true false)' \ '--dont-drop=[]:DONT_DROP:(true false)' \ '--cloud-type=[]:CLOUD_TYPE:(gcp local)' \ -'--chain=[Chain to use]:CHAIN: ' \ +'--chain=[Chain to use]:CHAIN:_default' \ '--dev[]' \ '(--bellman-cuda-dir)--clone[]' \ '-v[Verbose mode]' \ @@ -1887,7 +1887,7 @@ _arguments "${_arguments_options[@]}" : \ _arguments "${_arguments_options[@]}" : \ '--region=[]:REGION:(us europe asia)' \ '--mode=[]:MODE:(download generate)' \ -'--chain=[Chain to use]:CHAIN: ' \ +'--chain=[Chain to use]:CHAIN:_default' \ '-v[Verbose mode]' \ '--verbose[Verbose mode]' \ '--ignore-prerequisites[Ignores prerequisites checks]' \ @@ -1899,13 +1899,13 @@ _arguments "${_arguments_options[@]}" : \ _arguments "${_arguments_options[@]}" : \ '--component=[]:COMPONENT:(gateway witness-generator witness-vector-generator prover circuit-prover 
compressor prover-job-monitor)' \ '--round=[]:ROUND:(all-rounds basic-circuits leaf-aggregation node-aggregation recursion-tip scheduler)' \ -'--threads=[]:THREADS: ' \ -'--max-allocation=[Memory allocation limit in bytes (for prover component)]:MAX_ALLOCATION: ' \ -'--witness-vector-generator-count=[]:WITNESS_VECTOR_GENERATOR_COUNT: ' \ -'--max-allocation=[]:MAX_ALLOCATION: ' \ +'--threads=[]:THREADS:_default' \ +'--max-allocation=[Memory allocation limit in bytes (for prover component)]:MAX_ALLOCATION:_default' \ +'--witness-vector-generator-count=[]:WITNESS_VECTOR_GENERATOR_COUNT:_default' \ +'--max-allocation=[]:MAX_ALLOCATION:_default' \ '--docker=[]:DOCKER:(true false)' \ -'--tag=[]:TAG: ' \ -'--chain=[Chain to use]:CHAIN: ' \ +'--tag=[]:TAG:_default' \ +'--chain=[Chain to use]:CHAIN:_default' \ '-v[Verbose mode]' \ '--verbose[Verbose mode]' \ '--ignore-prerequisites[Ignores prerequisites checks]' \ @@ -1915,8 +1915,8 @@ _arguments "${_arguments_options[@]}" : \ ;; (init-bellman-cuda) _arguments "${_arguments_options[@]}" : \ -'(--clone)--bellman-cuda-dir=[]:BELLMAN_CUDA_DIR: ' \ -'--chain=[Chain to use]:CHAIN: ' \ +'(--clone)--bellman-cuda-dir=[]:BELLMAN_CUDA_DIR:_default' \ +'--chain=[Chain to use]:CHAIN:_default' \ '(--bellman-cuda-dir)--clone[]' \ '-v[Verbose mode]' \ '--verbose[Verbose mode]' \ @@ -1927,8 +1927,8 @@ _arguments "${_arguments_options[@]}" : \ ;; (compressor-keys) _arguments "${_arguments_options[@]}" : \ -'--path=[]:PATH: ' \ -'--chain=[Chain to use]:CHAIN: ' \ +'--path=[]:PATH:_default' \ +'--chain=[Chain to use]:CHAIN:_default' \ '-v[Verbose mode]' \ '--verbose[Verbose mode]' \ '--ignore-prerequisites[Ignores prerequisites checks]' \ @@ -1982,10 +1982,10 @@ esac ;; (server) _arguments "${_arguments_options[@]}" : \ -'*--components=[Components of server to run]:COMPONENTS: ' \ -'*-a+[Additional arguments that can be passed through the CLI]:ADDITIONAL_ARGS: ' \ -'*--additional-args=[Additional arguments that can be passed through the CLI]:ADDITIONAL_ARGS: ' \ -'--chain=[Chain to use]:CHAIN: ' \ +'*--components=[Components of server to run]:COMPONENTS:_default' \ +'*-a+[Additional arguments that can be passed through the CLI]:ADDITIONAL_ARGS:_default' \ +'*--additional-args=[Additional arguments that can be passed through the CLI]:ADDITIONAL_ARGS:_default' \ +'--chain=[Chain to use]:CHAIN:_default' \ '--genesis[Run server in genesis mode]' \ '--build[Build server but don'\''t run it]' \ '--uring[Enables uring support for RocksDB]' \ @@ -1998,7 +1998,7 @@ _arguments "${_arguments_options[@]}" : \ ;; (external-node) _arguments "${_arguments_options[@]}" : \ -'--chain=[Chain to use]:CHAIN: ' \ +'--chain=[Chain to use]:CHAIN:_default' \ '-v[Verbose mode]' \ '--verbose[Verbose mode]' \ '--ignore-prerequisites[Ignores prerequisites checks]' \ @@ -2016,10 +2016,10 @@ _arguments "${_arguments_options[@]}" : \ case $line[1] in (configs) _arguments "${_arguments_options[@]}" : \ -'--db-url=[]:DB_URL: ' \ -'--db-name=[]:DB_NAME: ' \ -'--l1-rpc-url=[]:L1_RPC_URL: ' \ -'--chain=[Chain to use]:CHAIN: ' \ +'--db-url=[]:DB_URL:_default' \ +'--db-name=[]:DB_NAME:_default' \ +'--l1-rpc-url=[]:L1_RPC_URL:_default' \ +'--chain=[Chain to use]:CHAIN:_default' \ '-u[Use default database urls and names]' \ '--use-default[Use default database urls and names]' \ '-v[Verbose mode]' \ @@ -2031,7 +2031,7 @@ _arguments "${_arguments_options[@]}" : \ ;; (init) _arguments "${_arguments_options[@]}" : \ -'--chain=[Chain to use]:CHAIN: ' \ +'--chain=[Chain to use]:CHAIN:_default' \ '-v[Verbose 
mode]' \ '--verbose[Verbose mode]' \ '--ignore-prerequisites[Ignores prerequisites checks]' \ @@ -2041,11 +2041,11 @@ _arguments "${_arguments_options[@]}" : \ ;; (run) _arguments "${_arguments_options[@]}" : \ -'*--components=[Components of server to run]:COMPONENTS: ' \ +'*--components=[Components of server to run]:COMPONENTS:_default' \ '--enable-consensus=[Enable consensus]' \ -'*-a+[Additional arguments that can be passed through the CLI]:ADDITIONAL_ARGS: ' \ -'*--additional-args=[Additional arguments that can be passed through the CLI]:ADDITIONAL_ARGS: ' \ -'--chain=[Chain to use]:CHAIN: ' \ +'*-a+[Additional arguments that can be passed through the CLI]:ADDITIONAL_ARGS:_default' \ +'*--additional-args=[Additional arguments that can be passed through the CLI]:ADDITIONAL_ARGS:_default' \ +'--chain=[Chain to use]:CHAIN:_default' \ '--reinit[]' \ '-v[Verbose mode]' \ '--verbose[Verbose mode]' \ @@ -2094,7 +2094,7 @@ esac _arguments "${_arguments_options[@]}" : \ '-o+[Enable Grafana]' \ '--observability=[Enable Grafana]' \ -'--chain=[Chain to use]:CHAIN: ' \ +'--chain=[Chain to use]:CHAIN:_default' \ '-v[Verbose mode]' \ '--verbose[Verbose mode]' \ '--ignore-prerequisites[Ignores prerequisites checks]' \ @@ -2104,7 +2104,7 @@ _arguments "${_arguments_options[@]}" : \ ;; (contract-verifier) _arguments "${_arguments_options[@]}" : \ -'--chain=[Chain to use]:CHAIN: ' \ +'--chain=[Chain to use]:CHAIN:_default' \ '-v[Verbose mode]' \ '--verbose[Verbose mode]' \ '--ignore-prerequisites[Ignores prerequisites checks]' \ @@ -2122,7 +2122,7 @@ _arguments "${_arguments_options[@]}" : \ case $line[1] in (run) _arguments "${_arguments_options[@]}" : \ -'--chain=[Chain to use]:CHAIN: ' \ +'--chain=[Chain to use]:CHAIN:_default' \ '-v[Verbose mode]' \ '--verbose[Verbose mode]' \ '--ignore-prerequisites[Ignores prerequisites checks]' \ @@ -2132,12 +2132,12 @@ _arguments "${_arguments_options[@]}" : \ ;; (init) _arguments "${_arguments_options[@]}" : \ -'--zksolc-version=[Version of zksolc to install]:ZKSOLC_VERSION: ' \ -'--zkvyper-version=[Version of zkvyper to install]:ZKVYPER_VERSION: ' \ -'--solc-version=[Version of solc to install]:SOLC_VERSION: ' \ -'--era-vm-solc-version=[Version of era vm solc to install]:ERA_VM_SOLC_VERSION: ' \ -'--vyper-version=[Version of vyper to install]:VYPER_VERSION: ' \ -'--chain=[Chain to use]:CHAIN: ' \ +'--zksolc-version=[Version of zksolc to install]:ZKSOLC_VERSION:_default' \ +'--zkvyper-version=[Version of zkvyper to install]:ZKVYPER_VERSION:_default' \ +'--solc-version=[Version of solc to install]:SOLC_VERSION:_default' \ +'--era-vm-solc-version=[Version of era vm solc to install]:ERA_VM_SOLC_VERSION:_default' \ +'--vyper-version=[Version of vyper to install]:VYPER_VERSION:_default' \ +'--chain=[Chain to use]:CHAIN:_default' \ '--only[Install only provided compilers]' \ '-v[Verbose mode]' \ '--verbose[Verbose mode]' \ @@ -2180,7 +2180,7 @@ esac ;; (portal) _arguments "${_arguments_options[@]}" : \ -'--chain=[Chain to use]:CHAIN: ' \ +'--chain=[Chain to use]:CHAIN:_default' \ '-v[Verbose mode]' \ '--verbose[Verbose mode]' \ '--ignore-prerequisites[Ignores prerequisites checks]' \ @@ -2190,7 +2190,7 @@ _arguments "${_arguments_options[@]}" : \ ;; (explorer) _arguments "${_arguments_options[@]}" : \ -'--chain=[Chain to use]:CHAIN: ' \ +'--chain=[Chain to use]:CHAIN:_default' \ '-v[Verbose mode]' \ '--verbose[Verbose mode]' \ '--ignore-prerequisites[Ignores prerequisites checks]' \ @@ -2208,7 +2208,7 @@ _arguments "${_arguments_options[@]}" : \ case $line[1] in 
(init) _arguments "${_arguments_options[@]}" : \ -'--chain=[Chain to use]:CHAIN: ' \ +'--chain=[Chain to use]:CHAIN:_default' \ '-v[Verbose mode]' \ '--verbose[Verbose mode]' \ '--ignore-prerequisites[Ignores prerequisites checks]' \ @@ -2218,7 +2218,7 @@ _arguments "${_arguments_options[@]}" : \ ;; (run-backend) _arguments "${_arguments_options[@]}" : \ -'--chain=[Chain to use]:CHAIN: ' \ +'--chain=[Chain to use]:CHAIN:_default' \ '-v[Verbose mode]' \ '--verbose[Verbose mode]' \ '--ignore-prerequisites[Ignores prerequisites checks]' \ @@ -2228,7 +2228,7 @@ _arguments "${_arguments_options[@]}" : \ ;; (run) _arguments "${_arguments_options[@]}" : \ -'--chain=[Chain to use]:CHAIN: ' \ +'--chain=[Chain to use]:CHAIN:_default' \ '-v[Verbose mode]' \ '--verbose[Verbose mode]' \ '--ignore-prerequisites[Ignores prerequisites checks]' \ @@ -2274,7 +2274,7 @@ esac ;; (consensus) _arguments "${_arguments_options[@]}" : \ -'--chain=[Chain to use]:CHAIN: ' \ +'--chain=[Chain to use]:CHAIN:_default' \ '-v[Verbose mode]' \ '--verbose[Verbose mode]' \ '--ignore-prerequisites[Ignores prerequisites checks]' \ @@ -2293,7 +2293,7 @@ _arguments "${_arguments_options[@]}" : \ (set-attester-committee) _arguments "${_arguments_options[@]}" : \ '--from-file=[Sets the attester committee in the consensus registry contract to the committee in the yaml file. File format is definied in \`commands/consensus/proto/mod.proto\`]:FROM_FILE:_files' \ -'--chain=[Chain to use]:CHAIN: ' \ +'--chain=[Chain to use]:CHAIN:_default' \ '--from-genesis[Sets the attester committee in the consensus registry contract to \`consensus.genesis_spec.attesters\` in general.yaml]' \ '-v[Verbose mode]' \ '--verbose[Verbose mode]' \ @@ -2304,7 +2304,7 @@ _arguments "${_arguments_options[@]}" : \ ;; (get-attester-committee) _arguments "${_arguments_options[@]}" : \ -'--chain=[Chain to use]:CHAIN: ' \ +'--chain=[Chain to use]:CHAIN:_default' \ '-v[Verbose mode]' \ '--verbose[Verbose mode]' \ '--ignore-prerequisites[Ignores prerequisites checks]' \ @@ -2346,7 +2346,7 @@ esac ;; (update) _arguments "${_arguments_options[@]}" : \ -'--chain=[Chain to use]:CHAIN: ' \ +'--chain=[Chain to use]:CHAIN:_default' \ '-c[Update only the config files]' \ '--only-config[Update only the config files]' \ '-v[Verbose mode]' \ @@ -2358,7 +2358,7 @@ _arguments "${_arguments_options[@]}" : \ ;; (markdown) _arguments "${_arguments_options[@]}" : \ -'--chain=[Chain to use]:CHAIN: ' \ +'--chain=[Chain to use]:CHAIN:_default' \ '-v[Verbose mode]' \ '--verbose[Verbose mode]' \ '--ignore-prerequisites[Ignores prerequisites checks]' \ From 767c5bc6a62c402c099abe93b7dbecbb59e4acb7 Mon Sep 17 00:00:00 2001 From: Yury Akudovich Date: Wed, 30 Oct 2024 13:20:44 +0100 Subject: [PATCH 27/32] feat(prover): Add sending scale requests for Scaler targets (#3194) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit ## What ❔ Add sending scale requests for Scaler targets. Add dry-run config option for Scaler. ## Why ❔ ## Checklist - [x] PR title corresponds to the body of PR (we generate changelog entries from PRs). - [ ] Tests for the changes have been added / updated. - [ ] Documentation comments have been added / updated. - [x] Code has been formatted via `zkstack dev fmt` and `zkstack dev lint`. 
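
Below is a minimal, self-contained sketch of the dry-run guard this change introduces on the scaler's request path. It is not the code from this patch: the types and names (`ScaleRequest`, `ScaleResponse`, the deployment name, the URL) are simplified, illustrative stand-ins. The point it shows is the behaviour added here: when `dry_run` is enabled, no scale request leaves the process and a default, empty response is returned instead.

```rust
#[derive(Debug, Default)]
struct ScaleResponse {
    scale_result: Vec<String>, // placeholder; the real type carries per-deployment results
}

#[derive(Debug)]
struct ScaleRequest {
    deployments: Vec<String>, // names of deployments to resize (illustrative)
}

fn send_scale_request(dry_run: bool, url: &str, request: &ScaleRequest) -> ScaleResponse {
    if dry_run {
        // Mirrors the behaviour added in this patch: log and short-circuit
        // with a default (empty) response instead of calling the agent.
        println!("Dry-run mode, not sending the request to {url}");
        return ScaleResponse::default();
    }
    // In the actual watcher this branch issues an HTTP POST with retries; the sketch only pretends.
    println!("POST {url} with {} deployment(s)", request.deployments.len());
    ScaleResponse::default()
}

fn main() {
    let req = ScaleRequest { deployments: vec!["circuit-prover-gpu".into()] };
    let resp = send_scale_request(true, "http://agent.example/scale", &req);
    assert!(resp.scale_result.is_empty());
}
```
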
ref ZKD-1855 --------- Co-authored-by: Manuel Mauro --- .../config/src/configs/prover_autoscaler.rs | 5 +- .../src/proto/config/prover_autoscaler.proto | 4 +- .../protobuf_config/src/prover_autoscaler.rs | 8 +- .../crates/bin/prover_autoscaler/src/agent.rs | 2 +- .../prover_autoscaler/src/global/scaler.rs | 92 +++++++++++++------ .../prover_autoscaler/src/global/watcher.rs | 9 +- .../crates/bin/prover_autoscaler/src/main.rs | 3 +- 7 files changed, 88 insertions(+), 35 deletions(-) diff --git a/core/lib/config/src/configs/prover_autoscaler.rs b/core/lib/config/src/configs/prover_autoscaler.rs index d345b53e6f31..ab6b8fdf202f 100644 --- a/core/lib/config/src/configs/prover_autoscaler.rs +++ b/core/lib/config/src/configs/prover_autoscaler.rs @@ -63,6 +63,9 @@ pub struct ProverAutoscalerScalerConfig { pub long_pending_duration: Duration, /// List of simple autoscaler targets. pub scaler_targets: Vec, + /// If dry-run enabled don't send any scale requests. + #[serde(default)] + pub dry_run: bool, } #[derive( @@ -122,7 +125,7 @@ pub enum QueueReportFields { #[derive(Debug, Clone, PartialEq, Deserialize, Default)] pub struct ScalerTarget { pub queue_report_field: QueueReportFields, - pub pod_name_prefix: String, + pub deployment: String, /// Max replicas per cluster. pub max_replicas: HashMap, /// The queue will be divided by the speed and rounded up to get number of replicas. diff --git a/core/lib/protobuf_config/src/proto/config/prover_autoscaler.proto b/core/lib/protobuf_config/src/proto/config/prover_autoscaler.proto index 0f723e22a93f..742181653861 100644 --- a/core/lib/protobuf_config/src/proto/config/prover_autoscaler.proto +++ b/core/lib/protobuf_config/src/proto/config/prover_autoscaler.proto @@ -52,9 +52,10 @@ message MaxReplica { message ScalerTarget { optional string queue_report_field = 1; // required - optional string pod_name_prefix = 2; // required + optional string deployment = 5; // required repeated MaxReplica max_replicas = 3; // required at least one optional uint64 speed = 4; // optional + reserved 2; reserved "pod_name_prefix"; } message ProverAutoscalerScalerConfig { @@ -69,4 +70,5 @@ message ProverAutoscalerScalerConfig { repeated MaxProver max_provers = 9; // optional repeated MinProver min_provers = 10; // optional repeated ScalerTarget scaler_targets = 11; // optional + optional bool dry_run = 12; // optional } diff --git a/core/lib/protobuf_config/src/prover_autoscaler.rs b/core/lib/protobuf_config/src/prover_autoscaler.rs index c3e7c9719f13..6b67d9f620ff 100644 --- a/core/lib/protobuf_config/src/prover_autoscaler.rs +++ b/core/lib/protobuf_config/src/prover_autoscaler.rs @@ -118,6 +118,7 @@ impl ProtoRepr for proto::ProverAutoscalerScalerConfig { .enumerate() .map(|(i, x)| x.read().context(i).unwrap()) .collect::>(), + dry_run: self.dry_run.unwrap_or_default(), }) } @@ -158,6 +159,7 @@ impl ProtoRepr for proto::ProverAutoscalerScalerConfig { .map(|(k, v)| proto::MinProver::build(&(k.clone(), *v))) .collect(), scaler_targets: this.scaler_targets.iter().map(ProtoRepr::build).collect(), + dry_run: Some(this.dry_run), } } } @@ -269,9 +271,7 @@ impl ProtoRepr for proto::ScalerTarget { queue_report_field: required(&self.queue_report_field) .and_then(|x| Ok((*x).parse()?)) .context("queue_report_field")?, - pod_name_prefix: required(&self.pod_name_prefix) - .context("pod_name_prefix")? 
- .clone(), + deployment: required(&self.deployment).context("deployment")?.clone(), max_replicas: self .max_replicas .iter() @@ -289,7 +289,7 @@ impl ProtoRepr for proto::ScalerTarget { fn build(this: &Self::Type) -> Self { Self { queue_report_field: Some(this.queue_report_field.to_string()), - pod_name_prefix: Some(this.pod_name_prefix.clone()), + deployment: Some(this.deployment.clone()), max_replicas: this .max_replicas .iter() diff --git a/prover/crates/bin/prover_autoscaler/src/agent.rs b/prover/crates/bin/prover_autoscaler/src/agent.rs index f810bc416721..030636ad6592 100644 --- a/prover/crates/bin/prover_autoscaler/src/agent.rs +++ b/prover/crates/bin/prover_autoscaler/src/agent.rs @@ -96,7 +96,7 @@ pub struct ScaleRequest { pub deployments: Vec, } -#[derive(Debug, Serialize, Deserialize)] +#[derive(Debug, Default, Serialize, Deserialize)] pub struct ScaleResponse { pub scale_result: Vec, } diff --git a/prover/crates/bin/prover_autoscaler/src/global/scaler.rs b/prover/crates/bin/prover_autoscaler/src/global/scaler.rs index 1bdd2b251040..362fbbac0744 100644 --- a/prover/crates/bin/prover_autoscaler/src/global/scaler.rs +++ b/prover/crates/bin/prover_autoscaler/src/global/scaler.rs @@ -83,7 +83,7 @@ pub struct GpuScaler { pub struct SimpleScaler { queue_report_field: QueueReportFields, - pod_name_prefix: String, + deployment: String, /// Which cluster to use first. cluster_priorities: HashMap, max_replicas: HashMap, @@ -365,6 +365,47 @@ impl GpuScaler { provers } + + fn diff( + namespace: &str, + provers: HashMap, + clusters: &Clusters, + requests: &mut HashMap, + ) { + provers + .into_iter() + .for_each(|(GPUPoolKey { cluster, gpu }, replicas)| { + let prover = gpu_to_prover(gpu); + clusters + .clusters + .get(&cluster) + .and_then(|c| c.namespaces.get(namespace)) + .and_then(|ns| ns.deployments.get(&prover)) + .map_or_else( + || { + tracing::error!( + "Wasn't able to find deployment {} in cluster {}, namespace {}", + prover, + cluster, + namespace + ) + }, + |deployment| { + if deployment.desired != replicas as i32 { + requests + .entry(cluster.clone()) + .or_default() + .deployments + .push(ScaleDeploymentRequest { + namespace: namespace.into(), + name: prover.clone(), + size: replicas as i32, + }); + } + }, + ); + }) + } } #[derive(Default, Debug, PartialEq, Eq)] @@ -389,7 +430,7 @@ impl SimpleScaler { ) -> Self { Self { queue_report_field: config.queue_report_field.clone(), - pod_name_prefix: config.pod_name_prefix.clone(), + deployment: config.deployment.clone(), cluster_priorities, max_replicas: config.max_replicas.clone(), speed: config.speed, @@ -418,7 +459,7 @@ impl SimpleScaler { // Initialize pool only if we have ready deployments. 
pool.pods.insert(PodStatus::Running, 0); - let pod_re = Regex::new(&format!("^{}-", self.pod_name_prefix)).unwrap(); + let pod_re = Regex::new(&format!("^{}-", self.deployment)).unwrap(); for (_, pod) in namespace_value .pods .iter() @@ -551,47 +592,46 @@ impl SimpleScaler { pods } -} -fn diff( - namespace: &str, - provers: HashMap, - clusters: &Clusters, - requests: &mut HashMap, -) { - provers - .into_iter() - .for_each(|(GPUPoolKey { cluster, gpu }, n)| { - let prover = gpu_to_prover(gpu); + fn diff( + &self, + namespace: &str, + replicas: HashMap, + clusters: &Clusters, + requests: &mut HashMap, + ) { + let deployment_name = self.deployment.clone(); + replicas.into_iter().for_each(|(cluster, replicas)| { clusters .clusters .get(&cluster) .and_then(|c| c.namespaces.get(namespace)) - .and_then(|ns| ns.deployments.get(&prover)) + .and_then(|ns| ns.deployments.get(&deployment_name)) .map_or_else( || { tracing::error!( "Wasn't able to find deployment {} in cluster {}, namespace {}", - prover, + deployment_name, cluster, namespace ) }, - |d| { - if d.desired != n as i32 { + |deployment| { + if deployment.desired != replicas as i32 { requests .entry(cluster.clone()) .or_default() .deployments .push(ScaleDeploymentRequest { namespace: namespace.into(), - name: prover.clone(), - size: n as i32, + name: deployment_name.clone(), + size: replicas as i32, }); } }, ); }) + } } /// is_namespace_running returns true if there are some pods running in it. @@ -638,7 +678,7 @@ impl Task for Scaler { AUTOSCALER_METRICS.provers[&(k.cluster.clone(), ns.clone(), k.gpu)] .set(*num as u64); } - diff(ns, provers, &guard.clusters, &mut scale_requests); + GpuScaler::diff(ns, provers, &guard.clusters, &mut scale_requests); } // Simple Scalers. @@ -647,15 +687,15 @@ impl Task for Scaler { .get(&(ppv.to_string(), scaler.queue_report_field.clone())) .cloned() .unwrap_or(0); - tracing::debug!("Running eval for namespace {ns}, PPV {ppv}, simple scaler {} found queue {q}", scaler.pod_name_prefix); + tracing::debug!("Running eval for namespace {ns}, PPV {ppv}, simple scaler {} found queue {q}", scaler.deployment); if q > 0 || is_namespace_running(ns, &guard.clusters) { - let pods = scaler.run(ns, q, &guard.clusters); - for (k, num) in &pods { + let replicas = scaler.run(ns, q, &guard.clusters); + for (k, num) in &replicas { AUTOSCALER_METRICS.jobs - [&(scaler.pod_name_prefix.clone(), k.clone(), ns.clone())] + [&(scaler.deployment.clone(), k.clone(), ns.clone())] .set(*num as u64); } - // TODO: diff and add into scale_requests. + scaler.diff(ns, replicas, &guard.clusters, &mut scale_requests); } } } diff --git a/prover/crates/bin/prover_autoscaler/src/global/watcher.rs b/prover/crates/bin/prover_autoscaler/src/global/watcher.rs index 6e02c0fe2fdc..95b9e32cac5b 100644 --- a/prover/crates/bin/prover_autoscaler/src/global/watcher.rs +++ b/prover/crates/bin/prover_autoscaler/src/global/watcher.rs @@ -38,11 +38,12 @@ pub fn check_is_ready(v: &Vec) -> Result<()> { pub struct Watcher { /// List of base URLs of all agents. 
pub cluster_agents: Vec>, + pub dry_run: bool, pub data: Arc>, } impl Watcher { - pub fn new(agent_urls: Vec) -> Self { + pub fn new(agent_urls: Vec, dry_run: bool) -> Self { let size = agent_urls.len(); Self { cluster_agents: agent_urls @@ -54,6 +55,7 @@ impl Watcher { ) }) .collect(), + dry_run, data: Arc::new(Mutex::new(WatchedData { clusters: Clusters::default(), is_ready: vec![false; size], @@ -80,6 +82,7 @@ impl Watcher { .collect(); } + let dry_run = self.dry_run; let handles: Vec<_> = id_requests .into_iter() .map(|(id, sr)| { @@ -92,6 +95,10 @@ impl Watcher { tokio::spawn(async move { let mut headers = HeaderMap::new(); headers.insert(CONTENT_TYPE, HeaderValue::from_static("application/json")); + if dry_run { + tracing::info!("Dry-run mode, not sending the request."); + return Ok((id, Ok(ScaleResponse::default()))); + } let response = send_request_with_retries( &url, MAX_RETRIES, diff --git a/prover/crates/bin/prover_autoscaler/src/main.rs b/prover/crates/bin/prover_autoscaler/src/main.rs index 45e476079a55..ac5121dccd9c 100644 --- a/prover/crates/bin/prover_autoscaler/src/main.rs +++ b/prover/crates/bin/prover_autoscaler/src/main.rs @@ -110,7 +110,8 @@ async fn main() -> anyhow::Result<()> { let interval = scaler_config.scaler_run_interval.unsigned_abs(); let exporter_config = PrometheusExporterConfig::pull(scaler_config.prometheus_port); tasks.push(tokio::spawn(exporter_config.run(stop_receiver.clone()))); - let watcher = global::watcher::Watcher::new(scaler_config.agents.clone()); + let watcher = + global::watcher::Watcher::new(scaler_config.agents.clone(), scaler_config.dry_run); let queuer = global::queuer::Queuer::new(scaler_config.prover_job_monitor_url.clone()); let scaler = global::scaler::Scaler::new(watcher.clone(), queuer, scaler_config); tasks.extend(get_tasks(watcher, scaler, interval, stop_receiver)?); From 8db7e9306e5fa23f066be106363e6455531bbc09 Mon Sep 17 00:00:00 2001 From: Artur Puzio Date: Wed, 30 Oct 2024 16:13:50 +0100 Subject: [PATCH 28/32] feat: base token integration tests (#2509) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit ## What ❔ - Introduce 2 new tests for base token: - gas calculation - proper application of dynamic changes to base token ratio - Improve forced price client fluctuations implementation to limit difference between consecutive values. ## Why ❔ - We want better integration test coverage of custom base token ## Checklist - [x] PR title corresponds to the body of PR (we generate changelog entries from PRs). - [x] Tests for the changes have been added / updated. - [x] Documentation comments have been added / updated. - [x] Code has been formatted via `zk fmt` and `zk lint`. 
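
To make the smoothing rule concrete, here is a hypothetical, dependency-free sketch (simplified types, illustrative numbers) of the window the forced price client draws its next numerator from: the value must stay within `fluctuation`% of the forced numerator and, additionally, within `next_value_fluctuation`% of the previously returned numerator. The real client then picks a random value from this window; the sketch only computes the bounds.

```rust
fn next_numerator_window(
    forced_numerator: u64,
    previous_numerator: u64,
    fluctuation_pct: u32,            // band around the forced value
    next_value_fluctuation_pct: u32, // band around the previous value
) -> (u64, u64) {
    let shifted = |value: u64, pct: u32, up: bool| -> u64 {
        let sign = if up { 1.0 } else { -1.0 };
        (value as f64 * (1.0 + sign * pct as f64 / 100.0)).round() as u64
    };
    let low = shifted(forced_numerator, fluctuation_pct, false)
        .max(shifted(previous_numerator, next_value_fluctuation_pct, false));
    let high = shifted(forced_numerator, fluctuation_pct, true)
        .min(shifted(previous_numerator, next_value_fluctuation_pct, true));
    (low, high)
}

fn main() {
    // Forced numerator 300, 20% fluctuation, default 3% next-value limit,
    // previous quote also 300 (the values exercised by the new integration test).
    let (low, high) = next_numerator_window(300, 300, 20, 3);
    assert_eq!((low, high), (291, 309)); // bounded by the 3% step, not the 20% band
    println!("next numerator must fall in [{low}, {high}]");
}
```

With a 20% fluctuation and the default 3% next-value limit, consecutive quotes can therefore drift only a few percent per poll even though the overall band around the forced value is much wider, which is what the new fluctuation test relies on.
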
--------- Co-authored-by: Ivan Schasny --- .github/workflows/ci-core-reusable.yml | 8 +- .../src/configs/external_price_api_client.rs | 17 +- core/lib/config/src/testonly.rs | 1 + .../src/external_price_api_client.rs | 2 + .../src/forced_price_client.rs | 84 ++++++--- .../src/external_price_api_client.rs | 5 + .../config/external_price_api_client.proto | 1 + .../src/base_token_l1_behaviour.rs | 10 +- core/tests/ts-integration/src/env.ts | 3 +- core/tests/ts-integration/src/utils.ts | 174 +++++++++++++++--- .../ts-integration/tests/base-token.test.ts | 5 +- core/tests/ts-integration/tests/fees.test.ts | 160 +++++++++++++--- core/tests/ts-integration/tests/utils.ts | 81 -------- etc/env/base/external_price_api.toml | 4 +- etc/env/file_based/general.yaml | 4 +- .../zkstack/src/commands/chain/common.rs | 3 + 16 files changed, 388 insertions(+), 174 deletions(-) delete mode 100644 core/tests/ts-integration/tests/utils.ts diff --git a/.github/workflows/ci-core-reusable.yml b/.github/workflows/ci-core-reusable.yml index fb43133868b0..c245e7341d03 100644 --- a/.github/workflows/ci-core-reusable.yml +++ b/.github/workflows/ci-core-reusable.yml @@ -276,8 +276,8 @@ jobs: --wallet-creation localhost \ --l1-batch-commit-data-generator-mode rollup \ --base-token-address ${{ env.CUSTOM_TOKEN_ADDRESS }} \ - --base-token-price-nominator 3 \ - --base-token-price-denominator 2 \ + --base-token-price-nominator 314 \ + --base-token-price-denominator 1000 \ --set-as-default false \ --ignore-prerequisites @@ -332,8 +332,8 @@ jobs: --wallet-creation localhost \ --l1-batch-commit-data-generator-mode validium \ --base-token-address ${{ env.CUSTOM_TOKEN_ADDRESS }} \ - --base-token-price-nominator 3 \ - --base-token-price-denominator 2 \ + --base-token-price-nominator 314 \ + --base-token-price-denominator 1000 \ --set-as-default false \ --ignore-prerequisites diff --git a/core/lib/config/src/configs/external_price_api_client.rs b/core/lib/config/src/configs/external_price_api_client.rs index 15cc7d29d848..c1092f3a7275 100644 --- a/core/lib/config/src/configs/external_price_api_client.rs +++ b/core/lib/config/src/configs/external_price_api_client.rs @@ -4,16 +4,21 @@ use serde::Deserialize; pub const DEFAULT_TIMEOUT_MS: u64 = 10_000; +pub const DEFAULT_FORCED_NEXT_VALUE_FLUCTUATION: u32 = 3; + #[derive(Debug, Clone, PartialEq, Deserialize)] pub struct ForcedPriceClientConfig { /// Forced conversion ratio pub numerator: Option, pub denominator: Option, - /// Forced fluctuation. It defines how much percent numerator / - /// denominator should fluctuate from their forced values. If it's None or 0, then ForcedPriceClient - /// will return the same quote every time it's called. Otherwise, ForcedPriceClient will return - /// forced_quote +/- forced_fluctuation % from its values. + /// Forced fluctuation. It defines how much percent the ratio should fluctuate from its forced + /// value. If it's None or 0, then the ForcedPriceClient will return the same quote every time + /// it's called. Otherwise, ForcedPriceClient will return quote with numerator +/- fluctuation %. pub fluctuation: Option, + /// In order to smooth out fluctuation, consecutive values returned by forced client will not + /// differ more than next_value_fluctuation percent. If it's None, a default of 3% will be applied. 
+ #[serde(default = "ExternalPriceApiClientConfig::default_forced_next_value_fluctuation")] + pub next_value_fluctuation: u32, } #[derive(Debug, Clone, PartialEq, Deserialize)] @@ -31,6 +36,10 @@ impl ExternalPriceApiClientConfig { DEFAULT_TIMEOUT_MS } + fn default_forced_next_value_fluctuation() -> u32 { + DEFAULT_FORCED_NEXT_VALUE_FLUCTUATION + } + pub fn client_timeout(&self) -> Duration { Duration::from_millis(self.client_timeout_ms) } diff --git a/core/lib/config/src/testonly.rs b/core/lib/config/src/testonly.rs index 45c776242630..49c5cff1dca0 100644 --- a/core/lib/config/src/testonly.rs +++ b/core/lib/config/src/testonly.rs @@ -1113,6 +1113,7 @@ impl Distribution, fluctuation: Option, + next_value_fluctuation: u32, } impl ForcedPriceClient { @@ -29,42 +35,70 @@ impl ForcedPriceClient { let fluctuation = forced_price_client_config .fluctuation .map(|x| x.clamp(0, 100)); + let next_value_fluctuation = forced_price_client_config + .next_value_fluctuation + .clamp(0, 100); - Self { - ratio: BaseTokenAPIRatio { + let ratio = if numerator < 100 && fluctuation.is_some_and(|f| f > 0) { + // If numerator is too small we need to multiply by 100 to make sure fluctuations can be applied + BaseTokenAPIRatio { + numerator: NonZeroU64::new(numerator * 100).unwrap(), + denominator: NonZeroU64::new(denominator * 100).unwrap(), + ratio_timestamp: chrono::Utc::now(), + } + } else { + BaseTokenAPIRatio { numerator: NonZeroU64::new(numerator).unwrap(), denominator: NonZeroU64::new(denominator).unwrap(), ratio_timestamp: chrono::Utc::now(), - }, + } + }; + + Self { + ratio, + previous_numerator: Mutex::new(NonZeroU64::new(numerator).unwrap()), fluctuation, + next_value_fluctuation, } } } #[async_trait] impl PriceAPIClient for ForcedPriceClient { - // Returns a ratio which is 10% higher or lower than the configured forced ratio. 
+ /// Returns the configured ratio with fluctuation applied if enabled async fn fetch_ratio(&self, _token_address: Address) -> anyhow::Result { - if let Some(x) = self.fluctuation { - if x != 0 { - let mut rng = rand::thread_rng(); - - let mut adjust_range = |value: NonZeroU64| { - let value_f64 = value.get() as f64; - let min = (value_f64 * (1.0 - x as f64 / 100.0)).round() as u64; - let max = (value_f64 * (1.0 + x as f64 / 100.0)).round() as u64; - rng.gen_range(min..=max) - }; - let new_numerator = adjust_range(self.ratio.numerator); - let new_denominator = adjust_range(self.ratio.denominator); + if let Some(fluctation) = self.fluctuation { + let mut previous_numerator = self.previous_numerator.lock().await; + let mut rng = rand::thread_rng(); + let numerator_range = ( + max( + (self.ratio.numerator.get() as f64 * (1.0 - (fluctation as f64 / 100.0))) + .round() as u64, + (previous_numerator.get() as f64 + * (1.0 - (self.next_value_fluctuation as f64 / 100.0))) + .round() as u64, + ), + min( + (self.ratio.numerator.get() as f64 * (1.0 + (fluctation as f64 / 100.0))) + .round() as u64, + (previous_numerator.get() as f64 + * (1.0 + (self.next_value_fluctuation as f64 / 100.0))) + .round() as u64, + ), + ); - return Ok(BaseTokenAPIRatio { - numerator: NonZeroU64::new(new_numerator).unwrap_or(self.ratio.numerator), - denominator: NonZeroU64::new(new_denominator).unwrap_or(self.ratio.denominator), - ratio_timestamp: chrono::Utc::now(), - }); - } + let new_numerator = + NonZeroU64::new(rng.gen_range(numerator_range.0..=numerator_range.1)) + .unwrap_or(self.ratio.numerator); + let adjusted_ratio = BaseTokenAPIRatio { + numerator: new_numerator, + denominator: self.ratio.denominator, + ratio_timestamp: chrono::Utc::now(), + }; + *previous_numerator = new_numerator; + Ok(adjusted_ratio) + } else { + Ok(self.ratio) } - Ok(self.ratio) } } diff --git a/core/lib/protobuf_config/src/external_price_api_client.rs b/core/lib/protobuf_config/src/external_price_api_client.rs index e5ed809a1284..dbc341c1865a 100644 --- a/core/lib/protobuf_config/src/external_price_api_client.rs +++ b/core/lib/protobuf_config/src/external_price_api_client.rs @@ -17,6 +17,9 @@ impl ProtoRepr for proto::ExternalPriceApiClient { numerator: self.forced_numerator, denominator: self.forced_denominator, fluctuation: self.forced_fluctuation, + next_value_fluctuation: self.forced_next_value_fluctuation.unwrap_or( + configs::external_price_api_client::DEFAULT_FORCED_NEXT_VALUE_FLUCTUATION, + ), }), }, ) @@ -26,6 +29,7 @@ impl ProtoRepr for proto::ExternalPriceApiClient { let numerator = this.forced.as_ref().and_then(|x| x.numerator); let denominator = this.forced.as_ref().and_then(|x| x.denominator); let fluctuation = this.forced.as_ref().and_then(|x| x.fluctuation); + let next_value_fluctuation = this.forced.as_ref().map(|x| x.next_value_fluctuation); Self { source: Some(this.source.clone()), @@ -35,6 +39,7 @@ impl ProtoRepr for proto::ExternalPriceApiClient { forced_numerator: numerator, forced_denominator: denominator, forced_fluctuation: fluctuation, + forced_next_value_fluctuation: next_value_fluctuation, } } } diff --git a/core/lib/protobuf_config/src/proto/config/external_price_api_client.proto b/core/lib/protobuf_config/src/proto/config/external_price_api_client.proto index 646bcfbd7647..63f3233c575f 100644 --- a/core/lib/protobuf_config/src/proto/config/external_price_api_client.proto +++ b/core/lib/protobuf_config/src/proto/config/external_price_api_client.proto @@ -10,4 +10,5 @@ message ExternalPriceApiClient { optional 
uint64 forced_numerator = 5; optional uint64 forced_denominator = 6; optional uint32 forced_fluctuation = 7; + optional uint32 forced_next_value_fluctuation = 8; } diff --git a/core/node/base_token_adjuster/src/base_token_l1_behaviour.rs b/core/node/base_token_adjuster/src/base_token_l1_behaviour.rs index 0922101e59de..599aba36f3e9 100644 --- a/core/node/base_token_adjuster/src/base_token_l1_behaviour.rs +++ b/core/node/base_token_adjuster/src/base_token_l1_behaviour.rs @@ -220,10 +220,16 @@ impl BaseTokenL1Behaviour { if receipt.status == Some(1.into()) { return Ok(receipt.gas_used); } + let reason = (*l1_params.eth_client) + .as_ref() + .failure_reason(hash) + .await + .context("failed getting failure reason of `setTokenMultiplier` transaction")?; return Err(anyhow::Error::msg(format!( - "`setTokenMultiplier` transaction {:?} failed with status {:?}", + "`setTokenMultiplier` transaction {:?} failed with status {:?}, reason: {:?}", hex::encode(hash), - receipt.status + receipt.status, + reason ))); } else { tokio::time::sleep(sleep_duration).await; diff --git a/core/tests/ts-integration/src/env.ts b/core/tests/ts-integration/src/env.ts index 1de917c2362c..596872ab9c57 100644 --- a/core/tests/ts-integration/src/env.ts +++ b/core/tests/ts-integration/src/env.ts @@ -117,7 +117,8 @@ async function loadTestEnvironmentFromFile(fileConfig: FileConfig): Promise { } } +interface MainNodeOptions { + newL1GasPrice?: bigint; + newPubdataPrice?: bigint; + customBaseToken?: boolean; + externalPriceApiClientForcedNumerator?: number; + externalPriceApiClientForcedDenominator?: number; + externalPriceApiClientForcedFluctuation?: number; + baseTokenPricePollingIntervalMs?: number; + baseTokenAdjusterL1UpdateDeviationPercentage?: number; +} export class NodeSpawner { + private readonly generalConfigPath: string | undefined; + private readonly originalConfig: string | undefined; + public mainNode: Node | null; + public constructor( private readonly pathToHome: string, private readonly logs: fs.FileHandle, private readonly fileConfig: FileConfig, private readonly options: MainNodeSpawnOptions, private env?: ProcessEnvOptions['env'] - ) {} + ) { + this.mainNode = null; + if (fileConfig.loadFromFile) { + this.generalConfigPath = getConfigPath({ + pathToHome, + chain: fileConfig.chain, + configsFolder: 'configs', + config: 'general.yaml' + }); + this.originalConfig = fsSync.readFileSync(this.generalConfigPath, 'utf8'); + } + } + + public async killAndSpawnMainNode(configOverrides: MainNodeOptions | null = null): Promise { + if (this.mainNode != null) { + await this.mainNode.killAndWaitForShutdown(); + this.mainNode = null; + } + this.mainNode = await this.spawnMainNode(configOverrides); + } - public async spawnMainNode(newL1GasPrice?: string, newPubdataPrice?: string): Promise> { + private async spawnMainNode(overrides: MainNodeOptions | null): Promise> { const env = this.env ?? process.env; const { fileConfig, pathToHome, options, logs } = this; - const testMode = newPubdataPrice || newL1GasPrice; + const testMode = overrides?.newPubdataPrice != null || overrides?.newL1GasPrice != null; - console.log('New L1 Gas Price: ', newL1GasPrice); - console.log('New Pubdata Price: ', newPubdataPrice); + console.log('Overrides: ', overrides); if (fileConfig.loadFromFile) { - setTransactionSlots(pathToHome, fileConfig, testMode ? 1 : 8192); + this.restoreConfig(); + const config = this.readFileConfig(); + config['state_keeper']['transaction_slots'] = testMode ? 
1 : 8192; - if (newL1GasPrice) { - setInternalEnforcedL1GasPrice(pathToHome, fileConfig, parseFloat(newL1GasPrice)); - } else { - deleteInternalEnforcedL1GasPrice(pathToHome, fileConfig); - } + if (overrides != null) { + if (overrides.newL1GasPrice) { + config['eth']['gas_adjuster']['internal_enforced_l1_gas_price'] = overrides.newL1GasPrice; + } + + if (overrides.newPubdataPrice) { + config['eth']['gas_adjuster']['internal_enforced_pubdata_price'] = overrides.newPubdataPrice; + } + + if (overrides.externalPriceApiClientForcedNumerator !== undefined) { + config['external_price_api_client']['forced_numerator'] = + overrides.externalPriceApiClientForcedNumerator; + } + + if (overrides.externalPriceApiClientForcedDenominator !== undefined) { + config['external_price_api_client']['forced_denominator'] = + overrides.externalPriceApiClientForcedDenominator; + } + + if (overrides.externalPriceApiClientForcedFluctuation !== undefined) { + config['external_price_api_client']['forced_fluctuation'] = + overrides.externalPriceApiClientForcedFluctuation; + } + + if (overrides.baseTokenPricePollingIntervalMs !== undefined) { + const cacheUpdateInterval = overrides.baseTokenPricePollingIntervalMs / 2; + // To reduce price polling interval we also need to reduce base token receipt checking and tx sending sleeps as they are blocking the poller. Also cache update needs to be reduced appropriately. + + config['base_token_adjuster']['l1_receipt_checking_sleep_ms'] = + overrides.baseTokenPricePollingIntervalMs; + config['base_token_adjuster']['l1_tx_sending_sleep_ms'] = overrides.baseTokenPricePollingIntervalMs; + config['base_token_adjuster']['price_polling_interval_ms'] = + overrides.baseTokenPricePollingIntervalMs; + config['base_token_adjuster']['price_cache_update_interval_ms'] = cacheUpdateInterval; + } - if (newPubdataPrice) { - setInternalEnforcedPubdataPrice(pathToHome, fileConfig, parseFloat(newPubdataPrice)); - } else { - deleteInternalEnforcedPubdataPrice(pathToHome, fileConfig); + if (overrides.baseTokenAdjusterL1UpdateDeviationPercentage !== undefined) { + config['base_token_adjuster']['l1_update_deviation_percentage'] = + overrides.baseTokenAdjusterL1UpdateDeviationPercentage; + } } + + this.writeFileConfig(config); } else { env['DATABASE_MERKLE_TREE_MODE'] = 'full'; - if (newPubdataPrice) { - env['ETH_SENDER_GAS_ADJUSTER_INTERNAL_ENFORCED_PUBDATA_PRICE'] = newPubdataPrice; - } + if (overrides != null) { + if (overrides.newPubdataPrice) { + env['ETH_SENDER_GAS_ADJUSTER_INTERNAL_ENFORCED_PUBDATA_PRICE'] = + overrides.newPubdataPrice.toString(); + } - if (newL1GasPrice) { - // We need to ensure that each transaction gets into its own batch for more fair comparison. - env['ETH_SENDER_GAS_ADJUSTER_INTERNAL_ENFORCED_L1_GAS_PRICE'] = newL1GasPrice; + if (overrides.newL1GasPrice) { + // We need to ensure that each transaction gets into its own batch for more fair comparison. 
+ env['ETH_SENDER_GAS_ADJUSTER_INTERNAL_ENFORCED_L1_GAS_PRICE'] = overrides.newL1GasPrice.toString(); + } + + if (overrides.externalPriceApiClientForcedNumerator !== undefined) { + env['EXTERNAL_PRICE_API_CLIENT_FORCED_NUMERATOR'] = + overrides.externalPriceApiClientForcedNumerator.toString(); + } + + if (overrides.externalPriceApiClientForcedDenominator !== undefined) { + env['EXTERNAL_PRICE_API_CLIENT_FORCED_DENOMINATOR'] = + overrides.externalPriceApiClientForcedDenominator.toString(); + } + + if (overrides.externalPriceApiClientForcedFluctuation !== undefined) { + env['EXTERNAL_PRICE_API_CLIENT_FORCED_FLUCTUATION'] = + overrides.externalPriceApiClientForcedFluctuation.toString(); + } + + if (overrides.baseTokenPricePollingIntervalMs !== undefined) { + const cacheUpdateInterval = overrides.baseTokenPricePollingIntervalMs / 2; + // To reduce price polling interval we also need to reduce base token receipt checking and tx sending sleeps as they are blocking the poller. Also cache update needs to be reduced appropriately. + env['BASE_TOKEN_ADJUSTER_L1_RECEIPT_CHECKING_SLEEP_MS'] = + overrides.baseTokenPricePollingIntervalMs.toString(); + env['BASE_TOKEN_ADJUSTER_L1_TX_SENDING_SLEEP_MS'] = + overrides.baseTokenPricePollingIntervalMs.toString(); + env['BASE_TOKEN_ADJUSTER_PRICE_POLLING_INTERVAL_MS'] = + overrides.baseTokenPricePollingIntervalMs.toString(); + env['BASE_TOKEN_ADJUSTER_PRICE_CACHE_UPDATE_INTERVAL_MS'] = cacheUpdateInterval.toString(); + } + + if (overrides.baseTokenAdjusterL1UpdateDeviationPercentage !== undefined) { + env['BASE_TOKEN_ADJUSTER_L1_UPDATE_DEVIATION_PERCENTAGE'] = + overrides.baseTokenAdjusterL1UpdateDeviationPercentage.toString(); + } } if (testMode) { @@ -175,6 +271,26 @@ export class NodeSpawner { await waitForNodeToStart(proc, options.apiWeb3JsonRpcHttpUrl); return new Node(proc, options.apiWeb3JsonRpcHttpUrl, NodeType.MAIN); } + + public restoreConfig() { + if (this.generalConfigPath != void 0 && this.originalConfig != void 0) + fsSync.writeFileSync(this.generalConfigPath, this.originalConfig, 'utf8'); + } + + private readFileConfig() { + if (this.generalConfigPath == void 0) + throw new Error('Trying to set property in config while not in file mode'); + const generalConfig = fsSync.readFileSync(this.generalConfigPath, 'utf8'); + return YAML.parse(generalConfig); + } + + private writeFileConfig(config: any) { + if (this.generalConfigPath == void 0) + throw new Error('Trying to set property in config while not in file mode'); + + const newGeneralConfig = YAML.stringify(config); + fsSync.writeFileSync(this.generalConfigPath, newGeneralConfig, 'utf8'); + } } async function waitForNodeToStart(proc: ChildProcessWithoutNullStreams, l2Url: string) { diff --git a/core/tests/ts-integration/tests/base-token.test.ts b/core/tests/ts-integration/tests/base-token.test.ts index 8ecc9de3ddb9..432ce70ae17f 100644 --- a/core/tests/ts-integration/tests/base-token.test.ts +++ b/core/tests/ts-integration/tests/base-token.test.ts @@ -39,9 +39,8 @@ describe('base ERC20 contract checks', () => { const numerator = Number(await zksyncContract.baseTokenGasPriceMultiplierNominator()); const denominator = Number(await zksyncContract.baseTokenGasPriceMultiplierDenominator()); - // checking that the numerator and denominator don't have their default values - expect(numerator).toBe(3); - expect(denominator).toBe(2); + expect(numerator).toBe(314); + expect(denominator).toBe(1000); }); test('Can perform a deposit', async () => { diff --git a/core/tests/ts-integration/tests/fees.test.ts 
b/core/tests/ts-integration/tests/fees.test.ts index e99d3b67911b..fc156e03f16d 100644 --- a/core/tests/ts-integration/tests/fees.test.ts +++ b/core/tests/ts-integration/tests/fees.test.ts @@ -15,13 +15,15 @@ import { TestContextOwner, TestMaster } from '../src'; import * as zksync from 'zksync-ethers'; import * as ethers from 'ethers'; import { DataAvailabityMode, Token } from '../src/types'; -import { SYSTEM_CONTEXT_ADDRESS, getTestContract } from '../src/helpers'; +import { SYSTEM_CONTEXT_ADDRESS, getTestContract, waitForNewL1Batch, anyTransaction } from '../src/helpers'; import { loadConfig, shouldLoadConfigFromFile } from 'utils/build/file-configs'; import { logsTestPath } from 'utils/build/logs'; -import path from 'path'; -import { NodeSpawner, Node, NodeType } from '../src/utils'; -import { deleteInternalEnforcedL1GasPrice, deleteInternalEnforcedPubdataPrice, setTransactionSlots } from './utils'; +import { sleep } from 'utils/build'; import { killPidWithAllChilds } from 'utils/build/kill'; +import path from 'path'; +import { NodeSpawner } from '../src/utils'; +import { sendTransfers } from '../src/context-owner'; +import { Reporter } from '../src/reporter'; declare global { var __ZKSYNC_TEST_CONTEXT_OWNER__: TestContextOwner; @@ -60,13 +62,13 @@ testFees('Test fees', function () { let tokenDetails: Token; let aliceErc20: zksync.Contract; + let isETHBasedChain: boolean; let mainLogs: fs.FileHandle; let baseTokenAddress: string; let ethClientWeb3Url: string; let apiWeb3JsonRpcHttpUrl: string; let mainNodeSpawner: NodeSpawner; - let mainNode: Node; const fileConfig = shouldLoadConfigFromFile(); const pathToHome = path.join(__dirname, '../../../..'); @@ -121,11 +123,41 @@ testFees('Test fees', function () { baseTokenAddress }); - mainNode = await mainNodeSpawner.spawnMainNode(); + await mainNodeSpawner.killAndSpawnMainNode(); alice = testMaster.mainAccount(); tokenDetails = testMaster.environment().erc20Token; aliceErc20 = new ethers.Contract(tokenDetails.l1Address, zksync.utils.IERC20, alice.ethWallet()); + + const mainWallet = new zksync.Wallet( + testMaster.environment().mainWalletPK, + alice._providerL2(), + alice._providerL1() + ); + + isETHBasedChain = baseTokenAddress == zksync.utils.ETH_ADDRESS_IN_CONTRACTS; + + // On non ETH based chains the standard deposit is not enough to run all this tests + if (!isETHBasedChain) { + const depositTx = await mainWallet.deposit({ + token: baseTokenAddress, + amount: ethers.parseEther('100'), + approveERC20: true, + approveBaseERC20: true + }); + await depositTx.wait(); + await Promise.all( + await sendTransfers( + zksync.utils.ETH_ADDRESS, + mainWallet, + { alice: alice.privateKey }, + ethers.parseEther('100'), + undefined, + undefined, + new Reporter() + ) + ); + } }); test('Test all fees', async () => { @@ -177,8 +209,10 @@ testFees('Test fees', function () { ]; for (const gasPrice of L1_GAS_PRICES_TO_TEST) { // For the sake of simplicity, we'll use the same pubdata price as the L1 gas price. 
- await mainNode.killAndWaitForShutdown(); - mainNode = await mainNodeSpawner.spawnMainNode(gasPrice.toString(), gasPrice.toString()); + await mainNodeSpawner.killAndSpawnMainNode({ + newL1GasPrice: gasPrice, + newPubdataPrice: gasPrice + }); reports = await appendResults( alice, @@ -213,6 +247,96 @@ testFees('Test fees', function () { console.log(`Full report: \n\n${reports.join('\n\n')}`); }); + test('Test gas price expected value', async () => { + const l1GasPrice = 2_000_000_000n; /// set to 2 gwei + await mainNodeSpawner.killAndSpawnMainNode({ + newL1GasPrice: l1GasPrice, + newPubdataPrice: l1GasPrice + }); + + // wait for new batch so gas price is updated with new config set above + await waitForNewL1Batch(alice); + + const receipt = await anyTransaction(alice); + + const feeParams = await alice._providerL2().getFeeParams(); + const feeConfig = feeParams.V2.config; + // type is missing conversion_ratio field + const conversionRatio: { numerator: bigint; denominator: bigint } = (feeParams.V2 as any)['conversion_ratio']; + if (isETHBasedChain) { + expect(conversionRatio.numerator).toBe(1); //number not bigint for some reason + expect(conversionRatio.denominator).toBe(1); + } else { + expect(conversionRatio.numerator).toBeGreaterThan(1n); + } + + // the minimum + compute overhead of 0.01gwei in validium mode + const expectedETHGasPrice = + feeConfig.minimal_l2_gas_price + + (feeConfig.compute_overhead_part * feeParams.V2.l1_gas_price * feeConfig.batch_overhead_l1_gas) / + feeConfig.max_gas_per_batch; + const expectedConvertedGasPrice = + (expectedETHGasPrice * conversionRatio.numerator) / conversionRatio.denominator; + + expect(receipt.gasPrice).toBe(BigInt(expectedConvertedGasPrice)); + }); + + test('Test base token ratio fluctuations', async () => { + const l1GasPrice = 2_000_000_000n; /// set to 2 gwei + + if (isETHBasedChain) return; + + await mainNodeSpawner.killAndSpawnMainNode({ + newL1GasPrice: l1GasPrice, + newPubdataPrice: l1GasPrice, + externalPriceApiClientForcedNumerator: 300, + externalPriceApiClientForcedDenominator: 100, + externalPriceApiClientForcedFluctuation: 20, + baseTokenPricePollingIntervalMs: 1000, + baseTokenAdjusterL1UpdateDeviationPercentage: 0 + }); + + const beginFeeParams = await alice._providerL2().getFeeParams(); + const mainContract = await alice.getMainContract(); + const beginL1Nominator = await mainContract.baseTokenGasPriceMultiplierNominator(); + let changedL2 = false; + let changedL1 = false; + for (let i = 0; i < 20; i++) { + await sleep(0.5); + const newFeeParams = await alice._providerL2().getFeeParams(); + // we need any as FeeParams is missing existing conversion_ratio field + + if ( + ((newFeeParams.V2 as any)['conversion_ratio'].numerator as number) != + ((beginFeeParams.V2 as any)['conversion_ratio'].numerator as number) + ) { + // @ts-ignore + const diff = + (newFeeParams.V2 as any)['conversion_ratio'].numerator - + (beginFeeParams.V2 as any)['conversion_ratio'].numerator; + // Deviation is 20%, Adding 5% extra for any arithmetic precision issues, 25%*300 = 75 + expect(diff).toBeLessThan(75); + expect(diff).toBeGreaterThan(-75); + changedL2 = true; + break; + } + } + expect(changedL2).toBeTruthy(); + for (let i = 0; i < 10; i++) { + const newL1Nominator = await mainContract.baseTokenGasPriceMultiplierNominator(); + if (newL1Nominator != beginL1Nominator) { + const diff = newL1Nominator - beginL1Nominator; + expect(diff).toBeLessThan(75); // as above + expect(diff).toBeGreaterThan(-75); + changedL1 = true; + break; + } + await sleep(0.5); 
+ } + + expect(changedL1).toBeTruthy(); + }); + test('Test gas consumption under large L1 gas price', async () => { if (testMaster.environment().l1BatchCommitDataGeneratorMode === DataAvailabityMode.Validium) { // We skip this test for Validium mode, since L1 gas price has little impact on the gasLimit in this mode. @@ -233,11 +357,10 @@ testFees('Test fees', function () { // that the gasLimit is indeed over u32::MAX, which is the most important tested property. const requiredPubdataPrice = minimalL2GasPrice * 100_000n; - await mainNode.killAndWaitForShutdown(); - mainNode = await mainNodeSpawner.spawnMainNode( - requiredPubdataPrice.toString(), - requiredPubdataPrice.toString() - ); + await mainNodeSpawner.killAndSpawnMainNode({ + newL1GasPrice: requiredPubdataPrice, + newPubdataPrice: requiredPubdataPrice + }); const l1Messenger = new ethers.Contract(zksync.utils.L1_MESSENGER_ADDRESS, zksync.utils.L1_MESSENGER, alice); @@ -278,16 +401,11 @@ testFees('Test fees', function () { }); afterAll(async () => { - await mainNode.killAndWaitForShutdown(); + await mainNodeSpawner.killAndSpawnMainNode(); // Returning the pubdata price to the default one - - // Restore defaults - setTransactionSlots(pathToHome, fileConfig, 8192); - deleteInternalEnforcedL1GasPrice(pathToHome, fileConfig); - deleteInternalEnforcedPubdataPrice(pathToHome, fileConfig); - mainNode = await mainNodeSpawner.spawnMainNode(); + // Spawning with no options restores defaults. await testMaster.deinitialize(); - __ZKSYNC_TEST_CONTEXT_OWNER__.setL2NodePid(mainNode.proc.pid!); + __ZKSYNC_TEST_CONTEXT_OWNER__.setL2NodePid(mainNodeSpawner.mainNode!.proc.pid!); }); }); diff --git a/core/tests/ts-integration/tests/utils.ts b/core/tests/ts-integration/tests/utils.ts deleted file mode 100644 index 24df8a170c20..000000000000 --- a/core/tests/ts-integration/tests/utils.ts +++ /dev/null @@ -1,81 +0,0 @@ -import * as fs from 'fs'; -import { getConfigPath } from 'utils/build/file-configs'; - -export function setInternalEnforcedPubdataPrice(pathToHome: string, fileConfig: any, value: number) { - setGasAdjusterProperty(pathToHome, fileConfig, 'internal_enforced_pubdata_price', value); -} - -export function setInternalEnforcedL1GasPrice(pathToHome: string, fileConfig: any, value: number) { - setGasAdjusterProperty(pathToHome, fileConfig, 'internal_enforced_l1_gas_price', value); -} - -export function deleteInternalEnforcedPubdataPrice(pathToHome: string, fileConfig: any) { - deleteProperty(pathToHome, fileConfig, 'internal_enforced_pubdata_price'); -} - -export function deleteInternalEnforcedL1GasPrice(pathToHome: string, fileConfig: any) { - deleteProperty(pathToHome, fileConfig, 'internal_enforced_l1_gas_price'); -} - -export function setTransactionSlots(pathToHome: string, fileConfig: any, value: number) { - setPropertyInGeneralConfig(pathToHome, fileConfig, 'transaction_slots', value); -} - -function setPropertyInGeneralConfig(pathToHome: string, fileConfig: any, property: string, value: number) { - const generalConfigPath = getConfigPath({ - pathToHome, - chain: fileConfig.chain, - configsFolder: 'configs', - config: 'general.yaml' - }); - const generalConfig = fs.readFileSync(generalConfigPath, 'utf8'); - - const regex = new RegExp(`${property}:\\s*\\d+(\\.\\d+)?`, 'g'); - const newGeneralConfig = generalConfig.replace(regex, `${property}: ${value}`); - - fs.writeFileSync(generalConfigPath, newGeneralConfig, 'utf8'); -} - -function setGasAdjusterProperty(pathToHome: string, fileConfig: any, property: string, value: number) { - const 
generalConfigPath = getConfigPath({ - pathToHome, - chain: fileConfig.chain, - configsFolder: 'configs', - config: 'general.yaml' - }); - const generalConfig = fs.readFileSync(generalConfigPath, 'utf8'); - - // Define the regex pattern to check if the property already exists - const propertyRegex = new RegExp(`(^\\s*${property}:\\s*\\d+(\\.\\d+)?$)`, 'm'); - const gasAdjusterRegex = new RegExp('(^\\s*gas_adjuster:.*$)', 'gm'); - - let newGeneralConfig; - - if (propertyRegex.test(generalConfig)) { - // If the property exists, modify its value - newGeneralConfig = generalConfig.replace(propertyRegex, ` ${property}: ${value}`); - } else { - // If the property does not exist, add it under the gas_adjuster section - newGeneralConfig = generalConfig.replace(gasAdjusterRegex, `$1\n ${property}: ${value}`); - } - - fs.writeFileSync(generalConfigPath, newGeneralConfig, 'utf8'); -} - -function deleteProperty(pathToHome: string, fileConfig: any, property: string) { - const generalConfigPath = getConfigPath({ - pathToHome, - chain: fileConfig.chain, - configsFolder: 'configs', - config: 'general.yaml' - }); - const generalConfig = fs.readFileSync(generalConfigPath, 'utf8'); - - // Define the regex pattern to find the property line and remove it completely - const propertyRegex = new RegExp(`^\\s*${property}:.*\\n?`, 'm'); - - // Remove the line if the property exists - const newGeneralConfig = generalConfig.replace(propertyRegex, ''); - - fs.writeFileSync(generalConfigPath, newGeneralConfig, 'utf8'); -} diff --git a/etc/env/base/external_price_api.toml b/etc/env/base/external_price_api.toml index bb22e86c432b..fe88e71e82a2 100644 --- a/etc/env/base/external_price_api.toml +++ b/etc/env/base/external_price_api.toml @@ -6,5 +6,5 @@ source = "forced" [external_price_api_client.forced] -numerator = 3 -denominator = 2 +numerator = 314 +denominator = 1000 diff --git a/etc/env/file_based/general.yaml b/etc/env/file_based/general.yaml index 5abee904765b..94758d92e180 100644 --- a/etc/env/file_based/general.yaml +++ b/etc/env/file_based/general.yaml @@ -299,8 +299,8 @@ base_token_adjuster: external_price_api_client: source: "forced" client_timeout_ms: 10000 - forced_numerator: 3 - forced_denominator: 2 + forced_numerator: 314 + forced_denominator: 1000 house_keeper: diff --git a/zkstack_cli/crates/zkstack/src/commands/chain/common.rs b/zkstack_cli/crates/zkstack/src/commands/chain/common.rs index e0aa0b4e0470..0c35b3ee4fe0 100644 --- a/zkstack_cli/crates/zkstack/src/commands/chain/common.rs +++ b/zkstack_cli/crates/zkstack/src/commands/chain/common.rs @@ -27,6 +27,9 @@ pub async fn distribute_eth( if let Some(deployer) = chain_wallets.deployer { addresses.push(deployer.address) } + if let Some(setter) = chain_wallets.token_multiplier_setter { + addresses.push(setter.address) + } common::ethereum::distribute_eth( wallets.operator, addresses, From 42f177ac43b86cd24321ad9222121fc8a91c49e0 Mon Sep 17 00:00:00 2001 From: Dima Zhornyk <55756184+dimazhornyk@users.noreply.github.com> Date: Wed, 30 Oct 2024 16:50:04 +0100 Subject: [PATCH 29/32] fix(da-clients): enable tls-roots feature for tonic (#3201) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit ## What ❔ Enable `tls-roots` feature in `tonic` crate. ## Why ❔ Without this feature, the connection with the Eigen disperser can't be established ## Checklist - [ ] PR title corresponds to the body of PR (we generate changelog entries from PRs). - [ ] Tests for the changes have been added / updated. 
- [ ] Documentation comments have been added / updated. - [ ] Code has been formatted via `zkstack dev fmt` and `zkstack dev lint`. --- Cargo.lock | 1 + core/node/da_clients/Cargo.toml | 2 +- 2 files changed, 2 insertions(+), 1 deletion(-) diff --git a/Cargo.lock b/Cargo.lock index 9f94faea781c..eb2a72eb8c4a 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -9106,6 +9106,7 @@ dependencies = [ "percent-encoding", "pin-project", "prost 0.12.6", + "rustls-native-certs 0.7.3", "rustls-pemfile 2.2.0", "rustls-pki-types", "tokio", diff --git a/core/node/da_clients/Cargo.toml b/core/node/da_clients/Cargo.toml index bde71ce3ec5a..b62376b6e476 100644 --- a/core/node/da_clients/Cargo.toml +++ b/core/node/da_clients/Cargo.toml @@ -50,7 +50,7 @@ sha2.workspace = true prost.workspace = true bech32.workspace = true ripemd.workspace = true -tonic = { workspace = true, features = ["tls", "default"] } +tonic = { workspace = true, features = ["tls", "tls-roots", "prost", "codegen"] } pbjson-types.workspace = true # Eigen dependencies From 8ae06b237647715937fb3656d881c0fd460f2a07 Mon Sep 17 00:00:00 2001 From: Dima Zhornyk <55756184+dimazhornyk@users.noreply.github.com> Date: Wed, 30 Oct 2024 19:19:25 +0100 Subject: [PATCH 30/32] fix(da-clients): add padding to the data within EigenDA blob (#3203) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit ## What ❔ Add the padding to the blob data in EigenDA client ## Why ❔ The unpadded larger blobs can't be decoded by the EigenDA disperser. ## Checklist - [ ] PR title corresponds to the body of PR (we generate changelog entries from PRs). - [ ] Tests for the changes have been added / updated. - [ ] Documentation comments have been added / updated. - [ ] Code has been formatted via `zkstack dev fmt` and `zkstack dev lint`. --- core/node/da_clients/Cargo.toml | 2 +- core/node/da_clients/src/eigen/sdk.rs | 31 ++++++++++++++++++++++++++- 2 files changed, 31 insertions(+), 2 deletions(-) diff --git a/core/node/da_clients/Cargo.toml b/core/node/da_clients/Cargo.toml index b62376b6e476..e0c85b3030ab 100644 --- a/core/node/da_clients/Cargo.toml +++ b/core/node/da_clients/Cargo.toml @@ -50,7 +50,7 @@ sha2.workspace = true prost.workspace = true bech32.workspace = true ripemd.workspace = true -tonic = { workspace = true, features = ["tls", "tls-roots", "prost", "codegen"] } +tonic = { workspace = true, features = ["tls-roots", "prost", "codegen"] } pbjson-types.workspace = true # Eigen dependencies diff --git a/core/node/da_clients/src/eigen/sdk.rs b/core/node/da_clients/src/eigen/sdk.rs index 4013cafea298..7ab7ea3ce33b 100644 --- a/core/node/da_clients/src/eigen/sdk.rs +++ b/core/node/da_clients/src/eigen/sdk.rs @@ -25,6 +25,8 @@ pub struct RawEigenClient { account_id: String, } +pub(crate) const DATA_CHUNK_SIZE: usize = 32; + impl RawEigenClient { pub(crate) const BUFFER_SIZE: usize = 1000; @@ -55,9 +57,10 @@ impl RawEigenClient { let (tx, rx) = mpsc::channel(Self::BUFFER_SIZE); let response_stream = client_clone.disperse_blob_authenticated(ReceiverStream::new(rx)); + let padded_data = convert_by_padding_empty_byte(&data); // 1. 
send DisperseBlobRequest - self.disperse_data(data, &tx).await?; + self.disperse_data(padded_data, &tx).await?; // this await is blocked until the first response on the stream, so we only await after sending the `DisperseBlobRequest` let mut response_stream = response_stream.await?.into_inner(); @@ -215,3 +218,29 @@ fn get_account_id(secret_key: &SecretKey) -> String { format!("0x{}", hex) } + +fn convert_by_padding_empty_byte(data: &[u8]) -> Vec { + let parse_size = DATA_CHUNK_SIZE - 1; + + // Calculate the number of chunks + let data_len = (data.len() + parse_size - 1) / parse_size; + + // Pre-allocate `valid_data` with enough space for all chunks + let mut valid_data = vec![0u8; data_len * DATA_CHUNK_SIZE]; + let mut valid_end = data_len * DATA_CHUNK_SIZE; + + for (i, chunk) in data.chunks(parse_size).enumerate() { + let offset = i * DATA_CHUNK_SIZE; + valid_data[offset] = 0x00; // Set first byte of each chunk to 0x00 for big-endian compliance + + let copy_end = offset + 1 + chunk.len(); + valid_data[offset + 1..copy_end].copy_from_slice(chunk); + + if i == data_len - 1 && chunk.len() < parse_size { + valid_end = offset + 1 + chunk.len(); + } + } + + valid_data.truncate(valid_end); + valid_data +} From 96540975d917761d8e464ebbdf52704955bcd898 Mon Sep 17 00:00:00 2001 From: Alex Ostrovski Date: Thu, 31 Oct 2024 12:37:14 +0200 Subject: [PATCH 31/32] fix(merkle-tree): Fix tree truncation (#3178) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit ## What ❔ Removes all stale keys for the "future" tree versions during truncation. ## Why ❔ Otherwise, we may get bogus stale keys and non-stale nodes removed from the tree during pruning. ## Checklist - [x] PR title corresponds to the body of PR (we generate changelog entries from PRs). - [x] Tests for the changes have been added / updated. - [x] Documentation comments have been added / updated. - [x] Code has been formatted via `zkstack dev fmt` and `zkstack dev lint`. --- core/lib/merkle_tree/src/domain.rs | 5 +- core/lib/merkle_tree/src/lib.rs | 38 ++++++----- core/lib/merkle_tree/src/pruning.rs | 67 ++++++++++++++++++- core/lib/merkle_tree/src/storage/database.rs | 30 +++++++++ core/lib/merkle_tree/src/storage/parallel.rs | 24 ++++++- core/lib/merkle_tree/src/storage/rocksdb.rs | 28 +++++++- .../tests/integration/merkle_tree.rs | 6 +- 7 files changed, 173 insertions(+), 25 deletions(-) diff --git a/core/lib/merkle_tree/src/domain.rs b/core/lib/merkle_tree/src/domain.rs index bb69bda209cc..5064c791ed5b 100644 --- a/core/lib/merkle_tree/src/domain.rs +++ b/core/lib/merkle_tree/src/domain.rs @@ -360,7 +360,10 @@ impl ZkSyncTree { pub fn roll_back_logs(&mut self, last_l1_batch_to_keep: L1BatchNumber) -> anyhow::Result<()> { self.tree.db.reset(); let retained_version_count = u64::from(last_l1_batch_to_keep.0 + 1); - self.tree.truncate_recent_versions(retained_version_count) + // Since `Patched<_>` doesn't implement `PruneDatabase`, we borrow the underlying DB, which is safe + // because the in-memory patch was reset above. + MerkleTree::new_unchecked(self.tree.db.inner_mut()) + .truncate_recent_versions(retained_version_count) } /// Saves the accumulated changes in the tree to RocksDB. diff --git a/core/lib/merkle_tree/src/lib.rs b/core/lib/merkle_tree/src/lib.rs index 824f23eaf526..5e97d6d77c69 100644 --- a/core/lib/merkle_tree/src/lib.rs +++ b/core/lib/merkle_tree/src/lib.rs @@ -200,24 +200,6 @@ impl MerkleTree { root.unwrap_or(Root::Empty) } - /// Removes the most recent versions from the database. 
- /// - /// The current implementation does not actually remove node data for the removed versions - /// since it's likely to be reused in the future (especially upper-level internal nodes). - /// - /// # Errors - /// - /// Proxies database I/O errors. - pub fn truncate_recent_versions(&mut self, retained_version_count: u64) -> anyhow::Result<()> { - let mut manifest = self.db.manifest().unwrap_or_default(); - if manifest.version_count > retained_version_count { - manifest.version_count = retained_version_count; - let patch = PatchSet::from_manifest(manifest); - self.db.apply_patch(patch)?; - } - Ok(()) - } - /// Extends this tree by creating its new version. /// /// # Return value @@ -259,6 +241,26 @@ impl MerkleTree { } impl MerkleTree { + /// Removes the most recent versions from the database. + /// + /// The current implementation does not actually remove node data for the removed versions + /// since it's likely to be reused in the future (especially upper-level internal nodes). + /// + /// # Errors + /// + /// Proxies database I/O errors. + pub fn truncate_recent_versions(&mut self, retained_version_count: u64) -> anyhow::Result<()> { + let mut manifest = self.db.manifest().unwrap_or_default(); + let current_version_count = manifest.version_count; + if current_version_count > retained_version_count { + // It is necessary to remove "future" stale keys since otherwise they may be used in future pruning and lead + // to non-obsolete tree nodes getting removed. + manifest.version_count = retained_version_count; + self.db.truncate(manifest, ..current_version_count)?; + } + Ok(()) + } + /// Returns the first retained version of the tree. pub fn first_retained_version(&self) -> Option { match self.db.min_stale_key_version() { diff --git a/core/lib/merkle_tree/src/pruning.rs b/core/lib/merkle_tree/src/pruning.rs index a74db40ef5e6..2e328d0a2bb5 100644 --- a/core/lib/merkle_tree/src/pruning.rs +++ b/core/lib/merkle_tree/src/pruning.rs @@ -250,7 +250,7 @@ mod tests { use super::*; use crate::{ types::{Node, NodeKey}, - Database, Key, MerkleTree, PatchSet, TreeEntry, ValueHash, + Database, Key, MerkleTree, PatchSet, RocksDBWrapper, TreeEntry, ValueHash, }; fn create_db() -> PatchSet { @@ -506,4 +506,69 @@ mod tests { println!("Keys are pruned after each update"); test_keys_are_removed_by_pruning_when_overwritten_in_multiple_batches(true); } + + fn test_pruning_with_truncation(db: impl PruneDatabase) { + let mut tree = MerkleTree::new(db).unwrap(); + let kvs: Vec<_> = (0_u64..100) + .map(|i| TreeEntry::new(Key::from(i), i + 1, ValueHash::zero())) + .collect(); + tree.extend(kvs).unwrap(); + + let overridden_kvs = vec![TreeEntry::new( + Key::from(0), + 1, + ValueHash::repeat_byte(0xaa), + )]; + tree.extend(overridden_kvs).unwrap(); + + let stale_keys = tree.db.stale_keys(1); + assert!( + stale_keys.iter().any(|key| !key.is_empty()), + "{stale_keys:?}" + ); + + // Revert `overridden_kvs`. + tree.truncate_recent_versions(1).unwrap(); + assert_eq!(tree.latest_version(), Some(0)); + let future_stale_keys = tree.db.stale_keys(1); + assert!(future_stale_keys.is_empty()); + + // Add a new version without the key. To make the matter more egregious, the inserted key + // differs from all existing keys, starting from the first nibble. 
+ let new_key = Key::from_big_endian(&[0xaa; 32]); + let new_kvs = vec![TreeEntry::new(new_key, 101, ValueHash::repeat_byte(0xaa))]; + tree.extend(new_kvs).unwrap(); + assert_eq!(tree.latest_version(), Some(1)); + + let stale_keys = tree.db.stale_keys(1); + assert_eq!(stale_keys.len(), 1); + assert!( + stale_keys[0].is_empty() && stale_keys[0].version == 0, + "{stale_keys:?}" + ); + + let (mut pruner, _) = MerkleTreePruner::new(tree.db); + let prunable_version = pruner.last_prunable_version().unwrap(); + assert_eq!(prunable_version, 1); + let stats = pruner + .prune_up_to(prunable_version) + .unwrap() + .expect("tree was not pruned"); + assert_eq!(stats.target_retained_version, 1); + assert_eq!(stats.pruned_key_count, 1); // only the root node should have been pruned + + let tree = MerkleTree::new(pruner.db).unwrap(); + tree.verify_consistency(1, false).unwrap(); + } + + #[test] + fn pruning_with_truncation() { + test_pruning_with_truncation(PatchSet::default()); + } + + #[test] + fn pruning_with_truncation_on_rocksdb() { + let temp_dir = tempfile::TempDir::new().unwrap(); + test_pruning_with_truncation(RocksDBWrapper::new(temp_dir.path()).unwrap()); + } } diff --git a/core/lib/merkle_tree/src/storage/database.rs b/core/lib/merkle_tree/src/storage/database.rs index a6e8a36c7084..a18deb643ca2 100644 --- a/core/lib/merkle_tree/src/storage/database.rs +++ b/core/lib/merkle_tree/src/storage/database.rs @@ -400,6 +400,17 @@ pub trait PruneDatabase: Database { /// /// Propagates database I/O errors. fn prune(&mut self, patch: PrunePatchSet) -> anyhow::Result<()>; + + /// Atomically truncates the specified range of versions and stale keys. + /// + /// # Errors + /// + /// Propagates database I/O errors. + fn truncate( + &mut self, + manifest: Manifest, + truncated_versions: ops::RangeTo, + ) -> anyhow::Result<()>; } impl PruneDatabase for &mut T { @@ -414,6 +425,14 @@ impl PruneDatabase for &mut T { fn prune(&mut self, patch: PrunePatchSet) -> anyhow::Result<()> { (**self).prune(patch) } + + fn truncate( + &mut self, + manifest: Manifest, + truncated_versions: ops::RangeTo, + ) -> anyhow::Result<()> { + (**self).truncate(manifest, truncated_versions) + } } impl PruneDatabase for PatchSet { @@ -447,6 +466,17 @@ impl PruneDatabase for PatchSet { .retain(|version, _| !patch.deleted_stale_key_versions.contains(version)); Ok(()) } + + fn truncate( + &mut self, + manifest: Manifest, + truncated_versions: ops::RangeTo, + ) -> anyhow::Result<()> { + self.manifest = manifest; + self.stale_keys_by_version + .retain(|version, _| !truncated_versions.contains(version)); + Ok(()) + } } #[cfg(test)] diff --git a/core/lib/merkle_tree/src/storage/parallel.rs b/core/lib/merkle_tree/src/storage/parallel.rs index c5368c4561d2..06b147efee8a 100644 --- a/core/lib/merkle_tree/src/storage/parallel.rs +++ b/core/lib/merkle_tree/src/storage/parallel.rs @@ -4,7 +4,7 @@ use std::{ any::Any, collections::{HashMap, VecDeque}, error::Error as StdError, - mem, + mem, ops, sync::{mpsc, Arc}, thread, time::Duration, @@ -375,6 +375,17 @@ impl PruneDatabase for ParallelDatabase { .context("failed synchronizing database before pruning")?; self.inner.prune(patch) } + + fn truncate( + &mut self, + manifest: Manifest, + truncated_versions: ops::RangeTo, + ) -> anyhow::Result<()> { + // Require the underlying database to be fully synced. + self.wait_sync() + .context("failed synchronizing database before truncation")?; + self.inner.truncate(manifest, truncated_versions) + } } /// Database with either sequential or parallel persistence. 
@@ -479,6 +490,17 @@ impl PruneDatabase for MaybeParallel { Self::Parallel(db) => db.prune(patch), } } + + fn truncate( + &mut self, + manifest: Manifest, + truncated_versions: ops::RangeTo, + ) -> anyhow::Result<()> { + match self { + Self::Sequential(db) => db.truncate(manifest, truncated_versions), + Self::Parallel(db) => db.truncate(manifest, truncated_versions), + } + } } #[cfg(test)] diff --git a/core/lib/merkle_tree/src/storage/rocksdb.rs b/core/lib/merkle_tree/src/storage/rocksdb.rs index 22335c829404..6995bbfbfc7f 100644 --- a/core/lib/merkle_tree/src/storage/rocksdb.rs +++ b/core/lib/merkle_tree/src/storage/rocksdb.rs @@ -1,6 +1,6 @@ //! RocksDB implementation of [`Database`]. -use std::{any::Any, cell::RefCell, path::Path, sync::Arc}; +use std::{any::Any, cell::RefCell, ops, path::Path, sync::Arc}; use anyhow::Context as _; use rayon::prelude::*; @@ -351,6 +351,32 @@ impl PruneDatabase for RocksDBWrapper { .write(write_batch) .context("Failed writing a batch to RocksDB") } + + fn truncate( + &mut self, + manifest: Manifest, + truncated_versions: ops::RangeTo, + ) -> anyhow::Result<()> { + anyhow::ensure!( + manifest.version_count <= truncated_versions.end, + "Invalid truncate call: manifest={manifest:?}, truncated_versions={truncated_versions:?}" + ); + let mut write_batch = self.db.new_write_batch(); + + let tree_cf = MerkleTreeColumnFamily::Tree; + let mut node_bytes = Vec::with_capacity(128); + manifest.serialize(&mut node_bytes); + write_batch.put_cf(tree_cf, Self::MANIFEST_KEY, &node_bytes); + + let stale_keys_cf = MerkleTreeColumnFamily::StaleKeys; + let first_version = &manifest.version_count.to_be_bytes() as &[_]; + let last_version = &truncated_versions.end.to_be_bytes(); + write_batch.delete_range_cf(stale_keys_cf, first_version..last_version); + + self.db + .write(write_batch) + .context("Failed writing a batch to RocksDB") + } } #[cfg(test)] diff --git a/core/lib/merkle_tree/tests/integration/merkle_tree.rs b/core/lib/merkle_tree/tests/integration/merkle_tree.rs index fc26cafe9ba7..789872d18730 100644 --- a/core/lib/merkle_tree/tests/integration/merkle_tree.rs +++ b/core/lib/merkle_tree/tests/integration/merkle_tree.rs @@ -6,8 +6,8 @@ use rand::{rngs::StdRng, seq::SliceRandom, Rng, SeedableRng}; use test_casing::test_casing; use zksync_crypto_primitives::hasher::blake2::Blake2Hasher; use zksync_merkle_tree::{ - Database, HashTree, MerkleTree, PatchSet, Patched, TreeEntry, TreeInstruction, TreeLogEntry, - TreeRangeDigest, + Database, HashTree, MerkleTree, PatchSet, Patched, PruneDatabase, TreeEntry, TreeInstruction, + TreeLogEntry, TreeRangeDigest, }; use zksync_types::{AccountTreeId, Address, StorageKey, H256, U256}; @@ -270,7 +270,7 @@ fn accumulating_commits(chunk_size: usize) { test_accumulated_commits(PatchSet::default(), chunk_size); } -fn test_root_hash_computing_with_reverts(db: &mut impl Database) { +fn test_root_hash_computing_with_reverts(db: &mut impl PruneDatabase) { let (kvs, expected_hash) = &*ENTRIES_AND_HASH; let (initial_update, final_update) = kvs.split_at(75); let key_updates: Vec<_> = kvs From e95f50a4ca8958b250dd79241adf44352b736c63 Mon Sep 17 00:00:00 2001 From: zksync-era-bot <147085853+zksync-era-bot@users.noreply.github.com> Date: Thu, 31 Oct 2024 15:23:09 +0400 Subject: [PATCH 32/32] chore(main): release prover 16.6.0 (#2761) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit :robot: I have created a release *beep* *boop* --- ## 
[16.6.0](https://github.com/matter-labs/zksync-era/compare/prover-v16.5.0...prover-v16.6.0) (2024-10-31) ### Features * (DB migration) Rename recursion_scheduler_level_vk_hash to snark_wrapper_vk_hash ([#2809](https://github.com/matter-labs/zksync-era/issues/2809)) ([64f9551](https://github.com/matter-labs/zksync-era/commit/64f95514c99f95da2a19a97ff064c29a97efc22f)) * Add initial version prover_autoscaler ([#2993](https://github.com/matter-labs/zksync-era/issues/2993)) ([ebf9604](https://github.com/matter-labs/zksync-era/commit/ebf9604c5ab2a1cae1ffd2f9c922f35a1d0ad876)) * added seed_peers to consensus global config ([#2920](https://github.com/matter-labs/zksync-era/issues/2920)) ([e9d1d90](https://github.com/matter-labs/zksync-era/commit/e9d1d905f1ce86f9de2cf39d79be4b5aada4a81d)) * attester committees data extractor (BFT-434) ([#2684](https://github.com/matter-labs/zksync-era/issues/2684)) ([92dde03](https://github.com/matter-labs/zksync-era/commit/92dde039ee8a0bc08e2019b7fa6f243a34d9816f)) * Bump crypto and protocol deps ([#2825](https://github.com/matter-labs/zksync-era/issues/2825)) ([a5ffaf1](https://github.com/matter-labs/zksync-era/commit/a5ffaf1b4e291d6f09ba8c1f224f5900665bffc4)) * **circuit_prover:** Add circuit prover ([#2908](https://github.com/matter-labs/zksync-era/issues/2908)) ([48317e6](https://github.com/matter-labs/zksync-era/commit/48317e640a00b016bf7bf782cc94fccaf077ed6d)) * **consensus:** Support for syncing blocks before consensus genesis over p2p network ([#3040](https://github.com/matter-labs/zksync-era/issues/3040)) ([d3edc3d](https://github.com/matter-labs/zksync-era/commit/d3edc3d817c151ed00d4fa822fdae0a746e33356)) * **da-clients:** add secrets ([#2954](https://github.com/matter-labs/zksync-era/issues/2954)) ([f4631e4](https://github.com/matter-labs/zksync-era/commit/f4631e4466de620cc1401b326d864cdb8b48a05d)) * gateway preparation ([#3006](https://github.com/matter-labs/zksync-era/issues/3006)) ([16f2757](https://github.com/matter-labs/zksync-era/commit/16f275756cd28024a6b11ac1ac327eb5b8b446e1)) * Integrate tracers and implement circuits tracer in vm2 ([#2653](https://github.com/matter-labs/zksync-era/issues/2653)) ([87b02e3](https://github.com/matter-labs/zksync-era/commit/87b02e3ab5c1f61d59dd0f0eefa9ec33a7b55488)) * Move prover data to /home/popzxc/workspace/current/zksync-era/prover/data ([#2778](https://github.com/matter-labs/zksync-era/issues/2778)) ([62e4d46](https://github.com/matter-labs/zksync-era/commit/62e4d4619dde9d6bd9102f1410eea75b0e2051c5)) * Prover e2e test ([#2975](https://github.com/matter-labs/zksync-era/issues/2975)) ([0edd796](https://github.com/matter-labs/zksync-era/commit/0edd7962429b3530ae751bd7cc947c97193dd0ca)) * **prover:** add CLI option to run prover with max allocation ([#2794](https://github.com/matter-labs/zksync-era/issues/2794)) ([35e4cae](https://github.com/matter-labs/zksync-era/commit/35e4cae29314fa98ce356a875e08b3e869a31036)) * **prover:** Add endpoint to PJM to get queue reports ([#2918](https://github.com/matter-labs/zksync-era/issues/2918)) ([2cec83f](https://github.com/matter-labs/zksync-era/commit/2cec83f26e0b9309387135ca43718af4fcd6f6b1)) * **prover:** Add error to panic message of prover ([#2807](https://github.com/matter-labs/zksync-era/issues/2807)) ([6e057eb](https://github.com/matter-labs/zksync-era/commit/6e057ebf277e0cbc7964079c01ef0348e006a53b)) * **prover:** Add min_provers and dry_run features. Improve metrics and test. 
([#3129](https://github.com/matter-labs/zksync-era/issues/3129)) ([7c28964](https://github.com/matter-labs/zksync-era/commit/7c289649b7b3c418c7193a35b51c264cf4970f3c)) * **prover:** Add scale failure events watching and pods eviction. ([#3175](https://github.com/matter-labs/zksync-era/issues/3175)) ([dd166f8](https://github.com/matter-labs/zksync-era/commit/dd166f887b11a8dfb039a0030dda923c481f67af)) * **prover:** Add sending scale requests for Scaler targets ([#3194](https://github.com/matter-labs/zksync-era/issues/3194)) ([767c5bc](https://github.com/matter-labs/zksync-era/commit/767c5bc6a62c402c099abe93b7dbecbb59e4acb7)) * **prover:** Add support for scaling WGs and compressor ([#3179](https://github.com/matter-labs/zksync-era/issues/3179)) ([c41db9e](https://github.com/matter-labs/zksync-era/commit/c41db9ecec1c21b80969604f703ac6990f6f3434)) * **prover:** Autoscaler sends scale request to appropriate agents. ([#3150](https://github.com/matter-labs/zksync-era/issues/3150)) ([bfedac0](https://github.com/matter-labs/zksync-era/commit/bfedac03b53055c6e2d5fa6bd6bdc78e2cb1724c)) * **prover:** Extract keystore into a separate crate ([#2797](https://github.com/matter-labs/zksync-era/issues/2797)) ([e239260](https://github.com/matter-labs/zksync-era/commit/e239260d77b55fcce0b1f485029762a605cdb6d0)) * **prover:** Optimize setup keys loading ([#2847](https://github.com/matter-labs/zksync-era/issues/2847)) ([19887ef](https://github.com/matter-labs/zksync-era/commit/19887ef21a8bbd26977353f8ee277b711850dfd2)) * **prover:** Refactor WitnessGenerator ([#2845](https://github.com/matter-labs/zksync-era/issues/2845)) ([934634b](https://github.com/matter-labs/zksync-era/commit/934634b149377c730ec39e904508c40628ff4019)) * **prover:** Update witness generator to zkevm_test_harness 0.150.6 ([#3029](https://github.com/matter-labs/zksync-era/issues/3029)) ([2151c28](https://github.com/matter-labs/zksync-era/commit/2151c2832498ca6e7ee1eee0bfdf6a0568345fee)) * **prover:** Use query macro instead string literals for queries ([#2930](https://github.com/matter-labs/zksync-era/issues/2930)) ([1cf959d](https://github.com/matter-labs/zksync-era/commit/1cf959da12d2b6369f34a67ccc2575b4b173d75a)) * **prover:** WG refactoring [#3](https://github.com/matter-labs/zksync-era/issues/3) ([#2942](https://github.com/matter-labs/zksync-era/issues/2942)) ([df68762](https://github.com/matter-labs/zksync-era/commit/df6876221936a44fa2fb8c80c01d043d229621fc)) * **prover:** WitnessGenerator refactoring [#2](https://github.com/matter-labs/zksync-era/issues/2) ([#2899](https://github.com/matter-labs/zksync-era/issues/2899)) ([36e5340](https://github.com/matter-labs/zksync-era/commit/36e534091f73f4e3ce86e322fb20842cda6a6b61)) * Refactor metrics/make API use binaries ([#2735](https://github.com/matter-labs/zksync-era/issues/2735)) ([8ed086a](https://github.com/matter-labs/zksync-era/commit/8ed086afecfcad30bfda44fc4d29a00beea71cca)) * Remove prover db from house keeper ([#2795](https://github.com/matter-labs/zksync-era/issues/2795)) ([85b7346](https://github.com/matter-labs/zksync-era/commit/85b734664b4306e988da07005860a7ea0fb7d22d)) * **tee:** use hex serialization for RPC responses ([#2887](https://github.com/matter-labs/zksync-era/issues/2887)) ([abe0440](https://github.com/matter-labs/zksync-era/commit/abe0440811ae4daf4a0f307922a282e9664308e0)) * **utils:** Rework locate_workspace, introduce Workspace type ([#2830](https://github.com/matter-labs/zksync-era/issues/2830)) 
([d256092](https://github.com/matter-labs/zksync-era/commit/d2560928cc67b40a97a5497ac8542915bf6f91a9)) * vm2 tracers can access storage ([#3114](https://github.com/matter-labs/zksync-era/issues/3114)) ([e466b52](https://github.com/matter-labs/zksync-era/commit/e466b52948e3c4ed1cb5af4fd999a52028e4d216)) * **vm:** Do not panic on VM divergence ([#2705](https://github.com/matter-labs/zksync-era/issues/2705)) ([7aa5721](https://github.com/matter-labs/zksync-era/commit/7aa5721d22e253d05d369a60d5bcacbf52021c48)) * **vm:** EVM emulator support – base ([#2979](https://github.com/matter-labs/zksync-era/issues/2979)) ([deafa46](https://github.com/matter-labs/zksync-era/commit/deafa460715334a77edf9fe8aa76fa90029342c4)) * **vm:** Extract batch executor to separate crate ([#2702](https://github.com/matter-labs/zksync-era/issues/2702)) ([b82dfa4](https://github.com/matter-labs/zksync-era/commit/b82dfa4d29fce107223c3638fe490b5cb0f28d8c)) * **zk_toolbox:** `zk_supervisor prover` subcommand ([#2820](https://github.com/matter-labs/zksync-era/issues/2820)) ([3506731](https://github.com/matter-labs/zksync-era/commit/3506731d1702bdec8c6b5b41cabca9a257f0269b)) * **zk_toolbox:** Add external_node consensus support ([#2821](https://github.com/matter-labs/zksync-era/issues/2821)) ([4a10d7d](https://github.com/matter-labs/zksync-era/commit/4a10d7d9554d6c1aa2f4fc46557d40baaad8ff2f)) * **zk_toolbox:** Add SQL format for zk supervisor ([#2950](https://github.com/matter-labs/zksync-era/issues/2950)) ([540e5d7](https://github.com/matter-labs/zksync-era/commit/540e5d7554f54e80d52f1bfae37e03ca8f787baf)) * **zk_toolbox:** deploy legacy bridge ([#2837](https://github.com/matter-labs/zksync-era/issues/2837)) ([93b4e08](https://github.com/matter-labs/zksync-era/commit/93b4e08257802d11108870d867dd59fa35e52733)) * **zk_toolbox:** Redesign zk_toolbox commands ([#3003](https://github.com/matter-labs/zksync-era/issues/3003)) ([114834f](https://github.com/matter-labs/zksync-era/commit/114834f357421c62d596a1954fac8ce615cfde49)) * **zkstack_cli:** Build dependencies at zkstack build time ([#3157](https://github.com/matter-labs/zksync-era/issues/3157)) ([724d9a9](https://github.com/matter-labs/zksync-era/commit/724d9a9c7f2127263845b640c843e751fd3c21ae)) ### Bug Fixes * allow compilation under current toolchain ([#3176](https://github.com/matter-labs/zksync-era/issues/3176)) ([89eadd3](https://github.com/matter-labs/zksync-era/commit/89eadd353c4fb84bb815ae56b29f4ff3467b80f3)) * **api:** Return correct flat call tracer ([#2917](https://github.com/matter-labs/zksync-era/issues/2917)) ([218646a](https://github.com/matter-labs/zksync-era/commit/218646aa1c56200f4ffee99b7f83366e2689354f)) * count SECP256 precompile to account validation gas limit as well ([#2859](https://github.com/matter-labs/zksync-era/issues/2859)) ([fee0c2a](https://github.com/matter-labs/zksync-era/commit/fee0c2ad08a5ab4a04252765b367eb9fbb1f3db7)) * Fix Doc lint. ([#3158](https://github.com/matter-labs/zksync-era/issues/3158)) ([c79949b](https://github.com/matter-labs/zksync-era/commit/c79949b8ffde9867b961192afa6c815b44865ae4)) * ignore unknown fields in rpc json response ([#2962](https://github.com/matter-labs/zksync-era/issues/2962)) ([692ea73](https://github.com/matter-labs/zksync-era/commit/692ea73f75a5fb9db2b4ac33ad24d20568638742)) * **prover:** Do not exit on missing watcher data. 
([#3119](https://github.com/matter-labs/zksync-era/issues/3119)) ([76ed6d9](https://github.com/matter-labs/zksync-era/commit/76ed6d966051c56f8e894c18461c5ea284b1a74b)) * **prover:** fix setup_metadata_to_setup_data_key ([#2875](https://github.com/matter-labs/zksync-era/issues/2875)) ([4ae5a93](https://github.com/matter-labs/zksync-era/commit/4ae5a93e9e96cd0cd529baf9ffa78c1b21a9c4b1)) * **prover:** Run for zero queue to allow scaling down to 0 ([#3115](https://github.com/matter-labs/zksync-era/issues/3115)) ([bbe1919](https://github.com/matter-labs/zksync-era/commit/bbe191937fa5c5711a7164fd4f0c2ae65cda0833)) * **tee_verifier:** correctly initialize storage for re-execution ([#3017](https://github.com/matter-labs/zksync-era/issues/3017)) ([9d88373](https://github.com/matter-labs/zksync-era/commit/9d88373f1b745c489e98e5ef542644a70e815498)) * **vm:** Prepare new VM for use in API server and fix divergences ([#2994](https://github.com/matter-labs/zksync-era/issues/2994)) ([741b77e](https://github.com/matter-labs/zksync-era/commit/741b77e080f75c6a93d3ee779b1c9ce4297618f9)) --- This PR was generated with [Release Please](https://github.com/googleapis/release-please). See [documentation](https://github.com/googleapis/release-please#release-please). --- .github/release-please/manifest.json | 2 +- prover/CHANGELOG.md | 61 ++++++++++++++++++++++++++++ 2 files changed, 62 insertions(+), 1 deletion(-) diff --git a/.github/release-please/manifest.json b/.github/release-please/manifest.json index a0d1d73bddaf..47fc53044802 100644 --- a/.github/release-please/manifest.json +++ b/.github/release-please/manifest.json @@ -1,5 +1,5 @@ { "core": "25.0.0", - "prover": "16.5.0", + "prover": "16.6.0", "zkstack_cli": "0.1.2" } diff --git a/prover/CHANGELOG.md b/prover/CHANGELOG.md index 0201ce4a920f..83a9e31fbe33 100644 --- a/prover/CHANGELOG.md +++ b/prover/CHANGELOG.md @@ -1,5 +1,66 @@ # Changelog +## [16.6.0](https://github.com/matter-labs/zksync-era/compare/prover-v16.5.0...prover-v16.6.0) (2024-10-31) + + +### Features + +* (DB migration) Rename recursion_scheduler_level_vk_hash to snark_wrapper_vk_hash ([#2809](https://github.com/matter-labs/zksync-era/issues/2809)) ([64f9551](https://github.com/matter-labs/zksync-era/commit/64f95514c99f95da2a19a97ff064c29a97efc22f)) +* Add initial version prover_autoscaler ([#2993](https://github.com/matter-labs/zksync-era/issues/2993)) ([ebf9604](https://github.com/matter-labs/zksync-era/commit/ebf9604c5ab2a1cae1ffd2f9c922f35a1d0ad876)) +* added seed_peers to consensus global config ([#2920](https://github.com/matter-labs/zksync-era/issues/2920)) ([e9d1d90](https://github.com/matter-labs/zksync-era/commit/e9d1d905f1ce86f9de2cf39d79be4b5aada4a81d)) +* attester committees data extractor (BFT-434) ([#2684](https://github.com/matter-labs/zksync-era/issues/2684)) ([92dde03](https://github.com/matter-labs/zksync-era/commit/92dde039ee8a0bc08e2019b7fa6f243a34d9816f)) +* Bump crypto and protocol deps ([#2825](https://github.com/matter-labs/zksync-era/issues/2825)) ([a5ffaf1](https://github.com/matter-labs/zksync-era/commit/a5ffaf1b4e291d6f09ba8c1f224f5900665bffc4)) +* **circuit_prover:** Add circuit prover ([#2908](https://github.com/matter-labs/zksync-era/issues/2908)) ([48317e6](https://github.com/matter-labs/zksync-era/commit/48317e640a00b016bf7bf782cc94fccaf077ed6d)) +* **consensus:** Support for syncing blocks before consensus genesis over p2p network ([#3040](https://github.com/matter-labs/zksync-era/issues/3040)) 
([d3edc3d](https://github.com/matter-labs/zksync-era/commit/d3edc3d817c151ed00d4fa822fdae0a746e33356)) +* **da-clients:** add secrets ([#2954](https://github.com/matter-labs/zksync-era/issues/2954)) ([f4631e4](https://github.com/matter-labs/zksync-era/commit/f4631e4466de620cc1401b326d864cdb8b48a05d)) +* gateway preparation ([#3006](https://github.com/matter-labs/zksync-era/issues/3006)) ([16f2757](https://github.com/matter-labs/zksync-era/commit/16f275756cd28024a6b11ac1ac327eb5b8b446e1)) +* Integrate tracers and implement circuits tracer in vm2 ([#2653](https://github.com/matter-labs/zksync-era/issues/2653)) ([87b02e3](https://github.com/matter-labs/zksync-era/commit/87b02e3ab5c1f61d59dd0f0eefa9ec33a7b55488)) +* Move prover data to /home/popzxc/workspace/current/zksync-era/prover/data ([#2778](https://github.com/matter-labs/zksync-era/issues/2778)) ([62e4d46](https://github.com/matter-labs/zksync-era/commit/62e4d4619dde9d6bd9102f1410eea75b0e2051c5)) +* Prover e2e test ([#2975](https://github.com/matter-labs/zksync-era/issues/2975)) ([0edd796](https://github.com/matter-labs/zksync-era/commit/0edd7962429b3530ae751bd7cc947c97193dd0ca)) +* **prover:** add CLI option to run prover with max allocation ([#2794](https://github.com/matter-labs/zksync-era/issues/2794)) ([35e4cae](https://github.com/matter-labs/zksync-era/commit/35e4cae29314fa98ce356a875e08b3e869a31036)) +* **prover:** Add endpoint to PJM to get queue reports ([#2918](https://github.com/matter-labs/zksync-era/issues/2918)) ([2cec83f](https://github.com/matter-labs/zksync-era/commit/2cec83f26e0b9309387135ca43718af4fcd6f6b1)) +* **prover:** Add error to panic message of prover ([#2807](https://github.com/matter-labs/zksync-era/issues/2807)) ([6e057eb](https://github.com/matter-labs/zksync-era/commit/6e057ebf277e0cbc7964079c01ef0348e006a53b)) +* **prover:** Add min_provers and dry_run features. Improve metrics and test. ([#3129](https://github.com/matter-labs/zksync-era/issues/3129)) ([7c28964](https://github.com/matter-labs/zksync-era/commit/7c289649b7b3c418c7193a35b51c264cf4970f3c)) +* **prover:** Add scale failure events watching and pods eviction. ([#3175](https://github.com/matter-labs/zksync-era/issues/3175)) ([dd166f8](https://github.com/matter-labs/zksync-era/commit/dd166f887b11a8dfb039a0030dda923c481f67af)) +* **prover:** Add sending scale requests for Scaler targets ([#3194](https://github.com/matter-labs/zksync-era/issues/3194)) ([767c5bc](https://github.com/matter-labs/zksync-era/commit/767c5bc6a62c402c099abe93b7dbecbb59e4acb7)) +* **prover:** Add support for scaling WGs and compressor ([#3179](https://github.com/matter-labs/zksync-era/issues/3179)) ([c41db9e](https://github.com/matter-labs/zksync-era/commit/c41db9ecec1c21b80969604f703ac6990f6f3434)) +* **prover:** Autoscaler sends scale request to appropriate agents. 
([#3150](https://github.com/matter-labs/zksync-era/issues/3150)) ([bfedac0](https://github.com/matter-labs/zksync-era/commit/bfedac03b53055c6e2d5fa6bd6bdc78e2cb1724c)) +* **prover:** Extract keystore into a separate crate ([#2797](https://github.com/matter-labs/zksync-era/issues/2797)) ([e239260](https://github.com/matter-labs/zksync-era/commit/e239260d77b55fcce0b1f485029762a605cdb6d0)) +* **prover:** Optimize setup keys loading ([#2847](https://github.com/matter-labs/zksync-era/issues/2847)) ([19887ef](https://github.com/matter-labs/zksync-era/commit/19887ef21a8bbd26977353f8ee277b711850dfd2)) +* **prover:** Refactor WitnessGenerator ([#2845](https://github.com/matter-labs/zksync-era/issues/2845)) ([934634b](https://github.com/matter-labs/zksync-era/commit/934634b149377c730ec39e904508c40628ff4019)) +* **prover:** Update witness generator to zkevm_test_harness 0.150.6 ([#3029](https://github.com/matter-labs/zksync-era/issues/3029)) ([2151c28](https://github.com/matter-labs/zksync-era/commit/2151c2832498ca6e7ee1eee0bfdf6a0568345fee)) +* **prover:** Use query macro instead string literals for queries ([#2930](https://github.com/matter-labs/zksync-era/issues/2930)) ([1cf959d](https://github.com/matter-labs/zksync-era/commit/1cf959da12d2b6369f34a67ccc2575b4b173d75a)) +* **prover:** WG refactoring [#3](https://github.com/matter-labs/zksync-era/issues/3) ([#2942](https://github.com/matter-labs/zksync-era/issues/2942)) ([df68762](https://github.com/matter-labs/zksync-era/commit/df6876221936a44fa2fb8c80c01d043d229621fc)) +* **prover:** WitnessGenerator refactoring [#2](https://github.com/matter-labs/zksync-era/issues/2) ([#2899](https://github.com/matter-labs/zksync-era/issues/2899)) ([36e5340](https://github.com/matter-labs/zksync-era/commit/36e534091f73f4e3ce86e322fb20842cda6a6b61)) +* Refactor metrics/make API use binaries ([#2735](https://github.com/matter-labs/zksync-era/issues/2735)) ([8ed086a](https://github.com/matter-labs/zksync-era/commit/8ed086afecfcad30bfda44fc4d29a00beea71cca)) +* Remove prover db from house keeper ([#2795](https://github.com/matter-labs/zksync-era/issues/2795)) ([85b7346](https://github.com/matter-labs/zksync-era/commit/85b734664b4306e988da07005860a7ea0fb7d22d)) +* **tee:** use hex serialization for RPC responses ([#2887](https://github.com/matter-labs/zksync-era/issues/2887)) ([abe0440](https://github.com/matter-labs/zksync-era/commit/abe0440811ae4daf4a0f307922a282e9664308e0)) +* **utils:** Rework locate_workspace, introduce Workspace type ([#2830](https://github.com/matter-labs/zksync-era/issues/2830)) ([d256092](https://github.com/matter-labs/zksync-era/commit/d2560928cc67b40a97a5497ac8542915bf6f91a9)) +* vm2 tracers can access storage ([#3114](https://github.com/matter-labs/zksync-era/issues/3114)) ([e466b52](https://github.com/matter-labs/zksync-era/commit/e466b52948e3c4ed1cb5af4fd999a52028e4d216)) +* **vm:** Do not panic on VM divergence ([#2705](https://github.com/matter-labs/zksync-era/issues/2705)) ([7aa5721](https://github.com/matter-labs/zksync-era/commit/7aa5721d22e253d05d369a60d5bcacbf52021c48)) +* **vm:** EVM emulator support – base ([#2979](https://github.com/matter-labs/zksync-era/issues/2979)) ([deafa46](https://github.com/matter-labs/zksync-era/commit/deafa460715334a77edf9fe8aa76fa90029342c4)) +* **vm:** Extract batch executor to separate crate ([#2702](https://github.com/matter-labs/zksync-era/issues/2702)) ([b82dfa4](https://github.com/matter-labs/zksync-era/commit/b82dfa4d29fce107223c3638fe490b5cb0f28d8c)) +* **zk_toolbox:** `zk_supervisor prover` 
subcommand ([#2820](https://github.com/matter-labs/zksync-era/issues/2820)) ([3506731](https://github.com/matter-labs/zksync-era/commit/3506731d1702bdec8c6b5b41cabca9a257f0269b)) +* **zk_toolbox:** Add external_node consensus support ([#2821](https://github.com/matter-labs/zksync-era/issues/2821)) ([4a10d7d](https://github.com/matter-labs/zksync-era/commit/4a10d7d9554d6c1aa2f4fc46557d40baaad8ff2f)) +* **zk_toolbox:** Add SQL format for zk supervisor ([#2950](https://github.com/matter-labs/zksync-era/issues/2950)) ([540e5d7](https://github.com/matter-labs/zksync-era/commit/540e5d7554f54e80d52f1bfae37e03ca8f787baf)) +* **zk_toolbox:** deploy legacy bridge ([#2837](https://github.com/matter-labs/zksync-era/issues/2837)) ([93b4e08](https://github.com/matter-labs/zksync-era/commit/93b4e08257802d11108870d867dd59fa35e52733)) +* **zk_toolbox:** Redesign zk_toolbox commands ([#3003](https://github.com/matter-labs/zksync-era/issues/3003)) ([114834f](https://github.com/matter-labs/zksync-era/commit/114834f357421c62d596a1954fac8ce615cfde49)) +* **zkstack_cli:** Build dependencies at zkstack build time ([#3157](https://github.com/matter-labs/zksync-era/issues/3157)) ([724d9a9](https://github.com/matter-labs/zksync-era/commit/724d9a9c7f2127263845b640c843e751fd3c21ae)) + + +### Bug Fixes + +* allow compilation under current toolchain ([#3176](https://github.com/matter-labs/zksync-era/issues/3176)) ([89eadd3](https://github.com/matter-labs/zksync-era/commit/89eadd353c4fb84bb815ae56b29f4ff3467b80f3)) +* **api:** Return correct flat call tracer ([#2917](https://github.com/matter-labs/zksync-era/issues/2917)) ([218646a](https://github.com/matter-labs/zksync-era/commit/218646aa1c56200f4ffee99b7f83366e2689354f)) +* count SECP256 precompile to account validation gas limit as well ([#2859](https://github.com/matter-labs/zksync-era/issues/2859)) ([fee0c2a](https://github.com/matter-labs/zksync-era/commit/fee0c2ad08a5ab4a04252765b367eb9fbb1f3db7)) +* Fix Doc lint. ([#3158](https://github.com/matter-labs/zksync-era/issues/3158)) ([c79949b](https://github.com/matter-labs/zksync-era/commit/c79949b8ffde9867b961192afa6c815b44865ae4)) +* ignore unknown fields in rpc json response ([#2962](https://github.com/matter-labs/zksync-era/issues/2962)) ([692ea73](https://github.com/matter-labs/zksync-era/commit/692ea73f75a5fb9db2b4ac33ad24d20568638742)) +* **prover:** Do not exit on missing watcher data. 
([#3119](https://github.com/matter-labs/zksync-era/issues/3119)) ([76ed6d9](https://github.com/matter-labs/zksync-era/commit/76ed6d966051c56f8e894c18461c5ea284b1a74b)) +* **prover:** fix setup_metadata_to_setup_data_key ([#2875](https://github.com/matter-labs/zksync-era/issues/2875)) ([4ae5a93](https://github.com/matter-labs/zksync-era/commit/4ae5a93e9e96cd0cd529baf9ffa78c1b21a9c4b1)) +* **prover:** Run for zero queue to allow scaling down to 0 ([#3115](https://github.com/matter-labs/zksync-era/issues/3115)) ([bbe1919](https://github.com/matter-labs/zksync-era/commit/bbe191937fa5c5711a7164fd4f0c2ae65cda0833)) +* **tee_verifier:** correctly initialize storage for re-execution ([#3017](https://github.com/matter-labs/zksync-era/issues/3017)) ([9d88373](https://github.com/matter-labs/zksync-era/commit/9d88373f1b745c489e98e5ef542644a70e815498)) +* **vm:** Prepare new VM for use in API server and fix divergences ([#2994](https://github.com/matter-labs/zksync-era/issues/2994)) ([741b77e](https://github.com/matter-labs/zksync-era/commit/741b77e080f75c6a93d3ee779b1c9ce4297618f9)) + ## [16.5.0](https://github.com/matter-labs/zksync-era/compare/prover-v16.4.0...prover-v16.5.0) (2024-08-28)