From d27502fb72111b6359163e3f3c235975e50ace05 Mon Sep 17 00:00:00 2001 From: perekopskiy Date: Thu, 14 Nov 2024 11:24:09 +0200 Subject: [PATCH 01/11] integrate gateway into some components --- Cargo.lock | 6 + core/bin/external_node/src/config/mod.rs | 59 +- core/bin/external_node/src/node_builder.rs | 22 +- core/bin/external_node/src/tests/mod.rs | 6 +- core/bin/external_node/src/tests/utils.rs | 3 + core/bin/zksync_server/src/node_builder.rs | 7 +- core/lib/basic_types/src/lib.rs | 2 +- core/lib/basic_types/src/web3/mod.rs | 8 +- core/lib/config/src/configs/en_config.rs | 1 - core/lib/config/src/configs/secrets.rs | 1 + core/lib/config/src/testonly.rs | 3 +- core/lib/constants/src/contracts.rs | 1 + core/lib/constants/src/message_root.rs | 13 +- core/lib/contracts/src/lib.rs | 462 +++++++++- ...90ded732839b9f5bf16042205a730fac07c3a.json | 3 +- ...53b2126b52d568196f333973a345f984ea7c4.json | 22 + ...44469e431e35c4419fd710050d51eeefd6b8b.json | 22 + ...6b602aa0cf9b0c5b1ef39b7d07d6309454fcd.json | 2 +- ...f98ded0ad3e59e6de69c2b13d92d8ab1a07dd.json | 3 +- ...f99779da93fc8939dc724a49b286c5568129.json} | 7 +- ...5204831b7ede1a8e3b3c2d441d505c4ca58f.json} | 44 +- ...5094de508af93f4085be7cf3b54b1e8ecdadd.json | 2 +- ...7928aa933616d2186c13a4e005297b0ad63a7.json | 15 + ...55a360f8ae12b6378f8168c000d668d6489d0.json | 22 + ...ce5a6cca1524adfe99b0cb35662746479dcc1.json | 3 +- ...5d1cd01521f82e2962c3feebac395caef36f.json} | 40 +- ...5393ec02458104c483a2023b24881ae0c6716.json | 23 - ...53e30af54675ce58273cbb29312e6e88cbdf5.json | 22 + ...09fb7e3baa5ae84bd5e010008f8972e1a7f98.json | 28 + ...011081834_batch_chain_merkle_path.down.sql | 2 + ...41011081834_batch_chain_merkle_path.up.sql | 5 + core/lib/dal/src/blocks_dal.rs | 148 +++- core/lib/dal/src/blocks_web3_dal.rs | 36 + core/lib/dal/src/eth_sender_dal.rs | 36 +- core/lib/dal/src/eth_watcher_dal.rs | 1 + core/lib/dal/src/models/storage_block.rs | 14 +- core/lib/env_config/src/eth_sender.rs | 5 + core/lib/eth_client/src/clients/http/query.rs | 2 +- core/lib/eth_client/src/clients/mock.rs | 20 +- .../src/i_executor/methods/commit_batches.rs | 26 +- .../src/i_executor/methods/execute_batches.rs | 57 +- .../src/i_executor/methods/prove_batches.rs | 74 +- .../structures/commit_batch_info.rs | 150 +++- .../src/i_executor/structures/mod.rs | 10 +- .../structures/stored_batch_info.rs | 24 +- core/lib/mini_merkle_tree/src/lib.rs | 27 +- core/lib/mini_merkle_tree/src/tests.rs | 16 +- core/lib/multivm/src/versions/shadow/tests.rs | 40 +- .../src/versions/testonly/default_aa.rs | 6 +- .../src/versions/testonly/l1_messenger.rs | 175 ++++ core/lib/multivm/src/versions/testonly/mod.rs | 1 + .../multivm/src/versions/testonly/refunds.rs | 9 +- .../src/versions/testonly/tester/mod.rs | 21 +- .../versions/vm_fast/tests/l1_messenger.rs | 6 + .../multivm/src/versions/vm_fast/tests/mod.rs | 19 +- .../versions/vm_latest/tests/l1_messenger.rs | 9 + .../src/versions/vm_latest/tests/mod.rs | 16 +- core/lib/protobuf_config/src/en.rs | 9 - .../protobuf_config/src/proto/config/en.proto | 2 +- .../src/proto/config/secrets.proto | 1 + core/lib/protobuf_config/src/secrets.rs | 10 + core/lib/snapshots_applier/src/tests/utils.rs | 3 + core/lib/types/src/api/mod.rs | 51 +- core/lib/types/src/commitment/mod.rs | 26 + core/lib/types/src/l2_to_l1_log.rs | 18 + core/lib/types/src/storage/mod.rs | 5 + core/lib/vm_interface/src/pubdata/mod.rs | 4 +- core/lib/web3_decl/src/namespaces/eth.rs | 2 +- core/lib/web3_decl/src/namespaces/unstable.rs | 11 +- core/lib/web3_decl/src/types.rs | 98 +-- 
core/node/api_server/Cargo.toml | 1 + .../web3/backend_jsonrpsee/namespaces/eth.rs | 12 +- .../backend_jsonrpsee/namespaces/unstable.rs | 14 +- .../node/api_server/src/web3/namespaces/en.rs | 6 +- .../src/web3/namespaces/unstable.rs | 62 -- .../src/web3/namespaces/unstable/mod.rs | 139 +++ .../src/web3/namespaces/unstable/utils.rs | 104 +++ .../api_server/src/web3/namespaces/zks.rs | 78 +- core/node/api_server/src/web3/state.rs | 29 +- core/node/api_server/src/web3/tests/mod.rs | 18 +- core/node/commitment_generator/src/lib.rs | 4 +- core/node/consistency_checker/src/lib.rs | 305 +++++-- .../node/consistency_checker/src/tests/mod.rs | 241 ++++- core/node/db_pruner/src/tests.rs | 1 + core/node/eth_sender/src/aggregator.rs | 5 +- core/node/eth_sender/src/eth_tx_aggregator.rs | 81 +- core/node/eth_sender/src/tester.rs | 2 +- core/node/eth_sender/src/tests.rs | 2 +- core/node/eth_sender/src/zksync_functions.rs | 42 +- core/node/eth_watch/Cargo.toml | 5 + core/node/eth_watch/src/client.rs | 237 ++++- .../appended_chain_batch_root.rs | 236 +++++ .../decentralized_upgrades.rs | 15 +- .../eth_watch/src/event_processors/mod.rs | 24 +- .../src/event_processors/priority_ops.rs | 18 +- core/node/eth_watch/src/lib.rs | 91 +- core/node/eth_watch/src/tests.rs | 791 ----------------- core/node/eth_watch/src/tests/client.rs | 487 +++++++++++ core/node/eth_watch/src/tests/mod.rs | 827 ++++++++++++++++++ .../layers/consistency_checker.rs | 20 +- .../src/implementations/layers/eth_watch.rs | 12 +- .../layers/query_eth_client.rs | 40 +- .../layers/tree_data_fetcher.rs | 31 +- .../resources/eth_interface.rs | 9 + .../node_sync/src/batch_status_updater/mod.rs | 115 ++- .../src/batch_status_updater/tests.rs | 45 +- core/node/node_sync/src/metrics.rs | 5 +- .../node_sync/src/tree_data_fetcher/mod.rs | 23 +- .../src/tree_data_fetcher/provider/mod.rs | 107 ++- .../src/tree_data_fetcher/provider/tests.rs | 211 ++++- .../commands/external_node/prepare_configs.rs | 2 +- 111 files changed, 4914 insertions(+), 1630 deletions(-) create mode 100644 core/lib/dal/.sqlx/query-2d0a4e9281e53b0e410b9be0ebd53b2126b52d568196f333973a345f984ea7c4.json create mode 100644 core/lib/dal/.sqlx/query-2e3107b0c5e8466598066ceca9844469e431e35c4419fd710050d51eeefd6b8b.json rename core/lib/dal/.sqlx/{query-5503575d9377785894de6cf6139a8d4768c6a803a1a90889e5a1b8254c315231.json => query-5cc172812d228c5b60be8dd8b8eaf99779da93fc8939dc724a49b286c5568129.json} (57%) rename core/lib/dal/.sqlx/{query-d3760406b7bf5d14a3fe6cbc9fb3926df634ebf0d8286181fa04884fb747cee8.json => query-9b011c7afa158edd17fe0dc56dad5204831b7ede1a8e3b3c2d441d505c4ca58f.json} (52%) create mode 100644 core/lib/dal/.sqlx/query-c1f9ecf033d609457106189bc4d7928aa933616d2186c13a4e005297b0ad63a7.json create mode 100644 core/lib/dal/.sqlx/query-c2c288d268d6b266acbfc1058bc55a360f8ae12b6378f8168c000d668d6489d0.json rename core/lib/dal/.sqlx/{query-a88b113b5dc06ac990a66202b3c05e2c2f10d5cbdb03d02c3c541f7eaa1f58a6.json => query-cd6ab0aea6e1f72c58c189e098be5d1cd01521f82e2962c3feebac395caef36f.json} (51%) delete mode 100644 core/lib/dal/.sqlx/query-cf3c7b918a3f82476543841d4dc5393ec02458104c483a2023b24881ae0c6716.json create mode 100644 core/lib/dal/.sqlx/query-cf8aaa95e3e8c376b6083c7015753e30af54675ce58273cbb29312e6e88cbdf5.json create mode 100644 core/lib/dal/.sqlx/query-f516657dd48332522a5580e26c509fb7e3baa5ae84bd5e010008f8972e1a7f98.json create mode 100644 core/lib/dal/migrations/20241011081834_batch_chain_merkle_path.down.sql create mode 100644 
core/lib/dal/migrations/20241011081834_batch_chain_merkle_path.up.sql create mode 100644 core/lib/multivm/src/versions/testonly/l1_messenger.rs create mode 100644 core/lib/multivm/src/versions/vm_fast/tests/l1_messenger.rs create mode 100644 core/lib/multivm/src/versions/vm_latest/tests/l1_messenger.rs delete mode 100644 core/node/api_server/src/web3/namespaces/unstable.rs create mode 100644 core/node/api_server/src/web3/namespaces/unstable/mod.rs create mode 100644 core/node/api_server/src/web3/namespaces/unstable/utils.rs create mode 100644 core/node/eth_watch/src/event_processors/appended_chain_batch_root.rs delete mode 100644 core/node/eth_watch/src/tests.rs create mode 100644 core/node/eth_watch/src/tests/client.rs create mode 100644 core/node/eth_watch/src/tests/mod.rs diff --git a/Cargo.lock b/Cargo.lock index eb93300b1729..9c62c84a658e 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -11100,6 +11100,9 @@ dependencies = [ "anyhow", "async-recursion", "async-trait", + "bincode", + "hex", + "itertools 0.10.5", "test-log", "thiserror", "tokio", @@ -11109,9 +11112,11 @@ dependencies = [ "zksync_contracts", "zksync_dal", "zksync_eth_client", + "zksync_mini_merkle_tree", "zksync_shared_metrics", "zksync_system_constants", "zksync_types", + "zksync_web3_decl", ] [[package]] @@ -11465,6 +11470,7 @@ dependencies = [ "zksync_config", "zksync_consensus_roles", "zksync_contracts", + "zksync_crypto_primitives", "zksync_dal", "zksync_health_check", "zksync_metadata_calculator", diff --git a/core/bin/external_node/src/config/mod.rs b/core/bin/external_node/src/config/mod.rs index 81604f83008a..6f3222af6dfa 100644 --- a/core/bin/external_node/src/config/mod.rs +++ b/core/bin/external_node/src/config/mod.rs @@ -102,11 +102,11 @@ impl ConfigurationSource for Environment { /// This part of the external node config is fetched directly from the main node. #[derive(Debug, Deserialize)] pub(crate) struct RemoteENConfig { - pub bridgehub_proxy_addr: Option
<Address>, - pub state_transition_proxy_addr: Option<Address>, - pub transparent_proxy_admin_addr: Option<Address>, - /// Should not be accessed directly. Use [`ExternalNodeConfig::diamond_proxy_address`] instead. - diamond_proxy_addr: Address, + pub l1_bridgehub_proxy_addr: Option<Address>, + pub l1_state_transition_proxy_addr: Option<Address>, + pub l1_transparent_proxy_admin_addr: Option<Address>
, + /// Should not be accessed directly. Use [`ExternalNodeConfig::l1_diamond_proxy_address`] instead. + l1_diamond_proxy_addr: Address, // While on L1 shared bridge and legacy bridge are different contracts with different addresses, // the `l2_erc20_bridge_addr` and `l2_shared_bridge_addr` are basically the same contract, but with // a different name, with names adapted only for consistency. @@ -144,7 +144,7 @@ impl RemoteENConfig { .rpc_context("ecosystem_contracts") .await .ok(); - let diamond_proxy_addr = client + let l1_diamond_proxy_addr = client .get_main_contract() .rpc_context("get_main_contract") .await?; @@ -180,14 +180,14 @@ impl RemoteENConfig { } Ok(Self { - bridgehub_proxy_addr: ecosystem_contracts.as_ref().map(|a| a.bridgehub_proxy_addr), - state_transition_proxy_addr: ecosystem_contracts + l1_bridgehub_proxy_addr: ecosystem_contracts.as_ref().map(|a| a.bridgehub_proxy_addr), + l1_state_transition_proxy_addr: ecosystem_contracts .as_ref() .map(|a| a.state_transition_proxy_addr), - transparent_proxy_admin_addr: ecosystem_contracts + l1_transparent_proxy_admin_addr: ecosystem_contracts .as_ref() .map(|a| a.transparent_proxy_admin_addr), - diamond_proxy_addr, + l1_diamond_proxy_addr, l2_testnet_paymaster_addr, l1_erc20_bridge_proxy_addr: bridges.l1_erc20_default_bridge, l2_erc20_bridge_addr: l2_erc20_default_bridge, @@ -212,10 +212,10 @@ impl RemoteENConfig { #[cfg(test)] fn mock() -> Self { Self { - bridgehub_proxy_addr: None, - state_transition_proxy_addr: None, - transparent_proxy_admin_addr: None, - diamond_proxy_addr: Address::repeat_byte(1), + l1_bridgehub_proxy_addr: None, + l1_state_transition_proxy_addr: None, + l1_transparent_proxy_admin_addr: None, + l1_diamond_proxy_addr: Address::repeat_byte(1), l1_erc20_bridge_proxy_addr: Some(Address::repeat_byte(2)), l2_erc20_bridge_addr: Some(Address::repeat_byte(3)), l2_weth_bridge_addr: None, @@ -479,7 +479,6 @@ pub(crate) struct OptionalENConfig { #[serde(default = "OptionalENConfig::default_pruning_data_retention_sec")] pruning_data_retention_sec: u64, /// Gateway RPC URL, needed for operating during migration. - #[allow(dead_code)] pub gateway_url: Option, /// Interval for bridge addresses refreshing in seconds. 
bridge_addresses_refresh_interval_sec: Option, @@ -489,7 +488,11 @@ pub(crate) struct OptionalENConfig { } impl OptionalENConfig { - fn from_configs(general_config: &GeneralConfig, enconfig: &ENConfig) -> anyhow::Result { + fn from_configs( + general_config: &GeneralConfig, + enconfig: &ENConfig, + secrets: &Secrets, + ) -> anyhow::Result { let api_namespaces = load_config!(general_config.api_config, web3_json_rpc.api_namespaces) .map(|a: Vec| a.iter().map(|a| a.parse()).collect::>()) .transpose()?; @@ -721,7 +724,7 @@ impl OptionalENConfig { .unwrap_or_else(Self::default_main_node_rate_limit_rps), api_namespaces, contracts_diamond_proxy_addr: None, - gateway_url: enconfig.gateway_url.clone(), + gateway_url: secrets.l1.as_ref().and_then(|l1| l1.gateway_url.clone()), bridge_addresses_refresh_interval_sec: enconfig.bridge_addresses_refresh_interval_sec, timestamp_asserter_min_time_till_end_sec: general_config .timestamp_asserter_config @@ -1340,7 +1343,11 @@ impl ExternalNodeConfig<()> { &external_node_config, &secrets_config, )?; - let optional = OptionalENConfig::from_configs(&general_config, &external_node_config)?; + let optional = OptionalENConfig::from_configs( + &general_config, + &external_node_config, + &secrets_config, + )?; let postgres = PostgresConfig { database_url: secrets_config .database @@ -1383,7 +1390,7 @@ impl ExternalNodeConfig<()> { let remote = RemoteENConfig::fetch(main_node_client) .await .context("Unable to fetch required config values from the main node")?; - let remote_diamond_proxy_addr = remote.diamond_proxy_addr; + let remote_diamond_proxy_addr = remote.l1_diamond_proxy_addr; if let Some(local_diamond_proxy_addr) = self.optional.contracts_diamond_proxy_addr { anyhow::ensure!( local_diamond_proxy_addr == remote_diamond_proxy_addr, @@ -1430,14 +1437,14 @@ impl ExternalNodeConfig { } } - /// Returns a verified diamond proxy address. + /// Returns verified L1 diamond proxy address. /// If local configuration contains the address, it will be checked against the one returned by the main node. /// Otherwise, the remote value will be used. However, using remote value has trust implications for the main /// node so relying on it solely is not recommended. 
- pub fn diamond_proxy_address(&self) -> Address { + pub fn l1_diamond_proxy_address(&self) -> Address { self.optional .contracts_diamond_proxy_addr - .unwrap_or(self.remote.diamond_proxy_addr) + .unwrap_or(self.remote.l1_diamond_proxy_addr) } } @@ -1461,10 +1468,10 @@ impl From<&ExternalNodeConfig> for InternalApiConfig { l1_weth_bridge: config.remote.l1_weth_bridge_addr, l2_weth_bridge: config.remote.l2_weth_bridge_addr, }, - bridgehub_proxy_addr: config.remote.bridgehub_proxy_addr, - state_transition_proxy_addr: config.remote.state_transition_proxy_addr, - transparent_proxy_admin_addr: config.remote.transparent_proxy_admin_addr, - diamond_proxy_addr: config.remote.diamond_proxy_addr, + l1_bridgehub_proxy_addr: config.remote.l1_bridgehub_proxy_addr, + l1_state_transition_proxy_addr: config.remote.l1_state_transition_proxy_addr, + l1_transparent_proxy_admin_addr: config.remote.l1_transparent_proxy_admin_addr, + l1_diamond_proxy_addr: config.remote.l1_diamond_proxy_addr, l2_testnet_paymaster_addr: config.remote.l2_testnet_paymaster_addr, req_entities_limit: config.optional.req_entities_limit, fee_history_limit: config.optional.fee_history_limit, diff --git a/core/bin/external_node/src/node_builder.rs b/core/bin/external_node/src/node_builder.rs index 5c70fd436781..a5614d38fe35 100644 --- a/core/bin/external_node/src/node_builder.rs +++ b/core/bin/external_node/src/node_builder.rs @@ -57,7 +57,7 @@ use zksync_node_framework::{ service::{ZkStackService, ZkStackServiceBuilder}, }; use zksync_state::RocksdbStorageOptions; -use zksync_types::L2_NATIVE_TOKEN_VAULT_ADDRESS; +use zksync_types::L2_ASSET_ROUTER_ADDRESS; use crate::{config::ExternalNodeConfig, metrics::framework::ExternalNodeMetricsLayer, Component}; @@ -181,8 +181,7 @@ impl ExternalNodeBuilder { let query_eth_client_layer = QueryEthClientLayer::new( self.config.required.settlement_layer_id(), self.config.required.eth_client_url.clone(), - // TODO(EVM-676): add this config for external node - Default::default(), + self.config.optional.gateway_url.clone(), ); self.node.add_layer(query_eth_client_layer); Ok(self) @@ -200,12 +199,11 @@ impl ExternalNodeBuilder { .remote .l2_shared_bridge_addr .context("Missing `l2_shared_bridge_addr`")?; - let l2_legacy_shared_bridge_addr = if l2_shared_bridge_addr == L2_NATIVE_TOKEN_VAULT_ADDRESS - { - // System has migrated to `L2_NATIVE_TOKEN_VAULT_ADDRESS`, use legacy shared bridge address from main node. + let l2_legacy_shared_bridge_addr = if l2_shared_bridge_addr == L2_ASSET_ROUTER_ADDRESS { + // System has migrated to `L2_ASSET_ROUTER_ADDRESS`, use legacy shared bridge address from main node. self.config.remote.l2_legacy_shared_bridge_addr } else { - // System hasn't migrated on `L2_NATIVE_TOKEN_VAULT_ADDRESS`, we can safely use `l2_shared_bridge_addr`. + // System hasn't migrated on `L2_ASSET_ROUTER_ADDRESS`, we can safely use `l2_shared_bridge_addr`. 
Some(l2_shared_bridge_addr) }; @@ -278,7 +276,7 @@ impl ExternalNodeBuilder { fn add_l1_batch_commitment_mode_validation_layer(mut self) -> anyhow::Result { let layer = L1BatchCommitmentModeValidationLayer::new( - self.config.diamond_proxy_address(), + self.config.l1_diamond_proxy_address(), self.config.optional.l1_batch_commit_data_generator_mode, ); self.node.add_layer(layer); @@ -297,9 +295,10 @@ impl ExternalNodeBuilder { fn add_consistency_checker_layer(mut self) -> anyhow::Result { let max_batches_to_recheck = 10; // TODO (BFT-97): Make it a part of a proper EN config let layer = ConsistencyCheckerLayer::new( - self.config.diamond_proxy_address(), + self.config.l1_diamond_proxy_address(), max_batches_to_recheck, self.config.optional.l1_batch_commit_data_generator_mode, + self.config.required.l2_chain_id, ); self.node.add_layer(layer); Ok(self) @@ -324,7 +323,10 @@ impl ExternalNodeBuilder { } fn add_tree_data_fetcher_layer(mut self) -> anyhow::Result { - let layer = TreeDataFetcherLayer::new(self.config.diamond_proxy_address()); + let layer = TreeDataFetcherLayer::new( + self.config.l1_diamond_proxy_address(), + self.config.required.l2_chain_id, + ); self.node.add_layer(layer); Ok(self) } diff --git a/core/bin/external_node/src/tests/mod.rs b/core/bin/external_node/src/tests/mod.rs index 59aceea819f1..fd616a467ee8 100644 --- a/core/bin/external_node/src/tests/mod.rs +++ b/core/bin/external_node/src/tests/mod.rs @@ -35,7 +35,7 @@ async fn external_node_basics(components_str: &'static str) { } let l2_client = utils::mock_l2_client(&env); - let eth_client = utils::mock_eth_client(env.config.diamond_proxy_address()); + let eth_client = utils::mock_eth_client(env.config.l1_diamond_proxy_address()); let node_handle = tokio::task::spawn_blocking(move || { std::thread::spawn(move || { @@ -104,7 +104,7 @@ async fn node_reacts_to_stop_signal_during_initial_reorg_detection() { let (env, env_handles) = utils::TestEnvironment::with_genesis_block("core").await; let l2_client = utils::mock_l2_client_hanging(); - let eth_client = utils::mock_eth_client(env.config.diamond_proxy_address()); + let eth_client = utils::mock_eth_client(env.config.l1_diamond_proxy_address()); let mut node_handle = tokio::task::spawn_blocking(move || { std::thread::spawn(move || { @@ -140,7 +140,7 @@ async fn running_tree_without_core_is_not_allowed() { let (env, _env_handles) = utils::TestEnvironment::with_genesis_block("tree").await; let l2_client = utils::mock_l2_client(&env); - let eth_client = utils::mock_eth_client(env.config.diamond_proxy_address()); + let eth_client = utils::mock_eth_client(env.config.l1_diamond_proxy_address()); let node_handle = tokio::task::spawn_blocking(move || { std::thread::spawn(move || { diff --git a/core/bin/external_node/src/tests/utils.rs b/core/bin/external_node/src/tests/utils.rs index b26fa80d1a95..58e2a88e5fb7 100644 --- a/core/bin/external_node/src/tests/utils.rs +++ b/core/bin/external_node/src/tests/utils.rs @@ -23,10 +23,13 @@ pub(super) fn block_details_base(hash: H256) -> api::BlockDetailsBase { status: api::BlockStatus::Sealed, commit_tx_hash: None, committed_at: None, + commit_chain_id: None, prove_tx_hash: None, proven_at: None, + prove_chain_id: None, execute_tx_hash: None, executed_at: None, + execute_chain_id: None, l1_gas_price: 0, l2_fair_gas_price: 0, fair_pubdata_price: None, diff --git a/core/bin/zksync_server/src/node_builder.rs b/core/bin/zksync_server/src/node_builder.rs index 32478ede5bf8..70178a6a9c2b 100644 --- a/core/bin/zksync_server/src/node_builder.rs +++ 
b/core/bin/zksync_server/src/node_builder.rs @@ -161,11 +161,7 @@ impl MainNodeBuilder { let query_eth_client_layer = QueryEthClientLayer::new( genesis.settlement_layer_id(), eth_config.l1_rpc_url, - self.configs - .eth - .as_ref() - .and_then(|x| Some(x.gas_adjuster?.settlement_mode)) - .unwrap_or(SettlementMode::SettlesToL1), + eth_config.gateway_url, ); self.node.add_layer(query_eth_client_layer); Ok(self) @@ -283,6 +279,7 @@ impl MainNodeBuilder { self.node.add_layer(EthWatchLayer::new( try_load_config!(eth_config.watcher), self.contracts_config.clone(), + self.genesis_config.l2_chain_id, )); Ok(self) } diff --git a/core/lib/basic_types/src/lib.rs b/core/lib/basic_types/src/lib.rs index 5776416265d2..ad28613a28bb 100644 --- a/core/lib/basic_types/src/lib.rs +++ b/core/lib/basic_types/src/lib.rs @@ -148,7 +148,7 @@ impl<'de> Deserialize<'de> for L2ChainId { } impl L2ChainId { - fn new(number: u64) -> Result { + pub fn new(number: u64) -> Result { if number > L2ChainId::max().0 { return Err(format!( "Cannot convert given value {} into L2ChainId. It's greater than MAX: {}", diff --git a/core/lib/basic_types/src/web3/mod.rs b/core/lib/basic_types/src/web3/mod.rs index e6d3cab37273..98625831c991 100644 --- a/core/lib/basic_types/src/web3/mod.rs +++ b/core/lib/basic_types/src/web3/mod.rs @@ -198,7 +198,7 @@ pub struct Filter { } #[derive(Default, Debug, PartialEq, Clone)] -pub struct ValueOrArray(Vec); +pub struct ValueOrArray(pub Vec); impl ValueOrArray { pub fn flatten(self) -> Vec { @@ -206,6 +206,12 @@ impl ValueOrArray { } } +impl From for ValueOrArray { + fn from(value: T) -> Self { + Self(vec![value]) + } +} + impl Serialize for ValueOrArray where T: Serialize, diff --git a/core/lib/config/src/configs/en_config.rs b/core/lib/config/src/configs/en_config.rs index 4cab47b0779e..13a0e1f2c99d 100644 --- a/core/lib/config/src/configs/en_config.rs +++ b/core/lib/config/src/configs/en_config.rs @@ -18,6 +18,5 @@ pub struct ENConfig { pub main_node_url: SensitiveUrl, pub main_node_rate_limit_rps: Option, - pub gateway_url: Option, pub bridge_addresses_refresh_interval_sec: Option, } diff --git a/core/lib/config/src/configs/secrets.rs b/core/lib/config/src/configs/secrets.rs index 75ff067c2473..0572c65e9e94 100644 --- a/core/lib/config/src/configs/secrets.rs +++ b/core/lib/config/src/configs/secrets.rs @@ -16,6 +16,7 @@ pub struct DatabaseSecrets { #[derive(Debug, Clone, PartialEq)] pub struct L1Secrets { pub l1_rpc_url: SensitiveUrl, + pub gateway_url: Option, } #[derive(Debug, Clone, PartialEq)] diff --git a/core/lib/config/src/testonly.rs b/core/lib/config/src/testonly.rs index c24d47f27b33..aa2f109269dc 100644 --- a/core/lib/config/src/testonly.rs +++ b/core/lib/config/src/testonly.rs @@ -851,6 +851,7 @@ impl Distribution for EncodeDist { use configs::secrets::L1Secrets; L1Secrets { l1_rpc_url: format!("localhost:{}", rng.gen::()).parse().unwrap(), + gateway_url: Some(format!("localhost:{}", rng.gen::()).parse().unwrap()), } } } @@ -937,8 +938,6 @@ impl Distribution for EncodeDist { _ => L1BatchCommitmentMode::Validium, }, main_node_rate_limit_rps: self.sample_opt(|| rng.gen()), - gateway_url: self - .sample_opt(|| format!("localhost:{}", rng.gen::()).parse().unwrap()), bridge_addresses_refresh_interval_sec: self.sample_opt(|| rng.gen()), } } diff --git a/core/lib/constants/src/contracts.rs b/core/lib/constants/src/contracts.rs index 4f0f362d9149..f9138b2bbf17 100644 --- a/core/lib/constants/src/contracts.rs +++ b/core/lib/constants/src/contracts.rs @@ -135,6 +135,7 @@ pub const 
EVM_GAS_MANAGER_ADDRESS: Address = H160([ 0x00, 0x00, 0x80, 0x13, ]); +/// Note, that the `Create2Factory` and higher are explicitly deployed on a non-system-contract address. pub const CREATE2_FACTORY_ADDRESS: Address = H160([ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01, 0x00, 0x00, diff --git a/core/lib/constants/src/message_root.rs b/core/lib/constants/src/message_root.rs index a8f4a034fb99..9bb8764cd667 100644 --- a/core/lib/constants/src/message_root.rs +++ b/core/lib/constants/src/message_root.rs @@ -1,5 +1,14 @@ -// Position of `FullTree::_height` in `MessageRoot`'s storage layout. +/// Position of `chainCount` in `MessageRoot`'s storage layout. +pub const CHAIN_COUNT_KEY: usize = 0; + +/// Position of `chainIndexToId` in `MessageRoot`'s storage layout. +pub const CHAIN_INDEX_TO_ID_KEY: usize = 2; + +/// Position of `FullTree::_height` in `MessageRoot`'s storage layout. pub const AGG_TREE_HEIGHT_KEY: usize = 3; -// Position of `FullTree::nodes` in `MessageRoot`'s storage layout. +/// Position of `FullTree::nodes` in `MessageRoot`'s storage layout. pub const AGG_TREE_NODES_KEY: usize = 5; + +/// Position of `chainTree` in `MessageRoot`'s storage layout. +pub const CHAIN_TREE_KEY: usize = 7; diff --git a/core/lib/contracts/src/lib.rs b/core/lib/contracts/src/lib.rs index ad9f7739ba0d..f387f791be12 100644 --- a/core/lib/contracts/src/lib.rs +++ b/core/lib/contracts/src/lib.rs @@ -284,7 +284,9 @@ impl SystemContractsRepo { "artifacts-zk/contracts-preprocessed/{0}{1}.sol/{1}.json", directory, name ))) - .expect("One of the outputs should exists") + .unwrap_or_else(|| { + panic!("One of the outputs should exists for {directory}{name}"); + }) } } ContractLanguage::Yul => { @@ -1003,3 +1005,461 @@ pub static DIAMOND_CUT: Lazy = Lazy::new(|| { }"#; serde_json::from_str(abi).unwrap() }); + +pub static POST_BOOJUM_COMMIT_FUNCTION: Lazy = Lazy::new(|| { + let abi = r#" + { + "inputs": [ + { + "components": [ + { + "internalType": "uint64", + "name": "batchNumber", + "type": "uint64" + }, + { + "internalType": "bytes32", + "name": "batchHash", + "type": "bytes32" + }, + { + "internalType": "uint64", + "name": "indexRepeatedStorageChanges", + "type": "uint64" + }, + { + "internalType": "uint256", + "name": "numberOfLayer1Txs", + "type": "uint256" + }, + { + "internalType": "bytes32", + "name": "priorityOperationsHash", + "type": "bytes32" + }, + { + "internalType": "bytes32", + "name": "l2LogsTreeRoot", + "type": "bytes32" + }, + { + "internalType": "uint256", + "name": "timestamp", + "type": "uint256" + }, + { + "internalType": "bytes32", + "name": "commitment", + "type": "bytes32" + } + ], + "internalType": "struct IExecutor.StoredBatchInfo", + "name": "_lastCommittedBatchData", + "type": "tuple" + }, + { + "components": [ + { + "internalType": "uint64", + "name": "batchNumber", + "type": "uint64" + }, + { + "internalType": "uint64", + "name": "timestamp", + "type": "uint64" + }, + { + "internalType": "uint64", + "name": "indexRepeatedStorageChanges", + "type": "uint64" + }, + { + "internalType": "bytes32", + "name": "newStateRoot", + "type": "bytes32" + }, + { + "internalType": "uint256", + "name": "numberOfLayer1Txs", + "type": "uint256" + }, + { + "internalType": "bytes32", + "name": "priorityOperationsHash", + "type": "bytes32" + }, + { + "internalType": "bytes32", + "name": "bootloaderHeapInitialContentsHash", + "type": "bytes32" + }, + { + "internalType": "bytes32", + "name": "eventsQueueStateHash", + "type": "bytes32" + }, + 
{ + "internalType": "bytes", + "name": "systemLogs", + "type": "bytes" + }, + { + "internalType": "bytes", + "name": "pubdataCommitments", + "type": "bytes" + } + ], + "internalType": "struct IExecutor.CommitBatchInfo[]", + "name": "_newBatchesData", + "type": "tuple[]" + } + ], + "name": "commitBatches", + "outputs": [], + "stateMutability": "nonpayable", + "type": "function" + }"#; + serde_json::from_str(abi).unwrap() +}); + +pub static POST_SHARED_BRIDGE_COMMIT_FUNCTION: Lazy = Lazy::new(|| { + let abi = r#" + { + "inputs": [ + { + "internalType": "uint256", + "name": "_chainId", + "type": "uint256" + }, + { + "components": [ + { + "internalType": "uint64", + "name": "batchNumber", + "type": "uint64" + }, + { + "internalType": "bytes32", + "name": "batchHash", + "type": "bytes32" + }, + { + "internalType": "uint64", + "name": "indexRepeatedStorageChanges", + "type": "uint64" + }, + { + "internalType": "uint256", + "name": "numberOfLayer1Txs", + "type": "uint256" + }, + { + "internalType": "bytes32", + "name": "priorityOperationsHash", + "type": "bytes32" + }, + { + "internalType": "bytes32", + "name": "l2LogsTreeRoot", + "type": "bytes32" + }, + { + "internalType": "uint256", + "name": "timestamp", + "type": "uint256" + }, + { + "internalType": "bytes32", + "name": "commitment", + "type": "bytes32" + } + ], + "internalType": "struct IExecutor.StoredBatchInfo", + "name": "_lastCommittedBatchData", + "type": "tuple" + }, + { + "components": [ + { + "internalType": "uint64", + "name": "batchNumber", + "type": "uint64" + }, + { + "internalType": "uint64", + "name": "timestamp", + "type": "uint64" + }, + { + "internalType": "uint64", + "name": "indexRepeatedStorageChanges", + "type": "uint64" + }, + { + "internalType": "bytes32", + "name": "newStateRoot", + "type": "bytes32" + }, + { + "internalType": "uint256", + "name": "numberOfLayer1Txs", + "type": "uint256" + }, + { + "internalType": "bytes32", + "name": "priorityOperationsHash", + "type": "bytes32" + }, + { + "internalType": "bytes32", + "name": "bootloaderHeapInitialContentsHash", + "type": "bytes32" + }, + { + "internalType": "bytes32", + "name": "eventsQueueStateHash", + "type": "bytes32" + }, + { + "internalType": "bytes", + "name": "systemLogs", + "type": "bytes" + }, + { + "internalType": "bytes", + "name": "pubdataCommitments", + "type": "bytes" + } + ], + "internalType": "struct IExecutor.CommitBatchInfo[]", + "name": "_newBatchesData", + "type": "tuple[]" + } + ], + "name": "commitBatchesSharedBridge", + "outputs": [], + "stateMutability": "nonpayable", + "type": "function" + }"#; + serde_json::from_str(abi).unwrap() +}); + +pub static POST_SHARED_BRIDGE_PROVE_FUNCTION: Lazy = Lazy::new(|| { + let abi = r#" + { + "inputs": [ + { + "internalType": "uint256", + "name": "_chainId", + "type": "uint256" + }, + { + "components": [ + { + "internalType": "uint64", + "name": "batchNumber", + "type": "uint64" + }, + { + "internalType": "bytes32", + "name": "batchHash", + "type": "bytes32" + }, + { + "internalType": "uint64", + "name": "indexRepeatedStorageChanges", + "type": "uint64" + }, + { + "internalType": "uint256", + "name": "numberOfLayer1Txs", + "type": "uint256" + }, + { + "internalType": "bytes32", + "name": "priorityOperationsHash", + "type": "bytes32" + }, + { + "internalType": "bytes32", + "name": "l2LogsTreeRoot", + "type": "bytes32" + }, + { + "internalType": "uint256", + "name": "timestamp", + "type": "uint256" + }, + { + "internalType": "bytes32", + "name": "commitment", + "type": "bytes32" + } + ], + "internalType": 
"struct IExecutor.StoredBatchInfo", + "name": "_prevBatch", + "type": "tuple" + }, + { + "components": [ + { + "internalType": "uint64", + "name": "batchNumber", + "type": "uint64" + }, + { + "internalType": "bytes32", + "name": "batchHash", + "type": "bytes32" + }, + { + "internalType": "uint64", + "name": "indexRepeatedStorageChanges", + "type": "uint64" + }, + { + "internalType": "uint256", + "name": "numberOfLayer1Txs", + "type": "uint256" + }, + { + "internalType": "bytes32", + "name": "priorityOperationsHash", + "type": "bytes32" + }, + { + "internalType": "bytes32", + "name": "l2LogsTreeRoot", + "type": "bytes32" + }, + { + "internalType": "uint256", + "name": "timestamp", + "type": "uint256" + }, + { + "internalType": "bytes32", + "name": "commitment", + "type": "bytes32" + } + ], + "internalType": "struct IExecutor.StoredBatchInfo[]", + "name": "_committedBatches", + "type": "tuple[]" + }, + { + "components": [ + { + "internalType": "uint256[]", + "name": "recursiveAggregationInput", + "type": "uint256[]" + }, + { + "internalType": "uint256[]", + "name": "serializedProof", + "type": "uint256[]" + } + ], + "internalType": "struct IExecutor.ProofInput", + "name": "_proof", + "type": "tuple" + } + ], + "name": "proveBatchesSharedBridge", + "outputs": [], + "stateMutability": "nonpayable", + "type": "function" + }"#; + serde_json::from_str(abi).unwrap() +}); + +pub static POST_SHARED_BRIDGE_EXECUTE_FUNCTION: Lazy = Lazy::new(|| { + let abi = r#" + { + "inputs": [ + { + "internalType": "uint256", + "name": "_chainId", + "type": "uint256" + }, + { + "components": [ + { + "internalType": "uint64", + "name": "batchNumber", + "type": "uint64" + }, + { + "internalType": "bytes32", + "name": "batchHash", + "type": "bytes32" + }, + { + "internalType": "uint64", + "name": "indexRepeatedStorageChanges", + "type": "uint64" + }, + { + "internalType": "uint256", + "name": "numberOfLayer1Txs", + "type": "uint256" + }, + { + "internalType": "bytes32", + "name": "priorityOperationsHash", + "type": "bytes32" + }, + { + "internalType": "bytes32", + "name": "l2LogsTreeRoot", + "type": "bytes32" + }, + { + "internalType": "uint256", + "name": "timestamp", + "type": "uint256" + }, + { + "internalType": "bytes32", + "name": "commitment", + "type": "bytes32" + } + ], + "internalType": "struct IExecutor.StoredBatchInfo[]", + "name": "_batchesData", + "type": "tuple[]" + } + ], + "name": "executeBatchesSharedBridge", + "outputs": [], + "stateMutability": "nonpayable", + "type": "function" + }"#; + serde_json::from_str(abi).unwrap() +}); + +// Temporary thing, should be removed when new contracts are merged. 
+pub static MESSAGE_ROOT_CONTRACT: Lazy = Lazy::new(|| { + let abi = r#" + { + "inputs": [ + { + "internalType": "uint256", + "name": "_chainId", + "type": "uint256" + } + ], + "name": "getChainRoot", + "outputs": [ + { + "internalType": "bytes32", + "name": "", + "type": "bytes32" + } + ], + "stateMutability": "view", + "type": "function" + }"#; + serde_json::from_str(abi).unwrap() +}); diff --git a/core/lib/dal/.sqlx/query-228aa5ec4c4eb56143823b96a8190ded732839b9f5bf16042205a730fac07c3a.json b/core/lib/dal/.sqlx/query-228aa5ec4c4eb56143823b96a8190ded732839b9f5bf16042205a730fac07c3a.json index b8d6482ea744..32a2212dfdf6 100644 --- a/core/lib/dal/.sqlx/query-228aa5ec4c4eb56143823b96a8190ded732839b9f5bf16042205a730fac07c3a.json +++ b/core/lib/dal/.sqlx/query-228aa5ec4c4eb56143823b96a8190ded732839b9f5bf16042205a730fac07c3a.json @@ -11,7 +11,8 @@ "kind": { "Enum": [ "ProtocolUpgrades", - "PriorityTransactions" + "PriorityTransactions", + "ChainBatchRoot" ] } } diff --git a/core/lib/dal/.sqlx/query-2d0a4e9281e53b0e410b9be0ebd53b2126b52d568196f333973a345f984ea7c4.json b/core/lib/dal/.sqlx/query-2d0a4e9281e53b0e410b9be0ebd53b2126b52d568196f333973a345f984ea7c4.json new file mode 100644 index 000000000000..adbd2c0931ec --- /dev/null +++ b/core/lib/dal/.sqlx/query-2d0a4e9281e53b0e410b9be0ebd53b2126b52d568196f333973a345f984ea7c4.json @@ -0,0 +1,22 @@ +{ + "db_name": "PostgreSQL", + "query": "\n SELECT\n batch_chain_merkle_path\n FROM\n l1_batches\n WHERE\n number = $1\n ", + "describe": { + "columns": [ + { + "ordinal": 0, + "name": "batch_chain_merkle_path", + "type_info": "Bytea" + } + ], + "parameters": { + "Left": [ + "Int8" + ] + }, + "nullable": [ + true + ] + }, + "hash": "2d0a4e9281e53b0e410b9be0ebd53b2126b52d568196f333973a345f984ea7c4" +} diff --git a/core/lib/dal/.sqlx/query-2e3107b0c5e8466598066ceca9844469e431e35c4419fd710050d51eeefd6b8b.json b/core/lib/dal/.sqlx/query-2e3107b0c5e8466598066ceca9844469e431e35c4419fd710050d51eeefd6b8b.json new file mode 100644 index 000000000000..69dd87a6c355 --- /dev/null +++ b/core/lib/dal/.sqlx/query-2e3107b0c5e8466598066ceca9844469e431e35c4419fd710050d51eeefd6b8b.json @@ -0,0 +1,22 @@ +{ + "db_name": "PostgreSQL", + "query": "\n SELECT\n local_root\n FROM\n l1_batches\n WHERE\n number = $1\n ", + "describe": { + "columns": [ + { + "ordinal": 0, + "name": "local_root", + "type_info": "Bytea" + } + ], + "parameters": { + "Left": [ + "Int8" + ] + }, + "nullable": [ + true + ] + }, + "hash": "2e3107b0c5e8466598066ceca9844469e431e35c4419fd710050d51eeefd6b8b" +} diff --git a/core/lib/dal/.sqlx/query-2fa2ba4a62f79d780d239409d426b602aa0cf9b0c5b1ef39b7d07d6309454fcd.json b/core/lib/dal/.sqlx/query-2fa2ba4a62f79d780d239409d426b602aa0cf9b0c5b1ef39b7d07d6309454fcd.json index 0db6ba6f51b6..1d515edba819 100644 --- a/core/lib/dal/.sqlx/query-2fa2ba4a62f79d780d239409d426b602aa0cf9b0c5b1ef39b7d07d6309454fcd.json +++ b/core/lib/dal/.sqlx/query-2fa2ba4a62f79d780d239409d426b602aa0cf9b0c5b1ef39b7d07d6309454fcd.json @@ -69,7 +69,7 @@ false, false, false, - true, + false, false, false, true, diff --git a/core/lib/dal/.sqlx/query-3ee6c2a87c65eaece7048da53c9f98ded0ad3e59e6de69c2b13d92d8ab1a07dd.json b/core/lib/dal/.sqlx/query-3ee6c2a87c65eaece7048da53c9f98ded0ad3e59e6de69c2b13d92d8ab1a07dd.json index e2a808d41f89..8bab74d20f5c 100644 --- a/core/lib/dal/.sqlx/query-3ee6c2a87c65eaece7048da53c9f98ded0ad3e59e6de69c2b13d92d8ab1a07dd.json +++ b/core/lib/dal/.sqlx/query-3ee6c2a87c65eaece7048da53c9f98ded0ad3e59e6de69c2b13d92d8ab1a07dd.json @@ -17,7 +17,8 @@ "kind": { "Enum": [ 
"ProtocolUpgrades", - "PriorityTransactions" + "PriorityTransactions", + "ChainBatchRoot" ] } } diff --git a/core/lib/dal/.sqlx/query-5503575d9377785894de6cf6139a8d4768c6a803a1a90889e5a1b8254c315231.json b/core/lib/dal/.sqlx/query-5cc172812d228c5b60be8dd8b8eaf99779da93fc8939dc724a49b286c5568129.json similarity index 57% rename from core/lib/dal/.sqlx/query-5503575d9377785894de6cf6139a8d4768c6a803a1a90889e5a1b8254c315231.json rename to core/lib/dal/.sqlx/query-5cc172812d228c5b60be8dd8b8eaf99779da93fc8939dc724a49b286c5568129.json index 5f27c7549b47..c2ccaf3b3d7c 100644 --- a/core/lib/dal/.sqlx/query-5503575d9377785894de6cf6139a8d4768c6a803a1a90889e5a1b8254c315231.json +++ b/core/lib/dal/.sqlx/query-5cc172812d228c5b60be8dd8b8eaf99779da93fc8939dc724a49b286c5568129.json @@ -1,6 +1,6 @@ { "db_name": "PostgreSQL", - "query": "INSERT INTO eth_txs (raw_tx, nonce, tx_type, contract_address, predicted_gas_cost, created_at, updated_at) VALUES ('\\x00', 0, $1, '', 0, now(), now()) RETURNING id", + "query": "INSERT INTO eth_txs (raw_tx, nonce, tx_type, contract_address, predicted_gas_cost, chain_id, created_at, updated_at) VALUES ('\\x00', 0, $1, '', 0, $2, now(), now()) RETURNING id", "describe": { "columns": [ { @@ -11,12 +11,13 @@ ], "parameters": { "Left": [ - "Text" + "Text", + "Int8" ] }, "nullable": [ false ] }, - "hash": "5503575d9377785894de6cf6139a8d4768c6a803a1a90889e5a1b8254c315231" + "hash": "5cc172812d228c5b60be8dd8b8eaf99779da93fc8939dc724a49b286c5568129" } diff --git a/core/lib/dal/.sqlx/query-d3760406b7bf5d14a3fe6cbc9fb3926df634ebf0d8286181fa04884fb747cee8.json b/core/lib/dal/.sqlx/query-9b011c7afa158edd17fe0dc56dad5204831b7ede1a8e3b3c2d441d505c4ca58f.json similarity index 52% rename from core/lib/dal/.sqlx/query-d3760406b7bf5d14a3fe6cbc9fb3926df634ebf0d8286181fa04884fb747cee8.json rename to core/lib/dal/.sqlx/query-9b011c7afa158edd17fe0dc56dad5204831b7ede1a8e3b3c2d441d505c4ca58f.json index ed3270de573e..8de8a19da5f8 100644 --- a/core/lib/dal/.sqlx/query-d3760406b7bf5d14a3fe6cbc9fb3926df634ebf0d8286181fa04884fb747cee8.json +++ b/core/lib/dal/.sqlx/query-9b011c7afa158edd17fe0dc56dad5204831b7ede1a8e3b3c2d441d505c4ca58f.json @@ -1,6 +1,6 @@ { "db_name": "PostgreSQL", - "query": "\n SELECT\n miniblocks.number,\n COALESCE(\n miniblocks.l1_batch_number,\n (\n SELECT\n (MAX(number) + 1)\n FROM\n l1_batches\n WHERE\n is_sealed\n )\n ) AS \"l1_batch_number!\",\n miniblocks.timestamp,\n miniblocks.l1_tx_count,\n miniblocks.l2_tx_count,\n miniblocks.hash AS \"root_hash?\",\n commit_tx.tx_hash AS \"commit_tx_hash?\",\n commit_tx.confirmed_at AS \"committed_at?\",\n prove_tx.tx_hash AS \"prove_tx_hash?\",\n prove_tx.confirmed_at AS \"proven_at?\",\n execute_tx.tx_hash AS \"execute_tx_hash?\",\n execute_tx.confirmed_at AS \"executed_at?\",\n miniblocks.l1_gas_price,\n miniblocks.l2_fair_gas_price,\n miniblocks.fair_pubdata_price,\n miniblocks.bootloader_code_hash,\n miniblocks.default_aa_code_hash,\n l1_batches.evm_emulator_code_hash,\n miniblocks.protocol_version,\n miniblocks.fee_account_address\n FROM\n miniblocks\n LEFT JOIN l1_batches ON miniblocks.l1_batch_number = l1_batches.number\n LEFT JOIN eth_txs_history AS commit_tx\n ON (\n l1_batches.eth_commit_tx_id = commit_tx.eth_tx_id\n AND commit_tx.confirmed_at IS NOT NULL\n )\n LEFT JOIN eth_txs_history AS prove_tx\n ON (\n l1_batches.eth_prove_tx_id = prove_tx.eth_tx_id\n AND prove_tx.confirmed_at IS NOT NULL\n )\n LEFT JOIN eth_txs_history AS execute_tx\n ON (\n l1_batches.eth_execute_tx_id = execute_tx.eth_tx_id\n AND execute_tx.confirmed_at 
IS NOT NULL\n )\n WHERE\n miniblocks.number = $1\n ", + "query": "\n SELECT\n miniblocks.number,\n COALESCE(\n miniblocks.l1_batch_number,\n (\n SELECT\n (MAX(number) + 1)\n FROM\n l1_batches\n WHERE\n is_sealed\n )\n ) AS \"l1_batch_number!\",\n miniblocks.timestamp,\n miniblocks.l1_tx_count,\n miniblocks.l2_tx_count,\n miniblocks.hash AS \"root_hash?\",\n commit_tx.tx_hash AS \"commit_tx_hash?\",\n commit_tx.confirmed_at AS \"committed_at?\",\n commit_tx_data.chain_id AS \"commit_chain_id?\",\n prove_tx.tx_hash AS \"prove_tx_hash?\",\n prove_tx.confirmed_at AS \"proven_at?\",\n prove_tx_data.chain_id AS \"prove_chain_id?\",\n execute_tx.tx_hash AS \"execute_tx_hash?\",\n execute_tx.confirmed_at AS \"executed_at?\",\n execute_tx_data.chain_id AS \"execute_chain_id?\",\n miniblocks.l1_gas_price,\n miniblocks.l2_fair_gas_price,\n miniblocks.fair_pubdata_price,\n miniblocks.bootloader_code_hash,\n miniblocks.default_aa_code_hash,\n l1_batches.evm_emulator_code_hash,\n miniblocks.protocol_version,\n miniblocks.fee_account_address\n FROM\n miniblocks\n LEFT JOIN l1_batches ON miniblocks.l1_batch_number = l1_batches.number\n LEFT JOIN eth_txs_history AS commit_tx\n ON (\n l1_batches.eth_commit_tx_id = commit_tx.eth_tx_id\n AND commit_tx.confirmed_at IS NOT NULL\n )\n LEFT JOIN eth_txs_history AS prove_tx\n ON (\n l1_batches.eth_prove_tx_id = prove_tx.eth_tx_id\n AND prove_tx.confirmed_at IS NOT NULL\n )\n LEFT JOIN eth_txs_history AS execute_tx\n ON (\n l1_batches.eth_execute_tx_id = execute_tx.eth_tx_id\n AND execute_tx.confirmed_at IS NOT NULL\n )\n LEFT JOIN eth_txs AS commit_tx_data\n ON (\n l1_batches.eth_commit_tx_id = commit_tx_data.id\n AND commit_tx_data.confirmed_eth_tx_history_id IS NOT NULL\n )\n LEFT JOIN eth_txs AS prove_tx_data\n ON (\n l1_batches.eth_prove_tx_id = prove_tx_data.id\n AND prove_tx_data.confirmed_eth_tx_history_id IS NOT NULL\n )\n LEFT JOIN eth_txs AS execute_tx_data\n ON (\n l1_batches.eth_execute_tx_id = execute_tx_data.id\n AND execute_tx_data.confirmed_eth_tx_history_id IS NOT NULL\n )\n WHERE\n miniblocks.number = $1\n ", "describe": { "columns": [ { @@ -45,61 +45,76 @@ }, { "ordinal": 8, + "name": "commit_chain_id?", + "type_info": "Int8" + }, + { + "ordinal": 9, "name": "prove_tx_hash?", "type_info": "Text" }, { - "ordinal": 9, + "ordinal": 10, "name": "proven_at?", "type_info": "Timestamp" }, { - "ordinal": 10, + "ordinal": 11, + "name": "prove_chain_id?", + "type_info": "Int8" + }, + { + "ordinal": 12, "name": "execute_tx_hash?", "type_info": "Text" }, { - "ordinal": 11, + "ordinal": 13, "name": "executed_at?", "type_info": "Timestamp" }, { - "ordinal": 12, + "ordinal": 14, + "name": "execute_chain_id?", + "type_info": "Int8" + }, + { + "ordinal": 15, "name": "l1_gas_price", "type_info": "Int8" }, { - "ordinal": 13, + "ordinal": 16, "name": "l2_fair_gas_price", "type_info": "Int8" }, { - "ordinal": 14, + "ordinal": 17, "name": "fair_pubdata_price", "type_info": "Int8" }, { - "ordinal": 15, + "ordinal": 18, "name": "bootloader_code_hash", "type_info": "Bytea" }, { - "ordinal": 16, + "ordinal": 19, "name": "default_aa_code_hash", "type_info": "Bytea" }, { - "ordinal": 17, + "ordinal": 20, "name": "evm_emulator_code_hash", "type_info": "Bytea" }, { - "ordinal": 18, + "ordinal": 21, "name": "protocol_version", "type_info": "Int4" }, { - "ordinal": 19, + "ordinal": 22, "name": "fee_account_address", "type_info": "Bytea" } @@ -118,10 +133,13 @@ false, false, true, + true, false, true, + true, false, true, + true, false, false, true, @@ -132,5 +150,5 @@ false ] 
}, - "hash": "d3760406b7bf5d14a3fe6cbc9fb3926df634ebf0d8286181fa04884fb747cee8" + "hash": "9b011c7afa158edd17fe0dc56dad5204831b7ede1a8e3b3c2d441d505c4ca58f" } diff --git a/core/lib/dal/.sqlx/query-a115f795672787fe25bfaa8fd345094de508af93f4085be7cf3b54b1e8ecdadd.json b/core/lib/dal/.sqlx/query-a115f795672787fe25bfaa8fd345094de508af93f4085be7cf3b54b1e8ecdadd.json index ac7989a5be77..ebe8ce232cfb 100644 --- a/core/lib/dal/.sqlx/query-a115f795672787fe25bfaa8fd345094de508af93f4085be7cf3b54b1e8ecdadd.json +++ b/core/lib/dal/.sqlx/query-a115f795672787fe25bfaa8fd345094de508af93f4085be7cf3b54b1e8ecdadd.json @@ -67,7 +67,7 @@ false, false, false, - true, + false, false, false, true, diff --git a/core/lib/dal/.sqlx/query-c1f9ecf033d609457106189bc4d7928aa933616d2186c13a4e005297b0ad63a7.json b/core/lib/dal/.sqlx/query-c1f9ecf033d609457106189bc4d7928aa933616d2186c13a4e005297b0ad63a7.json new file mode 100644 index 000000000000..90623e77e985 --- /dev/null +++ b/core/lib/dal/.sqlx/query-c1f9ecf033d609457106189bc4d7928aa933616d2186c13a4e005297b0ad63a7.json @@ -0,0 +1,15 @@ +{ + "db_name": "PostgreSQL", + "query": "\n UPDATE\n l1_batches\n SET\n batch_chain_merkle_path = $2\n WHERE\n number = $1\n ", + "describe": { + "columns": [], + "parameters": { + "Left": [ + "Int8", + "Bytea" + ] + }, + "nullable": [] + }, + "hash": "c1f9ecf033d609457106189bc4d7928aa933616d2186c13a4e005297b0ad63a7" +} diff --git a/core/lib/dal/.sqlx/query-c2c288d268d6b266acbfc1058bc55a360f8ae12b6378f8168c000d668d6489d0.json b/core/lib/dal/.sqlx/query-c2c288d268d6b266acbfc1058bc55a360f8ae12b6378f8168c000d668d6489d0.json new file mode 100644 index 000000000000..751d272b0b0e --- /dev/null +++ b/core/lib/dal/.sqlx/query-c2c288d268d6b266acbfc1058bc55a360f8ae12b6378f8168c000d668d6489d0.json @@ -0,0 +1,22 @@ +{ + "db_name": "PostgreSQL", + "query": "\n SELECT\n l2_l1_merkle_root\n FROM\n l1_batches\n WHERE\n number = $1\n ", + "describe": { + "columns": [ + { + "ordinal": 0, + "name": "l2_l1_merkle_root", + "type_info": "Bytea" + } + ], + "parameters": { + "Left": [ + "Int8" + ] + }, + "nullable": [ + true + ] + }, + "hash": "c2c288d268d6b266acbfc1058bc55a360f8ae12b6378f8168c000d668d6489d0" +} diff --git a/core/lib/dal/.sqlx/query-c2c40d5aae2e0276de453c78a39ce5a6cca1524adfe99b0cb35662746479dcc1.json b/core/lib/dal/.sqlx/query-c2c40d5aae2e0276de453c78a39ce5a6cca1524adfe99b0cb35662746479dcc1.json index 61832d25fd24..5e2ea45e0bc2 100644 --- a/core/lib/dal/.sqlx/query-c2c40d5aae2e0276de453c78a39ce5a6cca1524adfe99b0cb35662746479dcc1.json +++ b/core/lib/dal/.sqlx/query-c2c40d5aae2e0276de453c78a39ce5a6cca1524adfe99b0cb35662746479dcc1.json @@ -11,7 +11,8 @@ "kind": { "Enum": [ "ProtocolUpgrades", - "PriorityTransactions" + "PriorityTransactions", + "ChainBatchRoot" ] } } diff --git a/core/lib/dal/.sqlx/query-a88b113b5dc06ac990a66202b3c05e2c2f10d5cbdb03d02c3c541f7eaa1f58a6.json b/core/lib/dal/.sqlx/query-cd6ab0aea6e1f72c58c189e098be5d1cd01521f82e2962c3feebac395caef36f.json similarity index 51% rename from core/lib/dal/.sqlx/query-a88b113b5dc06ac990a66202b3c05e2c2f10d5cbdb03d02c3c541f7eaa1f58a6.json rename to core/lib/dal/.sqlx/query-cd6ab0aea6e1f72c58c189e098be5d1cd01521f82e2962c3feebac395caef36f.json index 28ffcc5ae468..1af3384a2d9f 100644 --- a/core/lib/dal/.sqlx/query-a88b113b5dc06ac990a66202b3c05e2c2f10d5cbdb03d02c3c541f7eaa1f58a6.json +++ b/core/lib/dal/.sqlx/query-cd6ab0aea6e1f72c58c189e098be5d1cd01521f82e2962c3feebac395caef36f.json @@ -1,6 +1,6 @@ { "db_name": "PostgreSQL", - "query": "\n WITH\n mb AS (\n SELECT\n l1_gas_price,\n 
l2_fair_gas_price,\n fair_pubdata_price\n FROM\n miniblocks\n WHERE\n l1_batch_number = $1\n LIMIT\n 1\n )\n \n SELECT\n l1_batches.number,\n l1_batches.timestamp,\n l1_batches.l1_tx_count,\n l1_batches.l2_tx_count,\n l1_batches.hash AS \"root_hash?\",\n commit_tx.tx_hash AS \"commit_tx_hash?\",\n commit_tx.confirmed_at AS \"committed_at?\",\n prove_tx.tx_hash AS \"prove_tx_hash?\",\n prove_tx.confirmed_at AS \"proven_at?\",\n execute_tx.tx_hash AS \"execute_tx_hash?\",\n execute_tx.confirmed_at AS \"executed_at?\",\n mb.l1_gas_price,\n mb.l2_fair_gas_price,\n mb.fair_pubdata_price,\n l1_batches.bootloader_code_hash,\n l1_batches.default_aa_code_hash,\n l1_batches.evm_emulator_code_hash\n FROM\n l1_batches\n INNER JOIN mb ON TRUE\n LEFT JOIN eth_txs_history AS commit_tx\n ON (\n l1_batches.eth_commit_tx_id = commit_tx.eth_tx_id\n AND commit_tx.confirmed_at IS NOT NULL\n )\n LEFT JOIN eth_txs_history AS prove_tx\n ON (\n l1_batches.eth_prove_tx_id = prove_tx.eth_tx_id\n AND prove_tx.confirmed_at IS NOT NULL\n )\n LEFT JOIN eth_txs_history AS execute_tx\n ON (\n l1_batches.eth_execute_tx_id = execute_tx.eth_tx_id\n AND execute_tx.confirmed_at IS NOT NULL\n )\n WHERE\n l1_batches.number = $1\n ", + "query": "\n WITH\n mb AS (\n SELECT\n l1_gas_price,\n l2_fair_gas_price,\n fair_pubdata_price\n FROM\n miniblocks\n WHERE\n l1_batch_number = $1\n LIMIT\n 1\n )\n \n SELECT\n l1_batches.number,\n l1_batches.timestamp,\n l1_batches.l1_tx_count,\n l1_batches.l2_tx_count,\n l1_batches.hash AS \"root_hash?\",\n commit_tx.tx_hash AS \"commit_tx_hash?\",\n commit_tx.confirmed_at AS \"committed_at?\",\n commit_tx_data.chain_id AS \"commit_chain_id?\",\n prove_tx.tx_hash AS \"prove_tx_hash?\",\n prove_tx.confirmed_at AS \"proven_at?\",\n prove_tx_data.chain_id AS \"prove_chain_id?\",\n execute_tx.tx_hash AS \"execute_tx_hash?\",\n execute_tx.confirmed_at AS \"executed_at?\",\n execute_tx_data.chain_id AS \"execute_chain_id?\",\n mb.l1_gas_price,\n mb.l2_fair_gas_price,\n mb.fair_pubdata_price,\n l1_batches.bootloader_code_hash,\n l1_batches.default_aa_code_hash,\n l1_batches.evm_emulator_code_hash\n FROM\n l1_batches\n INNER JOIN mb ON TRUE\n LEFT JOIN eth_txs_history AS commit_tx\n ON (\n l1_batches.eth_commit_tx_id = commit_tx.eth_tx_id\n AND commit_tx.confirmed_at IS NOT NULL\n )\n LEFT JOIN eth_txs_history AS prove_tx\n ON (\n l1_batches.eth_prove_tx_id = prove_tx.eth_tx_id\n AND prove_tx.confirmed_at IS NOT NULL\n )\n LEFT JOIN eth_txs_history AS execute_tx\n ON (\n l1_batches.eth_execute_tx_id = execute_tx.eth_tx_id\n AND execute_tx.confirmed_at IS NOT NULL\n )\n LEFT JOIN eth_txs AS commit_tx_data\n ON (\n l1_batches.eth_commit_tx_id = commit_tx_data.id\n AND commit_tx_data.confirmed_eth_tx_history_id IS NOT NULL\n )\n LEFT JOIN eth_txs AS prove_tx_data\n ON (\n l1_batches.eth_prove_tx_id = prove_tx_data.id\n AND prove_tx_data.confirmed_eth_tx_history_id IS NOT NULL\n )\n LEFT JOIN eth_txs AS execute_tx_data\n ON (\n l1_batches.eth_execute_tx_id = execute_tx_data.id\n AND execute_tx_data.confirmed_eth_tx_history_id IS NOT NULL\n )\n WHERE\n l1_batches.number = $1\n ", "describe": { "columns": [ { @@ -40,51 +40,66 @@ }, { "ordinal": 7, + "name": "commit_chain_id?", + "type_info": "Int8" + }, + { + "ordinal": 8, "name": "prove_tx_hash?", "type_info": "Text" }, { - "ordinal": 8, + "ordinal": 9, "name": "proven_at?", "type_info": "Timestamp" }, { - "ordinal": 9, + "ordinal": 10, + "name": "prove_chain_id?", + "type_info": "Int8" + }, + { + "ordinal": 11, "name": "execute_tx_hash?", "type_info": "Text" 
}, { - "ordinal": 10, + "ordinal": 12, "name": "executed_at?", "type_info": "Timestamp" }, { - "ordinal": 11, + "ordinal": 13, + "name": "execute_chain_id?", + "type_info": "Int8" + }, + { + "ordinal": 14, "name": "l1_gas_price", "type_info": "Int8" }, { - "ordinal": 12, + "ordinal": 15, "name": "l2_fair_gas_price", "type_info": "Int8" }, { - "ordinal": 13, + "ordinal": 16, "name": "fair_pubdata_price", "type_info": "Int8" }, { - "ordinal": 14, + "ordinal": 17, "name": "bootloader_code_hash", "type_info": "Bytea" }, { - "ordinal": 15, + "ordinal": 18, "name": "default_aa_code_hash", "type_info": "Bytea" }, { - "ordinal": 16, + "ordinal": 19, "name": "evm_emulator_code_hash", "type_info": "Bytea" } @@ -102,10 +117,13 @@ true, false, true, + true, false, true, + true, false, true, + true, false, false, true, @@ -114,5 +132,5 @@ true ] }, - "hash": "a88b113b5dc06ac990a66202b3c05e2c2f10d5cbdb03d02c3c541f7eaa1f58a6" + "hash": "cd6ab0aea6e1f72c58c189e098be5d1cd01521f82e2962c3feebac395caef36f" } diff --git a/core/lib/dal/.sqlx/query-cf3c7b918a3f82476543841d4dc5393ec02458104c483a2023b24881ae0c6716.json b/core/lib/dal/.sqlx/query-cf3c7b918a3f82476543841d4dc5393ec02458104c483a2023b24881ae0c6716.json deleted file mode 100644 index 59bfa4858c02..000000000000 --- a/core/lib/dal/.sqlx/query-cf3c7b918a3f82476543841d4dc5393ec02458104c483a2023b24881ae0c6716.json +++ /dev/null @@ -1,23 +0,0 @@ -{ - "db_name": "PostgreSQL", - "query": "\n SELECT\n COUNT(*) AS \"count!\"\n FROM\n (\n SELECT\n *\n FROM\n storage_logs\n WHERE\n hashed_key = $1\n AND miniblock_number <= COALESCE(\n (\n SELECT\n MAX(number)\n FROM\n miniblocks\n ),\n (\n SELECT\n miniblock_number\n FROM\n snapshot_recovery\n )\n )\n ORDER BY\n miniblock_number DESC,\n operation_number DESC\n LIMIT\n 1\n ) sl\n WHERE\n sl.value != $2\n ", - "describe": { - "columns": [ - { - "ordinal": 0, - "name": "count!", - "type_info": "Int8" - } - ], - "parameters": { - "Left": [ - "Bytea", - "Bytea" - ] - }, - "nullable": [ - null - ] - }, - "hash": "cf3c7b918a3f82476543841d4dc5393ec02458104c483a2023b24881ae0c6716" -} diff --git a/core/lib/dal/.sqlx/query-cf8aaa95e3e8c376b6083c7015753e30af54675ce58273cbb29312e6e88cbdf5.json b/core/lib/dal/.sqlx/query-cf8aaa95e3e8c376b6083c7015753e30af54675ce58273cbb29312e6e88cbdf5.json new file mode 100644 index 000000000000..b79441ab4d63 --- /dev/null +++ b/core/lib/dal/.sqlx/query-cf8aaa95e3e8c376b6083c7015753e30af54675ce58273cbb29312e6e88cbdf5.json @@ -0,0 +1,22 @@ +{ + "db_name": "PostgreSQL", + "query": "\n SELECT eth_txs.chain_id\n FROM l1_batches\n JOIN eth_txs ON eth_txs.id = l1_batches.eth_commit_tx_id\n WHERE\n number = $1\n ", + "describe": { + "columns": [ + { + "ordinal": 0, + "name": "chain_id", + "type_info": "Int8" + } + ], + "parameters": { + "Left": [ + "Int8" + ] + }, + "nullable": [ + true + ] + }, + "hash": "cf8aaa95e3e8c376b6083c7015753e30af54675ce58273cbb29312e6e88cbdf5" +} diff --git a/core/lib/dal/.sqlx/query-f516657dd48332522a5580e26c509fb7e3baa5ae84bd5e010008f8972e1a7f98.json b/core/lib/dal/.sqlx/query-f516657dd48332522a5580e26c509fb7e3baa5ae84bd5e010008f8972e1a7f98.json new file mode 100644 index 000000000000..9f7de50539be --- /dev/null +++ b/core/lib/dal/.sqlx/query-f516657dd48332522a5580e26c509fb7e3baa5ae84bd5e010008f8972e1a7f98.json @@ -0,0 +1,28 @@ +{ + "db_name": "PostgreSQL", + "query": "\n SELECT\n number, l2_l1_merkle_root\n FROM\n l1_batches\n JOIN eth_txs ON eth_txs.id = l1_batches.eth_execute_tx_id\n WHERE\n batch_chain_merkle_path IS NOT NULL\n AND chain_id = $1\n ORDER BY number\n ", 
+ "describe": { + "columns": [ + { + "ordinal": 0, + "name": "number", + "type_info": "Int8" + }, + { + "ordinal": 1, + "name": "l2_l1_merkle_root", + "type_info": "Bytea" + } + ], + "parameters": { + "Left": [ + "Int8" + ] + }, + "nullable": [ + false, + true + ] + }, + "hash": "f516657dd48332522a5580e26c509fb7e3baa5ae84bd5e010008f8972e1a7f98" +} diff --git a/core/lib/dal/migrations/20241011081834_batch_chain_merkle_path.down.sql b/core/lib/dal/migrations/20241011081834_batch_chain_merkle_path.down.sql new file mode 100644 index 000000000000..da7142b8f819 --- /dev/null +++ b/core/lib/dal/migrations/20241011081834_batch_chain_merkle_path.down.sql @@ -0,0 +1,2 @@ +ALTER TABLE l1_batches + DROP COLUMN batch_chain_merkle_path BYTEA; diff --git a/core/lib/dal/migrations/20241011081834_batch_chain_merkle_path.up.sql b/core/lib/dal/migrations/20241011081834_batch_chain_merkle_path.up.sql new file mode 100644 index 000000000000..8b133f70904b --- /dev/null +++ b/core/lib/dal/migrations/20241011081834_batch_chain_merkle_path.up.sql @@ -0,0 +1,5 @@ +ALTER TABLE l1_batches + ADD COLUMN batch_chain_merkle_path BYTEA; + +-- postgres doesn't allow dropping enum variant, so nothing is done in down.sql +ALTER TYPE event_type ADD VALUE 'ChainBatchRoot'; diff --git a/core/lib/dal/src/blocks_dal.rs b/core/lib/dal/src/blocks_dal.rs index 943aa12caf75..c0e58f91c51e 100644 --- a/core/lib/dal/src/blocks_dal.rs +++ b/core/lib/dal/src/blocks_dal.rs @@ -21,9 +21,9 @@ use zksync_types::{ }, commitment::{L1BatchCommitmentArtifacts, L1BatchWithMetadata}, fee_model::BatchFeeInput, - l2_to_l1_log::UserL2ToL1Log, + l2_to_l1_log::{BatchAndChainMerklePath, UserL2ToL1Log}, writes::TreeWrite, - Address, Bloom, L1BatchNumber, L2BlockNumber, ProtocolVersionId, H256, U256, + Address, Bloom, L1BatchNumber, L2BlockNumber, ProtocolVersionId, SLChainId, H256, U256, }; use zksync_vm_interface::CircuitStatistic; @@ -1980,6 +1980,150 @@ impl BlocksDal<'_, '_> { Ok(Some((H256::from_slice(&hash), row.timestamp as u64))) } + pub async fn get_l1_batch_local_root( + &mut self, + number: L1BatchNumber, + ) -> DalResult> { + let Some(row) = sqlx::query!( + r#" + SELECT + local_root + FROM + l1_batches + WHERE + number = $1 + "#, + i64::from(number.0) + ) + .instrument("get_l1_batch_local_root") + .with_arg("number", &number) + .fetch_optional(self.storage) + .await? + else { + return Ok(None); + }; + let Some(local_root) = row.local_root else { + return Ok(None); + }; + Ok(Some(H256::from_slice(&local_root))) + } + + pub async fn get_l1_batch_l2_l1_merkle_root( + &mut self, + number: L1BatchNumber, + ) -> DalResult> { + let Some(row) = sqlx::query!( + r#" + SELECT + l2_l1_merkle_root + FROM + l1_batches + WHERE + number = $1 + "#, + i64::from(number.0) + ) + .instrument("get_l1_batch_l2_l1_merkle_root") + .with_arg("number", &number) + .fetch_optional(self.storage) + .await? + else { + return Ok(None); + }; + let Some(l2_l1_merkle_root) = row.l2_l1_merkle_root else { + return Ok(None); + }; + Ok(Some(H256::from_slice(&l2_l1_merkle_root))) + } + + pub async fn get_l1_batch_chain_merkle_path( + &mut self, + number: L1BatchNumber, + ) -> DalResult> { + let Some(row) = sqlx::query!( + r#" + SELECT + batch_chain_merkle_path + FROM + l1_batches + WHERE + number = $1 + "#, + i64::from(number.0) + ) + .instrument("get_l1_batch_chain_merkle_path") + .with_arg("number", &number) + .fetch_optional(self.storage) + .await? 
+ else { + return Ok(None); + }; + let Some(batch_chain_merkle_path) = row.batch_chain_merkle_path else { + return Ok(None); + }; + Ok(Some( + bincode::deserialize(&batch_chain_merkle_path).unwrap(), + )) + } + + pub async fn get_executed_batch_roots_on_sl( + &mut self, + sl_chain_id: SLChainId, + ) -> DalResult> { + let result = sqlx::query!( + r#" + SELECT + number, l2_l1_merkle_root + FROM + l1_batches + JOIN eth_txs ON eth_txs.id = l1_batches.eth_execute_tx_id + WHERE + batch_chain_merkle_path IS NOT NULL + AND chain_id = $1 + ORDER BY number + "#, + sl_chain_id.0 as i64 + ) + .instrument("get_executed_batch_roots_on_sl") + .with_arg("sl_chain_id", &sl_chain_id) + .fetch_all(self.storage) + .await? + .into_iter() + .map(|row| { + let number = L1BatchNumber(row.number as u32); + let root = H256::from_slice(&row.l2_l1_merkle_root.unwrap()); + (number, root) + }) + .collect(); + Ok(result) + } + + pub async fn set_batch_chain_merkle_path( + &mut self, + number: L1BatchNumber, + proof: BatchAndChainMerklePath, + ) -> DalResult<()> { + let proof_bin = bincode::serialize(&proof).unwrap(); + sqlx::query!( + r#" + UPDATE + l1_batches + SET + batch_chain_merkle_path = $2 + WHERE + number = $1 + "#, + i64::from(number.0), + &proof_bin + ) + .instrument("set_batch_chain_merkle_path") + .with_arg("number", &number) + .execute(self.storage) + .await?; + + Ok(()) + } + pub async fn get_l1_batch_metadata( &mut self, number: L1BatchNumber, diff --git a/core/lib/dal/src/blocks_web3_dal.rs b/core/lib/dal/src/blocks_web3_dal.rs index 4699eac4e5eb..c7187a83006b 100644 --- a/core/lib/dal/src/blocks_web3_dal.rs +++ b/core/lib/dal/src/blocks_web3_dal.rs @@ -679,10 +679,13 @@ impl BlocksWeb3Dal<'_, '_> { miniblocks.hash AS "root_hash?", commit_tx.tx_hash AS "commit_tx_hash?", commit_tx.confirmed_at AS "committed_at?", + commit_tx_data.chain_id AS "commit_chain_id?", prove_tx.tx_hash AS "prove_tx_hash?", prove_tx.confirmed_at AS "proven_at?", + prove_tx_data.chain_id AS "prove_chain_id?", execute_tx.tx_hash AS "execute_tx_hash?", execute_tx.confirmed_at AS "executed_at?", + execute_tx_data.chain_id AS "execute_chain_id?", miniblocks.l1_gas_price, miniblocks.l2_fair_gas_price, miniblocks.fair_pubdata_price, @@ -709,6 +712,21 @@ impl BlocksWeb3Dal<'_, '_> { l1_batches.eth_execute_tx_id = execute_tx.eth_tx_id AND execute_tx.confirmed_at IS NOT NULL ) + LEFT JOIN eth_txs AS commit_tx_data + ON ( + l1_batches.eth_commit_tx_id = commit_tx_data.id + AND commit_tx_data.confirmed_eth_tx_history_id IS NOT NULL + ) + LEFT JOIN eth_txs AS prove_tx_data + ON ( + l1_batches.eth_prove_tx_id = prove_tx_data.id + AND prove_tx_data.confirmed_eth_tx_history_id IS NOT NULL + ) + LEFT JOIN eth_txs AS execute_tx_data + ON ( + l1_batches.eth_execute_tx_id = execute_tx_data.id + AND execute_tx_data.confirmed_eth_tx_history_id IS NOT NULL + ) WHERE miniblocks.number = $1 "#, @@ -752,10 +770,13 @@ impl BlocksWeb3Dal<'_, '_> { l1_batches.hash AS "root_hash?", commit_tx.tx_hash AS "commit_tx_hash?", commit_tx.confirmed_at AS "committed_at?", + commit_tx_data.chain_id AS "commit_chain_id?", prove_tx.tx_hash AS "prove_tx_hash?", prove_tx.confirmed_at AS "proven_at?", + prove_tx_data.chain_id AS "prove_chain_id?", execute_tx.tx_hash AS "execute_tx_hash?", execute_tx.confirmed_at AS "executed_at?", + execute_tx_data.chain_id AS "execute_chain_id?", mb.l1_gas_price, mb.l2_fair_gas_price, mb.fair_pubdata_price, @@ -780,6 +801,21 @@ impl BlocksWeb3Dal<'_, '_> { l1_batches.eth_execute_tx_id = execute_tx.eth_tx_id AND execute_tx.confirmed_at IS 
NOT NULL ) + LEFT JOIN eth_txs AS commit_tx_data + ON ( + l1_batches.eth_commit_tx_id = commit_tx_data.id + AND commit_tx_data.confirmed_eth_tx_history_id IS NOT NULL + ) + LEFT JOIN eth_txs AS prove_tx_data + ON ( + l1_batches.eth_prove_tx_id = prove_tx_data.id + AND prove_tx_data.confirmed_eth_tx_history_id IS NOT NULL + ) + LEFT JOIN eth_txs AS execute_tx_data + ON ( + l1_batches.eth_execute_tx_id = execute_tx_data.id + AND execute_tx_data.confirmed_eth_tx_history_id IS NOT NULL + ) WHERE l1_batches.number = $1 "#, diff --git a/core/lib/dal/src/eth_sender_dal.rs b/core/lib/dal/src/eth_sender_dal.rs index 4ce76547ac9b..7739bcdb325d 100644 --- a/core/lib/dal/src/eth_sender_dal.rs +++ b/core/lib/dal/src/eth_sender_dal.rs @@ -2,11 +2,14 @@ use std::{convert::TryFrom, str::FromStr}; use anyhow::Context as _; use sqlx::types::chrono::{DateTime, Utc}; -use zksync_db_connection::{connection::Connection, interpolate_query, match_query_as}; +use zksync_db_connection::{ + connection::Connection, error::DalResult, instrument::InstrumentExt, interpolate_query, + match_query_as, +}; use zksync_types::{ aggregated_operations::AggregatedActionType, eth_sender::{EthTx, EthTxBlobSidecar, TxHistory, TxHistoryToSend}, - Address, L1BatchNumber, H256, U256, + Address, L1BatchNumber, SLChainId, H256, U256, }; use crate::{ @@ -421,6 +424,27 @@ impl EthSenderDal<'_, '_> { Ok(()) } + pub async fn get_batch_commit_chain_id( + &mut self, + batch_number: L1BatchNumber, + ) -> DalResult> { + let row = sqlx::query!( + r#" + SELECT eth_txs.chain_id + FROM l1_batches + JOIN eth_txs ON eth_txs.id = l1_batches.eth_commit_tx_id + WHERE + number = $1 + "#, + i64::from(batch_number.0), + ) + .instrument("get_batch_commit_chain_id") + .with_arg("batch_number", &batch_number) + .fetch_optional(self.storage) + .await?; + Ok(row.and_then(|r| r.chain_id).map(|id| SLChainId(id as u64))) + } + pub async fn get_confirmed_tx_hash_by_eth_tx_id( &mut self, eth_tx_id: u32, @@ -464,6 +488,7 @@ impl EthSenderDal<'_, '_> { tx_type: AggregatedActionType, tx_hash: H256, confirmed_at: DateTime, + sl_chain_id: Option, ) -> anyhow::Result<()> { let mut transaction = self .storage @@ -489,10 +514,11 @@ impl EthSenderDal<'_, '_> { // Insert general tx descriptor. 
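+        // Editor's note, not part of the original change: the row inserted
+        // below is a stub descriptor (`raw_tx`, `nonce`, `contract_address`
+        // and `predicted_gas_cost` hold placeholder values); the new
+        // `chain_id` column records the settlement layer on which the tx was
+        // confirmed and can later be read back, e.g.:
+        //
+        //     let chain_id: Option<SLChainId> = storage
+        //         .eth_sender_dal()
+        //         .get_batch_commit_chain_id(L1BatchNumber(42))
+        //         .await?;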
let eth_tx_id = sqlx::query_scalar!( - "INSERT INTO eth_txs (raw_tx, nonce, tx_type, contract_address, predicted_gas_cost, created_at, updated_at) \ - VALUES ('\\x00', 0, $1, '', 0, now(), now()) \ + "INSERT INTO eth_txs (raw_tx, nonce, tx_type, contract_address, predicted_gas_cost, chain_id, created_at, updated_at) \ + VALUES ('\\x00', 0, $1, '', 0, $2, now(), now()) \ RETURNING id", - tx_type.to_string() + tx_type.to_string(), + sl_chain_id.map(|chain_id| chain_id.0 as i64) ) .fetch_one(transaction.conn()) .await?; diff --git a/core/lib/dal/src/eth_watcher_dal.rs b/core/lib/dal/src/eth_watcher_dal.rs index 062ad47219d8..84061a03650d 100644 --- a/core/lib/dal/src/eth_watcher_dal.rs +++ b/core/lib/dal/src/eth_watcher_dal.rs @@ -12,6 +12,7 @@ pub struct EthWatcherDal<'a, 'c> { pub enum EventType { ProtocolUpgrades, PriorityTransactions, + ChainBatchRoot, } impl EthWatcherDal<'_, '_> { diff --git a/core/lib/dal/src/models/storage_block.rs b/core/lib/dal/src/models/storage_block.rs index 159ed71cc3e9..54635932a1af 100644 --- a/core/lib/dal/src/models/storage_block.rs +++ b/core/lib/dal/src/models/storage_block.rs @@ -10,7 +10,7 @@ use zksync_types::{ commitment::{L1BatchCommitmentMode, L1BatchMetaParameters, L1BatchMetadata, PubdataParams}, fee_model::{BatchFeeInput, L1PeggedBatchFeeModelInput, PubdataIndependentBatchFeeModelInput}, l2_to_l1_log::{L2ToL1Log, SystemL2ToL1Log, UserL2ToL1Log}, - Address, Bloom, L1BatchNumber, L2BlockNumber, ProtocolVersionId, H256, + Address, Bloom, L1BatchNumber, L2BlockNumber, ProtocolVersionId, SLChainId, H256, }; /// This is the gas limit that was used inside blocks before we started saving block gas limit into the database. @@ -317,10 +317,13 @@ pub(crate) struct StorageBlockDetails { pub root_hash: Option>, pub commit_tx_hash: Option, pub committed_at: Option, + pub commit_chain_id: Option, pub prove_tx_hash: Option, pub proven_at: Option, + pub prove_chain_id: Option, pub execute_tx_hash: Option, pub executed_at: Option, + pub execute_chain_id: Option, // L1 gas price assumed in the corresponding batch pub l1_gas_price: i64, // L2 gas price assumed in the corresponding batch @@ -355,6 +358,7 @@ impl From for api::BlockDetails { committed_at: details .committed_at .map(|committed_at| DateTime::from_naive_utc_and_offset(committed_at, Utc)), + commit_chain_id: details.commit_chain_id.map(|id| SLChainId(id as u64)), prove_tx_hash: details .prove_tx_hash .as_deref() @@ -362,6 +366,7 @@ impl From for api::BlockDetails { proven_at: details .proven_at .map(|proven_at| DateTime::::from_naive_utc_and_offset(proven_at, Utc)), + prove_chain_id: details.prove_chain_id.map(|id| SLChainId(id as u64)), execute_tx_hash: details .execute_tx_hash .as_deref() @@ -369,6 +374,7 @@ impl From for api::BlockDetails { executed_at: details .executed_at .map(|executed_at| DateTime::::from_naive_utc_and_offset(executed_at, Utc)), + execute_chain_id: details.execute_chain_id.map(|id| SLChainId(id as u64)), l1_gas_price: details.l1_gas_price as u64, l2_fair_gas_price: details.l2_fair_gas_price as u64, fair_pubdata_price: details.fair_pubdata_price.map(|x| x as u64), @@ -399,10 +405,13 @@ pub(crate) struct StorageL1BatchDetails { pub root_hash: Option>, pub commit_tx_hash: Option, pub committed_at: Option, + pub commit_chain_id: Option, pub prove_tx_hash: Option, pub proven_at: Option, + pub prove_chain_id: Option, pub execute_tx_hash: Option, pub executed_at: Option, + pub execute_chain_id: Option, pub l1_gas_price: i64, pub l2_fair_gas_price: i64, pub fair_pubdata_price: Option, @@ 
-432,6 +441,7 @@ impl From for api::L1BatchDetails { committed_at: details .committed_at .map(|committed_at| DateTime::::from_naive_utc_and_offset(committed_at, Utc)), + commit_chain_id: details.commit_chain_id.map(|id| SLChainId(id as u64)), prove_tx_hash: details .prove_tx_hash .as_deref() @@ -439,6 +449,7 @@ impl From for api::L1BatchDetails { proven_at: details .proven_at .map(|proven_at| DateTime::::from_naive_utc_and_offset(proven_at, Utc)), + prove_chain_id: details.prove_chain_id.map(|id| SLChainId(id as u64)), execute_tx_hash: details .execute_tx_hash .as_deref() @@ -446,6 +457,7 @@ impl From for api::L1BatchDetails { executed_at: details .executed_at .map(|executed_at| DateTime::::from_naive_utc_and_offset(executed_at, Utc)), + execute_chain_id: details.execute_chain_id.map(|id| SLChainId(id as u64)), l1_gas_price: details.l1_gas_price as u64, l2_fair_gas_price: details.l2_fair_gas_price as u64, fair_pubdata_price: details.fair_pubdata_price.map(|x| x as u64), diff --git a/core/lib/env_config/src/eth_sender.rs b/core/lib/env_config/src/eth_sender.rs index 0fd61fd173b6..7029881b0c6d 100644 --- a/core/lib/env_config/src/eth_sender.rs +++ b/core/lib/env_config/src/eth_sender.rs @@ -23,6 +23,9 @@ impl FromEnv for L1Secrets { .context("ETH_CLIENT_WEB3_URL")? .parse() .context("ETH_CLIENT_WEB3_URL")?, + gateway_url: std::env::var("ETH_CLIENT_GATEWAY_WEB3_URL") + .ok() + .map(|url| url.parse().expect("ETH_CLIENT_GATEWAY_WEB3_URL")), }) } } @@ -97,6 +100,7 @@ mod tests { }, L1Secrets { l1_rpc_url: "http://127.0.0.1:8545".to_string().parse().unwrap(), + gateway_url: Some("http://127.0.0.1:8547".to_string().parse().unwrap()), }, ) } @@ -140,6 +144,7 @@ mod tests { ETH_WATCH_CONFIRMATIONS_FOR_ETH_EVENT="0" ETH_WATCH_ETH_NODE_POLL_INTERVAL="300" ETH_CLIENT_WEB3_URL="http://127.0.0.1:8545" + ETH_CLIENT_GATEWAY_WEB3_URL="http://127.0.0.1:8547" "#; lock.set_env(config); diff --git a/core/lib/eth_client/src/clients/http/query.rs b/core/lib/eth_client/src/clients/http/query.rs index de115cf6e7a6..f0f745a8b4db 100644 --- a/core/lib/eth_client/src/clients/http/query.rs +++ b/core/lib/eth_client/src/clients/http/query.rs @@ -425,7 +425,7 @@ where let chunk_size = chunk_end - chunk_start + 1; let fee_history = client - .fee_history(U64::from(chunk_size).into(), chunk_end.into(), vec![]) + .fee_history(U64::from(chunk_size).into(), chunk_end.into(), None) .rpc_context("fee_history") .with_arg("chunk_size", &chunk_size) .with_arg("block", &chunk_end) diff --git a/core/lib/eth_client/src/clients/mock.rs b/core/lib/eth_client/src/clients/mock.rs index 8e81b6c6f209..2b0100a39dc6 100644 --- a/core/lib/eth_client/src/clients/mock.rs +++ b/core/lib/eth_client/src/clients/mock.rs @@ -10,7 +10,7 @@ use zksync_types::{ api::FeeHistory, ethabi, web3::{self, contract::Tokenize, BlockId}, - Address, L1ChainId, L2ChainId, SLChainId, EIP_4844_TX_TYPE, H160, H256, U256, U64, + Address, L2ChainId, SLChainId, EIP_4844_TX_TYPE, H160, H256, U256, U64, }; use zksync_web3_decl::client::{MockClient, MockClientBuilder, Network, L1, L2}; @@ -237,6 +237,7 @@ pub struct MockSettlementLayerBuilder { non_ordering_confirmations: bool, inner: Arc>, call_handler: Box, + chain_id: u64, _network: PhantomData, } @@ -267,6 +268,7 @@ impl Default for MockSettlementLayerBuilder { call_handler: Box::new(|call, block_id| { panic!("Unexpected eth_call: {call:?}, {block_id:?}"); }), + chain_id: 9, _network: PhantomData, } } @@ -315,6 +317,10 @@ impl MockSettlementLayerBuilder { } } + pub fn with_chain_id(self, chain_id: u64) -> Self { + Self 
{ chain_id, ..self } + } + fn get_block_by_number( fee_history: &[BaseFees], block: web3::BlockNumber, @@ -449,12 +455,12 @@ fn l2_eth_fee_history( impl SupportedMockSLNetwork for L1 { fn build_client(builder: MockSettlementLayerBuilder) -> MockClient { - const CHAIN_ID: L1ChainId = L1ChainId(9); - let base_fee_history = builder.base_fee_history.clone(); + let chain_id = builder.chain_id; + let net = SLChainId(builder.chain_id).into(); builder - .build_client_inner(CHAIN_ID.0, CHAIN_ID.into()) + .build_client_inner(chain_id, net) .method( "eth_feeHistory", move |block_count: U64, newest_block: web3::BlockNumber, _: Option>| { @@ -467,12 +473,12 @@ impl SupportedMockSLNetwork for L1 { impl SupportedMockSLNetwork for L2 { fn build_client(builder: MockSettlementLayerBuilder) -> MockClient { - let chain_id: L2ChainId = 9u64.try_into().unwrap(); - let base_fee_history = builder.base_fee_history.clone(); + let chain_id = builder.chain_id; + let net = L2ChainId::new(builder.chain_id).unwrap().into(); builder - .build_client_inner(chain_id.as_u64(), chain_id.into()) + .build_client_inner(chain_id, net) .method( "eth_feeHistory", move |block_count: U64, newest_block: web3::BlockNumber, _: Option>| { diff --git a/core/lib/l1_contract_interface/src/i_executor/methods/commit_batches.rs b/core/lib/l1_contract_interface/src/i_executor/methods/commit_batches.rs index 67819f7d7ccd..b883cbdbabc8 100644 --- a/core/lib/l1_contract_interface/src/i_executor/methods/commit_batches.rs +++ b/core/lib/l1_contract_interface/src/i_executor/methods/commit_batches.rs @@ -1,11 +1,11 @@ use zksync_types::{ commitment::{L1BatchCommitmentMode, L1BatchWithMetadata}, - ethabi::Token, + ethabi::{encode, Token}, pubdata_da::PubdataSendingMode, }; use crate::{ - i_executor::structures::{CommitBatchInfo, StoredBatchInfo}, + i_executor::structures::{CommitBatchInfo, StoredBatchInfo, SUPPORTED_ENCODING_VERSION}, Tokenizable, Tokenize, }; @@ -20,13 +20,31 @@ pub struct CommitBatches<'a> { impl Tokenize for CommitBatches<'_> { fn into_tokens(self) -> Vec { + let protocol_version = self.l1_batches[0].header.protocol_version.unwrap(); let stored_batch_info = StoredBatchInfo::from(self.last_committed_l1_batch).into_token(); let l1_batches_to_commit = self .l1_batches .iter() .map(|batch| CommitBatchInfo::new(self.mode, batch, self.pubdata_da).into_token()) .collect(); - - vec![stored_batch_info, Token::Array(l1_batches_to_commit)] + if protocol_version.is_pre_gateway() { + vec![stored_batch_info, Token::Array(l1_batches_to_commit)] + } else { + let encoded_data = encode(&[ + stored_batch_info.clone(), + Token::Array(l1_batches_to_commit.clone()), + ]); + let commit_data = [[SUPPORTED_ENCODING_VERSION].to_vec(), encoded_data] + .concat() + .to_vec(); + vec![ + Token::Uint((self.last_committed_l1_batch.header.number.0 + 1).into()), + Token::Uint( + (self.last_committed_l1_batch.header.number.0 + self.l1_batches.len() as u32) + .into(), + ), + Token::Bytes(commit_data), + ] + } } } diff --git a/core/lib/l1_contract_interface/src/i_executor/methods/execute_batches.rs b/core/lib/l1_contract_interface/src/i_executor/methods/execute_batches.rs index fe5213d8c561..e2e29bfefcfe 100644 --- a/core/lib/l1_contract_interface/src/i_executor/methods/execute_batches.rs +++ b/core/lib/l1_contract_interface/src/i_executor/methods/execute_batches.rs @@ -1,20 +1,61 @@ -use zksync_types::{commitment::L1BatchWithMetadata, ethabi::Token}; +use zksync_types::{ + commitment::{L1BatchWithMetadata, PriorityOpsMerkleProof}, + ethabi::{encode, Token}, +}; -use 
crate::{i_executor::structures::StoredBatchInfo, Tokenizable, Tokenize}; +use crate::{ + i_executor::structures::{StoredBatchInfo, SUPPORTED_ENCODING_VERSION}, + Tokenizable, Tokenize, +}; /// Input required to encode `executeBatches` call. #[derive(Debug, Clone)] pub struct ExecuteBatches { pub l1_batches: Vec, + pub priority_ops_proofs: Vec, } impl Tokenize for &ExecuteBatches { fn into_tokens(self) -> Vec { - vec![Token::Array( - self.l1_batches - .iter() - .map(|batch| StoredBatchInfo::from(batch).into_token()) - .collect(), - )] + let protocol_version = self.l1_batches[0].header.protocol_version.unwrap(); + + if protocol_version.is_pre_gateway() { + vec![Token::Array( + self.l1_batches + .iter() + .map(|batch| StoredBatchInfo::from(batch).into_token()) + .collect(), + )] + } else { + let encoded_data = encode(&[ + Token::Array( + self.l1_batches + .iter() + .map(|batch| StoredBatchInfo::from(batch).into_token()) + .collect(), + ), + Token::Array( + self.priority_ops_proofs + .iter() + .map(|proof| proof.into_token()) + .collect(), + ), + ]); + let execute_data = [[SUPPORTED_ENCODING_VERSION].to_vec(), encoded_data] + .concat() + .to_vec(); + + vec![ + Token::Uint(self.l1_batches[0].header.number.0.into()), + Token::Uint( + self.l1_batches[self.l1_batches.len() - 1] + .header + .number + .0 + .into(), + ), + Token::Bytes(execute_data), + ] + } } } diff --git a/core/lib/l1_contract_interface/src/i_executor/methods/prove_batches.rs b/core/lib/l1_contract_interface/src/i_executor/methods/prove_batches.rs index 935d8a44e0b7..a54cf407d09f 100644 --- a/core/lib/l1_contract_interface/src/i_executor/methods/prove_batches.rs +++ b/core/lib/l1_contract_interface/src/i_executor/methods/prove_batches.rs @@ -1,8 +1,14 @@ use crypto_codegen::serialize_proof; use zksync_prover_interface::outputs::L1BatchProofForL1; -use zksync_types::{commitment::L1BatchWithMetadata, ethabi::Token, U256}; +use zksync_types::{ + commitment::L1BatchWithMetadata, + ethabi::{encode, Token}, +}; -use crate::{i_executor::structures::StoredBatchInfo, Tokenizable, Tokenize}; +use crate::{ + i_executor::structures::{StoredBatchInfo, SUPPORTED_ENCODING_VERSION}, + Tokenizable, Tokenize, +}; /// Input required to encode `proveBatches` call. #[derive(Debug, Clone)] @@ -15,13 +21,14 @@ pub struct ProveBatches { impl Tokenize for &ProveBatches { fn into_tokens(self) -> Vec { - let prev_l1_batch = StoredBatchInfo::from(&self.prev_l1_batch).into_token(); + let prev_l1_batch_info = StoredBatchInfo::from(&self.prev_l1_batch).into_token(); let batches_arg = self .l1_batches .iter() .map(|batch| StoredBatchInfo::from(batch).into_token()) .collect(); let batches_arg = Token::Array(batches_arg); + let protocol_version = self.l1_batches[0].header.protocol_version.unwrap(); if self.should_verify { // currently we only support submitting a single proof @@ -29,40 +36,53 @@ impl Tokenize for &ProveBatches { assert_eq!(self.l1_batches.len(), 1); let L1BatchProofForL1 { - aggregation_result_coords, - scheduler_proof, - .. + scheduler_proof, .. 
} = self.proofs.first().unwrap(); let (_, proof) = serialize_proof(scheduler_proof); - let aggregation_result_coords = if self.l1_batches[0] - .header - .protocol_version - .unwrap() - .is_pre_boojum() - { - Token::Array( - aggregation_result_coords - .iter() - .map(|bytes| Token::Uint(U256::from_big_endian(bytes))) - .collect(), - ) + if protocol_version.is_pre_gateway() { + let proof_input = Token::Tuple(vec![ + Token::Array(Vec::new()), + Token::Array(proof.into_iter().map(Token::Uint).collect()), + ]); + + vec![prev_l1_batch_info, batches_arg, proof_input] } else { - Token::Array(Vec::new()) - }; - let proof_input = Token::Tuple(vec![ - aggregation_result_coords, - Token::Array(proof.into_iter().map(Token::Uint).collect()), - ]); + let proof_input = Token::Array(proof.into_iter().map(Token::Uint).collect()); - vec![prev_l1_batch, batches_arg, proof_input] - } else { + let encoded_data = encode(&[prev_l1_batch_info, batches_arg, proof_input]); + let prove_data = [[SUPPORTED_ENCODING_VERSION].to_vec(), encoded_data] + .concat() + .to_vec(); + + vec![ + Token::Uint((self.prev_l1_batch.header.number.0 + 1).into()), + Token::Uint( + (self.prev_l1_batch.header.number.0 + self.l1_batches.len() as u32).into(), + ), + Token::Bytes(prove_data), + ] + } + } else if protocol_version.is_pre_gateway() { vec![ - prev_l1_batch, + prev_l1_batch_info, batches_arg, Token::Tuple(vec![Token::Array(vec![]), Token::Array(vec![])]), ] + } else { + let encoded_data = encode(&[prev_l1_batch_info, batches_arg, Token::Array(vec![])]); + let prove_data = [[SUPPORTED_ENCODING_VERSION].to_vec(), encoded_data] + .concat() + .to_vec(); + + vec![ + Token::Uint((self.prev_l1_batch.header.number.0 + 1).into()), + Token::Uint( + (self.prev_l1_batch.header.number.0 + self.l1_batches.len() as u32).into(), + ), + Token::Bytes(prove_data), + ] } } } diff --git a/core/lib/l1_contract_interface/src/i_executor/structures/commit_batch_info.rs b/core/lib/l1_contract_interface/src/i_executor/structures/commit_batch_info.rs index 6438aeb7f55c..b0aa0a291826 100644 --- a/core/lib/l1_contract_interface/src/i_executor/structures/commit_batch_info.rs +++ b/core/lib/l1_contract_interface/src/i_executor/structures/commit_batch_info.rs @@ -3,10 +3,10 @@ use zksync_types::{ pre_boojum_serialize_commitments, serialize_commitments, L1BatchCommitmentMode, L1BatchWithMetadata, }, - ethabi::Token, + ethabi::{ParamType, Token}, pubdata_da::PubdataSendingMode, - web3::contract::Error as ContractError, - ProtocolVersionId, U256, + web3::{contract::Error as ContractError, keccak256}, + ProtocolVersionId, H256, U256, }; use crate::{ @@ -15,9 +15,9 @@ use crate::{ }; /// These are used by the L1 Contracts to indicate what DA layer is used for pubdata -const PUBDATA_SOURCE_CALLDATA: u8 = 0; -const PUBDATA_SOURCE_BLOBS: u8 = 1; -const PUBDATA_SOURCE_CUSTOM: u8 = 2; +pub const PUBDATA_SOURCE_CALLDATA: u8 = 0; +pub const PUBDATA_SOURCE_BLOBS: u8 = 1; +pub const PUBDATA_SOURCE_CUSTOM_PRE_GATEWAY: u8 = 2; /// Encoding for `CommitBatchInfo` from `IExecutor.sol` for a contract running in rollup mode. 
#[derive(Debug)] @@ -40,6 +40,21 @@ impl<'a> CommitBatchInfo<'a> { } } + pub fn post_gateway_schema() -> ParamType { + ParamType::Tuple(vec![ + ParamType::Uint(64), // `batch_number` + ParamType::Uint(64), // `timestamp` + ParamType::Uint(64), // `index_repeated_storage_changes` + ParamType::FixedBytes(32), // `new_state_root` + ParamType::Uint(256), // `numberOfLayer1Txs` + ParamType::FixedBytes(32), // `priorityOperationsHash` + ParamType::FixedBytes(32), // `bootloaderHeapInitialContentsHash` + ParamType::FixedBytes(32), // `eventsQueueStateHash` + ParamType::Bytes, // `systemLogs` + ParamType::Bytes, // `operatorDAInput` + ]) + } + fn base_tokens(&self) -> Vec { if self .l1_batch_with_metadata @@ -199,7 +214,7 @@ impl Tokenizable for CommitBatchInfo<'_> { // Here we're not pushing any pubdata on purpose; no pubdata is sent in Validium mode. L1BatchCommitmentMode::Validium => vec![], })); - } else { + } else if protocol_version.is_pre_gateway() { tokens.push(Token::Bytes(match (self.mode, self.pubdata_da) { // Here we're not pushing any pubdata on purpose; no pubdata is sent in Validium mode. ( @@ -211,14 +226,12 @@ impl Tokenizable for CommitBatchInfo<'_> { (L1BatchCommitmentMode::Validium, PubdataSendingMode::Blobs) => { vec![PUBDATA_SOURCE_BLOBS] } - (L1BatchCommitmentMode::Rollup, PubdataSendingMode::Custom) => { panic!("Custom pubdata DA is incompatible with Rollup mode") } (L1BatchCommitmentMode::Validium, PubdataSendingMode::Custom) => { - vec![PUBDATA_SOURCE_CUSTOM] + vec![PUBDATA_SOURCE_CUSTOM_PRE_GATEWAY] } - ( L1BatchCommitmentMode::Rollup, PubdataSendingMode::Calldata | PubdataSendingMode::RelayedL2Calldata, @@ -227,7 +240,8 @@ impl Tokenizable for CommitBatchInfo<'_> { // even if we are not using blobs. let pubdata = self.pubdata_input(); let blob_commitment = KzgInfo::new(&pubdata).to_blob_commitment(); - std::iter::once(PUBDATA_SOURCE_CALLDATA) + [PUBDATA_SOURCE_CALLDATA] + .into_iter() .chain(pubdata) .chain(blob_commitment) .collect() @@ -239,7 +253,86 @@ impl Tokenizable for CommitBatchInfo<'_> { let kzg_info = KzgInfo::new(blob); kzg_info.to_pubdata_commitment() }); - std::iter::once(PUBDATA_SOURCE_BLOBS) + [PUBDATA_SOURCE_BLOBS] + .into_iter() + .chain(pubdata_commitments) + .collect() + } + })); + } else { + let state_diff_hash = self + .l1_batch_with_metadata + .metadata + .state_diff_hash + .expect("Failed to get state_diff_hash from metadata"); + tokens.push(Token::Bytes(match (self.mode, self.pubdata_da) { + // Validiums with custom DA need the inclusion data to be part of operator_da_input + (L1BatchCommitmentMode::Validium, PubdataSendingMode::Custom) => { + let mut operator_da_input: Vec = state_diff_hash.0.into(); + + operator_da_input.extend( + &self + .l1_batch_with_metadata + .metadata + .da_inclusion_data + .clone() + .unwrap_or_default(), + ); + + operator_da_input + } + // Here we're not pushing any pubdata on purpose; no pubdata is sent in Validium mode. 
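+            // Editor's note, an illustration rather than part of the change:
+            // post-gateway, a validium's operator DA input is just the
+            // 32-byte state diff hash; only the `Custom` case above appends
+            // the DA inclusion data after it:
+            //
+            //     operator_da_input = state_diff_hash (32 bytes) [|| inclusion_data]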
+ ( + L1BatchCommitmentMode::Validium, + PubdataSendingMode::Calldata + | PubdataSendingMode::RelayedL2Calldata + | PubdataSendingMode::Blobs, + ) => state_diff_hash.0.into(), + (L1BatchCommitmentMode::Rollup, PubdataSendingMode::Custom) => { + panic!("Custom pubdata DA is incompatible with Rollup mode") + } + ( + L1BatchCommitmentMode::Rollup, + PubdataSendingMode::Calldata | PubdataSendingMode::RelayedL2Calldata, + ) => { + let pubdata = self.pubdata_input(); + + let header = + compose_header_for_l1_commit_rollup(state_diff_hash, pubdata.clone()); + + // We compute and add the blob commitment to the pubdata payload so that we can verify the proof + // even if we are not using blobs. + let blob_commitment = KzgInfo::new(&pubdata).to_blob_commitment(); + header + .into_iter() + .chain([PUBDATA_SOURCE_CALLDATA]) + .chain(pubdata) + .chain(blob_commitment) + .collect() + } + (L1BatchCommitmentMode::Rollup, PubdataSendingMode::Blobs) => { + let pubdata = self.pubdata_input(); + + let header = + compose_header_for_l1_commit_rollup(state_diff_hash, pubdata.clone()); + + let pubdata_commitments: Vec = pubdata + .chunks(ZK_SYNC_BYTES_PER_BLOB) + .flat_map(|blob| { + let kzg_info = KzgInfo::new(blob); + + let blob_commitment = kzg_info.to_pubdata_commitment(); + + // We also append 0s to show that we do not reuse previously published blobs. + blob_commitment + .into_iter() + .chain([0u8; 32]) + .collect::>() + }) + .collect(); + header + .into_iter() + .chain([PUBDATA_SOURCE_BLOBS]) .chain(pubdata_commitments) .collect() } @@ -249,3 +342,36 @@ impl Tokenizable for CommitBatchInfo<'_> { Token::Tuple(tokens) } } + +fn compose_header_for_l1_commit_rollup(state_diff_hash: H256, pubdata: Vec) -> Vec { + // The preimage under the hash `l2DAValidatorOutputHash` is expected to be in the following format: + // - First 32 bytes are the hash of the uncompressed state diff. + // - Then, there is a 32-byte hash of the full pubdata. + // - Then, there is the 1-byte number of blobs published. + // - Then, there are linear hashes of the published blobs, 32 bytes each. + + let mut full_header = vec![]; + + full_header.extend(state_diff_hash.0); + + let mut full_pubdata = pubdata; + let full_pubdata_hash = keccak256(&full_pubdata); + full_header.extend(full_pubdata_hash); + + // Now, we need to calculate the linear hashes of the blobs. + // Firstly, let's pad the pubdata to the size of the blob. 
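+    // Editor's note, a worked example assuming ZK_SYNC_BYTES_PER_BLOB is
+    // 4096 * 31 = 126_976 bytes: 130_000 bytes of pubdata receive 123_952
+    // zero bytes of padding, giving 253_952 bytes, i.e. exactly two blobs;
+    // the header then holds the blob-count byte 0x02 followed by two 32-byte
+    // keccak256 linear hashes.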
+ if full_pubdata.len() % ZK_SYNC_BYTES_PER_BLOB != 0 { + let padding = + vec![0u8; ZK_SYNC_BYTES_PER_BLOB - full_pubdata.len() % ZK_SYNC_BYTES_PER_BLOB]; + full_pubdata.extend(padding); + } + full_header.push((full_pubdata.len() / ZK_SYNC_BYTES_PER_BLOB) as u8); + + full_pubdata + .chunks(ZK_SYNC_BYTES_PER_BLOB) + .for_each(|chunk| { + full_header.extend(keccak256(chunk)); + }); + + full_header +} diff --git a/core/lib/l1_contract_interface/src/i_executor/structures/mod.rs b/core/lib/l1_contract_interface/src/i_executor/structures/mod.rs index aa9872049015..9583e0204f75 100644 --- a/core/lib/l1_contract_interface/src/i_executor/structures/mod.rs +++ b/core/lib/l1_contract_interface/src/i_executor/structures/mod.rs @@ -6,4 +6,12 @@ mod stored_batch_info; #[cfg(test)] mod tests; -pub use self::{commit_batch_info::CommitBatchInfo, stored_batch_info::StoredBatchInfo}; +pub use self::{ + commit_batch_info::{ + CommitBatchInfo, PUBDATA_SOURCE_BLOBS, PUBDATA_SOURCE_CALLDATA, + PUBDATA_SOURCE_CUSTOM_PRE_GATEWAY, + }, + stored_batch_info::StoredBatchInfo, +}; + +pub const SUPPORTED_ENCODING_VERSION: u8 = 0; diff --git a/core/lib/l1_contract_interface/src/i_executor/structures/stored_batch_info.rs b/core/lib/l1_contract_interface/src/i_executor/structures/stored_batch_info.rs index 26f9b30392ea..d2090097dbeb 100644 --- a/core/lib/l1_contract_interface/src/i_executor/structures/stored_batch_info.rs +++ b/core/lib/l1_contract_interface/src/i_executor/structures/stored_batch_info.rs @@ -23,17 +23,17 @@ pub struct StoredBatchInfo { } impl StoredBatchInfo { - fn schema() -> Vec { - vec![ParamType::Tuple(vec![ - ParamType::Uint(64), - ParamType::FixedBytes(32), - ParamType::Uint(64), - ParamType::Uint(256), - ParamType::FixedBytes(32), - ParamType::FixedBytes(32), - ParamType::Uint(256), - ParamType::FixedBytes(32), - ])] + pub fn schema() -> ParamType { + ParamType::Tuple(vec![ + ParamType::Uint(64), // `batch_number` + ParamType::FixedBytes(32), // `batch_hash` + ParamType::Uint(64), // `index_repeated_storage_changes` + ParamType::Uint(256), // `number_of_layer1_txs` + ParamType::FixedBytes(32), // `priority_operations_hash` + ParamType::FixedBytes(32), // `l2_logs_tree_root` + ParamType::Uint(256), // `timestamp` + ParamType::FixedBytes(32), // `commitment` + ]) } /// Encodes the struct into RLP. @@ -43,7 +43,7 @@ impl StoredBatchInfo { /// Decodes the struct from RLP. pub fn decode(rlp: &[u8]) -> anyhow::Result { - let [token] = ethabi::decode_whole(&Self::schema(), rlp)? + let [token] = ethabi::decode_whole(&[Self::schema()], rlp)? .try_into() .unwrap(); Ok(Self::from_token(token)?) diff --git a/core/lib/mini_merkle_tree/src/lib.rs b/core/lib/mini_merkle_tree/src/lib.rs index d34f57999961..fed28edb10c0 100644 --- a/core/lib/mini_merkle_tree/src/lib.rs +++ b/core/lib/mini_merkle_tree/src/lib.rs @@ -159,7 +159,7 @@ where /// Returns the root hash and the Merkle proof for a leaf with the specified 0-based `index`. /// `index` is relative to the leftmost uncached leaf. /// # Panics - /// Panics if `index` is >= than the number of leaves in the tree. + /// Panics if `index` is >= than the number of uncached leaves in the tree. pub fn merkle_root_and_path(&self, index: usize) -> (H256, Vec) { assert!(index < self.hashes.len(), "leaf index out of bounds"); let mut end_path = vec![]; @@ -170,6 +170,15 @@ where ) } + /// Returns the root hash and the Merkle proof for a leaf with the specified 0-based `index`. + /// `index` is an absolute position of the leaf. 
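+    /// For example (editor's illustration): with `start_index == 4`, the
+    /// absolute index 6 corresponds to local index 2 of `merkle_root_and_path`.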
+ /// # Panics + /// Panics if leaf at `index` is cached or if `index` is >= than the number of leaves in the tree. + pub fn merkle_root_and_path_by_absolute_index(&self, index: usize) -> (H256, Vec) { + assert!(index >= self.start_index, "leaf is cached"); + self.merkle_root_and_path(index - self.start_index) + } + /// Returns the root hash and the Merkle proofs for a range of leafs. /// The range is 0..length, where `0` is the leftmost untrimmed leaf (i.e. leaf under `self.start_index`). /// # Panics @@ -280,6 +289,16 @@ where hashes[0] } + + /// Returns the number of non-empty merkle tree elements. + pub fn length(&self) -> usize { + self.start_index + self.hashes.len() + } + + /// Returns index of the leftmost untrimmed leaf. + pub fn start_index(&self) -> usize { + self.start_index + } } fn tree_depth_by_size(tree_size: usize) -> usize { @@ -314,6 +333,12 @@ impl HashEmptySubtree<[u8; 88]> for KeccakHasher { } } +impl HashEmptySubtree<[u8; 96]> for KeccakHasher { + fn empty_leaf_hash(&self) -> H256 { + self.hash_bytes(&[0_u8; 96]) + } +} + fn compute_empty_tree_hashes(empty_leaf_hash: H256) -> Vec { iter::successors(Some(empty_leaf_hash), |hash| { Some(KeccakHasher.compress(hash, hash)) diff --git a/core/lib/mini_merkle_tree/src/tests.rs b/core/lib/mini_merkle_tree/src/tests.rs index 5aadab1d4e6f..51a684d945fd 100644 --- a/core/lib/mini_merkle_tree/src/tests.rs +++ b/core/lib/mini_merkle_tree/src/tests.rs @@ -4,6 +4,10 @@ use std::collections::VecDeque; use super::*; +fn empty_subtree_root(depth: usize) -> H256 { + >::empty_subtree_hash(&KeccakHasher, depth) +} + #[test] fn tree_depth_is_computed_correctly() { const TREE_SIZES_AND_DEPTHS: &[(usize, usize)] = &[ @@ -29,7 +33,7 @@ fn hash_of_empty_tree_with_single_item() { let len = 1 << depth; println!("checking tree with {len} items"); let tree = MiniMerkleTree::new(iter::once([0_u8; 88]), Some(len)); - assert_eq!(tree.merkle_root(), KeccakHasher.empty_subtree_hash(depth)); + assert_eq!(tree.merkle_root(), empty_subtree_root(depth)); } } @@ -42,10 +46,10 @@ fn hash_of_large_empty_tree_with_multiple_items() { let tree = MiniMerkleTree::new(leaves.clone(), Some(tree_size)); let depth = tree_depth_by_size(tree_size); - assert_eq!(tree.merkle_root(), KeccakHasher.empty_subtree_hash(depth)); + assert_eq!(tree.merkle_root(), empty_subtree_root(depth)); let tree = MiniMerkleTree::new(leaves, None); let depth = tree_depth_by_size(tree_size); - assert_eq!(tree.merkle_root(), KeccakHasher.empty_subtree_hash(depth)); + assert_eq!(tree.merkle_root(), empty_subtree_root(depth)); } } @@ -285,20 +289,20 @@ fn merkle_proofs_are_valid_in_very_small_trees() { fn dynamic_merkle_tree_growth() { let mut tree = MiniMerkleTree::new(iter::empty(), None); assert_eq!(tree.binary_tree_size, 1); - assert_eq!(tree.merkle_root(), KeccakHasher.empty_subtree_hash(0)); + assert_eq!(tree.merkle_root(), empty_subtree_root(0)); for len in 1..=8_usize { tree.push([0; 88]); assert_eq!(tree.binary_tree_size, len.next_power_of_two()); let depth = tree_depth_by_size(tree.binary_tree_size); - assert_eq!(tree.merkle_root(), KeccakHasher.empty_subtree_hash(depth)); + assert_eq!(tree.merkle_root(), empty_subtree_root(depth)); } // Shouldn't shrink after caching tree.trim_start(6); assert_eq!(tree.binary_tree_size, 8); - assert_eq!(tree.merkle_root(), KeccakHasher.empty_subtree_hash(3)); + assert_eq!(tree.merkle_root(), empty_subtree_root(3)); } #[test] diff --git a/core/lib/multivm/src/versions/shadow/tests.rs b/core/lib/multivm/src/versions/shadow/tests.rs index 
4466d96a96b7..28ea79e4d3e4 100644 --- a/core/lib/multivm/src/versions/shadow/tests.rs +++ b/core/lib/multivm/src/versions/shadow/tests.rs @@ -3,11 +3,11 @@ use std::{collections::HashSet, rc::Rc}; use zksync_types::{writes::StateDiffRecord, StorageKey, Transaction, H256, U256}; -use zksync_vm_interface::pubdata::PubdataBuilder; use super::ShadowedFastVm; use crate::{ interface::{ + pubdata::{PubdataBuilder, PubdataInput}, utils::{ShadowMut, ShadowRef}, CurrentExecutionState, L2BlockEnv, VmExecutionResultAndLogs, }, @@ -120,11 +120,30 @@ impl TestedVm for ShadowedFastVm { }); } - fn push_transaction_with_refund(&mut self, tx: Transaction, refund: u64) { - self.get_mut("push_transaction_with_refund", |r| match r { - ShadowMut::Main(vm) => vm.push_transaction_with_refund(tx.clone(), refund), - ShadowMut::Shadow(vm) => vm.push_transaction_with_refund(tx.clone(), refund), - }); + fn push_transaction_with_refund_and_compression( + &mut self, + tx: Transaction, + refund: u64, + compression: bool, + ) { + self.get_mut( + "push_transaction_with_refund_and_compression", + |r| match r { + ShadowMut::Main(vm) => { + vm.push_transaction_with_refund_and_compression(tx.clone(), refund, compression) + } + ShadowMut::Shadow(vm) => { + vm.push_transaction_with_refund_and_compression(tx.clone(), refund, compression) + } + }, + ); + } + + fn pubdata_input(&self) -> PubdataInput { + self.get("pubdata_input", |r| match r { + ShadowRef::Main(vm) => vm.pubdata_input(), + ShadowRef::Shadow(vm) => vm.pubdata_input(), + }) } } @@ -297,6 +316,15 @@ mod is_write_initial { } } +mod l1_messenger { + use crate::versions::testonly::l1_messenger::*; + + #[test] + fn rollup_da_output_hash_match() { + test_rollup_da_output_hash_match::(); + } +} + mod l1_tx_execution { use crate::versions::testonly::l1_tx_execution::*; diff --git a/core/lib/multivm/src/versions/testonly/default_aa.rs b/core/lib/multivm/src/versions/testonly/default_aa.rs index a05d42d3918f..d42b7f3b2152 100644 --- a/core/lib/multivm/src/versions/testonly/default_aa.rs +++ b/core/lib/multivm/src/versions/testonly/default_aa.rs @@ -34,7 +34,11 @@ pub(crate) fn test_default_aa_interaction() { let result = vm.vm.execute(InspectExecutionMode::OneTx); assert!(!result.result.is_failed(), "Transaction wasn't successful"); - vm.vm.finish_batch(default_pubdata_builder()); + let batch_result = vm.vm.finish_batch(default_pubdata_builder()); + assert!( + !batch_result.block_tip_execution_result.result.is_failed(), + "Batch tip execution wasn't successful" + ); vm.vm.get_current_execution_state(); diff --git a/core/lib/multivm/src/versions/testonly/l1_messenger.rs b/core/lib/multivm/src/versions/testonly/l1_messenger.rs new file mode 100644 index 000000000000..7659a1cee7c8 --- /dev/null +++ b/core/lib/multivm/src/versions/testonly/l1_messenger.rs @@ -0,0 +1,175 @@ +use std::rc::Rc; + +use ethabi::Token; +use zksync_contracts::l1_messenger_contract; +use zksync_test_account::TxType; +use zksync_types::{ + address_to_h256, u256_to_h256, web3::keccak256, Address, Execute, ProtocolVersionId, + L1_MESSENGER_ADDRESS, U256, +}; +use zksync_vm_interface::SystemEnv; + +// Bytecode is temporary hardcoded, should be removed after contracts are merged. 
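+// Editor's note, an assumption not stated in the patch: the hex blob below is
+// the compiled zkEVM bytecode of an L2 rollup DA validator contract; the test
+// only needs it deployed at a fixed address so that the batch-tip DA system
+// logs checked below are produced.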
+fn l2_rollup_da_validator_bytecode() -> Vec { + hex::decode("0012000000000002000a000000000002000000000301001900000060043002700000012703400197000100000031035500020000003103550003000000310355000400000031035500050000003103550006000000310355000700000031035500080000003103550009000000310355000a000000310355000b000000310355000c000000310355000d000000310355000e000000310355000f00000031035500100000003103550011000000010355000001270040019d0000008004000039000000400040043f00000001002001900000005d0000c13d000000040030008c000000fe0000413d000000000201043b00000129022001970000012a0020009c000000fe0000c13d000000a40030008c000000fe0000413d0000000002000416000000000002004b000000fe0000c13d0000008402100370000000000202043b000300000002001d0000012b0020009c000000fe0000213d00000003020000290000002302200039000000000032004b000000fe0000813d00000003020000290000000402200039000000000421034f000000000604043b0000012b0060009c000000fe0000213d0000000304000029000700240040003d0000000704600029000000000034004b000000fe0000213d0000004403100370000000000303043b000400000003001d0000006403100370000000000303043b000200000003001d000000040060008c000000fe0000413d0000002002200039000000000221034f000000000202043b000000e00220027000000058022000c90000000804200039000000000064004b000000fe0000213d00000003022000290000002802200039000000000121034f000000000101043b000500e00010027a000600000006001d000000650000c13d00000000090000190000000403000029000000000039004b000000f10000c13d0000014e0040009c000000fb0000a13d0000014001000041000000000010043f0000001101000039000000040010043f00000138010000410000049a000104300000000001000416000000000001004b000000fe0000c13d0000002001000039000001000010044300000120000004430000012801000041000004990001042e000000000800001900000000090000190000014f0040009c000000570000813d0000000403400039000000000063004b000000fe0000213d00000007024000290000001101000367000000000221034f000000000502043b000000e004500270000000000034001a000000570000413d0000000007340019000000000067004b000000fe0000213d00000000020004140000012c0050009c0000007b0000813d0000000003000031000000840000013d000000070330002900000127053001970001000000510355000000000034001a000000570000413d0000000003340019000000000330007b000000570000413d000000000151034f000a00000009001d000800000008001d000900000007001d000001270330019700010000003103e50000012d0020009c000003c20000813d00000000013103df000000c0022002100000012e022001970000012c022001c700010000002103b500000000012103af0000801002000039049804930000040f0000000003010019000000600330027000000127033001970000000100200190000002450000613d0000001f0230003900000130052001970000003f025000390000013104200197000000400200043d0000000004420019000000000024004b000000000600003900000001060040390000012b0040009c0000023f0000213d00000001006001900000023f0000c13d000000400040043f0000000004320436000000000005004b000000b10000613d0000000005540019000000000600003100000011066003670000000007040019000000006806043c0000000007870436000000000057004b000000ad0000c13d0000012f063001980000000005640019000000ba0000613d000000000701034f0000000008040019000000007907043c0000000008980436000000000058004b000000b60000c13d0000001f03300190000000c70000613d000000000161034f0000000303300210000000000605043300000000063601cf000000000636022f000000000101043b0000010003300089000000000131022f00000000013101cf000000000161019f00000000001504350000000001020433000000200010008c0000000a05000029000004210000c13d0000000002040433000000400100043d000000400310003900000000002304350000002002100039000000000052043500000040030000390000000000310435000001320010009c0000023f0000213d0000006003100039000000400030043f000001270020009c0000012702008041000000400220021000000
00001010433000001270010009c00000127010080410000006001100210000000000121019f0000000002000414000001270020009c0000012702008041000000c002200210000000000112019f00000133011001c700008010020000390498048e0000040f0000000100200190000000060600002900000009040000290000000808000029000000fe0000613d000000000901043b0000000108800039000000050080006c000000670000413d000000520000013d000000400100043d000000440210003900000000009204350000002402100039000000000032043500000134020000410000000000210435000000040210003900000000000204350000042d0000013d0000000403400039000000000063004b000001000000a13d00000000010000190000049a0001043000000007014000290000001101100367000000000101043b000400e00010027a0000025d0000c13d000000000900001900000000050300190000000003090019000000020090006c000002f20000c13d000000060050006c000002fd0000813d00000007015000290000001102000367000000000112034f000000000101043b000000f801100270000000010010008c000003030000c13d00000000060500190000014e0060009c0000000604000029000000570000213d0000000403600039000000000043004b000000fe0000213d00000003016000290000002501100039000000000112034f000000000101043b000000000043004b000002fd0000813d000000e8011002700000000703300029000000000432034f0000000503500039000000000404043b000000000031001a0000000607000029000000570000413d000a00000031001d0000000a0070006b000000fe0000213d000000050600008a0000000a0060006b000000570000213d0000000a050000290000000405500039000000000075004b000000fe0000213d0000000a08000029000300070080002d0000000306200360000000000606043b000400000006001d000000e006600272000500000006001d00090110006000cd0000013f0000613d000000090800002900000005068000fa000001100060008c000000570000c13d000000090050002a000000570000413d000200090050002d000000020070006c000000fe0000413d000000f804400270000000400a00043d0000004406a00039000000800700003900000000007604350000002406a000390000000000460435000001410400004100000000004a043500000007055000290000008404a00039000000090900002900000000009404350000000404a0003900000005060000290000000000640435000000000752034f0000001f0890018f00080000000a001d000000a405a0003900000142099001980000000006950019000001610000613d000000000a07034f000000000b05001900000000ac0a043c000000000bcb043600000000006b004b0000015d0000c13d0000000703300029000000000008004b0000016f0000613d000000000797034f0000000308800210000000000906043300000000098901cf000000000989022f000000000707043b0000010008800089000000000787022f00000000078701cf000000000797019f00000000007604350000000907000029000000000675001900000000000604350000001f06700039000001430660019700000000066500190000000004460049000000080500002900000064055000390000000000450435000000000432034f0000001f0510018f000000000216043600000144061001980000000003620019000001850000613d000000000704034f0000000008020019000000007907043c0000000008980436000000000038004b000001810000c13d000000000005004b000001920000613d000000000464034f0000000305500210000000000603043300000000065601cf000000000656022f000000000404043b0000010005500089000000000454022f00000000045401cf000000000464019f0000000000430435000000000312001900000000000304350000001f011000390000014501100197000000080300002900000000013100490000000001210019000001270010009c00000127010080410000006001100210000001270030009c000001270200004100000000020340190000004002200210000000000121019f0000000002000414000001270020009c0000012702008041000000c002200210000000000112019f0000800e02000039049804890000040f000000000301001900000060033002700000012703300197000000200030008c000000200400003900000000040340190000001f0640018f00000020074001900000000805700029000001b80000613d000000000801034f0000000809000029000000008a08043c0000000009a90436000000000059004b000001b40000c13d00000000
0006004b000001c50000613d000000000771034f0000000306600210000000000805043300000000086801cf000000000868022f000000000707043b0000010006600089000000000767022f00000000066701cf000000000686019f00000000006504350000000100200190000003480000613d0000001f01400039000000600110018f0000000802100029000000000012004b00000000010000390000000101004039000100000002001d0000012b0020009c0000023f0000213d00000001001001900000023f0000c13d0000000101000029000000400010043f000000200030008c0000000604000029000000fe0000413d00000008010000290000000001010433000800000001001d00000004010000290000012c0010009c000001e10000413d000000090200002900000005012000fa000001100010008c000000570000c13d0000000103000029000000440130003900000024023000390000000403300039000000020440006c000003660000c13d000001460400004100000001050000290000000000450435000000200400003900000000004304350000000a04000029000000000042043500000150034001980000001f0440018f000000000231001900000007050000290000001105500367000001fa0000613d000000000605034f0000000007010019000000006806043c0000000007870436000000000027004b000001f60000c13d000000000004004b000002070000613d000000000335034f0000000304400210000000000502043300000000054501cf000000000545022f000000000303043b0000010004400089000000000343022f00000000034301cf000000000353019f00000000003204350000000a030000290000001f023000390000015002200197000000000131001900000000000104350000004401200039000001270010009c000001270100804100000060011002100000000102000029000001270020009c00000127020080410000004002200210000000000112019f0000000002000414000001270020009c0000012702008041000000c002200210000000000112019f00008011020000390498048e0000040f000000000301001900000060033002700000001f0430018f0000012f0530019700000127033001970000000100200190000003720000613d0000000102500029000000000005004b0000022c0000613d000000000601034f0000000107000029000000006806043c0000000007870436000000000027004b000002280000c13d000000000004004b000002390000613d000000000151034f0000000304400210000000000502043300000000054501cf000000000545022f000000000101043b0000010004400089000000000141022f00000000014101cf000000000151019f00000000001204350000001f0130003900000130011001970000000101100029000900000001001d0000012b0010009c0000038a0000a13d0000014001000041000000000010043f0000004101000039000000040010043f00000138010000410000049a000104300000001f0430018f0000012f023001980000024e0000613d000000000501034f0000000006000019000000005705043c0000000006760436000000000026004b0000024a0000c13d000000000004004b0000025b0000613d000000000121034f0000000304400210000000000502043300000000054501cf000000000545022f000000000101043b0000010004400089000000000141022f00000000014101cf000000000151019f000000000012043500000060013002100000049a00010430000000000800001900000000090000190000014e0030009c000000570000213d0000000402300039000000000062004b000000fe0000213d00000007033000290000001101000367000000000331034f000000000303043b000000e00a30027000000000002a001a000000570000413d00000000072a0019000000000067004b000000fe0000213d0000013600300198000003130000c13d000001390030009c000003190000813d0000013a003001980000031f0000613d000000070420002900000127034001970000000002000414000100000031035500000000004a001a000000570000413d00000000044a0019000000000440007b000000570000413d00090000000a001d000a00000009001d000500000008001d000800000007001d000000000131034f000001270340019700010000003103e5000001270020009c000003c20000213d00000000013103df000000c0022002100000012e022001970000012c022001c700010000002103b500000000012103af0000000202000039049804930000040f00000000030100190000006003300270000001270330019700000001002001900000032a0000613d0000001f0230003900000130052001970000003f0250003900000131042
00197000000400200043d0000000004420019000000000024004b000000000600003900000001060040390000012b0040009c0000023f0000213d00000001006001900000023f0000c13d000000400040043f0000000004320436000000000005004b000000090a000029000002ad0000613d0000000005540019000000000600003100000011066003670000000007040019000000006806043c0000000007870436000000000057004b000002a90000c13d0000012f063001980000000005640019000002b60000613d000000000701034f0000000008040019000000007907043c0000000008980436000000000058004b000002b20000c13d0000001f03300190000002c30000613d000000000161034f0000000303300210000000000605043300000000063601cf000000000636022f000000000101043b0000010003300089000000000131022f00000000013101cf000000000161019f0000000000150435000000400100043d0000000002020433000000200020008c0000000a05000029000003420000c13d00000000020404330000013d02200197000000db03a002100000013e03300197000000000223019f0000013f022001c7000000400310003900000000002304350000002002100039000000000052043500000040030000390000000000310435000001320010009c0000023f0000213d0000006003100039000000400030043f000001270020009c000001270200804100000040022002100000000001010433000001270010009c00000127010080410000006001100210000000000121019f0000000002000414000001270020009c0000012702008041000000c002200210000000000112019f00000133011001c700008010020000390498048e0000040f0000000100200190000000060600002900000008030000290000000508000029000000fe0000613d000000000901043b0000000108800039000000040080006c0000025f0000413d000001060000013d000000400100043d0000004402100039000000000032043500000024021000390000000203000029000000000032043500000134020000410000000000210435000000040210003900000001030000390000042c0000013d0000014001000041000000000010043f0000003201000039000000040010043f00000138010000410000049a00010430000000400200043d0000004403200039000000000013043500000024012000390000000103000039000000000031043500000134010000410000000000120435000000040120003900000002030000390000000000310435000001270020009c0000012702008041000000400120021000000135011001c70000049a00010430000000400100043d0000013702000041000000000021043500000004021000390000000203000039000003240000013d000000400100043d0000013702000041000000000021043500000004021000390000000103000039000003240000013d000000400100043d00000137020000410000000000210435000000040210003900000003030000390000000000320435000001270010009c0000012701008041000000400110021000000138011001c70000049a000104300000001f0430018f0000012f02300198000003330000613d000000000501034f0000000006000019000000005705043c0000000006760436000000000026004b0000032f0000c13d000000000004004b000003400000613d000000000121034f0000000304400210000000000502043300000000054501cf000000000545022f000000000101043b0000010004400089000000000141022f00000000014101cf000000000151019f000000000012043500000060013002100000049a0001043000000044021000390000013b03000041000000000032043500000024021000390000001903000039000004270000013d0000001f0530018f0000012f06300198000000400200043d0000000004620019000003530000613d000000000701034f0000000008020019000000007907043c0000000008980436000000000048004b0000034f0000c13d000000000005004b000003600000613d000000000161034f0000000305500210000000000604043300000000065601cf000000000656022f000000000101043b0000010005500089000000000151022f00000000015101cf000000000161019f00000000001404350000006001300210000001270020009c00000127020080410000004002200210000000000112019f0000049a000104300000013405000041000000010600002900000000005604350000000305000039000000000053043500000000000204350000000000410435000001270060009c0000012706008041000000400160021000000135011001c70000049a00010430000000400200043d000000000652001900000000000500
4b0000037c0000613d000000000701034f0000000008020019000000007907043c0000000008980436000000000068004b000003780000c13d000000000004004b000003600000613d000000000151034f0000000304400210000000000506043300000000054501cf000000000545022f000000000101043b0000010004400089000000000141022f00000000014101cf000000000151019f0000000000160435000003600000013d0000000901000029000000400010043f000000200030008c000000fe0000413d000000010100002900000000010104330000012b0010009c000000fe0000213d000000010230002900000001011000290000001f03100039000000000023004b000000fe0000813d00000000140104340000012b0040009c0000023f0000213d00000005034002100000003f05300039000001470550019700000009055000290000012b0050009c0000023f0000213d000000400050043f000000090500002900000000004504350000000003130019000000000023004b000000fe0000213d000000000004004b000003ae0000613d0000000902000029000000200220003900000000140104340000000000420435000000000031004b000003a90000413d000000000100041400000011020003670000000a0000006b000003b40000c13d0000000003000031000003be0000013d00000007030000290000012704300197000100000042035500000003050000290000000a0050006c000000570000413d0000000305000029000000000350007b000000570000413d000000000242034f000001270330019700010000003203e5000001270010009c000003c90000a13d000000400100043d00000044021000390000014d03000041000000000032043500000024021000390000000803000039000004270000013d00000000023203df000000c0011002100000012e011001970000012c011001c700010000001203b500000000011203af0000801002000039049804930000040f0000000003010019000000600330027000000127033001970000000100200190000004320000613d0000001f0230003900000130052001970000003f025000390000013104200197000000400200043d0000000004420019000000000024004b000000000600003900000001060040390000012b0040009c0000023f0000213d00000001006001900000023f0000c13d000000400040043f0000000004320436000000000005004b000003ef0000613d0000000005540019000000000600003100000011066003670000000007040019000000006806043c0000000007870436000000000057004b000003eb0000c13d0000001f0530018f0000012f063001980000000003640019000003f90000613d000000000701034f0000000008040019000000007907043c0000000008980436000000000038004b000003f50000c13d000000000005004b000004060000613d000000000161034f0000000305500210000000000603043300000000065601cf000000000656022f000000000101043b0000010005500089000000000151022f00000000015101cf000000000161019f00000000001304350000000001020433000000200010008c000004210000c13d000000400100043d00000009020000290000000002020433000001000020008c0000044a0000413d00000064021000390000014a03000041000000000032043500000044021000390000014b0300004100000000003204350000002402100039000000250300003900000000003204350000013c020000410000000000210435000000040210003900000020030000390000000000320435000001270010009c000001270100804100000040011002100000014c011001c70000049a00010430000000400100043d00000044021000390000014803000041000000000032043500000024021000390000001f0300003900000000003204350000013c020000410000000000210435000000040210003900000020030000390000000000320435000001270010009c0000012701008041000000400110021000000135011001c70000049a000104300000001f0430018f0000012f023001980000043b0000613d000000000501034f0000000006000019000000005705043c0000000006760436000000000026004b000004370000c13d000000000004004b000004480000613d000000000121034f0000000304400210000000000502043300000000054501cf000000000545022f000000000101043b0000010004400089000000000141022f00000000014101cf000000000151019f000000000012043500000060013002100000049a000104300000000003040433000000f80220021000000060041000390000000000240435000000400210003900000000003204350000002002100039000000080300002900000000003204350
00000610310003900000009040000290000000004040433000000000004004b000004610000613d000000000500001900000009060000290000002006600039000900000006001d000000000606043300000000036304360000000105500039000000000045004b000004590000413d0000000003130049000000200430008a00000000004104350000001f0330003900000150043001970000000003140019000000000043004b000000000400003900000001040040390000012b0030009c0000023f0000213d00000001004001900000023f0000c13d000000400030043f000001270020009c000001270200804100000040022002100000000001010433000001270010009c00000127010080410000006001100210000000000121019f0000000002000414000001270020009c0000012702008041000000c002200210000000000112019f00000133011001c700008010020000390498048e0000040f0000000100200190000000fe0000613d000000000101043b000000400200043d0000000000120435000001270020009c0000012702008041000000400120021000000149011001c7000004990001042e0000048c002104210000000102000039000000000001042d0000000002000019000000000001042d00000491002104230000000102000039000000000001042d0000000002000019000000000001042d00000496002104230000000102000039000000000001042d0000000002000019000000000001042d0000049800000432000004990001042e0000049a00010430000000000000000000000000000000000000000000000000000000000000000000000000ffffffff0000000200000000000000000000000000000040000001000000000000000000ffffffff0000000000000000000000000000000000000000000000000000000089f9a07200000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000ffffffffffffffff0000000100000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000010000000000000000ffffffff00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000ffffffe000000000000000000000000000000000000000000000000000000001ffffffe000000000000000000000000000000000000000000000000000000003ffffffe0000000000000000000000000000000000000000000000000ffffffffffffff9f02000000000000000000000000000000000000000000000000000000000000007f7b0cf70000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000640000000000000000000000000000001f0000000000000000000000000000000000000000000000000000000043e266b0000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000024000000000000000000000000002000000000000000000000000000000000000000000000000000000000000000000020000000000000000000000000000000000000000000000000000000007368612072657475726e656420696e76616c696420646174610000000000000008c379a00000000000000000000000000000000000000000000000000000000000000000ffffffffffffffffffffffffffffffffffffffffffffffffffffffff06ffffff0000000000000000000000000000000000000000000000000000000001000000000000000000000000000000000000000000000000000000000000004e487b71000000000000000000000000000000000000000000000000000000006006d8b500000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000001ffffffffe0000000000000000000000000000000000000000000000000000003ffffffffe00000000000000000000000000000000000000000000000000000000000ffffe00000000000000000000000000000000000000000000000000000000001ffffe018876a04000000000000000000000000000000000000000000000000000000007fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffe06b656363616b3235362072657475726e656420696e76616c69642064617461000000000000000000000000000000000000000020000000000000000000000000206269747300000000000000000000000000000000000000000000000000000053616665436173743a2076616c756520646f65736e27742066697420696e203800000000000000000000
000000000000000000840000000000000000000000004f766572666c6f77000000000000000000000000000000000000000000000000fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffbfffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffcffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffe00000000000000000000000000000000000000000000000000000000000000000e901f5bd8811df26e614332e2110b9bc002e2cbadd82065c67e102f858079d5a").unwrap() +} + +use super::{default_system_env, read_test_contract, ContractToDeploy, TestedVm, VmTesterBuilder}; +use crate::{ + interface::{ + pubdata::{PubdataBuilder, PubdataInput}, + InspectExecutionMode, TxExecutionMode, VmInterfaceExt, + }, + pubdata_builders::RollupPubdataBuilder, + vm_latest::constants::ZK_SYNC_BYTES_PER_BLOB, +}; + +const L2_DA_VALIDATOR_OUTPUT_HASH_KEY: usize = 5; +const USED_L2_DA_VALIDATOR_ADDRESS_KEY: usize = 6; + +fn encoded_uncompressed_state_diffs(input: &PubdataInput) -> Vec<u8> { + let mut result = vec![]; + for state_diff in input.state_diffs.iter() { + result.extend(state_diff.encode_padded()); + } + result +} + +fn compose_header_for_l1_commit_rollup(input: PubdataInput) -> Vec<u8> { + // The preimage under the hash `l2DAValidatorOutputHash` is expected to be in the following format: + // - First 32 bytes are the hash of the uncompressed state diff. + // - Then, there is a 32-byte hash of the full pubdata. + // - Then, there is the 1-byte number of blobs published. + // - Then, there are linear hashes of the published blobs, 32 bytes each. + + let mut full_header = vec![]; + + let uncompressed_state_diffs = encoded_uncompressed_state_diffs(&input); + let uncompressed_state_diffs_hash = keccak256(&uncompressed_state_diffs); + full_header.extend(uncompressed_state_diffs_hash); + + let pubdata_builder = RollupPubdataBuilder::new(Address::zero()); + let mut full_pubdata = + pubdata_builder.settlement_layer_pubdata(&input, ProtocolVersionId::latest()); + let full_pubdata_hash = keccak256(&full_pubdata); + full_header.extend(full_pubdata_hash); + + // Now, we need to calculate the linear hashes of the blobs. + // Firstly, let's pad the pubdata to the size of the blob. + if full_pubdata.len() % ZK_SYNC_BYTES_PER_BLOB != 0 { + let padding = + vec![0u8; ZK_SYNC_BYTES_PER_BLOB - full_pubdata.len() % ZK_SYNC_BYTES_PER_BLOB]; + full_pubdata.extend(padding); + } + full_header.push((full_pubdata.len() / ZK_SYNC_BYTES_PER_BLOB) as u8); + + full_pubdata + .chunks(ZK_SYNC_BYTES_PER_BLOB) + .for_each(|chunk| { + full_header.extend(keccak256(chunk)); + }); + + full_header +} +
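As a sanity check on the preimage layout documented in the comment above, here is a minimal standalone sketch (not part of the patch; `expected_header_len` is a hypothetical helper) of the header-length arithmetic:

fn expected_header_len(num_blobs: usize) -> usize {
    32        // keccak256 hash of the uncompressed state diffs
        + 32  // keccak256 hash of the full (padded) pubdata
        + 1   // number of published blobs
        + 32 * num_blobs // one linear keccak256 hash per blob
}

fn main() {
    // A batch whose pubdata fits into a single blob yields a 97-byte header.
    assert_eq!(expected_header_len(1), 97);
}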
+pub(crate) fn test_rollup_da_output_hash_match<VM: TestedVm>() { + // In this test, we check whether the L2 DA output hash is as expected. + + let l2_da_validator_address = Address::repeat_byte(0x12); + let system_env = SystemEnv { + version: ProtocolVersionId::Version27, + ..default_system_env() + }; + let mut vm = VmTesterBuilder::new() + .with_empty_in_memory_storage() + .with_execution_mode(TxExecutionMode::VerifyExecute) + .with_rich_accounts(1) + .with_system_env(system_env) + .with_custom_contracts(vec![ContractToDeploy { + bytecode: l2_rollup_da_validator_bytecode(), + address: l2_da_validator_address, + is_account: false, + is_funded: false, + }]) + .build::<VM>(); + + let account = &mut vm.rich_accounts[0]; + + // First, a deploy tx. It should publish the bytecode of the "test contract" in full. + let counter = read_test_contract(); + + let tx = account.get_deploy_tx(&counter, None, TxType::L2).tx; + // We do not use compression here, to have the bytecode published in full. + vm.vm + .push_transaction_with_refund_and_compression(tx, 0, false); + let result = vm.vm.execute(InspectExecutionMode::OneTx); + assert!(!result.result.is_failed(), "Transaction wasn't successful"); + + // Then, we call the l1 messenger to also send an L2->L1 message. + let l1_messenger_contract = l1_messenger_contract(); + let encoded_data = l1_messenger_contract + .function("sendToL1") + .unwrap() + .encode_input(&[Token::Bytes(vec![])]) + .unwrap(); + + let tx = account.get_l2_tx_for_execute( + Execute { + contract_address: Some(L1_MESSENGER_ADDRESS), + calldata: encoded_data, + value: U256::zero(), + factory_deps: vec![], + }, + None, + ); + vm.vm.push_transaction(tx); + let result = vm.vm.execute(InspectExecutionMode::OneTx); + assert!(!result.result.is_failed(), "Transaction wasn't successful"); + + let pubdata_builder = RollupPubdataBuilder::new(l2_da_validator_address); + let batch_result = vm.vm.finish_batch(Rc::new(pubdata_builder)); + assert!( + !batch_result.block_tip_execution_result.result.is_failed(), + "Transaction wasn't successful {:?}", + batch_result.block_tip_execution_result.result + ); + let pubdata_input = vm.vm.pubdata_input(); + + // Just to double check that the test makes sense. + assert!(!pubdata_input.user_logs.is_empty()); + assert!(!pubdata_input.l2_to_l1_messages.is_empty()); + assert!(!pubdata_input.published_bytecodes.is_empty()); + assert!(!pubdata_input.state_diffs.is_empty()); + + let expected_header: Vec<u8> = compose_header_for_l1_commit_rollup(pubdata_input); + + let l2_da_validator_output_hash = batch_result + .block_tip_execution_result + .logs + .system_l2_to_l1_logs + .iter() + .find(|log| log.0.key == u256_to_h256(L2_DA_VALIDATOR_OUTPUT_HASH_KEY.into())) + .unwrap() + .0 + .value; + + assert_eq!( + l2_da_validator_output_hash, + keccak256(&expected_header).into() + ); + + let l2_used_da_validator_address = batch_result + .block_tip_execution_result + .logs + .system_l2_to_l1_logs + .iter() + .find(|log| log.0.key == u256_to_h256(USED_L2_DA_VALIDATOR_ADDRESS_KEY.into())) + .unwrap() + .0 + .value; + + assert_eq!( + l2_used_da_validator_address, + address_to_h256(&l2_da_validator_address) + ); +} diff --git a/core/lib/multivm/src/versions/testonly/mod.rs b/core/lib/multivm/src/versions/testonly/mod.rs index 3377a49064f8..0c618879357a 100644 --- a/core/lib/multivm/src/versions/testonly/mod.rs +++ b/core/lib/multivm/src/versions/testonly/mod.rs @@ -43,6 +43,7 @@ pub(super) mod evm_emulator; pub(super) mod gas_limit; pub(super) mod get_used_contracts; pub(super) mod is_write_initial; +pub(super) mod l1_messenger; pub(super) mod l1_tx_execution; pub(super) mod l2_blocks; pub(super) mod nonce_holder; diff --git a/core/lib/multivm/src/versions/testonly/refunds.rs b/core/lib/multivm/src/versions/testonly/refunds.rs index 874425fc435c..c98d62a86487 100644 --- a/core/lib/multivm/src/versions/testonly/refunds.rs +++ b/core/lib/multivm/src/versions/testonly/refunds.rs @@ -56,8 +56,11 @@ pub(crate) fn test_predetermined_refunded_gas() { .build::<VM>(); assert_eq!(account.address(), vm.rich_accounts[0].address()); - vm.vm - .push_transaction_with_refund(tx.clone(), result.refunds.gas_refunded); + vm.vm.push_transaction_with_refund_and_compression( + tx.clone(), + result.refunds.gas_refunded, + true, + ); let result_with_predefined_refunds = vm .vm @@ -112,7 +115,7 @@ pub(crate) fn test_predetermined_refunded_gas() { let changed_operator_suggested_refund = result.refunds.gas_refunded + 1000; vm.vm - .push_transaction_with_refund(tx,
changed_operator_suggested_refund); + .push_transaction_with_refund_and_compression(tx, changed_operator_suggested_refund, true); let result = vm .vm .finish_batch(default_pubdata_builder()) diff --git a/core/lib/multivm/src/versions/testonly/tester/mod.rs b/core/lib/multivm/src/versions/testonly/tester/mod.rs index 716b9386235f..663942410a41 100644 --- a/core/lib/multivm/src/versions/testonly/tester/mod.rs +++ b/core/lib/multivm/src/versions/testonly/tester/mod.rs @@ -7,17 +7,16 @@ use zksync_types::{ writes::StateDiffRecord, Address, L1BatchNumber, StorageKey, Transaction, H256, U256, }; -use zksync_vm_interface::{ - pubdata::PubdataBuilder, CurrentExecutionState, InspectExecutionMode, VmExecutionResultAndLogs, - VmInterfaceHistoryEnabled, -}; pub(crate) use self::transaction_test_info::{ExpectedError, TransactionTestInfo, TxModifier}; use super::{get_empty_storage, read_test_contract}; use crate::{ interface::{ + pubdata::{PubdataBuilder, PubdataInput}, storage::{InMemoryStorage, StoragePtr, StorageView}, - L1BatchEnv, L2BlockEnv, SystemEnv, TxExecutionMode, VmFactory, VmInterfaceExt, + CurrentExecutionState, InspectExecutionMode, L1BatchEnv, L2BlockEnv, SystemEnv, + TxExecutionMode, VmExecutionResultAndLogs, VmFactory, VmInterfaceExt, + VmInterfaceHistoryEnabled, }, versions::testonly::{ default_l1_batch, default_system_env, make_address_rich, ContractToDeploy, @@ -226,6 +225,14 @@ pub(crate) trait TestedVm: /// Same as `start_new_l2_block`, but should skip consistency checks (to verify they are performed by the bootloader). fn push_l2_block_unchecked(&mut self, block: L2BlockEnv); - /// Pushes a transaction with predefined refund value. - fn push_transaction_with_refund(&mut self, tx: Transaction, refund: u64); + /// Pushes a transaction with predefined refund value and compression. + fn push_transaction_with_refund_and_compression( + &mut self, + tx: Transaction, + refund: u64, + compression: bool, + ); + + /// Returns pubdata input. 
+ fn pubdata_input(&self) -> PubdataInput; } diff --git a/core/lib/multivm/src/versions/vm_fast/tests/l1_messenger.rs b/core/lib/multivm/src/versions/vm_fast/tests/l1_messenger.rs new file mode 100644 index 000000000000..0bd01c7de134 --- /dev/null +++ b/core/lib/multivm/src/versions/vm_fast/tests/l1_messenger.rs @@ -0,0 +1,6 @@ +use crate::{versions::testonly::l1_messenger::test_rollup_da_output_hash_match, vm_fast::Vm}; + +#[test] +fn rollup_da_output_hash_match() { + test_rollup_da_output_hash_match::<Vm<_>>(); +} diff --git a/core/lib/multivm/src/versions/vm_fast/tests/mod.rs b/core/lib/multivm/src/versions/vm_fast/tests/mod.rs index 0a26e895b5a7..fef14671ed12 100644 --- a/core/lib/multivm/src/versions/vm_fast/tests/mod.rs +++ b/core/lib/multivm/src/versions/vm_fast/tests/mod.rs @@ -5,8 +5,9 @@ use zksync_types::{ }; use zksync_vm2::interface::{Event, HeapId, StateInterface}; use zksync_vm_interface::{ - pubdata::PubdataBuilder, storage::ReadStorage, CurrentExecutionState, L2BlockEnv, - VmExecutionMode, VmExecutionResultAndLogs, VmInterface, + pubdata::{PubdataBuilder, PubdataInput}, + storage::ReadStorage, + CurrentExecutionState, L2BlockEnv, VmExecutionMode, VmExecutionResultAndLogs, VmInterface, }; use super::{circuits_tracer::CircuitsTracer, Vm}; @@ -26,6 +27,7 @@ mod evm_emulator; mod gas_limit; mod get_used_contracts; mod is_write_initial; +mod l1_messenger; mod l1_tx_execution; mod l2_blocks; mod nonce_holder; @@ -164,7 +166,16 @@ impl TestedVm for Vm<ImmutableStorageView<InMemoryStorage>> { self.bootloader_state.push_l2_block(block); } - fn push_transaction_with_refund(&mut self, tx: Transaction, refund: u64) { - self.push_transaction_inner(tx, refund, true); + fn push_transaction_with_refund_and_compression( + &mut self, + tx: Transaction, + refund: u64, + compression: bool, + ) { + self.push_transaction_inner(tx, refund, compression); + } + + fn pubdata_input(&self) -> PubdataInput { + self.bootloader_state.get_pubdata_information().clone() + } } diff --git a/core/lib/multivm/src/versions/vm_latest/tests/l1_messenger.rs b/core/lib/multivm/src/versions/vm_latest/tests/l1_messenger.rs new file mode 100644 index 000000000000..f1dade9dd8e6 --- /dev/null +++ b/core/lib/multivm/src/versions/vm_latest/tests/l1_messenger.rs @@ -0,0 +1,9 @@ +use crate::{ + versions::testonly::l1_messenger::test_rollup_da_output_hash_match, + vm_latest::{HistoryEnabled, Vm}, +}; + +#[test] +fn rollup_da_output_hash_match() { + test_rollup_da_output_hash_match::<Vm<_, HistoryEnabled>>(); +} diff --git a/core/lib/multivm/src/versions/vm_latest/tests/mod.rs b/core/lib/multivm/src/versions/vm_latest/tests/mod.rs index fc226f03ecea..07f333a9ed8d 100644 --- a/core/lib/multivm/src/versions/vm_latest/tests/mod.rs +++ b/core/lib/multivm/src/versions/vm_latest/tests/mod.rs @@ -12,11 +12,11 @@ use zksync_types::{ h256_to_u256, writes::StateDiffRecord, StorageKey, StorageValue, Transaction, H256, U256, }; use zksync_utils::bytecode::hash_bytecode; -use zksync_vm_interface::pubdata::PubdataBuilder; use super::{HistoryEnabled, Vm}; use crate::{ interface::{ + pubdata::{PubdataBuilder, PubdataInput}, storage::{InMemoryStorage, ReadStorage, StorageView, WriteStorage}, CurrentExecutionState, L2BlockEnv, VmExecutionMode, VmExecutionResultAndLogs, }, @@ -46,6 +46,7 @@ mod evm_emulator; mod gas_limit; mod get_used_contracts; mod is_write_initial; +mod l1_messenger; mod l1_tx_execution; mod l2_blocks; mod nonce_holder; @@ -183,10 +184,19 @@ impl TestedVm for TestedLatestVm { self.bootloader_state.push_l2_block(block); } - fn push_transaction_with_refund(&mut self, tx: Transaction, refund:
u64) { + fn push_transaction_with_refund_and_compression( + &mut self, + tx: Transaction, + refund: u64, + compression: bool, + ) { let tx = TransactionData::new(tx, false); let overhead = tx.overhead_gas(); - self.push_raw_transaction(tx, overhead, refund, true) + self.push_raw_transaction(tx, overhead, refund, compression) + } + + fn pubdata_input(&self) -> PubdataInput { + self.bootloader_state.get_pubdata_information().clone() + } } diff --git a/core/lib/protobuf_config/src/en.rs b/core/lib/protobuf_config/src/en.rs index 9d1a39310604..700a1f0a8104 100644 --- a/core/lib/protobuf_config/src/en.rs +++ b/core/lib/protobuf_config/src/en.rs @@ -34,11 +34,6 @@ impl ProtoRepr for proto::ExternalNode { main_node_rate_limit_rps: self .main_node_rate_limit_rps .and_then(|a| NonZeroUsize::new(a as usize)), - gateway_url: self - .gateway_url - .as_ref() - .map(|a| a.parse().context("gateway_url")) - .transpose()?, bridge_addresses_refresh_interval_sec: self .bridge_addresses_refresh_interval_sec .and_then(NonZeroU64::new), @@ -57,10 +52,6 @@ .into(), ), main_node_rate_limit_rps: this.main_node_rate_limit_rps.map(|a| a.get() as u64), - gateway_url: this - .gateway_url - .as_ref() - .map(|a| a.expose_str().to_string()), bridge_addresses_refresh_interval_sec: this .bridge_addresses_refresh_interval_sec .map(|a| a.get()), diff --git a/core/lib/protobuf_config/src/proto/config/en.proto b/core/lib/protobuf_config/src/proto/config/en.proto index 69412704ea0f..a8d304c8289e 100644 --- a/core/lib/protobuf_config/src/proto/config/en.proto +++ b/core/lib/protobuf_config/src/proto/config/en.proto @@ -9,6 +9,6 @@ message ExternalNode { optional uint64 l1_chain_id = 3; // required optional uint64 main_node_rate_limit_rps = 6; // optional optional config.genesis.L1BatchCommitDataGeneratorMode l1_batch_commit_data_generator_mode = 7; // optional, default to rollup - optional string gateway_url = 8; // optional + reserved 8; reserved "gateway_url"; optional uint64 bridge_addresses_refresh_interval_sec = 9; // optional } diff --git a/core/lib/protobuf_config/src/proto/config/secrets.proto b/core/lib/protobuf_config/src/proto/config/secrets.proto index 7c9d0f928237..b9622b5d6a2e 100644 --- a/core/lib/protobuf_config/src/proto/config/secrets.proto +++ b/core/lib/protobuf_config/src/proto/config/secrets.proto @@ -11,6 +11,7 @@ message DatabaseSecrets { message L1Secrets { optional string l1_rpc_url = 1; // required + optional string gateway_url = 2; // optional } message ConsensusSecrets { diff --git a/core/lib/protobuf_config/src/secrets.rs b/core/lib/protobuf_config/src/secrets.rs index f5bc10a3e340..ca7218c0b278 100644 --- a/core/lib/protobuf_config/src/secrets.rs +++ b/core/lib/protobuf_config/src/secrets.rs @@ -86,12 +86,22 @@ impl ProtoRepr for proto::L1Secrets { fn read(&self) -> anyhow::Result<Self::Type> { Ok(Self::Type { l1_rpc_url: SensitiveUrl::from_str(required(&self.l1_rpc_url).context("l1_rpc_url")?)?, + gateway_url: self + .gateway_url + .clone() + .map(|url| SensitiveUrl::from_str(&url)) + .transpose() + .context("gateway_url")?, }) } fn build(this: &Self::Type) -> Self { Self { l1_rpc_url: Some(this.l1_rpc_url.expose_str().to_string()), + gateway_url: this + .gateway_url + .as_ref() + .map(|url| url.expose_url().to_string()), } } } diff --git a/core/lib/snapshots_applier/src/tests/utils.rs index cf68d2e181a6..4d16292ebf29 100644 --- a/core/lib/snapshots_applier/src/tests/utils.rs +++
b/core/lib/snapshots_applier/src/tests/utils.rs @@ -195,10 +195,13 @@ fn block_details_base(hash: H256) -> api::BlockDetailsBase { status: api::BlockStatus::Sealed, commit_tx_hash: None, committed_at: None, + commit_chain_id: None, prove_tx_hash: None, proven_at: None, + prove_chain_id: None, execute_tx_hash: None, executed_at: None, + execute_chain_id: None, l1_gas_price: 0, l2_fair_gas_price: 0, fair_pubdata_price: None, diff --git a/core/lib/types/src/api/mod.rs b/core/lib/types/src/api/mod.rs index 5f81e889b537..bd5b4ac5f2ab 100644 --- a/core/lib/types/src/api/mod.rs +++ b/core/lib/types/src/api/mod.rs @@ -6,7 +6,7 @@ use strum::Display; use zksync_basic_types::{ tee_types::TeeType, web3::{AccessList, Bytes, Index}, - Bloom, L1BatchNumber, H160, H256, H64, U256, U64, + Bloom, L1BatchNumber, SLChainId, H160, H256, H64, U256, U64, }; use zksync_contracts::BaseSystemContractsHashes; @@ -196,6 +196,13 @@ pub struct L2ToL1LogProof { pub root: H256, } +#[derive(Debug, Serialize, Deserialize, Clone)] +#[serde(rename_all = "camelCase")] +pub struct ChainAggProof { + pub chain_id_leaf_proof: Vec<H256>, + pub chain_id_leaf_proof_mask: U256, +} + /// A struct with the two default bridge contracts. #[derive(Debug, Clone, Serialize, Deserialize)] #[serde(rename_all = "camelCase")] @@ -466,6 +473,45 @@ impl Log { } } +impl From<Log> for zksync_basic_types::web3::Log { + fn from(log: Log) -> Self { + zksync_basic_types::web3::Log { + address: log.address, + topics: log.topics, + data: log.data, + block_hash: log.block_hash, + block_number: log.block_number, + transaction_hash: log.transaction_hash, + transaction_index: log.transaction_index, + log_index: log.log_index, + transaction_log_index: log.transaction_log_index, + log_type: log.log_type, + removed: log.removed, + block_timestamp: log.block_timestamp, + } + } +} + +impl From<zksync_basic_types::web3::Log> for Log { + fn from(log: zksync_basic_types::web3::Log) -> Self { + Log { + address: log.address, + topics: log.topics, + data: log.data, + block_hash: log.block_hash, + block_number: log.block_number, + transaction_hash: log.transaction_hash, + transaction_index: log.transaction_index, + log_index: log.log_index, + transaction_log_index: log.transaction_log_index, + log_type: log.log_type, + removed: log.removed, + block_timestamp: log.block_timestamp, + l1_batch_number: None, + } + } +} + /// A log produced by a transaction. #[derive(Debug, Clone, PartialEq, Serialize, Deserialize)] #[serde(rename_all = "camelCase")] @@ -819,10 +865,13 @@ pub struct BlockDetailsBase { pub status: BlockStatus, pub commit_tx_hash: Option<H256>, pub committed_at: Option<DateTime<Utc>>, + pub commit_chain_id: Option<SLChainId>, pub prove_tx_hash: Option<H256>, pub proven_at: Option<DateTime<Utc>>, + pub prove_chain_id: Option<SLChainId>, pub execute_tx_hash: Option<H256>, pub executed_at: Option<DateTime<Utc>>, + pub execute_chain_id: Option<SLChainId>, pub l1_gas_price: u64, pub l2_fair_gas_price: u64, // Cost of publishing one byte (in wei).
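For reference, a minimal sketch (not part of the patch; it assumes only `serde`/`serde_json`, and simplifies `H256`/`U256` to strings and integers so the sketch stays self-contained) of how the `camelCase` renaming on `ChainAggProof` looks on the wire:

use serde::{Deserialize, Serialize};

// Simplified stand-in for the `ChainAggProof` API type above.
#[derive(Debug, Serialize, Deserialize)]
#[serde(rename_all = "camelCase")]
struct ChainAggProofSketch {
    chain_id_leaf_proof: Vec<String>,
    chain_id_leaf_proof_mask: u64,
}

fn main() {
    let proof = ChainAggProofSketch {
        chain_id_leaf_proof: vec!["0x01".into(), "0x02".into()],
        chain_id_leaf_proof_mask: 0b10,
    };
    // Prints {"chainIdLeafProof":["0x01","0x02"],"chainIdLeafProofMask":2}
    println!("{}", serde_json::to_string(&proof).unwrap());
}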
diff --git a/core/lib/types/src/commitment/mod.rs b/core/lib/types/src/commitment/mod.rs index 1eba7e7a9ec0..786ce03e671d 100644 --- a/core/lib/types/src/commitment/mod.rs +++ b/core/lib/types/src/commitment/mod.rs @@ -21,6 +21,7 @@ use zksync_system_constants::{ use crate::{ blob::num_blobs_required, block::{L1BatchHeader, L1BatchTreeData}, + ethabi, l2_to_l1_log::{ l2_to_l1_logs_tree_size, parse_system_logs_for_blob_hashes_pre_gateway, L2ToL1Log, SystemL2ToL1Log, UserL2ToL1Log, @@ -73,6 +74,31 @@ pub fn serialize_commitments<I: SerializeCommitment>(values: &[I]) -> Vec<u8> { input } +#[derive(Debug, Clone, PartialEq, Default, Serialize, Deserialize)] +pub struct PriorityOpsMerkleProof { + pub left_path: Vec<H256>, + pub right_path: Vec<H256>, + pub hashes: Vec<H256>, +} + +impl PriorityOpsMerkleProof { + pub fn into_token(&self) -> ethabi::Token { + let array_into_token = |array: &[H256]| { + ethabi::Token::Array( + array + .iter() + .map(|hash| ethabi::Token::FixedBytes(hash.as_bytes().to_vec())) + .collect(), + ) + }; + ethabi::Token::Tuple(vec![ + array_into_token(&self.left_path), + array_into_token(&self.right_path), + array_into_token(&self.hashes), + ]) + } +} + /// Precalculated data for the L1 batch that was used in commitment and L1 transaction. #[derive(Debug, Clone, PartialEq, Serialize, Deserialize)] pub struct L1BatchMetadata { diff --git a/core/lib/types/src/l2_to_l1_log.rs b/core/lib/types/src/l2_to_l1_log.rs index 1b84a79024c7..0ee0547930ff 100644 --- a/core/lib/types/src/l2_to_l1_log.rs +++ b/core/lib/types/src/l2_to_l1_log.rs @@ -79,6 +79,24 @@ pub fn l2_to_l1_logs_tree_size(protocol_version: ProtocolVersionId) -> usize { } } +#[derive(Debug, Clone, Serialize, Deserialize)] +pub struct BatchAndChainMerklePath { + pub batch_proof_len: u32, + pub proof: Vec<H256>, +} + +pub const LOG_PROOF_SUPPORTED_METADATA_VERSION: u8 = 1; + +pub const BATCH_LEAF_PADDING: H256 = H256([ + 0xd8, 0x2f, 0xec, 0x4a, 0x37, 0xcb, 0xdc, 0x47, 0xf1, 0xe5, 0xcc, 0x4a, 0xd6, 0x4d, 0xea, 0xcf, + 0x34, 0xa4, 0x8e, 0x6f, 0x7c, 0x61, 0xfa, 0x5b, 0x68, 0xfd, 0x58, 0xe5, 0x43, 0x25, 0x9c, 0xf4, +]); + +pub const CHAIN_ID_LEAF_PADDING: H256 = H256([ + 0x39, 0xbc, 0x69, 0x36, 0x3b, 0xb9, 0xe2, 0x6c, 0xf1, 0x42, 0x40, 0xde, 0x4e, 0x22, 0x56, 0x9e, + 0x95, 0xcf, 0x17, 0x5c, 0xfb, 0xcf, 0x1a, 0xde, 0x1a, 0x47, 0xa2, 0x53, 0xb4, 0xbf, 0x7f, 0x61, +]); + /// Returns the blob hashes parsed out from the system logs pub fn parse_system_logs_for_blob_hashes_pre_gateway( protocol_version: &ProtocolVersionId, diff --git a/core/lib/types/src/storage/mod.rs b/core/lib/types/src/storage/mod.rs index 84a29ed8c039..0f851857e6a4 100644 --- a/core/lib/types/src/storage/mod.rs +++ b/core/lib/types/src/storage/mod.rs @@ -91,6 +91,11 @@ pub fn get_system_context_key(key: H256) -> StorageKey { StorageKey::new(system_context, key) } +pub fn get_message_root_log_key(key: H256) -> StorageKey { + let message_root = AccountTreeId::new(L2_MESSAGE_ROOT_ADDRESS); + StorageKey::new(message_root, key) +} + pub fn get_deployer_key(key: H256) -> StorageKey { let deployer_contract = AccountTreeId::new(CONTRACT_DEPLOYER_ADDRESS); StorageKey::new(deployer_contract, key) }
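To make the padding constant above concrete, a hedged sketch (not part of the patch) of how a chain-ID leaf is assembled; it mirrors `chain_id_leaf_preimage` introduced later in this diff, and hashing the 96-byte preimage with keccak256 is an assumption based on the `MiniMerkleTree::<[u8; 96], KeccakHasher>` usage below:

use zksync_types::{web3::keccak256, H256};

// Leaf preimage layout: padding constant ‖ chain root ‖ chain ID (32 bytes each).
fn chain_id_leaf_hash(padding: H256, chain_root: H256, chain_id: H256) -> H256 {
    let mut preimage = [0u8; 96];
    preimage[..32].copy_from_slice(padding.as_bytes());
    preimage[32..64].copy_from_slice(chain_root.as_bytes());
    preimage[64..].copy_from_slice(chain_id.as_bytes());
    H256(keccak256(&preimage))
}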
diff --git a/core/lib/vm_interface/src/pubdata/mod.rs b/core/lib/vm_interface/src/pubdata/mod.rs index f901687b5fa6..54d7ebf5e1b0 100644 --- a/core/lib/vm_interface/src/pubdata/mod.rs +++ b/core/lib/vm_interface/src/pubdata/mod.rs @@ -13,7 +13,7 @@ use zksync_types::{ /// bytes32 value; /// } /// ``` -#[derive(Debug, Default, Clone, PartialEq)] +#[derive(Debug, Default, Clone, PartialEq, Eq)] pub struct L1MessengerL2ToL1Log { pub l2_shard_id: u8, pub is_service: bool, @@ -63,7 +63,7 @@ impl From<L1MessengerL2ToL1Log> for L2ToL1Log { } /// Struct based on which the pubdata blob is formed -#[derive(Debug, Clone, Default)] +#[derive(Debug, Clone, Default, PartialEq, Eq)] pub struct PubdataInput { pub user_logs: Vec<L1MessengerL2ToL1Log>, pub l2_to_l1_messages: Vec<Vec<u8>>, diff --git a/core/lib/web3_decl/src/namespaces/eth.rs b/core/lib/web3_decl/src/namespaces/eth.rs index 40cb6300cffa..4db58a06c59d 100644 --- a/core/lib/web3_decl/src/namespaces/eth.rs +++ b/core/lib/web3_decl/src/namespaces/eth.rs @@ -183,7 +183,7 @@ pub trait EthNamespace { &self, block_count: U64Number, newest_block: BlockNumber, - reward_percentiles: Vec<f32>, + reward_percentiles: Option<Vec<f32>>, ) -> RpcResult<FeeHistory>; #[method(name = "maxPriorityFeePerGas")] diff --git a/core/lib/web3_decl/src/namespaces/unstable.rs b/core/lib/web3_decl/src/namespaces/unstable.rs index e6b36dd26846..f666f02f2811 100644 --- a/core/lib/web3_decl/src/namespaces/unstable.rs +++ b/core/lib/web3_decl/src/namespaces/unstable.rs @@ -2,9 +2,9 @@ use jsonrpsee::core::RpcResult; use jsonrpsee::proc_macros::rpc; use zksync_types::{ - api::{TeeProof, TransactionExecutionInfo}, + api::{ChainAggProof, TeeProof, TransactionExecutionInfo}, tee_types::TeeType, - L1BatchNumber, H256, + L1BatchNumber, L2ChainId, H256, }; use crate::client::{ForWeb3Network, L2}; @@ -31,4 +31,11 @@ pub trait UnstableNamespace { l1_batch_number: L1BatchNumber, tee_type: Option<TeeType>, ) -> RpcResult<Vec<TeeProof>>; + + #[method(name = "getChainLogProof")] + async fn get_chain_log_proof( + &self, + l1_batch_number: L1BatchNumber, + chain_id: L2ChainId, + ) -> RpcResult<Option<ChainAggProof>>; }
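For context, a hedged sketch (not part of the patch) of invoking the new endpoint over raw JSON-RPC; the `unstable_` method prefix is an assumption based on this namespace's naming convention, and the parameter values are illustrative:

fn main() {
    // Hypothetical request body for the method declared above.
    let request = serde_json::json!({
        "jsonrpc": "2.0",
        "id": 1,
        "method": "unstable_getChainLogProof",
        "params": [4242, 270], // L1 batch number, L2 chain ID
    });
    println!("{request}");
}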
diff --git a/core/lib/web3_decl/src/types.rs b/core/lib/web3_decl/src/types.rs index 36ee48a54a1b..375e5c4d992c 100644 --- a/core/lib/web3_decl/src/types.rs +++ b/core/lib/web3_decl/src/types.rs @@ -5,19 +5,16 @@ //! //! These "extensions" are required to provide more ZKsync-specific information while remaining Web3-compliant. -use core::{ - convert::{TryFrom, TryInto}, - fmt, - marker::PhantomData, -}; +use core::convert::{TryFrom, TryInto}; use rlp::Rlp; -use serde::{de, Deserialize, Deserializer, Serialize, Serializer}; +use serde::{Deserialize, Serialize}; pub use zksync_types::{ api::{Block, BlockNumber, Log, TransactionReceipt, TransactionRequest}, ethabi, web3::{ - BlockHeader, Bytes, CallRequest, FeeHistory, Index, SyncState, TraceFilter, U64Number, Work, + BlockHeader, Bytes, CallRequest, FeeHistory, Index, SyncState, TraceFilter, U64Number, + ValueOrArray, Work, }, Address, Transaction, H160, H256, H64, U256, U64, }; @@ -101,71 +98,6 @@ pub enum FilterChanges { Empty([u8; 0]), } -/// Either value or array of values. -/// -/// A value must serialize into a string. -#[derive(Default, Debug, PartialEq, Clone)] -pub struct ValueOrArray<T>(pub Vec<T>); - -impl<T> From<T> for ValueOrArray<T> { - fn from(value: T) -> Self { - Self(vec![value]) - } -} - -impl<T: Serialize> Serialize for ValueOrArray<T> { - fn serialize<S>(&self, serializer: S) -> Result<S::Ok, S::Error> - where - S: Serializer, - { - match self.0.len() { - 0 => serializer.serialize_none(), - 1 => Serialize::serialize(&self.0[0], serializer), - _ => Serialize::serialize(&self.0, serializer), - } - } -} - -impl<'de, T: Deserialize<'de>> Deserialize<'de> for ValueOrArray<T> { - fn deserialize<D>(deserializer: D) -> Result<Self, D::Error> - where - D: Deserializer<'de>, - { - struct Visitor<T>(PhantomData<T>); - - impl<'de, T: Deserialize<'de>> de::Visitor<'de> for Visitor<T> { - type Value = ValueOrArray<T>; - - fn expecting(&self, formatter: &mut fmt::Formatter<'_>) -> fmt::Result { - formatter.write_str("string value or sequence of values") - } - - fn visit_str<E>(self, value: &str) -> Result<Self::Value, E> - where - E: de::Error, - { - use serde::de::IntoDeserializer; - - Deserialize::deserialize(value.into_deserializer()) - .map(|value| ValueOrArray(vec![value])) - } - - fn visit_seq<S>(self, mut visitor: S) -> Result<Self::Value, S::Error> - where - S: de::SeqAccess<'de>, - { - let mut elements = Vec::with_capacity(visitor.size_hint().unwrap_or(1)); - while let Some(element) = visitor.next_element()? { - elements.push(element); - } - Ok(ValueOrArray(elements)) - } - } - - deserializer.deserialize_any(Visitor(PhantomData)) - } -} - /// Filter #[derive(Default, Debug, PartialEq, Clone, Serialize, Deserialize)] pub struct Filter { @@ -185,6 +117,28 @@ pub struct Filter { pub block_hash: Option<H256>, } +impl From<zksync_types::web3::Filter> for Filter { + fn from(value: zksync_types::web3::Filter) -> Self { + let convert_block_number = |b: zksync_types::web3::BlockNumber| match b { + zksync_types::web3::BlockNumber::Finalized => BlockNumber::Finalized, + zksync_types::web3::BlockNumber::Safe => BlockNumber::Finalized, + zksync_types::web3::BlockNumber::Latest => BlockNumber::Latest, + zksync_types::web3::BlockNumber::Earliest => BlockNumber::Earliest, + zksync_types::web3::BlockNumber::Pending => BlockNumber::Pending, + zksync_types::web3::BlockNumber::Number(n) => BlockNumber::Number(n), + }; + let from_block = value.from_block.map(convert_block_number); + let to_block = value.to_block.map(convert_block_number); + Filter { + from_block, + to_block, + address: value.address, + topics: value.topics, + block_hash: value.block_hash, + } + } +} + /// Filter Builder #[derive(Default, Clone)] pub struct FilterBuilder { diff --git a/core/node/api_server/Cargo.toml b/core/node/api_server/Cargo.toml index d0723a9d23e7..b9ffb750b816 100644 --- a/core/node/api_server/Cargo.toml +++ b/core/node/api_server/Cargo.toml @@ -11,6 +11,7 @@ keywords.workspace = true categories.workspace = true [dependencies] +zksync_crypto_primitives.workspace = true zksync_config.workspace = true zksync_consensus_roles.workspace = true zksync_contracts.workspace = true diff --git a/core/node/api_server/src/web3/backend_jsonrpsee/namespaces/eth.rs b/core/node/api_server/src/web3/backend_jsonrpsee/namespaces/eth.rs index 342756013752..93f0205c77fa 100644 --- a/core/node/api_server/src/web3/backend_jsonrpsee/namespaces/eth.rs +++ b/core/node/api_server/src/web3/backend_jsonrpsee/namespaces/eth.rs @@ -262,11 +262,15 @@ impl EthNamespaceServer for EthNamespace { &self, block_count: U64Number, newest_block: BlockNumber, - reward_percentiles: Vec<f32>, + reward_percentiles: Option<Vec<f32>>, ) -> RpcResult<FeeHistory> { - self.fee_history_impl(block_count.into(), newest_block, reward_percentiles) - .await -
.map_err(|err| self.current_method().map_err(err)) + self.fee_history_impl( + block_count.into(), + newest_block, + reward_percentiles.unwrap_or_default(), + ) + .await + .map_err(|err| self.current_method().map_err(err)) } async fn max_priority_fee_per_gas(&self) -> RpcResult<U256> { diff --git a/core/node/api_server/src/web3/backend_jsonrpsee/namespaces/unstable.rs b/core/node/api_server/src/web3/backend_jsonrpsee/namespaces/unstable.rs index 91330aa7d949..cfa8c84b05b0 100644 --- a/core/node/api_server/src/web3/backend_jsonrpsee/namespaces/unstable.rs +++ b/core/node/api_server/src/web3/backend_jsonrpsee/namespaces/unstable.rs @@ -1,7 +1,7 @@ use zksync_types::{ - api::{TeeProof, TransactionExecutionInfo}, + api::{ChainAggProof, TeeProof, TransactionExecutionInfo}, tee_types::TeeType, - L1BatchNumber, H256, + L1BatchNumber, L2ChainId, H256, }; use zksync_web3_decl::{ jsonrpsee::core::{async_trait, RpcResult}, @@ -30,4 +30,14 @@ impl UnstableNamespaceServer for UnstableNamespace { .await .map_err(|err| self.current_method().map_err(err)) } + + async fn get_chain_log_proof( + &self, + l1_batch_number: L1BatchNumber, + chain_id: L2ChainId, + ) -> RpcResult<Option<ChainAggProof>> { + self.get_chain_log_proof_impl(l1_batch_number, chain_id) + .await + .map_err(|err| self.current_method().map_err(err)) + } } diff --git a/core/node/api_server/src/web3/namespaces/en.rs b/core/node/api_server/src/web3/namespaces/en.rs index a09a0cb92fc7..56a7e2df458c 100644 --- a/core/node/api_server/src/web3/namespaces/en.rs +++ b/core/node/api_server/src/web3/namespaces/en.rs @@ -151,18 +151,18 @@ impl EnNamespace { Ok(self .state .api_config - .bridgehub_proxy_addr + .l1_bridgehub_proxy_addr .map(|bridgehub_proxy_addr| EcosystemContracts { bridgehub_proxy_addr, state_transition_proxy_addr: self .state .api_config - .state_transition_proxy_addr + .l1_state_transition_proxy_addr .unwrap(), transparent_proxy_admin_addr: self .state .api_config - .transparent_proxy_admin_addr + .l1_transparent_proxy_admin_addr .unwrap(), }) .context("Shared bridge doesn't supported")?) diff --git a/core/node/api_server/src/web3/namespaces/unstable.rs b/core/node/api_server/src/web3/namespaces/unstable.rs deleted file mode 100644 index 783088cdc36a..000000000000 --- a/core/node/api_server/src/web3/namespaces/unstable.rs +++ /dev/null @@ -1,62 +0,0 @@ -use chrono::{DateTime, Utc}; -use zksync_dal::{CoreDal, DalError}; -use zksync_types::{ - api::{TeeProof, TransactionExecutionInfo}, - tee_types::TeeType, - L1BatchNumber, -}; -use zksync_web3_decl::{error::Web3Error, types::H256}; - -use crate::web3::{backend_jsonrpsee::MethodTracer, RpcState}; - -#[derive(Debug)] -pub(crate) struct UnstableNamespace { - state: RpcState, -} - -impl UnstableNamespace { - pub fn new(state: RpcState) -> Self { - Self { state } - } - - pub(crate) fn current_method(&self) -> &MethodTracer { - &self.state.current_method - } - - pub async fn transaction_execution_info_impl( - &self, - hash: H256, - ) -> Result<Option<TransactionExecutionInfo>, Web3Error> { - let mut storage = self.state.acquire_connection().await?; - Ok(storage - .transactions_web3_dal() - .get_unstable_transaction_execution_info(hash) - .await - .map_err(DalError::generalize)?
- .map(|execution_info| TransactionExecutionInfo { execution_info })) - } - - pub async fn get_tee_proofs_impl( - &self, - l1_batch_number: L1BatchNumber, - tee_type: Option<TeeType>, - ) -> Result<Vec<TeeProof>, Web3Error> { - let mut storage = self.state.acquire_connection().await?; - Ok(storage - .tee_proof_generation_dal() - .get_tee_proofs(l1_batch_number, tee_type) - .await - .map_err(DalError::generalize)? - .into_iter() - .map(|proof| TeeProof { - l1_batch_number, - tee_type, - pubkey: proof.pubkey, - signature: proof.signature, - proof: proof.proof, - proved_at: DateTime::<Utc>::from_naive_utc_and_offset(proof.updated_at, Utc), - attestation: proof.attestation, - }) - .collect::<Vec<_>>()) - } -} diff --git a/core/node/api_server/src/web3/namespaces/unstable/mod.rs b/core/node/api_server/src/web3/namespaces/unstable/mod.rs new file mode 100644 index 000000000000..fdfa7631f7c4 --- /dev/null +++ b/core/node/api_server/src/web3/namespaces/unstable/mod.rs @@ -0,0 +1,139 @@ +use chrono::{DateTime, Utc}; +use itertools::Itertools; +use utils::{ + chain_id_leaf_preimage, get_chain_count, get_chain_id_from_index, get_chain_root_from_id, +}; +use zksync_crypto_primitives::hasher::keccak::KeccakHasher; +use zksync_dal::{CoreDal, DalError}; +use zksync_mini_merkle_tree::MiniMerkleTree; +use zksync_types::{ + api::{ChainAggProof, TeeProof, TransactionExecutionInfo}, + tee_types::TeeType, + L1BatchNumber, L2ChainId, +}; +use zksync_web3_decl::{error::Web3Error, types::H256}; + +use crate::web3::{backend_jsonrpsee::MethodTracer, RpcState}; + +mod utils; + +#[derive(Debug)] +pub(crate) struct UnstableNamespace { + state: RpcState, +} + +impl UnstableNamespace { + pub fn new(state: RpcState) -> Self { + Self { state } + } + + pub(crate) fn current_method(&self) -> &MethodTracer { + &self.state.current_method + } + + pub async fn transaction_execution_info_impl( + &self, + hash: H256, + ) -> Result<Option<TransactionExecutionInfo>, Web3Error> { + let mut storage = self.state.acquire_connection().await?; + Ok(storage + .transactions_web3_dal() + .get_unstable_transaction_execution_info(hash) + .await + .map_err(DalError::generalize)? + .map(|execution_info| TransactionExecutionInfo { execution_info })) + } + + pub async fn get_tee_proofs_impl( + &self, + l1_batch_number: L1BatchNumber, + tee_type: Option<TeeType>, + ) -> Result<Vec<TeeProof>, Web3Error> { + let mut storage = self.state.acquire_connection().await?; + Ok(storage + .tee_proof_generation_dal() + .get_tee_proofs(l1_batch_number, tee_type) + .await + .map_err(DalError::generalize)? + .into_iter() + .map(|proof| TeeProof { + l1_batch_number, + tee_type, + pubkey: proof.pubkey, + signature: proof.signature, + proof: proof.proof, + proved_at: DateTime::<Utc>::from_naive_utc_and_offset(proof.updated_at, Utc), + attestation: proof.attestation, + }) + .collect::<Vec<_>>()) + } + + pub async fn get_chain_log_proof_impl( + &self, + l1_batch_number: L1BatchNumber, + l2_chain_id: L2ChainId, + ) -> Result<Option<ChainAggProof>, Web3Error> { + let mut connection = self.state.acquire_connection().await?; + self.state + .start_info + .ensure_not_pruned(l1_batch_number, &mut connection) + .await?; + + let Some((_, l2_block_number)) = connection + .blocks_dal() + .get_l2_block_range_of_l1_batch(l1_batch_number) + .await + .map_err(DalError::generalize)?
+ else { + return Ok(None); + }; + let chain_count_integer = get_chain_count(&mut connection, l2_block_number).await?; + + let mut chain_ids = Vec::new(); + for chain_index in 0..chain_count_integer { + chain_ids.push( + get_chain_id_from_index(&mut connection, chain_index, l2_block_number).await?, + ); + } + + let Some((chain_id_leaf_proof_mask, _)) = chain_ids + .iter() + .find_position(|id| **id == H256::from_low_u64_be(l2_chain_id.as_u64())) + else { + return Ok(None); + }; + + let mut leafs = Vec::new(); + for chain_id in chain_ids { + let chain_root = + get_chain_root_from_id(&mut connection, chain_id, l2_block_number).await?; + leafs.push(chain_id_leaf_preimage(chain_root, chain_id)); + } + + let chain_merkle_tree = + MiniMerkleTree::<[u8; 96], KeccakHasher>::new(leafs.into_iter(), None); + + let mut chain_id_leaf_proof = chain_merkle_tree + .merkle_root_and_path(chain_id_leaf_proof_mask) + .1; + + let Some(local_root) = connection + .blocks_dal() + .get_l1_batch_local_root(l1_batch_number) + .await + .map_err(DalError::generalize)? + else { + return Ok(None); + }; + + // The chain tree is the right subtree of the aggregated tree, + // so we append the root of the left subtree to form the full proof. + let chain_id_leaf_proof_mask = chain_id_leaf_proof_mask | (1 << chain_id_leaf_proof.len()); + chain_id_leaf_proof.push(local_root); + + Ok(Some(ChainAggProof { + chain_id_leaf_proof, + chain_id_leaf_proof_mask: chain_id_leaf_proof_mask.into(), + })) + } +} diff --git a/core/node/api_server/src/web3/namespaces/unstable/utils.rs b/core/node/api_server/src/web3/namespaces/unstable/utils.rs new file mode 100644 index 000000000000..2d3187fab6b8 --- /dev/null +++ b/core/node/api_server/src/web3/namespaces/unstable/utils.rs @@ -0,0 +1,104 @@ +use zksync_dal::{Connection, Core, CoreDal, DalError}; +use zksync_multivm::circuit_sequencer_api_latest::boojum::ethereum_types::U256; +use zksync_system_constants::{ + message_root::{CHAIN_COUNT_KEY, CHAIN_INDEX_TO_ID_KEY, CHAIN_TREE_KEY}, + L2_MESSAGE_ROOT_ADDRESS, +}; +use zksync_types::{ + h256_to_u256, l2_to_l1_log::CHAIN_ID_LEAF_PADDING, u256_to_h256, web3::keccak256, + AccountTreeId, L2BlockNumber, StorageKey, H256, +}; +use zksync_web3_decl::error::Web3Error; + +pub(super) async fn get_chain_count( + connection: &mut Connection<'_, Core>, + block_number: L2BlockNumber, +) -> anyhow::Result<u8> { + let chain_count_key = CHAIN_COUNT_KEY; + let chain_count_storage_key = + message_root_log_key(H256::from_low_u64_be(chain_count_key as u64)); + let chain_count = connection + .storage_web3_dal() + .get_historical_value_unchecked(chain_count_storage_key.hashed_key(), block_number) + .await + .map_err(DalError::generalize)?; + if h256_to_u256(chain_count) > u8::MAX.into() { + anyhow::bail!("Chain count doesn't fit in `u8`"); + } + Ok(chain_count.0[31]) +} + +pub(super) async fn get_chain_id_from_index( + connection: &mut Connection<'_, Core>, + chain_index: u8, + block_number: L2BlockNumber, +) -> Result<H256, Web3Error> { + let key = H256::from_slice(&keccak256( + &[ + H256::from_low_u64_be(chain_index as u64).0, + H256::from_low_u64_be(CHAIN_INDEX_TO_ID_KEY as u64).0, + ] + .concat(), + )); + let storage_key = message_root_log_key(key); + let chain_id = connection + .storage_web3_dal() + .get_historical_value_unchecked(storage_key.hashed_key(), block_number) + .await + .map_err(DalError::generalize)?; + Ok(chain_id) +} + +pub(super) async fn get_chain_root_from_id( + connection: &mut Connection<'_, Core>, + chain_id: H256, + block_number: L2BlockNumber, +) -> Result<H256, Web3Error> { + let
chain_tree_key = H256::from_slice(&keccak256( + &[chain_id.0, H256::from_low_u64_be(CHAIN_TREE_KEY as u64).0].concat(), + )); + let chain_sides_len_key = + u256_to_h256(h256_to_u256(chain_tree_key).overflowing_add(U256::one()).0); + let chain_sides_len_storage_key = message_root_log_key(chain_sides_len_key); + let chain_sides_len = connection + .storage_web3_dal() + .get_historical_value_unchecked(chain_sides_len_storage_key.hashed_key(), block_number) + .await + .map_err(DalError::generalize)?; + + let last_element_pos = { + let length = h256_to_u256(chain_sides_len); + assert!( + length > U256::zero(), + "_sides.length is zero, chain is not registered" + ); + + length - 1 + }; + let sides_data_start_key = H256(keccak256(chain_sides_len_key.as_bytes())); + let chain_root_key = h256_to_u256(sides_data_start_key) + .overflowing_add(last_element_pos) + .0; + let chain_root_storage_key = message_root_log_key(u256_to_h256(chain_root_key)); + let chain_root = connection + .storage_web3_dal() + .get_historical_value_unchecked(chain_root_storage_key.hashed_key(), block_number) + .await + .map_err(DalError::generalize)?; + Ok(chain_root) +} + +pub(super) fn chain_id_leaf_preimage(chain_root: H256, chain_id: H256) -> [u8; 96] { + let mut full_preimage = [0u8; 96]; + + full_preimage[0..32].copy_from_slice(CHAIN_ID_LEAF_PADDING.as_bytes()); + full_preimage[32..64].copy_from_slice(&chain_root.0); + full_preimage[64..96].copy_from_slice(&chain_id.0); + + full_preimage +} + +fn message_root_log_key(key: H256) -> StorageKey { + let message_root = AccountTreeId::new(L2_MESSAGE_ROOT_ADDRESS); + StorageKey::new(message_root, key) +} diff --git a/core/node/api_server/src/web3/namespaces/zks.rs b/core/node/api_server/src/web3/namespaces/zks.rs index 05c90f0b0140..c692494d5091 100644 --- a/core/node/api_server/src/web3/namespaces/zks.rs +++ b/core/node/api_server/src/web3/namespaces/zks.rs @@ -1,6 +1,7 @@ use std::collections::HashMap; use anyhow::Context as _; +use zksync_crypto_primitives::hasher::{keccak::KeccakHasher, Hasher}; use zksync_dal::{Connection, Core, CoreDal, DalError}; use zksync_metadata_calculator::api_server::TreeApiError; use zksync_mini_merkle_tree::MiniMerkleTree; @@ -17,7 +18,7 @@ use zksync_types::{ h256_to_u256, l1::L1Tx, l2::L2Tx, - l2_to_l1_log::{l2_to_l1_logs_tree_size, L2ToL1Log}, + l2_to_l1_log::{l2_to_l1_logs_tree_size, L2ToL1Log, LOG_PROOF_SUPPORTED_METADATA_VERSION}, tokens::ETHEREUM_ADDRESS, transaction_request::CallRequest, utils::storage_key_for_standard_token_balance, @@ -137,11 +138,11 @@ impl ZksNamespace { } pub fn get_bridgehub_contract_impl(&self) -> Option
<Address> { - self.state.api_config.bridgehub_proxy_addr + self.state.api_config.l1_bridgehub_proxy_addr } pub fn get_main_contract_impl(&self) -> Address { - self.state.api_config.diamond_proxy_addr + self.state.api_config.l1_diamond_proxy_addr } pub fn get_testnet_paymaster_impl(&self) -> Option<Address>
{ @@ -321,9 +322,9 @@ impl ZksNamespace { return Ok(None); }; - let Some(batch) = storage + let Some(batch_with_metadata) = storage .blocks_dal() - .get_l1_batch_header(l1_batch_number) + .get_l1_batch_metadata(l1_batch_number) .await .map_err(DalError::generalize)? else { @@ -332,13 +333,71 @@ impl ZksNamespace { let merkle_tree_leaves = all_l1_logs_in_batch.iter().map(L2ToL1Log::to_bytes); - let protocol_version = batch + let protocol_version = batch_with_metadata + .header .protocol_version .unwrap_or_else(ProtocolVersionId::last_potentially_undefined); let tree_size = l2_to_l1_logs_tree_size(protocol_version); - let (root, proof) = MiniMerkleTree::new(merkle_tree_leaves, Some(tree_size)) + let (local_root, proof) = MiniMerkleTree::new(merkle_tree_leaves, Some(tree_size)) .merkle_root_and_path(l1_log_index); + + if protocol_version.is_pre_gateway() { + return Ok(Some(L2ToL1LogProof { + proof, + root: local_root, + id: l1_log_index as u32, + })); + } + + let aggregated_root = batch_with_metadata + .metadata + .aggregation_root + .expect("`aggregation_root` must be present for post-gateway branch"); + let root = KeccakHasher.compress(&local_root, &aggregated_root); + + let mut log_leaf_proof = proof; + log_leaf_proof.push(aggregated_root); + + let Some(sl_chain_id) = storage + .eth_sender_dal() + .get_batch_commit_chain_id(l1_batch_number) + .await + .map_err(DalError::generalize)? + else { + return Ok(None); + }; + + let (batch_proof_len, batch_chain_proof) = + if sl_chain_id.0 != self.state.api_config.l1_chain_id.0 { + let Some(batch_chain_proof) = storage + .blocks_dal() + .get_l1_batch_chain_merkle_path(l1_batch_number) + .await + .map_err(DalError::generalize)? + else { + return Ok(None); + }; + + (batch_chain_proof.batch_proof_len, batch_chain_proof.proof) + } else { + (0, Vec::new()) + }; + + let proof = { + let mut metadata = [0u8; 32]; + metadata[0] = LOG_PROOF_SUPPORTED_METADATA_VERSION; + metadata[1] = log_leaf_proof.len() as u8; + metadata[2] = batch_proof_len as u8; + + let mut result = vec![H256(metadata)]; + + result.extend(log_leaf_proof); + result.extend(batch_chain_proof); + + result + }; + Ok(Some(L2ToL1LogProof { proof, root, @@ -361,6 +420,11 @@ impl ZksNamespace { return Ok(None); }; + self.state + .start_info + .ensure_not_pruned(l1_batch_number, &mut storage) + .await?; + let log_proof = self .get_l2_to_l1_log_proof_inner( &mut storage, diff --git a/core/node/api_server/src/web3/state.rs b/core/node/api_server/src/web3/state.rs index d43771811ee0..900cd165c045 100644 --- a/core/node/api_server/src/web3/state.rs +++ b/core/node/api_server/src/web3/state.rs @@ -20,9 +20,8 @@ use zksync_dal::{Connection, ConnectionPool, Core, CoreDal, DalError}; use zksync_metadata_calculator::api_server::TreeApiClient; use zksync_node_sync::SyncState; use zksync_types::{ - api, api::BridgeAddresses, commitment::L1BatchCommitmentMode, l2::L2Tx, - transaction_request::CallRequest, Address, L1BatchNumber, L1ChainId, L2BlockNumber, L2ChainId, - H256, U256, U64, + api, commitment::L1BatchCommitmentMode, l2::L2Tx, transaction_request::CallRequest, Address, + L1BatchNumber, L1ChainId, L2BlockNumber, L2ChainId, H256, U256, U64, }; use zksync_web3_decl::{error::Web3Error, types::Filter}; @@ -104,10 +103,10 @@ pub struct InternalApiConfig { pub estimate_gas_acceptable_overestimation: u32, pub estimate_gas_optimize_search: bool, pub bridge_addresses: api::BridgeAddresses, - pub bridgehub_proxy_addr: Option
<Address>, - pub state_transition_proxy_addr: Option<Address>, - pub transparent_proxy_admin_addr: Option<Address>, - pub diamond_proxy_addr: Address, + pub l1_bridgehub_proxy_addr: Option<Address>, + pub l1_state_transition_proxy_addr: Option<Address>, + pub l1_transparent_proxy_admin_addr: Option<Address>, + pub l1_diamond_proxy_addr: Address, + pub l2_testnet_paymaster_addr: Option<Address>
, pub req_entities_limit: usize, pub fee_history_limit: u64, @@ -149,19 +148,19 @@ impl InternalApiConfig { ), l2_legacy_shared_bridge: contracts_config.l2_legacy_shared_bridge_addr, }, - bridgehub_proxy_addr: contracts_config + l1_bridgehub_proxy_addr: contracts_config .ecosystem_contracts .as_ref() .map(|a| a.bridgehub_proxy_addr), - state_transition_proxy_addr: contracts_config + l1_state_transition_proxy_addr: contracts_config .ecosystem_contracts .as_ref() .map(|a| a.state_transition_proxy_addr), - transparent_proxy_admin_addr: contracts_config + l1_transparent_proxy_admin_addr: contracts_config .ecosystem_contracts .as_ref() .map(|a| a.transparent_proxy_admin_addr), - diamond_proxy_addr: contracts_config.diamond_proxy_addr, + l1_diamond_proxy_addr: contracts_config.diamond_proxy_addr, l2_testnet_paymaster_addr: contracts_config.l2_testnet_paymaster_addr, req_entities_limit: web3_config.req_entities_limit(), fee_history_limit: web3_config.fee_history_limit(), @@ -213,18 +212,18 @@ impl SealedL2BlockNumber { } #[derive(Debug, Clone)] -pub struct BridgeAddressesHandle(Arc<RwLock<BridgeAddresses>>); +pub struct BridgeAddressesHandle(Arc<RwLock<api::BridgeAddresses>>); impl BridgeAddressesHandle { - pub fn new(bridge_addresses: BridgeAddresses) -> Self { + pub fn new(bridge_addresses: api::BridgeAddresses) -> Self { Self(Arc::new(RwLock::new(bridge_addresses))) } - pub async fn update(&self, bridge_addresses: BridgeAddresses) { + pub async fn update(&self, bridge_addresses: api::BridgeAddresses) { *self.0.write().await = bridge_addresses; } - pub async fn read(&self) -> BridgeAddresses { + pub async fn read(&self) -> api::BridgeAddresses { self.0.read().await.clone() } } diff --git a/core/node/api_server/src/web3/tests/mod.rs b/core/node/api_server/src/web3/tests/mod.rs index 9080c5ba413c..87441d3bb82b 100644 --- a/core/node/api_server/src/web3/tests/mod.rs +++ b/core/node/api_server/src/web3/tests/mod.rs @@ -1313,7 +1313,7 @@ impl HttpTest for FeeHistoryTest { .map(U256::from); let history = client - .fee_history(1_000.into(), api::BlockNumber::Latest, vec![]) + .fee_history(1_000.into(), api::BlockNumber::Latest, Some(vec![])) .await?; assert_eq!(history.inner.oldest_block, 0.into()); assert_eq!( @@ -1346,7 +1346,11 @@ impl HttpTest for FeeHistoryTest { // Check partial histories: blocks 0..=1 let history = client - .fee_history(1_000.into(), api::BlockNumber::Number(1.into()), vec![]) + .fee_history( + 1_000.into(), + api::BlockNumber::Number(1.into()), + Some(vec![]), + ) .await?; assert_eq!(history.inner.oldest_block, 0.into()); assert_eq!( @@ -1357,7 +1361,7 @@ // Blocks 1..=2 let history = client - .fee_history(2.into(), api::BlockNumber::Latest, vec![]) + .fee_history(2.into(), api::BlockNumber::Latest, Some(vec![])) .await?; assert_eq!(history.inner.oldest_block, 1.into()); assert_eq!( @@ -1368,7 +1372,7 @@ // Blocks 1..=1 let history = client - .fee_history(1.into(), api::BlockNumber::Number(1.into()), vec![]) + .fee_history(1.into(), api::BlockNumber::Number(1.into()), Some(vec![])) .await?; assert_eq!(history.inner.oldest_block, 1.into()); assert_eq!(history.inner.base_fee_per_gas, [100, 100].map(U256::from)); @@ -1376,7 +1380,11 @@ // Non-existing newest block.
let err = client - .fee_history(1000.into(), api::BlockNumber::Number(100.into()), vec![]) + .fee_history( + 1000.into(), + api::BlockNumber::Number(100.into()), + Some(vec![]), + ) .await .unwrap_err(); assert_matches!( diff --git a/core/node/commitment_generator/src/lib.rs b/core/node/commitment_generator/src/lib.rs index 2ce0152abab6..71b019e230a7 100644 --- a/core/node/commitment_generator/src/lib.rs +++ b/core/node/commitment_generator/src/lib.rs @@ -293,13 +293,13 @@ impl CommitmentGenerator { }; let aggregation_root = if protocol_version.is_pre_gateway() { + H256::zero() + } else { let mut connection = self .connection_pool .connection_tagged("commitment_generator") .await?; read_aggregation_root(&mut connection, l1_batch_number).await? - } else { - H256::zero() }; CommitmentInput::PostBoojum { diff --git a/core/node/consistency_checker/src/lib.rs b/core/node/consistency_checker/src/lib.rs index e13e479117cc..340b1ccc1ec0 100644 --- a/core/node/consistency_checker/src/lib.rs +++ b/core/node/consistency_checker/src/lib.rs @@ -1,9 +1,12 @@ -use std::{borrow::Cow, collections::HashSet, fmt, time::Duration}; +use std::{borrow::Cow, cmp::Ordering, collections::HashSet, fmt, time::Duration}; use anyhow::Context as _; use serde::Serialize; use tokio::sync::watch; -use zksync_contracts::PRE_BOOJUM_COMMIT_FUNCTION; +use zksync_contracts::{ + bridgehub_contract, POST_BOOJUM_COMMIT_FUNCTION, POST_SHARED_BRIDGE_COMMIT_FUNCTION, + PRE_BOOJUM_COMMIT_FUNCTION, +}; use zksync_dal::{Connection, ConnectionPool, Core, CoreDal}; use zksync_eth_client::{ clients::{DynClient, L1}, @@ -11,16 +14,23 @@ use zksync_eth_client::{ }; use zksync_health_check::{Health, HealthStatus, HealthUpdater, ReactiveHealthCheck}; use zksync_l1_contract_interface::{ - i_executor::{commit::kzg::ZK_SYNC_BYTES_PER_BLOB, structures::CommitBatchInfo}, + i_executor::{ + commit::kzg::ZK_SYNC_BYTES_PER_BLOB, + structures::{ + CommitBatchInfo, StoredBatchInfo, PUBDATA_SOURCE_BLOBS, PUBDATA_SOURCE_CALLDATA, + PUBDATA_SOURCE_CUSTOM_PRE_GATEWAY, SUPPORTED_ENCODING_VERSION, + }, + }, Tokenizable, }; use zksync_shared_metrics::{CheckerComponent, EN_METRICS}; use zksync_types::{ commitment::{L1BatchCommitmentMode, L1BatchWithMetadata}, ethabi, - ethabi::Token, + ethabi::{ParamType, Token}, pubdata_da::PubdataSendingMode, - Address, L1BatchNumber, ProtocolVersionId, H256, U256, + Address, L1BatchNumber, L2ChainId, ProtocolVersionId, SLChainId, H256, L2_BRIDGEHUB_ADDRESS, + U256, }; #[cfg(test)] @@ -33,10 +43,10 @@ enum CheckError { #[error("error calling L1 contract")] ContractCall(#[from] ContractCallError), /// Error that is caused by the main node providing incorrect information etc. - #[error("failed validating commit transaction")] + #[error("failed validating commit transaction: {0}")] Validation(anyhow::Error), /// Error that is caused by violating invariants internal to *this* node (e.g., not having expected data in Postgres). - #[error("internal error")] + #[error("internal error: {0}")] Internal(anyhow::Error), } @@ -213,6 +223,13 @@ impl LocalL1BatchCommitData { .map_or(true, |version| version.is_pre_shared_bridge()) } + fn is_pre_gateway(&self) -> bool { + self.l1_batch + .header + .protocol_version + .map_or(true, |version| version.is_pre_gateway()) + } + /// All returned errors are validation errors. 
fn verify_commitment(&self, reference: &ethabi::Token) -> anyhow::Result<()> { let protocol_version = self @@ -220,11 +237,13 @@ impl LocalL1BatchCommitData { .header .protocol_version .unwrap_or_else(ProtocolVersionId::last_potentially_undefined); - let da = detect_da(protocol_version, reference) + let da = detect_da(protocol_version, reference, self.commitment_mode) .context("cannot detect DA source from reference commitment token")?; - // For `PubdataDA::Calldata`, it's required that the pubdata fits into a single blob. - if matches!(da, PubdataSendingMode::Calldata) { + // For rollups with `PubdataSendingMode::Calldata`, it's required that the pubdata fits into a single blob. + if matches!(self.commitment_mode, L1BatchCommitmentMode::Rollup) + && matches!(da, PubdataSendingMode::Calldata) + { let pubdata_len = self .l1_batch .header @@ -258,12 +277,8 @@ impl LocalL1BatchCommitData { pub fn detect_da( protocol_version: ProtocolVersionId, reference: &Token, + commitment_mode: L1BatchCommitmentMode, ) -> Result<PubdataSendingMode, ethabi::Error> { - /// These are used by the L1 Contracts to indicate what DA layer is used for pubdata - const PUBDATA_SOURCE_CALLDATA: u8 = 0; - const PUBDATA_SOURCE_BLOBS: u8 = 1; - const PUBDATA_SOURCE_CUSTOM: u8 = 2; - fn parse_error(message: impl Into<Cow<'static, str>>) -> ethabi::Error { ethabi::Error::Other(message.into()) } @@ -290,28 +305,80 @@ pub fn detect_da( "last reference token has unexpected shape; expected bytes, got {last_reference_token:?}" ))), }; - match last_reference_token.first() { - Some(&byte) if byte == PUBDATA_SOURCE_CALLDATA => Ok(PubdataSendingMode::Calldata), - Some(&byte) if byte == PUBDATA_SOURCE_BLOBS => Ok(PubdataSendingMode::Blobs), - Some(&byte) if byte == PUBDATA_SOURCE_CUSTOM => Ok(PubdataSendingMode::Custom), - Some(&byte) => Err(parse_error(format!( - "unexpected first byte of the last reference token; expected one of [{PUBDATA_SOURCE_CALLDATA}, {PUBDATA_SOURCE_BLOBS}], \ got {byte}" ))), - None => Err(parse_error("last reference token is empty")), + + if protocol_version.is_pre_gateway() { + return match last_reference_token.first() { + Some(&byte) if byte == PUBDATA_SOURCE_CALLDATA => Ok(PubdataSendingMode::Calldata), + Some(&byte) if byte == PUBDATA_SOURCE_BLOBS => Ok(PubdataSendingMode::Blobs), + Some(&byte) if byte == PUBDATA_SOURCE_CUSTOM_PRE_GATEWAY => Ok(PubdataSendingMode::Custom), + Some(&byte) => Err(parse_error(format!( + "unexpected first byte of the last reference token; expected one of [{PUBDATA_SOURCE_CALLDATA}, {PUBDATA_SOURCE_BLOBS}, {PUBDATA_SOURCE_CUSTOM_PRE_GATEWAY}], \ + got {byte}" + ))), + None => Err(parse_error("last reference token is empty")), + }; + } + + match commitment_mode { + L1BatchCommitmentMode::Validium => { + // `Calldata`, `RelayedL2Calldata` and `Blobs` are encoded exactly the same way; + // the token is just a `state_diff_hash` for them. + // For `Custom` it's `state_diff_hash` followed by `da_inclusion_data`. We can't distinguish + // between `Calldata`/`RelayedL2Calldata`/`Blobs`/`Custom` with empty `da_inclusion_data`, + // but it's fine to just return `Calldata`, given that they are all encoded the same way.
+ match last_reference_token.len().cmp(&32) { + Ordering::Equal => Ok(PubdataSendingMode::Calldata), + Ordering::Greater => Ok(PubdataSendingMode::Custom), + Ordering::Less => Err(parse_error( + "unexpected last reference token len for post-gateway version validium", + )), + } + } + L1BatchCommitmentMode::Rollup => { + // For rollup the format of this token (`operatorDAInput`) is: + // 32 bytes - `state_diff_hash` + // 32 bytes - hash of the full pubdata + // 1 byte - number of blobs + // 32 bytes for each blob - hashes of blobs + // 1 byte - pubdata source + // X bytes - blob/pubdata commitments + + let number_of_blobs = last_reference_token.get(64).copied().ok_or_else(|| { + parse_error(format!( + "last reference token is too short; expected at least 65 bytes, got {}", + last_reference_token.len() + )) + })? as usize; + + match last_reference_token.get(65 + 32 * number_of_blobs) { + Some(&byte) if byte == PUBDATA_SOURCE_CALLDATA => Ok(PubdataSendingMode::Calldata), + Some(&byte) if byte == PUBDATA_SOURCE_BLOBS => Ok(PubdataSendingMode::Blobs), + Some(&byte) => Err(parse_error(format!( + "unexpected first byte of the last reference token for rollup; expected one of [{PUBDATA_SOURCE_CALLDATA}, {PUBDATA_SOURCE_BLOBS}], \ got {byte}" + ))), + None => Err(parse_error(format!("last reference token is too short; expected at least 65 bytes, got {}", last_reference_token.len()))), + } + } + } } +#[derive(Debug)] +pub struct SLChainData { + client: Box<DynClient<L1>>, + chain_id: SLChainId, + diamond_proxy_addr: Option<Address>, +} + #[derive(Debug)] pub struct ConsistencyChecker { /// ABI of the ZKsync contract contract: ethabi::Contract, - /// Address of the ZKsync diamond proxy on L1 - diamond_proxy_addr: Option<Address>
, /// How many past batches to check when starting max_batches_to_recheck: u32, sleep_interval: Duration, - l1_client: Box<DynClient<L1>>, + l1_chain_data: SLChainData, + gateway_chain_data: Option<SLChainData>, event_handler: Box<dyn HandleConsistencyCheckerEvent>, l1_data_mismatch_behavior: L1DataMismatchBehavior, pool: ConnectionPool<Core>, @@ -322,19 +389,43 @@ impl ConsistencyChecker { const DEFAULT_SLEEP_INTERVAL: Duration = Duration::from_secs(5); - pub fn new( + pub async fn new( l1_client: Box<DynClient<L1>>, + gateway_client: Option<Box<DynClient<L2>>>, max_batches_to_recheck: u32, pool: ConnectionPool<Core>, commitment_mode: L1BatchCommitmentMode, + l2_chain_id: L2ChainId, ) -> anyhow::Result<Self> { let (health_check, health_updater) = ConsistencyCheckerHealthUpdater::new(); + let l1_chain_id = l1_client.fetch_chain_id().await?; + let l1_chain_data = SLChainData { + client: l1_client.for_component("consistency_checker"), + chain_id: l1_chain_id, + diamond_proxy_addr: None, + }; + + let gateway_chain_data = if let Some(client) = gateway_client { + let gateway_diamond_proxy = + CallFunctionArgs::new("getZKChain", Token::Uint(l2_chain_id.as_u64().into())) + .for_contract(L2_BRIDGEHUB_ADDRESS, &bridgehub_contract()) + .call(&client) + .await?; + let chain_id = client.fetch_chain_id().await?; + Some(SLChainData { + client: client.for_component("consistency_checker"), + chain_id, + diamond_proxy_addr: Some(gateway_diamond_proxy), + }) + } else { + None + }; Ok(Self { contract: zksync_contracts::hyperchain_contract(), - diamond_proxy_addr: None, max_batches_to_recheck, sleep_interval: Self::DEFAULT_SLEEP_INTERVAL, - l1_client: l1_client.for_component("consistency_checker"), + l1_chain_data, + gateway_chain_data, event_handler: Box::new(health_updater), l1_data_mismatch_behavior: L1DataMismatchBehavior::Log, pool, @@ -343,8 +434,8 @@ impl ConsistencyChecker { }) } - pub fn with_diamond_proxy_addr(mut self, address: Address) -> Self { - self.diamond_proxy_addr = Some(address); + pub fn with_l1_diamond_proxy_addr(mut self, address: Address) -> Self { + self.l1_chain_data.diamond_proxy_addr = Some(address); self } @@ -361,11 +452,36 @@ impl ConsistencyChecker { let commit_tx_hash = local.commit_tx_hash; tracing::info!("Checking commit tx {commit_tx_hash} for L1 batch #{batch_number}"); - let commit_tx_status = self - .l1_client + let sl_chain_id = self + .pool + .connection_tagged("consistency_checker") + .await + .map_err(|err| CheckError::Internal(err.into()))? + .eth_sender_dal() + .get_batch_commit_chain_id(batch_number) + .await + .map_err(|err| CheckError::Internal(err.into()))?; + let chain_data = match sl_chain_id { + Some(chain_id) => { + let Some(chain_data) = self.chain_data_by_id(chain_id) else { + return Err(CheckError::Validation(anyhow::anyhow!( + "failed to find client for chain id {chain_id}" + ))); + }; + chain_data + } + None => &self.l1_chain_data, + }; + let commit_tx_status = chain_data + .client .get_tx_status(commit_tx_hash) .await? - .with_context(|| format!("receipt for tx {commit_tx_hash:?} not found on L1")) + .with_context(|| { + format!( + "receipt for tx {commit_tx_hash:?} not found on target chain with id {}", + chain_data.chain_id + ) + }) .map_err(CheckError::Validation)?; if !commit_tx_status.success { let err = anyhow::anyhow!("main node gave us a failed commit tx {commit_tx_hash:?}"); @@ -373,14 +489,14 @@ } // We can't get tx calldata from the DB because it can be fake. - let commit_tx = self - .l1_client + let commit_tx = chain_data + .client .get_tx(commit_tx_hash) .await?
.with_context(|| format!("commit transaction {commit_tx_hash:?} not found on L1")) .map_err(CheckError::Internal)?; // we've got a transaction receipt previously, thus an internal error - if let Some(diamond_proxy_addr) = self.diamond_proxy_addr { + if let Some(diamond_proxy_addr) = chain_data.diamond_proxy_addr { let event = self .contract .event("BlockCommit") @@ -423,10 +539,9 @@ impl ConsistencyChecker { let commit_function = if local.is_pre_boojum() { &*PRE_BOOJUM_COMMIT_FUNCTION } else if local.is_pre_shared_bridge() { - self.contract - .function("commitBatches") - .context("L1 contract does not have `commitBatches` function") - .map_err(CheckError::Internal)? + &*POST_BOOJUM_COMMIT_FUNCTION + } else if local.is_pre_gateway() { + &*POST_SHARED_BRIDGE_COMMIT_FUNCTION } else { self.contract .function("commitBatchesSharedBridge") @@ -434,12 +549,16 @@ impl ConsistencyChecker { .map_err(CheckError::Internal)? }; - let commitment = - Self::extract_commit_data(&commit_tx.input.0, commit_function, batch_number) - .with_context(|| { - format!("failed extracting commit data for transaction {commit_tx_hash:?}") - }) - .map_err(CheckError::Validation)?; + let commitment = Self::extract_commit_data( + &commit_tx.input.0, + commit_function, + batch_number, + local.is_pre_gateway(), + ) + .with_context(|| { + format!("failed extracting commit data for transaction {commit_tx_hash:?}") + }) + .map_err(CheckError::Validation)?; local .verify_commitment(&commitment) .map_err(CheckError::Validation) @@ -450,6 +569,7 @@ impl ConsistencyChecker { commit_tx_input_data: &[u8], commit_function: ðabi::Function, batch_number: L1BatchNumber, + pre_gateway: bool, ) -> anyhow::Result { let expected_solidity_selector = commit_function.short_signature(); let actual_solidity_selector = &commit_tx_input_data[..4]; @@ -461,11 +581,45 @@ impl ConsistencyChecker { let mut commit_input_tokens = commit_function .decode_input(&commit_tx_input_data[4..]) .context("Failed decoding calldata for L1 commit function")?; - let mut commitments = commit_input_tokens - .pop() - .context("Unexpected signature for L1 commit function")? - .into_array() - .context("Unexpected signature for L1 commit function")?; + let mut commitments: Vec; + if pre_gateway { + commitments = commit_input_tokens + .pop() + .context("Unexpected signature for L1 commit function")? 
+ .into_array()
+ .context("Unexpected signature for L1 commit function")?;
+ } else {
+ let commitments_popped = commit_input_tokens
+ .pop()
+ .context("Unexpected signature for L1 commit function: no tokens")?;
+ let commitment_bytes = match commitments_popped {
+ Token::Bytes(arr) => arr,
+ _ => anyhow::bail!(
+ "Unexpected signature for L1 commit function: last token is not bytes"
+ ),
+ };
+ let (version, encoded_data) = commitment_bytes.split_at(1);
+ anyhow::ensure!(
+ version[0] == SUPPORTED_ENCODING_VERSION,
+ "Unexpected encoding version: {}",
+ version[0]
+ );
+ let decoded_data = ethabi::decode(
+ &[
+ StoredBatchInfo::schema(),
+ ParamType::Array(Box::new(CommitBatchInfo::post_gateway_schema())),
+ ],
+ encoded_data,
+ )
+ .context("Failed to decode commitData")?;
+ if let Some(Token::Array(batch_commitments)) = &decoded_data.get(1) {
+ // The first decoded token is the `StoredBatchInfo` of the previously committed
+ // batch; the second is the array of batch commitments extracted here.
+ commitments = batch_commitments.clone();
+ } else {
+ anyhow::bail!("Unexpected data format");
+ }
+ }
 // Commit transactions usually publish multiple commitments at once, so we need to find
 // the one that corresponds to the batch we're checking.
 let first_batch_commitment = commitments
 .first()
 .context("L1 batch commitment is empty")?;
 let ethabi::Token::Tuple(first_batch_commitment) = first_batch_commitment else {
- anyhow::bail!("Unexpected signature for L1 commit function");
+ anyhow::bail!("Unexpected signature for L1 commit function: commitment is not a tuple");
 };
 let first_batch_number = first_batch_commitment
 .first()
- .context("Unexpected signature for L1 commit function")?;
+ .context("Unexpected signature for L1 commit function: commitment has no fields")?;
 let first_batch_number = first_batch_number
 .clone()
 .into_uint()
- .context("Unexpected signature for L1 commit function")?;
+ .context("Unexpected signature for L1 commit function: batch number is not a uint")?;
 let first_batch_number = usize::try_from(first_batch_number)
 .map_err(|_| anyhow::anyhow!("Integer overflow for L1 batch number"))?;
 // ^ `TryFrom` has `&str` error here, so we can't use `.context()`.
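For reference, the post-gateway commit data decoded above is one version byte followed by `abi.encode(stored_batch_info, batch_commitments)`. A minimal self-contained sketch of that layout, using a hypothetical `(uint256, bytes32)` tuple in place of the wider `StoredBatchInfo::schema()` / `CommitBatchInfo::post_gateway_schema()` tuples, and a placeholder encoding-version constant:

use anyhow::Context as _;
use ethabi::{ParamType, Token};

// Placeholder for the crate's `SUPPORTED_ENCODING_VERSION` constant (assumption for this sketch).
const ENCODING_VERSION: u8 = 0;

/// Decodes `[version_byte] ++ abi.encode(stored_batch_info, batch_commitments)` and
/// returns the batch commitments, mirroring the post-gateway branch above.
fn decode_versioned_commit_data(commitment_bytes: &[u8]) -> anyhow::Result<Vec<Token>> {
    anyhow::ensure!(!commitment_bytes.is_empty(), "empty commit data");
    let (version, encoded_data) = commitment_bytes.split_at(1);
    anyhow::ensure!(
        version[0] == ENCODING_VERSION,
        "unexpected encoding version: {}",
        version[0]
    );
    // Simplified stand-in for the real batch-info schemas.
    let schema = ParamType::Tuple(vec![ParamType::Uint(256), ParamType::FixedBytes(32)]);
    let mut decoded = ethabi::decode(
        &[schema.clone(), ParamType::Array(Box::new(schema))],
        encoded_data,
    )
    .context("failed to decode commit data")?;
    // `decoded` holds exactly two tokens on success; the last one is the commitments array.
    match decoded.pop() {
        Some(Token::Array(commitments)) => Ok(commitments),
        _ => anyhow::bail!("unexpected commit data format"),
    }
}

Splitting off the version byte before ABI decoding lets future protocol versions change the token schema behind a version bump without breaking the selector-level calldata format.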
@@ -511,24 +665,31 @@ impl ConsistencyChecker { } async fn sanity_check_diamond_proxy_addr(&self) -> Result<(), CheckError> { - let Some(address) = self.diamond_proxy_addr else { - return Ok(()); - }; - tracing::debug!("Performing sanity checks for diamond proxy contract {address:?}"); + for client_data in std::iter::once(&self.l1_chain_data).chain(&self.gateway_chain_data) { + let Some(address) = client_data.diamond_proxy_addr else { + continue; + }; + let chain_id = client_data.chain_id; + tracing::debug!("Performing sanity checks for chain id {chain_id}, diamond proxy contract {address:?}"); - let version: U256 = CallFunctionArgs::new("getProtocolVersion", ()) - .for_contract(address, &self.contract) - .call(&self.l1_client) - .await?; - tracing::info!("Checked diamond proxy {address:?} (protocol version: {version})"); + let version: U256 = CallFunctionArgs::new("getProtocolVersion", ()) + .for_contract(address, &self.contract) + .call(&client_data.client) + .await?; + tracing::info!("Checked chain id {chain_id}, diamond proxy {address:?} (protocol version: {version})"); + } Ok(()) } pub async fn run(mut self, mut stop_receiver: watch::Receiver) -> anyhow::Result<()> { tracing::info!( - "Starting consistency checker with diamond proxy contract: {:?}, sleep interval: {:?}, \ - max historic L1 batches to check: {}", - self.diamond_proxy_addr, + "Starting consistency checker with l1 diamond proxy contract: {:?}, \ + gateway diamond proxy contract: {:?}, \ + sleep interval: {:?}, max historic L1 batches to check: {}", + self.l1_chain_data.diamond_proxy_addr, + self.gateway_chain_data + .as_ref() + .map(|d| d.diamond_proxy_addr), self.sleep_interval, self.max_batches_to_recheck ); @@ -658,6 +819,16 @@ impl ConsistencyChecker { tracing::info!("Stop signal received, consistency_checker is shutting down"); Ok(()) } + + fn chain_data_by_id(&self, searched_chain_id: SLChainId) -> Option<&SLChainData> { + if searched_chain_id == self.l1_chain_data.chain_id { + Some(&self.l1_chain_data) + } else if Some(searched_chain_id) == self.gateway_chain_data.as_ref().map(|d| d.chain_id) { + self.gateway_chain_data.as_ref() + } else { + None + } + } } /// Repeatedly polls the DB until there is an L1 batch with metadata. 
We may not have such a batch initially diff --git a/core/node/consistency_checker/src/tests/mod.rs b/core/node/consistency_checker/src/tests/mod.rs index b09ef2b2272c..5571c3b74d9c 100644 --- a/core/node/consistency_checker/src/tests/mod.rs +++ b/core/node/consistency_checker/src/tests/mod.rs @@ -7,7 +7,7 @@ use test_casing::{test_casing, Product}; use tokio::sync::mpsc; use zksync_config::GenesisConfig; use zksync_dal::Connection; -use zksync_eth_client::{clients::MockSettlementLayer, Options}; +use zksync_eth_client::{clients::MockSettlementLayer, EthInterface, Options}; use zksync_l1_contract_interface::{i_executor::methods::CommitBatches, Tokenizable, Tokenize}; use zksync_node_genesis::{insert_genesis_batch, mock_genesis_config, GenesisParams}; use zksync_node_test_utils::{ @@ -30,9 +30,12 @@ pub(crate) fn create_l1_batch_with_metadata(number: u32) -> L1BatchWithMetadata } const PRE_BOOJUM_PROTOCOL_VERSION: ProtocolVersionId = ProtocolVersionId::Version10; -const DIAMOND_PROXY_ADDR: Address = Address::repeat_byte(1); +const L1_DIAMOND_PROXY_ADDR: Address = Address::repeat_byte(1); +const GATEWAY_DIAMOND_PROXY_ADDR: Address = Address::repeat_byte(2); const VALIDATOR_TIMELOCK_ADDR: Address = Address::repeat_byte(23); -const CHAIN_ID: u32 = 270; +const ERA_CHAIN_ID: u64 = 270; +const L1_CHAIN_ID: u64 = 9; +const GATEWAY_CHAIN_ID: u64 = 505; const COMMITMENT_MODES: [L1BatchCommitmentMode; 2] = [ L1BatchCommitmentMode::Rollup, L1BatchCommitmentMode::Validium, @@ -72,14 +75,10 @@ pub(crate) fn build_commit_tx_input_data( if protocol_version.is_pre_boojum() { PRE_BOOJUM_COMMIT_FUNCTION.encode_input(&tokens).unwrap() } else if protocol_version.is_pre_shared_bridge() { - contract - .function("commitBatches") - .unwrap() - .encode_input(&tokens) - .unwrap() + POST_BOOJUM_COMMIT_FUNCTION.encode_input(&tokens).unwrap() } else { // Post shared bridge transactions also require chain id - let tokens: Vec<_> = vec![Token::Uint(CHAIN_ID.into())] + let tokens: Vec<_> = vec![Token::Uint(ERA_CHAIN_ID.into())] .into_iter() .chain(tokens) .collect(); @@ -91,18 +90,25 @@ pub(crate) fn build_commit_tx_input_data( } } -pub(crate) fn create_mock_checker( +pub(crate) async fn create_mock_checker( client: MockSettlementLayer, pool: ConnectionPool, commitment_mode: L1BatchCommitmentMode, ) -> ConsistencyChecker { let (health_check, health_updater) = ConsistencyCheckerHealthUpdater::new(); + let client = client.into_client(); + let chain_id = client.fetch_chain_id().await.unwrap(); + let l1_chain_data = SLChainData { + client: Box::new(client), + chain_id, + diamond_proxy_addr: Some(L1_DIAMOND_PROXY_ADDR), + }; ConsistencyChecker { contract: zksync_contracts::hyperchain_contract(), - diamond_proxy_addr: Some(DIAMOND_PROXY_ADDR), max_batches_to_recheck: 100, sleep_interval: Duration::from_millis(10), - l1_client: Box::new(client.into_client()), + l1_chain_data, + gateway_chain_data: None, event_handler: Box::new(health_updater), l1_data_mismatch_behavior: L1DataMismatchBehavior::Bail, pool, @@ -111,23 +117,46 @@ pub(crate) fn create_mock_checker( } } -fn create_mock_ethereum() -> MockSettlementLayer { - let mock = MockSettlementLayer::builder().with_call_handler(|call, _block_id| { - assert_eq!(call.to, Some(DIAMOND_PROXY_ADDR)); - let packed_semver = ProtocolVersionId::latest().into_packed_semver_with_patch(0); - let contract = zksync_contracts::hyperchain_contract(); - let expected_input = contract - .function("getProtocolVersion") - .unwrap() - .encode_input(&[]) - .unwrap(); - assert_eq!(call.data, 
Some(expected_input.into())); +fn create_mock_sl(chain_id: u64, with_get_zk_chain: bool) -> MockSettlementLayer { + let mock = MockSettlementLayer::builder() + .with_call_handler(move |call, _block_id| match call.to { + Some(addr) if addr == L1_DIAMOND_PROXY_ADDR || addr == GATEWAY_DIAMOND_PROXY_ADDR => { + let packed_semver = ProtocolVersionId::latest().into_packed_semver_with_patch(0); + let contract = zksync_contracts::hyperchain_contract(); + let expected_input = contract + .function("getProtocolVersion") + .unwrap() + .encode_input(&[]) + .unwrap(); + assert_eq!(call.data, Some(expected_input.into())); - ethabi::Token::Uint(packed_semver) - }); + ethabi::Token::Uint(packed_semver) + } + Some(addr) if with_get_zk_chain && addr == L2_BRIDGEHUB_ADDRESS => { + let contract = zksync_contracts::bridgehub_contract(); + let expected_input = contract + .function("getZKChain") + .unwrap() + .encode_input(&[Token::Uint(ERA_CHAIN_ID.into())]) + .unwrap(); + assert_eq!(call.data, Some(expected_input.into())); + + ethabi::Token::Address(GATEWAY_DIAMOND_PROXY_ADDR) + } + _ => panic!("Received unexpected call"), + }) + .with_chain_id(chain_id); mock.build() } +fn create_mock_ethereum() -> MockSettlementLayer { + create_mock_sl(L1_CHAIN_ID, false) +} + +fn create_mock_gateway() -> MockSettlementLayer { + create_mock_sl(GATEWAY_CHAIN_ID, true) +} + impl HandleConsistencyCheckerEvent for mpsc::UnboundedSender { fn initialize(&mut self) { // Do nothing @@ -141,8 +170,8 @@ impl HandleConsistencyCheckerEvent for mpsc::UnboundedSender { self.send(last_checked_batch).ok(); } - fn report_inconsistent_batch(&mut self, _number: L1BatchNumber, _err: &anyhow::Error) { - // Do nothing + fn report_inconsistent_batch(&mut self, number: L1BatchNumber, err: &anyhow::Error) { + panic!("Error on batch #{number}: {err}"); } } @@ -163,6 +192,7 @@ fn build_commit_tx_input_data_is_correct(commitment_mode: L1BatchCommitmentMode) &commit_tx_input_data, commit_function, batch.header.number, + false, ) .unwrap(); assert_eq!( @@ -174,8 +204,7 @@ fn build_commit_tx_input_data_is_correct(commitment_mode: L1BatchCommitmentMode) #[test] fn extracting_commit_data_for_boojum_batch() { - let contract = zksync_contracts::hyperchain_contract(); - let commit_function = contract.function("commitBatches").unwrap(); + let commit_function = &*POST_BOOJUM_COMMIT_FUNCTION; // Calldata taken from the commit transaction for `https://sepolia.explorer.zksync.io/batch/4470`; // `https://sepolia.etherscan.io/tx/0x300b9115037028b1f8aa2177abf98148c3df95c9b04f95a4e25baf4dfee7711f` let commit_tx_input_data = include_bytes!("commit_l1_batch_4470_testnet_sepolia.calldata"); @@ -184,6 +213,7 @@ fn extracting_commit_data_for_boojum_batch() { commit_tx_input_data, commit_function, L1BatchNumber(4_470), + true, ) .unwrap(); @@ -197,6 +227,7 @@ fn extracting_commit_data_for_boojum_batch() { commit_tx_input_data, commit_function, L1BatchNumber(bogus_l1_batch), + true, ) .unwrap_err(); } @@ -204,8 +235,7 @@ fn extracting_commit_data_for_boojum_batch() { #[test] fn extracting_commit_data_for_multiple_batches() { - let contract = zksync_contracts::hyperchain_contract(); - let commit_function = contract.function("commitBatches").unwrap(); + let commit_function = &*POST_BOOJUM_COMMIT_FUNCTION; // Calldata taken from the commit transaction for `https://explorer.zksync.io/batch/351000`; // `https://etherscan.io/tx/0xbd8dfe0812df0da534eb95a2d2a4382d65a8172c0b648a147d60c1c2921227fd` let commit_tx_input_data = 
include_bytes!("commit_l1_batch_351000-351004_mainnet.calldata"); @@ -215,6 +245,7 @@ fn extracting_commit_data_for_multiple_batches() { commit_tx_input_data, commit_function, L1BatchNumber(l1_batch), + true, ) .unwrap(); @@ -229,6 +260,7 @@ fn extracting_commit_data_for_multiple_batches() { commit_tx_input_data, commit_function, L1BatchNumber(bogus_l1_batch), + true, ) .unwrap_err(); } @@ -244,6 +276,7 @@ fn extracting_commit_data_for_pre_boojum_batch() { commit_tx_input_data, &PRE_BOOJUM_COMMIT_FUNCTION, L1BatchNumber(200_000), + true, ) .unwrap(); @@ -265,6 +298,7 @@ impl SaveAction<'_> { self, storage: &mut Connection<'_, Core>, commit_tx_hash_by_l1_batch: &HashMap, + chain_id_by_l1_batch: &HashMap, ) { match self { Self::InsertBatch(l1_batch) => { @@ -291,6 +325,7 @@ impl SaveAction<'_> { } Self::InsertCommitTx(l1_batch_number) => { let commit_tx_hash = commit_tx_hash_by_l1_batch[&l1_batch_number]; + let chain_id = chain_id_by_l1_batch.get(&l1_batch_number).copied(); storage .eth_sender_dal() .insert_bogus_confirmed_eth_tx( @@ -298,6 +333,7 @@ impl SaveAction<'_> { AggregatedActionType::Commit, commit_tx_hash, chrono::Utc::now(), + chain_id, ) .await .unwrap(); @@ -367,7 +403,7 @@ fn l1_batch_commit_log(l1_batch: &L1BatchWithMetadata) -> Log { }); Log { - address: DIAMOND_PROXY_ADDR, + address: L1_DIAMOND_PROXY_ADDR, topics: vec![ *BLOCK_COMMIT_EVENT_HASH, H256::from_low_u64_be(l1_batch.header.number.0.into()), // batch number @@ -432,7 +468,7 @@ async fn normal_checker_function( let (l1_batch_updates_sender, mut l1_batch_updates_receiver) = mpsc::unbounded_channel(); let checker = ConsistencyChecker { event_handler: Box::new(l1_batch_updates_sender), - ..create_mock_checker(client, pool.clone(), commitment_mode) + ..create_mock_checker(client, pool.clone(), commitment_mode).await }; let (stop_sender, stop_receiver) = watch::channel(false); @@ -441,7 +477,112 @@ async fn normal_checker_function( // Add new batches to the storage. for save_action in save_actions_mapper(&l1_batches) { save_action - .apply(&mut storage, &commit_tx_hash_by_l1_batch) + .apply( + &mut storage, + &commit_tx_hash_by_l1_batch, + &Default::default(), + ) + .await; + tokio::time::sleep(Duration::from_millis(7)).await; + } + + // Wait until all batches are checked. + loop { + let checked_batch = l1_batch_updates_receiver.recv().await.unwrap(); + if checked_batch == l1_batches.last().unwrap().header.number { + break; + } + } + + // Send the stop signal to the checker and wait for it to stop. + stop_sender.send_replace(true); + checker_task.await.unwrap().unwrap(); +} + +#[tokio::test] +async fn checker_works_with_different_settlement_layers() { + // Use default action mapper. 
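+ // Batches are committed in pairs, alternating between the L1 client and the gateway
+ // client; the settlement-layer chain id of each batch is recorded so the checker has
+ // to resolve the correct client per batch.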
+ let save_actions_mapper = SAVE_ACTION_MAPPERS[0].1; + let commitment_mode = L1BatchCommitmentMode::Rollup; + + let pool = ConnectionPool::::test_pool().await; + let mut storage = pool.connection().await.unwrap(); + insert_genesis_batch(&mut storage, &GenesisParams::mock()) + .await + .unwrap(); + + let l1_batches: Vec<_> = (1..=10).map(create_l1_batch_with_metadata).collect(); + let mut commit_tx_hash_by_l1_batch = HashMap::with_capacity(l1_batches.len()); + let mut chain_id_by_l1_batch = HashMap::with_capacity(l1_batches.len()); + let l1_client = create_mock_ethereum(); + let gateway_client = create_mock_gateway(); + + let clients = [l1_client, gateway_client]; + let diamond_proxies = [L1_DIAMOND_PROXY_ADDR, GATEWAY_DIAMOND_PROXY_ADDR]; + + for (i, l1_batches) in l1_batches.chunks(2).enumerate() { + let client = &clients[i & 1]; + let input_data = build_commit_tx_input_data(l1_batches, commitment_mode); + let signed_tx = client.sign_prepared_tx( + input_data.clone(), + VALIDATOR_TIMELOCK_ADDR, + Options { + nonce: Some((i / 2).into()), + ..Options::default() + }, + ); + let signed_tx = signed_tx.unwrap(); + client.as_ref().send_raw_tx(signed_tx.raw_tx).await.unwrap(); + client.execute_tx(signed_tx.hash, true, 1).with_logs( + l1_batches + .iter() + .map(|batch| { + let mut log = l1_batch_commit_log(batch); + log.address = diamond_proxies[i & 1]; + log + }) + .collect(), + ); + + commit_tx_hash_by_l1_batch.extend( + l1_batches + .iter() + .map(|batch| (batch.header.number, signed_tx.hash)), + ); + let chain_id = client.as_ref().fetch_chain_id().await.unwrap(); + chain_id_by_l1_batch.extend( + l1_batches + .iter() + .map(|batch| (batch.header.number, chain_id)), + ) + } + + let (l1_batch_updates_sender, mut l1_batch_updates_receiver) = mpsc::unbounded_channel(); + let mut checker = ConsistencyChecker::new( + Box::new(clients[0].clone().into_client()), + Some(Box::new(clients[1].clone().into_client())), + 100, + pool.clone(), + commitment_mode, + L2ChainId::new(ERA_CHAIN_ID).unwrap(), + ) + .await + .unwrap(); + checker.sleep_interval = Duration::from_millis(10); + checker.event_handler = Box::new(l1_batch_updates_sender); + checker.l1_data_mismatch_behavior = L1DataMismatchBehavior::Bail; + + let (stop_sender, stop_receiver) = watch::channel(false); + let checker_task = tokio::spawn(checker.run(stop_receiver)); + + // Add new batches to the storage. + for save_action in save_actions_mapper(&l1_batches) { + save_action + .apply( + &mut storage, + &commit_tx_hash_by_l1_batch, + &chain_id_by_l1_batch, + ) .await; tokio::time::sleep(Duration::from_millis(7)).await; } @@ -515,7 +656,7 @@ async fn checker_processes_pre_boojum_batches( let (l1_batch_updates_sender, mut l1_batch_updates_receiver) = mpsc::unbounded_channel(); let checker = ConsistencyChecker { event_handler: Box::new(l1_batch_updates_sender), - ..create_mock_checker(client, pool.clone(), commitment_mode) + ..create_mock_checker(client, pool.clone(), commitment_mode).await }; let (stop_sender, stop_receiver) = watch::channel(false); @@ -524,7 +665,11 @@ async fn checker_processes_pre_boojum_batches( // Add new batches to the storage. 
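// Pre-boojum batches carry no settlement-layer chain id, hence the empty default map below.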
for save_action in save_actions_mapper(&l1_batches) { save_action - .apply(&mut storage, &commit_tx_hash_by_l1_batch) + .apply( + &mut storage, + &commit_tx_hash_by_l1_batch, + &Default::default(), + ) .await; tokio::time::sleep(Duration::from_millis(7)).await; } @@ -586,7 +731,11 @@ async fn checker_functions_after_snapshot_recovery( if !delay_batch_insertion { for &save_action in &save_actions { save_action - .apply(&mut storage, &commit_tx_hash_by_l1_batch) + .apply( + &mut storage, + &commit_tx_hash_by_l1_batch, + &Default::default(), + ) .await; } } @@ -594,7 +743,7 @@ async fn checker_functions_after_snapshot_recovery( let (l1_batch_updates_sender, mut l1_batch_updates_receiver) = mpsc::unbounded_channel(); let checker = ConsistencyChecker { event_handler: Box::new(l1_batch_updates_sender), - ..create_mock_checker(client, pool.clone(), commitment_mode) + ..create_mock_checker(client, pool.clone(), commitment_mode).await }; let (stop_sender, stop_receiver) = watch::channel(false); let checker_task = tokio::spawn(checker.run(stop_receiver)); @@ -603,7 +752,11 @@ async fn checker_functions_after_snapshot_recovery( tokio::time::sleep(Duration::from_millis(10)).await; for &save_action in &save_actions { save_action - .apply(&mut storage, &commit_tx_hash_by_l1_batch) + .apply( + &mut storage, + &commit_tx_hash_by_l1_batch, + &Default::default(), + ) .await; } } @@ -654,7 +807,7 @@ impl IncorrectDataKind { l1_batch: &L1BatchWithMetadata, commitment_mode: L1BatchCommitmentMode, ) -> H256 { - let mut log_origin = Some(DIAMOND_PROXY_ADDR); + let mut log_origin = Some(L1_DIAMOND_PROXY_ADDR); let (commit_tx_input_data, successful_status) = match self { Self::MissingStatus => { return H256::zero(); // Do not execute the transaction @@ -771,12 +924,16 @@ async fn checker_detects_incorrect_tx_data( ]; for save_action in save_actions { save_action - .apply(&mut storage, &commit_tx_hash_by_l1_batch) + .apply( + &mut storage, + &commit_tx_hash_by_l1_batch, + &Default::default(), + ) .await; } drop(storage); - let checker = create_mock_checker(client, pool, commitment_mode); + let checker = create_mock_checker(client, pool, commitment_mode).await; let (_stop_sender, stop_receiver) = watch::channel(false); // The checker must stop with an error. 
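// A healthy run would only terminate on the stop signal, so the timeout bounds the test.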
tokio::time::timeout(Duration::from_secs(30), checker.run(stop_receiver)) diff --git a/core/node/db_pruner/src/tests.rs b/core/node/db_pruner/src/tests.rs index 99fbada423dc..6faa28993e27 100644 --- a/core/node/db_pruner/src/tests.rs +++ b/core/node/db_pruner/src/tests.rs @@ -417,6 +417,7 @@ async fn mark_l1_batch_as_executed(storage: &mut Connection<'_, Core>, number: u AggregatedActionType::Execute, H256::from_low_u64_be(number.into()), chrono::Utc::now(), + None, ) .await .unwrap(); diff --git a/core/node/eth_sender/src/aggregator.rs b/core/node/eth_sender/src/aggregator.rs index 432804a21b2e..3bdbf23242f0 100644 --- a/core/node/eth_sender/src/aggregator.rs +++ b/core/node/eth_sender/src/aggregator.rs @@ -181,7 +181,10 @@ impl Aggregator { ) .await; - l1_batches.map(|l1_batches| ExecuteBatches { l1_batches }) + l1_batches.map(|l1_batches| ExecuteBatches { + l1_batches, + priority_ops_proofs: Vec::new(), + }) } async fn get_commit_operation( diff --git a/core/node/eth_sender/src/eth_tx_aggregator.rs b/core/node/eth_sender/src/eth_tx_aggregator.rs index ac9ed4aaaadb..16f20959d52e 100644 --- a/core/node/eth_sender/src/eth_tx_aggregator.rs +++ b/core/node/eth_sender/src/eth_tx_aggregator.rs @@ -380,7 +380,6 @@ impl EthTxAggregator { tracing::error!("Failed to get multicall data {err:?}"); err })?; - let contracts_are_pre_shared_bridge = protocol_version_id.is_pre_shared_bridge(); let snark_wrapper_vk_hash = self .get_snark_wrapper_vk_hash(verifier_address) @@ -422,14 +421,7 @@ impl EthTxAggregator { return Ok(()); } let is_gateway = self.settlement_mode.is_gateway(); - let tx = self - .save_eth_tx( - storage, - &agg_op, - contracts_are_pre_shared_bridge, - is_gateway, - ) - .await?; + let tx = self.save_eth_tx(storage, &agg_op, is_gateway).await?; Self::report_eth_tx_saving(storage, &agg_op, &tx).await; } Ok(()) @@ -468,19 +460,9 @@ impl EthTxAggregator { .await; } - fn encode_aggregated_op( - &self, - op: &AggregatedOperation, - contracts_are_pre_shared_bridge: bool, - ) -> TxData { - let operation_is_pre_shared_bridge = op.protocol_version().is_pre_shared_bridge(); - - // The post shared bridge contracts support pre-shared bridge operations, but vice versa is not true. 
- if contracts_are_pre_shared_bridge { - assert!(operation_is_pre_shared_bridge); - } - + fn encode_aggregated_op(&self, op: &AggregatedOperation) -> TxData { let mut args = vec![Token::Uint(self.rollup_chain_id.as_u64().into())]; + let is_op_pre_gateway = op.protocol_version().is_pre_gateway(); let (calldata, sidecar) = match op { AggregatedOperation::Commit(last_committed_l1_batch, l1_batches, pubdata_da) => { @@ -492,17 +474,12 @@ impl EthTxAggregator { }; let commit_data_base = commit_batches.into_tokens(); - let (encoding_fn, commit_data) = if contracts_are_pre_shared_bridge { - (&self.functions.pre_shared_bridge_commit, commit_data_base) + args.extend(commit_data_base); + let commit_data = args; + let encoding_fn = if is_op_pre_gateway { + &self.functions.post_shared_bridge_commit } else { - args.extend(commit_data_base); - ( - self.functions - .post_shared_bridge_commit - .as_ref() - .expect("Missing ABI for commitBatchesSharedBridge"), - args, - ) + &self.functions.post_gateway_commit }; let l1_batch_for_sidecar = @@ -515,37 +492,27 @@ impl EthTxAggregator { Self::encode_commit_data(encoding_fn, &commit_data, l1_batch_for_sidecar) } AggregatedOperation::PublishProofOnchain(op) => { - let calldata = if contracts_are_pre_shared_bridge { - self.functions - .pre_shared_bridge_prove - .encode_input(&op.into_tokens()) - .expect("Failed to encode prove transaction data") + args.extend(op.into_tokens()); + let encoding_fn = if is_op_pre_gateway { + &self.functions.post_shared_bridge_prove } else { - args.extend(op.into_tokens()); - self.functions - .post_shared_bridge_prove - .as_ref() - .expect("Missing ABI for proveBatchesSharedBridge") - .encode_input(&args) - .expect("Failed to encode prove transaction data") + &self.functions.post_gateway_prove }; + let calldata = encoding_fn + .encode_input(&args) + .expect("Failed to encode prove transaction data"); (calldata, None) } AggregatedOperation::Execute(op) => { - let calldata = if contracts_are_pre_shared_bridge { - self.functions - .pre_shared_bridge_execute - .encode_input(&op.into_tokens()) - .expect("Failed to encode execute transaction data") + args.extend(op.into_tokens()); + let encoding_fn = if is_op_pre_gateway { + &self.functions.post_shared_bridge_execute } else { - args.extend(op.into_tokens()); - self.functions - .post_shared_bridge_execute - .as_ref() - .expect("Missing ABI for executeBatchesSharedBridge") - .encode_input(&args) - .expect("Failed to encode execute transaction data") + &self.functions.post_gateway_execute }; + let calldata = encoding_fn + .encode_input(&args) + .expect("Failed to encode execute transaction data"); (calldata, None) } }; @@ -593,7 +560,6 @@ impl EthTxAggregator { &self, storage: &mut Connection<'_, Core>, aggregated_op: &AggregatedOperation, - contracts_are_pre_shared_bridge: bool, is_gateway: bool, ) -> Result { let mut transaction = storage.start_transaction().await.unwrap(); @@ -606,8 +572,7 @@ impl EthTxAggregator { (_, _) => None, }; let nonce = self.get_next_nonce(&mut transaction, sender_addr).await?; - let encoded_aggregated_op = - self.encode_aggregated_op(aggregated_op, contracts_are_pre_shared_bridge); + let encoded_aggregated_op = self.encode_aggregated_op(aggregated_op); let l1_batch_number_range = aggregated_op.l1_batch_range(); let predicted_gas_for_batches = transaction diff --git a/core/node/eth_sender/src/tester.rs b/core/node/eth_sender/src/tester.rs index 646df1dc1a7b..b62223d0d68b 100644 --- a/core/node/eth_sender/src/tester.rs +++ b/core/node/eth_sender/src/tester.rs @@ 
-414,6 +414,7 @@ impl EthSenderTester { .into_iter() .map(l1_batch_with_metadata) .collect(), + priority_ops_proofs: Vec::new(), }); self.next_l1_batch_number_to_execute += 1; self.save_operation(operation).await @@ -514,7 +515,6 @@ impl EthSenderTester { .save_eth_tx( &mut self.conn.connection().await.unwrap(), &aggregated_operation, - false, self.is_l2, ) .await diff --git a/core/node/eth_sender/src/tests.rs b/core/node/eth_sender/src/tests.rs index 8e5032a69cfc..aab6d2e43d76 100644 --- a/core/node/eth_sender/src/tests.rs +++ b/core/node/eth_sender/src/tests.rs @@ -34,6 +34,7 @@ fn get_dummy_operation(number: u32) -> AggregatedOperation { metadata: default_l1_batch_metadata(), raw_published_factory_deps: Vec::new(), }], + priority_ops_proofs: Vec::new(), }) } @@ -208,7 +209,6 @@ async fn resend_each_block(commitment_mode: L1BatchCommitmentMode) -> anyhow::Re &mut tester.conn.connection().await.unwrap(), &get_dummy_operation(0), false, - false, ) .await?; diff --git a/core/node/eth_sender/src/zksync_functions.rs b/core/node/eth_sender/src/zksync_functions.rs index 85508c71c03d..f3e4998ef37c 100644 --- a/core/node/eth_sender/src/zksync_functions.rs +++ b/core/node/eth_sender/src/zksync_functions.rs @@ -1,14 +1,19 @@ -use zksync_contracts::{hyperchain_contract, multicall_contract, verifier_contract}; +use zksync_contracts::{ + hyperchain_contract, multicall_contract, verifier_contract, POST_SHARED_BRIDGE_COMMIT_FUNCTION, + POST_SHARED_BRIDGE_EXECUTE_FUNCTION, POST_SHARED_BRIDGE_PROVE_FUNCTION, +}; use zksync_types::ethabi::{Contract, Function}; #[derive(Debug)] pub(super) struct ZkSyncFunctions { - pub(super) pre_shared_bridge_commit: Function, - pub(super) post_shared_bridge_commit: Option, - pub(super) pre_shared_bridge_prove: Function, - pub(super) post_shared_bridge_prove: Option, - pub(super) pre_shared_bridge_execute: Function, - pub(super) post_shared_bridge_execute: Option, + pub(super) post_shared_bridge_commit: Function, + pub(super) post_shared_bridge_prove: Function, + pub(super) post_shared_bridge_execute: Function, + + pub(super) post_gateway_commit: Function, + pub(super) post_gateway_prove: Function, + pub(super) post_gateway_execute: Function, + pub(super) get_l2_bootloader_bytecode_hash: Function, pub(super) get_l2_default_account_bytecode_hash: Function, pub(super) get_verifier: Function, @@ -47,15 +52,14 @@ impl Default for ZkSyncFunctions { let verifier_contract = verifier_contract(); let multicall_contract = multicall_contract(); - let pre_shared_bridge_commit = get_function(&zksync_contract, "commitBatches"); - let post_shared_bridge_commit = - get_optional_function(&zksync_contract, "commitBatchesSharedBridge"); - let pre_shared_bridge_prove = get_function(&zksync_contract, "proveBatches"); - let post_shared_bridge_prove = - get_optional_function(&zksync_contract, "proveBatchesSharedBridge"); - let pre_shared_bridge_execute = get_function(&zksync_contract, "executeBatches"); - let post_shared_bridge_execute = - get_optional_function(&zksync_contract, "executeBatchesSharedBridge"); + let post_shared_bridge_commit = POST_SHARED_BRIDGE_COMMIT_FUNCTION.clone(); + let post_shared_bridge_prove = POST_SHARED_BRIDGE_PROVE_FUNCTION.clone(); + let post_shared_bridge_execute = POST_SHARED_BRIDGE_EXECUTE_FUNCTION.clone(); + + let post_gateway_commit = get_function(&zksync_contract, "commitBatchesSharedBridge"); + let post_gateway_prove = get_function(&zksync_contract, "proveBatchesSharedBridge"); + let post_gateway_execute = get_function(&zksync_contract, 
"executeBatchesSharedBridge"); + let get_l2_bootloader_bytecode_hash = get_function(&zksync_contract, "getL2BootloaderBytecodeHash"); let get_l2_default_account_bytecode_hash = @@ -69,12 +73,12 @@ impl Default for ZkSyncFunctions { let verification_key_hash = get_function(&verifier_contract, "verificationKeyHash"); ZkSyncFunctions { - pre_shared_bridge_commit, post_shared_bridge_commit, - pre_shared_bridge_prove, post_shared_bridge_prove, - pre_shared_bridge_execute, post_shared_bridge_execute, + post_gateway_commit, + post_gateway_prove, + post_gateway_execute, get_l2_bootloader_bytecode_hash, get_l2_default_account_bytecode_hash, get_evm_emulator_bytecode_hash, diff --git a/core/node/eth_watch/Cargo.toml b/core/node/eth_watch/Cargo.toml index 985649c35daf..2a2374cef70e 100644 --- a/core/node/eth_watch/Cargo.toml +++ b/core/node/eth_watch/Cargo.toml @@ -18,6 +18,8 @@ zksync_contracts.workspace = true zksync_system_constants.workspace = true zksync_eth_client.workspace = true zksync_shared_metrics.workspace = true +zksync_mini_merkle_tree.workspace = true +zksync_web3_decl.workspace = true tokio = { workspace = true, features = ["time"] } anyhow.workspace = true @@ -25,7 +27,10 @@ thiserror.workspace = true async-trait.workspace = true tracing.workspace = true async-recursion.workspace = true +itertools.workspace = true [dev-dependencies] zksync_concurrency.workspace = true test-log.workspace = true +hex.workspace = true +bincode.workspace = true diff --git a/core/node/eth_watch/src/client.rs b/core/node/eth_watch/src/client.rs index ac5fc86c6e9f..18e49aad0813 100644 --- a/core/node/eth_watch/src/client.rs +++ b/core/node/eth_watch/src/client.rs @@ -1,21 +1,28 @@ -use std::fmt; +use std::{fmt, sync::Arc}; use anyhow::Context; use zksync_contracts::{ getters_facet_contract, state_transition_manager_contract, verifier_contract, + MESSAGE_ROOT_CONTRACT, }; use zksync_eth_client::{ clients::{DynClient, L1}, CallFunctionArgs, ClientError, ContractCallError, EnrichedClientError, EnrichedClientResult, EthInterface, }; +use zksync_system_constants::L2_MESSAGE_ROOT_ADDRESS; use zksync_types::{ + api::{ChainAggProof, Log}, ethabi::Contract, - web3::{BlockId, BlockNumber, FilterBuilder, Log}, - Address, SLChainId, H256, U256, + web3::{BlockId, BlockNumber, Filter, FilterBuilder}, + Address, L1BatchNumber, L2ChainId, SLChainId, H256, U256, U64, +}; +use zksync_web3_decl::{ + client::{Network, L2}, + namespaces::{EthNamespaceClient, UnstableNamespaceClient, ZksNamespaceClient}, }; -/// L1 client functionality used by [`EthWatch`](crate::EthWatch) and constituent event processors. +/// Common L1 and L2 client functionality used by [`EthWatch`](crate::EthWatch) and constituent event processors. #[async_trait::async_trait] pub trait EthClient: 'static + fmt::Debug + Send + Sync { /// Returns events in a given block range. @@ -27,6 +34,10 @@ pub trait EthClient: 'static + fmt::Debug + Send + Sync { topic2: Option, retries_left: usize, ) -> EnrichedClientResult>; + + /// Returns either finalized L1 block number or block number that satisfies `self.confirmations_for_eth_event` if it's set. + async fn confirmed_block_number(&self) -> EnrichedClientResult; + /// Returns finalized L1 block number. async fn finalized_block_number(&self) -> EnrichedClientResult; @@ -40,7 +51,17 @@ pub trait EthClient: 'static + fmt::Debug + Send + Sync { packed_version: H256, ) -> EnrichedClientResult>>; + /// Returns ID of the chain. 
async fn chain_id(&self) -> EnrichedClientResult; + + /// Returns chain root for `l2_chain_id` at the moment right after `block_number`. + /// `block_number` is block number on SL. + /// `l2_chain_id` is chain id of L2. + async fn get_chain_root( + &self, + block_number: U64, + l2_chain_id: L2ChainId, + ) -> Result; } pub const RETRY_LIMIT: usize = 5; @@ -50,10 +71,10 @@ const TOO_MANY_RESULTS_RETH: &str = "length limit exceeded"; const TOO_BIG_RANGE_RETH: &str = "query exceeds max block range"; const TOO_MANY_RESULTS_CHAINSTACK: &str = "range limit exceeded"; -/// Implementation of [`EthClient`] based on HTTP JSON-RPC (encapsulated via [`EthInterface`]). +/// Implementation of [`EthClient`] based on HTTP JSON-RPC. #[derive(Debug, Clone)] -pub struct EthHttpQueryClient { - client: Box>, +pub struct EthHttpQueryClient { + client: Box>, diamond_proxy_addr: Address, governance_address: Address, new_upgrade_cut_data_signature: H256, @@ -62,12 +83,16 @@ pub struct EthHttpQueryClient { chain_admin_address: Option
, verifier_contract_abi: Contract, getters_facet_contract_abi: Contract, + message_root_abi: Contract, confirmations_for_eth_event: Option<u64>, } -impl EthHttpQueryClient { +impl<Net: Network> EthHttpQueryClient<Net> +where + Box<DynClient<Net>>: GetLogsClient, +{ pub fn new( - client: Box<DynClient<L1>>, + client: Box<DynClient<Net>>, diamond_proxy_addr: Address, state_transition_manager_address: Option<Address>
, chain_admin_address: Option<Address>
, @@ -92,6 +117,7 @@ impl EthHttpQueryClient { .signature(), verifier_contract_abi: verifier_contract(), getters_facet_contract_abi: getters_facet_contract(), + message_root_abi: MESSAGE_ROOT_CONTRACT.clone(), confirmations_for_eth_event, } } @@ -102,6 +128,7 @@ impl EthHttpQueryClient { Some(self.governance_address), self.state_transition_manager_address, self.chain_admin_address, + Some(L2_MESSAGE_ROOT_ADDRESS), ] .into_iter() .flatten() @@ -126,7 +153,7 @@ impl EthHttpQueryClient { builder = builder.address(addresses); } let filter = builder.build(); - let mut result = self.client.logs(&filter).await; + let mut result = self.client.get_logs(filter).await; // This code is compatible with both Infura and Alchemy API providers. // Note: we don't handle rate-limits here - assumption is that we're never going to hit them. @@ -216,7 +243,10 @@ impl EthHttpQueryClient { } #[async_trait::async_trait] -impl EthClient for EthHttpQueryClient { +impl EthClient for EthHttpQueryClient +where + Box>: EthInterface + GetLogsClient, +{ async fn scheduler_vk_hash( &self, verifier_address: Address, @@ -274,27 +304,31 @@ impl EthClient for EthHttpQueryClient { .await } - async fn finalized_block_number(&self) -> EnrichedClientResult { + async fn confirmed_block_number(&self) -> EnrichedClientResult { if let Some(confirmations) = self.confirmations_for_eth_event { let latest_block_number = self.client.block_number().await?.as_u64(); Ok(latest_block_number.saturating_sub(confirmations)) } else { - let block = self - .client - .block(BlockId::Number(BlockNumber::Finalized)) - .await? - .ok_or_else(|| { - let err = ClientError::Custom("Finalized block must be present on L1".into()); - EnrichedClientError::new(err, "block") - })?; - let block_number = block.number.ok_or_else(|| { - let err = ClientError::Custom("Finalized block must contain number".into()); - EnrichedClientError::new(err, "block").with_arg("block", &block) - })?; - Ok(block_number.as_u64()) + self.finalized_block_number().await } } + async fn finalized_block_number(&self) -> EnrichedClientResult { + let block = self + .client + .block(BlockId::Number(BlockNumber::Finalized)) + .await? + .ok_or_else(|| { + let err = ClientError::Custom("Finalized block must be present on L1".into()); + EnrichedClientError::new(err, "block") + })?; + let block_number = block.number.ok_or_else(|| { + let err = ClientError::Custom("Finalized block must contain number".into()); + EnrichedClientError::new(err, "block").with_arg("block", &block) + })?; + Ok(block_number.as_u64()) + } + async fn get_total_priority_txs(&self) -> Result { CallFunctionArgs::new("getTotalPriorityTxs", ()) .for_contract(self.diamond_proxy_addr, &self.getters_facet_contract_abi) @@ -304,6 +338,157 @@ impl EthClient for EthHttpQueryClient { } async fn chain_id(&self) -> EnrichedClientResult { - Ok(self.client.fetch_chain_id().await?) + self.client.fetch_chain_id().await + } + + async fn get_chain_root( + &self, + block_number: U64, + l2_chain_id: L2ChainId, + ) -> Result { + CallFunctionArgs::new("getChainRoot", U256::from(l2_chain_id.as_u64())) + .with_block(BlockId::Number(block_number.into())) + .for_contract(L2_MESSAGE_ROOT_ADDRESS, &self.message_root_abi) + .call(&self.client) + .await + } +} + +/// Encapsulates `eth_getLogs` calls. +#[async_trait::async_trait] +pub trait GetLogsClient: 'static + fmt::Debug + Send + Sync { + /// Returns L2 version of [`Log`] with L2-specific fields, e.g. `l1_batch_number`. + /// L1 clients fill such fields with `None`. 
+ async fn get_logs(&self, filter: Filter) -> EnrichedClientResult<Vec<Log>>;
+}
+
+#[async_trait::async_trait]
+impl GetLogsClient for Box<DynClient<L1>> {
+ async fn get_logs(&self, filter: Filter) -> EnrichedClientResult<Vec<Log>> {
+ Ok(self
+ .logs(&filter)
+ .await?
+ .into_iter()
+ .map(Into::into)
+ .collect())
+ }
+}
+
+#[async_trait::async_trait]
+impl GetLogsClient for Box<DynClient<L2>> {
+ async fn get_logs(&self, filter: Filter) -> EnrichedClientResult<Vec<Log>> {
+ EthNamespaceClient::get_logs(self, filter.into())
+ .await
+ .map_err(|err| EnrichedClientError::new(err, "eth_getLogs"))
+ }
+}
+
+/// L2 client functionality used by [`EthWatch`](crate::EthWatch) and constituent event processors.
+/// Trait extension for [`EthClient`].
+#[async_trait::async_trait]
+pub trait L2EthClient: EthClient {
+ async fn get_chain_log_proof(
+ &self,
+ l1_batch_number: L1BatchNumber,
+ chain_id: L2ChainId,
+ ) -> EnrichedClientResult<Option<ChainAggProof>>;
+
+ async fn get_chain_root_l2(
+ &self,
+ l1_batch_number: L1BatchNumber,
+ l2_chain_id: L2ChainId,
+ ) -> Result<Option<H256>, ContractCallError>;
+}
+
+#[async_trait::async_trait]
+impl L2EthClient for EthHttpQueryClient<L2> {
+ async fn get_chain_log_proof(
+ &self,
+ l1_batch_number: L1BatchNumber,
+ chain_id: L2ChainId,
+ ) -> EnrichedClientResult<Option<ChainAggProof>> {
+ self.client
+ .get_chain_log_proof(l1_batch_number, chain_id)
+ .await
+ .map_err(|err| EnrichedClientError::new(err, "unstable_getChainLogProof"))
+ }
+
+ async fn get_chain_root_l2(
+ &self,
+ l1_batch_number: L1BatchNumber,
+ l2_chain_id: L2ChainId,
+ ) -> Result<Option<H256>, ContractCallError> {
+ let l2_block_range = self
+ .client
+ .get_l2_block_range(l1_batch_number)
+ .await
+ .map_err(|err| EnrichedClientError::new(err, "zks_getL1BatchBlockRange"))?;
+ if let Some((_, l2_block_number)) = l2_block_range {
+ self.get_chain_root(l2_block_number, l2_chain_id)
+ .await
+ .map(Some)
+ } else {
+ Ok(None)
+ }
+ }
+}
+
+/// Wrapper for L2 client object.
+/// It is used for L2EthClient -> EthClient dyn upcasting coercion:
+/// Arc<dyn L2EthClient> -> L2EthClientW -> Arc<dyn EthClient>
+#[derive(Debug, Clone)]
+pub struct L2EthClientW(pub Arc<dyn L2EthClient>);
+
+#[async_trait::async_trait]
+impl EthClient for L2EthClientW {
+ async fn get_events(
+ &self,
+ from: BlockNumber,
+ to: BlockNumber,
+ topic1: H256,
+ topic2: Option<H256>,
+ retries_left: usize,
+ ) -> EnrichedClientResult<Vec<Log>> {
+ self.0
+ .get_events(from, to, topic1, topic2, retries_left)
+ .await
+ }
+
+ async fn confirmed_block_number(&self) -> EnrichedClientResult<u64> {
+ self.0.confirmed_block_number().await
+ }
+
+ async fn finalized_block_number(&self) -> EnrichedClientResult<u64> {
+ self.0.finalized_block_number().await
+ }
+
+ async fn get_total_priority_txs(&self) -> Result<u64, ContractCallError> {
+ self.0.get_total_priority_txs().await
+ }
+
+ async fn scheduler_vk_hash(
+ &self,
+ verifier_address: Address,
+ ) -> Result<H256, ContractCallError> {
+ self.0.scheduler_vk_hash(verifier_address).await
+ }
+
+ async fn diamond_cut_by_version(
+ &self,
+ packed_version: H256,
+ ) -> EnrichedClientResult<Option<Vec<u8>>> {
+ self.0.diamond_cut_by_version(packed_version).await
+ }
+
+ async fn chain_id(&self) -> EnrichedClientResult<SLChainId> {
+ self.0.chain_id().await
+ }
+
+ async fn get_chain_root(
+ &self,
+ block_number: U64,
+ l2_chain_id: L2ChainId,
+ ) -> Result<H256, ContractCallError> {
+ self.0.get_chain_root(block_number, l2_chain_id).await
+ }
+} diff --git a/core/node/eth_watch/src/event_processors/appended_chain_batch_root.rs b/core/node/eth_watch/src/event_processors/appended_chain_batch_root.rs new file mode 100644 index 000000000000..79b0d9a19f05 --- /dev/null +++ b/core/node/eth_watch/src/event_processors/appended_chain_batch_root.rs @@ -0,0 +1,236 @@ +use std::sync::Arc;
+
+use anyhow::Context;
+use itertools::Itertools;
+use zksync_dal::{eth_watcher_dal::EventType, Connection, Core, CoreDal, DalError};
+use zksync_mini_merkle_tree::MiniMerkleTree;
+use zksync_types::{
+ api::{ChainAggProof, Log},
+ ethabi, h256_to_u256,
+ l2_to_l1_log::{
+ BatchAndChainMerklePath, BATCH_LEAF_PADDING, LOG_PROOF_SUPPORTED_METADATA_VERSION,
+ },
+ u256_to_h256, L1BatchNumber, L2ChainId, SLChainId, H256, U256,
+};
+
+use crate::{
+ client::L2EthClient,
+ event_processors::{EventProcessor, EventProcessorError, EventsSource},
+};
+
+/// Responsible for `AppendedChainBatchRoot` events and saving `BatchAndChainMerklePath` for batches.
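+/// Maintains an in-memory `MiniMerkleTree` over batch-leaf preimages (leaf padding, batch root,
+/// batch number) that mirrors the chain's aggregation tree on the settlement layer, and persists
+/// for each processed batch its Merkle path combined with the chain-level proof vector.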
+#[derive(Debug)] +pub struct BatchRootProcessor { + next_batch_number_lower_bound: L1BatchNumber, + appended_chain_batch_root_signature: H256, + merkle_tree: MiniMerkleTree<[u8; 96]>, + l2_chain_id: L2ChainId, + sl_l2_client: Arc, +} + +impl BatchRootProcessor { + pub fn new( + next_batch_number_lower_bound: L1BatchNumber, + merkle_tree: MiniMerkleTree<[u8; 96]>, + l2_chain_id: L2ChainId, + sl_l2_client: Arc, + ) -> Self { + Self { + next_batch_number_lower_bound, + appended_chain_batch_root_signature: ethabi::long_signature( + "AppendedChainBatchRoot", + &[ + ethabi::ParamType::Uint(256), + ethabi::ParamType::Uint(256), + ethabi::ParamType::FixedBytes(32), + ], + ), + merkle_tree, + l2_chain_id, + sl_l2_client, + } + } +} + +#[async_trait::async_trait] +impl EventProcessor for BatchRootProcessor { + async fn process_events( + &mut self, + storage: &mut Connection<'_, Core>, + events: Vec, + ) -> Result { + let events_count = events.len(); + let mut transaction = storage + .start_transaction() + .await + .map_err(DalError::generalize)?; + + let grouped_events: Vec<_> = events + .into_iter() + .map(|log| { + let sl_l1_batch_number = L1BatchNumber( + log.l1_batch_number + .expect("Missing L1 batch number for finalized event") + .as_u32(), + ); + let chain_l1_batch_number = L1BatchNumber(h256_to_u256(log.topics[2]).as_u32()); + let logs_root_hash = H256::from_slice(&log.data.0); + + (sl_l1_batch_number, chain_l1_batch_number, logs_root_hash) + }) + .group_by(|(sl_l1_batch_number, _, _)| *sl_l1_batch_number) + .into_iter() + .map(|(sl_l1_batch_number, group)| { + let group: Vec<_> = group + .into_iter() + .map(|(_, chain_l1_batch_number, logs_root_hash)| { + (chain_l1_batch_number, logs_root_hash) + }) + .collect(); + + (sl_l1_batch_number, group) + }) + .collect(); + + let next_batch_number_lower_bound = self.next_batch_number_lower_bound; + let new_events = grouped_events + .into_iter() + .skip_while(|(_sl_l1_batch_number, events)| { + let first_event = events.first().unwrap(); + let last_event = events.last().unwrap(); + + match ( + first_event.0 < next_batch_number_lower_bound, + last_event.0 < next_batch_number_lower_bound, + ) { + (true, true) => true, // skip + (false, false) => false, // do not skip + _ => { + panic!("batch range was partially processed"); + } + } + }); + + let sl_chain_id = self.sl_l2_client.chain_id().await?; + for (sl_l1_batch_number, chain_batches) in new_events { + let chain_agg_proof = self + .sl_l2_client + .get_chain_log_proof(sl_l1_batch_number, self.l2_chain_id) + .await? + .context("Missing chain log proof for finalized batch")?; + let chain_proof_vector = + Self::chain_proof_vector(sl_l1_batch_number, chain_agg_proof, sl_chain_id); + + for (batch_number, batch_root) in &chain_batches { + let root_from_db = transaction + .blocks_dal() + .get_l1_batch_l2_l1_merkle_root(*batch_number) + .await + .map_err(DalError::generalize)? 
+ .context("Missing l2_l1_merkle_root for finalized batch")?; + assert_eq!(root_from_db, *batch_root); + + self.merkle_tree + .push(Self::batch_leaf_preimage(*batch_root, *batch_number)); + self.next_batch_number_lower_bound = *batch_number + 1; + } + + let chain_root_local = self.merkle_tree.merkle_root(); + let chain_root_remote = self + .sl_l2_client + .get_chain_root_l2(sl_l1_batch_number, self.l2_chain_id) + .await?; + assert_eq!( + chain_root_local, + chain_root_remote.unwrap(), + "Chain root mismatch, l1 batch number #{sl_l1_batch_number}" + ); + + let number_of_leaves = self.merkle_tree.length(); + let batch_proofs = (0..chain_batches.len()).map(|i| { + let leaf_position = number_of_leaves - chain_batches.len() + i; + let batch_proof = self + .merkle_tree + .merkle_root_and_path_by_absolute_index(leaf_position) + .1; + let batch_proof_len = batch_proof.len() as u32; + let mut proof = vec![H256::from_low_u64_be(leaf_position as u64)]; + proof.extend(batch_proof); + proof.extend(chain_proof_vector.clone()); + + BatchAndChainMerklePath { + batch_proof_len, + proof, + } + }); + + for ((batch_number, _), proof) in chain_batches.iter().zip(batch_proofs) { + tracing::info!(%batch_number, "Saving batch-chain merkle path"); + transaction + .blocks_dal() + .set_batch_chain_merkle_path(*batch_number, proof) + .await + .map_err(DalError::generalize)?; + } + } + + transaction.commit().await.map_err(DalError::generalize)?; + + Ok(events_count) + } + + fn topic1(&self) -> H256 { + self.appended_chain_batch_root_signature + } + + fn topic2(&self) -> Option { + Some(H256::from_low_u64_be(self.l2_chain_id.as_u64())) + } + + fn event_source(&self) -> EventsSource { + EventsSource::SL + } + + fn event_type(&self) -> EventType { + EventType::ChainBatchRoot + } + + fn only_finalized_block(&self) -> bool { + true + } +} + +impl BatchRootProcessor { + pub(crate) fn batch_leaf_preimage(batch_root: H256, batch_number: L1BatchNumber) -> [u8; 96] { + let mut full_preimage = [0u8; 96]; + + full_preimage[0..32].copy_from_slice(BATCH_LEAF_PADDING.as_bytes()); + full_preimage[32..64].copy_from_slice(batch_root.as_bytes()); + full_preimage[64..96] + .copy_from_slice(H256::from_low_u64_be(batch_number.0 as u64).as_bytes()); + + full_preimage + } + + fn chain_proof_vector( + sl_l1_batch_number: L1BatchNumber, + chain_agg_proof: ChainAggProof, + sl_chain_id: SLChainId, + ) -> Vec { + let sl_encoded_data = U256::from(sl_l1_batch_number.0) * U256::from(2).pow(128.into()) + + chain_agg_proof.chain_id_leaf_proof_mask; + + let mut metadata = [0u8; 32]; + metadata[0] = LOG_PROOF_SUPPORTED_METADATA_VERSION; + metadata[1] = chain_agg_proof.chain_id_leaf_proof.len() as u8; + + let mut chain_proof_vector = vec![ + u256_to_h256(sl_encoded_data), + H256::from_low_u64_be(sl_chain_id.0), + H256(metadata), + ]; + chain_proof_vector.extend(chain_agg_proof.chain_id_leaf_proof); + + chain_proof_vector + } +} diff --git a/core/node/eth_watch/src/event_processors/decentralized_upgrades.rs b/core/node/eth_watch/src/event_processors/decentralized_upgrades.rs index aa43e7239f88..3f4b0f3cf5ab 100644 --- a/core/node/eth_watch/src/event_processors/decentralized_upgrades.rs +++ b/core/node/eth_watch/src/event_processors/decentralized_upgrades.rs @@ -1,7 +1,9 @@ +use std::sync::Arc; + use anyhow::Context as _; use zksync_dal::{eth_watcher_dal::EventType, Connection, Core, CoreDal, DalError}; use zksync_types::{ - ethabi::Contract, protocol_version::ProtocolSemanticVersion, web3::Log, ProtocolUpgrade, H256, + api::Log, ethabi::Contract, 
protocol_version::ProtocolSemanticVersion, ProtocolUpgrade, H256, U256, }; @@ -17,12 +19,14 @@ pub struct DecentralizedUpgradesEventProcessor { /// Last protocol version seen. Used to skip events for already known upgrade proposals. last_seen_protocol_version: ProtocolSemanticVersion, update_upgrade_timestamp_signature: H256, + sl_client: Arc, } impl DecentralizedUpgradesEventProcessor { pub fn new( last_seen_protocol_version: ProtocolSemanticVersion, chain_admin_contract: &Contract, + sl_client: Arc, ) -> Self { Self { last_seen_protocol_version, @@ -31,6 +35,7 @@ impl DecentralizedUpgradesEventProcessor { .context("UpdateUpgradeTimestamp event is missing in ABI") .unwrap() .signature(), + sl_client, } } } @@ -40,7 +45,6 @@ impl EventProcessor for DecentralizedUpgradesEventProcessor { async fn process_events( &mut self, storage: &mut Connection<'_, Core>, - sl_client: &dyn EthClient, events: Vec, ) -> Result { let mut upgrades = Vec::new(); @@ -51,7 +55,8 @@ impl EventProcessor for DecentralizedUpgradesEventProcessor { .ok() .context("upgrade timestamp is too big")?; - let diamond_cut = sl_client + let diamond_cut = self + .sl_client .diamond_cut_by_version(version) .await? .context("missing upgrade data on STM")?; @@ -62,7 +67,7 @@ impl EventProcessor for DecentralizedUpgradesEventProcessor { }; // Scheduler VK is not present in proposal event. It is hard coded in verifier contract. let scheduler_vk_hash = if let Some(address) = upgrade.verifier_address { - Some(sl_client.scheduler_vk_hash(address).await?) + Some(self.sl_client.scheduler_vk_hash(address).await?) } else { None }; @@ -128,7 +133,7 @@ impl EventProcessor for DecentralizedUpgradesEventProcessor { Ok(events.len()) } - fn relevant_topic(&self) -> H256 { + fn topic1(&self) -> H256 { self.update_upgrade_timestamp_signature } diff --git a/core/node/eth_watch/src/event_processors/mod.rs b/core/node/eth_watch/src/event_processors/mod.rs index f145181b0cf9..ddbf84e65937 100644 --- a/core/node/eth_watch/src/event_processors/mod.rs +++ b/core/node/eth_watch/src/event_processors/mod.rs @@ -2,16 +2,17 @@ use std::fmt; use zksync_dal::{eth_watcher_dal::EventType, Connection, Core}; use zksync_eth_client::{ContractCallError, EnrichedClientError}; -use zksync_types::{web3::Log, H256}; +use zksync_types::{api::Log, H256}; pub(crate) use self::{ + appended_chain_batch_root::BatchRootProcessor, decentralized_upgrades::DecentralizedUpgradesEventProcessor, priority_ops::PriorityOpsEventProcessor, }; -use crate::client::EthClient; +mod appended_chain_batch_root; mod decentralized_upgrades; -pub mod priority_ops; +mod priority_ops; /// Errors issued by an [`EventProcessor`]. #[derive(Debug, thiserror::Error)] @@ -50,19 +51,28 @@ impl EventProcessorError { /// feeds events to all processors one-by-one. #[async_trait::async_trait] pub(super) trait EventProcessor: 'static + fmt::Debug + Send + Sync { - /// Processes given events. All events are guaranteed to match [`Self::relevant_topic()`]. + /// Processes given events. All events are guaranteed to match [`Self::topic1()`] and [`Self::topic2()`]. /// Returns number of processed events, this result is used to update last processed block. 
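 /// If fewer events are processed than were supplied, the remainder of the block range is
 /// re-fetched and retried on the next polling iteration.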
async fn process_events( &mut self, storage: &mut Connection<'_, Core>, - sl_client: &dyn EthClient, events: Vec, ) -> Result; - /// Relevant topic which defines what events to be processed - fn relevant_topic(&self) -> H256; + /// Relevant topic1 which defines what events to be processed + fn topic1(&self) -> H256; + + /// Relevant topic2 which defines what events to be processed + fn topic2(&self) -> Option { + None + } fn event_source(&self) -> EventsSource; fn event_type(&self) -> EventType; + + /// Whether processor expect events only from finalized blocks. + fn only_finalized_block(&self) -> bool { + false + } } diff --git a/core/node/eth_watch/src/event_processors/priority_ops.rs b/core/node/eth_watch/src/event_processors/priority_ops.rs index 051c076850e9..cbb224da6396 100644 --- a/core/node/eth_watch/src/event_processors/priority_ops.rs +++ b/core/node/eth_watch/src/event_processors/priority_ops.rs @@ -1,10 +1,10 @@ -use std::convert::TryFrom; +use std::{convert::TryFrom, sync::Arc}; use anyhow::Context; use zksync_contracts::hyperchain_contract; use zksync_dal::{eth_watcher_dal::EventType, Connection, Core, CoreDal, DalError}; use zksync_shared_metrics::{TxStage, APP_METRICS}; -use zksync_types::{l1::L1Tx, web3::Log, PriorityOpId, H256}; +use zksync_types::{api::Log, l1::L1Tx, PriorityOpId, H256}; use crate::{ client::EthClient, @@ -17,16 +17,21 @@ use crate::{ pub struct PriorityOpsEventProcessor { next_expected_priority_id: PriorityOpId, new_priority_request_signature: H256, + sl_client: Arc, } impl PriorityOpsEventProcessor { - pub fn new(next_expected_priority_id: PriorityOpId) -> anyhow::Result { + pub fn new( + next_expected_priority_id: PriorityOpId, + sl_client: Arc, + ) -> anyhow::Result { Ok(Self { next_expected_priority_id, new_priority_request_signature: hyperchain_contract() .event("NewPriorityRequest") .context("NewPriorityRequest event is missing in ABI")? .signature(), + sl_client, }) } } @@ -36,14 +41,13 @@ impl EventProcessor for PriorityOpsEventProcessor { async fn process_events( &mut self, storage: &mut Connection<'_, Core>, - sl_client: &dyn EthClient, events: Vec, ) -> Result { let mut priority_ops = Vec::new(); let events_count = events.len(); for event in events { assert_eq!(event.topics[0], self.new_priority_request_signature); // guaranteed by the watcher - let tx = L1Tx::try_from(event) + let tx = L1Tx::try_from(Into::::into(event)) .map_err(|err| EventProcessorError::log_parse(err, "priority op"))?; priority_ops.push(tx); } @@ -84,7 +88,7 @@ impl EventProcessor for PriorityOpsEventProcessor { let stage_latency = METRICS.poll_eth_node[&PollStage::PersistL1Txs].start(); APP_METRICS.processed_txs[&TxStage::added_to_mempool()].inc(); APP_METRICS.processed_l1_txs[&TxStage::added_to_mempool()].inc(); - let processed_priority_transactions = sl_client.get_total_priority_txs().await?; + let processed_priority_transactions = self.sl_client.get_total_priority_txs().await?; let ops_to_insert: Vec<&L1Tx> = new_ops .iter() .take_while(|op| processed_priority_transactions > op.serial_id().0) @@ -105,7 +109,7 @@ impl EventProcessor for PriorityOpsEventProcessor { Ok(skipped_ops + ops_to_insert.len()) } - fn relevant_topic(&self) -> H256 { + fn topic1(&self) -> H256 { self.new_priority_request_signature } diff --git a/core/node/eth_watch/src/lib.rs b/core/node/eth_watch/src/lib.rs index 4185878d2ac4..908ff4da37f1 100644 --- a/core/node/eth_watch/src/lib.rs +++ b/core/node/eth_watch/src/lib.rs @@ -2,24 +2,27 @@ //! protocol upgrades etc. //! 
diff --git a/core/node/eth_watch/src/lib.rs b/core/node/eth_watch/src/lib.rs
index 4185878d2ac4..908ff4da37f1 100644
--- a/core/node/eth_watch/src/lib.rs
+++ b/core/node/eth_watch/src/lib.rs
@@ -2,24 +2,27 @@
 //! protocol upgrades etc.
 //! New events are accepted to the ZKsync network once they have a sufficient number of L1 confirmations.
 
-use std::time::Duration;
+use std::{sync::Arc, time::Duration};
 
 use anyhow::Context as _;
 use tokio::sync::watch;
 use zksync_dal::{Connection, ConnectionPool, Core, CoreDal, DalError};
+use zksync_mini_merkle_tree::MiniMerkleTree;
 use zksync_system_constants::PRIORITY_EXPIRATION;
 use zksync_types::{
     ethabi::Contract,
     protocol_version::ProtocolSemanticVersion,
-    web3::BlockNumber as Web3BlockNumber, PriorityOpId,
+    web3::BlockNumber as Web3BlockNumber, L1BatchNumber, L2ChainId, PriorityOpId,
 };
 
-pub use self::client::EthHttpQueryClient;
+pub use self::client::{EthClient, EthHttpQueryClient, L2EthClient};
 use self::{
-    client::{EthClient, RETRY_LIMIT},
+    client::{L2EthClientW, RETRY_LIMIT},
     event_processors::{EventProcessor, EventProcessorError, PriorityOpsEventProcessor},
     metrics::METRICS,
 };
-use crate::event_processors::{DecentralizedUpgradesEventProcessor, EventsSource};
+use crate::event_processors::{
+    BatchRootProcessor, DecentralizedUpgradesEventProcessor, EventsSource,
+};
 
 mod client;
 mod event_processors;
@@ -31,42 +34,63 @@ mod tests;
 struct EthWatchState {
     last_seen_protocol_version: ProtocolSemanticVersion,
     next_expected_priority_id: PriorityOpId,
+    chain_batch_root_number_lower_bound: L1BatchNumber,
+    batch_merkle_tree: MiniMerkleTree<[u8; 96]>,
 }
 
 /// Ethereum watcher component.
 #[derive(Debug)]
 pub struct EthWatch {
-    l1_client: Box<dyn EthClient>,
-    sl_client: Box<dyn EthClient>,
+    l1_client: Arc<dyn EthClient>,
+    sl_client: Arc<dyn EthClient>,
     poll_interval: Duration,
     event_processors: Vec<Box<dyn EventProcessor>>,
     pool: ConnectionPool<Core>,
 }
 
 impl EthWatch {
+    #[allow(clippy::too_many_arguments)]
     pub async fn new(
         chain_admin_contract: &Contract,
         l1_client: Box<dyn EthClient>,
-        sl_client: Box<dyn EthClient>,
+        sl_l2_client: Option<Box<dyn L2EthClient>>,
         pool: ConnectionPool<Core>,
         poll_interval: Duration,
+        chain_id: L2ChainId,
     ) -> anyhow::Result<Self> {
         let mut storage = pool.connection_tagged("eth_watch").await?;
-        let state = Self::initialize_state(&mut storage).await?;
+
+        let l1_client: Arc<dyn EthClient> = l1_client.into();
+        let sl_l2_client: Option<Arc<dyn L2EthClient>> = sl_l2_client.map(Into::into);
+        let sl_client: Arc<dyn EthClient> = if let Some(sl_l2_client) = sl_l2_client.clone() {
+            Arc::new(L2EthClientW(sl_l2_client))
+        } else {
+            l1_client.clone()
+        };
+
+        let state = Self::initialize_state(&mut storage, sl_client.as_ref()).await?;
         tracing::info!("initialized state: {state:?}");
         drop(storage);
 
         let priority_ops_processor =
-            PriorityOpsEventProcessor::new(state.next_expected_priority_id)?;
+            PriorityOpsEventProcessor::new(state.next_expected_priority_id, sl_client.clone())?;
         let decentralized_upgrades_processor = DecentralizedUpgradesEventProcessor::new(
             state.last_seen_protocol_version,
             chain_admin_contract,
+            sl_client.clone(),
         );
-        let event_processors: Vec<Box<dyn EventProcessor>> = vec![
+        let mut event_processors: Vec<Box<dyn EventProcessor>> = vec![
             Box::new(priority_ops_processor),
             Box::new(decentralized_upgrades_processor),
         ];
-
+        if let Some(sl_l2_client) = sl_l2_client {
+            let batch_root_processor = BatchRootProcessor::new(
+                state.chain_batch_root_number_lower_bound,
+                state.batch_merkle_tree,
+                chain_id,
+                sl_l2_client,
+            );
+            event_processors.push(Box::new(batch_root_processor));
+        }
         Ok(Self {
             l1_client,
             sl_client,
@@ -77,7 +101,10 @@ impl EthWatch {
     }
 
     #[tracing::instrument(name = "EthWatch::initialize_state", skip_all)]
-    async fn initialize_state(storage: &mut Connection<'_, Core>) -> anyhow::Result<EthWatchState> {
+    async fn initialize_state(
+        storage: &mut Connection<'_, Core>,
+        sl_client: &dyn EthClient,
+    ) -> anyhow::Result<EthWatchState> {
         let next_expected_priority_id: PriorityOpId = storage
             .transactions_dal()
             .last_priority_id()
             .await?
@@ -90,9 +117,26 @@ impl EthWatch {
             .await?
             .context("expected at least one (genesis) version to be present in DB")?;
 
+        let sl_chain_id = sl_client.chain_id().await?;
+        let batch_hashes = storage
+            .blocks_dal()
+            .get_executed_batch_roots_on_sl(sl_chain_id)
+            .await?;
+
+        let chain_batch_root_number_lower_bound = batch_hashes
+            .last()
+            .map(|(n, _)| *n + 1)
+            .unwrap_or(L1BatchNumber(0));
+
+        let tree_leaves = batch_hashes.into_iter().map(|(batch_number, batch_root)| {
+            BatchRootProcessor::batch_leaf_preimage(batch_root, batch_number)
+        });
+        let batch_merkle_tree = MiniMerkleTree::new(tree_leaves, None);
+
         Ok(EthWatchState {
             next_expected_priority_id,
             last_seen_protocol_version,
+            chain_batch_root_number_lower_bound,
+            batch_merkle_tree,
         })
     }
 
@@ -137,37 +181,42 @@ impl EthWatch {
             EventsSource::SL => self.sl_client.as_ref(),
         };
         let chain_id = client.chain_id().await?;
-        let finalized_block = client.finalized_block_number().await?;
+        let to_block = if processor.only_finalized_block() {
+            client.finalized_block_number().await?
+        } else {
+            client.confirmed_block_number().await?
+        };
 
         let from_block = storage
             .eth_watcher_dal()
             .get_or_set_next_block_to_process(
                 processor.event_type(),
                 chain_id,
-                finalized_block.saturating_sub(PRIORITY_EXPIRATION),
+                to_block.saturating_sub(PRIORITY_EXPIRATION),
             )
             .await
             .map_err(DalError::generalize)?;
 
         // There are no new blocks, so there is nothing to be done.
-        if from_block > finalized_block {
+        if from_block > to_block {
             continue;
         }
+
         let processor_events = client
             .get_events(
                 Web3BlockNumber::Number(from_block.into()),
-                Web3BlockNumber::Number(finalized_block.into()),
-                processor.relevant_topic(),
-                None,
+                Web3BlockNumber::Number(to_block.into()),
+                processor.topic1(),
+                processor.topic2(),
                 RETRY_LIMIT,
             )
             .await?;
         let processed_events_count = processor
-            .process_events(storage, &*self.sl_client, processor_events.clone())
+            .process_events(storage, processor_events.clone())
            .await?;
 
         let next_block_to_process = if processed_events_count == processor_events.len() {
-            finalized_block + 1
+            to_block + 1
         } else if processed_events_count == 0 {
             // nothing was processed
             from_block
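On startup, `initialize_state` above rebuilds the in-memory batch Merkle tree from the executed batch roots stored in the DB and sets the lower bound to one past the last known batch. A self-contained sketch of that derivation, with `u32` batch numbers, 32-byte roots, and a shortened leaf preimage standing in for `L1BatchNumber`, `H256`, and the real 96-byte `BatchRootProcessor::batch_leaf_preimage`:

```rust
// Stand-in leaf preimage: root bytes followed by the big-endian batch number.
// The real preimage is 96 bytes; this sketch uses 36 to stay self-contained.
fn batch_leaf_preimage(root: [u8; 32], number: u32) -> [u8; 36] {
    let mut leaf = [0u8; 36];
    leaf[..32].copy_from_slice(&root);
    leaf[32..].copy_from_slice(&number.to_be_bytes());
    leaf
}

fn main() {
    // (batch number, executed batch root) pairs loaded from the DB, in order.
    let batch_hashes: Vec<(u32, [u8; 32])> = vec![(0, [0xaa; 32]), (1, [0xbb; 32])];

    // Lower bound: one past the last executed batch, or 0 for a fresh chain.
    let lower_bound = batch_hashes.last().map(|(n, _)| *n + 1).unwrap_or(0);
    assert_eq!(lower_bound, 2);

    // The leaves seed a Merkle tree so later appends continue from this state.
    let leaves: Vec<[u8; 36]> = batch_hashes
        .into_iter()
        .map(|(n, root)| batch_leaf_preimage(root, n))
        .collect();
    assert_eq!(leaves.len(), 2);
}
```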
diff --git a/core/node/eth_watch/src/tests.rs b/core/node/eth_watch/src/tests.rs
deleted file mode 100644
index 12ac8bdbf3f7..000000000000
--- a/core/node/eth_watch/src/tests.rs
+++ /dev/null
@@ -1,791 +0,0 @@
-use std::{collections::HashMap, convert::TryInto, sync::Arc};
-
-use tokio::sync::RwLock;
-use zksync_contracts::{
-    chain_admin_contract, hyperchain_contract, state_transition_manager_contract,
-};
-use zksync_dal::{Connection, ConnectionPool, Core, CoreDal};
-use zksync_eth_client::{ContractCallError, EnrichedClientResult};
-use zksync_types::{
-    abi,
-    abi::ProposedUpgrade,
-    ethabi,
-    ethabi::Token,
-    l1::{L1Tx, OpProcessingType, PriorityQueueType},
-    protocol_upgrade::{ProtocolUpgradeTx, ProtocolUpgradeTxCommonData},
-    protocol_version::ProtocolSemanticVersion,
-    web3::{contract::Tokenizable, BlockNumber, Log},
-    Address, Execute, L1TxCommonData, PriorityOpId, ProtocolUpgrade, ProtocolVersion,
-    ProtocolVersionId, SLChainId, Transaction, H160, H256, U256, U64,
-};
-
-use crate::{
-    client::{EthClient, RETRY_LIMIT},
-    EthWatch,
-};
-
-#[derive(Debug)]
-struct FakeEthClientData {
-    transactions: HashMap<u64, Vec<Log>>,
-    diamond_upgrades: HashMap<u64, Vec<Log>>,
-    upgrade_timestamp: HashMap<u64, Vec<Log>>,
-    last_finalized_block_number: u64,
-    chain_id: SLChainId,
-    processed_priority_transactions_count: u64,
-}
-
-impl FakeEthClientData {
-    fn new(chain_id: SLChainId) ->
Self { - Self { - transactions: Default::default(), - diamond_upgrades: Default::default(), - upgrade_timestamp: Default::default(), - last_finalized_block_number: 0, - chain_id, - processed_priority_transactions_count: 0, - } - } - - fn add_transactions(&mut self, transactions: &[L1Tx]) { - for transaction in transactions { - let eth_block = transaction.eth_block(); - self.transactions - .entry(eth_block.0 as u64) - .or_default() - .push(tx_into_log(transaction.clone())); - self.processed_priority_transactions_count += 1; - } - } - - fn add_upgrade_timestamp(&mut self, upgrades: &[(ProtocolUpgrade, u64)]) { - for (upgrade, eth_block) in upgrades { - self.upgrade_timestamp - .entry(*eth_block) - .or_default() - .push(upgrade_timestamp_log(*eth_block)); - self.diamond_upgrades - .entry(*eth_block) - .or_default() - .push(diamond_upgrade_log(upgrade.clone(), *eth_block)); - } - } - - fn set_last_finalized_block_number(&mut self, number: u64) { - self.last_finalized_block_number = number; - } - - fn set_processed_priority_transactions_count(&mut self, number: u64) { - self.processed_priority_transactions_count = number; - } -} - -#[derive(Debug, Clone)] -struct MockEthClient { - inner: Arc>, -} - -impl MockEthClient { - fn new(chain_id: SLChainId) -> Self { - Self { - inner: Arc::new(RwLock::new(FakeEthClientData::new(chain_id))), - } - } - - async fn add_transactions(&mut self, transactions: &[L1Tx]) { - self.inner.write().await.add_transactions(transactions); - } - - async fn add_upgrade_timestamp(&mut self, upgrades: &[(ProtocolUpgrade, u64)]) { - self.inner.write().await.add_upgrade_timestamp(upgrades); - } - - async fn set_last_finalized_block_number(&mut self, number: u64) { - self.inner - .write() - .await - .set_last_finalized_block_number(number); - } - - async fn set_processed_priority_transactions_count(&mut self, number: u64) { - self.inner - .write() - .await - .set_processed_priority_transactions_count(number) - } - - async fn block_to_number(&self, block: BlockNumber) -> u64 { - match block { - BlockNumber::Earliest => 0, - BlockNumber::Number(number) => number.as_u64(), - BlockNumber::Pending - | BlockNumber::Latest - | BlockNumber::Finalized - | BlockNumber::Safe => unreachable!(), - } - } -} - -#[async_trait::async_trait] -impl EthClient for MockEthClient { - async fn get_events( - &self, - from: BlockNumber, - to: BlockNumber, - topic1: H256, - topic2: Option, - _retries_left: usize, - ) -> EnrichedClientResult> { - let from = self.block_to_number(from).await; - let to = self.block_to_number(to).await; - let mut logs = vec![]; - for number in from..=to { - if let Some(ops) = self.inner.read().await.transactions.get(&number) { - logs.extend_from_slice(ops); - } - if let Some(ops) = self.inner.read().await.diamond_upgrades.get(&number) { - logs.extend_from_slice(ops); - } - if let Some(ops) = self.inner.read().await.upgrade_timestamp.get(&number) { - logs.extend_from_slice(ops); - } - } - Ok(logs - .into_iter() - .filter(|log| { - log.topics.first() == Some(&topic1) - && (topic2.is_none() || log.topics.get(1) == topic2.as_ref()) - }) - .collect()) - } - - async fn scheduler_vk_hash( - &self, - _verifier_address: Address, - ) -> Result { - Ok(H256::zero()) - } - - async fn finalized_block_number(&self) -> EnrichedClientResult { - Ok(self.inner.read().await.last_finalized_block_number) - } - - async fn diamond_cut_by_version( - &self, - packed_version: H256, - ) -> EnrichedClientResult>> { - let from_block = *self - .inner - .read() - .await - .diamond_upgrades - .keys() - 
.min() - .unwrap_or(&0); - let to_block = *self - .inner - .read() - .await - .diamond_upgrades - .keys() - .max() - .unwrap_or(&0); - - let logs = self - .get_events( - U64::from(from_block).into(), - U64::from(to_block).into(), - state_transition_manager_contract() - .event("NewUpgradeCutData") - .unwrap() - .signature(), - Some(packed_version), - RETRY_LIMIT, - ) - .await?; - - Ok(logs.into_iter().next().map(|log| log.data.0)) - } - - async fn get_total_priority_txs(&self) -> Result { - Ok(self - .inner - .read() - .await - .processed_priority_transactions_count) - } - - async fn chain_id(&self) -> EnrichedClientResult { - Ok(self.inner.read().await.chain_id) - } -} - -fn build_l1_tx(serial_id: u64, eth_block: u64) -> L1Tx { - let tx = L1Tx { - execute: Execute { - contract_address: Some(Address::repeat_byte(0x11)), - calldata: vec![1, 2, 3], - factory_deps: vec![], - value: U256::zero(), - }, - common_data: L1TxCommonData { - serial_id: PriorityOpId(serial_id), - sender: [1u8; 20].into(), - eth_block, - gas_limit: Default::default(), - max_fee_per_gas: Default::default(), - gas_per_pubdata_limit: 1u32.into(), - full_fee: Default::default(), - layer_2_tip_fee: U256::from(10u8), - refund_recipient: Address::zero(), - to_mint: Default::default(), - priority_queue_type: PriorityQueueType::Deque, - op_processing_type: OpProcessingType::Common, - canonical_tx_hash: H256::default(), - }, - received_timestamp_ms: 0, - }; - // Convert to abi::Transaction and back, so that canonical_tx_hash is computed. - let tx = Transaction::from_abi( - abi::Transaction::try_from(Transaction::from(tx)).unwrap(), - false, - ) - .unwrap(); - tx.try_into().unwrap() -} - -fn build_upgrade_tx(id: ProtocolVersionId, eth_block: u64) -> ProtocolUpgradeTx { - let tx = ProtocolUpgradeTx { - execute: Execute { - contract_address: Some(Address::repeat_byte(0x11)), - calldata: vec![1, 2, 3], - factory_deps: vec![], - value: U256::zero(), - }, - common_data: ProtocolUpgradeTxCommonData { - upgrade_id: id, - sender: [1u8; 20].into(), - eth_block, - gas_limit: Default::default(), - max_fee_per_gas: Default::default(), - gas_per_pubdata_limit: 1u32.into(), - refund_recipient: Address::zero(), - to_mint: Default::default(), - canonical_tx_hash: H256::zero(), - }, - received_timestamp_ms: 0, - }; - // Convert to abi::Transaction and back, so that canonical_tx_hash is computed. 
- Transaction::from_abi( - abi::Transaction::try_from(Transaction::from(tx)).unwrap(), - false, - ) - .unwrap() - .try_into() - .unwrap() -} - -async fn create_test_watcher( - connection_pool: ConnectionPool, - is_gateway: bool, -) -> (EthWatch, MockEthClient, MockEthClient) { - let l1_client = MockEthClient::new(SLChainId(42)); - let sl_client = if is_gateway { - MockEthClient::new(SLChainId(123)) - } else { - l1_client.clone() - }; - let watcher = EthWatch::new( - &chain_admin_contract(), - Box::new(l1_client.clone()), - Box::new(sl_client.clone()), - connection_pool, - std::time::Duration::from_nanos(1), - ) - .await - .unwrap(); - - (watcher, l1_client, sl_client) -} - -async fn create_l1_test_watcher( - connection_pool: ConnectionPool, -) -> (EthWatch, MockEthClient) { - let (watcher, l1_client, _) = create_test_watcher(connection_pool, false).await; - (watcher, l1_client) -} - -async fn create_gateway_test_watcher( - connection_pool: ConnectionPool, -) -> (EthWatch, MockEthClient, MockEthClient) { - create_test_watcher(connection_pool, true).await -} - -#[test_log::test(tokio::test)] -async fn test_normal_operation_l1_txs() { - let connection_pool = ConnectionPool::::test_pool().await; - setup_db(&connection_pool).await; - let (mut watcher, mut client) = create_l1_test_watcher(connection_pool.clone()).await; - - let mut storage = connection_pool.connection().await.unwrap(); - client - .add_transactions(&[build_l1_tx(0, 10), build_l1_tx(1, 14), build_l1_tx(2, 18)]) - .await; - client.set_last_finalized_block_number(15).await; - // second tx will not be processed, as it's block is not finalized yet. - watcher.loop_iteration(&mut storage).await.unwrap(); - let db_txs = get_all_db_txs(&mut storage).await; - let mut db_txs: Vec = db_txs - .into_iter() - .map(|tx| tx.try_into().unwrap()) - .collect(); - db_txs.sort_by_key(|tx| tx.common_data.serial_id); - assert_eq!(db_txs.len(), 2); - let db_tx = db_txs[0].clone(); - assert_eq!(db_tx.common_data.serial_id.0, 0); - let db_tx = db_txs[1].clone(); - assert_eq!(db_tx.common_data.serial_id.0, 1); - - client.set_last_finalized_block_number(20).await; - // now the second tx will be processed - watcher.loop_iteration(&mut storage).await.unwrap(); - let db_txs = get_all_db_txs(&mut storage).await; - let mut db_txs: Vec = db_txs - .into_iter() - .map(|tx| tx.try_into().unwrap()) - .collect(); - db_txs.sort_by_key(|tx| tx.common_data.serial_id); - assert_eq!(db_txs.len(), 3); - let db_tx = db_txs[2].clone(); - assert_eq!(db_tx.common_data.serial_id.0, 2); -} - -#[test_log::test(tokio::test)] -async fn test_gap_in_upgrade_timestamp() { - let connection_pool = ConnectionPool::::test_pool().await; - setup_db(&connection_pool).await; - let (mut watcher, mut client) = create_l1_test_watcher(connection_pool.clone()).await; - - let mut storage = connection_pool.connection().await.unwrap(); - client - .add_upgrade_timestamp(&[( - ProtocolUpgrade { - version: ProtocolSemanticVersion { - minor: ProtocolVersionId::next(), - patch: 0.into(), - }, - tx: None, - ..Default::default() - }, - 10, - )]) - .await; - client.set_last_finalized_block_number(15).await; - watcher.loop_iteration(&mut storage).await.unwrap(); - - let db_versions = storage.protocol_versions_dal().all_versions().await; - // there should be genesis version and just added version - assert_eq!(db_versions.len(), 2); - - let previous_version = (ProtocolVersionId::latest() as u16 - 1).try_into().unwrap(); - let next_version = ProtocolVersionId::next(); - assert_eq!(db_versions[0].minor, 
previous_version); - assert_eq!(db_versions[1].minor, next_version); -} - -#[test_log::test(tokio::test)] -async fn test_normal_operation_upgrade_timestamp() { - zksync_concurrency::testonly::abort_on_panic(); - let connection_pool = ConnectionPool::::test_pool().await; - setup_db(&connection_pool).await; - - let mut client = MockEthClient::new(SLChainId(42)); - let mut watcher = EthWatch::new( - &chain_admin_contract(), - Box::new(client.clone()), - Box::new(client.clone()), - connection_pool.clone(), - std::time::Duration::from_nanos(1), - ) - .await - .unwrap(); - - let mut storage = connection_pool.connection().await.unwrap(); - client - .add_upgrade_timestamp(&[ - ( - ProtocolUpgrade { - tx: None, - ..Default::default() - }, - 10, - ), - ( - ProtocolUpgrade { - version: ProtocolSemanticVersion { - minor: ProtocolVersionId::next(), - patch: 0.into(), - }, - tx: Some(build_upgrade_tx(ProtocolVersionId::next(), 18)), - ..Default::default() - }, - 18, - ), - ( - ProtocolUpgrade { - version: ProtocolSemanticVersion { - minor: ProtocolVersionId::next(), - patch: 1.into(), - }, - tx: None, - ..Default::default() - }, - 19, - ), - ]) - .await; - client.set_last_finalized_block_number(15).await; - // The second upgrade will not be processed, as it has less than 5 confirmations. - watcher.loop_iteration(&mut storage).await.unwrap(); - - let db_versions = storage.protocol_versions_dal().all_versions().await; - // There should be genesis version and just added version. - assert_eq!(db_versions.len(), 2); - assert_eq!(db_versions[1].minor, ProtocolVersionId::latest()); - - client.set_last_finalized_block_number(20).await; - // Now the second and the third upgrades will be processed. - watcher.loop_iteration(&mut storage).await.unwrap(); - let db_versions = storage.protocol_versions_dal().all_versions().await; - let mut expected_version = ProtocolSemanticVersion { - minor: ProtocolVersionId::next(), - patch: 0.into(), - }; - assert_eq!(db_versions.len(), 4); - assert_eq!(db_versions[2], expected_version); - expected_version.patch += 1; - assert_eq!(db_versions[3], expected_version); - - // Check that tx was saved with the second upgrade. 
- let tx = storage - .protocol_versions_dal() - .get_protocol_upgrade_tx(ProtocolVersionId::next()) - .await - .unwrap() - .expect("no protocol upgrade transaction"); - assert_eq!(tx.common_data.upgrade_id, ProtocolVersionId::next()); -} - -#[test_log::test(tokio::test)] -#[should_panic] -async fn test_gap_in_single_batch() { - let connection_pool = ConnectionPool::::test_pool().await; - setup_db(&connection_pool).await; - let (mut watcher, mut client) = create_l1_test_watcher(connection_pool.clone()).await; - - let mut storage = connection_pool.connection().await.unwrap(); - client - .add_transactions(&[ - build_l1_tx(0, 10), - build_l1_tx(1, 14), - build_l1_tx(2, 14), - build_l1_tx(3, 14), - build_l1_tx(5, 14), - ]) - .await; - client.set_last_finalized_block_number(15).await; - watcher.loop_iteration(&mut storage).await.unwrap(); -} - -#[test_log::test(tokio::test)] -#[should_panic] -async fn test_gap_between_batches() { - let connection_pool = ConnectionPool::::test_pool().await; - setup_db(&connection_pool).await; - let (mut watcher, mut client) = create_l1_test_watcher(connection_pool.clone()).await; - - let mut storage = connection_pool.connection().await.unwrap(); - client - .add_transactions(&[ - // this goes to the first batch - build_l1_tx(0, 10), - build_l1_tx(1, 14), - build_l1_tx(2, 14), - // this goes to the second batch - build_l1_tx(4, 20), - build_l1_tx(5, 22), - ]) - .await; - client.set_last_finalized_block_number(15).await; - watcher.loop_iteration(&mut storage).await.unwrap(); - - let db_txs = get_all_db_txs(&mut storage).await; - assert_eq!(db_txs.len(), 3); - client.set_last_finalized_block_number(25).await; - watcher.loop_iteration(&mut storage).await.unwrap(); -} - -#[test_log::test(tokio::test)] -async fn test_overlapping_batches() { - zksync_concurrency::testonly::abort_on_panic(); - let connection_pool = ConnectionPool::::test_pool().await; - setup_db(&connection_pool).await; - let (mut watcher, mut client) = create_l1_test_watcher(connection_pool.clone()).await; - - let mut storage = connection_pool.connection().await.unwrap(); - client - .add_transactions(&[ - // this goes to the first batch - build_l1_tx(0, 10), - build_l1_tx(1, 14), - build_l1_tx(2, 14), - // this goes to the second batch - build_l1_tx(1, 20), - build_l1_tx(2, 22), - build_l1_tx(3, 23), - build_l1_tx(4, 23), - ]) - .await; - client.set_last_finalized_block_number(15).await; - watcher.loop_iteration(&mut storage).await.unwrap(); - - let db_txs = get_all_db_txs(&mut storage).await; - assert_eq!(db_txs.len(), 3); - - client.set_last_finalized_block_number(25).await; - watcher.loop_iteration(&mut storage).await.unwrap(); - - let db_txs = get_all_db_txs(&mut storage).await; - assert_eq!(db_txs.len(), 5); - let mut db_txs: Vec = db_txs - .into_iter() - .map(|tx| tx.try_into().unwrap()) - .collect(); - db_txs.sort_by_key(|tx| tx.common_data.serial_id); - let tx = db_txs[2].clone(); - assert_eq!(tx.common_data.serial_id.0, 2); - let tx = db_txs[4].clone(); - assert_eq!(tx.common_data.serial_id.0, 4); -} - -#[test_log::test(tokio::test)] -async fn test_transactions_get_gradually_processed_by_gateway() { - zksync_concurrency::testonly::abort_on_panic(); - let connection_pool = ConnectionPool::::test_pool().await; - setup_db(&connection_pool).await; - let (mut watcher, mut l1_client, mut gateway_client) = - create_gateway_test_watcher(connection_pool.clone()).await; - - let mut storage = connection_pool.connection().await.unwrap(); - l1_client - .add_transactions(&[ - build_l1_tx(0, 10), - 
build_l1_tx(1, 14), - build_l1_tx(2, 14), - build_l1_tx(3, 20), - build_l1_tx(4, 22), - ]) - .await; - l1_client.set_last_finalized_block_number(15).await; - gateway_client - .set_processed_priority_transactions_count(2) - .await; - watcher.loop_iteration(&mut storage).await.unwrap(); - - let db_txs = get_all_db_txs(&mut storage).await; - assert_eq!(db_txs.len(), 2); - - l1_client.set_last_finalized_block_number(25).await; - gateway_client - .set_processed_priority_transactions_count(4) - .await; - watcher.loop_iteration(&mut storage).await.unwrap(); - - let db_txs = get_all_db_txs(&mut storage).await; - assert_eq!(db_txs.len(), 4); - let mut db_txs: Vec = db_txs - .into_iter() - .map(|tx| tx.try_into().unwrap()) - .collect(); - db_txs.sort_by_key(|tx| tx.common_data.serial_id); - let tx = db_txs[2].clone(); - assert_eq!(tx.common_data.serial_id.0, 2); - let tx = db_txs[3].clone(); - assert_eq!(tx.common_data.serial_id.0, 3); -} - -async fn get_all_db_txs(storage: &mut Connection<'_, Core>) -> Vec { - storage.transactions_dal().reset_mempool().await.unwrap(); - storage - .transactions_dal() - .sync_mempool(&[], &[], 0, 0, 1000) - .await - .unwrap() - .into_iter() - .map(|x| x.0) - .collect() -} - -fn tx_into_log(tx: L1Tx) -> Log { - let tx = abi::Transaction::try_from(Transaction::from(tx)).unwrap(); - let abi::Transaction::L1 { - tx, - factory_deps, - eth_block, - .. - } = tx - else { - unreachable!() - }; - - let data = ethabi::encode( - &abi::NewPriorityRequest { - tx_id: tx.nonce, - tx_hash: tx.hash().into(), - expiration_timestamp: u64::MAX, - transaction: tx, - factory_deps, - } - .encode(), - ); - - Log { - address: Address::repeat_byte(0x1), - topics: vec![hyperchain_contract() - .event("NewPriorityRequest") - .expect("NewPriorityRequest event is missing in abi") - .signature()], - data: data.into(), - block_hash: Some(H256::repeat_byte(0x11)), - block_number: Some(eth_block.into()), - transaction_hash: Some(H256::default()), - transaction_index: Some(0u64.into()), - log_index: Some(0u64.into()), - transaction_log_index: Some(0u64.into()), - log_type: None, - removed: None, - block_timestamp: None, - } -} - -fn init_calldata(protocol_upgrade: ProtocolUpgrade) -> Vec { - let upgrade_token = upgrade_into_diamond_cut(protocol_upgrade); - - let encoded_params = ethabi::encode(&[upgrade_token]); - - let execute_upgrade_selector = hyperchain_contract() - .function("executeUpgrade") - .unwrap() - .short_signature(); - - // Concatenate the function selector with the encoded parameters - let mut calldata = Vec::with_capacity(4 + encoded_params.len()); - calldata.extend_from_slice(&execute_upgrade_selector); - calldata.extend_from_slice(&encoded_params); - - calldata -} - -fn diamond_upgrade_log(upgrade: ProtocolUpgrade, eth_block: u64) -> Log { - // struct DiamondCutData { - // FacetCut[] facetCuts; - // address initAddress; - // bytes initCalldata; - // } - let final_data = ethabi::encode(&[Token::Tuple(vec![ - Token::Array(vec![]), - Token::Address(H160::zero()), - Token::Bytes(init_calldata(upgrade.clone())), - ])]); - tracing::info!("{:?}", Token::Bytes(init_calldata(upgrade))); - - Log { - address: Address::repeat_byte(0x1), - topics: vec![ - state_transition_manager_contract() - .event("NewUpgradeCutData") - .unwrap() - .signature(), - H256::from_low_u64_be(eth_block), - ], - data: final_data.into(), - block_hash: Some(H256::repeat_byte(0x11)), - block_number: Some(eth_block.into()), - transaction_hash: Some(H256::random()), - transaction_index: Some(0u64.into()), - log_index: 
Some(0u64.into()), - transaction_log_index: Some(0u64.into()), - log_type: None, - removed: None, - block_timestamp: None, - } -} -fn upgrade_timestamp_log(eth_block: u64) -> Log { - let final_data = ethabi::encode(&[U256::from(12345).into_token()]); - - Log { - address: Address::repeat_byte(0x1), - topics: vec![ - chain_admin_contract() - .event("UpdateUpgradeTimestamp") - .expect("UpdateUpgradeTimestamp event is missing in ABI") - .signature(), - H256::from_low_u64_be(eth_block), - ], - data: final_data.into(), - block_hash: Some(H256::repeat_byte(0x11)), - block_number: Some(eth_block.into()), - transaction_hash: Some(H256::random()), - transaction_index: Some(0u64.into()), - log_index: Some(0u64.into()), - transaction_log_index: Some(0u64.into()), - log_type: None, - removed: None, - block_timestamp: None, - } -} - -fn upgrade_into_diamond_cut(upgrade: ProtocolUpgrade) -> Token { - let abi::Transaction::L1 { - tx, factory_deps, .. - } = upgrade - .tx - .map(|tx| Transaction::from(tx).try_into().unwrap()) - .unwrap_or(abi::Transaction::L1 { - tx: Default::default(), - factory_deps: vec![], - eth_block: 0, - }) - else { - unreachable!() - }; - ProposedUpgrade { - l2_protocol_upgrade_tx: tx, - factory_deps, - bootloader_hash: upgrade.bootloader_code_hash.unwrap_or_default().into(), - default_account_hash: upgrade.default_account_code_hash.unwrap_or_default().into(), - verifier: upgrade.verifier_address.unwrap_or_default(), - verifier_params: upgrade.verifier_params.unwrap_or_default().into(), - l1_contracts_upgrade_calldata: vec![], - post_upgrade_calldata: vec![], - upgrade_timestamp: upgrade.timestamp.into(), - new_protocol_version: upgrade.version.pack(), - } - .encode() -} - -async fn setup_db(connection_pool: &ConnectionPool) { - connection_pool - .connection() - .await - .unwrap() - .protocol_versions_dal() - .save_protocol_version_with_tx(&ProtocolVersion { - version: ProtocolSemanticVersion { - minor: (ProtocolVersionId::latest() as u16 - 1).try_into().unwrap(), - patch: 0.into(), - }, - ..Default::default() - }) - .await - .unwrap(); -} diff --git a/core/node/eth_watch/src/tests/client.rs b/core/node/eth_watch/src/tests/client.rs new file mode 100644 index 000000000000..04825f22d815 --- /dev/null +++ b/core/node/eth_watch/src/tests/client.rs @@ -0,0 +1,487 @@ +use std::{collections::HashMap, convert::TryInto, sync::Arc}; + +use tokio::sync::RwLock; +use zksync_contracts::{ + chain_admin_contract, hyperchain_contract, state_transition_manager_contract, +}; +use zksync_eth_client::{ContractCallError, EnrichedClientResult}; +use zksync_types::{ + abi, + abi::ProposedUpgrade, + api::{ChainAggProof, Log}, + ethabi, + ethabi::Token, + l1::L1Tx, + u256_to_h256, + web3::{contract::Tokenizable, BlockNumber}, + Address, L1BatchNumber, L2ChainId, ProtocolUpgrade, SLChainId, Transaction, H256, U256, U64, +}; + +use crate::client::{EthClient, L2EthClient, RETRY_LIMIT}; + +#[derive(Debug)] +pub struct FakeEthClientData { + transactions: HashMap>, + diamond_upgrades: HashMap>, + upgrade_timestamp: HashMap>, + last_finalized_block_number: u64, + chain_id: SLChainId, + processed_priority_transactions_count: u64, + chain_log_proofs: HashMap, + batch_roots: HashMap>, + chain_roots: HashMap, +} + +impl FakeEthClientData { + fn new(chain_id: SLChainId) -> Self { + Self { + transactions: Default::default(), + diamond_upgrades: Default::default(), + upgrade_timestamp: Default::default(), + last_finalized_block_number: 0, + chain_id, + processed_priority_transactions_count: 0, + chain_log_proofs: 
Default::default(), + batch_roots: Default::default(), + chain_roots: Default::default(), + } + } + + fn add_transactions(&mut self, transactions: &[L1Tx]) { + for transaction in transactions { + let eth_block = transaction.eth_block(); + self.transactions + .entry(eth_block.0 as u64) + .or_default() + .push(tx_into_log(transaction.clone())); + self.processed_priority_transactions_count += 1; + } + } + + fn add_upgrade_timestamp(&mut self, upgrades: &[(ProtocolUpgrade, u64)]) { + for (upgrade, eth_block) in upgrades { + self.upgrade_timestamp + .entry(*eth_block) + .or_default() + .push(upgrade_timestamp_log(*eth_block)); + self.diamond_upgrades + .entry(*eth_block) + .or_default() + .push(diamond_upgrade_log(upgrade.clone(), *eth_block)); + } + } + + fn set_last_finalized_block_number(&mut self, number: u64) { + self.last_finalized_block_number = number; + } + + fn set_processed_priority_transactions_count(&mut self, number: u64) { + self.processed_priority_transactions_count = number; + } + + fn add_batch_roots(&mut self, batch_roots: &[(u64, u64, H256)]) { + for (sl_block, l2_batch_number, batch_root) in batch_roots { + self.batch_roots + .entry(*sl_block) + .or_default() + .push(batch_root_to_log(*sl_block, *l2_batch_number, *batch_root)); + } + } + + fn add_chain_roots(&mut self, chain_roots: &[(u64, H256)]) { + for (batch, root) in chain_roots { + self.chain_roots.insert(*batch, *root); + } + } + + fn add_chain_log_proofs(&mut self, chain_log_proofs: Vec<(L1BatchNumber, ChainAggProof)>) { + for (batch, proof) in chain_log_proofs { + self.chain_log_proofs.insert(batch, proof); + } + } +} + +#[derive(Debug, Clone)] +pub struct MockEthClient { + inner: Arc>, +} + +impl MockEthClient { + pub fn new(chain_id: SLChainId) -> Self { + Self { + inner: Arc::new(RwLock::new(FakeEthClientData::new(chain_id))), + } + } + + pub async fn add_transactions(&mut self, transactions: &[L1Tx]) { + self.inner.write().await.add_transactions(transactions); + } + + pub async fn add_upgrade_timestamp(&mut self, upgrades: &[(ProtocolUpgrade, u64)]) { + self.inner.write().await.add_upgrade_timestamp(upgrades); + } + + pub async fn set_last_finalized_block_number(&mut self, number: u64) { + self.inner + .write() + .await + .set_last_finalized_block_number(number); + } + + pub async fn set_processed_priority_transactions_count(&mut self, number: u64) { + self.inner + .write() + .await + .set_processed_priority_transactions_count(number) + } + + pub async fn block_to_number(&self, block: BlockNumber) -> u64 { + match block { + BlockNumber::Earliest => 0, + BlockNumber::Number(number) => number.as_u64(), + BlockNumber::Pending + | BlockNumber::Latest + | BlockNumber::Finalized + | BlockNumber::Safe => unreachable!(), + } + } + + pub async fn add_batch_roots(&mut self, batch_roots: &[(u64, u64, H256)]) { + self.inner.write().await.add_batch_roots(batch_roots); + } + + pub async fn add_chain_roots(&mut self, chain_roots: &[(u64, H256)]) { + self.inner.write().await.add_chain_roots(chain_roots); + } + + pub async fn add_chain_log_proofs( + &mut self, + chain_log_proofs: Vec<(L1BatchNumber, ChainAggProof)>, + ) { + self.inner + .write() + .await + .add_chain_log_proofs(chain_log_proofs); + } +} + +#[async_trait::async_trait] +impl EthClient for MockEthClient { + async fn get_events( + &self, + from: BlockNumber, + to: BlockNumber, + topic1: H256, + topic2: Option, + _retries_left: usize, + ) -> EnrichedClientResult> { + let from = self.block_to_number(from).await; + let to = self.block_to_number(to).await; + let mut 
logs = vec![]; + for number in from..=to { + if let Some(ops) = self.inner.read().await.transactions.get(&number) { + logs.extend_from_slice(ops); + } + if let Some(ops) = self.inner.read().await.diamond_upgrades.get(&number) { + logs.extend_from_slice(ops); + } + if let Some(ops) = self.inner.read().await.upgrade_timestamp.get(&number) { + logs.extend_from_slice(ops); + } + if let Some(ops) = self.inner.read().await.batch_roots.get(&number) { + logs.extend_from_slice(ops); + } + } + Ok(logs + .into_iter() + .filter(|log| { + log.topics.first() == Some(&topic1) + && (topic2.is_none() || log.topics.get(1) == topic2.as_ref()) + }) + .collect()) + } + + async fn scheduler_vk_hash( + &self, + _verifier_address: Address, + ) -> Result { + Ok(H256::zero()) + } + + async fn finalized_block_number(&self) -> EnrichedClientResult { + Ok(self.inner.read().await.last_finalized_block_number) + } + + async fn confirmed_block_number(&self) -> EnrichedClientResult { + Ok(self.inner.read().await.last_finalized_block_number) + } + + async fn diamond_cut_by_version( + &self, + packed_version: H256, + ) -> EnrichedClientResult>> { + let from_block = *self + .inner + .read() + .await + .diamond_upgrades + .keys() + .min() + .unwrap_or(&0); + let to_block = *self + .inner + .read() + .await + .diamond_upgrades + .keys() + .max() + .unwrap_or(&0); + + let logs = self + .get_events( + U64::from(from_block).into(), + U64::from(to_block).into(), + state_transition_manager_contract() + .event("NewUpgradeCutData") + .unwrap() + .signature(), + Some(packed_version), + RETRY_LIMIT, + ) + .await?; + + Ok(logs.into_iter().next().map(|log| log.data.0)) + } + + async fn get_total_priority_txs(&self) -> Result { + Ok(self + .inner + .read() + .await + .processed_priority_transactions_count) + } + + async fn chain_id(&self) -> EnrichedClientResult { + Ok(self.inner.read().await.chain_id) + } + + async fn get_chain_root( + &self, + _block_number: U64, + _l2_chain_id: L2ChainId, + ) -> Result { + unimplemented!() + } +} + +#[async_trait::async_trait] +impl L2EthClient for MockEthClient { + async fn get_chain_log_proof( + &self, + l1_batch_number: L1BatchNumber, + _chain_id: L2ChainId, + ) -> EnrichedClientResult> { + Ok(self + .inner + .read() + .await + .chain_log_proofs + .get(&l1_batch_number) + .cloned()) + } + + async fn get_chain_root_l2( + &self, + l1_batch_number: L1BatchNumber, + _l2_chain_id: L2ChainId, + ) -> Result, ContractCallError> { + Ok(self + .inner + .read() + .await + .chain_roots + .get(&l1_batch_number.0.into()) + .cloned()) + } +} + +fn tx_into_log(tx: L1Tx) -> Log { + let tx = abi::Transaction::try_from(Transaction::from(tx)).unwrap(); + let abi::Transaction::L1 { + tx, + factory_deps, + eth_block, + .. 
+ } = tx + else { + unreachable!() + }; + + let data = ethabi::encode( + &abi::NewPriorityRequest { + tx_id: tx.nonce, + tx_hash: tx.hash().into(), + expiration_timestamp: u64::MAX, + transaction: tx, + factory_deps, + } + .encode(), + ); + + Log { + address: Address::repeat_byte(0x1), + topics: vec![hyperchain_contract() + .event("NewPriorityRequest") + .expect("NewPriorityRequest event is missing in abi") + .signature()], + data: data.into(), + block_hash: Some(H256::repeat_byte(0x11)), + block_number: Some(eth_block.into()), + l1_batch_number: None, + transaction_hash: Some(H256::default()), + transaction_index: Some(0u64.into()), + log_index: Some(0u64.into()), + transaction_log_index: Some(0u64.into()), + log_type: None, + removed: None, + block_timestamp: None, + } +} + +fn init_calldata(protocol_upgrade: ProtocolUpgrade) -> Vec { + let upgrade_token = upgrade_into_diamond_cut(protocol_upgrade); + + let encoded_params = ethabi::encode(&[upgrade_token]); + + let execute_upgrade_selector = hyperchain_contract() + .function("executeUpgrade") + .unwrap() + .short_signature(); + + // Concatenate the function selector with the encoded parameters + let mut calldata = Vec::with_capacity(4 + encoded_params.len()); + calldata.extend_from_slice(&execute_upgrade_selector); + calldata.extend_from_slice(&encoded_params); + + calldata +} + +fn diamond_upgrade_log(upgrade: ProtocolUpgrade, eth_block: u64) -> Log { + // struct DiamondCutData { + // FacetCut[] facetCuts; + // address initAddress; + // bytes initCalldata; + // } + let final_data = ethabi::encode(&[Token::Tuple(vec![ + Token::Array(vec![]), + Token::Address(Address::zero()), + Token::Bytes(init_calldata(upgrade.clone())), + ])]); + tracing::info!("{:?}", Token::Bytes(init_calldata(upgrade))); + + Log { + address: Address::repeat_byte(0x1), + topics: vec![ + state_transition_manager_contract() + .event("NewUpgradeCutData") + .unwrap() + .signature(), + H256::from_low_u64_be(eth_block), + ], + data: final_data.into(), + block_hash: Some(H256::repeat_byte(0x11)), + block_number: Some(eth_block.into()), + l1_batch_number: None, + transaction_hash: Some(H256::random()), + transaction_index: Some(0u64.into()), + log_index: Some(0u64.into()), + transaction_log_index: Some(0u64.into()), + log_type: None, + removed: None, + block_timestamp: None, + } +} +fn upgrade_timestamp_log(eth_block: u64) -> Log { + let final_data = ethabi::encode(&[U256::from(12345).into_token()]); + + Log { + address: Address::repeat_byte(0x1), + topics: vec![ + chain_admin_contract() + .event("UpdateUpgradeTimestamp") + .expect("UpdateUpgradeTimestamp event is missing in ABI") + .signature(), + H256::from_low_u64_be(eth_block), + ], + data: final_data.into(), + block_hash: Some(H256::repeat_byte(0x11)), + block_number: Some(eth_block.into()), + l1_batch_number: None, + transaction_hash: Some(H256::random()), + transaction_index: Some(0u64.into()), + log_index: Some(0u64.into()), + transaction_log_index: Some(0u64.into()), + log_type: None, + removed: None, + block_timestamp: None, + } +} + +fn upgrade_into_diamond_cut(upgrade: ProtocolUpgrade) -> Token { + let abi::Transaction::L1 { + tx, factory_deps, .. 
+ } = upgrade + .tx + .map(|tx| Transaction::from(tx).try_into().unwrap()) + .unwrap_or(abi::Transaction::L1 { + tx: Default::default(), + factory_deps: vec![], + eth_block: 0, + }) + else { + unreachable!() + }; + ProposedUpgrade { + l2_protocol_upgrade_tx: tx, + factory_deps, + bootloader_hash: upgrade.bootloader_code_hash.unwrap_or_default().into(), + default_account_hash: upgrade.default_account_code_hash.unwrap_or_default().into(), + verifier: upgrade.verifier_address.unwrap_or_default(), + verifier_params: upgrade.verifier_params.unwrap_or_default().into(), + l1_contracts_upgrade_calldata: vec![], + post_upgrade_calldata: vec![], + upgrade_timestamp: upgrade.timestamp.into(), + new_protocol_version: upgrade.version.pack(), + } + .encode() +} + +fn batch_root_to_log(sl_block_number: u64, l2_batch_number: u64, batch_root: H256) -> Log { + let topic1 = ethabi::long_signature( + "AppendedChainBatchRoot", + &[ + ethabi::ParamType::Uint(256), + ethabi::ParamType::Uint(256), + ethabi::ParamType::FixedBytes(32), + ], + ); + let topic2 = u256_to_h256(L2ChainId::default().as_u64().into()); + let topic3 = u256_to_h256(l2_batch_number.into()); + let data = ethabi::encode(&[batch_root.into_token()]); + + Log { + address: Address::repeat_byte(0x1), + topics: vec![topic1, topic2, topic3], + data: data.into(), + block_hash: Some(H256::repeat_byte(0x11)), + block_number: Some(sl_block_number.into()), + l1_batch_number: Some(sl_block_number.into()), + transaction_hash: Some(H256::random()), + transaction_index: Some(0u64.into()), + log_index: Some(0u64.into()), + transaction_log_index: Some(0u64.into()), + log_type: None, + removed: None, + block_timestamp: None, + } +} diff --git a/core/node/eth_watch/src/tests/mod.rs b/core/node/eth_watch/src/tests/mod.rs new file mode 100644 index 000000000000..118bb9b7e436 --- /dev/null +++ b/core/node/eth_watch/src/tests/mod.rs @@ -0,0 +1,827 @@ +use std::convert::TryInto; + +use zksync_contracts::chain_admin_contract; +use zksync_dal::{Connection, ConnectionPool, Core, CoreDal}; +use zksync_types::{ + abi, + aggregated_operations::AggregatedActionType, + api::ChainAggProof, + block::L1BatchHeader, + commitment::L1BatchCommitmentArtifacts, + l1::{L1Tx, OpProcessingType, PriorityQueueType}, + l2_to_l1_log::BatchAndChainMerklePath, + protocol_upgrade::{ProtocolUpgradeTx, ProtocolUpgradeTxCommonData}, + protocol_version::ProtocolSemanticVersion, + Address, Execute, L1BatchNumber, L1TxCommonData, L2ChainId, PriorityOpId, ProtocolUpgrade, + ProtocolVersion, ProtocolVersionId, SLChainId, Transaction, H256, U256, +}; + +use crate::{tests::client::MockEthClient, EthWatch, L2EthClient}; + +mod client; + +const SL_CHAIN_ID: SLChainId = SLChainId(505); + +fn build_l1_tx(serial_id: u64, eth_block: u64) -> L1Tx { + let tx = L1Tx { + execute: Execute { + contract_address: Some(Address::repeat_byte(0x11)), + calldata: vec![1, 2, 3], + factory_deps: vec![], + value: U256::zero(), + }, + common_data: L1TxCommonData { + serial_id: PriorityOpId(serial_id), + sender: [1u8; 20].into(), + eth_block, + gas_limit: Default::default(), + max_fee_per_gas: Default::default(), + gas_per_pubdata_limit: 1u32.into(), + full_fee: Default::default(), + layer_2_tip_fee: U256::from(10u8), + refund_recipient: Address::zero(), + to_mint: Default::default(), + priority_queue_type: PriorityQueueType::Deque, + op_processing_type: OpProcessingType::Common, + canonical_tx_hash: H256::default(), + }, + received_timestamp_ms: 0, + }; + // Convert to abi::Transaction and back, so that canonical_tx_hash is 
computed. + let tx = Transaction::from_abi( + abi::Transaction::try_from(Transaction::from(tx)).unwrap(), + false, + ) + .unwrap(); + tx.try_into().unwrap() +} + +fn build_upgrade_tx(id: ProtocolVersionId, eth_block: u64) -> ProtocolUpgradeTx { + let tx = ProtocolUpgradeTx { + execute: Execute { + contract_address: Some(Address::repeat_byte(0x11)), + calldata: vec![1, 2, 3], + factory_deps: vec![], + value: U256::zero(), + }, + common_data: ProtocolUpgradeTxCommonData { + upgrade_id: id, + sender: [1u8; 20].into(), + eth_block, + gas_limit: Default::default(), + max_fee_per_gas: Default::default(), + gas_per_pubdata_limit: 1u32.into(), + refund_recipient: Address::zero(), + to_mint: Default::default(), + canonical_tx_hash: H256::zero(), + }, + received_timestamp_ms: 0, + }; + // Convert to abi::Transaction and back, so that canonical_tx_hash is computed. + Transaction::from_abi( + abi::Transaction::try_from(Transaction::from(tx)).unwrap(), + false, + ) + .unwrap() + .try_into() + .unwrap() +} + +async fn create_test_watcher( + connection_pool: ConnectionPool, + is_gateway: bool, +) -> (EthWatch, MockEthClient, MockEthClient) { + let l1_client = MockEthClient::new(SLChainId(42)); + let sl_client = MockEthClient::new(SL_CHAIN_ID); + let sl_l2_client: Option> = if is_gateway { + Some(Box::new(sl_client.clone())) + } else { + None + }; + let watcher = EthWatch::new( + &chain_admin_contract(), + Box::new(l1_client.clone()), + sl_l2_client, + connection_pool, + std::time::Duration::from_nanos(1), + L2ChainId::default(), + ) + .await + .unwrap(); + + (watcher, l1_client, sl_client) +} + +async fn create_l1_test_watcher( + connection_pool: ConnectionPool, +) -> (EthWatch, MockEthClient) { + let (watcher, l1_client, _) = create_test_watcher(connection_pool, false).await; + (watcher, l1_client) +} + +async fn create_gateway_test_watcher( + connection_pool: ConnectionPool, +) -> (EthWatch, MockEthClient, MockEthClient) { + create_test_watcher(connection_pool, true).await +} + +#[test_log::test(tokio::test)] +async fn test_normal_operation_l1_txs() { + let connection_pool = ConnectionPool::::test_pool().await; + setup_db(&connection_pool).await; + let (mut watcher, mut client) = create_l1_test_watcher(connection_pool.clone()).await; + + let mut storage = connection_pool.connection().await.unwrap(); + client + .add_transactions(&[build_l1_tx(0, 10), build_l1_tx(1, 14), build_l1_tx(2, 18)]) + .await; + client.set_last_finalized_block_number(15).await; + // second tx will not be processed, as it's block is not finalized yet. 
+ watcher.loop_iteration(&mut storage).await.unwrap(); + let db_txs = get_all_db_txs(&mut storage).await; + let mut db_txs: Vec = db_txs + .into_iter() + .map(|tx| tx.try_into().unwrap()) + .collect(); + db_txs.sort_by_key(|tx| tx.common_data.serial_id); + assert_eq!(db_txs.len(), 2); + let db_tx = db_txs[0].clone(); + assert_eq!(db_tx.common_data.serial_id.0, 0); + let db_tx = db_txs[1].clone(); + assert_eq!(db_tx.common_data.serial_id.0, 1); + + client.set_last_finalized_block_number(20).await; + // now the second tx will be processed + watcher.loop_iteration(&mut storage).await.unwrap(); + let db_txs = get_all_db_txs(&mut storage).await; + let mut db_txs: Vec = db_txs + .into_iter() + .map(|tx| tx.try_into().unwrap()) + .collect(); + db_txs.sort_by_key(|tx| tx.common_data.serial_id); + assert_eq!(db_txs.len(), 3); + let db_tx = db_txs[2].clone(); + assert_eq!(db_tx.common_data.serial_id.0, 2); +} + +#[test_log::test(tokio::test)] +async fn test_gap_in_upgrade_timestamp() { + let connection_pool = ConnectionPool::::test_pool().await; + setup_db(&connection_pool).await; + let (mut watcher, mut client) = create_l1_test_watcher(connection_pool.clone()).await; + + let mut storage = connection_pool.connection().await.unwrap(); + client + .add_upgrade_timestamp(&[( + ProtocolUpgrade { + version: ProtocolSemanticVersion { + minor: ProtocolVersionId::next(), + patch: 0.into(), + }, + tx: None, + ..Default::default() + }, + 10, + )]) + .await; + client.set_last_finalized_block_number(15).await; + watcher.loop_iteration(&mut storage).await.unwrap(); + + let db_versions = storage.protocol_versions_dal().all_versions().await; + // there should be genesis version and just added version + assert_eq!(db_versions.len(), 2); + + let previous_version = (ProtocolVersionId::latest() as u16 - 1).try_into().unwrap(); + let next_version = ProtocolVersionId::next(); + assert_eq!(db_versions[0].minor, previous_version); + assert_eq!(db_versions[1].minor, next_version); +} + +#[test_log::test(tokio::test)] +async fn test_normal_operation_upgrade_timestamp() { + zksync_concurrency::testonly::abort_on_panic(); + let connection_pool = ConnectionPool::::test_pool().await; + setup_db(&connection_pool).await; + + let mut client = MockEthClient::new(SLChainId(42)); + let mut watcher = EthWatch::new( + &chain_admin_contract(), + Box::new(client.clone()), + None, + connection_pool.clone(), + std::time::Duration::from_nanos(1), + L2ChainId::default(), + ) + .await + .unwrap(); + + let mut storage = connection_pool.connection().await.unwrap(); + client + .add_upgrade_timestamp(&[ + ( + ProtocolUpgrade { + tx: None, + ..Default::default() + }, + 10, + ), + ( + ProtocolUpgrade { + version: ProtocolSemanticVersion { + minor: ProtocolVersionId::next(), + patch: 0.into(), + }, + tx: Some(build_upgrade_tx(ProtocolVersionId::next(), 18)), + ..Default::default() + }, + 18, + ), + ( + ProtocolUpgrade { + version: ProtocolSemanticVersion { + minor: ProtocolVersionId::next(), + patch: 1.into(), + }, + tx: None, + ..Default::default() + }, + 19, + ), + ]) + .await; + client.set_last_finalized_block_number(15).await; + // The second upgrade will not be processed, as it has less than 5 confirmations. + watcher.loop_iteration(&mut storage).await.unwrap(); + + let db_versions = storage.protocol_versions_dal().all_versions().await; + // There should be genesis version and just added version. 
+ assert_eq!(db_versions.len(), 2); + assert_eq!(db_versions[1].minor, ProtocolVersionId::latest()); + + client.set_last_finalized_block_number(20).await; + // Now the second and the third upgrades will be processed. + watcher.loop_iteration(&mut storage).await.unwrap(); + let db_versions = storage.protocol_versions_dal().all_versions().await; + let mut expected_version = ProtocolSemanticVersion { + minor: ProtocolVersionId::next(), + patch: 0.into(), + }; + assert_eq!(db_versions.len(), 4); + assert_eq!(db_versions[2], expected_version); + expected_version.patch += 1; + assert_eq!(db_versions[3], expected_version); + + // Check that tx was saved with the second upgrade. + let tx = storage + .protocol_versions_dal() + .get_protocol_upgrade_tx(ProtocolVersionId::next()) + .await + .unwrap() + .expect("no protocol upgrade transaction"); + assert_eq!(tx.common_data.upgrade_id, ProtocolVersionId::next()); +} + +#[test_log::test(tokio::test)] +#[should_panic] +async fn test_gap_in_single_batch() { + let connection_pool = ConnectionPool::::test_pool().await; + setup_db(&connection_pool).await; + let (mut watcher, mut client) = create_l1_test_watcher(connection_pool.clone()).await; + + let mut storage = connection_pool.connection().await.unwrap(); + client + .add_transactions(&[ + build_l1_tx(0, 10), + build_l1_tx(1, 14), + build_l1_tx(2, 14), + build_l1_tx(3, 14), + build_l1_tx(5, 14), + ]) + .await; + client.set_last_finalized_block_number(15).await; + watcher.loop_iteration(&mut storage).await.unwrap(); +} + +#[test_log::test(tokio::test)] +#[should_panic] +async fn test_gap_between_batches() { + let connection_pool = ConnectionPool::::test_pool().await; + setup_db(&connection_pool).await; + let (mut watcher, mut client) = create_l1_test_watcher(connection_pool.clone()).await; + + let mut storage = connection_pool.connection().await.unwrap(); + client + .add_transactions(&[ + // this goes to the first batch + build_l1_tx(0, 10), + build_l1_tx(1, 14), + build_l1_tx(2, 14), + // this goes to the second batch + build_l1_tx(4, 20), + build_l1_tx(5, 22), + ]) + .await; + client.set_last_finalized_block_number(15).await; + watcher.loop_iteration(&mut storage).await.unwrap(); + + let db_txs = get_all_db_txs(&mut storage).await; + assert_eq!(db_txs.len(), 3); + client.set_last_finalized_block_number(25).await; + watcher.loop_iteration(&mut storage).await.unwrap(); +} + +#[test_log::test(tokio::test)] +async fn test_overlapping_batches() { + zksync_concurrency::testonly::abort_on_panic(); + let connection_pool = ConnectionPool::::test_pool().await; + setup_db(&connection_pool).await; + let (mut watcher, mut client) = create_l1_test_watcher(connection_pool.clone()).await; + + let mut storage = connection_pool.connection().await.unwrap(); + client + .add_transactions(&[ + // this goes to the first batch + build_l1_tx(0, 10), + build_l1_tx(1, 14), + build_l1_tx(2, 14), + // this goes to the second batch + build_l1_tx(1, 20), + build_l1_tx(2, 22), + build_l1_tx(3, 23), + build_l1_tx(4, 23), + ]) + .await; + client.set_last_finalized_block_number(15).await; + watcher.loop_iteration(&mut storage).await.unwrap(); + + let db_txs = get_all_db_txs(&mut storage).await; + assert_eq!(db_txs.len(), 3); + + client.set_last_finalized_block_number(25).await; + watcher.loop_iteration(&mut storage).await.unwrap(); + + let db_txs = get_all_db_txs(&mut storage).await; + assert_eq!(db_txs.len(), 5); + let mut db_txs: Vec = db_txs + .into_iter() + .map(|tx| tx.try_into().unwrap()) + .collect(); + db_txs.sort_by_key(|tx| 
tx.common_data.serial_id); + let tx = db_txs[2].clone(); + assert_eq!(tx.common_data.serial_id.0, 2); + let tx = db_txs[4].clone(); + assert_eq!(tx.common_data.serial_id.0, 4); +} + +#[test_log::test(tokio::test)] +async fn test_transactions_get_gradually_processed_by_gateway() { + zksync_concurrency::testonly::abort_on_panic(); + let connection_pool = ConnectionPool::::test_pool().await; + setup_db(&connection_pool).await; + let (mut watcher, mut l1_client, mut gateway_client) = + create_gateway_test_watcher(connection_pool.clone()).await; + + let mut storage = connection_pool.connection().await.unwrap(); + l1_client + .add_transactions(&[ + build_l1_tx(0, 10), + build_l1_tx(1, 14), + build_l1_tx(2, 14), + build_l1_tx(3, 20), + build_l1_tx(4, 22), + ]) + .await; + l1_client.set_last_finalized_block_number(15).await; + gateway_client + .set_processed_priority_transactions_count(2) + .await; + watcher.loop_iteration(&mut storage).await.unwrap(); + + let db_txs = get_all_db_txs(&mut storage).await; + assert_eq!(db_txs.len(), 2); + + l1_client.set_last_finalized_block_number(25).await; + gateway_client + .set_processed_priority_transactions_count(4) + .await; + watcher.loop_iteration(&mut storage).await.unwrap(); + + let db_txs = get_all_db_txs(&mut storage).await; + assert_eq!(db_txs.len(), 4); + let mut db_txs: Vec = db_txs + .into_iter() + .map(|tx| tx.try_into().unwrap()) + .collect(); + db_txs.sort_by_key(|tx| tx.common_data.serial_id); + let tx = db_txs[2].clone(); + assert_eq!(tx.common_data.serial_id.0, 2); + let tx = db_txs[3].clone(); + assert_eq!(tx.common_data.serial_id.0, 3); +} + +#[test_log::test(tokio::test)] +async fn test_batch_root_processor_from_genesis() { + let connection_pool = ConnectionPool::::test_pool().await; + setup_db(&connection_pool).await; + setup_batch_roots(&connection_pool, 0).await; + let (mut watcher, _, mut sl_client) = + create_gateway_test_watcher(connection_pool.clone()).await; + + let batch_roots = batch_roots(); + sl_client + .add_batch_roots(&[ + (5, 1, batch_roots[0]), + (9, 2, batch_roots[1]), + (11, 3, batch_roots[2]), + ]) + .await; + sl_client + .add_chain_roots(&[ + ( + 5, + H256::from_slice( + &hex::decode( + "10a2ef76e709d318b459be49f1e8d7f02d7120f2b501bc0afddd935f1a813c67", + ) + .unwrap(), + ), + ), + ( + 9, + H256::from_slice( + &hex::decode( + "e0c3330f674b6b2d578f958a1dbd66f164d068b0bb5a9fb077eca013976fda6f", + ) + .unwrap(), + ), + ), + ( + 11, + H256::from_slice( + &hex::decode( + "d22fc9a7b005fefecd33bb56cdbf70bcc23610e693cd21295f9920227c2cb1cc", + ) + .unwrap(), + ), + ), + ]) + .await; + let chain_log_proofs = chain_log_proofs(); + sl_client.add_chain_log_proofs(chain_log_proofs).await; + + sl_client.set_last_finalized_block_number(5).await; + + let mut connection = connection_pool.connection().await.unwrap(); + watcher.loop_iteration(&mut connection).await.unwrap(); + + let proof1 = connection + .blocks_dal() + .get_l1_batch_chain_merkle_path(L1BatchNumber(1)) + .await + .unwrap() + .unwrap(); + let proof1 = hex::encode(&bincode::serialize(&proof1).unwrap()); + assert_eq!(proof1, 
"000000000600000000000000420000000000000030783030303030303030303030303030303030303030303030303030303030303030303030303030303030303030303030303030303030303030303030303030303042000000000000003078303030303030303030303030303030303030303030303030303030303030303530303030303030303030303030303030303030303030303030303030303030334200000000000000307830303030303030303030303030303030303030303030303030303030303030303030303030303030303030303030303030303030303030303030303030316639420000000000000030783031303230303030303030303030303030303030303030303030303030303030303030303030303030303030303030303030303030303030303030303030303042000000000000003078303932343932386331333737613663663234633339633264343666386562396466323365383131623236646333353237653534383339366664346531373362314200000000000000307833373561356266393039636230323134336533363935636136353865303634316537333961613539306630303034646261393335373263343463646239643264"); + + sl_client.set_last_finalized_block_number(11).await; + watcher.loop_iteration(&mut connection).await.unwrap(); + + let proof2 = connection + .blocks_dal() + .get_l1_batch_chain_merkle_path(L1BatchNumber(2)) + .await + .unwrap() + .unwrap(); + let proof2 = hex::encode(&bincode::serialize(&proof2).unwrap()); + assert_eq!(proof2, "0100000007000000000000004200000000000000307830303030303030303030303030303030303030303030303030303030303030303030303030303030303030303030303030303030303030303030303030303031420000000000000030783130613265663736653730396433313862343539626534396631653864376630326437313230663262353031626330616664646439333566316138313363363742000000000000003078303030303030303030303030303030303030303030303030303030303030303930303030303030303030303030303030303030303030303030303030303030334200000000000000307830303030303030303030303030303030303030303030303030303030303030303030303030303030303030303030303030303030303030303030303030316639420000000000000030783031303230303030303030303030303030303030303030303030303030303030303030303030303030303030303030303030303030303030303030303030303042000000000000003078303932343932386331333737613663663234633339633264343666386562396466323365383131623236646333353237653534383339366664346531373362314200000000000000307861333738613230636132376237616533303731643162643763326164613030343639616263353765343239646436663438613833303932646237303539613138"); + + let proof3 = connection + .blocks_dal() + .get_l1_batch_chain_merkle_path(L1BatchNumber(3)) + .await + .unwrap() + .unwrap(); + let proof3 = hex::encode(&bincode::serialize(&proof3).unwrap()); + assert_eq!(proof3, 
"02000000080000000000000042000000000000003078303030303030303030303030303030303030303030303030303030303030303030303030303030303030303030303030303030303030303030303030303030324200000000000000307834363730306234643430616335633335616632633232646461323738376139316562353637623036633932346138666238616539613035623230633038633231420000000000000030786530633333333066363734623662326435373866393538613164626436366631363464303638623062623561396662303737656361303133393736666461366642000000000000003078303030303030303030303030303030303030303030303030303030303030306230303030303030303030303030303030303030303030303030303030303030334200000000000000307830303030303030303030303030303030303030303030303030303030303030303030303030303030303030303030303030303030303030303030303030316639420000000000000030783031303230303030303030303030303030303030303030303030303030303030303030303030303030303030303030303030303030303030303030303030303042000000000000003078303932343932386331333737613663663234633339633264343666386562396466323365383131623236646333353237653534383339366664346531373362314200000000000000307833373561356266393039636230323134336533363935636136353865303634316537333961613539306630303034646261393335373263343463646239643264"); +} + +#[test_log::test(tokio::test)] +async fn test_batch_root_processor_restart() { + let connection_pool = ConnectionPool::::test_pool().await; + setup_db(&connection_pool).await; + setup_batch_roots(&connection_pool, 2).await; + let (mut watcher, _, mut sl_client) = + create_gateway_test_watcher(connection_pool.clone()).await; + + let batch_roots = batch_roots(); + sl_client + .add_batch_roots(&[ + (11, 3, batch_roots[2]), + (13, 4, batch_roots[3]), + (14, 5, batch_roots[4]), + (14, 6, batch_roots[5]), + ]) + .await; + sl_client + .add_chain_roots(&[ + ( + 11, + H256::from_slice( + &hex::decode( + "d22fc9a7b005fefecd33bb56cdbf70bcc23610e693cd21295f9920227c2cb1cc", + ) + .unwrap(), + ), + ), + ( + 13, + H256::from_slice( + &hex::decode( + "53edc1f5ad79c5999bd578dfc135f9c51ebd7fafa4585b64f71d15b2dce1b728", + ) + .unwrap(), + ), + ), + ( + 14, + H256::from_slice( + &hex::decode( + "61b35796307159a6da8aa45448e6941e3438380582e2f3cb358db59598ae156f", + ) + .unwrap(), + ), + ), + ]) + .await; + let chain_log_proofs = chain_log_proofs(); + sl_client.add_chain_log_proofs(chain_log_proofs).await; + + sl_client.set_last_finalized_block_number(14).await; + + let mut connection = connection_pool.connection().await.unwrap(); + watcher.loop_iteration(&mut connection).await.unwrap(); + + let proof = connection + .blocks_dal() + .get_l1_batch_chain_merkle_path(L1BatchNumber(3)) + .await + .unwrap() + .unwrap(); + let proof = hex::encode(&bincode::serialize(&proof).unwrap()); + assert_eq!(proof, 
"02000000080000000000000042000000000000003078303030303030303030303030303030303030303030303030303030303030303030303030303030303030303030303030303030303030303030303030303030324200000000000000307834363730306234643430616335633335616632633232646461323738376139316562353637623036633932346138666238616539613035623230633038633231420000000000000030786530633333333066363734623662326435373866393538613164626436366631363464303638623062623561396662303737656361303133393736666461366642000000000000003078303030303030303030303030303030303030303030303030303030303030306230303030303030303030303030303030303030303030303030303030303030334200000000000000307830303030303030303030303030303030303030303030303030303030303030303030303030303030303030303030303030303030303030303030303030316639420000000000000030783031303230303030303030303030303030303030303030303030303030303030303030303030303030303030303030303030303030303030303030303030303042000000000000003078303932343932386331333737613663663234633339633264343666386562396466323365383131623236646333353237653534383339366664346531373362314200000000000000307833373561356266393039636230323134336533363935636136353865303634316537333961613539306630303034646261393335373263343463646239643264"); + + let proof = connection + .blocks_dal() + .get_l1_batch_chain_merkle_path(L1BatchNumber(4)) + .await + .unwrap() + .unwrap(); + let proof = hex::encode(&bincode::serialize(&proof).unwrap()); + assert_eq!(proof, "02000000080000000000000042000000000000003078303030303030303030303030303030303030303030303030303030303030303030303030303030303030303030303030303030303030303030303030303030334200000000000000307837623765373735373139343639366666393634616233353837393131373362636337663735356132656161393334653935373061636533393139383435313265420000000000000030786530633333333066363734623662326435373866393538613164626436366631363464303638623062623561396662303737656361303133393736666461366642000000000000003078303030303030303030303030303030303030303030303030303030303030306430303030303030303030303030303030303030303030303030303030303030334200000000000000307830303030303030303030303030303030303030303030303030303030303030303030303030303030303030303030303030303030303030303030303030316639420000000000000030783031303230303030303030303030303030303030303030303030303030303030303030303030303030303030303030303030303030303030303030303030303042000000000000003078303932343932386331333737613663663234633339633264343666386562396466323365383131623236646333353237653534383339366664346531373362314200000000000000307835353063313735316338653764626166633839303939326634353532333636663064643565623665343362653535353936386264616338633732656466316261"); + + let proof = connection + .blocks_dal() + .get_l1_batch_chain_merkle_path(L1BatchNumber(5)) + .await + .unwrap() + .unwrap(); + let proof = hex::encode(&bincode::serialize(&proof).unwrap()); + assert_eq!(proof, 
"030000000900000000000000420000000000000030783030303030303030303030303030303030303030303030303030303030303030303030303030303030303030303030303030303030303030303030303030303442000000000000003078303235663065363031353230366661626364326263613930316432633438396536336263356564346231356266356330633963363066396531363735383564614200000000000000307863633463343165646230633230333133343862323932623736386539626163316565386339326330396566386133323737633265636534303963313264383661420000000000000030783533656463316635616437396335393939626435373864666331333566396335316562643766616661343538356236346637316431356232646365316237323842000000000000003078303030303030303030303030303030303030303030303030303030303030306530303030303030303030303030303030303030303030303030303030303030334200000000000000307830303030303030303030303030303030303030303030303030303030303030303030303030303030303030303030303030303030303030303030303030316639420000000000000030783031303230303030303030303030303030303030303030303030303030303030303030303030303030303030303030303030303030303030303030303030303042000000000000003078303932343932386331333737613663663234633339633264343666386562396466323365383131623236646333353237653534383339366664346531373362314200000000000000307833373561356266393039636230323134336533363935636136353865303634316537333961613539306630303034646261393335373263343463646239643264"); + + let proof = connection + .blocks_dal() + .get_l1_batch_chain_merkle_path(L1BatchNumber(6)) + .await + .unwrap() + .unwrap(); + let proof = hex::encode(&bincode::serialize(&proof).unwrap()); + assert_eq!(proof, "030000000900000000000000420000000000000030783030303030303030303030303030303030303030303030303030303030303030303030303030303030303030303030303030303030303030303030303030303542000000000000003078323465653435363834376535373364313635613832333634306632303834383139636331613865333433316562633635633865363064333435343266313637324200000000000000307863633463343165646230633230333133343862323932623736386539626163316565386339326330396566386133323737633265636534303963313264383661420000000000000030783533656463316635616437396335393939626435373864666331333566396335316562643766616661343538356236346637316431356232646365316237323842000000000000003078303030303030303030303030303030303030303030303030303030303030306530303030303030303030303030303030303030303030303030303030303030334200000000000000307830303030303030303030303030303030303030303030303030303030303030303030303030303030303030303030303030303030303030303030303030316639420000000000000030783031303230303030303030303030303030303030303030303030303030303030303030303030303030303030303030303030303030303030303030303030303042000000000000003078303932343932386331333737613663663234633339633264343666386562396466323365383131623236646333353237653534383339366664346531373362314200000000000000307833373561356266393039636230323134336533363935636136353865303634316537333961613539306630303034646261393335373263343463646239643264"); +} + +async fn get_all_db_txs(storage: &mut Connection<'_, Core>) -> Vec { + storage.transactions_dal().reset_mempool().await.unwrap(); + storage + .transactions_dal() + .sync_mempool(&[], &[], 0, 0, 1000) + .await + .unwrap() + .into_iter() + .map(|x| x.0) + .collect() +} + +async fn setup_db(connection_pool: &ConnectionPool) { + connection_pool + .connection() + .await + .unwrap() + .protocol_versions_dal() + .save_protocol_version_with_tx(&ProtocolVersion { + version: ProtocolSemanticVersion { + minor: (ProtocolVersionId::latest() as u16 - 1).try_into().unwrap(), + patch: 0.into(), + }, + 
..Default::default()
+        })
+        .await
+        .unwrap();
+}
+
+fn batch_roots() -> Vec<H256> {
+    [
+        "5EEBBC173358620F7F61B69D80AFE503F76190396918EB7B27CEF4DB7C51D60A",
+        "B7E66115CDAAF5FFE70B53EF0AC6D0FF7D7BEB4341FEC6352A670B805AE15935",
+        "09BD2AD9C01C05F760BBEC6E59BF728566551B48C0DCBD01DB797D1C703122F8",
+        "B6E530FF878093B2D0CAF87780451A8F07922570E2D820B7A8541114E0D70FB5",
+        "B4F195844BA1792F3C1FB57C826B2DA60EA6EEBB90BF53F706120E49BB0486EF",
+        "118F6FAC96824D4E0845F7C7DF716969378F3F2038D9E9D0FEAD1FE01BA11A93",
+    ]
+    .into_iter()
+    .map(|s| H256::from_slice(&hex::decode(s).unwrap()))
+    .collect()
+}
+
+fn chain_log_proofs() -> Vec<(L1BatchNumber, ChainAggProof)> {
+    vec![
+        (
+            L1BatchNumber(5),
+            ChainAggProof {
+                chain_id_leaf_proof: vec![
+                    H256::from_slice(
+                        &hex::decode(
+                            "0924928c1377a6cf24c39c2d46f8eb9df23e811b26dc3527e548396fd4e173b1",
+                        )
+                        .unwrap(),
+                    ),
+                    H256::from_slice(
+                        &hex::decode(
+                            "375a5bf909cb02143e3695ca658e0641e739aa590f0004dba93572c44cdb9d2d",
+                        )
+                        .unwrap(),
+                    ),
+                ],
+                chain_id_leaf_proof_mask: 3u32.into(),
+            },
+        ),
+        (
+            L1BatchNumber(9),
+            ChainAggProof {
+                chain_id_leaf_proof: vec![
+                    H256::from_slice(
+                        &hex::decode(
+                            "0924928c1377a6cf24c39c2d46f8eb9df23e811b26dc3527e548396fd4e173b1",
+                        )
+                        .unwrap(),
+                    ),
+                    H256::from_slice(
+                        &hex::decode(
+                            "a378a20ca27b7ae3071d1bd7c2ada00469abc57e429dd6f48a83092db7059a18",
+                        )
+                        .unwrap(),
+                    ),
+                ],
+                chain_id_leaf_proof_mask: 3u32.into(),
+            },
+        ),
+        (
+            L1BatchNumber(11),
+            ChainAggProof {
+                chain_id_leaf_proof: vec![
+                    H256::from_slice(
+                        &hex::decode(
+                            "0924928c1377a6cf24c39c2d46f8eb9df23e811b26dc3527e548396fd4e173b1",
+                        )
+                        .unwrap(),
+                    ),
+                    H256::from_slice(
+                        &hex::decode(
+                            "375a5bf909cb02143e3695ca658e0641e739aa590f0004dba93572c44cdb9d2d",
+                        )
+                        .unwrap(),
+                    ),
+                ],
+                chain_id_leaf_proof_mask: 3u32.into(),
+            },
+        ),
+        (
+            L1BatchNumber(13),
+            ChainAggProof {
+                chain_id_leaf_proof: vec![
+                    H256::from_slice(
+                        &hex::decode(
+                            "0924928c1377a6cf24c39c2d46f8eb9df23e811b26dc3527e548396fd4e173b1",
+                        )
+                        .unwrap(),
+                    ),
+                    H256::from_slice(
+                        &hex::decode(
+                            "550c1751c8e7dbafc890992f4552366f0dd5eb6e43be555968bdac8c72edf1ba",
+                        )
+                        .unwrap(),
+                    ),
+                ],
+                chain_id_leaf_proof_mask: 3u32.into(),
+            },
+        ),
+        (
+            L1BatchNumber(14),
+            ChainAggProof {
+                chain_id_leaf_proof: vec![
+                    H256::from_slice(
+                        &hex::decode(
+                            "0924928c1377a6cf24c39c2d46f8eb9df23e811b26dc3527e548396fd4e173b1",
+                        )
+                        .unwrap(),
+                    ),
+                    H256::from_slice(
+                        &hex::decode(
+                            "375a5bf909cb02143e3695ca658e0641e739aa590f0004dba93572c44cdb9d2d",
+                        )
+                        .unwrap(),
+                    ),
+                ],
+                chain_id_leaf_proof_mask: 3u32.into(),
+            },
+        ),
+    ]
+}
+
+// Seeds one mock batch per root; the first `number_of_processed_batches` of them also get a
+// stored chain Merkle path, mimicking batches that were already processed before a restart.
+async fn setup_batch_roots(
+    connection_pool: &ConnectionPool<Core>,
+    number_of_processed_batches: usize,
+) {
+    let batch_roots = batch_roots();
+
+    let mut connection = connection_pool.connection().await.unwrap();
+
+    assert!(number_of_processed_batches <= batch_roots.len());
+    for (i, root) in batch_roots.into_iter().enumerate() {
+        let batch_number = L1BatchNumber(i as u32 + 1);
+        let header = L1BatchHeader::new(
+            batch_number,
+            i as u64,
+            Default::default(),
+            (ProtocolVersionId::latest() as u16 - 1).try_into().unwrap(),
+        );
+        connection
+            .blocks_dal()
+            .insert_mock_l1_batch(&header)
+            .await
+            .unwrap();
+        connection
+            .blocks_dal()
+            .save_l1_batch_commitment_artifacts(
+                batch_number,
+                &L1BatchCommitmentArtifacts {
+                    l2_l1_merkle_root: root,
+                    ..Default::default()
+                },
+            )
+            .await
+            .unwrap();
+
+        let eth_tx_id = connection
+            .eth_sender_dal()
+            .save_eth_tx(
+                i as u64,
+                Default::default(),
+                AggregatedActionType::Execute,
+
Default::default(),
+                Default::default(),
+                Default::default(),
+                Default::default(),
+                true,
+            )
+            .await
+            .unwrap()
+            .id;
+        connection
+            .eth_sender_dal()
+            .set_chain_id(eth_tx_id, SL_CHAIN_ID.0)
+            .await
+            .unwrap();
+        connection
+            .blocks_dal()
+            .set_eth_tx_id(
+                batch_number..=batch_number,
+                eth_tx_id,
+                AggregatedActionType::Execute,
+            )
+            .await
+            .unwrap();
+
+        if i < number_of_processed_batches {
+            connection
+                .blocks_dal()
+                .set_batch_chain_merkle_path(
+                    batch_number,
+                    BatchAndChainMerklePath {
+                        batch_proof_len: 0,
+                        proof: Vec::new(),
+                    },
+                )
+                .await
+                .unwrap()
+        }
+    }
+}
diff --git a/core/node/node_framework/src/implementations/layers/consistency_checker.rs b/core/node/node_framework/src/implementations/layers/consistency_checker.rs
index a9e99eb89ac4..a76b358b53b4 100644
--- a/core/node/node_framework/src/implementations/layers/consistency_checker.rs
+++ b/core/node/node_framework/src/implementations/layers/consistency_checker.rs
@@ -1,9 +1,9 @@
 use zksync_consistency_checker::ConsistencyChecker;
-use zksync_types::{commitment::L1BatchCommitmentMode, Address};
+use zksync_types::{commitment::L1BatchCommitmentMode, Address, L2ChainId};
 
 use crate::{
     implementations::resources::{
-        eth_interface::EthInterfaceResource,
+        eth_interface::{EthInterfaceResource, GatewayEthInterfaceResource},
         healthcheck::AppHealthCheckResource,
         pools::{MasterPool, PoolResource},
     },
@@ -16,15 +16,17 @@ use crate::{
 /// Wiring layer for the `ConsistencyChecker` (used by the external node).
 #[derive(Debug)]
 pub struct ConsistencyCheckerLayer {
-    diamond_proxy_addr: Address,
+    l1_diamond_proxy_addr: Address,
     max_batches_to_recheck: u32,
     commitment_mode: L1BatchCommitmentMode,
+    l2_chain_id: L2ChainId,
 }
 
 #[derive(Debug, FromContext)]
 #[context(crate = crate)]
 pub struct Input {
     pub l1_client: EthInterfaceResource,
+    pub gateway_client: Option<GatewayEthInterfaceResource>,
     pub master_pool: PoolResource<MasterPool>,
     #[context(default)]
     pub app_health: AppHealthCheckResource,
@@ -39,14 +41,16 @@ pub struct Output {
 
 impl ConsistencyCheckerLayer {
     pub fn new(
-        diamond_proxy_addr: Address,
+        l1_diamond_proxy_addr: Address,
         max_batches_to_recheck: u32,
         commitment_mode: L1BatchCommitmentMode,
+        l2_chain_id: L2ChainId,
     ) -> ConsistencyCheckerLayer {
         Self {
-            diamond_proxy_addr,
+            l1_diamond_proxy_addr,
             max_batches_to_recheck,
             commitment_mode,
+            l2_chain_id,
         }
     }
 }
@@ -63,17 +67,21 @@ impl WiringLayer for ConsistencyCheckerLayer {
     async fn wire(self, input: Self::Input) -> Result<Self::Output, WiringError> {
         // Get resources.
         let l1_client = input.l1_client.0;
+        let gateway_client = input.gateway_client.map(|c| c.0);
         let singleton_pool = input.master_pool.get_singleton().await?;
 
         let consistency_checker = ConsistencyChecker::new(
             l1_client,
+            gateway_client,
             self.max_batches_to_recheck,
            singleton_pool,
             self.commitment_mode,
+            self.l2_chain_id,
         )
+        .await
         .map_err(WiringError::Internal)?
-        .with_diamond_proxy_addr(self.diamond_proxy_addr);
+        .with_l1_diamond_proxy_addr(self.l1_diamond_proxy_addr);
 
         input
             .app_health
diff --git a/core/node/node_framework/src/implementations/layers/eth_watch.rs b/core/node/node_framework/src/implementations/layers/eth_watch.rs
index e19828d85ccd..e871f5661d22 100644
--- a/core/node/node_framework/src/implementations/layers/eth_watch.rs
+++ b/core/node/node_framework/src/implementations/layers/eth_watch.rs
@@ -1,6 +1,7 @@
 use zksync_config::{ContractsConfig, EthWatchConfig};
 use zksync_contracts::chain_admin_contract;
 use zksync_eth_watch::{EthHttpQueryClient, EthWatch};
+use zksync_types::L2ChainId;
 
 use crate::{
     implementations::resources::{
@@ -21,6 +22,7 @@
 pub struct EthWatchLayer {
     eth_watch_config: EthWatchConfig,
     contracts_config: ContractsConfig,
+    chain_id: L2ChainId,
 }
 
 #[derive(Debug, FromContext)]
@@ -38,10 +40,15 @@ pub struct Output {
 }
 
 impl EthWatchLayer {
-    pub fn new(eth_watch_config: EthWatchConfig, contracts_config: ContractsConfig) -> Self {
+    pub fn new(
+        eth_watch_config: EthWatchConfig,
+        contracts_config: ContractsConfig,
+        chain_id: L2ChainId,
+    ) -> Self {
         Self {
             eth_watch_config,
             contracts_config,
+            chain_id,
         }
     }
 }
@@ -72,10 +79,11 @@ impl WiringLayer for EthWatchLayer {
 
         let eth_watch = EthWatch::new(
             &chain_admin_contract(),
-            Box::new(eth_client.clone()),
             Box::new(eth_client),
+            None,
             main_pool,
             self.eth_watch_config.poll_interval(),
+            self.chain_id,
         )
         .await?;
 
diff --git a/core/node/node_framework/src/implementations/layers/query_eth_client.rs b/core/node/node_framework/src/implementations/layers/query_eth_client.rs
index 116823d92d8a..e1a8dd71fed8 100644
--- a/core/node/node_framework/src/implementations/layers/query_eth_client.rs
+++ b/core/node/node_framework/src/implementations/layers/query_eth_client.rs
@@ -1,9 +1,11 @@
 use anyhow::Context;
-use zksync_types::{settlement::SettlementMode, url::SensitiveUrl, L2ChainId, SLChainId};
+use zksync_types::{url::SensitiveUrl, L2ChainId, SLChainId};
 use zksync_web3_decl::client::Client;
 
 use crate::{
-    implementations::resources::eth_interface::{EthInterfaceResource, L2InterfaceResource},
+    implementations::resources::eth_interface::{
+        EthInterfaceResource, GatewayEthInterfaceResource, L2InterfaceResource,
+    },
     wiring_layer::{WiringError, WiringLayer},
     IntoContext,
 };
@@ -13,19 +15,19 @@
 pub struct QueryEthClientLayer {
     chain_id: SLChainId,
     web3_url: SensitiveUrl,
-    settlement_mode: SettlementMode,
+    gateway_web3_url: Option<SensitiveUrl>,
 }
 
 impl QueryEthClientLayer {
     pub fn new(
         chain_id: SLChainId,
         web3_url: SensitiveUrl,
-        settlement_mode: SettlementMode,
+        gateway_web3_url: Option<SensitiveUrl>,
     ) -> Self {
         Self {
             chain_id,
             web3_url,
-            settlement_mode,
+            gateway_web3_url,
        }
    }
}
@@ -35,6 +37,7 @@
 pub struct Output {
     query_client_l1: EthInterfaceResource,
     query_client_l2: Option<L2InterfaceResource>,
+    query_client_gateway: Option<GatewayEthInterfaceResource>,
 }
 
 #[async_trait::async_trait]
@@ -55,12 +58,29 @@ impl WiringLayer for QueryEthClientLayer {
                     .for_network(self.chain_id.into())
                     .build(),
             )),
-            query_client_l2: if self.settlement_mode.is_gateway() {
+            query_client_l2: if self.gateway_web3_url.is_some() {
                 Some(L2InterfaceResource(Box::new(
-                    Client::http(self.web3_url.clone())
-                        .context("Client::new()")?
-                        .for_network(L2ChainId::try_from(self.chain_id.0).unwrap().into())
-                        .build(),
+                    Client::http(
+                        self.gateway_web3_url
+                            .clone()
+                            .expect("gateway url is required"),
+                    )
+                    .context("Client::new()")?
+                    .for_network(L2ChainId::try_from(self.chain_id.0).unwrap().into())
+                    .build(),
+                )))
+            } else {
+                None
+            },
+            query_client_gateway: if self.gateway_web3_url.is_some() {
+                Some(GatewayEthInterfaceResource(Box::new(
+                    Client::http(
+                        self.gateway_web3_url
+                            .clone()
+                            .expect("gateway url is required"),
+                    )
+                    .context("Client::new()")?
+                    .build(),
+                )))
+            } else {
+                None
diff --git a/core/node/node_framework/src/implementations/layers/tree_data_fetcher.rs b/core/node/node_framework/src/implementations/layers/tree_data_fetcher.rs
index ca2e80142401..9abbf2daa7d6 100644
--- a/core/node/node_framework/src/implementations/layers/tree_data_fetcher.rs
+++ b/core/node/node_framework/src/implementations/layers/tree_data_fetcher.rs
@@ -1,9 +1,9 @@
 use zksync_node_sync::tree_data_fetcher::TreeDataFetcher;
-use zksync_types::Address;
+use zksync_types::{Address, L2ChainId};
 
 use crate::{
     implementations::resources::{
-        eth_interface::EthInterfaceResource,
+        eth_interface::{EthInterfaceResource, GatewayEthInterfaceResource},
         healthcheck::AppHealthCheckResource,
         main_node_client::MainNodeClientResource,
         pools::{MasterPool, PoolResource},
@@ -17,7 +17,8 @@ use crate::{
 /// Wiring layer for [`TreeDataFetcher`].
 #[derive(Debug)]
 pub struct TreeDataFetcherLayer {
-    diamond_proxy_addr: Address,
+    l1_diamond_proxy_addr: Address,
+    l2_chain_id: L2ChainId,
 }
 
 #[derive(Debug, FromContext)]
@@ -25,7 +26,8 @@ pub struct TreeDataFetcherLayer {
 pub struct Input {
     pub master_pool: PoolResource<MasterPool>,
     pub main_node_client: MainNodeClientResource,
-    pub eth_client: EthInterfaceResource,
+    pub l1_client: EthInterfaceResource,
+    pub gateway_client: Option<GatewayEthInterfaceResource>,
     #[context(default)]
     pub app_health: AppHealthCheckResource,
 }
@@ -38,8 +40,11 @@ pub struct Output {
 }
 
 impl TreeDataFetcherLayer {
-    pub fn new(diamond_proxy_addr: Address) -> Self {
-        Self { diamond_proxy_addr }
+    pub fn new(l1_diamond_proxy_addr: Address, l2_chain_id: L2ChainId) -> Self {
+        Self {
+            l1_diamond_proxy_addr,
+            l2_chain_id,
+        }
     }
 }
 
@@ -55,14 +60,22 @@ impl WiringLayer for TreeDataFetcherLayer {
     async fn wire(self, input: Self::Input) -> Result<Self::Output, WiringError> {
         let pool = input.master_pool.get().await?;
         let MainNodeClientResource(client) = input.main_node_client;
-        let EthInterfaceResource(eth_client) = input.eth_client;
+        let EthInterfaceResource(l1_client) = input.l1_client;
+        let gateway_client = input.gateway_client.map(|c| c.0);
 
         tracing::warn!(
             "Running tree data fetcher (allows a node to operate w/o a Merkle tree or w/o waiting the tree to catch up).
\ This is an experimental feature; do not use unless you know what you're doing"
         );
 
-        let task =
-            TreeDataFetcher::new(client, pool).with_l1_data(eth_client, self.diamond_proxy_addr)?;
+        let task = TreeDataFetcher::new(client, pool.clone())
+            .with_l1_data(
+                l1_client,
+                self.l1_diamond_proxy_addr,
+                gateway_client,
+                pool,
+                self.l2_chain_id,
+            )
+            .await?;
 
         // Insert healthcheck
         input
diff --git a/core/node/node_framework/src/implementations/resources/eth_interface.rs b/core/node/node_framework/src/implementations/resources/eth_interface.rs
index 24b7df327f63..f1bc17027f90 100644
--- a/core/node/node_framework/src/implementations/resources/eth_interface.rs
+++ b/core/node/node_framework/src/implementations/resources/eth_interface.rs
@@ -13,6 +13,15 @@ impl Resource for EthInterfaceResource {
     }
 }
 
+#[derive(Debug, Clone)]
+pub struct GatewayEthInterfaceResource(pub Box<DynClient<L1>>);
+
+impl Resource for GatewayEthInterfaceResource {
+    fn name() -> String {
+        "common/gateway_eth_interface".into()
+    }
+}
+
 /// A resource that provides L2 interface object to the service.
 /// It is expected to have the same URL as the `EthInterfaceResource`, but have different capabilities.
 ///
diff --git a/core/node/node_sync/src/batch_status_updater/mod.rs b/core/node/node_sync/src/batch_status_updater/mod.rs
index 3f6bb9ff33f8..de3bb88f8b93 100644
--- a/core/node/node_sync/src/batch_status_updater/mod.rs
+++ b/core/node/node_sync/src/batch_status_updater/mod.rs
@@ -13,7 +13,7 @@ use zksync_dal::{Connection, ConnectionPool, Core, CoreDal};
 use zksync_health_check::{Health, HealthStatus, HealthUpdater, ReactiveHealthCheck};
 use zksync_shared_metrics::EN_METRICS;
 use zksync_types::{
-    aggregated_operations::AggregatedActionType, api, L1BatchNumber, L2BlockNumber, H256,
+    aggregated_operations::AggregatedActionType, api, L1BatchNumber, SLChainId, H256,
 };
 use zksync_web3_decl::{
     client::{DynClient, L2},
@@ -41,6 +41,7 @@
 struct BatchStatusChange {
     number: L1BatchNumber,
     l1_tx_hash: H256,
     happened_at: DateTime<Utc>,
+    sl_chain_id: Option<SLChainId>,
 }
 
 #[derive(Debug, Default)]
@@ -73,42 +74,21 @@ impl From<DalError> for UpdaterError {
 
 #[async_trait]
 trait MainNodeClient: fmt::Debug + Send + Sync {
-    /// Returns any L2 block in the specified L1 batch.
-    async fn resolve_l1_batch_to_l2_block(
+    async fn batch_details(
         &self,
         number: L1BatchNumber,
-    ) -> EnrichedClientResult<Option<L2BlockNumber>>;
-
-    async fn block_details(
-        &self,
-        number: L2BlockNumber,
-    ) -> EnrichedClientResult<Option<api::BlockDetails>>;
+    ) -> EnrichedClientResult<Option<api::L1BatchDetails>>;
 }
 
 #[async_trait]
 impl MainNodeClient for Box<DynClient<L2>> {
-    async fn resolve_l1_batch_to_l2_block(
+    async fn batch_details(
         &self,
         number: L1BatchNumber,
-    ) -> EnrichedClientResult<Option<L2BlockNumber>> {
-        let request_latency = FETCHER_METRICS.requests[&FetchStage::GetL2BlockRange].start();
-        let number = self
-            .get_l2_block_range(number)
-            .rpc_context("resolve_l1_batch_to_l2_block")
-            .with_arg("number", &number)
-            .await?
-            .map(|(start, _)| L2BlockNumber(start.as_u32()));
-        request_latency.observe();
-        Ok(number)
-    }
-
-    async fn block_details(
-        &self,
-        number: L2BlockNumber,
-    ) -> EnrichedClientResult<Option<api::BlockDetails>> {
-        let request_latency = FETCHER_METRICS.requests[&FetchStage::GetBlockDetails].start();
+    ) -> EnrichedClientResult<Option<api::L1BatchDetails>> {
+        let request_latency = FETCHER_METRICS.requests[&FetchStage::GetL1BatchDetails].start();
         let details = self
-            .get_block_details(number)
+            .get_l1_batch_details(number)
             .rpc_context("block_details")
             .with_arg("number", &number)
             .await?;
@@ -155,27 +135,34 @@ impl UpdaterCursor {
         })
     }
 
-    fn extract_tx_hash_and_timestamp(
-        batch_info: &api::BlockDetails,
+    /// Extracts tx hash, timestamp and chain id of the operation.
+    fn extract_op_data(
+        batch_info: &api::L1BatchDetails,
         stage: AggregatedActionType,
-    ) -> (Option<H256>, Option<DateTime<Utc>>) {
+    ) -> (Option<H256>, Option<DateTime<Utc>>, Option<SLChainId>) {
         match stage {
-            AggregatedActionType::Commit => {
-                (batch_info.base.commit_tx_hash, batch_info.base.committed_at)
-            }
-            AggregatedActionType::PublishProofOnchain => {
-                (batch_info.base.prove_tx_hash, batch_info.base.proven_at)
-            }
-            AggregatedActionType::Execute => {
-                (batch_info.base.execute_tx_hash, batch_info.base.executed_at)
-            }
+            AggregatedActionType::Commit => (
+                batch_info.base.commit_tx_hash,
+                batch_info.base.committed_at,
+                batch_info.base.commit_chain_id,
+            ),
+            AggregatedActionType::PublishProofOnchain => (
+                batch_info.base.prove_tx_hash,
+                batch_info.base.proven_at,
+                batch_info.base.prove_chain_id,
+            ),
+            AggregatedActionType::Execute => (
+                batch_info.base.execute_tx_hash,
+                batch_info.base.executed_at,
+                batch_info.base.execute_chain_id,
+            ),
         }
     }
 
     fn update(
         &mut self,
         status_changes: &mut StatusChanges,
-        batch_info: &api::BlockDetails,
+        batch_info: &api::L1BatchDetails,
     ) -> anyhow::Result<()> {
         for stage in [
             AggregatedActionType::Commit,
@@ -190,10 +177,10 @@
     fn update_stage(
         &mut self,
         status_changes: &mut StatusChanges,
-        batch_info: &api::BlockDetails,
+        batch_info: &api::L1BatchDetails,
         stage: AggregatedActionType,
     ) -> anyhow::Result<()> {
-        let (l1_tx_hash, happened_at) = Self::extract_tx_hash_and_timestamp(batch_info, stage);
+        let (l1_tx_hash, happened_at, sl_chain_id) = Self::extract_op_data(batch_info, stage);
         let (last_l1_batch, changes_to_update) = match stage {
             AggregatedActionType::Commit => (
                 &mut self.last_committed_l1_batch,
@@ -212,7 +199,7 @@ impl UpdaterCursor {
         let Some(l1_tx_hash) = l1_tx_hash else {
             return Ok(());
         };
-        if batch_info.l1_batch_number != last_l1_batch.next() {
+        if batch_info.number != last_l1_batch.next() {
             return Ok(());
         }
 
@@ -221,12 +208,13 @@ impl UpdaterCursor {
             format!("Malformed API response: batch is {action_str}, but has no relevant timestamp")
         })?;
         changes_to_update.push(BatchStatusChange {
-            number: batch_info.l1_batch_number,
+            number: batch_info.number,
             l1_tx_hash,
             happened_at,
+            sl_chain_id,
         });
-        tracing::info!("Batch {}: {action_str}", batch_info.l1_batch_number);
-        FETCHER_METRICS.l1_batch[&stage.into()].set(batch_info.l1_batch_number.0.into());
+        tracing::info!("Batch {}: {action_str}", batch_info.number);
+        FETCHER_METRICS.l1_batch[&stage.into()].set(batch_info.number.0.into());
         *last_l1_batch += 1;
         Ok(())
     }
@@ -348,22 +336,11 @@ impl BatchStatusUpdater {
         // update all three statuses (e.g. if the node is still syncing), but also skipping the gaps in the statuses
         // (e.g. if the last executed batch is 10, but the last proven is 20, we don't need to check the batches 11-19).
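        // (Descriptive note: after this change, a single `batch_details` response carries the
        // commit, prove, and execute data at once, so one request per batch in the loop below is
        // enough to advance all three cursors.)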
while batch <= last_sealed_batch { - // While we may receive `None` for the `self.current_l1_batch`, it's OK: open batch is guaranteed to not - // be sent to L1. - let l2_block_number = self.client.resolve_l1_batch_to_l2_block(batch).await?; - let Some(l2_block_number) = l2_block_number else { + let Some(batch_info) = self.client.batch_details(batch).await? else { + // Batch is not ready yet return Ok(()); }; - let Some(batch_info) = self.client.block_details(l2_block_number).await? else { - // We cannot recover from an external API inconsistency. - let err = anyhow::anyhow!( - "Node API is inconsistent: L2 block {l2_block_number} was reported to be a part of {batch} L1 batch, \ - but API has no information about this L2 block", - ); - return Err(err.into()); - }; - cursor.update(status_changes, &batch_info)?; // Check whether we can skip a part of the range. @@ -407,10 +384,11 @@ impl BatchStatusUpdater { for change in &changes.commit { tracing::info!( - "Commit status change: number {}, hash {}, happened at {}", + "Commit status change: number {}, hash {}, happened at {}, on chainID {:?}", change.number, change.l1_tx_hash, - change.happened_at + change.happened_at, + change.sl_chain_id ); anyhow::ensure!( change.number <= last_sealed_batch, @@ -424,6 +402,7 @@ impl BatchStatusUpdater { AggregatedActionType::Commit, change.l1_tx_hash, change.happened_at, + change.sl_chain_id, ) .await?; cursor.last_committed_l1_batch = change.number; @@ -431,10 +410,11 @@ impl BatchStatusUpdater { for change in &changes.prove { tracing::info!( - "Prove status change: number {}, hash {}, happened at {}", + "Prove status change: number {}, hash {}, happened at {}, on chainID {:?}", change.number, change.l1_tx_hash, - change.happened_at + change.happened_at, + change.sl_chain_id ); anyhow::ensure!( change.number <= cursor.last_committed_l1_batch, @@ -448,6 +428,7 @@ impl BatchStatusUpdater { AggregatedActionType::PublishProofOnchain, change.l1_tx_hash, change.happened_at, + change.sl_chain_id, ) .await?; cursor.last_proven_l1_batch = change.number; @@ -455,10 +436,11 @@ impl BatchStatusUpdater { for change in &changes.execute { tracing::info!( - "Execute status change: number {}, hash {}, happened at {}", + "Execute status change: number {}, hash {}, happened at {}, on chainID {:?}", change.number, change.l1_tx_hash, - change.happened_at + change.happened_at, + change.sl_chain_id ); anyhow::ensure!( change.number <= cursor.last_proven_l1_batch, @@ -472,6 +454,7 @@ impl BatchStatusUpdater { AggregatedActionType::Execute, change.l1_tx_hash, change.happened_at, + change.sl_chain_id, ) .await?; cursor.last_executed_l1_batch = change.number; diff --git a/core/node/node_sync/src/batch_status_updater/tests.rs b/core/node/node_sync/src/batch_status_updater/tests.rs index 28b89f86f6a7..4ea1a5937075 100644 --- a/core/node/node_sync/src/batch_status_updater/tests.rs +++ b/core/node/node_sync/src/batch_status_updater/tests.rs @@ -8,7 +8,7 @@ use tokio::sync::{watch, Mutex}; use zksync_contracts::BaseSystemContractsHashes; use zksync_node_genesis::{insert_genesis_batch, GenesisParams}; use zksync_node_test_utils::{create_l1_batch, create_l2_block, prepare_recovery_snapshot}; -use zksync_types::{Address, ProtocolVersionId}; +use zksync_types::L2BlockNumber; use super::*; use crate::metrics::L1BatchStage; @@ -104,11 +104,11 @@ impl L1BatchStagesMap { for (number, stage) in self.iter() { let local_details = storage .blocks_web3_dal() - .get_block_details(L2BlockNumber(number.0)) + 
.get_l1_batch_details(L1BatchNumber(number.0))
             .await
             .unwrap()
             .unwrap_or_else(|| panic!("no details for block #{number}"));
-        let expected_details = mock_block_details(number.0, stage);
+        let expected_details = mock_batch_details(number.0, stage);
 
         assert_eq!(
             local_details.base.commit_tx_hash,
@@ -118,6 +118,10 @@ impl L1BatchStagesMap {
             local_details.base.committed_at,
             expected_details.base.committed_at
         );
+        assert_eq!(
+            local_details.base.commit_chain_id,
+            expected_details.base.commit_chain_id,
+        );
         assert_eq!(
             local_details.base.prove_tx_hash,
             expected_details.base.prove_tx_hash
@@ -126,6 +130,10 @@ impl L1BatchStagesMap {
             local_details.base.proven_at,
             expected_details.base.proven_at
         );
+        assert_eq!(
+            local_details.base.prove_chain_id,
+            expected_details.base.prove_chain_id,
+        );
         assert_eq!(
             local_details.base.execute_tx_hash,
             expected_details.base.execute_tx_hash
@@ -134,14 +142,17 @@ impl L1BatchStagesMap {
             local_details.base.executed_at,
             expected_details.base.executed_at
         );
+        assert_eq!(
+            local_details.base.execute_chain_id,
+            expected_details.base.execute_chain_id,
+        );
         }
     }
 }
 
-fn mock_block_details(number: u32, stage: L1BatchStage) -> api::BlockDetails {
-    api::BlockDetails {
-        number: L2BlockNumber(number),
-        l1_batch_number: L1BatchNumber(number),
+fn mock_batch_details(number: u32, stage: L1BatchStage) -> api::L1BatchDetails {
+    api::L1BatchDetails {
+        number: L1BatchNumber(number),
         base: api::BlockDetailsBase {
             timestamp: number.into(),
             l1_tx_count: 0,
@@ -151,18 +162,19 @@ fn mock_block_details(number: u32, stage: L1BatchStage) -> api::BlockDetails {
             commit_tx_hash: (stage >= L1BatchStage::Committed).then(|| H256::repeat_byte(1)),
             committed_at: (stage >= L1BatchStage::Committed)
                 .then(|| Utc.timestamp_opt(100, 0).unwrap()),
+            commit_chain_id: (stage >= L1BatchStage::Committed).then_some(SLChainId(11)),
             prove_tx_hash: (stage >= L1BatchStage::Proven).then(|| H256::repeat_byte(2)),
             proven_at: (stage >= L1BatchStage::Proven).then(|| Utc.timestamp_opt(200, 0).unwrap()),
+            prove_chain_id: (stage >= L1BatchStage::Proven).then_some(SLChainId(22)),
             execute_tx_hash: (stage >= L1BatchStage::Executed).then(|| H256::repeat_byte(3)),
             executed_at: (stage >= L1BatchStage::Executed)
                 .then(|| Utc.timestamp_opt(300, 0).unwrap()),
+            execute_chain_id: (stage >= L1BatchStage::Executed).then_some(SLChainId(33)),
             l1_gas_price: 1,
             l2_fair_gas_price: 2,
             fair_pubdata_price: None,
             base_system_contracts_hashes: BaseSystemContractsHashes::default(),
         },
-        operator_address: Address::zero(),
-        protocol_version: Some(ProtocolVersionId::default()),
     }
 }
 
@@ -177,23 +189,15 @@ impl From<L1BatchStagesMap> for MockMainNodeClient {
 
 #[async_trait]
 impl MainNodeClient for MockMainNodeClient {
-    async fn resolve_l1_batch_to_l2_block(
+    async fn batch_details(
         &self,
         number: L1BatchNumber,
-    ) -> EnrichedClientResult<Option<L2BlockNumber>> {
-        let map = self.0.lock().await;
-        Ok(map.get(number).is_some().then_some(L2BlockNumber(number.0)))
-    }
-
-    async fn block_details(
-        &self,
-        number: L2BlockNumber,
-    ) -> EnrichedClientResult<Option<api::BlockDetails>> {
+    ) -> EnrichedClientResult<Option<api::L1BatchDetails>> {
         let map = self.0.lock().await;
         let Some(stage) = map.get(L1BatchNumber(number.0)) else {
             return Ok(None);
         };
-        Ok(Some(mock_block_details(number.0, stage)))
+        Ok(Some(mock_batch_details(number.0, stage)))
     }
 }
 
@@ -202,6 +206,7 @@ fn mock_change(number: L1BatchNumber) -> BatchStatusChange {
         number,
         l1_tx_hash: H256::zero(),
         happened_at: DateTime::default(),
+        sl_chain_id: Some(SLChainId(0)),
     }
 }
 
diff --git a/core/node/node_sync/src/metrics.rs b/core/node/node_sync/src/metrics.rs
index
805c6f913df2..02ac1b3837af 100644
--- a/core/node/node_sync/src/metrics.rs
+++ b/core/node/node_sync/src/metrics.rs
@@ -8,10 +8,7 @@ use zksync_types::aggregated_operations::AggregatedActionType;
 #[derive(Debug, Clone, Copy, PartialEq, Eq, Hash, EncodeLabelValue, EncodeLabelSet)]
 #[metrics(label = "stage", rename_all = "snake_case")]
 pub(super) enum FetchStage {
-    // uses legacy naming for L2 blocks for compatibility reasons
-    #[metrics(name = "get_miniblock_range")]
-    GetL2BlockRange,
-    GetBlockDetails,
+    GetL1BatchDetails,
 }
 
 #[derive(
diff --git a/core/node/node_sync/src/tree_data_fetcher/mod.rs b/core/node/node_sync/src/tree_data_fetcher/mod.rs
index 52037dac4edc..fa1c2cec253a 100644
--- a/core/node/node_sync/src/tree_data_fetcher/mod.rs
+++ b/core/node/node_sync/src/tree_data_fetcher/mod.rs
@@ -11,7 +11,7 @@ use zksync_dal::{Connection, ConnectionPool, Core, CoreDal, DalError};
 use zksync_health_check::{Health, HealthStatus, HealthUpdater, ReactiveHealthCheck};
 use zksync_types::{
     block::{L1BatchTreeData, L2BlockHeader},
-    Address, L1BatchNumber,
+    Address, L1BatchNumber, L2ChainId,
 };
 use zksync_web3_decl::{
     client::{DynClient, L1, L2},
@@ -127,10 +127,13 @@ impl TreeDataFetcher {
     /// Attempts to fetch root hashes from L1 (namely, `BlockCommit` events emitted by the diamond proxy) if possible.
     /// The main node will still be used as a fallback in case communicating with L1 fails, or for newer batches,
     /// which may not be committed on L1.
-    pub fn with_l1_data(
+    pub async fn with_l1_data(
         mut self,
-        eth_client: Box<DynClient<L1>>,
-        diamond_proxy_address: Address,
+        l1_client: Box<DynClient<L1>>,
+        l1_diamond_proxy_addr: Address,
+        gateway_client: Option<Box<DynClient<L1>>>,
+        pool: ConnectionPool<Core>,
+        l2_chain_id: L2ChainId,
     ) -> anyhow::Result<Self> {
         anyhow::ensure!(
             self.diamond_proxy_address.is_none(),
@@ -138,11 +141,15 @@ impl TreeDataFetcher {
         );
 
         let l1_provider = L1DataProvider::new(
-            eth_client.for_component("tree_data_fetcher"),
-            diamond_proxy_address,
-        )?;
+            l1_client.for_component("tree_data_fetcher"),
+            l1_diamond_proxy_addr,
+            gateway_client.map(|c| c.for_component("tree_data_fetcher")),
+            pool,
+            l2_chain_id,
+        )
+        .await?;
         self.data_provider.set_l1(l1_provider);
-        self.diamond_proxy_address = Some(diamond_proxy_address);
+        self.diamond_proxy_address = Some(l1_diamond_proxy_addr);
         Ok(self)
     }
 
diff --git a/core/node/node_sync/src/tree_data_fetcher/provider/mod.rs b/core/node/node_sync/src/tree_data_fetcher/provider/mod.rs
index e4f68cade6a4..7aea5de8c6b2 100644
--- a/core/node/node_sync/src/tree_data_fetcher/provider/mod.rs
+++ b/core/node/node_sync/src/tree_data_fetcher/provider/mod.rs
@@ -2,8 +2,13 @@ use std::fmt;
 
 use anyhow::Context;
 use async_trait::async_trait;
-use zksync_eth_client::EthInterface;
-use zksync_types::{block::L2BlockHeader, web3, Address, L1BatchNumber, H256, U256, U64};
+use zksync_contracts::bridgehub_contract;
+use zksync_dal::{ConnectionPool, Core, CoreDal};
+use zksync_eth_client::{CallFunctionArgs, EthInterface};
+use zksync_system_constants::L2_BRIDGEHUB_ADDRESS;
+use zksync_types::{
+    block::L2BlockHeader, web3, Address, L1BatchNumber, L2ChainId, SLChainId, H256, U256, U64,
+};
 use zksync_web3_decl::{
     client::{DynClient, L1, L2},
     error::{ClientRpcContext, EnrichedClientError, EnrichedClientResult},
@@ -13,7 +18,7 @@ use zksync_web3_decl::{
 
 use super::{
     metrics::{ProcessingStage, TreeDataProviderSource, METRICS},
-    TreeDataFetcherResult,
+    TreeDataFetcherError, TreeDataFetcherResult,
 };
 
 #[cfg(test)]
@@ -89,6 +94,14 @@ struct PastL1BatchInfo {
     number: L1BatchNumber,
     l1_commit_block_number:
U64,
     l1_commit_block_timestamp: U256,
+    chain_id: SLChainId,
+}
+
+#[derive(Debug)]
+struct SLChainData {
+    client: Box<DynClient<L1>>,
+    chain_id: SLChainId,
+    diamond_proxy_addr: Address,
 }
 
 /// Provider of tree data loading it from L1 `BlockCommit` events emitted by the diamond proxy contract.
@@ -103,10 +116,11 @@ struct PastL1BatchInfo {
 /// (provided it's not too far behind the seal timestamp of the batch).
 #[derive(Debug)]
 pub(super) struct L1DataProvider {
-    eth_client: Box<DynClient<L1>>,
-    diamond_proxy_address: Address,
+    l1_chain_data: SLChainData,
+    gateway_chain_data: Option<SLChainData>,
     block_commit_signature: H256,
     past_l1_batch: Option<PastL1BatchInfo>,
+    pool: ConnectionPool<Core>,
 }
 
 impl L1DataProvider {
@@ -116,19 +130,46 @@ impl L1DataProvider {
     /// `L1_BLOCK_ACCURACY`, but not large enough to trigger request limiting on the L1 RPC provider.
     const L1_BLOCK_RANGE: U64 = U64([20_000]);
 
-    pub fn new(
-        eth_client: Box<DynClient<L1>>,
-        diamond_proxy_address: Address,
+    pub async fn new(
+        l1_client: Box<DynClient<L1>>,
+        l1_diamond_proxy_addr: Address,
+        gateway_client: Option<Box<DynClient<L1>>>,
+        pool: ConnectionPool<Core>,
+        l2_chain_id: L2ChainId,
     ) -> anyhow::Result<Self> {
+        let l1_chain_id = l1_client.fetch_chain_id().await?;
+        let l1_chain_data = SLChainData {
+            client: l1_client,
+            chain_id: l1_chain_id,
+            diamond_proxy_addr: l1_diamond_proxy_addr,
+        };
+        let gateway_chain_data = if let Some(client) = gateway_client {
+            let gateway_diamond_proxy = CallFunctionArgs::new(
+                "getZKChain",
+                zksync_types::ethabi::Token::Uint(l2_chain_id.as_u64().into()),
+            )
+            .for_contract(L2_BRIDGEHUB_ADDRESS, &bridgehub_contract())
+            .call(&client)
+            .await?;
+            let chain_id = client.fetch_chain_id().await?;
+            Some(SLChainData {
+                client,
+                chain_id,
+                diamond_proxy_addr: gateway_diamond_proxy,
+            })
+        } else {
+            None
+        };
         let block_commit_signature = zksync_contracts::hyperchain_contract()
             .event("BlockCommit")
             .context("missing `BlockCommit` event")?
             .signature();
         Ok(Self {
-            eth_client,
-            diamond_proxy_address,
+            l1_chain_data,
+            gateway_chain_data,
             block_commit_signature,
             past_l1_batch: None,
+            pool,
        })
    }
@@ -186,6 +227,16 @@ impl L1DataProvider {
         })?;
         Ok((number, block.timestamp))
     }
+
+    fn chain_data_by_id(&self, searched_chain_id: SLChainId) -> Option<&SLChainData> {
+        if searched_chain_id == self.l1_chain_data.chain_id {
+            Some(&self.l1_chain_data)
+        } else if Some(searched_chain_id) == self.gateway_chain_data.as_ref().map(|d| d.chain_id) {
+            self.gateway_chain_data.as_ref()
+        } else {
+            None
+        }
+    }
 }
 
 #[async_trait]
@@ -195,12 +246,36 @@ impl TreeDataProvider for L1DataProvider {
         number: L1BatchNumber,
         last_l2_block: &L2BlockHeader,
     ) -> TreeDataProviderResult {
+        let sl_chain_id = self
+            .pool
+            .connection_tagged("tree_data_fetcher")
+            .await
+            .map_err(|err| TreeDataFetcherError::Internal(err.into()))?
+ .eth_sender_dal() + .get_batch_commit_chain_id(number) + .await + .map_err(|err| TreeDataFetcherError::Internal(err.into()))?; + let chain_data = match sl_chain_id { + Some(chain_id) => { + let Some(chain_data) = self.chain_data_by_id(chain_id) else { + return Err(TreeDataFetcherError::Internal(anyhow::anyhow!( + "failed to find client for chain id {chain_id}" + ))); + }; + chain_data + } + None => &self.l1_chain_data, + }; + let l1_batch_seal_timestamp = last_l2_block.timestamp; let from_block = self.past_l1_batch.and_then(|info| { assert!( info.number < number, "`batch_details()` must be called with monotonically increasing numbers" ); + if info.chain_id != chain_data.chain_id { + return None; + } let threshold_timestamp = info.l1_commit_block_timestamp + Self::L1_BLOCK_RANGE.as_u64() / 2; if U256::from(l1_batch_seal_timestamp) > threshold_timestamp { tracing::debug!( @@ -219,7 +294,7 @@ impl TreeDataProvider for L1DataProvider { Some(number) => number, None => { let (approximate_block, steps) = - Self::guess_l1_commit_block_number(&self.eth_client, l1_batch_seal_timestamp) + Self::guess_l1_commit_block_number(&chain_data.client, l1_batch_seal_timestamp) .await?; tracing::debug!( number = number.0, @@ -235,7 +310,7 @@ impl TreeDataProvider for L1DataProvider { let number_topic = H256::from_low_u64_be(number.0.into()); let filter = web3::FilterBuilder::default() - .address(vec![self.diamond_proxy_address]) + .address(vec![chain_data.diamond_proxy_addr]) .from_block(web3::BlockNumber::Number(from_block)) .to_block(web3::BlockNumber::Number(from_block + Self::L1_BLOCK_RANGE)) .topics( @@ -245,7 +320,7 @@ impl TreeDataProvider for L1DataProvider { None, ) .build(); - let mut logs = self.eth_client.logs(&filter).await?; + let mut logs = chain_data.client.logs(&filter).await?; logs.retain(|log| !log.is_removed() && log.block_number.is_some()); match logs.as_slice() { @@ -266,7 +341,10 @@ impl TreeDataProvider for L1DataProvider { {diff} block(s) after the `from` block from the filter" ); - let l1_commit_block = self.eth_client.block(l1_commit_block_number.into()).await?; + let l1_commit_block = chain_data + .client + .block(l1_commit_block_number.into()) + .await?; let l1_commit_block = l1_commit_block.ok_or_else(|| { let err = "Block disappeared from L1 RPC provider"; EnrichedClientError::new(ClientError::Custom(err.into()), "batch_details") @@ -276,6 +354,7 @@ impl TreeDataProvider for L1DataProvider { number, l1_commit_block_number, l1_commit_block_timestamp: l1_commit_block.timestamp, + chain_id: chain_data.chain_id, }); Ok(Ok(root_hash)) } diff --git a/core/node/node_sync/src/tree_data_fetcher/provider/tests.rs b/core/node/node_sync/src/tree_data_fetcher/provider/tests.rs index 09fa16f16077..55bed282f48a 100644 --- a/core/node/node_sync/src/tree_data_fetcher/provider/tests.rs +++ b/core/node/node_sync/src/tree_data_fetcher/provider/tests.rs @@ -3,10 +3,15 @@ use assert_matches::assert_matches; use once_cell::sync::Lazy; use test_casing::test_casing; -use zksync_dal::{ConnectionPool, Core}; +use zksync_dal::{Connection, ConnectionPool, Core}; use zksync_node_genesis::{insert_genesis_batch, GenesisParams}; use zksync_node_test_utils::create_l2_block; -use zksync_types::{api, L2BlockNumber, ProtocolVersionId}; +use zksync_types::{ + aggregated_operations::AggregatedActionType, + api, ethabi, + web3::{BlockId, CallRequest}, + L2BlockNumber, ProtocolVersionId, +}; use zksync_web3_decl::client::MockClient; use super::*; @@ -14,7 +19,11 @@ use crate::tree_data_fetcher::tests::{ 
get_last_l2_block, seal_l1_batch_with_timestamp, MockMainNodeClient,
 };
 
-const DIAMOND_PROXY_ADDRESS: Address = Address::repeat_byte(0x22);
+const L1_DIAMOND_PROXY_ADDRESS: Address = Address::repeat_byte(0x22);
+const GATEWAY_DIAMOND_PROXY_ADDRESS: Address = Address::repeat_byte(0x33);
+const L1_CHAIN_ID: u64 = 9;
+const GATEWAY_CHAIN_ID: u64 = 505;
+const ERA_CHAIN_ID: u64 = 270;
 
 static BLOCK_COMMIT_SIGNATURE: Lazy<H256> = Lazy::new(|| {
     zksync_contracts::hyperchain_contract()
@@ -33,10 +42,13 @@ fn mock_block_details_base(number: u32, hash: Option<H256>) -> api::BlockDetails
         status: api::BlockStatus::Sealed,
         commit_tx_hash: None,
         committed_at: None,
+        commit_chain_id: None,
         prove_tx_hash: None,
         proven_at: None,
+        prove_chain_id: None,
         execute_tx_hash: None,
         executed_at: None,
+        execute_chain_id: None,
         l1_gas_price: 10,
         l2_fair_gas_price: 100,
         fair_pubdata_price: None,
@@ -117,53 +129,66 @@ async fn rpc_data_provider_with_block_hash_divergence() {
     assert_matches!(output, Err(MissingData::PossibleReorg));
 }
 
+#[derive(Debug)]
 struct EthereumParameters {
     block_number: U64,
-    // L1 block numbers in which L1 batches are committed starting from L1 batch #1
-    l1_blocks_for_commits: Vec<U64>,
+    // L1 batch numbers and SL block numbers in which they are committed.
+    batches_and_sl_blocks_for_commits: Vec<(L1BatchNumber, U64)>,
+    chain_id: SLChainId,
+    diamond_proxy: Address,
 }
 
 impl EthereumParameters {
-    fn new(block_number: u64) -> Self {
+    fn new_l1(block_number: u64) -> Self {
+        Self::new(block_number, L1_CHAIN_ID, L1_DIAMOND_PROXY_ADDRESS)
+    }
+
+    fn new(block_number: u64, chain_id: u64, diamond_proxy: Address) -> Self {
         Self {
             block_number: block_number.into(),
-            l1_blocks_for_commits: vec![],
+            batches_and_sl_blocks_for_commits: vec![],
+            chain_id: SLChainId(chain_id),
+            diamond_proxy,
         }
     }
 
-    fn push_commit(&mut self, l1_block_number: u64) {
+    fn push_commit(&mut self, l1_batch_number: L1BatchNumber, l1_block_number: u64) {
         assert!(l1_block_number <= self.block_number.as_u64());
 
         let l1_block_number = U64::from(l1_block_number);
-        let last_commit = self.l1_blocks_for_commits.last().copied();
-        let is_increasing = last_commit.map_or(true, |last_number| last_number <= l1_block_number);
-        assert!(is_increasing, "Invalid L1 block number for commit");
+        let last_commit = self.batches_and_sl_blocks_for_commits.last().copied();
+        let is_increasing = last_commit.map_or(true, |last| {
+            last.0 <= l1_batch_number && last.1 <= l1_block_number
+        });
+        assert!(
+            is_increasing,
+            "Invalid batch number or L1 block number for commit"
+        );
 
-        self.l1_blocks_for_commits.push(l1_block_number);
+        self.batches_and_sl_blocks_for_commits
+            .push((l1_batch_number, l1_block_number));
     }
 
     fn client(&self) -> MockClient<L1> {
-        let logs = self
-            .l1_blocks_for_commits
-            .iter()
-            .enumerate()
-            .map(|(i, &l1_block_number)| {
-                let l1_batch_number = H256::from_low_u64_be(i as u64 + 1);
-                let root_hash = H256::repeat_byte(i as u8 + 1);
+        let logs = self.batches_and_sl_blocks_for_commits.iter().map(
+            |&(l1_batch_number, l1_block_number)| {
+                let l1_batch_number_topic = H256::from_low_u64_be(l1_batch_number.0 as u64);
+                let root_hash = H256::repeat_byte(l1_batch_number.0 as u8);
                 web3::Log {
-                    address: DIAMOND_PROXY_ADDRESS,
+                    address: self.diamond_proxy,
                    topics: vec![
                        *BLOCK_COMMIT_SIGNATURE,
-                        l1_batch_number,
+                        l1_batch_number_topic,
                        root_hash,
                        H256::zero(), // commitment hash; not used
                    ],
                    block_number: Some(l1_block_number),
                    ..web3::Log::default()
                }
-            });
+            },
+        );
         let logs: Vec<_> = logs.collect();
-        mock_l1_client(self.block_number, logs)
+
mock_l1_client(self.block_number, logs, self.chain_id)
     }
 }
 
@@ -201,7 +226,7 @@ fn filter_logs(logs: &[web3::Log], filter: web3::Filter) -> Vec<web3::Log> {
     filtered_logs.cloned().collect()
 }
 
-fn mock_l1_client(block_number: U64, logs: Vec<web3::Log>) -> MockClient<L1> {
+fn mock_l1_client(block_number: U64, logs: Vec<web3::Log>, chain_id: SLChainId) -> MockClient<L1> {
     MockClient::builder(L1::default())
         .method("eth_blockNumber", move || Ok(block_number))
         .method(
@@ -228,12 +253,44 @@ fn mock_l1_client(block_number: U64, logs: Vec<web3::Log>) -> MockClient<L1> {
         .method("eth_getLogs", move |filter: web3::Filter| {
             Ok(filter_logs(&logs, filter))
         })
+        .method("eth_chainId", move || Ok(U64::from(chain_id.0)))
+        .method("eth_call", move |req: CallRequest, _block_id: BlockId| {
+            let contract = bridgehub_contract();
+            let expected_input = contract
+                .function("getZKChain")
+                .unwrap()
+                .encode_input(&[ethabi::Token::Uint(ERA_CHAIN_ID.into())])
+                .unwrap();
+            assert_eq!(req.to, Some(L2_BRIDGEHUB_ADDRESS));
+            assert_eq!(req.data, Some(expected_input.into()));
+            Ok(web3::Bytes(ethabi::encode(&[ethabi::Token::Address(
+                GATEWAY_DIAMOND_PROXY_ADDRESS,
+            )])))
+        })
         .build()
 }
 
+pub(super) async fn insert_l1_batch_commit_chain_id(
+    storage: &mut Connection<'_, Core>,
+    number: L1BatchNumber,
+    chain_id: SLChainId,
+) {
+    storage
+        .eth_sender_dal()
+        .insert_bogus_confirmed_eth_tx(
+            number,
+            AggregatedActionType::Commit,
+            H256::from_low_u64_be(number.0 as u64),
+            chrono::Utc::now(),
+            Some(chain_id),
+        )
+        .await
+        .unwrap();
+}
+
 #[tokio::test]
 async fn guessing_l1_commit_block_number() {
-    let eth_params = EthereumParameters::new(100_000);
+    let eth_params = EthereumParameters::new_l1(100_000);
     let eth_client = eth_params.client();
 
     for timestamp in [0, 100, 1_000, 5_000, 10_000, 100_000] {
@@ -251,6 +308,21 @@ async fn guessing_l1_commit_block_number() {
     }
 }
 
+async fn create_l1_data_provider(
+    l1_client: Box<DynClient<L1>>,
+    pool: ConnectionPool<Core>,
+) -> L1DataProvider {
+    L1DataProvider::new(
+        l1_client,
+        L1_DIAMOND_PROXY_ADDRESS,
+        None,
+        pool,
+        L2ChainId::new(ERA_CHAIN_ID).unwrap(),
+    )
+    .await
+    .unwrap()
+}
+
 async fn test_using_l1_data_provider(l1_batch_timestamps: &[u64]) {
     let pool = ConnectionPool::<Core>::test_pool().await;
     let mut storage = pool.connection().await.unwrap();
     insert_genesis_batch(&mut storage, &GenesisParams::mock())
         .await
         .unwrap();
 
-    let mut eth_params = EthereumParameters::new(1_000_000);
+    let mut eth_params = EthereumParameters::new_l1(1_000_000);
     for (number, &ts) in l1_batch_timestamps.iter().enumerate() {
         let number = L1BatchNumber(number as u32 + 1);
         seal_l1_batch_with_timestamp(&mut storage, number, ts).await;
-        eth_params.push_commit(ts + 1_000); // have a reasonable small diff between batch generation and commitment
+        eth_params.push_commit(number, ts + 1_000); // have a reasonable small diff between batch generation and commitment
     }
 
-    let mut provider =
-        L1DataProvider::new(Box::new(eth_params.client()), DIAMOND_PROXY_ADDRESS).unwrap();
+    let mut provider = create_l1_data_provider(Box::new(eth_params.client()), pool.clone()).await;
     for i in 0..l1_batch_timestamps.len() {
         let number = L1BatchNumber(i as u32 + 1);
         let root_hash = provider
@@ -278,7 +349,7 @@ async fn test_using_l1_data_provider(l1_batch_timestamps: &[u64]) {
 
         let past_l1_batch = provider.past_l1_batch.unwrap();
         assert_eq!(past_l1_batch.number, number);
-        let expected_l1_block_number = eth_params.l1_blocks_for_commits[i];
+        let expected_l1_block_number = eth_params.batches_and_sl_blocks_for_commits[i].1;
         assert_eq!(
past_l1_batch.l1_commit_block_number,
             expected_l1_block_number
@@ -297,13 +368,78 @@ async fn using_l1_data_provider(batch_spacing: u64) {
     test_using_l1_data_provider(&l1_batch_timestamps).await;
 }
 
+#[tokio::test]
+async fn using_different_settlement_layers() {
+    let pool = ConnectionPool::<Core>::test_pool().await;
+    let mut storage = pool.connection().await.unwrap();
+    insert_genesis_batch(&mut storage, &GenesisParams::mock())
+        .await
+        .unwrap();
+
+    let l1_eth_params = EthereumParameters::new_l1(1_000_000);
+    let gateway_eth_params =
+        EthereumParameters::new(1_000_000, GATEWAY_CHAIN_ID, GATEWAY_DIAMOND_PROXY_ADDRESS);
+    let mut params_array = [l1_eth_params, gateway_eth_params];
+
+    // (index of sl: 0 for l1, 1 for gw; sl block number)
+    let batch_commit_info = [
+        (0, 50_000),
+        (0, 50_500),
+        (1, 30_000),
+        (1, 32_000),
+        (0, 51_000),
+        (1, 60_000),
+    ];
+    let chain_ids = [SLChainId(L1_CHAIN_ID), SLChainId(GATEWAY_CHAIN_ID)];
+    for (i, &(sl_idx, ts)) in batch_commit_info.iter().enumerate() {
+        let number = L1BatchNumber(i as u32 + 1);
+        seal_l1_batch_with_timestamp(&mut storage, number, ts).await;
+        insert_l1_batch_commit_chain_id(&mut storage, number, chain_ids[sl_idx]).await;
+        params_array[sl_idx].push_commit(number, ts + 1_000); // have a reasonable small diff between batch generation and commitment
+    }
+
+    let mut provider = L1DataProvider::new(
+        Box::new(params_array[0].client()),
+        L1_DIAMOND_PROXY_ADDRESS,
+        Some(Box::new(params_array[1].client())),
+        pool,
+        L2ChainId::new(ERA_CHAIN_ID).unwrap(),
+    )
+    .await
+    .unwrap();
+    for i in 0..batch_commit_info.len() {
+        let number = L1BatchNumber(i as u32 + 1);
+        let root_hash = provider
+            .batch_details(number, &get_last_l2_block(&mut storage, number).await)
+            .await
+            .unwrap()
+            .expect(&format!("no root hash for batch #{number}"));
+        assert_eq!(root_hash, H256::repeat_byte(number.0 as u8));
+
+        let past_l1_batch = provider.past_l1_batch.unwrap();
+        assert_eq!(past_l1_batch.number, number);
+        let expected_l1_block_number = batch_commit_info[i].1 + 1_000;
+        assert_eq!(
+            past_l1_batch.l1_commit_block_number,
+            expected_l1_block_number.into()
+        );
+        assert_eq!(
+            past_l1_batch.l1_commit_block_timestamp,
+            expected_l1_block_number.into()
+        );
+        let expected_chain_id = chain_ids[batch_commit_info[i].0];
+        assert_eq!(past_l1_batch.chain_id, expected_chain_id);
+    }
+}
+
 #[tokio::test]
 async fn detecting_reorg_in_l1_data_provider() {
     let l1_batch_number = H256::from_low_u64_be(1);
+    let pool = ConnectionPool::<Core>::test_pool().await;
     // Generate two logs for the same L1 batch #1
     let logs = vec![
         web3::Log {
-            address: DIAMOND_PROXY_ADDRESS,
+            address: L1_DIAMOND_PROXY_ADDRESS,
             topics: vec![
                 *BLOCK_COMMIT_SIGNATURE,
                 l1_batch_number,
@@ -314,7 +450,7 @@
             ..web3::Log::default()
         },
         web3::Log {
-            address: DIAMOND_PROXY_ADDRESS,
+            address: L1_DIAMOND_PROXY_ADDRESS,
             topics: vec![
                 *BLOCK_COMMIT_SIGNATURE,
                 l1_batch_number,
@@ -325,9 +461,9 @@
             ..web3::Log::default()
         },
     ];
-    let l1_client = mock_l1_client(200.into(), logs);
+    let l1_client = mock_l1_client(200.into(), logs, SLChainId(9));
 
-    let mut provider = L1DataProvider::new(Box::new(l1_client), DIAMOND_PROXY_ADDRESS).unwrap();
+    let mut provider = create_l1_data_provider(Box::new(l1_client), pool.clone()).await;
     let output = provider
         .batch_details(L1BatchNumber(1), &create_l2_block(1))
         .await
@@ -343,16 +479,15 @@ async fn combined_data_provider_errors() {
         .await
         .unwrap();
 
-    let mut eth_params =
EthereumParameters::new(1_000_000);
+    let mut eth_params = EthereumParameters::new_l1(1_000_000);
     seal_l1_batch_with_timestamp(&mut storage, L1BatchNumber(1), 50_000).await;
-    eth_params.push_commit(51_000);
+    eth_params.push_commit(L1BatchNumber(1), 51_000);
     seal_l1_batch_with_timestamp(&mut storage, L1BatchNumber(2), 52_000).await;
 
     let mut main_node_client = MockMainNodeClient::default();
     main_node_client.insert_batch(L1BatchNumber(2), H256::repeat_byte(2));
     let mut provider = CombinedDataProvider::new(main_node_client);
-    let l1_provider =
-        L1DataProvider::new(Box::new(eth_params.client()), DIAMOND_PROXY_ADDRESS).unwrap();
+    let l1_provider = create_l1_data_provider(Box::new(eth_params.client()), pool.clone()).await;
     provider.set_l1(l1_provider);
 
     // L1 batch #1 should be obtained from L1
diff --git a/zkstack_cli/crates/zkstack/src/commands/external_node/prepare_configs.rs b/zkstack_cli/crates/zkstack/src/commands/external_node/prepare_configs.rs
index d714a0f8e843..122c1f549697 100644
--- a/zkstack_cli/crates/zkstack/src/commands/external_node/prepare_configs.rs
+++ b/zkstack_cli/crates/zkstack/src/commands/external_node/prepare_configs.rs
@@ -74,7 +74,6 @@ fn prepare_configs(
                 .http_url,
         )?,
         main_node_rate_limit_rps: None,
-        gateway_url: None,
         bridge_addresses_refresh_interval_sec: None,
     };
     let mut general_en = general.clone();
@@ -112,6 +111,7 @@ fn prepare_configs(
         }),
         l1: Some(L1Secrets {
             l1_rpc_url: SensitiveUrl::from_str(&args.l1_rpc_url).context("l1_rpc_url")?,
+            gateway_url: None,
         }),
         data_availability: None,
     };

From 2f357f70f56cd4925e7283c4a4e828db2b397d0a Mon Sep 17 00:00:00 2001
From: perekopskiy
Date: Thu, 14 Nov 2024 11:49:30 +0200
Subject: [PATCH 02/11] fix sqlx data

---
 ...2f79d780d239409d426b602aa0cf9b0c5b1ef39b7d07d6309454fcd.json | 2 +-
 ...72787fe25bfaa8fd345094de508af93f4085be7cf3b54b1e8ecdadd.json | 2 +-
 2 files changed, 2 insertions(+), 2 deletions(-)

diff --git a/core/lib/dal/.sqlx/query-2fa2ba4a62f79d780d239409d426b602aa0cf9b0c5b1ef39b7d07d6309454fcd.json b/core/lib/dal/.sqlx/query-2fa2ba4a62f79d780d239409d426b602aa0cf9b0c5b1ef39b7d07d6309454fcd.json
index 1d515edba819..0db6ba6f51b6 100644
--- a/core/lib/dal/.sqlx/query-2fa2ba4a62f79d780d239409d426b602aa0cf9b0c5b1ef39b7d07d6309454fcd.json
+++ b/core/lib/dal/.sqlx/query-2fa2ba4a62f79d780d239409d426b602aa0cf9b0c5b1ef39b7d07d6309454fcd.json
@@ -69,7 +69,7 @@
       false,
       false,
       false,
-      false,
+      true,
       false,
       false,
       true,
diff --git a/core/lib/dal/.sqlx/query-a115f795672787fe25bfaa8fd345094de508af93f4085be7cf3b54b1e8ecdadd.json b/core/lib/dal/.sqlx/query-a115f795672787fe25bfaa8fd345094de508af93f4085be7cf3b54b1e8ecdadd.json
index ebe8ce232cfb..ac7989a5be77 100644
--- a/core/lib/dal/.sqlx/query-a115f795672787fe25bfaa8fd345094de508af93f4085be7cf3b54b1e8ecdadd.json
+++ b/core/lib/dal/.sqlx/query-a115f795672787fe25bfaa8fd345094de508af93f4085be7cf3b54b1e8ecdadd.json
@@ -67,7 +67,7 @@
       false,
       false,
       false,
-      false,
+      true,
       false,
       false,
       true,

From e3898d503d9a9e00690d99796d8b58c11a4d18a2 Mon Sep 17 00:00:00 2001
From: perekopskiy
Date: Thu, 14 Nov 2024 11:51:15 +0200
Subject: [PATCH 03/11] fix abi

---
 core/lib/contracts/src/lib.rs | 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)

diff --git a/core/lib/contracts/src/lib.rs b/core/lib/contracts/src/lib.rs
index f387f791be12..5626b7040961 100644
--- a/core/lib/contracts/src/lib.rs
+++ b/core/lib/contracts/src/lib.rs
@@ -1442,7 +1442,7 @@ pub static POST_SHARED_BRIDGE_EXECUTE_FUNCTION: Lazy<Function> = Lazy::new(|| {
 
 // Temporary thing, should be removed when new contracts are merged.
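 // Note: `serde_json::from_str::<Contract>` expects the standard JSON ABI encoding, i.e. an
 // array of ABI entries, which is why the fix below wraps the lone function object in `[...]`.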
pub static MESSAGE_ROOT_CONTRACT: Lazy = Lazy::new(|| { let abi = r#" - { + [{ "inputs": [ { "internalType": "uint256", @@ -1460,6 +1460,6 @@ pub static MESSAGE_ROOT_CONTRACT: Lazy = Lazy::new(|| { ], "stateMutability": "view", "type": "function" - }"#; + }]"#; serde_json::from_str(abi).unwrap() }); From fcaf4184c21391b81dcd78e948bbeeae227641cc Mon Sep 17 00:00:00 2001 From: perekopskiy Date: Thu, 14 Nov 2024 14:07:39 +0200 Subject: [PATCH 04/11] fix tests --- core/node/consistency_checker/src/lib.rs | 10 ++++++++-- core/node/consistency_checker/src/tests/mod.rs | 13 +++++++++++-- .../node_sync/src/tree_data_fetcher/provider/mod.rs | 10 ++++++++-- .../src/tree_data_fetcher/provider/tests.rs | 7 ++++++- etc/env/file_based/external_node.yaml | 2 -- 5 files changed, 33 insertions(+), 9 deletions(-) diff --git a/core/node/consistency_checker/src/lib.rs b/core/node/consistency_checker/src/lib.rs index 340b1ccc1ec0..d5e1af93fa09 100644 --- a/core/node/consistency_checker/src/lib.rs +++ b/core/node/consistency_checker/src/lib.rs @@ -406,9 +406,15 @@ impl ConsistencyChecker { }; let gateway_chain_data = if let Some(client) = gateway_client { + let contract = bridgehub_contract(); + let function_name = if contract.function("getZKChain").is_ok() { + "getZKChain" + } else { + "getHyperchain" + }; let gateway_diamond_proxy = - CallFunctionArgs::new("getZKChain", Token::Uint(l2_chain_id.as_u64().into())) - .for_contract(L2_BRIDGEHUB_ADDRESS, &bridgehub_contract()) + CallFunctionArgs::new(function_name, Token::Uint(l2_chain_id.as_u64().into())) + .for_contract(L2_BRIDGEHUB_ADDRESS, &contract) .call(&client) .await?; let chain_id = client.fetch_chain_id().await?; diff --git a/core/node/consistency_checker/src/tests/mod.rs b/core/node/consistency_checker/src/tests/mod.rs index 5571c3b74d9c..5b1d49d43df9 100644 --- a/core/node/consistency_checker/src/tests/mod.rs +++ b/core/node/consistency_checker/src/tests/mod.rs @@ -134,8 +134,13 @@ fn create_mock_sl(chain_id: u64, with_get_zk_chain: bool) -> MockSettlementLayer } Some(addr) if with_get_zk_chain && addr == L2_BRIDGEHUB_ADDRESS => { let contract = zksync_contracts::bridgehub_contract(); + let function_name = if contract.function("getZKChain").is_ok() { + "getZKChain" + } else { + "getHyperchain" + }; let expected_input = contract - .function("getZKChain") + .function(function_name) .unwrap() .encode_input(&[Token::Uint(ERA_CHAIN_ID.into())]) .unwrap(); @@ -192,7 +197,11 @@ fn build_commit_tx_input_data_is_correct(commitment_mode: L1BatchCommitmentMode) &commit_tx_input_data, commit_function, batch.header.number, - false, + batch + .header + .protocol_version + .map(|v| v.is_pre_gateway()) + .unwrap_or(true), ) .unwrap(); assert_eq!( diff --git a/core/node/node_sync/src/tree_data_fetcher/provider/mod.rs b/core/node/node_sync/src/tree_data_fetcher/provider/mod.rs index 7aea5de8c6b2..305650203f58 100644 --- a/core/node/node_sync/src/tree_data_fetcher/provider/mod.rs +++ b/core/node/node_sync/src/tree_data_fetcher/provider/mod.rs @@ -144,11 +144,17 @@ impl L1DataProvider { diamond_proxy_addr: l1_diamond_proxy_addr, }; let gateway_chain_data = if let Some(client) = gateway_client { + let contract = bridgehub_contract(); + let function_name = if contract.function("getZKChain").is_ok() { + "getZKChain" + } else { + "getHyperchain" + }; let gateway_diamond_proxy = CallFunctionArgs::new( - "getZKChain", + function_name, zksync_types::ethabi::Token::Uint(l2_chain_id.as_u64().into()), ) - .for_contract(L2_BRIDGEHUB_ADDRESS, &bridgehub_contract()) + 
.for_contract(L2_BRIDGEHUB_ADDRESS, &contract) .call(&client) .await?; let chain_id = client.fetch_chain_id().await?; diff --git a/core/node/node_sync/src/tree_data_fetcher/provider/tests.rs b/core/node/node_sync/src/tree_data_fetcher/provider/tests.rs index 55bed282f48a..75bf96092335 100644 --- a/core/node/node_sync/src/tree_data_fetcher/provider/tests.rs +++ b/core/node/node_sync/src/tree_data_fetcher/provider/tests.rs @@ -256,8 +256,13 @@ fn mock_l1_client(block_number: U64, logs: Vec, chain_id: SLChainId) .method("eth_chainId", move || Ok(U64::from(chain_id.0))) .method("eth_call", move |req: CallRequest, _block_id: BlockId| { let contract = bridgehub_contract(); + let function_name = if contract.function("getZKChain").is_ok() { + "getZKChain" + } else { + "getHyperchain" + }; let expected_input = contract - .function("getZKChain") + .function(function_name) .unwrap() .encode_input(&[ethabi::Token::Uint(ERA_CHAIN_ID.into())]) .unwrap(); diff --git a/etc/env/file_based/external_node.yaml b/etc/env/file_based/external_node.yaml index e97b04fb3900..675baf739686 100644 --- a/etc/env/file_based/external_node.yaml +++ b/etc/env/file_based/external_node.yaml @@ -4,5 +4,3 @@ l1_batch_commit_data_generator_mode: Rollup main_node_url: http://localhost:3050 main_node_rate_limit_rps: 1000 - -gateway_url: http://localhost:3052 From 55352a779671416640809246ed7986ff830f463f Mon Sep 17 00:00:00 2001 From: perekopskiy Date: Thu, 14 Nov 2024 14:14:56 +0200 Subject: [PATCH 05/11] ignore rollup_da_output_hash_match test --- core/lib/multivm/src/versions/shadow/tests.rs | 1 + core/lib/multivm/src/versions/vm_fast/tests/l1_messenger.rs | 1 + core/lib/multivm/src/versions/vm_latest/tests/l1_messenger.rs | 1 + 3 files changed, 3 insertions(+) diff --git a/core/lib/multivm/src/versions/shadow/tests.rs b/core/lib/multivm/src/versions/shadow/tests.rs index 28ea79e4d3e4..30414b130210 100644 --- a/core/lib/multivm/src/versions/shadow/tests.rs +++ b/core/lib/multivm/src/versions/shadow/tests.rs @@ -320,6 +320,7 @@ mod l1_messenger { use crate::versions::testonly::l1_messenger::*; #[test] + #[ignore] // Requires post-gateway system contracts fn rollup_da_output_hash_match() { test_rollup_da_output_hash_match::(); } diff --git a/core/lib/multivm/src/versions/vm_fast/tests/l1_messenger.rs b/core/lib/multivm/src/versions/vm_fast/tests/l1_messenger.rs index 0bd01c7de134..c7d4594d7692 100644 --- a/core/lib/multivm/src/versions/vm_fast/tests/l1_messenger.rs +++ b/core/lib/multivm/src/versions/vm_fast/tests/l1_messenger.rs @@ -1,6 +1,7 @@ use crate::{versions::testonly::l1_messenger::test_rollup_da_output_hash_match, vm_fast::Vm}; #[test] +#[ignore] // Requires post-gateway system contracts fn rollup_da_output_hash_match() { test_rollup_da_output_hash_match::>(); } diff --git a/core/lib/multivm/src/versions/vm_latest/tests/l1_messenger.rs b/core/lib/multivm/src/versions/vm_latest/tests/l1_messenger.rs index f1dade9dd8e6..7d301f33a131 100644 --- a/core/lib/multivm/src/versions/vm_latest/tests/l1_messenger.rs +++ b/core/lib/multivm/src/versions/vm_latest/tests/l1_messenger.rs @@ -4,6 +4,7 @@ use crate::{ }; #[test] +#[ignore] // Requires post-gateway system contracts fn rollup_da_output_hash_match() { test_rollup_da_output_hash_match::>(); } From c2785fc73ee0d3d417ce0e2aebb41032639972a8 Mon Sep 17 00:00:00 2001 From: perekopskiy Date: Tue, 19 Nov 2024 15:43:00 +0200 Subject: [PATCH 06/11] address comments --- core/bin/external_node/src/config/mod.rs | 4 ++++ .../src/i_executor/methods/commit_batches.rs | 10 ++++---- 
 .../src/i_executor/methods/execute_batches.rs | 8 +------
 .../structures/commit_batch_info.rs | 8 ++++---
 core/lib/multivm/src/versions/shadow/tests.rs | 22 ++++-------------
 .../src/versions/testonly/l1_messenger.rs | 24 ++++++++++---------
 .../multivm/src/versions/testonly/refunds.rs | 9 +++----
 .../src/versions/testonly/tester/mod.rs | 9 ++-----
 .../multivm/src/versions/vm_fast/tests/mod.rs | 9 ++-----
 .../src/versions/vm_latest/tests/mod.rs | 9 ++-----
 core/lib/types/src/l2_to_l1_log.rs | 12 ++++++++++
 .../src/web3/namespaces/unstable/mod.rs | 6 ++---
 .../appended_chain_batch_root.rs | 4 ++--
 13 files changed, 58 insertions(+), 76 deletions(-)

diff --git a/core/bin/external_node/src/config/mod.rs b/core/bin/external_node/src/config/mod.rs
index 6f3222af6dfa..59f0dab1f48d 100644
--- a/core/bin/external_node/src/config/mod.rs
+++ b/core/bin/external_node/src/config/mod.rs
@@ -102,10 +102,14 @@ impl ConfigurationSource for Environment {
 /// This part of the external node config is fetched directly from the main node.
 #[derive(Debug, Deserialize)]
 pub(crate) struct RemoteENConfig {
+    #[serde(alias = "bridgehub_proxy_addr")]
     pub l1_bridgehub_proxy_addr: Option<Address>,
+    #[serde(alias = "state_transition_proxy_addr")]
     pub l1_state_transition_proxy_addr: Option<Address>,
+    #[serde(alias = "transparent_proxy_admin_addr")]
     pub l1_transparent_proxy_admin_addr: Option<Address>,
     /// Should not be accessed directly. Use [`ExternalNodeConfig::l1_diamond_proxy_address`] instead.
+    #[serde(alias = "diamond_proxy_addr")]
     l1_diamond_proxy_addr: Address,
     // While on L1 shared bridge and legacy bridge are different contracts with different addresses,
     // the `l2_erc20_bridge_addr` and `l2_shared_bridge_addr` are basically the same contract, but with
diff --git a/core/lib/l1_contract_interface/src/i_executor/methods/commit_batches.rs b/core/lib/l1_contract_interface/src/i_executor/methods/commit_batches.rs
index b883cbdbabc8..01e362fb7d65 100644
--- a/core/lib/l1_contract_interface/src/i_executor/methods/commit_batches.rs
+++ b/core/lib/l1_contract_interface/src/i_executor/methods/commit_batches.rs
@@ -30,20 +30,18 @@ impl Tokenize for CommitBatches<'_> {
         if protocol_version.is_pre_gateway() {
             vec![stored_batch_info, Token::Array(l1_batches_to_commit)]
         } else {
-            let encoded_data = encode(&[
+            let mut encoded_data = encode(&[
                 stored_batch_info.clone(),
-                Token::Array(l1_batches_to_commit.clone()),
+                Token::Array(l1_batches_to_commit),
             ]);
-            let commit_data = [[SUPPORTED_ENCODING_VERSION].to_vec(), encoded_data]
-                .concat()
-                .to_vec();
+            encoded_data.insert(0, SUPPORTED_ENCODING_VERSION);
             vec![
                 Token::Uint((self.last_committed_l1_batch.header.number.0 + 1).into()),
                 Token::Uint(
                     (self.last_committed_l1_batch.header.number.0 + self.l1_batches.len() as u32)
                         .into(),
                 ),
-                Token::Bytes(commit_data),
+                Token::Bytes(encoded_data),
             ]
         }
     }
diff --git a/core/lib/l1_contract_interface/src/i_executor/methods/execute_batches.rs b/core/lib/l1_contract_interface/src/i_executor/methods/execute_batches.rs
index e2e29bfefcfe..649a7ca2b419 100644
--- a/core/lib/l1_contract_interface/src/i_executor/methods/execute_batches.rs
+++ b/core/lib/l1_contract_interface/src/i_executor/methods/execute_batches.rs
@@ -47,13 +47,7 @@ impl Tokenize for &ExecuteBatches {
             vec![
                 Token::Uint(self.l1_batches[0].header.number.0.into()),
-                Token::Uint(
-                    self.l1_batches[self.l1_batches.len() - 1]
-                        .header
-                        .number
-                        .0
-                        .into(),
-                ),
+                Token::Uint(self.l1_batches.last().unwrap().header.number.0.into()),
                 Token::Bytes(execute_data),
             ]
         }
     }
diff --git a/core/lib/l1_contract_interface/src/i_executor/structures/commit_batch_info.rs b/core/lib/l1_contract_interface/src/i_executor/structures/commit_batch_info.rs
index b0aa0a291826..573b3c65a3e3 100644
--- a/core/lib/l1_contract_interface/src/i_executor/structures/commit_batch_info.rs
+++ b/core/lib/l1_contract_interface/src/i_executor/structures/commit_batch_info.rs
@@ -361,9 +361,11 @@ fn compose_header_for_l1_commit_rollup(state_diff_hash: H256, pubdata: Vec<u8>)
     // Now, we need to calculate the linear hashes of the blobs.
     // Firstly, let's pad the pubdata to the size of the blob.
if full_pubdata.len() % ZK_SYNC_BYTES_PER_BLOB != 0 { - let padding = - vec![0u8; ZK_SYNC_BYTES_PER_BLOB - full_pubdata.len() % ZK_SYNC_BYTES_PER_BLOB]; - full_pubdata.extend(padding); + full_pubdata.resize( + full_pubdata.len() + ZK_SYNC_BYTES_PER_BLOB + - full_pubdata.len() % ZK_SYNC_BYTES_PER_BLOB, + 0, + ); } full_header.push((full_pubdata.len() / ZK_SYNC_BYTES_PER_BLOB) as u8); diff --git a/core/lib/multivm/src/versions/shadow/tests.rs b/core/lib/multivm/src/versions/shadow/tests.rs index 30414b130210..354459853f11 100644 --- a/core/lib/multivm/src/versions/shadow/tests.rs +++ b/core/lib/multivm/src/versions/shadow/tests.rs @@ -120,23 +120,11 @@ impl TestedVm for ShadowedFastVm { }); } - fn push_transaction_with_refund_and_compression( - &mut self, - tx: Transaction, - refund: u64, - compression: bool, - ) { - self.get_mut( - "push_transaction_with_refund_and_compression", - |r| match r { - ShadowMut::Main(vm) => { - vm.push_transaction_with_refund_and_compression(tx.clone(), refund, compression) - } - ShadowMut::Shadow(vm) => { - vm.push_transaction_with_refund_and_compression(tx.clone(), refund, compression) - } - }, - ); + fn push_transaction_with_refund(&mut self, tx: Transaction, refund: u64) { + self.get_mut("push_transaction_with_refund", |r| match r { + ShadowMut::Main(vm) => vm.push_transaction_with_refund(tx.clone(), refund), + ShadowMut::Shadow(vm) => vm.push_transaction_with_refund(tx.clone(), refund), + }); } fn pubdata_input(&self) -> PubdataInput { diff --git a/core/lib/multivm/src/versions/testonly/l1_messenger.rs b/core/lib/multivm/src/versions/testonly/l1_messenger.rs index 7659a1cee7c8..e144c01ce4f5 100644 --- a/core/lib/multivm/src/versions/testonly/l1_messenger.rs +++ b/core/lib/multivm/src/versions/testonly/l1_messenger.rs @@ -9,11 +9,6 @@ use zksync_types::{ }; use zksync_vm_interface::SystemEnv; -// Bytecode is temporary hardcoded, should be removed after contracts are merged. 
-fn l2_rollup_da_validator_bytecode() -> Vec { - hex::decode("0012000000000002000a000000000002000000000301001900000060043002700000012703400197000100000031035500020000003103550003000000310355000400000031035500050000003103550006000000310355000700000031035500080000003103550009000000310355000a000000310355000b000000310355000c000000310355000d000000310355000e000000310355000f00000031035500100000003103550011000000010355000001270040019d0000008004000039000000400040043f00000001002001900000005d0000c13d000000040030008c000000fe0000413d000000000201043b00000129022001970000012a0020009c000000fe0000c13d000000a40030008c000000fe0000413d0000000002000416000000000002004b000000fe0000c13d0000008402100370000000000202043b000300000002001d0000012b0020009c000000fe0000213d00000003020000290000002302200039000000000032004b000000fe0000813d00000003020000290000000402200039000000000421034f000000000604043b0000012b0060009c000000fe0000213d0000000304000029000700240040003d0000000704600029000000000034004b000000fe0000213d0000004403100370000000000303043b000400000003001d0000006403100370000000000303043b000200000003001d000000040060008c000000fe0000413d0000002002200039000000000221034f000000000202043b000000e00220027000000058022000c90000000804200039000000000064004b000000fe0000213d00000003022000290000002802200039000000000121034f000000000101043b000500e00010027a000600000006001d000000650000c13d00000000090000190000000403000029000000000039004b000000f10000c13d0000014e0040009c000000fb0000a13d0000014001000041000000000010043f0000001101000039000000040010043f00000138010000410000049a000104300000000001000416000000000001004b000000fe0000c13d0000002001000039000001000010044300000120000004430000012801000041000004990001042e000000000800001900000000090000190000014f0040009c000000570000813d0000000403400039000000000063004b000000fe0000213d00000007024000290000001101000367000000000221034f000000000502043b000000e004500270000000000034001a000000570000413d0000000007340019000000000067004b000000fe0000213d00000000020004140000012c0050009c0000007b0000813d0000000003000031000000840000013d000000070330002900000127053001970001000000510355000000000034001a000000570000413d0000000003340019000000000330007b000000570000413d000000000151034f000a00000009001d000800000008001d000900000007001d000001270330019700010000003103e50000012d0020009c000003c20000813d00000000013103df000000c0022002100000012e022001970000012c022001c700010000002103b500000000012103af0000801002000039049804930000040f0000000003010019000000600330027000000127033001970000000100200190000002450000613d0000001f0230003900000130052001970000003f025000390000013104200197000000400200043d0000000004420019000000000024004b000000000600003900000001060040390000012b0040009c0000023f0000213d00000001006001900000023f0000c13d000000400040043f0000000004320436000000000005004b000000b10000613d0000000005540019000000000600003100000011066003670000000007040019000000006806043c0000000007870436000000000057004b000000ad0000c13d0000012f063001980000000005640019000000ba0000613d000000000701034f0000000008040019000000007907043c0000000008980436000000000058004b000000b60000c13d0000001f03300190000000c70000613d000000000161034f0000000303300210000000000605043300000000063601cf000000000636022f000000000101043b0000010003300089000000000131022f00000000013101cf000000000161019f00000000001504350000000001020433000000200010008c0000000a05000029000004210000c13d0000000002040433000000400100043d000000400310003900000000002304350000002002100039000000000052043500000040030000390000000000310435000001320010009c0000023f0000213d0000006003100039000000400030043f000001270020009c0000012702008041000000400220021000000
00001010433000001270010009c00000127010080410000006001100210000000000121019f0000000002000414000001270020009c0000012702008041000000c002200210000000000112019f00000133011001c700008010020000390498048e0000040f0000000100200190000000060600002900000009040000290000000808000029000000fe0000613d000000000901043b0000000108800039000000050080006c000000670000413d000000520000013d000000400100043d000000440210003900000000009204350000002402100039000000000032043500000134020000410000000000210435000000040210003900000000000204350000042d0000013d0000000403400039000000000063004b000001000000a13d00000000010000190000049a0001043000000007014000290000001101100367000000000101043b000400e00010027a0000025d0000c13d000000000900001900000000050300190000000003090019000000020090006c000002f20000c13d000000060050006c000002fd0000813d00000007015000290000001102000367000000000112034f000000000101043b000000f801100270000000010010008c000003030000c13d00000000060500190000014e0060009c0000000604000029000000570000213d0000000403600039000000000043004b000000fe0000213d00000003016000290000002501100039000000000112034f000000000101043b000000000043004b000002fd0000813d000000e8011002700000000703300029000000000432034f0000000503500039000000000404043b000000000031001a0000000607000029000000570000413d000a00000031001d0000000a0070006b000000fe0000213d000000050600008a0000000a0060006b000000570000213d0000000a050000290000000405500039000000000075004b000000fe0000213d0000000a08000029000300070080002d0000000306200360000000000606043b000400000006001d000000e006600272000500000006001d00090110006000cd0000013f0000613d000000090800002900000005068000fa000001100060008c000000570000c13d000000090050002a000000570000413d000200090050002d000000020070006c000000fe0000413d000000f804400270000000400a00043d0000004406a00039000000800700003900000000007604350000002406a000390000000000460435000001410400004100000000004a043500000007055000290000008404a00039000000090900002900000000009404350000000404a0003900000005060000290000000000640435000000000752034f0000001f0890018f00080000000a001d000000a405a0003900000142099001980000000006950019000001610000613d000000000a07034f000000000b05001900000000ac0a043c000000000bcb043600000000006b004b0000015d0000c13d0000000703300029000000000008004b0000016f0000613d000000000797034f0000000308800210000000000906043300000000098901cf000000000989022f000000000707043b0000010008800089000000000787022f00000000078701cf000000000797019f00000000007604350000000907000029000000000675001900000000000604350000001f06700039000001430660019700000000066500190000000004460049000000080500002900000064055000390000000000450435000000000432034f0000001f0510018f000000000216043600000144061001980000000003620019000001850000613d000000000704034f0000000008020019000000007907043c0000000008980436000000000038004b000001810000c13d000000000005004b000001920000613d000000000464034f0000000305500210000000000603043300000000065601cf000000000656022f000000000404043b0000010005500089000000000454022f00000000045401cf000000000464019f0000000000430435000000000312001900000000000304350000001f011000390000014501100197000000080300002900000000013100490000000001210019000001270010009c00000127010080410000006001100210000001270030009c000001270200004100000000020340190000004002200210000000000121019f0000000002000414000001270020009c0000012702008041000000c002200210000000000112019f0000800e02000039049804890000040f000000000301001900000060033002700000012703300197000000200030008c000000200400003900000000040340190000001f0640018f00000020074001900000000805700029000001b80000613d000000000801034f0000000809000029000000008a08043c0000000009a90436000000000059004b000001b40000c13d00000000
0006004b000001c50000613d000000000771034f0000000306600210000000000805043300000000086801cf000000000868022f000000000707043b0000010006600089000000000767022f00000000066701cf000000000686019f00000000006504350000000100200190000003480000613d0000001f01400039000000600110018f0000000802100029000000000012004b00000000010000390000000101004039000100000002001d0000012b0020009c0000023f0000213d00000001001001900000023f0000c13d0000000101000029000000400010043f000000200030008c0000000604000029000000fe0000413d00000008010000290000000001010433000800000001001d00000004010000290000012c0010009c000001e10000413d000000090200002900000005012000fa000001100010008c000000570000c13d0000000103000029000000440130003900000024023000390000000403300039000000020440006c000003660000c13d000001460400004100000001050000290000000000450435000000200400003900000000004304350000000a04000029000000000042043500000150034001980000001f0440018f000000000231001900000007050000290000001105500367000001fa0000613d000000000605034f0000000007010019000000006806043c0000000007870436000000000027004b000001f60000c13d000000000004004b000002070000613d000000000335034f0000000304400210000000000502043300000000054501cf000000000545022f000000000303043b0000010004400089000000000343022f00000000034301cf000000000353019f00000000003204350000000a030000290000001f023000390000015002200197000000000131001900000000000104350000004401200039000001270010009c000001270100804100000060011002100000000102000029000001270020009c00000127020080410000004002200210000000000112019f0000000002000414000001270020009c0000012702008041000000c002200210000000000112019f00008011020000390498048e0000040f000000000301001900000060033002700000001f0430018f0000012f0530019700000127033001970000000100200190000003720000613d0000000102500029000000000005004b0000022c0000613d000000000601034f0000000107000029000000006806043c0000000007870436000000000027004b000002280000c13d000000000004004b000002390000613d000000000151034f0000000304400210000000000502043300000000054501cf000000000545022f000000000101043b0000010004400089000000000141022f00000000014101cf000000000151019f00000000001204350000001f0130003900000130011001970000000101100029000900000001001d0000012b0010009c0000038a0000a13d0000014001000041000000000010043f0000004101000039000000040010043f00000138010000410000049a000104300000001f0430018f0000012f023001980000024e0000613d000000000501034f0000000006000019000000005705043c0000000006760436000000000026004b0000024a0000c13d000000000004004b0000025b0000613d000000000121034f0000000304400210000000000502043300000000054501cf000000000545022f000000000101043b0000010004400089000000000141022f00000000014101cf000000000151019f000000000012043500000060013002100000049a00010430000000000800001900000000090000190000014e0030009c000000570000213d0000000402300039000000000062004b000000fe0000213d00000007033000290000001101000367000000000331034f000000000303043b000000e00a30027000000000002a001a000000570000413d00000000072a0019000000000067004b000000fe0000213d0000013600300198000003130000c13d000001390030009c000003190000813d0000013a003001980000031f0000613d000000070420002900000127034001970000000002000414000100000031035500000000004a001a000000570000413d00000000044a0019000000000440007b000000570000413d00090000000a001d000a00000009001d000500000008001d000800000007001d000000000131034f000001270340019700010000003103e5000001270020009c000003c20000213d00000000013103df000000c0022002100000012e022001970000012c022001c700010000002103b500000000012103af0000000202000039049804930000040f00000000030100190000006003300270000001270330019700000001002001900000032a0000613d0000001f0230003900000130052001970000003f0250003900000131042
00197000000400200043d0000000004420019000000000024004b000000000600003900000001060040390000012b0040009c0000023f0000213d00000001006001900000023f0000c13d000000400040043f0000000004320436000000000005004b000000090a000029000002ad0000613d0000000005540019000000000600003100000011066003670000000007040019000000006806043c0000000007870436000000000057004b000002a90000c13d0000012f063001980000000005640019000002b60000613d000000000701034f0000000008040019000000007907043c0000000008980436000000000058004b000002b20000c13d0000001f03300190000002c30000613d000000000161034f0000000303300210000000000605043300000000063601cf000000000636022f000000000101043b0000010003300089000000000131022f00000000013101cf000000000161019f0000000000150435000000400100043d0000000002020433000000200020008c0000000a05000029000003420000c13d00000000020404330000013d02200197000000db03a002100000013e03300197000000000223019f0000013f022001c7000000400310003900000000002304350000002002100039000000000052043500000040030000390000000000310435000001320010009c0000023f0000213d0000006003100039000000400030043f000001270020009c000001270200804100000040022002100000000001010433000001270010009c00000127010080410000006001100210000000000121019f0000000002000414000001270020009c0000012702008041000000c002200210000000000112019f00000133011001c700008010020000390498048e0000040f0000000100200190000000060600002900000008030000290000000508000029000000fe0000613d000000000901043b0000000108800039000000040080006c0000025f0000413d000001060000013d000000400100043d0000004402100039000000000032043500000024021000390000000203000029000000000032043500000134020000410000000000210435000000040210003900000001030000390000042c0000013d0000014001000041000000000010043f0000003201000039000000040010043f00000138010000410000049a00010430000000400200043d0000004403200039000000000013043500000024012000390000000103000039000000000031043500000134010000410000000000120435000000040120003900000002030000390000000000310435000001270020009c0000012702008041000000400120021000000135011001c70000049a00010430000000400100043d0000013702000041000000000021043500000004021000390000000203000039000003240000013d000000400100043d0000013702000041000000000021043500000004021000390000000103000039000003240000013d000000400100043d00000137020000410000000000210435000000040210003900000003030000390000000000320435000001270010009c0000012701008041000000400110021000000138011001c70000049a000104300000001f0430018f0000012f02300198000003330000613d000000000501034f0000000006000019000000005705043c0000000006760436000000000026004b0000032f0000c13d000000000004004b000003400000613d000000000121034f0000000304400210000000000502043300000000054501cf000000000545022f000000000101043b0000010004400089000000000141022f00000000014101cf000000000151019f000000000012043500000060013002100000049a0001043000000044021000390000013b03000041000000000032043500000024021000390000001903000039000004270000013d0000001f0530018f0000012f06300198000000400200043d0000000004620019000003530000613d000000000701034f0000000008020019000000007907043c0000000008980436000000000048004b0000034f0000c13d000000000005004b000003600000613d000000000161034f0000000305500210000000000604043300000000065601cf000000000656022f000000000101043b0000010005500089000000000151022f00000000015101cf000000000161019f00000000001404350000006001300210000001270020009c00000127020080410000004002200210000000000112019f0000049a000104300000013405000041000000010600002900000000005604350000000305000039000000000053043500000000000204350000000000410435000001270060009c0000012706008041000000400160021000000135011001c70000049a00010430000000400200043d000000000652001900000000000500
4b0000037c0000613d000000000701034f0000000008020019000000007907043c0000000008980436000000000068004b000003780000c13d000000000004004b000003600000613d000000000151034f0000000304400210000000000506043300000000054501cf000000000545022f000000000101043b0000010004400089000000000141022f00000000014101cf000000000151019f0000000000160435000003600000013d0000000901000029000000400010043f000000200030008c000000fe0000413d000000010100002900000000010104330000012b0010009c000000fe0000213d000000010230002900000001011000290000001f03100039000000000023004b000000fe0000813d00000000140104340000012b0040009c0000023f0000213d00000005034002100000003f05300039000001470550019700000009055000290000012b0050009c0000023f0000213d000000400050043f000000090500002900000000004504350000000003130019000000000023004b000000fe0000213d000000000004004b000003ae0000613d0000000902000029000000200220003900000000140104340000000000420435000000000031004b000003a90000413d000000000100041400000011020003670000000a0000006b000003b40000c13d0000000003000031000003be0000013d00000007030000290000012704300197000100000042035500000003050000290000000a0050006c000000570000413d0000000305000029000000000350007b000000570000413d000000000242034f000001270330019700010000003203e5000001270010009c000003c90000a13d000000400100043d00000044021000390000014d03000041000000000032043500000024021000390000000803000039000004270000013d00000000023203df000000c0011002100000012e011001970000012c011001c700010000001203b500000000011203af0000801002000039049804930000040f0000000003010019000000600330027000000127033001970000000100200190000004320000613d0000001f0230003900000130052001970000003f025000390000013104200197000000400200043d0000000004420019000000000024004b000000000600003900000001060040390000012b0040009c0000023f0000213d00000001006001900000023f0000c13d000000400040043f0000000004320436000000000005004b000003ef0000613d0000000005540019000000000600003100000011066003670000000007040019000000006806043c0000000007870436000000000057004b000003eb0000c13d0000001f0530018f0000012f063001980000000003640019000003f90000613d000000000701034f0000000008040019000000007907043c0000000008980436000000000038004b000003f50000c13d000000000005004b000004060000613d000000000161034f0000000305500210000000000603043300000000065601cf000000000656022f000000000101043b0000010005500089000000000151022f00000000015101cf000000000161019f00000000001304350000000001020433000000200010008c000004210000c13d000000400100043d00000009020000290000000002020433000001000020008c0000044a0000413d00000064021000390000014a03000041000000000032043500000044021000390000014b0300004100000000003204350000002402100039000000250300003900000000003204350000013c020000410000000000210435000000040210003900000020030000390000000000320435000001270010009c000001270100804100000040011002100000014c011001c70000049a00010430000000400100043d00000044021000390000014803000041000000000032043500000024021000390000001f0300003900000000003204350000013c020000410000000000210435000000040210003900000020030000390000000000320435000001270010009c0000012701008041000000400110021000000135011001c70000049a000104300000001f0430018f0000012f023001980000043b0000613d000000000501034f0000000006000019000000005705043c0000000006760436000000000026004b000004370000c13d000000000004004b000004480000613d000000000121034f0000000304400210000000000502043300000000054501cf000000000545022f000000000101043b0000010004400089000000000141022f00000000014101cf000000000151019f000000000012043500000060013002100000049a000104300000000003040433000000f80220021000000060041000390000000000240435000000400210003900000000003204350000002002100039000000080300002900000000003204350
00000610310003900000009040000290000000004040433000000000004004b000004610000613d000000000500001900000009060000290000002006600039000900000006001d000000000606043300000000036304360000000105500039000000000045004b000004590000413d0000000003130049000000200430008a00000000004104350000001f0330003900000150043001970000000003140019000000000043004b000000000400003900000001040040390000012b0030009c0000023f0000213d00000001004001900000023f0000c13d000000400030043f000001270020009c000001270200804100000040022002100000000001010433000001270010009c00000127010080410000006001100210000000000121019f0000000002000414000001270020009c0000012702008041000000c002200210000000000112019f00000133011001c700008010020000390498048e0000040f0000000100200190000000fe0000613d000000000101043b000000400200043d0000000000120435000001270020009c0000012702008041000000400120021000000149011001c7000004990001042e0000048c002104210000000102000039000000000001042d0000000002000019000000000001042d00000491002104230000000102000039000000000001042d0000000002000019000000000001042d00000496002104230000000102000039000000000001042d0000000002000019000000000001042d0000049800000432000004990001042e0000049a00010430000000000000000000000000000000000000000000000000000000000000000000000000ffffffff0000000200000000000000000000000000000040000001000000000000000000ffffffff0000000000000000000000000000000000000000000000000000000089f9a07200000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000ffffffffffffffff0000000100000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000010000000000000000ffffffff00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000ffffffe000000000000000000000000000000000000000000000000000000001ffffffe000000000000000000000000000000000000000000000000000000003ffffffe0000000000000000000000000000000000000000000000000ffffffffffffff9f02000000000000000000000000000000000000000000000000000000000000007f7b0cf70000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000640000000000000000000000000000001f0000000000000000000000000000000000000000000000000000000043e266b0000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000024000000000000000000000000002000000000000000000000000000000000000000000000000000000000000000000020000000000000000000000000000000000000000000000000000000007368612072657475726e656420696e76616c696420646174610000000000000008c379a00000000000000000000000000000000000000000000000000000000000000000ffffffffffffffffffffffffffffffffffffffffffffffffffffffff06ffffff0000000000000000000000000000000000000000000000000000000001000000000000000000000000000000000000000000000000000000000000004e487b71000000000000000000000000000000000000000000000000000000006006d8b500000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000001ffffffffe0000000000000000000000000000000000000000000000000000003ffffffffe00000000000000000000000000000000000000000000000000000000000ffffe00000000000000000000000000000000000000000000000000000000001ffffe018876a04000000000000000000000000000000000000000000000000000000007fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffe06b656363616b3235362072657475726e656420696e76616c69642064617461000000000000000000000000000000000000000020000000000000000000000000206269747300000000000000000000000000000000000000000000000000000053616665436173743a2076616c756520646f65736e27742066697420696e203800000000000000000000
000000000000000000840000000000000000000000004f766572666c6f77000000000000000000000000000000000000000000000000fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffbfffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffcffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffe00000000000000000000000000000000000000000000000000000000000000000e901f5bd8811df26e614332e2110b9bc002e2cbadd82065c67e102f858079d5a").unwrap() -} - use super::{default_system_env, read_test_contract, ContractToDeploy, TestedVm, VmTesterBuilder}; use crate::{ interface::{ @@ -27,6 +22,11 @@ use crate::{ const L2_DA_VALIDATOR_OUTPUT_HASH_KEY: usize = 5; const USED_L2_DA_VALIDATOR_ADDRESS_KEY: usize = 6; +// Bytecode is temporary hardcoded, should be removed after contracts are merged. +fn l2_rollup_da_validator_bytecode() -> Vec { + hex::decode("0012000000000002000a000000000002000000000301001900000060043002700000012703400197000100000031035500020000003103550003000000310355000400000031035500050000003103550006000000310355000700000031035500080000003103550009000000310355000a000000310355000b000000310355000c000000310355000d000000310355000e000000310355000f00000031035500100000003103550011000000010355000001270040019d0000008004000039000000400040043f00000001002001900000005d0000c13d000000040030008c000000fe0000413d000000000201043b00000129022001970000012a0020009c000000fe0000c13d000000a40030008c000000fe0000413d0000000002000416000000000002004b000000fe0000c13d0000008402100370000000000202043b000300000002001d0000012b0020009c000000fe0000213d00000003020000290000002302200039000000000032004b000000fe0000813d00000003020000290000000402200039000000000421034f000000000604043b0000012b0060009c000000fe0000213d0000000304000029000700240040003d0000000704600029000000000034004b000000fe0000213d0000004403100370000000000303043b000400000003001d0000006403100370000000000303043b000200000003001d000000040060008c000000fe0000413d0000002002200039000000000221034f000000000202043b000000e00220027000000058022000c90000000804200039000000000064004b000000fe0000213d00000003022000290000002802200039000000000121034f000000000101043b000500e00010027a000600000006001d000000650000c13d00000000090000190000000403000029000000000039004b000000f10000c13d0000014e0040009c000000fb0000a13d0000014001000041000000000010043f0000001101000039000000040010043f00000138010000410000049a000104300000000001000416000000000001004b000000fe0000c13d0000002001000039000001000010044300000120000004430000012801000041000004990001042e000000000800001900000000090000190000014f0040009c000000570000813d0000000403400039000000000063004b000000fe0000213d00000007024000290000001101000367000000000221034f000000000502043b000000e004500270000000000034001a000000570000413d0000000007340019000000000067004b000000fe0000213d00000000020004140000012c0050009c0000007b0000813d0000000003000031000000840000013d000000070330002900000127053001970001000000510355000000000034001a000000570000413d0000000003340019000000000330007b000000570000413d000000000151034f000a00000009001d000800000008001d000900000007001d000001270330019700010000003103e50000012d0020009c000003c20000813d00000000013103df000000c0022002100000012e022001970000012c022001c700010000002103b500000000012103af0000801002000039049804930000040f0000000003010019000000600330027000000127033001970000000100200190000002450000613d0000001f0230003900000130052001970000003f025000390000013104200197000000400200043d0000000004420019000000000024004b000000000600003900000001060040390000012b0040009c0000023f0000213d00000001006001900000023f0000c13d000000400040043f0000000004320436000000000005004b000000b10000613d0000
000005540019000000000600003100000011066003670000000007040019000000006806043c0000000007870436000000000057004b000000ad0000c13d0000012f063001980000000005640019000000ba0000613d000000000701034f0000000008040019000000007907043c0000000008980436000000000058004b000000b60000c13d0000001f03300190000000c70000613d000000000161034f0000000303300210000000000605043300000000063601cf000000000636022f000000000101043b0000010003300089000000000131022f00000000013101cf000000000161019f00000000001504350000000001020433000000200010008c0000000a05000029000004210000c13d0000000002040433000000400100043d000000400310003900000000002304350000002002100039000000000052043500000040030000390000000000310435000001320010009c0000023f0000213d0000006003100039000000400030043f000001270020009c000001270200804100000040022002100000000001010433000001270010009c00000127010080410000006001100210000000000121019f0000000002000414000001270020009c0000012702008041000000c002200210000000000112019f00000133011001c700008010020000390498048e0000040f0000000100200190000000060600002900000009040000290000000808000029000000fe0000613d000000000901043b0000000108800039000000050080006c000000670000413d000000520000013d000000400100043d000000440210003900000000009204350000002402100039000000000032043500000134020000410000000000210435000000040210003900000000000204350000042d0000013d0000000403400039000000000063004b000001000000a13d00000000010000190000049a0001043000000007014000290000001101100367000000000101043b000400e00010027a0000025d0000c13d000000000900001900000000050300190000000003090019000000020090006c000002f20000c13d000000060050006c000002fd0000813d00000007015000290000001102000367000000000112034f000000000101043b000000f801100270000000010010008c000003030000c13d00000000060500190000014e0060009c0000000604000029000000570000213d0000000403600039000000000043004b000000fe0000213d00000003016000290000002501100039000000000112034f000000000101043b000000000043004b000002fd0000813d000000e8011002700000000703300029000000000432034f0000000503500039000000000404043b000000000031001a0000000607000029000000570000413d000a00000031001d0000000a0070006b000000fe0000213d000000050600008a0000000a0060006b000000570000213d0000000a050000290000000405500039000000000075004b000000fe0000213d0000000a08000029000300070080002d0000000306200360000000000606043b000400000006001d000000e006600272000500000006001d00090110006000cd0000013f0000613d000000090800002900000005068000fa000001100060008c000000570000c13d000000090050002a000000570000413d000200090050002d000000020070006c000000fe0000413d000000f804400270000000400a00043d0000004406a00039000000800700003900000000007604350000002406a000390000000000460435000001410400004100000000004a043500000007055000290000008404a00039000000090900002900000000009404350000000404a0003900000005060000290000000000640435000000000752034f0000001f0890018f00080000000a001d000000a405a0003900000142099001980000000006950019000001610000613d000000000a07034f000000000b05001900000000ac0a043c000000000bcb043600000000006b004b0000015d0000c13d0000000703300029000000000008004b0000016f0000613d000000000797034f0000000308800210000000000906043300000000098901cf000000000989022f000000000707043b0000010008800089000000000787022f00000000078701cf000000000797019f00000000007604350000000907000029000000000675001900000000000604350000001f06700039000001430660019700000000066500190000000004460049000000080500002900000064055000390000000000450435000000000432034f0000001f0510018f000000000216043600000144061001980000000003620019000001850000613d000000000704034f0000000008020019000000007907043c0000000008980436000000000038004b000001810000c13d000000000005004b000001920000613d0000000
00464034f0000000305500210000000000603043300000000065601cf000000000656022f000000000404043b0000010005500089000000000454022f00000000045401cf000000000464019f0000000000430435000000000312001900000000000304350000001f011000390000014501100197000000080300002900000000013100490000000001210019000001270010009c00000127010080410000006001100210000001270030009c000001270200004100000000020340190000004002200210000000000121019f0000000002000414000001270020009c0000012702008041000000c002200210000000000112019f0000800e02000039049804890000040f000000000301001900000060033002700000012703300197000000200030008c000000200400003900000000040340190000001f0640018f00000020074001900000000805700029000001b80000613d000000000801034f0000000809000029000000008a08043c0000000009a90436000000000059004b000001b40000c13d000000000006004b000001c50000613d000000000771034f0000000306600210000000000805043300000000086801cf000000000868022f000000000707043b0000010006600089000000000767022f00000000066701cf000000000686019f00000000006504350000000100200190000003480000613d0000001f01400039000000600110018f0000000802100029000000000012004b00000000010000390000000101004039000100000002001d0000012b0020009c0000023f0000213d00000001001001900000023f0000c13d0000000101000029000000400010043f000000200030008c0000000604000029000000fe0000413d00000008010000290000000001010433000800000001001d00000004010000290000012c0010009c000001e10000413d000000090200002900000005012000fa000001100010008c000000570000c13d0000000103000029000000440130003900000024023000390000000403300039000000020440006c000003660000c13d000001460400004100000001050000290000000000450435000000200400003900000000004304350000000a04000029000000000042043500000150034001980000001f0440018f000000000231001900000007050000290000001105500367000001fa0000613d000000000605034f0000000007010019000000006806043c0000000007870436000000000027004b000001f60000c13d000000000004004b000002070000613d000000000335034f0000000304400210000000000502043300000000054501cf000000000545022f000000000303043b0000010004400089000000000343022f00000000034301cf000000000353019f00000000003204350000000a030000290000001f023000390000015002200197000000000131001900000000000104350000004401200039000001270010009c000001270100804100000060011002100000000102000029000001270020009c00000127020080410000004002200210000000000112019f0000000002000414000001270020009c0000012702008041000000c002200210000000000112019f00008011020000390498048e0000040f000000000301001900000060033002700000001f0430018f0000012f0530019700000127033001970000000100200190000003720000613d0000000102500029000000000005004b0000022c0000613d000000000601034f0000000107000029000000006806043c0000000007870436000000000027004b000002280000c13d000000000004004b000002390000613d000000000151034f0000000304400210000000000502043300000000054501cf000000000545022f000000000101043b0000010004400089000000000141022f00000000014101cf000000000151019f00000000001204350000001f0130003900000130011001970000000101100029000900000001001d0000012b0010009c0000038a0000a13d0000014001000041000000000010043f0000004101000039000000040010043f00000138010000410000049a000104300000001f0430018f0000012f023001980000024e0000613d000000000501034f0000000006000019000000005705043c0000000006760436000000000026004b0000024a0000c13d000000000004004b0000025b0000613d000000000121034f0000000304400210000000000502043300000000054501cf000000000545022f000000000101043b0000010004400089000000000141022f00000000014101cf000000000151019f000000000012043500000060013002100000049a00010430000000000800001900000000090000190000014e0030009c000000570000213d0000000402300039000000000062004b000000fe0000213d00000007033000290000001101
000367000000000331034f000000000303043b000000e00a30027000000000002a001a000000570000413d00000000072a0019000000000067004b000000fe0000213d0000013600300198000003130000c13d000001390030009c000003190000813d0000013a003001980000031f0000613d000000070420002900000127034001970000000002000414000100000031035500000000004a001a000000570000413d00000000044a0019000000000440007b000000570000413d00090000000a001d000a00000009001d000500000008001d000800000007001d000000000131034f000001270340019700010000003103e5000001270020009c000003c20000213d00000000013103df000000c0022002100000012e022001970000012c022001c700010000002103b500000000012103af0000000202000039049804930000040f00000000030100190000006003300270000001270330019700000001002001900000032a0000613d0000001f0230003900000130052001970000003f025000390000013104200197000000400200043d0000000004420019000000000024004b000000000600003900000001060040390000012b0040009c0000023f0000213d00000001006001900000023f0000c13d000000400040043f0000000004320436000000000005004b000000090a000029000002ad0000613d0000000005540019000000000600003100000011066003670000000007040019000000006806043c0000000007870436000000000057004b000002a90000c13d0000012f063001980000000005640019000002b60000613d000000000701034f0000000008040019000000007907043c0000000008980436000000000058004b000002b20000c13d0000001f03300190000002c30000613d000000000161034f0000000303300210000000000605043300000000063601cf000000000636022f000000000101043b0000010003300089000000000131022f00000000013101cf000000000161019f0000000000150435000000400100043d0000000002020433000000200020008c0000000a05000029000003420000c13d00000000020404330000013d02200197000000db03a002100000013e03300197000000000223019f0000013f022001c7000000400310003900000000002304350000002002100039000000000052043500000040030000390000000000310435000001320010009c0000023f0000213d0000006003100039000000400030043f000001270020009c000001270200804100000040022002100000000001010433000001270010009c00000127010080410000006001100210000000000121019f0000000002000414000001270020009c0000012702008041000000c002200210000000000112019f00000133011001c700008010020000390498048e0000040f0000000100200190000000060600002900000008030000290000000508000029000000fe0000613d000000000901043b0000000108800039000000040080006c0000025f0000413d000001060000013d000000400100043d0000004402100039000000000032043500000024021000390000000203000029000000000032043500000134020000410000000000210435000000040210003900000001030000390000042c0000013d0000014001000041000000000010043f0000003201000039000000040010043f00000138010000410000049a00010430000000400200043d0000004403200039000000000013043500000024012000390000000103000039000000000031043500000134010000410000000000120435000000040120003900000002030000390000000000310435000001270020009c0000012702008041000000400120021000000135011001c70000049a00010430000000400100043d0000013702000041000000000021043500000004021000390000000203000039000003240000013d000000400100043d0000013702000041000000000021043500000004021000390000000103000039000003240000013d000000400100043d00000137020000410000000000210435000000040210003900000003030000390000000000320435000001270010009c0000012701008041000000400110021000000138011001c70000049a000104300000001f0430018f0000012f02300198000003330000613d000000000501034f0000000006000019000000005705043c0000000006760436000000000026004b0000032f0000c13d000000000004004b000003400000613d000000000121034f0000000304400210000000000502043300000000054501cf000000000545022f000000000101043b0000010004400089000000000141022f00000000014101cf000000000151019f000000000012043500000060013002100000049a0001043000000044021000390000013b03000
041000000000032043500000024021000390000001903000039000004270000013d0000001f0530018f0000012f06300198000000400200043d0000000004620019000003530000613d000000000701034f0000000008020019000000007907043c0000000008980436000000000048004b0000034f0000c13d000000000005004b000003600000613d000000000161034f0000000305500210000000000604043300000000065601cf000000000656022f000000000101043b0000010005500089000000000151022f00000000015101cf000000000161019f00000000001404350000006001300210000001270020009c00000127020080410000004002200210000000000112019f0000049a000104300000013405000041000000010600002900000000005604350000000305000039000000000053043500000000000204350000000000410435000001270060009c0000012706008041000000400160021000000135011001c70000049a00010430000000400200043d0000000006520019000000000005004b0000037c0000613d000000000701034f0000000008020019000000007907043c0000000008980436000000000068004b000003780000c13d000000000004004b000003600000613d000000000151034f0000000304400210000000000506043300000000054501cf000000000545022f000000000101043b0000010004400089000000000141022f00000000014101cf000000000151019f0000000000160435000003600000013d0000000901000029000000400010043f000000200030008c000000fe0000413d000000010100002900000000010104330000012b0010009c000000fe0000213d000000010230002900000001011000290000001f03100039000000000023004b000000fe0000813d00000000140104340000012b0040009c0000023f0000213d00000005034002100000003f05300039000001470550019700000009055000290000012b0050009c0000023f0000213d000000400050043f000000090500002900000000004504350000000003130019000000000023004b000000fe0000213d000000000004004b000003ae0000613d0000000902000029000000200220003900000000140104340000000000420435000000000031004b000003a90000413d000000000100041400000011020003670000000a0000006b000003b40000c13d0000000003000031000003be0000013d00000007030000290000012704300197000100000042035500000003050000290000000a0050006c000000570000413d0000000305000029000000000350007b000000570000413d000000000242034f000001270330019700010000003203e5000001270010009c000003c90000a13d000000400100043d00000044021000390000014d03000041000000000032043500000024021000390000000803000039000004270000013d00000000023203df000000c0011002100000012e011001970000012c011001c700010000001203b500000000011203af0000801002000039049804930000040f0000000003010019000000600330027000000127033001970000000100200190000004320000613d0000001f0230003900000130052001970000003f025000390000013104200197000000400200043d0000000004420019000000000024004b000000000600003900000001060040390000012b0040009c0000023f0000213d00000001006001900000023f0000c13d000000400040043f0000000004320436000000000005004b000003ef0000613d0000000005540019000000000600003100000011066003670000000007040019000000006806043c0000000007870436000000000057004b000003eb0000c13d0000001f0530018f0000012f063001980000000003640019000003f90000613d000000000701034f0000000008040019000000007907043c0000000008980436000000000038004b000003f50000c13d000000000005004b000004060000613d000000000161034f0000000305500210000000000603043300000000065601cf000000000656022f000000000101043b0000010005500089000000000151022f00000000015101cf000000000161019f00000000001304350000000001020433000000200010008c000004210000c13d000000400100043d00000009020000290000000002020433000001000020008c0000044a0000413d00000064021000390000014a03000041000000000032043500000044021000390000014b0300004100000000003204350000002402100039000000250300003900000000003204350000013c020000410000000000210435000000040210003900000020030000390000000000320435000001270010009c000001270100804100000040011002100000014c011001c70000049a00010430000000400100043d
00000044021000390000014803000041000000000032043500000024021000390000001f0300003900000000003204350000013c020000410000000000210435000000040210003900000020030000390000000000320435000001270010009c0000012701008041000000400110021000000135011001c70000049a000104300000001f0430018f0000012f023001980000043b0000613d000000000501034f0000000006000019000000005705043c0000000006760436000000000026004b000004370000c13d000000000004004b000004480000613d000000000121034f0000000304400210000000000502043300000000054501cf000000000545022f000000000101043b0000010004400089000000000141022f00000000014101cf000000000151019f000000000012043500000060013002100000049a000104300000000003040433000000f8022002100000006004100039000000000024043500000040021000390000000000320435000000200210003900000008030000290000000000320435000000610310003900000009040000290000000004040433000000000004004b000004610000613d000000000500001900000009060000290000002006600039000900000006001d000000000606043300000000036304360000000105500039000000000045004b000004590000413d0000000003130049000000200430008a00000000004104350000001f0330003900000150043001970000000003140019000000000043004b000000000400003900000001040040390000012b0030009c0000023f0000213d00000001004001900000023f0000c13d000000400030043f000001270020009c000001270200804100000040022002100000000001010433000001270010009c00000127010080410000006001100210000000000121019f0000000002000414000001270020009c0000012702008041000000c002200210000000000112019f00000133011001c700008010020000390498048e0000040f0000000100200190000000fe0000613d000000000101043b000000400200043d0000000000120435000001270020009c0000012702008041000000400120021000000149011001c7000004990001042e0000048c002104210000000102000039000000000001042d0000000002000019000000000001042d00000491002104230000000102000039000000000001042d0000000002000019000000000001042d00000496002104230000000102000039000000000001042d0000000002000019000000000001042d0000049800000432000004990001042e0000049a00010430000000000000000000000000000000000000000000000000000000000000000000000000ffffffff0000000200000000000000000000000000000040000001000000000000000000ffffffff0000000000000000000000000000000000000000000000000000000089f9a07200000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000ffffffffffffffff0000000100000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000010000000000000000ffffffff00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000ffffffe000000000000000000000000000000000000000000000000000000001ffffffe000000000000000000000000000000000000000000000000000000003ffffffe0000000000000000000000000000000000000000000000000ffffffffffffff9f02000000000000000000000000000000000000000000000000000000000000007f7b0cf70000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000640000000000000000000000000000001f0000000000000000000000000000000000000000000000000000000043e266b0000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000024000000000000000000000000002000000000000000000000000000000000000000000000000000000000000000000020000000000000000000000000000000000000000000000000000000007368612072657475726e656420696e76616c696420646174610000000000000008c379a00000000000000000000000000000000000000000000000000000000000000000ffffffffffffffffffffffffffffffffffffffffffffffffffffffff06ffffff0000000000000000000000000000000000000000000000000000000001000000000000000000000000000000000000000000000000000000000000004e4
87b71000000000000000000000000000000000000000000000000000000006006d8b500000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000001ffffffffe0000000000000000000000000000000000000000000000000000003ffffffffe00000000000000000000000000000000000000000000000000000000000ffffe00000000000000000000000000000000000000000000000000000000001ffffe018876a04000000000000000000000000000000000000000000000000000000007fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffe06b656363616b3235362072657475726e656420696e76616c69642064617461000000000000000000000000000000000000000020000000000000000000000000206269747300000000000000000000000000000000000000000000000000000053616665436173743a2076616c756520646f65736e27742066697420696e203800000000000000000000000000000000000000840000000000000000000000004f766572666c6f77000000000000000000000000000000000000000000000000fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffbfffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffcffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffe00000000000000000000000000000000000000000000000000000000000000000e901f5bd8811df26e614332e2110b9bc002e2cbadd82065c67e102f858079d5a").unwrap() +} + fn encoded_uncompressed_state_diffs(input: &PubdataInput) -> Vec { let mut result = vec![]; for state_diff in input.state_diffs.iter() { @@ -57,9 +57,11 @@ fn compose_header_for_l1_commit_rollup(input: PubdataInput) -> Vec { // Now, we need to calculate the linear hashes of the blobs. // Firstly, let's pad the pubdata to the size of the blob. if full_pubdata.len() % ZK_SYNC_BYTES_PER_BLOB != 0 { - let padding = - vec![0u8; ZK_SYNC_BYTES_PER_BLOB - full_pubdata.len() % ZK_SYNC_BYTES_PER_BLOB]; - full_pubdata.extend(padding); + full_pubdata.resize( + full_pubdata.len() + ZK_SYNC_BYTES_PER_BLOB + - full_pubdata.len() % ZK_SYNC_BYTES_PER_BLOB, + 0, + ); } full_header.push((full_pubdata.len() / ZK_SYNC_BYTES_PER_BLOB) as u8); @@ -100,9 +102,9 @@ pub(crate) fn test_rollup_da_output_hash_match() { let tx = account.get_deploy_tx(&counter, None, TxType::L2).tx; // We do not use compression here, to have the bytecode published in full. - vm.vm - .push_transaction_with_refund_and_compression(tx, 0, false); - let result = vm.vm.execute(InspectExecutionMode::OneTx); + let (_, result) = vm + .vm + .execute_transaction_with_bytecode_compression(tx, false); assert!(!result.result.is_failed(), "Transaction wasn't successful"); // Then, we call the l1 messenger to also send an L2->L1 message. 
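
As context for the hunk above: the new `resize`-based padding rounds `full_pubdata` up in place to the next multiple of `ZK_SYNC_BYTES_PER_BLOB`, filling with zero bytes, instead of building and appending a separate padding vector. A minimal, self-contained sketch of the same round-up logic (the blob size below is an assumed stand-in, not the constant from the codebase):

const BYTES_PER_BLOB: usize = 4096 * 31; // assumption: stand-in for ZK_SYNC_BYTES_PER_BLOB

/// Zero-pads `pubdata` so its length becomes a multiple of `BYTES_PER_BLOB`,
/// mirroring the `resize`-based padding in `compose_header_for_l1_commit_rollup`.
fn pad_to_blob_boundary(pubdata: &mut Vec<u8>) {
    let remainder = pubdata.len() % BYTES_PER_BLOB;
    if remainder != 0 {
        // `resize` grows the buffer in place; the new bytes are zero-initialized.
        pubdata.resize(pubdata.len() + BYTES_PER_BLOB - remainder, 0);
    }
}

fn main() {
    let mut pubdata = vec![0xff_u8; 100];
    pad_to_blob_boundary(&mut pubdata);
    assert_eq!(pubdata.len(), BYTES_PER_BLOB); // 100 bytes round up to one full blob
    pad_to_blob_boundary(&mut pubdata);
    assert_eq!(pubdata.len(), BYTES_PER_BLOB); // an already-aligned buffer is left unchanged
}
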
diff --git a/core/lib/multivm/src/versions/testonly/refunds.rs b/core/lib/multivm/src/versions/testonly/refunds.rs
index c98d62a86487..874425fc435c 100644
--- a/core/lib/multivm/src/versions/testonly/refunds.rs
+++ b/core/lib/multivm/src/versions/testonly/refunds.rs
@@ -56,11 +56,8 @@ pub(crate) fn test_predetermined_refunded_gas() {
         .build::<VM>();
     assert_eq!(account.address(), vm.rich_accounts[0].address());

-    vm.vm.push_transaction_with_refund_and_compression(
-        tx.clone(),
-        result.refunds.gas_refunded,
-        true,
-    );
+    vm.vm
+        .push_transaction_with_refund(tx.clone(), result.refunds.gas_refunded);

     let result_with_predefined_refunds = vm
         .vm
@@ -115,7 +112,7 @@ pub(crate) fn test_predetermined_refunded_gas() {
     let changed_operator_suggested_refund = result.refunds.gas_refunded + 1000;

     vm.vm
-        .push_transaction_with_refund_and_compression(tx, changed_operator_suggested_refund, true);
+        .push_transaction_with_refund(tx, changed_operator_suggested_refund);
     let result = vm
         .vm
         .finish_batch(default_pubdata_builder())
diff --git a/core/lib/multivm/src/versions/testonly/tester/mod.rs b/core/lib/multivm/src/versions/testonly/tester/mod.rs
index 663942410a41..ff851ef5f3f1 100644
--- a/core/lib/multivm/src/versions/testonly/tester/mod.rs
+++ b/core/lib/multivm/src/versions/testonly/tester/mod.rs
@@ -225,13 +225,8 @@ pub(crate) trait TestedVm:
     /// Same as `start_new_l2_block`, but should skip consistency checks (to verify they are performed by the bootloader).
     fn push_l2_block_unchecked(&mut self, block: L2BlockEnv);

-    /// Pushes a transaction with predefined refund value and compression.
-    fn push_transaction_with_refund_and_compression(
-        &mut self,
-        tx: Transaction,
-        refund: u64,
-        compression: bool,
-    );
+    /// Pushes a transaction with predefined refund value.
+    fn push_transaction_with_refund(&mut self, tx: Transaction, refund: u64);

     /// Returns pubdata input.
     fn pubdata_input(&self) -> PubdataInput;
diff --git a/core/lib/multivm/src/versions/vm_fast/tests/mod.rs b/core/lib/multivm/src/versions/vm_fast/tests/mod.rs
index fef14671ed12..2093d0ec496f 100644
--- a/core/lib/multivm/src/versions/vm_fast/tests/mod.rs
+++ b/core/lib/multivm/src/versions/vm_fast/tests/mod.rs
@@ -166,13 +166,8 @@ impl TestedVm for Vm> {
         self.bootloader_state.push_l2_block(block);
     }

-    fn push_transaction_with_refund_and_compression(
-        &mut self,
-        tx: Transaction,
-        refund: u64,
-        compression: bool,
-    ) {
-        self.push_transaction_inner(tx, refund, compression);
+    fn push_transaction_with_refund(&mut self, tx: Transaction, refund: u64) {
+        self.push_transaction_inner(tx, refund, true);
     }

     fn pubdata_input(&self) -> PubdataInput {
diff --git a/core/lib/multivm/src/versions/vm_latest/tests/mod.rs b/core/lib/multivm/src/versions/vm_latest/tests/mod.rs
index fad79b16acf3..aac3b1655b3a 100644
--- a/core/lib/multivm/src/versions/vm_latest/tests/mod.rs
+++ b/core/lib/multivm/src/versions/vm_latest/tests/mod.rs
@@ -184,15 +184,10 @@ impl TestedVm for TestedLatestVm {
         self.bootloader_state.push_l2_block(block);
     }

-    fn push_transaction_with_refund_and_compression(
-        &mut self,
-        tx: Transaction,
-        refund: u64,
-        compression: bool,
-    ) {
+    fn push_transaction_with_refund(&mut self, tx: Transaction, refund: u64) {
         let tx = TransactionData::new(tx, false);
         let overhead = tx.overhead_gas();
-        self.push_raw_transaction(tx, overhead, refund, compression)
+        self.push_raw_transaction(tx, overhead, refund, true)
     }

     fn pubdata_input(&self) -> PubdataInput {
diff --git a/core/lib/types/src/l2_to_l1_log.rs b/core/lib/types/src/l2_to_l1_log.rs
index 0ee0547930ff..be4275ac5fb5 100644
--- a/core/lib/types/src/l2_to_l1_log.rs
+++ b/core/lib/types/src/l2_to_l1_log.rs
@@ -87,11 +87,13 @@ pub struct BatchAndChainMerklePath {

 pub const LOG_PROOF_SUPPORTED_METADATA_VERSION: u8 = 1;

+// keccak256("zkSync:BatchLeaf")
 pub const BATCH_LEAF_PADDING: H256 = H256([
     0xd8, 0x2f, 0xec, 0x4a, 0x37, 0xcb, 0xdc, 0x47, 0xf1, 0xe5, 0xcc, 0x4a, 0xd6, 0x4d, 0xea, 0xcf,
     0x34, 0xa4, 0x8e, 0x6f, 0x7c, 0x61, 0xfa, 0x5b, 0x68, 0xfd, 0x58, 0xe5, 0x43, 0x25, 0x9c, 0xf4,
 ]);

+// keccak256("zkSync:ChainIdLeaf")
 pub const CHAIN_ID_LEAF_PADDING: H256 = H256([
     0x39, 0xbc, 0x69, 0x36, 0x3b, 0xb9, 0xe2, 0x6c, 0xf1, 0x42, 0x40, 0xde, 0x4e, 0x22, 0x56, 0x9e,
     0x95, 0xcf, 0x17, 0x5c, 0xfb, 0xcf, 0x1a, 0xde, 0x1a, 0x47, 0xa2, 0x53, 0xb4, 0xbf, 0x7f, 0x61,
@@ -135,6 +137,7 @@ pub fn parse_system_logs_for_blob_hashes_pre_gateway(

 #[cfg(test)]
 mod tests {
+    use zksync_basic_types::web3::keccak256;
     use zksync_system_constants::L1_MESSENGER_ADDRESS;

     use super::*;
@@ -160,4 +163,13 @@ mod tests {

         assert_eq!(expected_log_bytes, log.to_bytes());
     }
+
+    #[test]
+    fn check_padding_constants() {
+        let batch_leaf_padding_expected = keccak256("zkSync:BatchLeaf".as_bytes());
+        assert_eq!(batch_leaf_padding_expected, BATCH_LEAF_PADDING.0);
+
+        let chain_id_leaf_padding_expected = keccak256("zkSync:ChainIdLeaf".as_bytes());
+        assert_eq!(chain_id_leaf_padding_expected, CHAIN_ID_LEAF_PADDING.0);
+    }
 }
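The two padding constants above can also be double-checked outside the repo. A minimal standalone sketch, assuming the sha3 and hex crates (the in-tree test uses `zksync_basic_types::web3::keccak256` instead):

use sha3::{Digest, Keccak256};

fn keccak(data: &[u8]) -> [u8; 32] {
    // Keccak-256, the hash the leaf padding constants are derived from.
    Keccak256::digest(data).into()
}

fn main() {
    let batch_leaf = keccak(b"zkSync:BatchLeaf");
    let chain_id_leaf = keccak(b"zkSync:ChainIdLeaf");
    // Should print 0xd82fec4a... and 0x39bc6936..., matching the constants above.
    println!("BATCH_LEAF_PADDING    = 0x{}", hex::encode(batch_leaf));
    println!("CHAIN_ID_LEAF_PADDING = 0x{}", hex::encode(chain_id_leaf));
}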
diff --git a/core/node/api_server/src/web3/namespaces/unstable/mod.rs b/core/node/api_server/src/web3/namespaces/unstable/mod.rs
index fdfa7631f7c4..7f59f803ba48 100644
--- a/core/node/api_server/src/web3/namespaces/unstable/mod.rs
+++ b/core/node/api_server/src/web3/namespaces/unstable/mod.rs
@@ -103,15 +103,15 @@ impl UnstableNamespace {
             return Ok(None);
         };

-        let mut leafs = Vec::new();
+        let mut leaves = Vec::new();
         for chain_id in chain_ids {
             let chain_root =
                 get_chain_root_from_id(&mut connection, chain_id, l2_block_number).await?;
-            leafs.push(chain_id_leaf_preimage(chain_root, chain_id));
+            leaves.push(chain_id_leaf_preimage(chain_root, chain_id));
         }

         let chain_merkle_tree =
-            MiniMerkleTree::<[u8; 96], KeccakHasher>::new(leafs.into_iter(), None);
+            MiniMerkleTree::<[u8; 96], KeccakHasher>::new(leaves.into_iter(), None);

         let mut chain_id_leaf_proof = chain_merkle_tree
             .merkle_root_and_path(chain_id_leaf_proof_mask)
diff --git a/core/node/eth_watch/src/event_processors/appended_chain_batch_root.rs b/core/node/eth_watch/src/event_processors/appended_chain_batch_root.rs
index 79b0d9a19f05..83071997e26f 100644
--- a/core/node/eth_watch/src/event_processors/appended_chain_batch_root.rs
+++ b/core/node/eth_watch/src/event_processors/appended_chain_batch_root.rs
@@ -217,8 +217,8 @@ impl BatchRootProcessor {
         chain_agg_proof: ChainAggProof,
         sl_chain_id: SLChainId,
     ) -> Vec<H256> {
-        let sl_encoded_data = U256::from(sl_l1_batch_number.0) * U256::from(2).pow(128.into())
-            + chain_agg_proof.chain_id_leaf_proof_mask;
+        let sl_encoded_data =
+            (U256::from(sl_l1_batch_number.0) << 128u32) + chain_agg_proof.chain_id_leaf_proof_mask;

         let mut metadata = [0u8; 32];
         metadata[0] = LOG_PROOF_SUPPORTED_METADATA_VERSION;
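The `sl_encoded_data` rewrite above is a pure equivalence: shifting a `U256` left by 128 bits is the same as multiplying by 2^128, only cheaper and clearer. A small sketch, assuming the primitive-types crate's `U256` (the type re-exported through zksync_types in this codebase):

use primitive_types::U256;

fn main() {
    let sl_batch_number = 42u32;
    let proof_mask = U256::from(3);

    let shifted = (U256::from(sl_batch_number) << 128u32) + proof_mask;
    let multiplied = U256::from(sl_batch_number) * U256::from(2).pow(128.into()) + proof_mask;

    // Both encodings pack the SL batch number into the high 128 bits
    // and the proof mask into the low 128 bits.
    assert_eq!(shifted, multiplied);
}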
From e8569511c72be7abeabf4af6b9706a92fc4d0e0f Mon Sep 17 00:00:00 2001
From: perekopskiy
Date: Tue, 19 Nov 2024 16:07:00 +0200
Subject: [PATCH 07/11] address comments 2

---
 core/lib/types/src/api/mod.rs                 |  2 +-
 .../src/web3/namespaces/unstable/mod.rs       |  2 +-
 core/node/consistency_checker/src/lib.rs      | 12 ++++----
 .../node/consistency_checker/src/tests/mod.rs |  2 +-
 core/node/eth_watch/src/tests/mod.rs          | 10 +++----
 .../layers/query_eth_client.rs                | 28 +++++++------------
 .../layers/tree_data_fetcher.rs               |  3 +-
 .../node_sync/src/tree_data_fetcher/mod.rs    |  3 +-
 .../src/tree_data_fetcher/provider/mod.rs     | 12 ++++----
 9 files changed, 32 insertions(+), 42 deletions(-)

diff --git a/core/lib/types/src/api/mod.rs b/core/lib/types/src/api/mod.rs
index bd5b4ac5f2ab..37fc6cec7dbc 100644
--- a/core/lib/types/src/api/mod.rs
+++ b/core/lib/types/src/api/mod.rs
@@ -200,7 +200,7 @@ pub struct L2ToL1LogProof {
 #[serde(rename_all = "camelCase")]
 pub struct ChainAggProof {
     pub chain_id_leaf_proof: Vec<H256>,
-    pub chain_id_leaf_proof_mask: U256,
+    pub chain_id_leaf_proof_mask: u64,
 }

 /// A struct with the two default bridge contracts.
diff --git a/core/node/api_server/src/web3/namespaces/unstable/mod.rs b/core/node/api_server/src/web3/namespaces/unstable/mod.rs
index 7f59f803ba48..203412dbb78a 100644
--- a/core/node/api_server/src/web3/namespaces/unstable/mod.rs
+++ b/core/node/api_server/src/web3/namespaces/unstable/mod.rs
@@ -133,7 +133,7 @@ impl UnstableNamespace {
         Ok(Some(ChainAggProof {
             chain_id_leaf_proof,
-            chain_id_leaf_proof_mask: chain_id_leaf_proof_mask.into(),
+            chain_id_leaf_proof_mask: chain_id_leaf_proof_mask as u64,
         }))
     }
 }
diff --git a/core/node/consistency_checker/src/lib.rs b/core/node/consistency_checker/src/lib.rs
index d5e1af93fa09..a73adc44b83e 100644
--- a/core/node/consistency_checker/src/lib.rs
+++ b/core/node/consistency_checker/src/lib.rs
@@ -364,7 +364,7 @@ pub fn detect_da(
 }

 #[derive(Debug)]
-pub struct SLChainData {
+pub struct SLChainAccess {
     client: Box<DynClient<L1>>,
     chain_id: SLChainId,
     diamond_proxy_addr: Option<Address>,
@@ -377,8 +377,8 @@ pub struct ConsistencyChecker {
     /// How many past batches to check when starting
     max_batches_to_recheck: u32,
     sleep_interval: Duration,
-    l1_chain_data: SLChainData,
-    gateway_chain_data: Option<SLChainData>,
+    l1_chain_data: SLChainAccess,
+    gateway_chain_data: Option<SLChainAccess>,
     event_handler: Box<dyn HandleConsistencyCheckerEvent>,
     l1_data_mismatch_behavior: L1DataMismatchBehavior,
     pool: ConnectionPool<Core>,
@@ -399,7 +399,7 @@ impl ConsistencyChecker {
     ) -> anyhow::Result<Self> {
         let (health_check, health_updater) = ConsistencyCheckerHealthUpdater::new();
         let l1_chain_id = l1_client.fetch_chain_id().await?;
-        let l1_chain_data = SLChainData {
+        let l1_chain_data = SLChainAccess {
             client: l1_client.for_component("consistency_checker"),
             chain_id: l1_chain_id,
             diamond_proxy_addr: None,
@@ -418,7 +418,7 @@ impl ConsistencyChecker {
                 .call(&client)
                 .await?;
             let chain_id = client.fetch_chain_id().await?;
-            Some(SLChainData {
+            Some(SLChainAccess {
                 client: client.for_component("consistency_checker"),
                 chain_id,
                 diamond_proxy_addr: Some(gateway_diamond_proxy),
@@ -826,7 +826,7 @@ impl ConsistencyChecker {
         Ok(())
     }

-    fn chain_data_by_id(&self, searched_chain_id: SLChainId) -> Option<&SLChainData> {
+    fn chain_data_by_id(&self, searched_chain_id: SLChainId) -> Option<&SLChainAccess> {
         if searched_chain_id == self.l1_chain_data.chain_id {
             Some(&self.l1_chain_data)
         } else if Some(searched_chain_id) == self.gateway_chain_data.as_ref().map(|d| d.chain_id) {
diff --git a/core/node/consistency_checker/src/tests/mod.rs b/core/node/consistency_checker/src/tests/mod.rs
index 5b1d49d43df9..1635bddffb83 100644
--- a/core/node/consistency_checker/src/tests/mod.rs
+++ b/core/node/consistency_checker/src/tests/mod.rs
@@ -98,7 +98,7 @@ pub(crate) async fn create_mock_checker(
     let (health_check, health_updater) = ConsistencyCheckerHealthUpdater::new();
     let client = client.into_client();
     let chain_id = client.fetch_chain_id().await.unwrap();
-    let l1_chain_data = SLChainData {
+    let l1_chain_data = SLChainAccess {
         client: Box::new(client),
         chain_id,
         diamond_proxy_addr: Some(L1_DIAMOND_PROXY_ADDR),
diff --git a/core/node/eth_watch/src/tests/mod.rs b/core/node/eth_watch/src/tests/mod.rs
index 118bb9b7e436..df91074beb18 100644
--- a/core/node/eth_watch/src/tests/mod.rs
+++ b/core/node/eth_watch/src/tests/mod.rs
@@ -660,7 +660,7 @@ fn chain_log_proofs() -> Vec<(L1BatchNumber, ChainAggProof)> {
                     .unwrap(),
                     ),
                 ],
-                chain_id_leaf_proof_mask: 3u32.into(),
+                chain_id_leaf_proof_mask: 3,
             },
         ),
         (
@@ -680,7 +680,7 @@ fn chain_log_proofs() -> Vec<(L1BatchNumber, ChainAggProof)> {
                     .unwrap(),
                     ),
                 ],
-                chain_id_leaf_proof_mask: 3u32.into(),
+                chain_id_leaf_proof_mask: 3,
             },
         ),
         (
@@ -700,7 +700,7 @@ fn chain_log_proofs() -> Vec<(L1BatchNumber, ChainAggProof)> {
                     .unwrap(),
                     ),
                 ],
-                chain_id_leaf_proof_mask: 3u32.into(),
+                chain_id_leaf_proof_mask: 3,
             },
         ),
         (
@@ -720,7 +720,7 @@ fn chain_log_proofs() -> Vec<(L1BatchNumber, ChainAggProof)> {
                     .unwrap(),
                     ),
                 ],
-                chain_id_leaf_proof_mask: 3u32.into(),
+                chain_id_leaf_proof_mask: 3,
             },
         ),
         (
@@ -740,7 +740,7 @@ fn chain_log_proofs() -> Vec<(L1BatchNumber, ChainAggProof)> {
                     .unwrap(),
                     ),
                 ],
-                chain_id_leaf_proof_mask: 3u32.into(),
+                chain_id_leaf_proof_mask: 3,
             },
         ),
     ]
 }
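For readers unfamiliar with the `chain_id_leaf_proof_mask` field changed above: it is the leaf's index in the Merkle tree, and its bits select, level by level, whether the sibling hash from the proof sits on the left or the right. A toy sketch of verification under that assumption (a stand-in hash instead of Keccak; not the real zksync verifier):

fn combine(left: u64, right: u64) -> u64 {
    // Toy "hash": enough to show the shape of the algorithm.
    left.wrapping_mul(31).wrapping_add(right).rotate_left(7)
}

fn verify_path(mut node: u64, mask: u64, path: &[u64]) -> u64 {
    let mut index = mask;
    for &sibling in path {
        // Even index: our node is the left child; odd index: the right child.
        node = if index % 2 == 0 {
            combine(node, sibling)
        } else {
            combine(sibling, node)
        };
        index /= 2;
    }
    node // the reconstructed root
}

fn main() {
    // Four leaves; recompute the root from leaf #3 (mask = 3, as in the tests above).
    let leaves = [10u64, 20, 30, 40];
    let l01 = combine(leaves[0], leaves[1]);
    let l23 = combine(leaves[2], leaves[3]);
    let root = combine(l01, l23);
    assert_eq!(verify_path(leaves[3], 3, &[leaves[2], l01]), root);
}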
diff --git a/core/node/node_framework/src/implementations/layers/query_eth_client.rs b/core/node/node_framework/src/implementations/layers/query_eth_client.rs
index e1a8dd71fed8..73d28f6a2aae 100644
--- a/core/node/node_framework/src/implementations/layers/query_eth_client.rs
+++ b/core/node/node_framework/src/implementations/layers/query_eth_client.rs
@@ -50,7 +50,7 @@ impl WiringLayer for QueryEthClientLayer {
     }

     async fn wire(self, _input: Self::Input) -> Result<Self::Output, WiringError> {
-        // Both the L1 and L2 client have the same URL, but provide different type guarantees.
+        // Both `query_client_gateway` and `query_client_l2` use the same URL, but provide different type guarantees.
         Ok(Output {
             query_client_l1: EthInterfaceResource(Box::new(
                 Client::http(self.web3_url.clone())
@@ -58,29 +58,21 @@ impl WiringLayer for QueryEthClientLayer {
                     .context("Client::new()")?
                     .for_network(self.chain_id.into())
                     .build(),
             )),
-            query_client_l2: if self.gateway_web3_url.is_some() {
+            query_client_l2: if let Some(gateway_web3_url) = self.gateway_web3_url.clone() {
                 Some(L2InterfaceResource(Box::new(
-                    Client::http(
-                        self.gateway_web3_url
-                            .clone()
-                            .expect("gateway url is required"),
-                    )
-                    .context("Client::new()")?
-                    .for_network(L2ChainId::try_from(self.chain_id.0).unwrap().into())
-                    .build(),
+                    Client::http(gateway_web3_url)
+                        .context("Client::new()")?
+                        .for_network(L2ChainId::try_from(self.chain_id.0).unwrap().into())
+                        .build(),
                 )))
             } else {
                 None
             },
-            query_client_gateway: if self.gateway_web3_url.is_some() {
+            query_client_gateway: if let Some(gateway_web3_url) = self.gateway_web3_url {
                 Some(GatewayEthInterfaceResource(Box::new(
-                    Client::http(
-                        self.gateway_web3_url
-                            .clone()
-                            .expect("gateway url is required"),
-                    )
-                    .context("Client::new()")?
-                    .build(),
+                    Client::http(gateway_web3_url)
+                        .context("Client::new()")?
+                        .build(),
                 )))
             } else {
                 None
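The `query_eth_client` cleanup above trades an `is_some()` check plus an `expect()` inside the branch for a single `if let` binding; the same shape can often be collapsed further with `Option::map`. A small sketch, with a hypothetical `build_client` helper standing in for the real `Client::http(...).build()` chain:

fn build_client(url: String) -> String {
    format!("client for {url}") // stand-in for a real client constructor
}

fn client_for(gateway_url: Option<String>) -> Option<String> {
    // Before: if gateway_url.is_some() { Some(build_client(gateway_url.clone().expect("..."))) } else { None }
    // After, as in the patch:
    if let Some(url) = gateway_url {
        Some(build_client(url))
    } else {
        None
    }
    // Equivalent one-liner: gateway_url.map(build_client)
}

fn main() {
    assert_eq!(client_for(None), None);
    assert!(client_for(Some("http://localhost:3052".into())).is_some());
}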
diff --git a/core/node/node_framework/src/implementations/layers/tree_data_fetcher.rs b/core/node/node_framework/src/implementations/layers/tree_data_fetcher.rs
index 9abbf2daa7d6..cdf0700a0e73 100644
--- a/core/node/node_framework/src/implementations/layers/tree_data_fetcher.rs
+++ b/core/node/node_framework/src/implementations/layers/tree_data_fetcher.rs
@@ -67,12 +67,11 @@ impl WiringLayer for TreeDataFetcherLayer {
             "Running tree data fetcher (allows a node to operate w/o a Merkle tree or w/o waiting the tree to catch up). \
              This is an experimental feature; do not use unless you know what you're doing"
         );
-        let task = TreeDataFetcher::new(client, pool.clone())
+        let task = TreeDataFetcher::new(client, pool)
             .with_l1_data(
                 l1_client,
                 self.l1_diamond_proxy_addr,
                 gateway_client,
-                pool,
                 self.l2_chain_id,
             )
             .await?;
diff --git a/core/node/node_sync/src/tree_data_fetcher/mod.rs b/core/node/node_sync/src/tree_data_fetcher/mod.rs
index fa1c2cec253a..9f8ac18c39bd 100644
--- a/core/node/node_sync/src/tree_data_fetcher/mod.rs
+++ b/core/node/node_sync/src/tree_data_fetcher/mod.rs
@@ -132,7 +132,6 @@ impl TreeDataFetcher {
         l1_client: Box<DynClient<L1>>,
         l1_diamond_proxy_addr: Address,
         gateway_client: Option<Box<DynClient<L2>>>,
-        pool: ConnectionPool<Core>,
         l2_chain_id: L2ChainId,
     ) -> anyhow::Result<Self> {
         anyhow::ensure!(
@@ -144,7 +143,7 @@ impl TreeDataFetcher {
             l1_client.for_component("tree_data_fetcher"),
             l1_diamond_proxy_addr,
             gateway_client.map(|c| c.for_component("tree_data_fetcher")),
-            pool,
+            self.pool.clone(),
             l2_chain_id,
         )
         .await?;
diff --git a/core/node/node_sync/src/tree_data_fetcher/provider/mod.rs b/core/node/node_sync/src/tree_data_fetcher/provider/mod.rs
index 305650203f58..432808422632 100644
--- a/core/node/node_sync/src/tree_data_fetcher/provider/mod.rs
+++ b/core/node/node_sync/src/tree_data_fetcher/provider/mod.rs
@@ -98,7 +98,7 @@ struct PastL1BatchInfo {
 }

 #[derive(Debug)]
-struct SLChainData {
+struct SLChainAccess {
     client: Box<DynClient<L1>>,
     chain_id: SLChainId,
     diamond_proxy_addr: Address,
@@ -116,8 +116,8 @@
 /// (provided it's not too far behind the seal timestamp of the batch).
 #[derive(Debug)]
 pub(super) struct L1DataProvider {
-    l1_chain_data: SLChainData,
-    gateway_chain_data: Option<SLChainData>,
+    l1_chain_data: SLChainAccess,
+    gateway_chain_data: Option<SLChainAccess>,
     block_commit_signature: H256,
     past_l1_batch: Option<PastL1BatchInfo>,
     pool: ConnectionPool<Core>,
@@ -138,7 +138,7 @@ impl L1DataProvider {
         l2_chain_id: L2ChainId,
     ) -> anyhow::Result<Self> {
         let l1_chain_id = l1_client.fetch_chain_id().await?;
-        let l1_chain_data = SLChainData {
+        let l1_chain_data = SLChainAccess {
             client: l1_client,
             chain_id: l1_chain_id,
             diamond_proxy_addr: l1_diamond_proxy_addr,
@@ -158,7 +158,7 @@ impl L1DataProvider {
                 .call(&client)
                 .await?;
             let chain_id = client.fetch_chain_id().await?;
-            Some(SLChainData {
+            Some(SLChainAccess {
                 client,
                 chain_id,
                 diamond_proxy_addr: gateway_diamond_proxy,
@@ -234,7 +234,7 @@ impl L1DataProvider {
         Ok((number, block.timestamp))
     }

-    fn chain_data_by_id(&self, searched_chain_id: SLChainId) -> Option<&SLChainData> {
+    fn chain_data_by_id(&self, searched_chain_id: SLChainId) -> Option<&SLChainAccess> {
         if searched_chain_id == self.l1_chain_data.chain_id {
             Some(&self.l1_chain_data)
         } else if Some(searched_chain_id) == self.gateway_chain_data.as_ref().map(|d| d.chain_id) {
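The `pool` plumbing change above works because connection-pool handles in this codebase are cheap, reference-counted clones, so the fetcher can own one handle and clone it on demand instead of threading an extra parameter through `with_l1_data`. A generic sketch of that pattern, with `Arc` standing in for the actual `ConnectionPool<Core>` type:

use std::sync::Arc;

struct Pool; // stand-in for a real connection pool

struct Fetcher {
    pool: Arc<Pool>,
}

impl Fetcher {
    fn new(pool: Arc<Pool>) -> Self {
        Self { pool }
    }

    fn with_extra_source(self) -> Self {
        // No need to accept the pool again: clone the handle we already own.
        let _handle = Arc::clone(&self.pool);
        self
    }
}

fn main() {
    let fetcher = Fetcher::new(Arc::new(Pool)).with_extra_source();
    let _ = fetcher;
}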
From e472017e809829af98ef2d1b041c04709a8742b5 Mon Sep 17 00:00:00 2001
From: perekopskiy
Date: Tue, 19 Nov 2024 16:12:19 +0200
Subject: [PATCH 08/11] extend comment

---
 .../src/event_processors/appended_chain_batch_root.rs | 4 +++-
 1 file changed, 3 insertions(+), 1 deletion(-)

diff --git a/core/node/eth_watch/src/event_processors/appended_chain_batch_root.rs b/core/node/eth_watch/src/event_processors/appended_chain_batch_root.rs
index 83071997e26f..68f731120c65 100644
--- a/core/node/eth_watch/src/event_processors/appended_chain_batch_root.rs
+++ b/core/node/eth_watch/src/event_processors/appended_chain_batch_root.rs
@@ -18,7 +18,9 @@ use crate::{
     event_processors::{EventProcessor, EventProcessorError, EventsSource},
 };

-/// Responsible for `AppendedChainBatchRoot` events and saving `BatchAndChainMerklePath` for batches.
+/// Listens to `AppendedChainBatchRoot` events and saves `BatchAndChainMerklePath` for batches.
+/// These events are emitted on the SL each time an L1 batch is executed. The processor uses them to track which
+/// batches are already executed and to group them by the SL batch they were executed in, since this data is required to build `BatchAndChainMerklePath`.
 #[derive(Debug)]
 pub struct BatchRootProcessor {
     next_batch_number_lower_bound: L1BatchNumber,
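A quick illustration of the grouping the extended comment above describes: bucket executed L1 batch numbers by the settlement-layer batch that executed them. A minimal sketch with plain integers standing in for `L1BatchNumber` and the SL batch number:

use std::collections::BTreeMap;

fn group_by_sl_batch(events: &[(u32 /* SL batch */, u32 /* L1 batch */)]) -> BTreeMap<u32, Vec<u32>> {
    let mut groups: BTreeMap<u32, Vec<u32>> = BTreeMap::new();
    for &(sl_batch, l1_batch) in events {
        groups.entry(sl_batch).or_default().push(l1_batch);
    }
    groups
}

fn main() {
    // L1 batches 1-3 executed in SL batch 10; batches 4-5 in SL batch 11.
    let events = [(10, 1), (10, 2), (10, 3), (11, 4), (11, 5)];
    let groups = group_by_sl_batch(&events);
    assert_eq!(groups[&10], vec![1, 2, 3]);
    assert_eq!(groups[&11], vec![4, 5]);
}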
From d81501d7b593c0b36392c01f660b351714de1998 Mon Sep 17 00:00:00 2001
From: perekopskiy
Date: Tue, 19 Nov 2024 16:35:07 +0200
Subject: [PATCH 09/11] fix test

---
 .../lib/multivm/src/versions/testonly/l1_messenger.rs | 11 ++++++-----
 1 file changed, 6 insertions(+), 5 deletions(-)

diff --git a/core/lib/multivm/src/versions/testonly/l1_messenger.rs b/core/lib/multivm/src/versions/testonly/l1_messenger.rs
index e144c01ce4f5..daf07b2750f7 100644
--- a/core/lib/multivm/src/versions/testonly/l1_messenger.rs
+++ b/core/lib/multivm/src/versions/testonly/l1_messenger.rs
@@ -2,14 +2,14 @@ use std::rc::Rc;

 use ethabi::Token;
 use zksync_contracts::l1_messenger_contract;
-use zksync_test_account::TxType;
+use zksync_test_contracts::{TestContract, TxType};
 use zksync_types::{
     address_to_h256, u256_to_h256, web3::keccak256, Address, Execute, ProtocolVersionId,
     L1_MESSENGER_ADDRESS, U256,
 };
 use zksync_vm_interface::SystemEnv;

-use super::{default_system_env, read_test_contract, ContractToDeploy, TestedVm, VmTesterBuilder};
+use super::{default_system_env, ContractToDeploy, TestedVm, VmTesterBuilder};
 use crate::{
     interface::{
         pubdata::{PubdataBuilder, PubdataInput},
@@ -98,9 +98,10 @@ pub(crate) fn test_rollup_da_output_hash_match() {
     let account = &mut vm.rich_accounts[0];

     // Firstly, deploy tx. It should publish the bytecode of the "test contract"
-    let counter = read_test_contract();
-
-    let tx = account.get_deploy_tx(&counter, None, TxType::L2).tx;
+    let counter_bytecode = TestContract::counter().bytecode;
+    let tx = account
+        .get_deploy_tx(&counter_bytecode, None, TxType::L2)
+        .tx;

     // We do not use compression here, to have the bytecode published in full.
     let (_, result) = vm
         .vm

From 013815fdd14bece558a2bdf8745d01cd277665b8 Mon Sep 17 00:00:00 2001
From: perekopskiy
Date: Wed, 27 Nov 2024 14:57:58 +0200
Subject: [PATCH 10/11] rename gateway_url

---
 core/bin/external_node/src/config/mod.rs                | 5 ++++-
 core/bin/zksync_server/src/node_builder.rs              | 2 +-
 core/lib/config/src/configs/secrets.rs                  | 2 +-
 core/lib/config/src/testonly.rs                         | 2 +-
 core/lib/env_config/src/eth_sender.rs                   | 4 ++--
 .../lib/protobuf_config/src/proto/config/secrets.proto  | 2 +-
 core/lib/protobuf_config/src/secrets.rs                 | 10 +++++-----
 .../src/commands/external_node/prepare_configs.rs       | 2 +-
 8 files changed, 16 insertions(+), 13 deletions(-)

diff --git a/core/bin/external_node/src/config/mod.rs b/core/bin/external_node/src/config/mod.rs
index 59f0dab1f48d..49f37116de75 100644
--- a/core/bin/external_node/src/config/mod.rs
+++ b/core/bin/external_node/src/config/mod.rs
@@ -728,7 +728,10 @@ impl OptionalENConfig {
                 .unwrap_or_else(Self::default_main_node_rate_limit_rps),
             api_namespaces,
             contracts_diamond_proxy_addr: None,
-            gateway_url: secrets.l1.as_ref().and_then(|l1| l1.gateway_url.clone()),
+            gateway_url: secrets
+                .l1
+                .as_ref()
+                .and_then(|l1| l1.gateway_rpc_url.clone()),
             bridge_addresses_refresh_interval_sec: enconfig.bridge_addresses_refresh_interval_sec,
             timestamp_asserter_min_time_till_end_sec: general_config
                 .timestamp_asserter_config
diff --git a/core/bin/zksync_server/src/node_builder.rs b/core/bin/zksync_server/src/node_builder.rs
index 118cd43cbb98..d74928e8fbc7 100644
--- a/core/bin/zksync_server/src/node_builder.rs
+++ b/core/bin/zksync_server/src/node_builder.rs
@@ -161,7 +161,7 @@ impl MainNodeBuilder {
         let query_eth_client_layer = QueryEthClientLayer::new(
             genesis.settlement_layer_id(),
             eth_config.l1_rpc_url,
-            eth_config.gateway_url,
+            eth_config.gateway_rpc_url,
         );
         self.node.add_layer(query_eth_client_layer);
         Ok(self)
diff --git a/core/lib/config/src/configs/secrets.rs b/core/lib/config/src/configs/secrets.rs
index 0572c65e9e94..8285d81e4bd2 100644
--- a/core/lib/config/src/configs/secrets.rs
+++ b/core/lib/config/src/configs/secrets.rs
@@ -16,7 +16,7 @@ pub struct DatabaseSecrets {
 #[derive(Debug, Clone, PartialEq)]
 pub struct L1Secrets {
     pub l1_rpc_url: SensitiveUrl,
-    pub gateway_url: Option<SensitiveUrl>,
+    pub gateway_rpc_url: Option<SensitiveUrl>,
 }

 #[derive(Debug, Clone, PartialEq)]
diff --git a/core/lib/config/src/testonly.rs b/core/lib/config/src/testonly.rs
index b685939c3774..d58867ea7418 100644
--- a/core/lib/config/src/testonly.rs
+++ b/core/lib/config/src/testonly.rs
@@ -852,7 +852,7 @@ impl Distribution for EncodeDist {
         use configs::secrets::L1Secrets;
         L1Secrets {
             l1_rpc_url: format!("localhost:{}", rng.gen::<u16>()).parse().unwrap(),
-            gateway_url: Some(format!("localhost:{}", rng.gen::<u16>()).parse().unwrap()),
+            gateway_rpc_url: Some(format!("localhost:{}", rng.gen::<u16>()).parse().unwrap()),
         }
     }
 }
diff --git a/core/lib/env_config/src/eth_sender.rs b/core/lib/env_config/src/eth_sender.rs
index 7029881b0c6d..d95c6dd3658f 100644
--- a/core/lib/env_config/src/eth_sender.rs
+++ b/core/lib/env_config/src/eth_sender.rs
@@ -23,7 +23,7 @@ impl FromEnv for L1Secrets {
                 .context("ETH_CLIENT_WEB3_URL")?
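An aside on the optional-env-var pattern in `L1Secrets::from_env` above (`.ok().map(|url| url.parse().expect(...))`): the same shape can propagate a parse failure as an error rather than a panic by using `transpose()`. A hedged sketch, assuming the url and anyhow crates, with `Url` standing in for the real `SensitiveUrl` type:

use url::Url;

fn optional_url(var: &str) -> anyhow::Result<Option<Url>> {
    std::env::var(var)
        .ok() // unset variable is fine: it's an optional secret
        .map(|raw| raw.parse::<Url>())
        .transpose() // Option<Result<_>> -> Result<Option<_>>
        .map_err(|err| anyhow::anyhow!("failed parsing {var} as a URL: {err}"))
}

fn main() -> anyhow::Result<()> {
    // Assuming the variable is unset in this environment: Ok(None).
    // A set-but-malformed value would yield Err instead of panicking.
    assert!(optional_url("ETH_CLIENT_GATEWAY_WEB3_URL")?.is_none());
    Ok(())
}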
                .parse()
                .context("ETH_CLIENT_WEB3_URL")?,
-            gateway_url: std::env::var("ETH_CLIENT_GATEWAY_WEB3_URL")
+            gateway_rpc_url: std::env::var("ETH_CLIENT_GATEWAY_WEB3_URL")
                 .ok()
                 .map(|url| url.parse().expect("ETH_CLIENT_GATEWAY_WEB3_URL")),
         })
@@ -100,7 +100,7 @@ mod tests {
             },
             L1Secrets {
                 l1_rpc_url: "http://127.0.0.1:8545".to_string().parse().unwrap(),
-                gateway_url: Some("http://127.0.0.1:8547".to_string().parse().unwrap()),
+                gateway_rpc_url: Some("http://127.0.0.1:8547".to_string().parse().unwrap()),
             },
         )
     }
diff --git a/core/lib/protobuf_config/src/proto/config/secrets.proto b/core/lib/protobuf_config/src/proto/config/secrets.proto
index b9622b5d6a2e..46392920369c 100644
--- a/core/lib/protobuf_config/src/proto/config/secrets.proto
+++ b/core/lib/protobuf_config/src/proto/config/secrets.proto
@@ -11,7 +11,7 @@ message DatabaseSecrets {

 message L1Secrets {
   optional string l1_rpc_url = 1; // required
-  optional string gateway_url = 2; // optional
+  optional string gateway_rpc_url = 2; // optional
 }

 message ConsensusSecrets {
diff --git a/core/lib/protobuf_config/src/secrets.rs b/core/lib/protobuf_config/src/secrets.rs
index ca7218c0b278..e0f41e325b79 100644
--- a/core/lib/protobuf_config/src/secrets.rs
+++ b/core/lib/protobuf_config/src/secrets.rs
@@ -86,20 +86,20 @@ impl ProtoRepr for proto::L1Secrets {
     fn read(&self) -> anyhow::Result<Self::Type> {
         Ok(Self::Type {
             l1_rpc_url: SensitiveUrl::from_str(required(&self.l1_rpc_url).context("l1_rpc_url")?)?,
-            gateway_url: self
-                .gateway_url
+            gateway_rpc_url: self
+                .gateway_rpc_url
                 .clone()
                 .map(|url| SensitiveUrl::from_str(&url))
                 .transpose()
-                .context("gateway_url")?,
+                .context("gateway_rpc_url")?,
         })
     }

     fn build(this: &Self::Type) -> Self {
         Self {
             l1_rpc_url: Some(this.l1_rpc_url.expose_str().to_string()),
-            gateway_url: this
-                .gateway_url
+            gateway_rpc_url: this
+                .gateway_rpc_url
                 .as_ref()
                 .map(|url| url.expose_url().to_string()),
         }
diff --git a/zkstack_cli/crates/zkstack/src/commands/external_node/prepare_configs.rs b/zkstack_cli/crates/zkstack/src/commands/external_node/prepare_configs.rs
index 122c1f549697..03a586a0652a 100644
--- a/zkstack_cli/crates/zkstack/src/commands/external_node/prepare_configs.rs
+++ b/zkstack_cli/crates/zkstack/src/commands/external_node/prepare_configs.rs
@@ -111,7 +111,7 @@ fn prepare_configs(
         }),
         l1: Some(L1Secrets {
             l1_rpc_url: SensitiveUrl::from_str(&args.l1_rpc_url).context("l1_rpc_url")?,
-            gateway_url: None,
+            gateway_rpc_url: None,
         }),
         data_availability: None,
     };

From c71686a28d54872b10698988c3479777cabebd43 Mon Sep 17 00:00:00 2001
From: perekopskiy
Date: Wed, 27 Nov 2024 16:17:39 +0200
Subject: [PATCH 11/11] add comment

---
 core/lib/constants/src/contracts.rs | 3 ++-
 1 file changed, 2 insertions(+), 1 deletion(-)

diff --git a/core/lib/constants/src/contracts.rs b/core/lib/constants/src/contracts.rs
index f9138b2bbf17..6e402c117bfe 100644
--- a/core/lib/constants/src/contracts.rs
+++ b/core/lib/constants/src/contracts.rs
@@ -135,7 +135,8 @@ pub const EVM_GAS_MANAGER_ADDRESS: Address = H160([
     0x00, 0x00, 0x80, 0x13,
 ]);

-/// Note, that the `Create2Factory` and higher are explicitly deployed on a non-system-contract address.
+/// Note that the `Create2Factory` and higher are explicitly deployed on a non-system-contract address,
+/// as they don't require any kernel space features.
 pub const CREATE2_FACTORY_ADDRESS: Address = H160([
     0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
     0x00, 0x01, 0x00, 0x00,