diff --git a/.github/workflows/ci-core-reusable.yml b/.github/workflows/ci-core-reusable.yml index b2044d025c4c..7c7695ce56e1 100644 --- a/.github/workflows/ci-core-reusable.yml +++ b/.github/workflows/ci-core-reusable.yml @@ -397,7 +397,7 @@ jobs: - name: Fee projection tests run: | ci_run killall -INT zksync_server || true - ci_run ./bin/run_on_all_chains.sh "zk_supervisor test fees --no-deps --no-kill" ${{ env.CHAINS }} ${{ env.INTEGRATION_TESTS_LOGS_DIR }} + ci_run ./bin/run_on_all_chains.sh "zk_supervisor test fees --no-deps --no-kill" ${{ env.CHAINS }} ${{ env.FEES_LOGS_DIR }} - name: Run revert tests run: | diff --git a/.github/workflows/release-test-stage.yml b/.github/workflows/release-test-stage.yml index 9d78c11a1830..5767584d5e19 100644 --- a/.github/workflows/release-test-stage.yml +++ b/.github/workflows/release-test-stage.yml @@ -112,7 +112,6 @@ jobs: if: needs.changed_files.outputs.prover == 'true' || needs.changed_files.outputs.all == 'true' with: image_tag_suffix: ${{ needs.setup.outputs.image_tag_suffix }}-avx512 - ERA_BELLMAN_CUDA_RELEASE: ${{ vars.ERA_BELLMAN_CUDA_RELEASE }} CUDA_ARCH: "60;70;75;80;89" WITNESS_GENERATOR_RUST_FLAGS: "-Ctarget_feature=+avx512bw,+avx512cd,+avx512dq,+avx512f,+avx512vl " secrets: diff --git a/Cargo.lock b/Cargo.lock index bd9f2d5ef28d..f9f7a88764ef 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -1539,6 +1539,15 @@ dependencies = [ "windows-sys 0.52.0", ] +[[package]] +name = "const-decoder" +version = "0.4.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "b381abde2cdc1bc3817e394b24e05667a2dc89f37570cbd34d9c397d99e56e3f" +dependencies = [ + "compile-fmt", +] + [[package]] name = "const-oid" version = "0.9.6" @@ -10488,6 +10497,7 @@ dependencies = [ "itertools 0.10.5", "once_cell", "pretty_assertions", + "test-casing", "thiserror", "tracing", "vise", @@ -10515,6 +10525,7 @@ dependencies = [ "async-trait", "axum", "chrono", + "const-decoder", "futures 0.3.30", "governor", "hex", diff --git 
a/Cargo.toml b/Cargo.toml index d597f4af7542..60b5628f4191 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -114,6 +114,7 @@ blake2 = "0.10" chrono = "0.4" clap = "4.2.2" codegen = "0.2.0" +const-decoder = "0.4.0" criterion = "0.4.0" ctrlc = "3.1" dashmap = "5.5.3" diff --git a/core/lib/config/src/configs/consensus.rs b/core/lib/config/src/configs/consensus.rs index 918d8f4adab9..7f5a0f56aa17 100644 --- a/core/lib/config/src/configs/consensus.rs +++ b/core/lib/config/src/configs/consensus.rs @@ -115,6 +115,7 @@ impl RpcConfig { /// Config (shared between main node and external node). #[derive(Clone, Debug, PartialEq)] pub struct ConsensusConfig { + pub port: Option, /// Local socket address to listen for the incoming connections. pub server_addr: std::net::SocketAddr, /// Public address of this node (should forward to `server_addr`) diff --git a/core/lib/config/src/testonly.rs b/core/lib/config/src/testonly.rs index a6ff30e04a90..960808aa6a69 100644 --- a/core/lib/config/src/testonly.rs +++ b/core/lib/config/src/testonly.rs @@ -802,6 +802,7 @@ impl Distribution for EncodeDist { fn sample(&self, rng: &mut R) -> configs::consensus::ConsensusConfig { use configs::consensus::{ConsensusConfig, Host, NodePublicKey}; ConsensusConfig { + port: self.sample(rng), server_addr: self.sample(rng), public_addr: Host(self.sample(rng)), max_payload_size: self.sample(rng), diff --git a/core/lib/dal/.sqlx/query-369f8f652335176ab22ee45fd6f1717e73c5e6b063be3553d82bfecb98334980.json b/core/lib/dal/.sqlx/query-369f8f652335176ab22ee45fd6f1717e73c5e6b063be3553d82bfecb98334980.json deleted file mode 100644 index 7245fa3059ed..000000000000 --- a/core/lib/dal/.sqlx/query-369f8f652335176ab22ee45fd6f1717e73c5e6b063be3553d82bfecb98334980.json +++ /dev/null @@ -1,24 +0,0 @@ -{ - "db_name": "PostgreSQL", - "query": "\n SELECT\n bytecode\n FROM\n (\n SELECT\n *\n FROM\n storage_logs\n WHERE\n storage_logs.hashed_key = $1\n AND storage_logs.miniblock_number <= $2\n ORDER BY\n 
storage_logs.miniblock_number DESC,\n storage_logs.operation_number DESC\n LIMIT\n 1\n ) t\n JOIN factory_deps ON value = factory_deps.bytecode_hash\n WHERE\n value != $3\n ", - "describe": { - "columns": [ - { - "ordinal": 0, - "name": "bytecode", - "type_info": "Bytea" - } - ], - "parameters": { - "Left": [ - "Bytea", - "Int8", - "Bytea" - ] - }, - "nullable": [ - false - ] - }, - "hash": "369f8f652335176ab22ee45fd6f1717e73c5e6b063be3553d82bfecb98334980" -} diff --git a/core/lib/dal/.sqlx/query-c61b15a9591e65eab7d226f5b9035bfda52cc5bb5a4bfb11270a2a784491c967.json b/core/lib/dal/.sqlx/query-c61b15a9591e65eab7d226f5b9035bfda52cc5bb5a4bfb11270a2a784491c967.json new file mode 100644 index 000000000000..20b791991650 --- /dev/null +++ b/core/lib/dal/.sqlx/query-c61b15a9591e65eab7d226f5b9035bfda52cc5bb5a4bfb11270a2a784491c967.json @@ -0,0 +1,30 @@ +{ + "db_name": "PostgreSQL", + "query": "\n SELECT\n bytecode_hash,\n bytecode\n FROM\n (\n SELECT\n value\n FROM\n storage_logs\n WHERE\n storage_logs.hashed_key = $1\n AND storage_logs.miniblock_number <= $2\n ORDER BY\n storage_logs.miniblock_number DESC,\n storage_logs.operation_number DESC\n LIMIT\n 1\n ) deploy_log\n JOIN factory_deps ON value = factory_deps.bytecode_hash\n WHERE\n value != $3\n ", + "describe": { + "columns": [ + { + "ordinal": 0, + "name": "bytecode_hash", + "type_info": "Bytea" + }, + { + "ordinal": 1, + "name": "bytecode", + "type_info": "Bytea" + } + ], + "parameters": { + "Left": [ + "Bytea", + "Int8", + "Bytea" + ] + }, + "nullable": [ + false, + false + ] + }, + "hash": "c61b15a9591e65eab7d226f5b9035bfda52cc5bb5a4bfb11270a2a784491c967" +} diff --git a/core/lib/dal/src/storage_web3_dal.rs b/core/lib/dal/src/storage_web3_dal.rs index 78c1dc0c3d0d..10d2cfe61525 100644 --- a/core/lib/dal/src/storage_web3_dal.rs +++ b/core/lib/dal/src/storage_web3_dal.rs @@ -15,6 +15,13 @@ use zksync_utils::h256_to_u256; use crate::{models::storage_block::ResolvedL1BatchForL2Block, Core, CoreDal}; +/// Raw bytecode 
information returned by [`StorageWeb3Dal::get_contract_code_unchecked()`]. +#[derive(Debug)] +pub struct RawBytecode { + pub bytecode_hash: H256, + pub bytecode: Vec, +} + #[derive(Debug)] pub struct StorageWeb3Dal<'a, 'c> { pub(crate) storage: &'a mut Connection<'c, Core>, @@ -234,16 +241,17 @@ impl StorageWeb3Dal<'_, '_> { &mut self, address: Address, block_number: L2BlockNumber, - ) -> DalResult>> { + ) -> DalResult> { let hashed_key = get_code_key(&address).hashed_key(); let row = sqlx::query!( r#" SELECT + bytecode_hash, bytecode FROM ( SELECT - * + value FROM storage_logs WHERE @@ -254,7 +262,7 @@ impl StorageWeb3Dal<'_, '_> { storage_logs.operation_number DESC LIMIT 1 - ) t + ) deploy_log JOIN factory_deps ON value = factory_deps.bytecode_hash WHERE value != $3 @@ -268,7 +276,11 @@ impl StorageWeb3Dal<'_, '_> { .with_arg("block_number", &block_number) .fetch_optional(self.storage) .await?; - Ok(row.map(|row| row.bytecode)) + + Ok(row.map(|row| RawBytecode { + bytecode_hash: H256::from_slice(&row.bytecode_hash), + bytecode: row.bytecode, + })) } /// Given bytecode hash, returns bytecode and L2 block number at which it was inserted. 
diff --git a/core/lib/multivm/Cargo.toml b/core/lib/multivm/Cargo.toml index ab418d24cd12..e49086a6b8b1 100644 --- a/core/lib/multivm/Cargo.toml +++ b/core/lib/multivm/Cargo.toml @@ -42,5 +42,6 @@ ethabi.workspace = true [dev-dependencies] assert_matches.workspace = true pretty_assertions.workspace = true +test-casing.workspace = true zksync_test_account.workspace = true zksync_eth_signer.workspace = true diff --git a/core/lib/multivm/src/versions/vm_latest/tests/evm_emulator.rs b/core/lib/multivm/src/versions/vm_latest/tests/evm_emulator.rs index ca8157b170d4..34780b73eb05 100644 --- a/core/lib/multivm/src/versions/vm_latest/tests/evm_emulator.rs +++ b/core/lib/multivm/src/versions/vm_latest/tests/evm_emulator.rs @@ -1,28 +1,47 @@ +use std::collections::HashMap; + use ethabi::Token; -use zksync_contracts::read_bytecode; -use zksync_system_constants::{CONTRACT_DEPLOYER_ADDRESS, KNOWN_CODES_STORAGE_ADDRESS}; -use zksync_types::{get_code_key, get_known_code_key, Execute, H256}; -use zksync_utils::{be_words_to_bytes, bytecode::hash_bytecode, h256_to_u256}; -use zksync_vm_interface::VmInterfaceExt; +use test_casing::{test_casing, Product}; +use zksync_contracts::{load_contract, read_bytecode, SystemContractCode}; +use zksync_system_constants::{ + CONTRACT_DEPLOYER_ADDRESS, KNOWN_CODES_STORAGE_ADDRESS, L2_BASE_TOKEN_ADDRESS, +}; +use zksync_test_account::TxType; +use zksync_types::{ + get_code_key, get_known_code_key, + utils::{key_for_eth_balance, storage_key_for_eth_balance}, + AccountTreeId, Address, Execute, StorageKey, H256, U256, +}; +use zksync_utils::{ + be_words_to_bytes, + bytecode::{hash_bytecode, hash_evm_bytecode}, + bytes_to_be_words, h256_to_u256, +}; use crate::{ - interface::{storage::InMemoryStorage, TxExecutionMode}, + interface::{ + storage::InMemoryStorage, TxExecutionMode, VmExecutionResultAndLogs, VmInterfaceExt, + }, versions::testonly::default_system_env, - vm_latest::{tests::tester::VmTesterBuilder, utils::hash_evm_bytecode, HistoryEnabled}, + 
vm_latest::{ + tests::tester::{VmTester, VmTesterBuilder}, + HistoryEnabled, + }, }; const MOCK_DEPLOYER_PATH: &str = "etc/contracts-test-data/artifacts-zk/contracts/mock-evm/mock-evm.sol/MockContractDeployer.json"; const MOCK_KNOWN_CODE_STORAGE_PATH: &str = "etc/contracts-test-data/artifacts-zk/contracts/mock-evm/mock-evm.sol/MockKnownCodeStorage.json"; +const MOCK_EMULATOR_PATH: &str = + "etc/contracts-test-data/artifacts-zk/contracts/mock-evm/mock-evm.sol/MockEvmEmulator.json"; +const RECURSIVE_CONTRACT_PATH: &str = "etc/contracts-test-data/artifacts-zk/contracts/mock-evm/mock-evm.sol/NativeRecursiveContract.json"; +const INCREMENTING_CONTRACT_PATH: &str = "etc/contracts-test-data/artifacts-zk/contracts/mock-evm/mock-evm.sol/IncrementingContract.json"; -#[test] -fn tracing_evm_contract_deployment() { +fn override_system_contracts(storage: &mut InMemoryStorage) { let mock_deployer = read_bytecode(MOCK_DEPLOYER_PATH); let mock_deployer_hash = hash_bytecode(&mock_deployer); let mock_known_code_storage = read_bytecode(MOCK_KNOWN_CODE_STORAGE_PATH); let mock_known_code_storage_hash = hash_bytecode(&mock_known_code_storage); - // Override - let mut storage = InMemoryStorage::with_system_contracts(hash_bytecode); storage.set_value(get_code_key(&CONTRACT_DEPLOYER_ADDRESS), mock_deployer_hash); storage.set_value( get_known_code_key(&mock_deployer_hash), @@ -38,6 +57,81 @@ fn tracing_evm_contract_deployment() { ); storage.store_factory_dep(mock_deployer_hash, mock_deployer); storage.store_factory_dep(mock_known_code_storage_hash, mock_known_code_storage); +} + +#[derive(Debug)] +struct EvmTestBuilder { + deploy_emulator: bool, + storage: InMemoryStorage, + evm_contract_addresses: Vec
, +} + +impl EvmTestBuilder { + fn new(deploy_emulator: bool, evm_contract_address: Address) -> Self { + Self { + deploy_emulator, + storage: InMemoryStorage::with_system_contracts(hash_bytecode), + evm_contract_addresses: vec![evm_contract_address], + } + } + + fn with_mock_deployer(mut self) -> Self { + override_system_contracts(&mut self.storage); + self + } + + fn with_evm_address(mut self, address: Address) -> Self { + self.evm_contract_addresses.push(address); + self + } + + fn build(self) -> VmTester { + let mock_emulator = read_bytecode(MOCK_EMULATOR_PATH); + let mut storage = self.storage; + let mut system_env = default_system_env(); + if self.deploy_emulator { + let evm_bytecode: Vec<_> = (0..32).collect(); + let evm_bytecode_hash = hash_evm_bytecode(&evm_bytecode); + storage.set_value( + get_known_code_key(&evm_bytecode_hash), + H256::from_low_u64_be(1), + ); + for evm_address in self.evm_contract_addresses { + storage.set_value(get_code_key(&evm_address), evm_bytecode_hash); + } + + system_env.base_system_smart_contracts.evm_emulator = Some(SystemContractCode { + hash: hash_bytecode(&mock_emulator), + code: bytes_to_be_words(mock_emulator), + }); + } else { + let emulator_hash = hash_bytecode(&mock_emulator); + storage.set_value(get_known_code_key(&emulator_hash), H256::from_low_u64_be(1)); + storage.store_factory_dep(emulator_hash, mock_emulator); + + for evm_address in self.evm_contract_addresses { + storage.set_value(get_code_key(&evm_address), emulator_hash); + // Set `isUserSpace` in the emulator storage to `true`, so that it skips emulator-specific checks + storage.set_value( + StorageKey::new(AccountTreeId::new(evm_address), H256::zero()), + H256::from_low_u64_be(1), + ); + } + } + + VmTesterBuilder::new(HistoryEnabled) + .with_system_env(system_env) + .with_storage(storage) + .with_execution_mode(TxExecutionMode::VerifyExecute) + .with_random_rich_accounts(1) + .build() + } +} + +#[test] +fn tracing_evm_contract_deployment() { + let mut storage 
= InMemoryStorage::with_system_contracts(hash_bytecode); + override_system_contracts(&mut storage); let mut system_env = default_system_env(); // The EVM emulator will not be accessed, so we set it to a dummy value. @@ -51,7 +145,7 @@ fn tracing_evm_contract_deployment() { .build(); let account = &mut vm.rich_accounts[0]; - let args = [Token::Bytes((0..=u8::MAX).collect())]; + let args = [Token::Bytes((0..32).collect())]; let evm_bytecode = ethabi::encode(&args); let expected_bytecode_hash = hash_evm_bytecode(&evm_bytecode); let execute = Execute::for_deploy(expected_bytecode_hash, vec![0; 32], &args); @@ -74,3 +168,343 @@ fn tracing_evm_contract_deployment() { evm_bytecode ); } + +#[test] +fn mock_emulator_basics() { + let called_address = Address::repeat_byte(0x23); + let mut vm = EvmTestBuilder::new(true, called_address).build(); + let account = &mut vm.rich_accounts[0]; + let tx = account.get_l2_tx_for_execute( + Execute { + contract_address: Some(called_address), + calldata: vec![], + value: 0.into(), + factory_deps: vec![], + }, + None, + ); + + let (_, vm_result) = vm + .vm + .execute_transaction_with_bytecode_compression(tx, true); + assert!(!vm_result.result.is_failed(), "{:?}", vm_result.result); +} + +const RECIPIENT_ADDRESS: Address = Address::repeat_byte(0x12); + +/// `deploy_emulator = false` here and below tests the mock emulator as an ordinary contract (i.e., sanity-checks its logic). 
+#[test_casing(2, [false, true])] +#[test] +fn mock_emulator_with_payment(deploy_emulator: bool) { + let mock_emulator_abi = load_contract(MOCK_EMULATOR_PATH); + let mut vm = EvmTestBuilder::new(deploy_emulator, RECIPIENT_ADDRESS).build(); + + let mut current_balance = U256::zero(); + for i in 1_u64..=5 { + let transferred_value = (1_000_000_000 * i).into(); + let vm_result = test_payment( + &mut vm, + &mock_emulator_abi, + &mut current_balance, + transferred_value, + ); + + let balance_storage_logs = vm_result.logs.storage_logs.iter().filter_map(|log| { + (*log.log.key.address() == L2_BASE_TOKEN_ADDRESS) + .then_some((*log.log.key.key(), h256_to_u256(log.log.value))) + }); + let balances: HashMap<_, _> = balance_storage_logs.collect(); + assert_eq!( + balances[&key_for_eth_balance(&RECIPIENT_ADDRESS)], + current_balance + ); + } +} + +fn test_payment( + vm: &mut VmTester, + mock_emulator_abi: ðabi::Contract, + balance: &mut U256, + transferred_value: U256, +) -> VmExecutionResultAndLogs { + *balance += transferred_value; + let test_payment_fn = mock_emulator_abi.function("testPayment").unwrap(); + let account = &mut vm.rich_accounts[0]; + let tx = account.get_l2_tx_for_execute( + Execute { + contract_address: Some(RECIPIENT_ADDRESS), + calldata: test_payment_fn + .encode_input(&[Token::Uint(transferred_value), Token::Uint(*balance)]) + .unwrap(), + value: transferred_value, + factory_deps: vec![], + }, + None, + ); + + let (_, vm_result) = vm + .vm + .execute_transaction_with_bytecode_compression(tx, true); + assert!(!vm_result.result.is_failed(), "{vm_result:?}"); + vm_result +} + +#[test_casing(4, Product(([false, true], [false, true])))] +#[test] +fn mock_emulator_with_recursion(deploy_emulator: bool, is_external: bool) { + let mock_emulator_abi = load_contract(MOCK_EMULATOR_PATH); + let recipient_address = Address::repeat_byte(0x12); + let mut vm = EvmTestBuilder::new(deploy_emulator, recipient_address).build(); + let account = &mut vm.rich_accounts[0]; + + 
let test_recursion_fn = mock_emulator_abi + .function(if is_external { + "testExternalRecursion" + } else { + "testRecursion" + }) + .unwrap(); + let mut expected_value = U256::one(); + let depth = 50_u32; + for i in 2..=depth { + expected_value *= i; + } + + let factory_deps = if is_external { + vec![read_bytecode(RECURSIVE_CONTRACT_PATH)] + } else { + vec![] + }; + let tx = account.get_l2_tx_for_execute( + Execute { + contract_address: Some(recipient_address), + calldata: test_recursion_fn + .encode_input(&[Token::Uint(depth.into()), Token::Uint(expected_value)]) + .unwrap(), + value: 0.into(), + factory_deps, + }, + None, + ); + let (_, vm_result) = vm + .vm + .execute_transaction_with_bytecode_compression(tx, true); + assert!(!vm_result.result.is_failed(), "{vm_result:?}"); +} + +#[test] +fn calling_to_mock_emulator_from_native_contract() { + let recipient_address = Address::repeat_byte(0x12); + let mut vm = EvmTestBuilder::new(true, recipient_address).build(); + let account = &mut vm.rich_accounts[0]; + + // Deploy a native contract. + let native_contract = read_bytecode(RECURSIVE_CONTRACT_PATH); + let native_contract_abi = load_contract(RECURSIVE_CONTRACT_PATH); + let deploy_tx = account.get_deploy_tx( + &native_contract, + Some(&[Token::Address(recipient_address)]), + TxType::L2, + ); + let (_, vm_result) = vm + .vm + .execute_transaction_with_bytecode_compression(deploy_tx.tx, true); + assert!(!vm_result.result.is_failed(), "{:?}", vm_result.result); + + // Call from the native contract to the EVM emulator. 
+ let test_fn = native_contract_abi.function("recurse").unwrap(); + let test_tx = account.get_l2_tx_for_execute( + Execute { + contract_address: Some(deploy_tx.address), + calldata: test_fn.encode_input(&[Token::Uint(50.into())]).unwrap(), + value: Default::default(), + factory_deps: vec![], + }, + None, + ); + let (_, vm_result) = vm + .vm + .execute_transaction_with_bytecode_compression(test_tx, true); + assert!(!vm_result.result.is_failed(), "{:?}", vm_result.result); +} + +#[test] +fn mock_emulator_with_deployment() { + let contract_address = Address::repeat_byte(0xaa); + let mut vm = EvmTestBuilder::new(true, contract_address) + .with_mock_deployer() + .build(); + let account = &mut vm.rich_accounts[0]; + + let mock_emulator_abi = load_contract(MOCK_EMULATOR_PATH); + let new_evm_bytecode = vec![0xfe; 96]; + let new_evm_bytecode_hash = hash_evm_bytecode(&new_evm_bytecode); + + let test_fn = mock_emulator_abi.function("testDeploymentAndCall").unwrap(); + let test_tx = account.get_l2_tx_for_execute( + Execute { + contract_address: Some(contract_address), + calldata: test_fn + .encode_input(&[ + Token::FixedBytes(new_evm_bytecode_hash.0.into()), + Token::Bytes(new_evm_bytecode.clone()), + ]) + .unwrap(), + value: 0.into(), + factory_deps: vec![], + }, + None, + ); + let (_, vm_result) = vm + .vm + .execute_transaction_with_bytecode_compression(test_tx, true); + assert!(!vm_result.result.is_failed(), "{vm_result:?}"); + + let factory_deps = vm_result.new_known_factory_deps.unwrap(); + assert_eq!( + factory_deps, + HashMap::from([(new_evm_bytecode_hash, new_evm_bytecode)]) + ); +} + +#[test] +fn mock_emulator_with_delegate_call() { + let evm_contract_address = Address::repeat_byte(0xaa); + let other_evm_contract_address = Address::repeat_byte(0xbb); + let mut builder = EvmTestBuilder::new(true, evm_contract_address); + builder.storage.set_value( + storage_key_for_eth_balance(&evm_contract_address), + H256::from_low_u64_be(1_000_000), + ); + 
builder.storage.set_value( + storage_key_for_eth_balance(&other_evm_contract_address), + H256::from_low_u64_be(2_000_000), + ); + let mut vm = builder.with_evm_address(other_evm_contract_address).build(); + let account = &mut vm.rich_accounts[0]; + + // Deploy a native contract. + let native_contract = read_bytecode(INCREMENTING_CONTRACT_PATH); + let native_contract_abi = load_contract(INCREMENTING_CONTRACT_PATH); + let deploy_tx = account.get_deploy_tx(&native_contract, None, TxType::L2); + let (_, vm_result) = vm + .vm + .execute_transaction_with_bytecode_compression(deploy_tx.tx, true); + assert!(!vm_result.result.is_failed(), "{:?}", vm_result.result); + + let test_fn = native_contract_abi.function("testDelegateCall").unwrap(); + // Delegate to the native contract from EVM. + test_delegate_call(&mut vm, test_fn, evm_contract_address, deploy_tx.address); + // Delegate to EVM from the native contract. + test_delegate_call(&mut vm, test_fn, deploy_tx.address, evm_contract_address); + // Delegate to EVM from EVM. 
+ test_delegate_call( + &mut vm, + test_fn, + evm_contract_address, + other_evm_contract_address, + ); +} + +fn test_delegate_call( + vm: &mut VmTester, + test_fn: ðabi::Function, + from: Address, + to: Address, +) { + let account = &mut vm.rich_accounts[0]; + let test_tx = account.get_l2_tx_for_execute( + Execute { + contract_address: Some(from), + calldata: test_fn.encode_input(&[Token::Address(to)]).unwrap(), + value: 0.into(), + factory_deps: vec![], + }, + None, + ); + let (_, vm_result) = vm + .vm + .execute_transaction_with_bytecode_compression(test_tx, true); + assert!(!vm_result.result.is_failed(), "{vm_result:?}"); +} + +#[test] +fn mock_emulator_with_static_call() { + let evm_contract_address = Address::repeat_byte(0xaa); + let other_evm_contract_address = Address::repeat_byte(0xbb); + let mut builder = EvmTestBuilder::new(true, evm_contract_address); + builder.storage.set_value( + storage_key_for_eth_balance(&evm_contract_address), + H256::from_low_u64_be(1_000_000), + ); + builder.storage.set_value( + storage_key_for_eth_balance(&other_evm_contract_address), + H256::from_low_u64_be(2_000_000), + ); + // Set differing read values for tested contracts. The slot index is defined in the contract. + let value_slot = H256::from_low_u64_be(0x123); + builder.storage.set_value( + StorageKey::new(AccountTreeId::new(evm_contract_address), value_slot), + H256::from_low_u64_be(100), + ); + builder.storage.set_value( + StorageKey::new(AccountTreeId::new(other_evm_contract_address), value_slot), + H256::from_low_u64_be(200), + ); + let mut vm = builder.with_evm_address(other_evm_contract_address).build(); + let account = &mut vm.rich_accounts[0]; + + // Deploy a native contract. 
+ let native_contract = read_bytecode(INCREMENTING_CONTRACT_PATH); + let native_contract_abi = load_contract(INCREMENTING_CONTRACT_PATH); + let deploy_tx = account.get_deploy_tx(&native_contract, None, TxType::L2); + let (_, vm_result) = vm + .vm + .execute_transaction_with_bytecode_compression(deploy_tx.tx, true); + assert!(!vm_result.result.is_failed(), "{:?}", vm_result.result); + + let test_fn = native_contract_abi.function("testStaticCall").unwrap(); + // Call to the native contract from EVM. + test_static_call(&mut vm, test_fn, evm_contract_address, deploy_tx.address, 0); + // Call to EVM from the native contract. + test_static_call( + &mut vm, + test_fn, + deploy_tx.address, + evm_contract_address, + 100, + ); + // Call to EVM from EVM. + test_static_call( + &mut vm, + test_fn, + evm_contract_address, + other_evm_contract_address, + 200, + ); +} + +fn test_static_call( + vm: &mut VmTester, + test_fn: ðabi::Function, + from: Address, + to: Address, + expected_value: u64, +) { + let account = &mut vm.rich_accounts[0]; + let test_tx = account.get_l2_tx_for_execute( + Execute { + contract_address: Some(from), + calldata: test_fn + .encode_input(&[Token::Address(to), Token::Uint(expected_value.into())]) + .unwrap(), + value: 0.into(), + factory_deps: vec![], + }, + None, + ); + let (_, vm_result) = vm + .vm + .execute_transaction_with_bytecode_compression(test_tx, true); + assert!(!vm_result.result.is_failed(), "{vm_result:?}"); +} diff --git a/core/lib/multivm/src/versions/vm_latest/tracers/evm_deploy_tracer.rs b/core/lib/multivm/src/versions/vm_latest/tracers/evm_deploy_tracer.rs index d91ee13a920a..becc4f225276 100644 --- a/core/lib/multivm/src/versions/vm_latest/tracers/evm_deploy_tracer.rs +++ b/core/lib/multivm/src/versions/vm_latest/tracers/evm_deploy_tracer.rs @@ -8,16 +8,14 @@ use zk_evm_1_5_0::{ }, }; use zksync_types::{CONTRACT_DEPLOYER_ADDRESS, KNOWN_CODES_STORAGE_ADDRESS}; -use zksync_utils::{bytes_to_be_words, h256_to_u256}; +use 
zksync_utils::{bytecode::hash_evm_bytecode, bytes_to_be_words, h256_to_u256}; use zksync_vm_interface::storage::StoragePtr; use super::{traits::VmTracer, utils::read_pointer}; use crate::{ interface::{storage::WriteStorage, tracer::TracerExecutionStatus}, tracers::dynamic::vm_1_5_0::DynTracer, - vm_latest::{ - utils::hash_evm_bytecode, BootloaderState, HistoryMode, SimpleMemory, ZkSyncVmState, - }, + vm_latest::{BootloaderState, HistoryMode, SimpleMemory, ZkSyncVmState}, }; /// Tracer responsible for collecting information about EVM deploys and providing those diff --git a/core/lib/multivm/src/versions/vm_latest/utils/mod.rs b/core/lib/multivm/src/versions/vm_latest/utils/mod.rs index e07d3eda7c4c..aeb66755f514 100644 --- a/core/lib/multivm/src/versions/vm_latest/utils/mod.rs +++ b/core/lib/multivm/src/versions/vm_latest/utils/mod.rs @@ -1,11 +1,7 @@ //! Utility functions for the VM. use once_cell::sync::Lazy; -use zk_evm_1_5_0::{ - aux_structures::MemoryPage, - sha2, - zkevm_opcode_defs::{BlobSha256Format, VersionedHashLen32}, -}; +use zk_evm_1_5_0::aux_structures::MemoryPage; use zksync_types::{H256, KNOWN_CODES_STORAGE_ADDRESS}; use zksync_vm_interface::VmEvent; @@ -15,22 +11,6 @@ pub(crate) mod logs; pub mod overhead; pub mod transaction_encoding; -pub(crate) fn hash_evm_bytecode(bytecode: &[u8]) -> H256 { - use sha2::{Digest, Sha256}; - let mut hasher = Sha256::new(); - let len = bytecode.len() as u16; - hasher.update(bytecode); - let result = hasher.finalize(); - - let mut output = [0u8; 32]; - output[..].copy_from_slice(result.as_slice()); - output[0] = BlobSha256Format::VERSION_BYTE; - output[1] = 0; - output[2..4].copy_from_slice(&len.to_be_bytes()); - - H256(output) -} - pub const fn heap_page_from_base(base: MemoryPage) -> MemoryPage { MemoryPage(base.0 + 2) } diff --git a/core/lib/protobuf_config/src/consensus.rs b/core/lib/protobuf_config/src/consensus.rs index 81cad437fe4b..2219b6a82ea8 100644 --- a/core/lib/protobuf_config/src/consensus.rs +++ 
b/core/lib/protobuf_config/src/consensus.rs @@ -148,6 +148,7 @@ impl ProtoRepr for proto::Config { }; Ok(Self::Type { + port: self.port.and_then(|x| x.try_into().ok()), server_addr: required(&self.server_addr) .and_then(|x| Ok(x.parse()?)) .context("server_addr")?, @@ -182,6 +183,7 @@ impl ProtoRepr for proto::Config { fn build(this: &Self::Type) -> Self { Self { + port: this.port.map(|x| x.into()), server_addr: Some(this.server_addr.to_string()), public_addr: Some(this.public_addr.0.clone()), max_payload_size: Some(this.max_payload_size.try_into().unwrap()), diff --git a/core/lib/protobuf_config/src/lib.rs b/core/lib/protobuf_config/src/lib.rs index 48bc5f1ce137..68f7f699de20 100644 --- a/core/lib/protobuf_config/src/lib.rs +++ b/core/lib/protobuf_config/src/lib.rs @@ -64,7 +64,7 @@ pub fn read_optional_repr(field: &Option

) -> Option { .transpose() // This error will printed, only if the config partially filled, allows to debug config issues easier .map_err(|err| { - tracing::error!("Failed to serialize config: {err}"); + tracing::error!("Failed to parse config: {err:#}"); err }) .ok() diff --git a/core/lib/protobuf_config/src/proto/core/consensus.proto b/core/lib/protobuf_config/src/proto/core/consensus.proto index 92527df739aa..9b0d69e7270c 100644 --- a/core/lib/protobuf_config/src/proto/core/consensus.proto +++ b/core/lib/protobuf_config/src/proto/core/consensus.proto @@ -70,6 +70,9 @@ message Config { reserved 3; reserved "validators"; + // Port to listen on, for incoming TCP connections. + optional uint32 port = 12; + // IP:port to listen on, for incoming TCP connections. // Use `0.0.0.0:` to listen on all network interfaces (i.e. on all IPs exposed by this VM). optional string server_addr = 1; // required; IpAddr diff --git a/core/lib/utils/src/bytecode.rs b/core/lib/utils/src/bytecode.rs index 48bdb4330207..01cce5bc34d0 100644 --- a/core/lib/utils/src/bytecode.rs +++ b/core/lib/utils/src/bytecode.rs @@ -1,5 +1,6 @@ // FIXME: move to basic_types? +use zk_evm::k256::sha2::{Digest, Sha256}; use zksync_basic_types::H256; use crate::bytes_to_chunks; @@ -40,6 +41,7 @@ pub fn validate_bytecode(code: &[u8]) -> Result<(), InvalidBytecodeError> { Ok(()) } +/// Hashes the provided EraVM bytecode. pub fn hash_bytecode(code: &[u8]) -> H256 { let chunked_code = bytes_to_chunks(code); let hash = zk_evm::zkevm_opcode_defs::utils::bytecode_to_code_hash(&chunked_code) @@ -55,3 +57,62 @@ pub fn bytecode_len_in_words(bytecodehash: &H256) -> u16 { pub fn bytecode_len_in_bytes(bytecodehash: H256) -> usize { bytecode_len_in_words(&bytecodehash) as usize * 32 } + +/// Bytecode marker encoded in the first byte of the bytecode hash. +#[derive(Debug, Clone, Copy, PartialEq, Eq, Hash)] +#[repr(u8)] +pub enum BytecodeMarker { + /// EraVM bytecode marker (1). + EraVm = 1, + /// EVM bytecode marker (2). 
+ Evm = 2, +} + +impl BytecodeMarker { + /// Parses a marker from the bytecode hash. + pub fn new(bytecode_hash: H256) -> Option { + Some(match bytecode_hash.as_bytes()[0] { + val if val == Self::EraVm as u8 => Self::EraVm, + val if val == Self::Evm as u8 => Self::Evm, + _ => return None, + }) + } +} + +/// Hashes the provided EVM bytecode. The bytecode must be padded to an odd number of 32-byte words; +/// bytecodes stored in the known codes storage satisfy this requirement automatically. +pub fn hash_evm_bytecode(bytecode: &[u8]) -> H256 { + validate_bytecode(bytecode).expect("invalid EVM bytecode"); + + let mut hasher = Sha256::new(); + let len = bytecode.len() as u16; + hasher.update(bytecode); + let result = hasher.finalize(); + + let mut output = [0u8; 32]; + output[..].copy_from_slice(result.as_slice()); + output[0] = BytecodeMarker::Evm as u8; + output[1] = 0; + output[2..4].copy_from_slice(&len.to_be_bytes()); + + H256(output) +} + +#[cfg(test)] +mod tests { + use super::*; + + #[test] + fn bytecode_markers_are_valid() { + let bytecode_hash = hash_bytecode(&[0; 32]); + assert_eq!( + BytecodeMarker::new(bytecode_hash), + Some(BytecodeMarker::EraVm) + ); + let bytecode_hash = hash_evm_bytecode(&[0; 32]); + assert_eq!( + BytecodeMarker::new(bytecode_hash), + Some(BytecodeMarker::Evm) + ); + } +} diff --git a/core/node/api_server/Cargo.toml b/core/node/api_server/Cargo.toml index d0723a9d23e7..067b9b3e3722 100644 --- a/core/node/api_server/Cargo.toml +++ b/core/node/api_server/Cargo.toml @@ -61,4 +61,5 @@ zksync_node_genesis.workspace = true zksync_node_test_utils.workspace = true assert_matches.workspace = true +const-decoder.workspace = true test-casing.workspace = true diff --git a/core/node/api_server/src/testonly.rs b/core/node/api_server/src/testonly.rs index 8dc7915385a1..45ed802d68f5 100644 --- a/core/node/api_server/src/testonly.rs +++ b/core/node/api_server/src/testonly.rs @@ -2,6 +2,7 @@ use std::{collections::HashMap, iter}; +use 
const_decoder::Decoder; use zk_evm_1_5_0::zkevm_opcode_defs::decoding::{EncodingModeProduction, VmEncodingMode}; use zksync_contracts::{ eth_contract, get_loadnext_contract, load_contract, read_bytecode, @@ -26,6 +27,30 @@ use zksync_types::{ }; use zksync_utils::{address_to_u256, u256_to_h256}; +pub(crate) const RAW_EVM_BYTECODE: &[u8] = &const_decoder::decode!( + Decoder::Hex, + b"00000000000000000000000000000000000000000000000000000000000001266080604052348015\ + 600e575f80fd5b50600436106030575f3560e01c8063816898ff146034578063fb5343f314604c57\ + 5b5f80fd5b604a60048036038101906046919060a6565b6066565b005b6052606f565b604051605d\ + 919060d9565b60405180910390f35b805f8190555050565b5f5481565b5f80fd5b5f819050919050\ + 565b6088816078565b81146091575f80fd5b50565b5f8135905060a0816081565b92915050565b5f\ + 6020828403121560b85760b76074565b5b5f60c3848285016094565b91505092915050565b60d381\ + 6078565b82525050565b5f60208201905060ea5f83018460cc565b9291505056fea2646970667358\ + 221220caca1247066da378f2ec77c310f2ae51576272367b4fa11cc4350af4e9ce4d0964736f6c63\ + 4300081a00330000000000000000000000000000000000000000000000000000" +); +pub(crate) const PROCESSED_EVM_BYTECODE: &[u8] = &const_decoder::decode!( + Decoder::Hex, + b"6080604052348015600e575f80fd5b50600436106030575f3560e01c8063816898ff146034578063\ + fb5343f314604c575b5f80fd5b604a60048036038101906046919060a6565b6066565b005b605260\ + 6f565b604051605d919060d9565b60405180910390f35b805f8190555050565b5f5481565b5f80fd\ + 5b5f819050919050565b6088816078565b81146091575f80fd5b50565b5f8135905060a081608156\ + 5b92915050565b5f6020828403121560b85760b76074565b5b5f60c3848285016094565b91505092\ + 915050565b60d3816078565b82525050565b5f60208201905060ea5f83018460cc565b9291505056\ + fea2646970667358221220caca1247066da378f2ec77c310f2ae51576272367b4fa11cc4350af4e9\ + ce4d0964736f6c634300081a0033" +); + const EXPENSIVE_CONTRACT_PATH: &str = "etc/contracts-test-data/artifacts-zk/contracts/expensive/expensive.sol/Expensive.json"; const 
PRECOMPILES_CONTRACT_PATH: &str = diff --git a/core/node/api_server/src/utils.rs b/core/node/api_server/src/utils.rs index 6769e773dc77..c7a1134682bf 100644 --- a/core/node/api_server/src/utils.rs +++ b/core/node/api_server/src/utils.rs @@ -6,9 +6,33 @@ use std::{ time::{Duration, Instant}, }; +use anyhow::Context; use zksync_dal::{Connection, Core, DalError}; +use zksync_multivm::circuit_sequencer_api_latest::boojum::ethereum_types::U256; use zksync_web3_decl::error::Web3Error; +pub(crate) fn prepare_evm_bytecode(raw: &[u8]) -> anyhow::Result> { + // EVM bytecodes are prefixed with a big-endian `U256` bytecode length. + let bytecode_len_bytes = raw.get(..32).context("length < 32")?; + let bytecode_len = U256::from_big_endian(bytecode_len_bytes); + let bytecode_len: usize = bytecode_len + .try_into() + .map_err(|_| anyhow::anyhow!("length ({bytecode_len}) overflow"))?; + let bytecode = raw.get(32..(32 + bytecode_len)).with_context(|| { + format!( + "prefixed length ({bytecode_len}) exceeds real length ({})", + raw.len() - 32 + ) + })?; + // Since slicing above succeeded, this one is safe. + let padding = &raw[(32 + bytecode_len)..]; + anyhow::ensure!( + padding.iter().all(|&b| b == 0), + "bytecode padding contains non-zero bytes" + ); + Ok(bytecode.to_vec()) +} + /// Opens a readonly transaction over the specified connection. pub(crate) async fn open_readonly_transaction<'r>( conn: &'r mut Connection<'_, Core>, @@ -66,3 +90,15 @@ macro_rules! 
report_filter { ReportFilter::new($interval, &LAST_TIMESTAMP) }}; } + +#[cfg(test)] +mod tests { + use super::*; + use crate::testonly::{PROCESSED_EVM_BYTECODE, RAW_EVM_BYTECODE}; + + #[test] + fn preparing_evm_bytecode() { + let prepared = prepare_evm_bytecode(RAW_EVM_BYTECODE).unwrap(); + assert_eq!(prepared, PROCESSED_EVM_BYTECODE); + } +} diff --git a/core/node/api_server/src/web3/namespaces/eth.rs b/core/node/api_server/src/web3/namespaces/eth.rs index 1248ef07aeee..4439fc257cfb 100644 --- a/core/node/api_server/src/web3/namespaces/eth.rs +++ b/core/node/api_server/src/web3/namespaces/eth.rs @@ -12,7 +12,7 @@ use zksync_types::{ web3::{self, Bytes, SyncInfo, SyncState}, AccountTreeId, L2BlockNumber, StorageKey, H256, L2_BASE_TOKEN_ADDRESS, U256, }; -use zksync_utils::u256_to_h256; +use zksync_utils::{bytecode::BytecodeMarker, u256_to_h256}; use zksync_web3_decl::{ error::Web3Error, types::{Address, Block, Filter, FilterChanges, Log, U64}, @@ -21,7 +21,7 @@ use zksync_web3_decl::{ use crate::{ execution_sandbox::BlockArgs, tx_sender::BinarySearchKind, - utils::open_readonly_transaction, + utils::{open_readonly_transaction, prepare_evm_bytecode}, web3::{backend_jsonrpsee::MethodTracer, metrics::API_METRICS, state::RpcState, TypedFilter}, }; @@ -397,7 +397,22 @@ impl EthNamespace { .get_contract_code_unchecked(address, block_number) .await .map_err(DalError::generalize)?; - Ok(contract_code.unwrap_or_default().into()) + let Some(contract_code) = contract_code else { + return Ok(Bytes::default()); + }; + // Check if the bytecode is an EVM bytecode, and if so, pre-process it correspondingly. + let marker = BytecodeMarker::new(contract_code.bytecode_hash); + let prepared_bytecode = if marker == Some(BytecodeMarker::Evm) { + prepare_evm_bytecode(&contract_code.bytecode).with_context(|| { + format!( + "malformed EVM bytecode at address {address:?}, hash = {:?}", + contract_code.bytecode_hash + ) + })? 
+ } else { + contract_code.bytecode + }; + Ok(prepared_bytecode.into()) } pub fn chain_id_impl(&self) -> U64 { diff --git a/core/node/api_server/src/web3/tests/mod.rs b/core/node/api_server/src/web3/tests/mod.rs index 94d47abe7ef1..a8d90c281a75 100644 --- a/core/node/api_server/src/web3/tests/mod.rs +++ b/core/node/api_server/src/web3/tests/mod.rs @@ -32,18 +32,22 @@ use zksync_system_constants::{ }; use zksync_types::{ api, - block::{pack_block_info, L2BlockHeader}, + block::{pack_block_info, L2BlockHasher, L2BlockHeader}, fee_model::{BatchFeeInput, FeeParams}, get_nonce_key, l2::L2Tx, storage::get_code_key, + system_contracts::get_system_smart_contracts, tokens::{TokenInfo, TokenMetadata}, tx::IncludedTxLocation, utils::{storage_key_for_eth_balance, storage_key_for_standard_token_balance}, AccountTreeId, Address, L1BatchNumber, Nonce, ProtocolVersionId, StorageKey, StorageLog, H256, U256, U64, }; -use zksync_utils::u256_to_h256; +use zksync_utils::{ + bytecode::{hash_bytecode, hash_evm_bytecode}, + u256_to_h256, +}; use zksync_vm_executor::oneshot::MockOneshotExecutor; use zksync_web3_decl::{ client::{Client, DynClient, L2}, @@ -60,7 +64,10 @@ use zksync_web3_decl::{ }; use super::*; -use crate::web3::testonly::TestServerBuilder; +use crate::{ + testonly::{PROCESSED_EVM_BYTECODE, RAW_EVM_BYTECODE}, + web3::testonly::TestServerBuilder, +}; mod debug; mod filters; @@ -635,7 +642,7 @@ impl HttpTest for StorageAccessWithSnapshotRecovery { fn storage_initialization(&self) -> StorageInitialization { let address = Address::repeat_byte(1); let code_key = get_code_key(&address); - let code_hash = H256::repeat_byte(2); + let code_hash = hash_bytecode(&[0; 32]); let balance_key = storage_key_for_eth_balance(&address); let logs = vec![ StorageLog::new_write_log(code_key, code_hash), @@ -1113,6 +1120,125 @@ async fn tracing_genesis_config() { test_http_server(GenesisConfigTest).await; } +#[derive(Debug)] +struct GetBytecodeTest; + +impl GetBytecodeTest { + async fn 
insert_evm_bytecode( + connection: &mut Connection<'_, Core>, + at_block: L2BlockNumber, + address: Address, + ) -> anyhow::Result<()> { + let evm_bytecode_hash = hash_evm_bytecode(RAW_EVM_BYTECODE); + let code_log = StorageLog::new_write_log(get_code_key(&address), evm_bytecode_hash); + connection + .storage_logs_dal() + .append_storage_logs(at_block, &[code_log]) + .await?; + + let factory_deps = HashMap::from([(evm_bytecode_hash, RAW_EVM_BYTECODE.to_vec())]); + connection + .factory_deps_dal() + .insert_factory_deps(at_block, &factory_deps) + .await?; + Ok(()) + } +} + +#[async_trait] +impl HttpTest for GetBytecodeTest { + async fn test( + &self, + client: &DynClient, + pool: &ConnectionPool, + ) -> anyhow::Result<()> { + let genesis_evm_address = Address::repeat_byte(1); + let mut connection = pool.connection().await?; + Self::insert_evm_bytecode(&mut connection, L2BlockNumber(0), genesis_evm_address).await?; + + for contract in get_system_smart_contracts(false) { + let bytecode = client + .get_code(*contract.account_id.address(), None) + .await?; + assert_eq!(bytecode.0, contract.bytecode); + } + + let bytecode = client.get_code(genesis_evm_address, None).await?; + assert_eq!(bytecode.0, PROCESSED_EVM_BYTECODE); + + let latest_block_variants = [ + api::BlockNumber::Pending, + api::BlockNumber::Latest, + api::BlockNumber::Committed, + ]; + let latest_block_variants = latest_block_variants.map(api::BlockIdVariant::BlockNumber); + + let genesis_block_variants = [ + api::BlockIdVariant::BlockNumber(api::BlockNumber::Earliest), + api::BlockIdVariant::BlockNumber(api::BlockNumber::Number(0.into())), + api::BlockIdVariant::BlockHashObject(api::BlockHashObject { + block_hash: L2BlockHasher::legacy_hash(L2BlockNumber(0)), + }), + ]; + for at_block in latest_block_variants + .into_iter() + .chain(genesis_block_variants) + { + println!("Testing {at_block:?} with genesis EVM code, latest block: 0"); + let bytecode = client.get_code(genesis_evm_address, 
Some(at_block)).await?; + assert_eq!(bytecode.0, PROCESSED_EVM_BYTECODE); + } + + // Create another block with an EVM bytecode. + let new_bytecode_address = Address::repeat_byte(2); + let mut connection = pool.connection().await?; + let block_header = store_l2_block(&mut connection, L2BlockNumber(1), &[]).await?; + Self::insert_evm_bytecode(&mut connection, L2BlockNumber(1), new_bytecode_address).await?; + + let bytecode = client.get_code(genesis_evm_address, None).await?; + assert_eq!(bytecode.0, PROCESSED_EVM_BYTECODE); + let bytecode = client.get_code(new_bytecode_address, None).await?; + assert_eq!(bytecode.0, PROCESSED_EVM_BYTECODE); + + let new_block_variants = [ + api::BlockIdVariant::BlockNumber(api::BlockNumber::Number(1.into())), + api::BlockIdVariant::BlockHashObject(api::BlockHashObject { + block_hash: block_header.hash, + }), + ]; + for at_block in latest_block_variants.into_iter().chain(new_block_variants) { + println!("Testing {at_block:?} with new EVM code, latest block: 1"); + let bytecode = client + .get_code(new_bytecode_address, Some(at_block)) + .await?; + assert_eq!(bytecode.0, PROCESSED_EVM_BYTECODE); + } + for at_block in genesis_block_variants { + println!("Testing {at_block:?} with new EVM code, latest block: 1"); + let bytecode = client + .get_code(new_bytecode_address, Some(at_block)) + .await?; + assert!(bytecode.0.is_empty()); + } + + for at_block in latest_block_variants + .into_iter() + .chain(new_block_variants) + .chain(genesis_block_variants) + { + println!("Testing {at_block:?} with genesis EVM code, latest block: 1"); + let bytecode = client.get_code(genesis_evm_address, Some(at_block)).await?; + assert_eq!(bytecode.0, PROCESSED_EVM_BYTECODE); + } + Ok(()) + } +} + +#[tokio::test] +async fn getting_bytecodes() { + test_http_server(GetBytecodeTest).await; +} + #[derive(Debug)] struct FeeHistoryTest; diff --git a/core/node/consensus/src/en.rs b/core/node/consensus/src/en.rs index c358974fb0c1..518a7ebb29aa 100644 --- 
a/core/node/consensus/src/en.rs +++ b/core/node/consensus/src/en.rs @@ -4,7 +4,7 @@ use anyhow::Context as _; use zksync_concurrency::{ctx, error::Wrap as _, scope, time}; use zksync_consensus_executor::{self as executor, attestation}; use zksync_consensus_roles::{attester, validator}; -use zksync_consensus_storage::BlockStore; +use zksync_consensus_storage::{BlockStore, PersistentBlockStore as _}; use zksync_dal::consensus_dal; use zksync_node_sync::{fetcher::FetchedBlock, sync_action::ActionQueueSender, SyncState}; use zksync_types::L2BlockNumber; @@ -21,6 +21,10 @@ use crate::{ storage::{self, ConnectionPool}, }; +/// If less than TEMPORARY_FETCHER_THRESHOLD certificates are missing, +/// the temporary fetcher will stop fetching blocks. +pub(crate) const TEMPORARY_FETCHER_THRESHOLD: u64 = 10; + /// External node. pub(super) struct EN { pub(super) pool: ConnectionPool, @@ -120,6 +124,20 @@ impl EN { .wrap("Store::new()")?; s.spawn_bg(async { Ok(runner.run(ctx).await?) }); + // Run the temporary fetcher until the certificates are backfilled. + // Temporary fetcher should be removed once json RPC syncing is fully deprecated. + s.spawn_bg({ + let store = store.clone(); + async { + let store = store; + self.temporary_block_fetcher(ctx, &store).await?; + tracing::info!( + "temporary block fetcher finished, switching to p2p fetching only" + ); + Ok(()) + } + }); + let (block_store, runner) = BlockStore::new(ctx, Box::new(store.clone())) .await .wrap("BlockStore::new()")?; @@ -358,8 +376,42 @@ impl EN { } } + /// Fetches blocks from the main node directly, until the certificates + /// are backfilled. This allows for smooth transition from json RPC to p2p block syncing. 
+ pub(crate) async fn temporary_block_fetcher( + &self, + ctx: &ctx::Ctx, + store: &Store, + ) -> ctx::Result<()> { + const MAX_CONCURRENT_REQUESTS: usize = 30; + scope::run!(ctx, |ctx, s| async { + let (send, mut recv) = ctx::channel::bounded(MAX_CONCURRENT_REQUESTS); + s.spawn(async { + let Some(mut next) = store.next_block(ctx).await? else { + return Ok(()); + }; + while store.persisted().borrow().next().0 + TEMPORARY_FETCHER_THRESHOLD < next.0 { + let n = L2BlockNumber(next.0.try_into().context("overflow")?); + self.sync_state.wait_for_main_node_block(ctx, n).await?; + send.send(ctx, s.spawn(self.fetch_block(ctx, n))).await?; + next = next.next(); + } + drop(send); + Ok(()) + }); + while let Ok(block) = recv.recv_or_disconnected(ctx).await? { + store + .queue_next_fetched_block(ctx, block.join(ctx).await?) + .await + .wrap("queue_next_fetched_block()")?; + } + Ok(()) + }) + .await + } + /// Fetches blocks from the main node in range `[cursor.next()..end)`. - pub(super) async fn fetch_blocks( + async fn fetch_blocks( &self, ctx: &ctx::Ctx, queue: &mut storage::PayloadQueue, @@ -373,7 +425,7 @@ impl EN { s.spawn(async { let send = send; while end.map_or(true, |end| next < end) { - let n = L2BlockNumber(next.0.try_into().unwrap()); + let n = L2BlockNumber(next.0.try_into().context("overflow")?); self.sync_state.wait_for_main_node_block(ctx, n).await?; send.send(ctx, s.spawn(self.fetch_block(ctx, n))).await?; next = next.next(); diff --git a/core/node/consensus/src/registry/tests.rs b/core/node/consensus/src/registry/tests.rs index 33392a7f206d..89afc20e1d57 100644 --- a/core/node/consensus/src/registry/tests.rs +++ b/core/node/consensus/src/registry/tests.rs @@ -80,7 +80,7 @@ async fn test_attester_committee() { .wrap("wait_for_batch_info()")?; // Read the attester committee using the vm. 
- let batch = attester::BatchNumber(node.last_batch().0.into()); + let batch = attester::BatchNumber(node.last_batch().0); assert_eq!( Some(committee), registry diff --git a/core/node/consensus/src/storage/store.rs b/core/node/consensus/src/storage/store.rs index ed83758ba9f0..7267d7e1c822 100644 --- a/core/node/consensus/src/storage/store.rs +++ b/core/node/consensus/src/storage/store.rs @@ -111,6 +111,30 @@ impl Store { async fn conn(&self, ctx: &ctx::Ctx) -> ctx::Result { self.pool.connection(ctx).await.wrap("connection") } + + /// Number of the next block to queue. + pub(crate) async fn next_block( + &self, + ctx: &ctx::Ctx, + ) -> ctx::OrCanceled> { + Ok(sync::lock(ctx, &self.block_payloads) + .await? + .as_ref() + .map(|p| p.next())) + } + + /// Queues the next block. + pub(crate) async fn queue_next_fetched_block( + &self, + ctx: &ctx::Ctx, + block: FetchedBlock, + ) -> ctx::Result<()> { + let mut payloads = sync::lock(ctx, &self.block_payloads).await?.into_async(); + if let Some(payloads) = &mut *payloads { + payloads.send(block).await.context("payloads.send()")?; + } + Ok(()) + } } impl PersistedBlockState { diff --git a/core/node/consensus/src/testonly.rs b/core/node/consensus/src/testonly.rs index 4538337109a4..4ebcf5c9a617 100644 --- a/core/node/consensus/src/testonly.rs +++ b/core/node/consensus/src/testonly.rs @@ -45,7 +45,10 @@ use zksync_types::{ }; use zksync_web3_decl::client::{Client, DynClient, L2}; -use crate::{en, storage::ConnectionPool}; +use crate::{ + en, + storage::{ConnectionPool, Store}, +}; /// Fake StateKeeper for tests. 
#[derive(Debug)] @@ -154,6 +157,7 @@ fn make_config( genesis_spec: Option, ) -> config::ConsensusConfig { config::ConsensusConfig { + port: Some(cfg.server_addr.port()), server_addr: *cfg.server_addr, public_addr: config::Host(cfg.public_addr.0.clone()), max_payload_size: usize::MAX, @@ -416,6 +420,40 @@ impl StateKeeper { .await } + pub async fn run_temporary_fetcher( + self, + ctx: &ctx::Ctx, + client: Box>, + ) -> ctx::Result<()> { + scope::run!(ctx, |ctx, s| async { + let payload_queue = self + .pool + .connection(ctx) + .await + .wrap("connection()")? + .new_payload_queue(ctx, self.actions_sender, self.sync_state.clone()) + .await + .wrap("new_payload_queue()")?; + let (store, runner) = Store::new( + ctx, + self.pool.clone(), + Some(payload_queue), + Some(client.clone()), + ) + .await + .wrap("Store::new()")?; + s.spawn_bg(async { Ok(runner.run(ctx).await?) }); + en::EN { + pool: self.pool.clone(), + client, + sync_state: self.sync_state.clone(), + } + .temporary_block_fetcher(ctx, &store) + .await + }) + .await + } + /// Runs consensus node for the external node. 
pub async fn run_consensus( self, diff --git a/core/node/consensus/src/tests/attestation.rs b/core/node/consensus/src/tests/attestation.rs index bd3886bd4c89..2701a986e9e9 100644 --- a/core/node/consensus/src/tests/attestation.rs +++ b/core/node/consensus/src/tests/attestation.rs @@ -79,7 +79,7 @@ async fn test_attestation_status_api(version: ProtocolVersionId) { let status = fetch_status().await?; assert_eq!( status.next_batch_to_attest, - attester::BatchNumber(first_batch.0.into()) + attester::BatchNumber(first_batch.0) ); tracing::info!("Insert a cert"); @@ -237,7 +237,7 @@ async fn test_multiple_attesters(version: ProtocolVersionId, pregenesis: bool) { } tracing::info!("Wait for the batches to be attested"); - let want_last = attester::BatchNumber(validator.last_sealed_batch().0.into()); + let want_last = attester::BatchNumber(validator.last_sealed_batch().0); validator_pool .wait_for_batch_certificates_and_verify(ctx, want_last, Some(registry_addr)) .await?; diff --git a/core/node/consensus/src/tests/mod.rs b/core/node/consensus/src/tests/mod.rs index 94fbcbb90d84..8da17cfba8ac 100644 --- a/core/node/consensus/src/tests/mod.rs +++ b/core/node/consensus/src/tests/mod.rs @@ -13,8 +13,10 @@ use zksync_consensus_storage::{BlockStore, PersistentBlockStore}; use zksync_dal::consensus_dal; use zksync_test_account::Account; use zksync_types::ProtocolVersionId; +use zksync_web3_decl::namespaces::EnNamespaceClient as _; use crate::{ + en::TEMPORARY_FETCHER_THRESHOLD, mn::run_main_node, storage::{ConnectionPool, Store}, testonly, @@ -665,6 +667,136 @@ async fn test_p2p_fetcher_backfill_certs( .unwrap(); } +// Test temporary fetcher fetching blocks if a lot of certs are missing. 
+#[test_casing(8, Product((FROM_SNAPSHOT,VERSIONS,PREGENESIS)))] +#[tokio::test] +async fn test_temporary_fetcher(from_snapshot: bool, version: ProtocolVersionId, pregenesis: bool) { + zksync_concurrency::testonly::abort_on_panic(); + let ctx = &ctx::test_root(&ctx::AffineClock::new(10.)); + let rng = &mut ctx.rng(); + // We force certs to be missing on EN by having 1 of the validators permanently offline. + // This way no blocks will be finalized at all, so no one will have certs. + let setup = Setup::new(rng, 2); + let validator_cfg = testonly::new_configs(rng, &setup, 0, pregenesis)[0].clone(); + let node_cfg = validator_cfg.new_fullnode(rng); + let account = &mut Account::random(); + + scope::run!(ctx, |ctx, s| async { + tracing::info!("Spawn validator."); + let validator_pool = ConnectionPool::test(from_snapshot, version).await; + let (mut validator, runner) = + testonly::StateKeeper::new(ctx, validator_pool.clone()).await?; + s.spawn_bg(runner.run(ctx)); + s.spawn_bg(run_main_node( + ctx, + validator_cfg.config.clone(), + validator_cfg.secrets.clone(), + validator_pool.clone(), + )); + // API server needs at least 1 L1 batch to start. + validator.seal_batch().await; + let client = validator.connect(ctx).await?; + + // Wait for the consensus to be initialized. 
+ while ctx.wait(client.consensus_global_config()).await??.is_none() { + ctx.sleep(time::Duration::milliseconds(100)).await?; + } + + let node_pool = ConnectionPool::test(from_snapshot, version).await; + + tracing::info!("Run centralized fetcher, so that there is a lot of certs missing."); + scope::run!(ctx, |ctx, s| async { + let (node, runner) = testonly::StateKeeper::new(ctx, node_pool.clone()).await?; + s.spawn_bg(runner.run(ctx)); + s.spawn_bg(node.run_fetcher(ctx, client.clone())); + validator + .push_random_blocks(rng, account, TEMPORARY_FETCHER_THRESHOLD as usize + 1) + .await; + node_pool + .wait_for_payload(ctx, validator.last_block()) + .await?; + Ok(()) + }) + .await + .unwrap(); + + tracing::info!( + "Run p2p fetcher. Blocks should be fetched by the temporary fetcher anyway." + ); + scope::run!(ctx, |ctx, s| async { + let (node, runner) = testonly::StateKeeper::new(ctx, node_pool.clone()).await?; + s.spawn_bg(runner.run(ctx)); + s.spawn_bg(node.run_consensus(ctx, client.clone(), node_cfg.clone())); + validator.push_random_blocks(rng, account, 5).await; + node_pool + .wait_for_payload(ctx, validator.last_block()) + .await?; + Ok(()) + }) + .await + .unwrap(); + Ok(()) + }) + .await + .unwrap(); +} + +// Test that temporary fetcher terminates once enough blocks have certs. 
+#[test_casing(4, Product((FROM_SNAPSHOT,VERSIONS)))] +#[tokio::test] +async fn test_temporary_fetcher_termination(from_snapshot: bool, version: ProtocolVersionId) { + zksync_concurrency::testonly::abort_on_panic(); + let ctx = &ctx::test_root(&ctx::AffineClock::new(10.)); + let rng = &mut ctx.rng(); + let setup = Setup::new(rng, 1); + let pregenesis = true; + let validator_cfg = testonly::new_configs(rng, &setup, 0, pregenesis)[0].clone(); + let node_cfg = validator_cfg.new_fullnode(rng); + let account = &mut Account::random(); + + scope::run!(ctx, |ctx, s| async { + tracing::info!("Spawn validator."); + let validator_pool = ConnectionPool::test(from_snapshot, version).await; + let (mut validator, runner) = + testonly::StateKeeper::new(ctx, validator_pool.clone()).await?; + s.spawn_bg(runner.run(ctx)); + s.spawn_bg(run_main_node( + ctx, + validator_cfg.config.clone(), + validator_cfg.secrets.clone(), + validator_pool.clone(), + )); + // API server needs at least 1 L1 batch to start. + validator.seal_batch().await; + let client = validator.connect(ctx).await?; + + let node_pool = ConnectionPool::test(from_snapshot, version).await; + + // Run the EN so the consensus is initialized on EN and wait for it to sync. + scope::run!(ctx, |ctx, s| async { + let (node, runner) = testonly::StateKeeper::new(ctx, node_pool.clone()).await?; + s.spawn_bg(runner.run(ctx)); + s.spawn_bg(node.run_consensus(ctx, client.clone(), node_cfg.clone())); + validator.push_random_blocks(rng, account, 5).await; + node_pool + .wait_for_payload(ctx, validator.last_block()) + .await?; + Ok(()) + }) + .await + .unwrap(); + + // Run the temporary fetcher. It should terminate immediately, since EN is synced. 
+ let (node, runner) = testonly::StateKeeper::new(ctx, node_pool.clone()).await?; + s.spawn_bg(runner.run(ctx)); + node.run_temporary_fetcher(ctx, client).await?; + + Ok(()) + }) + .await + .unwrap(); +} + #[test_casing(4, Product((VERSIONS,PREGENESIS)))] #[tokio::test] async fn test_with_pruning(version: ProtocolVersionId, pregenesis: bool) { diff --git a/core/tests/ts-integration/tests/fees.test.ts b/core/tests/ts-integration/tests/fees.test.ts index c41f5943efb5..e99d3b67911b 100644 --- a/core/tests/ts-integration/tests/fees.test.ts +++ b/core/tests/ts-integration/tests/fees.test.ts @@ -278,7 +278,6 @@ testFees('Test fees', function () { }); afterAll(async () => { - await testMaster.deinitialize(); await mainNode.killAndWaitForShutdown(); // Returning the pubdata price to the default one @@ -287,6 +286,7 @@ testFees('Test fees', function () { deleteInternalEnforcedL1GasPrice(pathToHome, fileConfig); deleteInternalEnforcedPubdataPrice(pathToHome, fileConfig); mainNode = await mainNodeSpawner.spawnMainNode(); + await testMaster.deinitialize(); __ZKSYNC_TEST_CONTEXT_OWNER__.setL2NodePid(mainNode.proc.pid!); }); }); diff --git a/docs/specs/README.md b/docs/specs/README.md index 1f163bf7845f..d0b087ae93e3 100644 --- a/docs/specs/README.md +++ b/docs/specs/README.md @@ -33,4 +33,4 @@ 1. 
[ZK Chain ecosystem](./zk_chains/README.md) - [Overview](./zk_chains/overview.md) - [Shared Bridge](./zk_chains/shared_bridge.md) - - [Hyperbridges](./zk_chains/hyperbridges.md) + - [Interop](./zk_chains/interop.md) diff --git a/docs/specs/zk_chains/README.md b/docs/specs/zk_chains/README.md index 4de575899dd1..ce0a7c311a2f 100644 --- a/docs/specs/zk_chains/README.md +++ b/docs/specs/zk_chains/README.md @@ -2,4 +2,4 @@ - [Overview](./overview.md) - [Shared Bridge](./shared_bridge.md) -- [Hyperbridges](./hyperbridges.md) +- [Interop](./interop.md) diff --git a/docs/specs/zk_chains/gateway.md b/docs/specs/zk_chains/gateway.md new file mode 100644 index 000000000000..f4ee68e242e6 --- /dev/null +++ b/docs/specs/zk_chains/gateway.md @@ -0,0 +1 @@ +# Gateway diff --git a/docs/specs/zk_chains/hyperbridges.md b/docs/specs/zk_chains/hyperbridges.md deleted file mode 100644 index 614fe61427e3..000000000000 --- a/docs/specs/zk_chains/hyperbridges.md +++ /dev/null @@ -1,41 +0,0 @@ -# Hyperbridges - -## Introduction - -In the Shared bridge document we described how the L1 smart contracts work to support multiple chains, and we emphasized -that the core feature is hyperbridging, but we did not outline the hyperbridges themselves. This is because hyperbridges -are mostly L2 contracts. In this document we describe what hyperbridges are, and specify the necessary infrastructure. - -### Hyperbridge description - -Hyperbridges are trustless and cheap general native bridges between ZK Chains, allowing cross-chain function calls. -Trustlessness is achieved by relying on the main ZK Chain bridge to send a compressed message to L1, which is then sent -to and expanded on the destination ZK Chain. - -Technically they are a system of smart contracts that build on top of the enshrined L1<>L2 validating bridges, and can -interpret messages sent from L2 to L2 by verifying Merkle proofs. 
They are built alongside the protocol, they can -transfer the native asset of the ecosystem, and they can be used for asynchronous function calls between ZK Chains. - -![Hyperbridges](./img/hyperbridges.png) - -The trustless nature of hyperbridges allows the ecosystem to resemble a single VM. To illustrate imagine a new ZK Chain -joining the ecosystem. We will want ether/Dai/etc. to be accessible on this ZK Chain. This can be done automatically. -There will be a central erc20 deployer contract in the ecosystem, which will deploy the new ERC20 contract via the -hyperbridge. After the contract is deployed it will be able to interact other Dai contracts in the ecosystem. - -### High Level design - -![Hyperbridging](./img/hyperbridging.png) - -### L1 - -For the larger context see the [Shared Bridge](./shared_bridge.md) document, here we will focus on - -- HyperMailbox (part of Bridgehub). Contains the Hyperroot, root of Merkle tree of Hyperlogs. Hyperlogs are the L2->L1 - SysLogs that record the sent hyperbridge messages from the L2s. - -### L2 Contracts - -- Outbox system contract. It collects the hyperbridge txs into the hyperlog of the ZK Chain. -- Inbox system contract. This is where the hyperroot is imported and sent to L1 for settlement. Merkle proofs are - verified here, tx calls are started from here, nullifiers are stored here (add epochs later) diff --git a/docs/specs/zk_chains/interop.md b/docs/specs/zk_chains/interop.md new file mode 100644 index 000000000000..947742909b8d --- /dev/null +++ b/docs/specs/zk_chains/interop.md @@ -0,0 +1,49 @@ +# Interop + +## Introduction + +In the Shared bridge document we described how the L1 smart contracts work to support multiple chains, and we emphasized +that the core feature is interop. Interop happens via the same L1->L2 interface as described in the L1SharedBridge doc. 
+There is (with the interop upgrade) a Bridgehub, AssetRouter, NativeTokenVault and Nullifier deployed on every L2, and +they serve the same feature as their L1 counterparts. Namely: + +- The Bridgehub is used to start the transaction. +- The AssetRouter and NativeTokenVault are the bridge contract that handle the tokens. +- The Nullifier is used to prevent reexecution of xL2 txs. + +### Interop process + +![Interop](./img/hyperbridging.png) + +The interop process has 7 main steps, each with its substeps: + +1. Starting the transaction on the sending chain + + - The user/caller calls the Bridgehub contract. If they want to use a bridge they call + `requestL2TransactionTwoBridges`, if they want to make a direct call they call `requestL2TransactionDirect` + function. + - The Bridgehub collects the base token fees necessary for the interop tx to be processed on the destination chain, + and if using the TwoBridges method the calldata and the destination contract ( for more data see Shared bridge + doc). + - The Bridgehub emits a `NewPriorityRequest` event, this is the same as the one in our Mailbox contract. This event + specifies the xL2 txs, which uses the same format as L1->L2 txs. This event can be picked up and used to receive + the txs. + - This new priority request is sent as an L2->L1 message, it is included in the chains merkle tree of emitted txs. + +2. The chain settles its proof on L1 or the Gateway, whichever is used as the settlement layer for the chain. +3. On the Settlement Layer (SL), the MessageRoot is updated in the MessageRoot contract. The new data includes all the + L2->L1 messages that are emitted from the settling chain. +4. The receiving chain picks up the updated MessageRoot from the Settlement Layer. +5. Now the xL2 txs can be imported on the destination chain. Along with the txs, a merkle proof needs to be sent to link + it to the MessageRoot. +6. Receiving the tx on the destination chain + + - On the destination chain the xL2 txs is verified.
This means the merkle proof is checked against the MessageRoot. + This shows that the xL2 txs was indeed sent. + - After this the txs can be executed. The tx hash is stored in the L2Nullifier contract, so that the txs cannot be + replayed. + - The specified contract is called, with the calldata, and the message sender = + `keccak256(originalMessageSender, originChainId) >> 160`. This is to prevent the collision of the msg.sender + addresses. + +7. The destination chain settles on the SL and the MessageRoot that it imported is checked. diff --git a/docs/specs/zk_chains/shared_bridge.md b/docs/specs/zk_chains/shared_bridge.md index c464a7a154bf..b43d3082b621 100644 --- a/docs/specs/zk_chains/shared_bridge.md +++ b/docs/specs/zk_chains/shared_bridge.md @@ -17,7 +17,7 @@ If you want to know more about ZK Chains, check this We want to create a system where: - ZK Chains should be launched permissionlessly within the ecosystem. -- Hyperbridges should enable unified liquidity for assets across the ecosystem. +- Interop should enable unified liquidity for assets across the ecosystem. - Multi-chain smart contracts need to be easy to develop, which means easy access to traditional bridges, and other supporting architecture. @@ -58,20 +58,19 @@ be able to leverage them when available). #### Bridgehub - Acts as a hub for bridges, so that they have a single point of communication with all ZK Chain contracts. This allows - L1 assets to be locked in the same contract for all ZK Chains, including L3s and validiums. The `Bridgehub` also - implements the following: + L1 assets to be locked in the same contract for all ZK Chains. The `Bridgehub` also implements the following features: - `Registry` This is where ZK Chains can register, starting in a permissioned manner, but with the goal to be - permissionless in the future. This is where their `chainID` is determined. L3s will also register here. This - `Registry` is also where State Transition contracts should register.
Each chain has to specify its desired ST when - registering (Initially, only one will be available). + permissionless in the future. This is where their `chainID` is determined. Chains on Gateway will also register here. + This `Registry` is also where Chain Type Manager contracts should register. Each chain has to specify its desired CTM + when registering (Initially, only one will be available). ``` function newChain( uint256 _chainId, - address _stateTransition + address _chainTypeManager ) external returns (uint256 chainId); - function newStateTransition(address _stateTransition) external; + function newChainTypeManager(address _chainTypeManager) external; ``` - `BridgehubMailbox` routes messages to the Diamond proxy’s Mailbox facet based on chainID @@ -79,43 +78,73 @@ be able to leverage them when available). - Same as the current zkEVM [Mailbox](https://github.com/matter-labs/era-contracts/blob/main/l1-contracts/contracts/zksync/facets/Mailbox.sol), just with chainId, - - Ether needs to be deposited and withdrawn from here. - This is where L2 transactions can be requested. ``` - function requestL2Transaction( - uint256 _chainId, - address _contractL2, - uint256 _l2Value, - bytes calldata _calldata, - uint256 _l2GasLimit, - uint256 _l2GasPerPubdataByteLimit, - bytes[] calldata _factoryDeps, - address _refundRecipient - ) public payable override returns (bytes32 canonicalTxHash) { - address proofChain = bridgeheadStorage.proofChain[_chainId]; - canonicalTxHash = IProofChain(proofChain).requestL2TransactionBridgehead( - _chainId, - msg.value, - msg.sender, - _contractL2, - _l2Value, - _calldata, - _l2GasLimit, - _l2GasPerPubdataByteLimit, - _factoryDeps, - _refundRecipient - ); - } + function requestL2TransactionTwoBridges( + L2TransactionRequestTwoBridgesOuter calldata _request + ) ``` -- `Hypermailbox` - - This will allow general message passing (L2<>L2, L2<>L3, etc). This is where the `Mailbox` sends the `Hyperlogs`. 
- `Hyperlogs` are commitments to these messages sent from a single ZK Chain. `Hyperlogs` are aggregated into a - `HyperRoot` in the `HyperMailbox`. - - This component has not been implemented yet + ``` + struct L2TransactionRequestTwoBridgesOuter { + uint256 chainId; + uint256 mintValue; + uint256 l2Value; + uint256 l2GasLimit; + uint256 l2GasPerPubdataByteLimit; + address refundRecipient; + address secondBridgeAddress; + uint256 secondBridgeValue; + bytes secondBridgeCalldata; + } + ``` -#### Main asset shared bridges +``` + struct L2TransactionRequestTwoBridgesInner { + bytes32 magicValue; + address l2Contract; + bytes l2Calldata; + bytes[] factoryDeps; + bytes32 txDataHash; +} +``` + +- The `requestL2TransactionTwoBridges` function should be used most of the time when bridging to a chain ( the exception + is when the user bridges directly to a contract on the L2, without using a bridge contract on L1). The logic of it is + the following: + + - The user wants to bridge to chain with the provided `L2TransactionRequestTwoBridgesOuter.chainId`. + - Two bridges are called, the baseTokenBridge (i.e. the L1SharedBridge or L1AssetRouter after the Gateway upgrade) and + an arbitrary second bridge. The Bridgehub will provide the original caller address to both bridges, which can + request that the appropriate amount of tokens are transferred from the caller to the bridge. The caller has to set + the appropriate allowance for both bridges. (Often the bridges coincide, but they don't have to). + - The `L2TransactionRequestTwoBridgesOuter.mintValue` is the amount of baseTokens that will be minted on L2. This is + the amount of tokens that the baseTokenBridge will request from the user. If the baseToken is Eth, it will be + forwarded to the baseTokenBridge. + - The `L2TransactionRequestTwoBridgesOuter.l2Value` is the amount of tokens that will be deposited on L2. The second + bridge and the Mailbox receives this as an input (although our second bridge does not use the value). 
+ - The `L2TransactionRequestTwoBridgesOuter.l2GasLimit` is the maximum amount of gas that will be spent on L2 to + complete the transaction. The Mailbox receives this as an input. + - The `L2TransactionRequestTwoBridgesOuter.l2GasPerPubdataByteLimit` is the maximum amount of gas per pubdata byte + that will be spent on L2 to complete the transaction. The Mailbox receives this as an input. + - The `L2TransactionRequestTwoBridgesOuter.refundRecipient` is the address that will be refunded for the gas spent on + L2. The Mailbox receives this as an input. + - The `L2TransactionRequestTwoBridgesOuter.secondBridgeAddress` is the address of the second bridge that will be + called. This is the arbitrary address that is called from the Bridgehub. + - The `L2TransactionRequestTwoBridgesOuter.secondBridgeValue` is the amount of tokens that will be deposited on L2. + The second bridge receives this value as the baseToken (i.e. Eth on L1). + - The `L2TransactionRequestTwoBridgesOuter.secondBridgeCalldata` is the calldata that will be passed to the second + bridge. This is the arbitrary calldata that is passed from the Bridgehub to the second bridge. + - The secondBridge returns the `L2TransactionRequestTwoBridgesInner` struct to the Bridgehub. This is also passed to + the Mailbox as input. This is where the destination contract, calldata, factoryDeps are determined on the L2. + + This setup allows the user to bridge the baseToken of the origin chain A to a chain B with some other baseToken, by + specifying the A's token in the secondBridgeValue, which will be minted on the destination chain as an ERC20 token, + and specifying the amount of B's token in the mintValue, which will be minted as the baseToken and used to cover the + gas costs. 
+ +#### Main asset shared bridges - Some assets have to be natively supported (ETH, WETH) and it also makes sense to support some generally accepted token standards (ERC20 tokens), as this makes it easy to bridge those tokens (and ensures a single version of them exists on @@ -147,25 +176,18 @@ be able to leverage them when available). ); ``` -This topic is now covered more thoroughly by the Custom native token discussion. - -[Custom native token compatible with Hyperbridging](https://www.notion.so/Custom-native-token-compatible-with-Hyperbridging-54e190a1a76f44248cf84a38304a0641?pvs=21) +#### Chain Type Manager -#### State Transition - -- `StateTransition` A state transition manages proof verification and DA for multiple chains. It also implements the +- `ChainTypeManager` A chain type manager manages proof verification and DA for multiple chains. It also implements the following functionalities: - - `StateTransitionRegistry` The ST is shared for multiple chains, so initialization and upgrades have to be the same - for all chains. Registration is not permissionless but happens based on the registrations in the bridgehub’s - `Registry`. At registration a `DiamondProxy` is deployed and initialized with the appropriate `Facets` for each ZK - Chain. + - `ChainTypeRegistry` The ST is shared for multiple chains, so initialization and upgrades have to be the same for all + chains. Registration is not permissionless but happens based on the registrations in the bridgehub’s `Registry`. At + registration a `DiamondProxy` is deployed and initialized with the appropriate `Facets` for each ZK Chain. - `Facets` and `Verifier` are shared across chains that relies on the same ST: `Base`, `Executor` , `Getters`, `Admin` , `Mailbox.`The `Verifier` is the contract that actually verifies the proof, and is called by the `Executor`. 
- Upgrade Mechanism The system requires all chains to be up-to-date with the latest implementation, so whenever an update is needed, we have to “force” each chain to update, but due to decentralization, we have to give each chain a - time frame (more information in the - [Upgrade Mechanism](https://www.notion.so/ZK-Stack-shared-bridge-alpha-version-a37c4746f8b54fb899d67e474bfac3bb?pvs=21) - section). This is done in the update mechanism contract, this is where the bootloader and system contracts are + time frame. This is done in the update mechanism contract, this is where the bootloader and system contracts are published, and the `ProposedUpgrade` is stored. Then each chain can call this upgrade for themselves as needed. After the deadline is over, the not-updated chains are frozen, that is, cannot post new proofs. Frozen chains can unfreeze by updating their proof system. @@ -180,6 +202,7 @@ This topic is now covered more thoroughly by the Custom native token discussion. - A chain might implement its own specific consensus mechanism. This needs its own contracts. Only this contract will be able to submit proofs to the State Transition contract. +- DA contracts. - Currently, the `ValidatorTimelock` is an example of such a contract. ### Components interactions @@ -199,22 +222,6 @@ features required to process proofs. The chain ID is set in the VM in a special -#### WETH Contract - -Ether, the native gas token is part of the core system contracts, so deploying it is not necessary. But WETH is just a -smart contract, it needs to be deployed and initialised. This happens from the L1 WETH bridge. This deploys on L2 the -corresponding bridge and ERC20 contract. This is deployed from L1, but the L2 address is known at deployment time. - -![deployWeth.png](./img/deployWeth.png) - -#### Deposit WETH - -The user can deposit WETH into the ecosystem using the WETH bridge on L1. The destination chain ID has to be specified. 
-The Bridgehub unwraps the WETH, and keeps the ETH, and send a message to the destination L2 to mint WETH to the -specified address. - -![depositWeth.png](./img/depositWeth.png) - --- ### Common Standards and Upgrades diff --git a/etc/contracts-test-data/contracts/mock-evm/mock-evm.sol b/etc/contracts-test-data/contracts/mock-evm/mock-evm.sol index 5f4de59681fd..baa0d37b7530 100644 --- a/etc/contracts-test-data/contracts/mock-evm/mock-evm.sol +++ b/etc/contracts-test-data/contracts/mock-evm/mock-evm.sol @@ -68,7 +68,7 @@ contract MockContractDeployer { Version1 } - address constant CODE_ORACLE_ADDR = address(0x8012); + IAccountCodeStorage constant ACCOUNT_CODE_STORAGE_CONTRACT = IAccountCodeStorage(address(0x8002)); MockKnownCodeStorage constant KNOWN_CODE_STORAGE_CONTRACT = MockKnownCodeStorage(address(0x8004)); /// The returned value is obviously incorrect in the general case, but works well enough when called by the bootloader. @@ -78,15 +78,166 @@ contract MockContractDeployer { /// Replaces real deployment with publishing a surrogate EVM "bytecode". 
/// @param _salt bytecode hash - /// @param _bytecodeHash ignored, since it's not possible to set arbitrarily /// @param _input bytecode to publish function create( bytes32 _salt, - bytes32 _bytecodeHash, + bytes32, // ignored, since it's not possible to set arbitrarily bytes calldata _input ) external payable returns (address) { KNOWN_CODE_STORAGE_CONTRACT.setEVMBytecodeHash(_salt); KNOWN_CODE_STORAGE_CONTRACT.publishEVMBytecode(_input); - return address(0); + address newAddress = address(uint160(msg.sender) + 1); + ACCOUNT_CODE_STORAGE_CONTRACT.storeAccountConstructedCodeHash(newAddress, _salt); + return newAddress; + } +} + +interface IAccountCodeStorage { + function getRawCodeHash(address _address) external view returns (bytes32); + function storeAccountConstructedCodeHash(address _address, bytes32 _hash) external; +} + +interface IRecursiveContract { + function recurse(uint _depth) external returns (uint); +} + +/// Native incrementing library. Not actually a library to simplify deployment. +contract IncrementingContract { + // Should not collide with other storage slots + uint constant INCREMENTED_SLOT = 0x123; + + function getIncrementedValue() public view returns (uint _value) { + assembly { + _value := sload(INCREMENTED_SLOT) + } + } + + function increment(address _thisAddress, uint _thisBalance) external { + require(msg.sender == tx.origin, "msg.sender not retained"); + require(address(this) == _thisAddress, "this address"); + require(address(this).balance == _thisBalance, "this balance"); + assembly { + sstore(INCREMENTED_SLOT, add(sload(INCREMENTED_SLOT), 1)) + } + } + + /// Tests delegation to a native or EVM contract at the specified target. 
+ function testDelegateCall(address _target) external { + uint valueSnapshot = getIncrementedValue(); + (bool success, ) = _target.delegatecall(abi.encodeCall( + IncrementingContract.increment, + (address(this), address(this).balance) + )); + require(success, "delegatecall reverted"); + require(getIncrementedValue() == valueSnapshot + 1, "invalid value"); + } + + function testStaticCall(address _target, uint _expectedValue) external { + (bool success, bytes memory rawValue) = _target.staticcall(abi.encodeCall( + this.getIncrementedValue, + () + )); + require(success, "static call reverted"); + (uint value) = abi.decode(rawValue, (uint)); + require(value == _expectedValue, "value mismatch"); + + (success, ) = _target.staticcall(abi.encodeCall( + IncrementingContract.increment, + (address(this), address(this).balance) + )); + require(!success, "staticcall should've reverted"); + } +} + +uint constant EVM_EMULATOR_STIPEND = 1 << 30; + +/** + * Mock EVM emulator used in low-level tests. + */ +contract MockEvmEmulator is IRecursiveContract, IncrementingContract { + IAccountCodeStorage constant ACCOUNT_CODE_STORAGE_CONTRACT = IAccountCodeStorage(address(0x8002)); + + /// Set to `true` for testing logic sanity. + bool isUserSpace; + + modifier validEvmEntry() { + if (!isUserSpace) { + require(gasleft() >= EVM_EMULATOR_STIPEND, "no stipend"); + // Fetch bytecode for the executed contract. + bytes32 bytecodeHash = ACCOUNT_CODE_STORAGE_CONTRACT.getRawCodeHash(address(this)); + require(bytecodeHash != bytes32(0), "called contract not deployed"); + uint bytecodeVersion = uint(bytecodeHash) >> 248; + require(bytecodeVersion == 2, "non-EVM bytecode"); + + // Check that members of the current address are well-defined. 
+ require(address(this).code.length != 0, "invalid code"); + require(address(this).codehash == bytecodeHash, "bytecode hash mismatch"); + } + _; + } + + function testPayment(uint _expectedValue, uint _expectedBalance) public payable validEvmEntry { + require(msg.value == _expectedValue, "unexpected msg.value"); + require(address(this).balance == _expectedBalance, "unexpected balance"); + } + + IRecursiveContract recursionTarget; + + function recurse(uint _depth) public validEvmEntry returns (uint) { + require(gasleft() < 2 * EVM_EMULATOR_STIPEND, "stipend provided multiple times"); + + if (_depth <= 1) { + return 1; + } else { + IRecursiveContract target = (address(recursionTarget) == address(0)) ? this : recursionTarget; + // The real emulator limits amount of gas when performing far calls by EVM gas, so we emulate this behavior as well. + uint gasToSend = isUserSpace ? gasleft() : (gasleft() - EVM_EMULATOR_STIPEND); + return target.recurse{gas: gasToSend}(_depth - 1) * _depth; + } + } + + function testRecursion(uint _depth, uint _expectedValue) external validEvmEntry returns (uint) { + require(recurse(_depth) == _expectedValue, "incorrect recursion"); + } + + function testExternalRecursion(uint _depth, uint _expectedValue) external validEvmEntry returns (uint) { + recursionTarget = new NativeRecursiveContract(IRecursiveContract(this)); + uint returnedValue = recurse(_depth); + recursionTarget = this; // This won't work on revert, but for tests, it's good enough + require(returnedValue == _expectedValue, "incorrect recursion"); + } + + MockContractDeployer constant CONTRACT_DEPLOYER_CONTRACT = MockContractDeployer(address(0x8006)); + + /// Emulates EVM contract deployment and a subsequent call to it in a single transaction. 
+ function testDeploymentAndCall(bytes32 _evmBytecodeHash, bytes calldata _evmBytecode) external validEvmEntry { + IRecursiveContract newContract = IRecursiveContract(CONTRACT_DEPLOYER_CONTRACT.create( + _evmBytecodeHash, + _evmBytecodeHash, + _evmBytecode + )); + require(uint160(address(newContract)) == uint160(address(this)) + 1, "unexpected address"); + require(address(newContract).code.length > 0, "contract code length"); + require(address(newContract).codehash != bytes32(0), "contract code hash"); + + uint gasToSend = gasleft() - EVM_EMULATOR_STIPEND; + require(newContract.recurse{gas: gasToSend}(5) == 120, "unexpected recursive result"); + } + + fallback() external validEvmEntry { + require(msg.data.length == 0, "unsupported call"); + } +} + +contract NativeRecursiveContract is IRecursiveContract { + IRecursiveContract target; + + constructor(IRecursiveContract _target) { + target = _target; + } + + function recurse(uint _depth) external returns (uint) { + require(gasleft() < EVM_EMULATOR_STIPEND, "stipend spilled to native contract"); + return (_depth <= 1) ? 
1 : target.recurse(_depth - 1) * _depth; } } diff --git a/etc/env/consensus_config.yaml b/etc/env/consensus_config.yaml index 304ea31fac9c..2564865eeb31 100644 --- a/etc/env/consensus_config.yaml +++ b/etc/env/consensus_config.yaml @@ -1,3 +1,4 @@ +port: 3054 server_addr: "127.0.0.1:3054" public_addr: "127.0.0.1:3054" max_payload_size: 2500000 diff --git a/etc/env/en_consensus_config.yaml b/etc/env/en_consensus_config.yaml index f759e72e891c..5c428866cb6c 100644 --- a/etc/env/en_consensus_config.yaml +++ b/etc/env/en_consensus_config.yaml @@ -1,3 +1,4 @@ +port: 3055 server_addr: '127.0.0.1:3055' public_addr: '127.0.0.1:3055' max_payload_size: 2500000 diff --git a/etc/env/file_based/general.yaml b/etc/env/file_based/general.yaml index a4ba8c0201a6..017d79dbe736 100644 --- a/etc/env/file_based/general.yaml +++ b/etc/env/file_based/general.yaml @@ -375,3 +375,10 @@ da_dispatcher: external_proof_integration_api: http_port: 3073 + +consensus: + port: 3054 + server_addr: "127.0.0.1:3054" + public_addr: "127.0.0.1:3054" + max_payload_size: 2500000 + gossip_dynamic_inbound_limit: 100 diff --git a/etc/nix/tee_prover.nix b/etc/nix/tee_prover.nix index 0b424522dffb..55545d1bb8e4 100644 --- a/etc/nix/tee_prover.nix +++ b/etc/nix/tee_prover.nix @@ -1,12 +1,19 @@ -{ cargoArtifacts -, craneLib +{ craneLib , commonArgs }: -craneLib.buildPackage (commonArgs // { +let pname = "zksync_tee_prover"; + cargoExtraArgs = "--locked -p zksync_tee_prover"; +in +craneLib.buildPackage (commonArgs // { + inherit pname; version = (builtins.fromTOML (builtins.readFile ../../core/bin/zksync_tee_prover/Cargo.toml)).package.version; - cargoExtraArgs = "-p zksync_tee_prover --bin zksync_tee_prover"; - inherit cargoArtifacts; + inherit cargoExtraArgs; + + cargoArtifacts = craneLib.buildDepsOnly (commonArgs // { + inherit pname; + inherit cargoExtraArgs; + }); postInstall = '' strip $out/bin/zksync_tee_prover diff --git a/etc/nix/zksync.nix b/etc/nix/zksync.nix index c5fffc48b09d..1ecac58b5d91 100644 
--- a/etc/nix/zksync.nix +++ b/etc/nix/zksync.nix @@ -1,12 +1,14 @@ -{ cargoArtifacts -, craneLib +{ craneLib , commonArgs }: craneLib.buildPackage (commonArgs // { pname = "zksync"; version = (builtins.fromTOML (builtins.readFile ../../core/bin/zksync_tee_prover/Cargo.toml)).package.version; cargoExtraArgs = "--all"; - inherit cargoArtifacts; + + cargoArtifacts = craneLib.buildDepsOnly (commonArgs // { + pname = "zksync-era-workspace"; + }); outputs = [ "out" diff --git a/flake.nix b/flake.nix index ef618816f9c9..3321e67e27b7 100644 --- a/flake.nix +++ b/flake.nix @@ -47,7 +47,7 @@ packages = { # to ease potential cross-compilation, the overlay is used inherit (appliedOverlay.zksync-era) zksync tee_prover container-tee-prover-azure container-tee-prover-dcap; - default = appliedOverlay.zksync-era.zksync; + default = appliedOverlay.zksync-era.tee_prover; }; devShells.default = appliedOverlay.zksync-era.devShell; @@ -107,10 +107,6 @@ strictDeps = true; inherit hardeningEnable; }; - - cargoArtifacts = craneLib.buildDepsOnly (commonArgs // { - pname = "zksync-era-workspace"; - }); in { zksync-era = rec { @@ -120,12 +116,11 @@ }; zksync = pkgs.callPackage ./etc/nix/zksync.nix { - inherit cargoArtifacts; inherit craneLib; inherit commonArgs; }; + tee_prover = pkgs.callPackage ./etc/nix/tee_prover.nix { - inherit cargoArtifacts; inherit craneLib; inherit commonArgs; }; diff --git a/zk_toolbox/crates/config/src/consts.rs b/zk_toolbox/crates/config/src/consts.rs index 80b204cc6191..f462ce33b8f8 100644 --- a/zk_toolbox/crates/config/src/consts.rs +++ b/zk_toolbox/crates/config/src/consts.rs @@ -62,8 +62,6 @@ pub const DEFAULT_EXPLORER_WORKER_PORT: u16 = 3001; pub const DEFAULT_EXPLORER_API_PORT: u16 = 3002; /// Default port for the explorer data fetcher service pub const DEFAULT_EXPLORER_DATA_FETCHER_PORT: u16 = 3040; -/// Default port for consensus service -pub const DEFAULT_CONSENSUS_PORT: u16 = 3054; pub const EXPLORER_API_DOCKER_IMAGE: &str = 
"matterlabs/block-explorer-api"; pub const EXPLORER_DATA_FETCHER_DOCKER_IMAGE: &str = "matterlabs/block-explorer-data-fetcher"; diff --git a/zk_toolbox/crates/zk_inception/src/commands/chain/init/configs.rs b/zk_toolbox/crates/zk_inception/src/commands/chain/init/configs.rs index d0897473b832..37ee2e076ab9 100644 --- a/zk_toolbox/crates/zk_inception/src/commands/chain/init/configs.rs +++ b/zk_toolbox/crates/zk_inception/src/commands/chain/init/configs.rs @@ -2,7 +2,7 @@ use anyhow::Context; use common::logger; use config::{ copy_configs, set_l1_rpc_url, traits::SaveConfigWithBasePath, update_from_chain_config, - ChainConfig, ContractsConfig, EcosystemConfig, DEFAULT_CONSENSUS_PORT, + ChainConfig, ContractsConfig, EcosystemConfig, }; use ethers::types::Address; use xshell::Shell; @@ -15,13 +15,12 @@ use crate::{ }, portal::update_portal_config, }, - defaults::PORT_RANGE_END, messages::{ - MSG_CHAIN_CONFIGS_INITIALIZED, MSG_CHAIN_NOT_FOUND_ERR, + MSG_CHAIN_CONFIGS_INITIALIZED, MSG_CHAIN_NOT_FOUND_ERR, MSG_CONSENSUS_CONFIG_MISSING_ERR, MSG_PORTAL_FAILED_TO_CREATE_CONFIG_ERR, }, utils::{ - consensus::{generate_consensus_keys, get_consensus_config, get_consensus_secrets}, + consensus::{generate_consensus_keys, get_consensus_secrets, get_genesis_specs}, ports::EcosystemPortsScanner, }, }; @@ -57,22 +56,14 @@ pub async fn init_configs( )?; } - // Initialize general config let mut general_config = chain_config.get_general_config()?; - - // TODO: This is a temporary solution. 
We should allocate consensus port using `EcosystemPorts::allocate_ports_in_yaml` - let offset = ((chain_config.id - 1) * 100) as u16; - let consensus_port_range = DEFAULT_CONSENSUS_PORT + offset..PORT_RANGE_END; - let consensus_port = - ecosystem_ports.allocate_port(consensus_port_range, "Consensus".to_string())?; + let mut consensus_config = general_config + .consensus_config + .context(MSG_CONSENSUS_CONFIG_MISSING_ERR)?; let consensus_keys = generate_consensus_keys(); - let consensus_config = get_consensus_config( - chain_config, - consensus_port, - Some(consensus_keys.clone()), - None, - )?; + consensus_config.genesis_spec = Some(get_genesis_specs(chain_config, &consensus_keys)); + general_config.consensus_config = Some(consensus_config); general_config.save_with_base_path(shell, &chain_config.configs)?; diff --git a/zk_toolbox/crates/zk_inception/src/commands/consensus/mod.rs b/zk_toolbox/crates/zk_inception/src/commands/consensus/mod.rs index a21ba2d62cf9..1855a5943dc7 100644 --- a/zk_toolbox/crates/zk_inception/src/commands/consensus/mod.rs +++ b/zk_toolbox/crates/zk_inception/src/commands/consensus/mod.rs @@ -3,7 +3,7 @@ use std::{borrow::Borrow, collections::HashMap, path::PathBuf, sync::Arc}; /// Consensus registry contract operations. /// Includes code duplicated from `zksync_node_consensus::registry::abi`. use anyhow::Context as _; -use common::logger; +use common::{logger, wallets::Wallet}; use config::EcosystemConfig; use conv::*; use ethers::{ @@ -11,7 +11,7 @@ use ethers::{ contract::{FunctionCall, Multicall}, middleware::{Middleware, NonceManagerMiddleware, SignerMiddleware}, providers::{Http, JsonRpcClient, PendingTransaction, Provider, RawCall as _}, - signers::Signer as _, + signers::{LocalWallet, Signer as _}, types::{Address, BlockId, H256}, }; use xshell::Shell; @@ -174,19 +174,20 @@ impl Setup { )?) 
} - fn governor(&self) -> anyhow::Result> { - let governor = self + fn governor(&self) -> anyhow::Result { + Ok(self .chain .get_wallets_config() .context("get_wallets_config()")? - .governor - .private_key - .context(messages::MSG_GOVERNOR_PRIVATE_KEY_NOT_SET)?; - let governor = governor.with_chain_id(self.genesis.l2_chain_id.as_u64()); + .governor) + } + + fn signer(&self, wallet: LocalWallet) -> anyhow::Result> { + let wallet = wallet.with_chain_id(self.genesis.l2_chain_id.as_u64()); let provider = self.provider().context("provider()")?; - let signer = SignerMiddleware::new(provider, governor.clone()); + let signer = SignerMiddleware::new(provider, wallet.clone()); // Allows us to send next transaction without waiting for the previous to complete. - let signer = NonceManagerMiddleware::new(signer, governor.address()); + let signer = NonceManagerMiddleware::new(signer, wallet.address()); Ok(Arc::new(signer)) } @@ -279,10 +280,25 @@ impl Setup { let provider = self.provider().context("provider()")?; let block_id = self.last_block(&provider).await.context("last_block()")?; let governor = self.governor().context("governor()")?; + let signer = self.signer( + governor + .private_key + .clone() + .context(messages::MSG_GOVERNOR_PRIVATE_KEY_NOT_SET)?, + )?; let consensus_registry = self - .consensus_registry(governor.clone()) + .consensus_registry(signer.clone()) .context("consensus_registry()")?; - let mut multicall = self.multicall(governor.clone()).context("multicall()")?; + let mut multicall = self.multicall(signer).context("multicall()")?; + + let owner = consensus_registry.owner().call().await.context("owner()")?; + if owner != governor.address { + anyhow::bail!( + "governor ({:#x}) is different than the consensus registry owner ({:#x})", + governor.address, + owner + ); + } // Fetch contract state. 
let n: usize = consensus_registry diff --git a/zk_toolbox/crates/zk_inception/src/commands/external_node/prepare_configs.rs b/zk_toolbox/crates/zk_inception/src/commands/external_node/prepare_configs.rs index 5ab859d17f0a..d714a0f8e843 100644 --- a/zk_toolbox/crates/zk_inception/src/commands/external_node/prepare_configs.rs +++ b/zk_toolbox/crates/zk_inception/src/commands/external_node/prepare_configs.rs @@ -6,12 +6,12 @@ use config::{ external_node::ENConfig, set_rocks_db_config, traits::{FileConfigWithDefaultName, SaveConfigWithBasePath}, - ChainConfig, EcosystemConfig, GeneralConfig, SecretsConfig, DEFAULT_CONSENSUS_PORT, + ChainConfig, EcosystemConfig, GeneralConfig, SecretsConfig, }; use xshell::Shell; use zksync_basic_types::url::SensitiveUrl; use zksync_config::configs::{ - consensus::{ConsensusSecrets, NodeSecretKey, Secret}, + consensus::{ConsensusConfig, ConsensusSecrets, NodeSecretKey, Secret}, DatabaseSecrets, L1Secrets, }; use zksync_consensus_crypto::TextFmt; @@ -19,14 +19,13 @@ use zksync_consensus_roles as roles; use crate::{ commands::external_node::args::prepare_configs::{PrepareConfigArgs, PrepareConfigFinal}, - defaults::PORT_RANGE_END, messages::{ msg_preparing_en_config_is_done, MSG_CHAIN_NOT_INITIALIZED, MSG_CONSENSUS_CONFIG_MISSING_ERR, MSG_CONSENSUS_SECRETS_MISSING_ERR, MSG_CONSENSUS_SECRETS_NODE_KEY_MISSING_ERR, MSG_PREPARING_EN_CONFIGS, }, utils::{ - consensus::{get_consensus_config, node_public_key}, + consensus::node_public_key, ports::EcosystemPortsScanner, rocks_db::{recreate_rocksdb_dirs, RocksDBDirOption}, }, @@ -79,19 +78,12 @@ fn prepare_configs( bridge_addresses_refresh_interval_sec: None, }; let mut general_en = general.clone(); + general_en.consensus_config = None; let main_node_consensus_config = general .consensus_config .context(MSG_CONSENSUS_CONFIG_MISSING_ERR)?; - - // TODO: This is a temporary solution. 
We should allocate consensus port using `EcosystemPorts::allocate_ports_in_yaml` - ports.add_port_info( - main_node_consensus_config.server_addr.port(), - "Main node consensus".to_string(), - ); - let offset = ((config.id - 1) * 100) as u16; - let consensus_port_range = DEFAULT_CONSENSUS_PORT + offset..PORT_RANGE_END; - let consensus_port = ports.allocate_port(consensus_port_range, "Consensus".to_string())?; + let mut en_consensus_config = main_node_consensus_config.clone(); let mut gossip_static_outbound = BTreeMap::new(); let main_node_public_key = node_public_key( @@ -101,13 +93,8 @@ fn prepare_configs( .context(MSG_CONSENSUS_SECRETS_MISSING_ERR)?, )? .context(MSG_CONSENSUS_SECRETS_NODE_KEY_MISSING_ERR)?; - gossip_static_outbound.insert(main_node_public_key, main_node_consensus_config.public_addr); - - let en_consensus_config = - get_consensus_config(config, consensus_port, None, Some(gossip_static_outbound))?; - general_en.consensus_config = Some(en_consensus_config.clone()); - en_consensus_config.save_with_base_path(shell, en_configs_path)?; + en_consensus_config.gossip_static_outbound = gossip_static_outbound; // Set secrets config let node_key = roles::node::SecretKey::generate().encode(); @@ -128,16 +115,25 @@ fn prepare_configs( }), data_availability: None, }; - secrets.save_with_base_path(shell, en_configs_path)?; + let dirs = recreate_rocksdb_dirs(shell, &config.rocks_db_path, RocksDBDirOption::ExternalNode)?; set_rocks_db_config(&mut general_en, dirs)?; + general_en.save_with_base_path(shell, en_configs_path)?; en_config.save_with_base_path(shell, en_configs_path)?; + en_consensus_config.save_with_base_path(shell, en_configs_path)?; + secrets.save_with_base_path(shell, en_configs_path)?; + let offset = 0; // This is zero because general_en ports already have a chain offset ports.allocate_ports_in_yaml( shell, &GeneralConfig::get_path_with_base_path(en_configs_path), - 0, // This is zero because general_en ports already have a chain offset + offset, + 
)?; + ports.allocate_ports_in_yaml( + shell, + &ConsensusConfig::get_path_with_base_path(en_configs_path), + offset, )?; Ok(()) diff --git a/zk_toolbox/crates/zk_inception/src/consts.rs b/zk_toolbox/crates/zk_inception/src/consts.rs index 9f81847e3336..df27d2f02d2c 100644 --- a/zk_toolbox/crates/zk_inception/src/consts.rs +++ b/zk_toolbox/crates/zk_inception/src/consts.rs @@ -1,5 +1,3 @@ -use std::net::{IpAddr, Ipv4Addr}; - pub const AMOUNT_FOR_DISTRIBUTION_TO_WALLETS: u128 = 1000000000000000000000; pub const MINIMUM_BALANCE_FOR_WALLET: u128 = 5000000000000000000; @@ -12,27 +10,6 @@ pub const DEFAULT_UNSIGNED_TRANSACTIONS_DIR: &str = "transactions"; pub const BELLMAN_CUDA_DIR: &str = "era-bellman-cuda"; pub const L2_BASE_TOKEN_ADDRESS: &str = "0x000000000000000000000000000000000000800A"; -#[allow(non_upper_case_globals)] -const kB: usize = 1024; - -/// Max payload size for consensus in bytes -pub const MAX_PAYLOAD_SIZE: usize = 2_500_000; -/// Max batch size for consensus in bytes -/// Compute a default batch size, so operators are not caught out by the missing setting -/// while we're still working on batch syncing. The batch interval is ~1 minute, -/// so there will be ~60 blocks, and an Ethereum Merkle proof is ~1kB, but under high -/// traffic there can be thousands of huge transactions that quickly fill up blocks -/// and there could be more blocks in a batch then expected. We chose a generous -/// limit so as not to prevent any legitimate batch from being transmitted. 
-pub const MAX_BATCH_SIZE: usize = MAX_PAYLOAD_SIZE * 5000 + kB; -/// Gossip dynamic inbound limit for consensus -pub const GOSSIP_DYNAMIC_INBOUND_LIMIT: usize = 100; - -/// Public address for consensus -pub const CONSENSUS_PUBLIC_ADDRESS_HOST: IpAddr = IpAddr::V4(Ipv4Addr::UNSPECIFIED); -/// Server address for consensus -pub const CONSENSUS_SERVER_ADDRESS_HOST: IpAddr = IpAddr::V4(Ipv4Addr::LOCALHOST); - /// Path to the JS runtime config for the block-explorer-app docker container to be mounted to pub const EXPLORER_APP_DOCKER_CONFIG_PATH: &str = "/usr/src/app/packages/app/dist/config.js"; pub const EXPLORER_APP_DOCKER_IMAGE: &str = "matterlabs/block-explorer-app"; diff --git a/zk_toolbox/crates/zk_inception/src/utils/consensus.rs b/zk_toolbox/crates/zk_inception/src/utils/consensus.rs index 2979b4df0c19..946d28a33fbd 100644 --- a/zk_toolbox/crates/zk_inception/src/utils/consensus.rs +++ b/zk_toolbox/crates/zk_inception/src/utils/consensus.rs @@ -1,24 +1,14 @@ -use std::{ - collections::{BTreeMap, BTreeSet}, - net::SocketAddr, -}; - use anyhow::Context as _; use config::ChainConfig; use secrecy::{ExposeSecret, Secret}; use zksync_config::configs::consensus::{ - AttesterPublicKey, AttesterSecretKey, ConsensusConfig, ConsensusSecrets, GenesisSpec, Host, - NodePublicKey, NodeSecretKey, ProtocolVersion, ValidatorPublicKey, ValidatorSecretKey, - WeightedAttester, WeightedValidator, + AttesterPublicKey, AttesterSecretKey, ConsensusSecrets, GenesisSpec, NodePublicKey, + NodeSecretKey, ProtocolVersion, ValidatorPublicKey, ValidatorSecretKey, WeightedAttester, + WeightedValidator, }; use zksync_consensus_crypto::{Text, TextFmt}; use zksync_consensus_roles::{attester, node, validator}; -use crate::consts::{ - CONSENSUS_PUBLIC_ADDRESS_HOST, CONSENSUS_SERVER_ADDRESS_HOST, GOSSIP_DYNAMIC_INBOUND_LIMIT, - MAX_BATCH_SIZE, MAX_PAYLOAD_SIZE, -}; - pub(crate) fn parse_attester_committee( attesters: &[WeightedAttester], ) -> anyhow::Result { @@ -48,32 +38,6 @@ pub struct 
ConsensusPublicKeys { attester_key: attester::PublicKey, } -pub fn get_consensus_config( - chain_config: &ChainConfig, - consensus_port: u16, - consensus_keys: Option, - gossip_static_outbound: Option>, -) -> anyhow::Result { - let genesis_spec = - consensus_keys.map(|consensus_keys| get_genesis_specs(chain_config, &consensus_keys)); - - let public_addr = SocketAddr::new(CONSENSUS_PUBLIC_ADDRESS_HOST, consensus_port); - let server_addr = SocketAddr::new(CONSENSUS_SERVER_ADDRESS_HOST, consensus_port); - - Ok(ConsensusConfig { - server_addr, - public_addr: Host(public_addr.encode()), - genesis_spec, - max_payload_size: MAX_PAYLOAD_SIZE, - gossip_dynamic_inbound_limit: GOSSIP_DYNAMIC_INBOUND_LIMIT, - max_batch_size: MAX_BATCH_SIZE, - gossip_static_inbound: BTreeSet::new(), - gossip_static_outbound: gossip_static_outbound.unwrap_or_default(), - rpc: None, - debug_page_addr: None, - }) -} - pub fn generate_consensus_keys() -> ConsensusSecretKeys { ConsensusSecretKeys { validator_key: validator::SecretKey::generate(), diff --git a/zk_toolbox/crates/zk_inception/src/utils/ports.rs b/zk_toolbox/crates/zk_inception/src/utils/ports.rs index 018fb79f345d..04c8cef5ff59 100644 --- a/zk_toolbox/crates/zk_inception/src/utils/ports.rs +++ b/zk_toolbox/crates/zk_inception/src/utils/ports.rs @@ -1,4 +1,4 @@ -use std::{collections::HashMap, fmt, ops::Range, path::Path}; +use std::{collections::HashMap, fmt, net::SocketAddr, ops::Range, path::Path}; use anyhow::{bail, Context, Result}; use config::{ @@ -109,6 +109,12 @@ impl EcosystemPorts { } } } + } else if key.as_str().map(|s| s.ends_with("addr")).unwrap_or(false) { + let socket_addr = val.as_str().unwrap().parse::()?; + if let Some(new_port) = updated_ports.get(&socket_addr.port()) { + let new_socket_addr = SocketAddr::new(socket_addr.ip(), *new_port); + *val = Value::String(new_socket_addr.to_string()); + } } } // Continue traversing