From 6bfb49951eedd0293b39cfab758c0378967b7448 Mon Sep 17 00:00:00 2001 From: aon <21188659+aon@users.noreply.github.com> Date: Fri, 31 May 2024 16:08:04 -0300 Subject: [PATCH 1/8] feat: add integration test ci for zk_toolbox --- .github/workflows/ci-zk-toolbox-reusable.yml | 81 +++++++++++++++++++ .github/workflows/ci.yml | 11 +++ core/tests/ts-integration/src/env.ts | 6 +- zk_toolbox/.gitignore | 1 + .../forge_interface/deploy_ecosystem/input.rs | 7 ++ .../src/commands/integration_tests.rs | 25 ++++++ .../crates/zk_supervisor/src/commands/mod.rs | 1 + zk_toolbox/crates/zk_supervisor/src/main.rs | 11 ++- .../crates/zk_supervisor/src/messages.rs | 9 +++ 9 files changed, 148 insertions(+), 4 deletions(-) create mode 100644 .github/workflows/ci-zk-toolbox-reusable.yml create mode 100644 zk_toolbox/.gitignore create mode 100644 zk_toolbox/crates/zk_supervisor/src/commands/integration_tests.rs diff --git a/.github/workflows/ci-zk-toolbox-reusable.yml b/.github/workflows/ci-zk-toolbox-reusable.yml new file mode 100644 index 000000000000..3afa7eede4fa --- /dev/null +++ b/.github/workflows/ci-zk-toolbox-reusable.yml @@ -0,0 +1,81 @@ +name: Workflow template for CI jobs for Core Components +on: + workflow_call: + +jobs: + lint: + name: lint + uses: ./.github/workflows/ci-core-lint-reusable.yml + + build: + runs-on: [matterlabs-ci-runner] + + steps: + - uses: actions/checkout@a5ac7e51b41094c92402da3b24376905380afc29 # v4 + with: + submodules: "recursive" + fetch-depth: 0 + + - name: Setup environment + run: | + echo ZKSYNC_HOME=$(pwd) >> $GITHUB_ENV + echo $(pwd)/bin >> $GITHUB_PATH + echo IN_DOCKER=1 >> .env + + - name: Start services + run: | + ci_localnet_up + + - name: Build + run: | + ci_run bash -c "cd zk_toolbox && cargo build --release" + + # Compress with tar to avoid permission loss + # https://github.com/actions/upload-artifact?tab=readme-ov-file#permission-loss + - name: Tar zk_toolbox binaries + run: | + tar -C ./zk_toolbox/target/release -cvf zk_toolbox.tar zk_inception zk_supervisor + + - name: Upload zk_toolbox binaries + uses: actions/upload-artifact@v4 + with: + name: zk_toolbox + path: zk_toolbox.tar + compression-level: 0 + + integration_test: + runs-on: [matterlabs-ci-runner] + needs: [build] + + steps: + - uses: actions/checkout@a5ac7e51b41094c92402da3b24376905380afc29 # v4 + with: + submodules: "recursive" + fetch-depth: 0 + + - name: Download zk_toolbox binaries + uses: actions/download-artifact@v4 + with: + name: zk_toolbox + path: . 
+ + - name: Extract zk_toolbox binaries + run: | + tar -xvf zk_toolbox.tar -C ./bin + + - name: Setup environment + run: | + echo ZKSYNC_HOME=$(pwd) >> $GITHUB_ENV + echo $(pwd)/bin >> $GITHUB_PATH + echo IN_DOCKER=1 >> .env + + - name: Run tests + run: | + ci_run zk_inception ecosystem init --deploy-paymaster --deploy-erc20 \ + --deploy-ecosystem --l1-rpc-url=http://localhost:8545 \ + --server-db-url=postgres://postgres:notsecurepassword@localhost:5432 \ + --server-db-name=zksync_server_localhost_era \ + --prover-db-url=postgres://postgres:notsecurepassword@localhost:5432 \ + --prover-db-name=zksync_prover_localhost_era + ci_run zk_inception server & + ci_run zk_supervisor integration-tests diff --git a/.github/workflows/ci.yml b/.github/workflows/ci.yml index 21e3104a5dcd..31b388b4cfa1 100644 --- a/.github/workflows/ci.yml +++ b/.github/workflows/ci.yml @@ -20,6 +20,7 @@ jobs: outputs: core: ${{ steps.changed-files.outputs.core_any_changed }} prover: ${{ steps.changed-files.outputs.prover_any_changed }} + zk_toolbox: ${{ steps.changed-files.outputs.zk_toolbox_any_changed }} docs: ${{ steps.changed-files.outputs.docs_any_changed }} all: ${{ steps.changed-files.outputs.all_any_changed }} steps: @@ -60,6 +61,10 @@ jobs: - '!**/*.md' - '!**/*.MD' - 'docker-compose.yml' + zk_toolbox: + - 'zk_toolbox/**' + - '!**/*.md' + - '!**/*.MD' docs: - '**/*.md' - '**/*.MD' @@ -91,6 +96,12 @@ jobs: name: CI for Prover Components uses: ./.github/workflows/ci-prover-reusable.yml + ci-for-zk-toolbox: + needs: changed_files + if: ${{ (needs.changed_files.outputs.zk_toolbox == 'true' || needs.changed_files.outputs.all == 'true') && !contains(github.ref_name, 'release-please--branches') }} + name: CI for zk_toolbox + uses: ./.github/workflows/ci-zk-toolbox-reusable.yml + ci-for-docs: needs: changed_files if: needs.changed_files.outputs.docs == 'true' diff --git a/core/tests/ts-integration/src/env.ts b/core/tests/ts-integration/src/env.ts index ada8a695e0aa..60175d621da9 100644 --- a/core/tests/ts-integration/src/env.ts +++ b/core/tests/ts-integration/src/env.ts @@ -80,9 +80,12 @@ async function loadTestEnvironmentFromFile(chain: string): Promise anyhow::Result<()> { + let ecosystem_config = EcosystemConfig::from_file(shell)?; + let _dir_guard = shell.push_dir(ecosystem_config.link_to_code.join(TS_INTEGRATION_PATH)); + + logger::info(MSG_INTEGRATION_TESTS_RUN_INFO); + + Cmd::new( + cmd!(shell, "yarn jest --forceExit --testTimeout 60000") + .env("CHAIN_NAME", ecosystem_config.default_chain), + ) + .with_force_run() + .run()?; + + logger::outro(MSG_INTEGRATION_TESTS_RUN_SUCCESS); + + Ok(()) +} diff --git a/zk_toolbox/crates/zk_supervisor/src/commands/mod.rs b/zk_toolbox/crates/zk_supervisor/src/commands/mod.rs index 8fd0a6be869b..98d4cdfe990d 100644 --- a/zk_toolbox/crates/zk_supervisor/src/commands/mod.rs +++ b/zk_toolbox/crates/zk_supervisor/src/commands/mod.rs @@ -1 +1,2 @@ pub mod database; +pub mod integration_tests; diff --git a/zk_toolbox/crates/zk_supervisor/src/main.rs b/zk_toolbox/crates/zk_supervisor/src/main.rs index 24daaba35347..ab5629465a88 100644 --- a/zk_toolbox/crates/zk_supervisor/src/main.rs +++ b/zk_toolbox/crates/zk_supervisor/src/main.rs @@ -6,7 +6,10 @@ use common::{ init_prompt_theme, logger, }; use config::EcosystemConfig; -use messages::msg_global_chain_does_not_exist; +use messages::{ + msg_global_chain_does_not_exist, MSG_SUBCOMMAND_DATABASE_ABOUT, + MSG_SUBCOMMAND_INTEGRATION_TESTS_ABOUT, +}; use xshell::Shell; mod commands; @@ -24,9 +27,10 @@ struct Supervisor { #[derive(Subcommand, 
Debug)] enum SupervisorSubcommands { - /// Database related commands - #[command(subcommand)] + #[command(subcommand, about = MSG_SUBCOMMAND_DATABASE_ABOUT)] Database(DatabaseCommands), + #[command(about = MSG_SUBCOMMAND_INTEGRATION_TESTS_ABOUT)] + IntegrationTests, } #[derive(Parser, Debug)] @@ -89,6 +93,7 @@ async fn main() -> anyhow::Result<()> { async fn run_subcommand(args: Supervisor, shell: &Shell) -> anyhow::Result<()> { match args.command { SupervisorSubcommands::Database(command) => commands::database::run(shell, command).await?, + SupervisorSubcommands::IntegrationTests => commands::integration_tests::run(shell)?, } Ok(()) } diff --git a/zk_toolbox/crates/zk_supervisor/src/messages.rs b/zk_toolbox/crates/zk_supervisor/src/messages.rs index 97152396b5e5..fa3c1ae19206 100644 --- a/zk_toolbox/crates/zk_supervisor/src/messages.rs +++ b/zk_toolbox/crates/zk_supervisor/src/messages.rs @@ -4,6 +4,10 @@ pub(super) fn msg_global_chain_does_not_exist(chain: &str, available_chains: &st format!("Chain with name {chain} doesnt exist, please choose one of: {available_chains}") } +// Subcommands help +pub(super) const MSG_SUBCOMMAND_DATABASE_ABOUT: &str = "Database related commands"; +pub(super) const MSG_SUBCOMMAND_INTEGRATION_TESTS_ABOUT: &str = "Run integration tests"; + // Database related messages pub(super) const MSG_NO_DATABASES_SELECTED: &str = "No databases selected"; pub(super) fn msg_database_info(gerund_verb: &str) -> String { @@ -57,3 +61,8 @@ pub(super) fn msg_database_new_migration_loading(dal: &str) -> String { format!("Creating new database migration for dal {}...", dal) } pub(super) const MSG_DATABASE_NEW_MIGRATION_SUCCESS: &str = "Migration created successfully"; + +// Integration tests related messages +pub(super) const MSG_INTEGRATION_TESTS_RUN_ABOUT: &str = "Run integration tests"; +pub(super) const MSG_INTEGRATION_TESTS_RUN_INFO: &str = "Running integration tests"; +pub(super) const MSG_INTEGRATION_TESTS_RUN_SUCCESS: &str = "Integration tests ran successfully"; From 446b9c230e8f68e5319fa20597baa2b9104e55d7 Mon Sep 17 00:00:00 2001 From: aon <21188659+aon@users.noreply.github.com> Date: Tue, 4 Jun 2024 16:27:39 -0300 Subject: [PATCH 2/8] fix: add integration ci and necessary zk_supervisor commands --- .github/release-please/manifest.json | 4 +- .../build-contract-verifier-template.yml | 1 + .github/workflows/build-core-template.yml | 1 + .github/workflows/ci-core-reusable.yml | 8 +- .github/workflows/ci-zk-toolbox-reusable.yml | 39 +- Cargo.lock | 12 +- bin/pre_download_compilers.sh | 43 +++ contracts | 2 +- core/CHANGELOG.md | 27 ++ core/bin/block_reverter/src/main.rs | 20 +- core/bin/external_node/Cargo.toml | 2 +- core/bin/external_node/src/config/mod.rs | 2 - core/bin/external_node/src/helpers.rs | 29 -- core/bin/external_node/src/main.rs | 45 +-- core/bin/external_node/src/tests.rs | 3 +- .../external_node/src/version_sync_task.rs | 131 ------- core/bin/zksync_server/src/main.rs | 4 +- core/bin/zksync_server/src/node_builder.rs | 15 + core/lib/basic_types/src/basic_fri_types.rs | 12 + core/lib/basic_types/src/protocol_version.rs | 18 - core/lib/basic_types/src/web3/mod.rs | 43 ++- core/lib/config/src/configs/general.rs | 2 + core/lib/config/src/configs/mod.rs | 2 + core/lib/config/src/configs/vm_runner.rs | 16 + core/lib/contracts/src/lib.rs | 2 +- ...f125cf30578457040c14fd6882c73a87fb3d6.json | 20 + ...5d03a811221d4ddf26e2e0ddc34147a0d8e23.json | 22 ++ ...1687e91d8367347b3830830a4c76407d60bc5.json | 14 + ..._vm_runner_protective_reads_table.down.sql | 1 + 
...dd_vm_runner_protective_reads_table.up.sql | 7 + core/lib/dal/src/consensus/mod.rs | 25 +- core/lib/dal/src/consensus/proto/mod.proto | 15 +- core/lib/dal/src/lib.rs | 9 +- .../lib/dal/src/models/storage_transaction.rs | 11 +- core/lib/dal/src/models/tests.rs | 3 - core/lib/dal/src/tests/mod.rs | 6 +- core/lib/dal/src/vm_runner_dal.rs | 83 +++++ core/lib/env_config/src/lib.rs | 1 + core/lib/env_config/src/vm_runner.rs | 9 + core/lib/eth_client/src/clients/http/query.rs | 47 +-- .../eth_client/src/clients/http/signing.rs | 12 +- core/lib/eth_client/src/clients/mock.rs | 8 +- core/lib/eth_client/src/lib.rs | 53 +-- core/lib/eth_client/src/types.rs | 41 ++- core/lib/mempool/src/tests.rs | 4 +- core/lib/multivm/src/utils.rs | 31 +- .../src/versions/vm_latest/constants.rs | 4 +- core/lib/object_store/Cargo.toml | 2 + core/lib/object_store/src/file.rs | 45 ++- core/lib/object_store/src/gcs.rs | 127 ++++--- core/lib/object_store/src/raw.rs | 78 +++- core/lib/protobuf_config/src/general.rs | 6 + core/lib/protobuf_config/src/lib.rs | 1 + .../src/proto/config/general.proto | 2 + .../src/proto/config/vm_runner.proto | 8 + core/lib/protobuf_config/src/vm_runner.rs | 27 ++ core/lib/snapshots_applier/src/lib.rs | 11 +- core/lib/snapshots_applier/src/tests/mod.rs | 12 +- core/lib/types/src/l1/mod.rs | 95 ++++- core/lib/types/src/protocol_upgrade.rs | 101 +++-- core/lib/types/src/transaction_request.rs | 57 +-- core/lib/utils/src/env.rs | 6 +- core/lib/zksync_core_leftovers/src/lib.rs | 5 + .../src/temp_config_store/mod.rs | 4 +- .../api_server/src/execution_sandbox/apply.rs | 6 +- .../src/execution_sandbox/execute.rs | 21 +- core/node/api_server/src/tx_sender/mod.rs | 20 +- .../api_server/src/web3/namespaces/debug.rs | 16 +- .../api_server/src/web3/namespaces/eth.rs | 21 +- core/node/block_reverter/Cargo.toml | 2 + core/node/block_reverter/src/lib.rs | 40 +- core/node/block_reverter/src/tests.rs | 173 ++++++++- .../src/validation_task.rs | 8 +- core/node/consistency_checker/src/lib.rs | 8 +- core/node/eth_sender/src/error.rs | 13 +- core/node/eth_sender/src/eth_tx_aggregator.rs | 63 ++-- core/node/eth_sender/src/eth_tx_manager.rs | 30 +- core/node/eth_sender/src/lib.rs | 2 +- core/node/eth_sender/src/tests.rs | 4 +- core/node/eth_watch/src/client.rs | 26 +- .../event_processors/governance_upgrades.rs | 15 +- .../eth_watch/src/event_processors/mod.rs | 7 +- core/node/eth_watch/src/tests.rs | 18 +- .../src/l1_gas_price/gas_adjuster/mod.rs | 8 +- core/node/genesis/src/lib.rs | 2 +- core/node/house_keeper/src/prover/metrics.rs | 5 +- .../fri_proof_compressor_queue_reporter.rs | 4 +- .../fri_prover_queue_reporter.rs | 5 +- .../fri_witness_generator_queue_reporter.rs | 9 +- core/node/node_framework/Cargo.toml | 3 + core/node/node_framework/examples/showcase.rs | 14 +- .../layers/circuit_breaker_checker.rs | 6 +- .../layers/commitment_generator.rs | 6 +- .../src/implementations/layers/consensus.rs | 10 +- .../layers/consistency_checker.rs | 6 +- .../layers/contract_verification_api.rs | 6 +- .../src/implementations/layers/eth_sender.rs | 10 +- .../src/implementations/layers/eth_watch.rs | 6 +- .../layers/healtcheck_server.rs | 6 +- .../implementations/layers/house_keeper.rs | 46 +-- .../src/implementations/layers/l1_gas.rs | 6 +- .../layers/main_node_client.rs | 48 +++ .../layers/metadata_calculator.rs | 10 +- .../src/implementations/layers/mod.rs | 4 + .../layers/prometheus_exporter.rs | 6 +- .../layers/proof_data_handler.rs | 6 +- .../layers/reorg_detector_checker.rs | 71 ++++ 
.../layers/reorg_detector_runner.rs | 73 ++++ .../src/implementations/layers/sigint.rs | 6 +- .../layers/state_keeper/mempool_io.rs | 10 +- .../layers/state_keeper/mod.rs | 10 +- .../layers/tee_verifier_input_producer.rs | 6 +- .../implementations/layers/vm_runner/mod.rs | 34 ++ .../layers/vm_runner/protective_reads.rs | 86 +++++ .../implementations/layers/web3_api/caches.rs | 6 +- .../implementations/layers/web3_api/server.rs | 12 +- .../layers/web3_api/tx_sender.rs | 10 +- .../src/implementations/resources/mod.rs | 1 + .../src/implementations/resources/reverter.rs | 15 + core/node/node_framework/src/precondition.rs | 4 +- .../node_framework/src/service/context.rs | 10 +- .../node_framework/src/service/runnables.rs | 26 +- core/node/node_framework/src/service/tests.rs | 14 +- core/node/node_framework/src/task.rs | 50 ++- core/node/node_sync/Cargo.toml | 1 + .../src/tree_data_fetcher/metrics.rs | 19 +- .../node_sync/src/tree_data_fetcher/mod.rs | 112 +++--- .../src/tree_data_fetcher/provider/mod.rs | 347 ++++++++++++++++++ .../src/tree_data_fetcher/provider/tests.rs | 249 +++++++++++++ .../node_sync/src/tree_data_fetcher/tests.rs | 129 ++++--- .../io/seal_logic/l2_block_seal_subtasks.rs | 17 +- core/node/state_keeper/src/updates/mod.rs | 2 +- core/node/vm_runner/Cargo.toml | 2 +- core/node/vm_runner/src/impls/mod.rs | 3 + .../vm_runner/src/impls/protective_reads.rs | 193 ++++++++++ core/node/vm_runner/src/lib.rs | 4 +- core/node/vm_runner/src/output_handler.rs | 5 + core/node/vm_runner/src/process.rs | 3 +- core/node/vm_runner/src/storage.rs | 17 +- core/tests/loadnext/src/sdk/ethereum/mod.rs | 13 +- core/tests/test_account/src/lib.rs | 4 +- .../interfaces/ISystemContext.sol | 61 +++ core/tests/ts-integration/src/env.ts | 10 +- core/tests/ts-integration/src/helpers.ts | 2 + core/tests/ts-integration/tests/fees.test.ts | 12 + .../tests/ts-integration/tests/system.test.ts | 17 +- docs/guides/advanced/pubdata-with-blobs.md | 300 +++++++++++++++ docs/guides/advanced/pubdata.md | 53 +-- docs/guides/setup-dev.md | 2 +- etc/env/base/vm_runner.toml | 9 + etc/env/file_based/general.yaml | 4 + .../protocol-upgrade/src/transaction.ts | 2 +- infrastructure/zk/src/utils.ts | 3 +- prover/CHANGELOG.md | 14 + prover/Cargo.lock | 13 +- prover/proof_fri_compressor/src/main.rs | 4 +- ...e27807ede6b4db9541198cee2861b874b52f9.json | 32 -- ...f3ad13840d2c497760e9bd0513f68dc4271c.json} | 12 +- ...b99cf505662036f2dd7a9f1807c4c1bad7c7b.json | 38 ++ .../src/fri_proof_compressor_dal.rs | 11 +- prover/prover_dal/src/fri_prover_dal.rs | 7 +- .../src/fri_witness_generator_dal.rs | 11 +- prover/prover_fri/src/main.rs | 6 +- prover/prover_fri_types/src/lib.rs | 14 +- prover/prover_version/Cargo.toml | 2 +- prover/prover_version/src/main.rs | 4 +- prover/setup-data-gpu-keys.json | 6 +- .../data/commitments.json | 6 +- .../data/finalization_hints_basic_1.bin | Bin 276 -> 276 bytes .../snark_verification_scheduler_key.json | 32 +- .../data/verification_basic_1_key.json | 136 +++---- .../data/verification_leaf_3_key.json | 128 +++---- .../data/verification_scheduler_key.json | 128 +++---- .../src/keystore.rs | 13 +- prover/witness_generator/src/main.rs | 4 +- prover/witness_vector_generator/src/main.rs | 4 +- zk_toolbox/.gitignore | 1 - zk_toolbox/Cargo.lock | 19 + zk_toolbox/Cargo.toml | 1 + zk_toolbox/crates/config/Cargo.toml | 1 + zk_toolbox/crates/config/src/consts.rs | 2 +- zk_toolbox/crates/config/src/ecosystem.rs | 19 +- .../src/forge_interface/script_params.rs | 12 +- .../crates/types/src/protocol_version.rs | 8 +- 
.../zk_inception/src/commands/server.rs | 16 +- .../crates/zk_inception/src/messages.rs | 1 + .../src/commands/integration_tests.rs | 25 +- .../crates/zk_supervisor/src/messages.rs | 2 +- 188 files changed, 3729 insertions(+), 1263 deletions(-) create mode 100755 bin/pre_download_compilers.sh delete mode 100644 core/bin/external_node/src/version_sync_task.rs create mode 100644 core/lib/config/src/configs/vm_runner.rs create mode 100644 core/lib/dal/.sqlx/query-1f38966f65ce0ed8365b969d0a1f125cf30578457040c14fd6882c73a87fb3d6.json create mode 100644 core/lib/dal/.sqlx/query-c31632143b459ea6684908ce7a15d03a811221d4ddf26e2e0ddc34147a0d8e23.json create mode 100644 core/lib/dal/.sqlx/query-f2f1b6c4f4686b423a4c449c56e1687e91d8367347b3830830a4c76407d60bc5.json create mode 100644 core/lib/dal/migrations/20240522215934_add_vm_runner_protective_reads_table.down.sql create mode 100644 core/lib/dal/migrations/20240522215934_add_vm_runner_protective_reads_table.up.sql create mode 100644 core/lib/dal/src/vm_runner_dal.rs create mode 100644 core/lib/env_config/src/vm_runner.rs create mode 100644 core/lib/protobuf_config/src/proto/config/vm_runner.proto create mode 100644 core/lib/protobuf_config/src/vm_runner.rs create mode 100644 core/node/node_framework/src/implementations/layers/main_node_client.rs create mode 100644 core/node/node_framework/src/implementations/layers/reorg_detector_checker.rs create mode 100644 core/node/node_framework/src/implementations/layers/reorg_detector_runner.rs create mode 100644 core/node/node_framework/src/implementations/layers/vm_runner/mod.rs create mode 100644 core/node/node_framework/src/implementations/layers/vm_runner/protective_reads.rs create mode 100644 core/node/node_framework/src/implementations/resources/reverter.rs create mode 100644 core/node/node_sync/src/tree_data_fetcher/provider/mod.rs create mode 100644 core/node/node_sync/src/tree_data_fetcher/provider/tests.rs create mode 100644 core/node/vm_runner/src/impls/mod.rs create mode 100644 core/node/vm_runner/src/impls/protective_reads.rs create mode 100644 core/tests/ts-integration/contracts/custom-account/interfaces/ISystemContext.sol create mode 100644 docs/guides/advanced/pubdata-with-blobs.md create mode 100644 etc/env/base/vm_runner.toml delete mode 100644 prover/prover_dal/.sqlx/query-7496a9df12e409162338266085ce27807ede6b4db9541198cee2861b874b52f9.json rename prover/prover_dal/.sqlx/{query-5449963638944edc62b5ec293bcda37e6c0c6c85fe5f701ebc9919fcad749a23.json => query-a94fffdbc1827dc5df908ea1e99ef3ad13840d2c497760e9bd0513f68dc4271c.json} (52%) create mode 100644 prover/prover_dal/.sqlx/query-e9c9b69852fa68f463f17b6d63ab99cf505662036f2dd7a9f1807c4c1bad7c7b.json delete mode 100644 zk_toolbox/.gitignore diff --git a/.github/release-please/manifest.json b/.github/release-please/manifest.json index 3a4443af38b3..421fb661bc01 100644 --- a/.github/release-please/manifest.json +++ b/.github/release-please/manifest.json @@ -1,4 +1,4 @@ { - "core": "24.5.1", - "prover": "14.4.0" + "core": "24.7.0", + "prover": "14.5.0" } diff --git a/.github/workflows/build-contract-verifier-template.yml b/.github/workflows/build-contract-verifier-template.yml index 52f03243b414..f4f6939389bf 100644 --- a/.github/workflows/build-contract-verifier-template.yml +++ b/.github/workflows/build-contract-verifier-template.yml @@ -104,6 +104,7 @@ jobs: mkdir -p ./volumes/postgres run_retried docker compose pull zk postgres docker compose up -d zk postgres + ci_run pre_download_compilers.sh ci_run sccache --start-server - name: init 
diff --git a/.github/workflows/build-core-template.yml b/.github/workflows/build-core-template.yml index e19b644a512c..de8ab1505d8b 100644 --- a/.github/workflows/build-core-template.yml +++ b/.github/workflows/build-core-template.yml @@ -113,6 +113,7 @@ jobs: mkdir -p ./volumes/postgres run_retried docker compose pull zk postgres docker compose up -d zk postgres + ci_run pre_download_compilers.sh ci_run sccache --start-server - name: init diff --git a/.github/workflows/ci-core-reusable.yml b/.github/workflows/ci-core-reusable.yml index 02069c4259f4..72e75e085b16 100644 --- a/.github/workflows/ci-core-reusable.yml +++ b/.github/workflows/ci-core-reusable.yml @@ -104,7 +104,7 @@ jobs: # `sleep 60` because we need to wait until server added all the tokens - name: Run server run: | - ci_run zk server --uring --components api,tree,eth,state_keeper,housekeeper,commitment_generator &>server.log & + ci_run zk server --uring --components api,tree,eth,state_keeper,housekeeper,commitment_generator,vm_runner_protective_reads &>server.log & ci_run sleep 60 - name: Deploy legacy era contracts @@ -134,7 +134,7 @@ jobs: base_token: ["Eth", "Custom"] deployment_mode: ["Rollup", "Validium"] env: - SERVER_COMPONENTS: "api,tree,eth,state_keeper,housekeeper,commitment_generator${{ matrix.consensus && ',consensus' || '' }}" + SERVER_COMPONENTS: "api,tree,eth,state_keeper,housekeeper,commitment_generator,vm_runner_protective_reads${{ matrix.consensus && ',consensus' || '' }}" runs-on: [matterlabs-ci-runner] steps: @@ -182,6 +182,7 @@ jobs: - name: Start services run: | ci_localnet_up + ci_run pre_download_compilers.sh ci_run sccache --start-server - name: Init @@ -301,7 +302,7 @@ jobs: runs-on: [matterlabs-ci-runner] env: - SERVER_COMPONENTS: "api,tree,eth,state_keeper,housekeeper,commitment_generator${{ matrix.consensus && ',consensus' || '' }}" + SERVER_COMPONENTS: "api,tree,eth,state_keeper,housekeeper,commitment_generator,vm_runner_protective_reads${{ matrix.consensus && ',consensus' || '' }}" EXT_NODE_FLAGS: "${{ matrix.consensus && '-- --enable-consensus' || '' }}" steps: @@ -322,6 +323,7 @@ jobs: - name: Start services run: | ci_localnet_up + ci_run pre_download_compilers.sh ci_run sccache --start-server - name: Init diff --git a/.github/workflows/ci-zk-toolbox-reusable.yml b/.github/workflows/ci-zk-toolbox-reusable.yml index 3afa7eede4fa..05df367d821a 100644 --- a/.github/workflows/ci-zk-toolbox-reusable.yml +++ b/.github/workflows/ci-zk-toolbox-reusable.yml @@ -2,6 +2,9 @@ name: Workflow template for CI jobs for Core Components on: workflow_call: +env: + CLICOLOR: 1 + jobs: lint: name: lint @@ -69,13 +72,33 @@ jobs: echo $(pwd)/bin >> $GITHUB_PATH echo IN_DOCKER=1 >> .env - - name: Run tests + - name: Start services + run: | + ci_localnet_up + + - name: Setup zk + run: | + ci_run zk + + - name: Initialize ecosystem run: | ci_run zk_inception ecosystem init --deploy-paymaster --deploy-erc20 \ - --deploy-ecosystem --l1-rpc-url=http://localhost:8545 \ - --server-db-url=postgres://postgres:notsecurepassword@localhost:5432 \ - --server-db-name=zksync_server_localhost_era \ - --prover-db-url=postgres://postgres:notsecurepassword@localhost:5432 \ - --prover-db-name=zksync_prover_localhost_era - ci_run zk_inception server & - ci_run zk_supervisor integration-tests + --deploy-ecosystem --l1-rpc-url=http://reth:8545 \ + --server-db-url=postgres://postgres:notsecurepassword@postgres:5432 \ + --server-db-name=zksync_server_localhost_era \ + --prover-db-url=postgres://postgres:notsecurepassword@postgres:5432 \ + 
--prover-db-name=zksync_prover_localhost_era \ + --ignore-prerequisites --verbose + + - name: Run server + run: | + ci_run zk_inception server --ignore-prerequisites &>server.log & + ci_run sleep 5 + + - name: Run integration tests + run: | + ci_run zk_supervisor integration-tests --ignore-prerequisites --verbose + + - name: Show server.log logs + if: always() + run: ci_run cat server.log || true diff --git a/Cargo.lock b/Cargo.lock index 58f83030c7c3..af0d4d352203 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -7928,7 +7928,7 @@ dependencies = [ [[package]] name = "zkevm_circuits" version = "1.5.0" -source = "git+https://github.com/matter-labs/era-zkevm_circuits.git?branch=v1.5.0#28fe577bbb2b95c18d3959ba3dd37ca8ce5bd865" +source = "git+https://github.com/matter-labs/era-zkevm_circuits.git?branch=v1.5.0#b7a86c739e8a8f88e788e90893c6e7496f6d7dfc" dependencies = [ "arrayvec 0.7.4", "boojum", @@ -8024,6 +8024,8 @@ version = "0.1.0" dependencies = [ "anyhow", "assert_matches", + "async-trait", + "futures 0.3.28", "serde", "tempfile", "test-casing", @@ -8605,7 +8607,7 @@ dependencies = [ [[package]] name = "zksync_external_node" -version = "24.5.1" +version = "24.7.0" dependencies = [ "anyhow", "assert_matches", @@ -8925,6 +8927,7 @@ dependencies = [ "tokio", "tracing", "vlog", + "zksync_block_reverter", "zksync_circuit_breaker", "zksync_commitment_generator", "zksync_concurrency", @@ -8949,12 +8952,14 @@ dependencies = [ "zksync_proof_data_handler", "zksync_protobuf_config", "zksync_queued_job_processor", + "zksync_reorg_detector", "zksync_state", "zksync_state_keeper", "zksync_storage", "zksync_tee_verifier_input_producer", "zksync_types", "zksync_utils", + "zksync_vm_runner", "zksync_web3_decl", ] @@ -8987,6 +8992,7 @@ dependencies = [ "assert_matches", "async-trait", "chrono", + "once_cell", "serde", "test-casing", "thiserror", @@ -9029,6 +9035,7 @@ name = "zksync_object_store" version = "0.1.0" dependencies = [ "anyhow", + "assert_matches", "async-trait", "bincode", "flate2", @@ -9036,6 +9043,7 @@ dependencies = [ "google-cloud-storage", "http", "prost 0.12.1", + "rand 0.8.5", "serde_json", "tempfile", "tokio", diff --git a/bin/pre_download_compilers.sh b/bin/pre_download_compilers.sh new file mode 100755 index 000000000000..8a02dca6f98b --- /dev/null +++ b/bin/pre_download_compilers.sh @@ -0,0 +1,43 @@ +#!/usr/bin/env bash + +set -e + +# This ./cache/hardhat-nodejs is coming from the env-paths module +# that hardhat is using. +COMPILER_DIR=/root/.cache/hardhat-nodejs/compilers-v2 +mkdir -p $COMPILER_DIR/{/linux-amd64,/vyper/linux,/zksolc,/zkvyper} + +# Fetch latest compiler version +wget -nv -O $COMPILER_DIR/zksolc/compilerVersionInfo.json "https://raw.githubusercontent.com/matter-labs/zksolc-bin/main/version.json" + + +# These are the versions that we currently have in hardhat.config.ts in zksync-era and era-contracts. +# For now, if there is a new version of compiler, we'd have to modify this file. +# In the future, we should make it more automatic. +(for ver in v1.3.18 v1.3.21 v1.4.0 v1.4.1; do wget -nv -O $COMPILER_DIR/zksolc/zksolc-$ver https://raw.githubusercontent.com/matter-labs/zksolc-bin/main/linux-amd64/zksolc-linux-amd64-musl-$ver; done) + +# Special pre-release 1.5.0 compiler. +# It can be removed once system-contracts/hardhatconfig.ts stops using it. 
+wget -nv -O $COMPILER_DIR/zksolc/zksolc-remote-4cad2deaa6801d7a419f1ed6503c999948b0d6d8.0 https://github.com/matter-labs/era-compiler-solidity/releases/download/prerelease-a167aa3-code4rena/zksolc-linux-amd64-musl-v1.5.0 + + +wget -nv -O $COMPILER_DIR/zkvyper/compilerVersionInfo.json "https://raw.githubusercontent.com/matter-labs/zkvyper-bin/main/version.json" + +(for ver in v1.3.13; do wget -nv -O $COMPILER_DIR/zkvyper/zkvyper-$ver https://raw.githubusercontent.com/matter-labs/zkvyper-bin/main/linux-amd64/zkvyper-linux-amd64-musl-$ver; done) + + +# This matches VYPER_RELEASES_MIRROR_URL from hardhat-vyper +wget -nv -O $COMPILER_DIR/vyper/linux/list.json https://vyper-releases-mirror.hardhat.org/list.json + +# Currently we only use 0.3.10 release of vyper compiler (path taken from the list.json above) +wget -nv -O $COMPILER_DIR/vyper/linux/0.3.10 https://github.com/vyperlang/vyper/releases/download/v0.3.10/vyper.0.3.10%2Bcommit.91361694.linux + + +# This matches COMPILER_REPOSITORY_URL from hardhat-core. +wget -nv -O $COMPILER_DIR/linux-amd64/list.json https://binaries.soliditylang.org/linux-amd64/list.json + +(for ver in solc-linux-amd64-v0.8.20+commit.a1b79de6 solc-linux-amd64-v0.8.23+commit.f704f362 solc-linux-amd64-v0.8.24+commit.e11b9ed9; do \ + wget -nv -O $COMPILER_DIR/linux-amd64/$ver https://binaries.soliditylang.org/linux-amd64/$ver; \ + done) + +chmod -R +x /root/.cache/hardhat-nodejs/ diff --git a/contracts b/contracts index 32ca4e665da8..8a70bbbc4812 160000 --- a/contracts +++ b/contracts @@ -1 +1 @@ -Subproject commit 32ca4e665da89f5b4f2f705eee40d91024ad5b48 +Subproject commit 8a70bbbc48125f5bde6189b4e3c6a3ee79631678 diff --git a/core/CHANGELOG.md b/core/CHANGELOG.md index 18d74c9e4468..608af4d9b010 100644 --- a/core/CHANGELOG.md +++ b/core/CHANGELOG.md @@ -1,5 +1,32 @@ # Changelog +## [24.7.0](https://github.com/matter-labs/zksync-era/compare/core-v24.6.0...core-v24.7.0) (2024-06-03) + + +### Features + +* **node-framework:** Add reorg detector ([#1551](https://github.com/matter-labs/zksync-era/issues/1551)) ([7c7d352](https://github.com/matter-labs/zksync-era/commit/7c7d352708aa64b55a9b33e273b1a16d3f1d168b)) + + +### Bug Fixes + +* **block-reverter:** Fix reverting snapshot files ([#2064](https://github.com/matter-labs/zksync-era/issues/2064)) ([17a7e78](https://github.com/matter-labs/zksync-era/commit/17a7e782d9e35eaf38acf920c2326d4037c7781e)) +* **env:** Do not print stacktrace for locate workspace ([#2111](https://github.com/matter-labs/zksync-era/issues/2111)) ([5f2677f](https://github.com/matter-labs/zksync-era/commit/5f2677f2c966f4dd23538a02ecd7fffe306bec7f)) +* **eth-watch:** make assert less strict ([#2129](https://github.com/matter-labs/zksync-era/issues/2129)) ([e9bab95](https://github.com/matter-labs/zksync-era/commit/e9bab95539af383c161b357a422d5c45f20f27aa)) + +## [24.6.0](https://github.com/matter-labs/zksync-era/compare/core-v24.5.1...core-v24.6.0) (2024-06-03) + + +### Features + +* **en:** Fetch old L1 batch hashes from L1 ([#2000](https://github.com/matter-labs/zksync-era/issues/2000)) ([dc5a918](https://github.com/matter-labs/zksync-era/commit/dc5a9188a44a51810c9b7609a0887090043507f2)) +* use semver for metrics, move constants to prover workspace ([#2098](https://github.com/matter-labs/zksync-era/issues/2098)) ([7a50a9f](https://github.com/matter-labs/zksync-era/commit/7a50a9f79e516ec150d1f30b9f1c781a5523375b)) + + +### Bug Fixes + +* **api:** correct default fee data in eth call ([#2072](https://github.com/matter-labs/zksync-era/issues/2072)) 
([e71f6f9](https://github.com/matter-labs/zksync-era/commit/e71f6f96bda08f8330c643a31df4ef9e82c9afc2)) + ## [24.5.1](https://github.com/matter-labs/zksync-era/compare/core-v24.5.0...core-v24.5.1) (2024-05-31) diff --git a/core/bin/block_reverter/src/main.rs b/core/bin/block_reverter/src/main.rs index faacf15597ff..b5e5c4054a3a 100644 --- a/core/bin/block_reverter/src/main.rs +++ b/core/bin/block_reverter/src/main.rs @@ -69,6 +69,9 @@ enum Command { /// Flag that specifies if RocksDB with state keeper cache should be rolled back. #[arg(long)] rollback_sk_cache: bool, + /// Flag that specifies if snapshot files in GCS should be rolled back. + #[arg(long, requires = "rollback_postgres")] + rollback_snapshots: bool, /// Flag that allows to roll back already executed blocks. It's ultra dangerous and required only for fixing external nodes. #[arg(long)] allow_executed_block_reversion: bool, @@ -187,6 +190,7 @@ async fn main() -> anyhow::Result<()> { rollback_postgres, rollback_tree, rollback_sk_cache, + rollback_snapshots, allow_executed_block_reversion, } => { if !rollback_tree && rollback_postgres { @@ -219,13 +223,15 @@ async fn main() -> anyhow::Result<()> { if rollback_postgres { block_reverter.enable_rolling_back_postgres(); - let object_store_config = SnapshotsObjectStoreConfig::from_env() - .context("SnapshotsObjectStoreConfig::from_env()")?; - block_reverter.enable_rolling_back_snapshot_objects( - ObjectStoreFactory::new(object_store_config.0) - .create_store() - .await, - ); + if rollback_snapshots { + let object_store_config = SnapshotsObjectStoreConfig::from_env() + .context("SnapshotsObjectStoreConfig::from_env()")?; + block_reverter.enable_rolling_back_snapshot_objects( + ObjectStoreFactory::new(object_store_config.0) + .create_store() + .await, + ); + } } if rollback_tree { block_reverter.enable_rolling_back_merkle_tree(db_config.merkle_tree.path); diff --git a/core/bin/external_node/Cargo.toml b/core/bin/external_node/Cargo.toml index 8ca3abb23eae..d4a883b190f4 100644 --- a/core/bin/external_node/Cargo.toml +++ b/core/bin/external_node/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "zksync_external_node" -version = "24.5.1" # x-release-please-version +version = "24.7.0" # x-release-please-version edition.workspace = true authors.workspace = true homepage.workspace = true diff --git a/core/bin/external_node/src/config/mod.rs b/core/bin/external_node/src/config/mod.rs index 56d66a3a4253..08fd955297ed 100644 --- a/core/bin/external_node/src/config/mod.rs +++ b/core/bin/external_node/src/config/mod.rs @@ -222,8 +222,6 @@ pub(crate) struct OptionalENConfig { /// Max number of cache misses during one VM execution. If the number of cache misses exceeds this value, the API server panics. /// This is a temporary solution to mitigate API request resulting in thousands of DB queries. pub vm_execution_cache_misses_limit: Option, - /// Note: Deprecated option, no longer in use. Left to display a warning in case someone used them. - pub transactions_per_sec_limit: Option, /// Limit for fee history block range. #[serde(default = "OptionalENConfig::default_fee_history_limit")] pub fee_history_limit: u64, diff --git a/core/bin/external_node/src/helpers.rs b/core/bin/external_node/src/helpers.rs index 0cd0585def5b..3cac556e1d7b 100644 --- a/core/bin/external_node/src/helpers.rs +++ b/core/bin/external_node/src/helpers.rs @@ -41,35 +41,6 @@ impl CheckHealth for MainNodeHealthCheck { } } -/// Ethereum client health check. 
-#[derive(Debug)] -pub(crate) struct EthClientHealthCheck(Box>); - -impl From>> for EthClientHealthCheck { - fn from(client: Box>) -> Self { - Self(client.for_component("ethereum_health_check")) - } -} - -#[async_trait] -impl CheckHealth for EthClientHealthCheck { - fn name(&self) -> &'static str { - "ethereum_http_rpc" - } - - async fn check_health(&self) -> Health { - if let Err(err) = self.0.block_number().await { - tracing::warn!("Health-check call to Ethereum HTTP RPC failed: {err}"); - let details = serde_json::json!({ - "error": err.to_string(), - }); - // Unlike main node client, losing connection to L1 is not fatal for the node - return Health::from(HealthStatus::Affected).with_details(details); - } - HealthStatus::Ready.into() - } -} - /// Task that validates chain IDs using main node and Ethereum clients. #[derive(Debug)] pub(crate) struct ValidateChainIdsTask { diff --git a/core/bin/external_node/src/main.rs b/core/bin/external_node/src/main.rs index 0f53e8983881..584356e755bf 100644 --- a/core/bin/external_node/src/main.rs +++ b/core/bin/external_node/src/main.rs @@ -15,7 +15,7 @@ use zksync_concurrency::{ctx, scope}; use zksync_config::configs::{api::MerkleTreeApiConfig, database::MerkleTreeMode}; use zksync_consistency_checker::ConsistencyChecker; use zksync_core_leftovers::setup_sigint_handler; -use zksync_dal::{metrics::PostgresMetrics, ConnectionPool, Core, CoreDal}; +use zksync_dal::{metrics::PostgresMetrics, ConnectionPool, Core}; use zksync_db_connection::{ connection_pool::ConnectionPoolBuilder, healthcheck::ConnectionPoolHealthCheck, }; @@ -54,7 +54,7 @@ use zksync_web3_decl::{ use crate::{ config::ExternalNodeConfig, - helpers::{EthClientHealthCheck, MainNodeHealthCheck, ValidateChainIdsTask}, + helpers::{MainNodeHealthCheck, ValidateChainIdsTask}, init::ensure_storage_initialized, metrics::RUST_METRICS, }; @@ -66,7 +66,6 @@ mod metadata; mod metrics; #[cfg(test)] mod tests; -mod version_sync_task; /// Creates the state keeper configured to work in the external node mode. #[allow(clippy::too_many_arguments)] @@ -437,10 +436,6 @@ async fn run_api( let tx_sender_builder = TxSenderBuilder::new(config.into(), connection_pool.clone(), Arc::new(tx_proxy)); - if config.optional.transactions_per_sec_limit.is_some() { - tracing::warn!("`transactions_per_sec_limit` option is deprecated and ignored"); - }; - let max_concurrency = config.optional.vm_concurrency_limit; let (vm_concurrency_limiter, vm_barrier) = VmConcurrencyLimiter::new(max_concurrency); let mut storage_caches = PostgresStorageCaches::new( @@ -629,7 +624,8 @@ async fn init_tasks( "Running tree data fetcher (allows a node to operate w/o a Merkle tree or w/o waiting the tree to catch up). \ This is an experimental feature; do not use unless you know what you're doing" ); - let fetcher = TreeDataFetcher::new(main_node_client.clone(), connection_pool.clone()); + let fetcher = TreeDataFetcher::new(main_node_client.clone(), connection_pool.clone()) + .with_l1_data(eth_client.clone(), config.remote.diamond_proxy_addr)?; app_health.insert_component(fetcher.health_check())?; task_handles.push(tokio::spawn(fetcher.run(stop_receiver.clone()))); } @@ -696,9 +692,6 @@ async fn shutdown_components( #[derive(Debug, Parser)] #[command(author = "Matter Labs", version)] struct Cli { - /// Revert the pending L1 batch and exit. - #[arg(long)] - revert_pending_l1_batch: bool, /// Enables consensus-based syncing instead of JSON-RPC based one. 
This is an experimental and incomplete feature; /// do not use unless you know what you're doing. #[arg(long)] @@ -861,7 +854,6 @@ async fn run_node( app_health.insert_custom_component(Arc::new(MainNodeHealthCheck::from( main_node_client.clone(), )))?; - app_health.insert_custom_component(Arc::new(EthClientHealthCheck::from(eth_client.clone())))?; app_health.insert_custom_component(Arc::new(ConnectionPoolHealthCheck::new( connection_pool.clone(), )))?; @@ -912,20 +904,7 @@ async fn run_node( ); let validate_chain_ids_task = tokio::spawn(validate_chain_ids_task.run(stop_receiver.clone())); - let version_sync_task_pool = connection_pool.clone(); - let version_sync_task_main_node_client = main_node_client.clone(); - let mut stop_receiver_for_version_sync = stop_receiver.clone(); - let version_sync_task = tokio::spawn(async move { - version_sync_task::sync_versions( - version_sync_task_pool, - version_sync_task_main_node_client, - ) - .await?; - - stop_receiver_for_version_sync.changed().await.ok(); - Ok(()) - }); - let mut task_handles = vec![metrics_task, validate_chain_ids_task, version_sync_task]; + let mut task_handles = vec![metrics_task, validate_chain_ids_task]; task_handles.extend(prometheus_task); // Make sure that the node storage is initialized either via genesis or snapshot recovery. @@ -979,20 +958,6 @@ async fn run_node( } Err(err) => return Err(err).context("reorg_detector.check_consistency()"), } - if opt.revert_pending_l1_batch { - tracing::info!("Reverting pending L1 batch"); - let mut connection = connection_pool.connection().await?; - let sealed_l1_batch_number = connection - .blocks_dal() - .get_sealed_l1_batch_number() - .await? - .context("Cannot revert pending L1 batch since there are no L1 batches in Postgres")?; - drop(connection); - - tracing::info!("Reverting to l1 batch number {sealed_l1_batch_number}"); - reverter.roll_back(sealed_l1_batch_number).await?; - tracing::info!("Revert successfully completed"); - } app_health.insert_component(reorg_detector.health_check().clone())?; task_handles.push(tokio::spawn({ diff --git a/core/bin/external_node/src/tests.rs b/core/bin/external_node/src/tests.rs index 00301e1b8234..6611ce145c4c 100644 --- a/core/bin/external_node/src/tests.rs +++ b/core/bin/external_node/src/tests.rs @@ -2,6 +2,7 @@ use assert_matches::assert_matches; use test_casing::test_casing; +use zksync_dal::CoreDal; use zksync_eth_client::clients::MockEthereum; use zksync_node_genesis::{insert_genesis_batch, GenesisParams}; use zksync_types::{ @@ -153,7 +154,6 @@ async fn external_node_basics(components_str: &'static str) { let components: ComponentsToRun = components_str.parse().unwrap(); let expected_health_components = expected_health_components(&components); let opt = Cli { - revert_pending_l1_batch: false, enable_consensus: false, components, }; @@ -262,7 +262,6 @@ async fn node_reacts_to_stop_signal_during_initial_reorg_detection() { drop(storage); let opt = Cli { - revert_pending_l1_batch: false, enable_consensus: false, components: "core".parse().unwrap(), }; diff --git a/core/bin/external_node/src/version_sync_task.rs b/core/bin/external_node/src/version_sync_task.rs deleted file mode 100644 index a62241d7ab35..000000000000 --- a/core/bin/external_node/src/version_sync_task.rs +++ /dev/null @@ -1,131 +0,0 @@ -use std::cmp::Ordering; - -use anyhow::Context; -use zksync_dal::{ConnectionPool, Core, CoreDal}; -use zksync_types::{L1BatchNumber, L2BlockNumber, ProtocolVersionId}; -use zksync_web3_decl::{ - client::{DynClient, L2}, - 
namespaces::{EnNamespaceClient, ZksNamespaceClient}, -}; - -pub async fn get_l1_batch_remote_protocol_version( - main_node_client: &DynClient, - l1_batch_number: L1BatchNumber, -) -> anyhow::Result> { - let Some((miniblock, _)) = main_node_client.get_l2_block_range(l1_batch_number).await? else { - return Ok(None); - }; - let sync_block = main_node_client - .sync_l2_block(L2BlockNumber(miniblock.as_u32()), false) - .await?; - Ok(sync_block.map(|b| b.protocol_version)) -} - -// Synchronizes protocol version in `l1_batches` and `miniblocks` tables between EN and main node. -pub async fn sync_versions( - connection_pool: ConnectionPool, - main_node_client: Box>, -) -> anyhow::Result<()> { - tracing::info!("Starting syncing protocol version of blocks"); - - let mut connection = connection_pool.connection().await?; - - // Load the first local batch number with version 22. - let Some(local_first_v22_l1_batch) = connection - .blocks_dal() - .get_first_l1_batch_number_for_version(ProtocolVersionId::Version22) - .await? - else { - return Ok(()); - }; - tracing::info!("First local v22 batch is #{local_first_v22_l1_batch}"); - - // Find the first remote batch with version 22, assuming it's less than or equal than local one. - // Uses binary search. - let mut left_bound = L1BatchNumber(0); - let mut right_bound = local_first_v22_l1_batch; - let snapshot_recovery = connection - .snapshot_recovery_dal() - .get_applied_snapshot_status() - .await?; - if let Some(snapshot_recovery) = snapshot_recovery { - left_bound = L1BatchNumber(snapshot_recovery.l1_batch_number.0 + 1) - } - - let right_bound_remote_version = - get_l1_batch_remote_protocol_version(main_node_client.as_ref(), right_bound).await?; - if right_bound_remote_version != Some(ProtocolVersionId::Version22) { - anyhow::bail!("Remote protocol versions should be v22 for the first local v22 batch, got {right_bound_remote_version:?}"); - } - - while left_bound < right_bound { - let mid_batch = L1BatchNumber((left_bound.0 + right_bound.0) / 2); - let (mid_miniblock, _) = connection - .blocks_dal() - .get_l2_block_range_of_l1_batch(mid_batch) - .await? - .with_context(|| { - format!("Postgres is inconsistent: missing miniblocks for L1 batch #{mid_batch}") - })?; - let mid_protocol_version = main_node_client - .sync_l2_block(mid_miniblock, false) - .await? - .with_context(|| format!("Main node missing data about miniblock #{mid_miniblock}"))? - .protocol_version; - - match mid_protocol_version.cmp(&ProtocolVersionId::Version22) { - Ordering::Less => { - left_bound = mid_batch + 1; - } - Ordering::Equal => { - right_bound = mid_batch; - } - Ordering::Greater => { - anyhow::bail!("Unexpected remote protocol version: {mid_protocol_version:?} for miniblock #{mid_miniblock}"); - } - } - } - - let remote_first_v22_l1_batch = left_bound; - let (remote_first_v22_miniblock, _) = connection - .blocks_dal() - .get_l2_block_range_of_l1_batch(remote_first_v22_l1_batch) - .await? 
- .with_context(|| { - format!("Postgres is inconsistent: missing miniblocks for L1 batch #{remote_first_v22_l1_batch}") - })?; - - let mut transaction = connection.start_transaction().await?; - - tracing::info!( - "Setting version 22 for batches {remote_first_v22_l1_batch}..={local_first_v22_l1_batch}" - ); - transaction - .blocks_dal() - .reset_protocol_version_for_l1_batches( - remote_first_v22_l1_batch..=local_first_v22_l1_batch, - ProtocolVersionId::Version22, - ) - .await?; - - let (local_first_v22_miniblock, _) = transaction - .blocks_dal() - .get_l2_block_range_of_l1_batch(local_first_v22_l1_batch) - .await? - .with_context(|| { - format!("Postgres is inconsistent: missing miniblocks for L1 batch #{local_first_v22_l1_batch}") - })?; - - tracing::info!("Setting version 22 for miniblocks {remote_first_v22_miniblock}..={local_first_v22_miniblock}"); - transaction - .blocks_dal() - .reset_protocol_version_for_l2_blocks( - remote_first_v22_miniblock..=local_first_v22_miniblock, - ProtocolVersionId::Version22, - ) - .await?; - - transaction.commit().await?; - - Ok(()) -} diff --git a/core/bin/zksync_server/src/main.rs b/core/bin/zksync_server/src/main.rs index 955a0232ae3b..f1eedd592386 100644 --- a/core/bin/zksync_server/src/main.rs +++ b/core/bin/zksync_server/src/main.rs @@ -13,7 +13,8 @@ use zksync_config::{ house_keeper::HouseKeeperConfig, ContractsConfig, DatabaseSecrets, FriProofCompressorConfig, FriProverConfig, FriProverGatewayConfig, FriWitnessGeneratorConfig, FriWitnessVectorGeneratorConfig, - L1Secrets, ObservabilityConfig, PrometheusConfig, ProofDataHandlerConfig, Secrets, + L1Secrets, ObservabilityConfig, PrometheusConfig, ProofDataHandlerConfig, + ProtectiveReadsWriterConfig, Secrets, }, ApiConfig, ContractVerifierConfig, DBConfig, EthConfig, EthWatchConfig, GasAdjusterConfig, GenesisConfig, ObjectStoreConfig, PostgresConfig, SnapshotsCreatorConfig, @@ -306,5 +307,6 @@ fn load_env_config() -> anyhow::Result { object_store_config: ObjectStoreConfig::from_env().ok(), observability: ObservabilityConfig::from_env().ok(), snapshot_creator: SnapshotsCreatorConfig::from_env().ok(), + protective_reads_writer_config: ProtectiveReadsWriterConfig::from_env().ok(), }) } diff --git a/core/bin/zksync_server/src/node_builder.rs b/core/bin/zksync_server/src/node_builder.rs index 163835044cac..d67b898c95ca 100644 --- a/core/bin/zksync_server/src/node_builder.rs +++ b/core/bin/zksync_server/src/node_builder.rs @@ -37,6 +37,7 @@ use zksync_node_framework::{ StateKeeperLayer, }, tee_verifier_input_producer::TeeVerifierInputProducerLayer, + vm_runner::protective_reads::ProtectiveReadsWriterLayer, web3_api::{ caches::MempoolCacheLayer, server::{Web3ServerLayer, Web3ServerOptionalConfig}, @@ -399,6 +400,17 @@ impl MainNodeBuilder { Ok(self) } + fn add_vm_runner_protective_reads_layer(mut self) -> anyhow::Result { + let protective_reads_writer_config = + try_load_config!(self.configs.protective_reads_writer_config); + self.node.add_layer(ProtectiveReadsWriterLayer::new( + protective_reads_writer_config, + self.genesis_config.l2_chain_id, + )); + + Ok(self) + } + pub fn build(mut self, mut components: Vec) -> anyhow::Result { // Add "base" layers (resources and helper tasks). self = self @@ -480,6 +492,9 @@ impl MainNodeBuilder { Component::CommitmentGenerator => { self = self.add_commitment_generator_layer()?; } + Component::VmRunnerProtectiveReads => { + self = self.add_vm_runner_protective_reads_layer()?; + } } } Ok(self.node.build()?) 
diff --git a/core/lib/basic_types/src/basic_fri_types.rs b/core/lib/basic_types/src/basic_fri_types.rs index 33d4fafa5905..a1563ff7e590 100644 --- a/core/lib/basic_types/src/basic_fri_types.rs +++ b/core/lib/basic_types/src/basic_fri_types.rs @@ -6,6 +6,8 @@ use std::{convert::TryFrom, str::FromStr}; use serde::{Deserialize, Serialize}; +use crate::protocol_version::{ProtocolSemanticVersion, ProtocolVersionId, VersionPatch}; + const BLOB_CHUNK_SIZE: usize = 31; const ELEMENTS_PER_4844_BLOCK: usize = 4096; pub const MAX_4844_BLOBS_PER_BLOCK: usize = 16; @@ -189,6 +191,16 @@ pub struct JobIdentifiers { pub circuit_id: u8, pub aggregation_round: u8, pub protocol_version: u16, + pub protocol_version_patch: u32, +} + +impl JobIdentifiers { + pub fn get_semantic_protocol_version(&self) -> ProtocolSemanticVersion { + ProtocolSemanticVersion::new( + ProtocolVersionId::try_from(self.protocol_version).unwrap(), + VersionPatch(self.protocol_version_patch), + ) + } } #[cfg(test)] diff --git a/core/lib/basic_types/src/protocol_version.rs b/core/lib/basic_types/src/protocol_version.rs index 4f29d936a73f..d8083c0f6a31 100644 --- a/core/lib/basic_types/src/protocol_version.rs +++ b/core/lib/basic_types/src/protocol_version.rs @@ -20,16 +20,6 @@ use crate::{ pub const PACKED_SEMVER_MINOR_OFFSET: u32 = 32; pub const PACKED_SEMVER_MINOR_MASK: u32 = 0xFFFF; -// These values should be manually updated for every protocol upgrade -// Otherwise, the prover will not be able to work with new versions. -// TODO(PLA-954): Move to prover workspace -pub const PROVER_PROTOCOL_VERSION: ProtocolVersionId = ProtocolVersionId::Version24; -pub const PROVER_PROTOCOL_PATCH: VersionPatch = VersionPatch(0); -pub const PROVER_PROTOCOL_SEMANTIC_VERSION: ProtocolSemanticVersion = ProtocolSemanticVersion { - minor: PROVER_PROTOCOL_VERSION, - patch: PROVER_PROTOCOL_PATCH, -}; - /// `ProtocolVersionId` is a unique identifier of the protocol version. /// Note, that it is an identifier of the `minor` semver version of the protocol, with /// the `major` version being `0`. 
Also, the protocol version on the contracts may contain @@ -85,10 +75,6 @@ impl ProtocolVersionId { Self::Version24 } - pub fn current_prover_version() -> Self { - PROVER_PROTOCOL_VERSION - } - pub fn next() -> Self { Self::Version25 } @@ -311,10 +297,6 @@ impl ProtocolSemanticVersion { Self { minor, patch } } - pub fn current_prover_version() -> Self { - PROVER_PROTOCOL_SEMANTIC_VERSION - } - pub fn try_from_packed(packed: U256) -> Result { let minor = ((packed >> U256::from(PACKED_SEMVER_MINOR_OFFSET)) & U256::from(PACKED_SEMVER_MINOR_MASK)) diff --git a/core/lib/basic_types/src/web3/mod.rs b/core/lib/basic_types/src/web3/mod.rs index bb4a24da55e7..d684b9b6c7b2 100644 --- a/core/lib/basic_types/src/web3/mod.rs +++ b/core/lib/basic_types/src/web3/mod.rs @@ -138,30 +138,36 @@ impl<'a> Visitor<'a> for BytesVisitor { // `Log`: from `web3::types::log` /// Filter -#[derive(Default, Debug, PartialEq, Clone, Serialize)] +#[derive(Default, Debug, PartialEq, Clone, Serialize, Deserialize)] pub struct Filter { /// From Block #[serde(rename = "fromBlock", skip_serializing_if = "Option::is_none")] - from_block: Option, + pub from_block: Option, /// To Block #[serde(rename = "toBlock", skip_serializing_if = "Option::is_none")] - to_block: Option, + pub to_block: Option, /// Block Hash #[serde(rename = "blockHash", skip_serializing_if = "Option::is_none")] - block_hash: Option, + pub block_hash: Option, /// Address #[serde(skip_serializing_if = "Option::is_none")] - address: Option>, + pub address: Option>, /// Topics #[serde(skip_serializing_if = "Option::is_none")] - topics: Option>>>, + pub topics: Option>>>, /// Limit #[serde(skip_serializing_if = "Option::is_none")] - limit: Option, + pub limit: Option, } #[derive(Default, Debug, PartialEq, Clone)] -struct ValueOrArray(Vec); +pub struct ValueOrArray(Vec); + +impl ValueOrArray { + pub fn flatten(self) -> Vec { + self.0 + } +} impl Serialize for ValueOrArray where @@ -179,6 +185,25 @@ where } } +impl<'de, T> Deserialize<'de> for ValueOrArray +where + T: Deserialize<'de>, +{ + fn deserialize>(deserializer: D) -> Result { + #[derive(Deserialize)] + #[serde(untagged)] + enum Repr { + Single(T), + Sequence(Vec), + } + + Ok(match Repr::::deserialize(deserializer)? { + Repr::Single(element) => Self(vec![element]), + Repr::Sequence(elements) => Self(elements), + }) + } +} + // Filter Builder #[derive(Default, Clone)] pub struct FilterBuilder { @@ -271,7 +296,7 @@ fn topic_to_option(topic: ethabi::Topic) -> Option> { } /// A log produced by a transaction. 
-#[derive(Debug, Clone, PartialEq, Serialize, Deserialize)] +#[derive(Debug, Clone, PartialEq, Default, Serialize, Deserialize)] pub struct Log { /// H160 pub address: H160, diff --git a/core/lib/config/src/configs/general.rs b/core/lib/config/src/configs/general.rs index 69d68508a035..ef02f557bc18 100644 --- a/core/lib/config/src/configs/general.rs +++ b/core/lib/config/src/configs/general.rs @@ -3,6 +3,7 @@ use crate::{ chain::{CircuitBreakerConfig, MempoolConfig, OperationsManagerConfig, StateKeeperConfig}, fri_prover_group::FriProverGroupConfig, house_keeper::HouseKeeperConfig, + vm_runner::ProtectiveReadsWriterConfig, FriProofCompressorConfig, FriProverConfig, FriProverGatewayConfig, FriWitnessGeneratorConfig, FriWitnessVectorGeneratorConfig, ObservabilityConfig, PrometheusConfig, ProofDataHandlerConfig, @@ -32,4 +33,5 @@ pub struct GeneralConfig { pub eth: Option, pub snapshot_creator: Option, pub observability: Option, + pub protective_reads_writer_config: Option, } diff --git a/core/lib/config/src/configs/mod.rs b/core/lib/config/src/configs/mod.rs index 925c30976f97..b2d9571ad292 100644 --- a/core/lib/config/src/configs/mod.rs +++ b/core/lib/config/src/configs/mod.rs @@ -20,6 +20,7 @@ pub use self::{ secrets::{DatabaseSecrets, L1Secrets, Secrets}, snapshots_creator::SnapshotsCreatorConfig, utils::PrometheusConfig, + vm_runner::ProtectiveReadsWriterConfig, }; pub mod api; @@ -46,6 +47,7 @@ pub mod proof_data_handler; pub mod secrets; pub mod snapshots_creator; pub mod utils; +pub mod vm_runner; pub mod wallets; const BYTES_IN_MEGABYTE: usize = 1_024 * 1_024; diff --git a/core/lib/config/src/configs/vm_runner.rs b/core/lib/config/src/configs/vm_runner.rs new file mode 100644 index 000000000000..6250830398eb --- /dev/null +++ b/core/lib/config/src/configs/vm_runner.rs @@ -0,0 +1,16 @@ +use serde::Deserialize; + +#[derive(Debug, Deserialize, Clone, PartialEq, Default)] +pub struct ProtectiveReadsWriterConfig { + /// Path to the RocksDB data directory that serves state cache. + #[serde(default = "ProtectiveReadsWriterConfig::default_protective_reads_db_path")] + pub protective_reads_db_path: String, + /// How many max batches should be processed at the same time. + pub protective_reads_window_size: u32, +} + +impl ProtectiveReadsWriterConfig { + fn default_protective_reads_db_path() -> String { + "./db/protective_reads_writer".to_owned() + } +} diff --git a/core/lib/contracts/src/lib.rs b/core/lib/contracts/src/lib.rs index e27728272150..50fc20c5916f 100644 --- a/core/lib/contracts/src/lib.rs +++ b/core/lib/contracts/src/lib.rs @@ -30,7 +30,7 @@ pub enum ContractLanguage { /// Meanwhile, hardhat has one more intermediate folder. That's why, we have to represent each contract /// by two constants, intermediate folder and actual contract name. 
For Forge we use only second part const HARDHAT_PATH_PREFIX: &str = "contracts/l1-contracts/artifacts/contracts"; -const FORGE_PATH_PREFIX: &str = "contracts/l1-contracts-foundry/out"; +const FORGE_PATH_PREFIX: &str = "contracts/l1-contracts/out"; const BRIDGEHUB_CONTRACT_FILE: (&str, &str) = ("bridgehub", "IBridgehub.sol/IBridgehub.json"); const STATE_TRANSITION_CONTRACT_FILE: (&str, &str) = ( diff --git a/core/lib/dal/.sqlx/query-1f38966f65ce0ed8365b969d0a1f125cf30578457040c14fd6882c73a87fb3d6.json b/core/lib/dal/.sqlx/query-1f38966f65ce0ed8365b969d0a1f125cf30578457040c14fd6882c73a87fb3d6.json new file mode 100644 index 000000000000..94a17c87888e --- /dev/null +++ b/core/lib/dal/.sqlx/query-1f38966f65ce0ed8365b969d0a1f125cf30578457040c14fd6882c73a87fb3d6.json @@ -0,0 +1,20 @@ +{ + "db_name": "PostgreSQL", + "query": "\n SELECT\n COALESCE(MAX(l1_batch_number), 0) AS \"last_processed_l1_batch!\"\n FROM\n vm_runner_protective_reads\n ", + "describe": { + "columns": [ + { + "ordinal": 0, + "name": "last_processed_l1_batch!", + "type_info": "Int8" + } + ], + "parameters": { + "Left": [] + }, + "nullable": [ + null + ] + }, + "hash": "1f38966f65ce0ed8365b969d0a1f125cf30578457040c14fd6882c73a87fb3d6" +} diff --git a/core/lib/dal/.sqlx/query-c31632143b459ea6684908ce7a15d03a811221d4ddf26e2e0ddc34147a0d8e23.json b/core/lib/dal/.sqlx/query-c31632143b459ea6684908ce7a15d03a811221d4ddf26e2e0ddc34147a0d8e23.json new file mode 100644 index 000000000000..dcbfb1d0bd24 --- /dev/null +++ b/core/lib/dal/.sqlx/query-c31632143b459ea6684908ce7a15d03a811221d4ddf26e2e0ddc34147a0d8e23.json @@ -0,0 +1,22 @@ +{ + "db_name": "PostgreSQL", + "query": "\n WITH\n available_batches AS (\n SELECT\n MAX(number) AS \"last_batch\"\n FROM\n l1_batches\n ),\n processed_batches AS (\n SELECT\n COALESCE(MAX(l1_batch_number), 0) + $1 AS \"last_ready_batch\"\n FROM\n vm_runner_protective_reads\n )\n SELECT\n LEAST(last_batch, last_ready_batch) AS \"last_ready_batch!\"\n FROM\n available_batches\n FULL JOIN processed_batches ON TRUE\n ", + "describe": { + "columns": [ + { + "ordinal": 0, + "name": "last_ready_batch!", + "type_info": "Int8" + } + ], + "parameters": { + "Left": [ + "Int8" + ] + }, + "nullable": [ + true + ] + }, + "hash": "c31632143b459ea6684908ce7a15d03a811221d4ddf26e2e0ddc34147a0d8e23" +} diff --git a/core/lib/dal/.sqlx/query-f2f1b6c4f4686b423a4c449c56e1687e91d8367347b3830830a4c76407d60bc5.json b/core/lib/dal/.sqlx/query-f2f1b6c4f4686b423a4c449c56e1687e91d8367347b3830830a4c76407d60bc5.json new file mode 100644 index 000000000000..e49cc211cdcd --- /dev/null +++ b/core/lib/dal/.sqlx/query-f2f1b6c4f4686b423a4c449c56e1687e91d8367347b3830830a4c76407d60bc5.json @@ -0,0 +1,14 @@ +{ + "db_name": "PostgreSQL", + "query": "\n INSERT INTO\n vm_runner_protective_reads (l1_batch_number, created_at, updated_at)\n VALUES\n ($1, NOW(), NOW())\n ", + "describe": { + "columns": [], + "parameters": { + "Left": [ + "Int8" + ] + }, + "nullable": [] + }, + "hash": "f2f1b6c4f4686b423a4c449c56e1687e91d8367347b3830830a4c76407d60bc5" +} diff --git a/core/lib/dal/migrations/20240522215934_add_vm_runner_protective_reads_table.down.sql b/core/lib/dal/migrations/20240522215934_add_vm_runner_protective_reads_table.down.sql new file mode 100644 index 000000000000..773b22aa4fa1 --- /dev/null +++ b/core/lib/dal/migrations/20240522215934_add_vm_runner_protective_reads_table.down.sql @@ -0,0 +1 @@ +DROP TABLE IF EXISTS vm_runner_protective_reads; diff --git a/core/lib/dal/migrations/20240522215934_add_vm_runner_protective_reads_table.up.sql 
b/core/lib/dal/migrations/20240522215934_add_vm_runner_protective_reads_table.up.sql new file mode 100644 index 000000000000..170569508281 --- /dev/null +++ b/core/lib/dal/migrations/20240522215934_add_vm_runner_protective_reads_table.up.sql @@ -0,0 +1,7 @@ +CREATE TABLE IF NOT EXISTS vm_runner_protective_reads +( + l1_batch_number BIGINT NOT NULL PRIMARY KEY, + created_at TIMESTAMP NOT NULL, + updated_at TIMESTAMP NOT NULL, + time_taken TIME +); diff --git a/core/lib/dal/src/consensus/mod.rs b/core/lib/dal/src/consensus/mod.rs index 1829c130970d..f7a3b0666241 100644 --- a/core/lib/dal/src/consensus/mod.rs +++ b/core/lib/dal/src/consensus/mod.rs @@ -109,6 +109,17 @@ impl ProtoRepr for proto::Transaction { Ok(Self::Type { common_data: match common_data { proto::transaction::CommonData::L1(common_data) => { + anyhow::ensure!( + *required(&common_data.deadline_block) + .context("common_data.deadline_block")? + == 0 + ); + anyhow::ensure!( + required(&common_data.eth_hash) + .and_then(|x| parse_h256(x)) + .context("common_data.eth_hash")? + == H256::default() + ); ExecuteTransactionCommon::L1(L1TxCommonData { sender: required(&common_data.sender_address) .and_then(|x| parse_h160(x)) @@ -116,8 +127,6 @@ impl ProtoRepr for proto::Transaction { serial_id: required(&common_data.serial_id) .map(|x| PriorityOpId(*x)) .context("common_data.serial_id")?, - deadline_block: *required(&common_data.deadline_block) - .context("common_data.deadline_block")?, layer_2_tip_fee: required(&common_data.layer_2_tip_fee) .and_then(|x| parse_h256(x)) .map(h256_to_u256) @@ -150,9 +159,6 @@ impl ProtoRepr for proto::Transaction { .map_err(|_| anyhow!("u8::try_from")) }) .context("common_data.priority_queue_type")?, - eth_hash: required(&common_data.eth_hash) - .and_then(|x| parse_h256(x)) - .context("common_data.eth_hash")?, eth_block: *required(&common_data.eth_block) .context("common_data.eth_block")?, canonical_tx_hash: required(&common_data.canonical_tx_hash) @@ -247,9 +253,6 @@ impl ProtoRepr for proto::Transaction { .and_then(|x| parse_h256(x)) .map(h256_to_u256) .context("common_data.gas_per_pubdata_limit")?, - eth_hash: required(&common_data.eth_hash) - .and_then(|x| parse_h256(x)) - .context("common_data.eth_hash")?, eth_block: *required(&common_data.eth_block) .context("common_data.eth_block")?, canonical_tx_hash: required(&common_data.canonical_tx_hash) @@ -290,7 +293,7 @@ impl ProtoRepr for proto::Transaction { proto::transaction::CommonData::L1(proto::L1TxCommonData { sender_address: Some(data.sender.as_bytes().into()), serial_id: Some(data.serial_id.0), - deadline_block: Some(data.deadline_block), + deadline_block: Some(0), layer_2_tip_fee: Some(u256_to_h256(data.layer_2_tip_fee).as_bytes().into()), full_fee: Some(u256_to_h256(data.full_fee).as_bytes().into()), max_fee_per_gas: Some(u256_to_h256(data.max_fee_per_gas).as_bytes().into()), @@ -300,7 +303,7 @@ impl ProtoRepr for proto::Transaction { ), op_processing_type: Some(data.op_processing_type as u32), priority_queue_type: Some(data.priority_queue_type as u32), - eth_hash: Some(data.eth_hash.as_bytes().into()), + eth_hash: Some(H256::default().as_bytes().into()), eth_block: Some(data.eth_block), canonical_tx_hash: Some(data.canonical_tx_hash.as_bytes().into()), to_mint: Some(u256_to_h256(data.to_mint).as_bytes().into()), @@ -345,7 +348,7 @@ impl ProtoRepr for proto::Transaction { gas_per_pubdata_limit: Some( u256_to_h256(data.gas_per_pubdata_limit).as_bytes().into(), ), - eth_hash: Some(data.eth_hash.as_bytes().into()), + eth_hash: 
Some(H256::default().as_bytes().into()), eth_block: Some(data.eth_block), canonical_tx_hash: Some(data.canonical_tx_hash.as_bytes().into()), to_mint: Some(u256_to_h256(data.to_mint).as_bytes().into()), diff --git a/core/lib/dal/src/consensus/proto/mod.proto b/core/lib/dal/src/consensus/proto/mod.proto index 711c964f5341..89e3568fbb5e 100644 --- a/core/lib/dal/src/consensus/proto/mod.proto +++ b/core/lib/dal/src/consensus/proto/mod.proto @@ -30,7 +30,6 @@ message Transaction { message L1TxCommonData { optional bytes sender_address = 1; // required; H160 optional uint64 serial_id = 2; // required - optional uint64 deadline_block = 3; // required optional bytes layer_2_tip_fee = 4; // required; U256 optional bytes full_fee = 5; // required; U256 optional bytes max_fee_per_gas = 6; // required; U256 @@ -38,11 +37,15 @@ message L1TxCommonData { optional bytes gas_per_pubdata_limit = 8; // required; U256 optional uint32 op_processing_type = 9; // required optional uint32 priority_queue_type = 10; // required; U256 - optional bytes eth_hash = 11; // required; H256 - optional uint64 eth_block = 12; // required + optional bytes canonical_tx_hash = 13; // // required; H256 optional bytes to_mint = 14; // required; U256 optional bytes refund_recipient_address = 15; // required; H160 + + // deprecated. + optional uint64 deadline_block = 3; // required; constant = 0 + optional bytes eth_hash = 11; // required; constant = [0;32] + optional uint64 eth_block = 12; // required } message L2TxCommonData { @@ -64,11 +67,13 @@ message ProtocolUpgradeTxCommonData { optional bytes max_fee_per_gas = 3; // required; U256 optional bytes gas_limit = 4; // required; U256 optional bytes gas_per_pubdata_limit = 5; // required; U256 - optional bytes eth_hash = 6; // required; U256 - optional uint64 eth_block = 7; // required optional bytes canonical_tx_hash = 8; // required; H256 optional bytes to_mint = 9; // required; U256 optional bytes refund_recipient_address = 10; // required; H160 + + // deprecated. 
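+  // Kept only so that older nodes can still parse the message; writers always emit the constant values noted below.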
+ optional bytes eth_hash = 6; // required; constant = [0;32] + optional uint64 eth_block = 7; // required } message Execute { diff --git a/core/lib/dal/src/lib.rs b/core/lib/dal/src/lib.rs index f9c585758c4d..8b048a035121 100644 --- a/core/lib/dal/src/lib.rs +++ b/core/lib/dal/src/lib.rs @@ -23,7 +23,7 @@ use crate::{ sync_dal::SyncDal, system_dal::SystemDal, tee_verifier_input_producer_dal::TeeVerifierInputProducerDal, tokens_dal::TokensDal, tokens_web3_dal::TokensWeb3Dal, transactions_dal::TransactionsDal, - transactions_web3_dal::TransactionsWeb3Dal, + transactions_web3_dal::TransactionsWeb3Dal, vm_runner_dal::VmRunnerDal, }; pub mod blocks_dal; @@ -55,6 +55,7 @@ pub mod tokens_dal; pub mod tokens_web3_dal; pub mod transactions_dal; pub mod transactions_web3_dal; +pub mod vm_runner_dal; #[cfg(test)] mod tests; @@ -119,6 +120,8 @@ where fn snapshot_recovery_dal(&mut self) -> SnapshotRecoveryDal<'_, 'a>; fn pruning_dal(&mut self) -> PruningDal<'_, 'a>; + + fn vm_runner_dal(&mut self) -> VmRunnerDal<'_, 'a>; } #[derive(Clone, Debug)] @@ -229,4 +232,8 @@ impl<'a> CoreDal<'a> for Connection<'a, Core> { fn pruning_dal(&mut self) -> PruningDal<'_, 'a> { PruningDal { storage: self } } + + fn vm_runner_dal(&mut self) -> VmRunnerDal<'_, 'a> { + VmRunnerDal { storage: self } + } } diff --git a/core/lib/dal/src/models/storage_transaction.rs b/core/lib/dal/src/models/storage_transaction.rs index ed9c9b981dbf..8d575bb8ab6b 100644 --- a/core/lib/dal/src/models/storage_transaction.rs +++ b/core/lib/dal/src/models/storage_transaction.rs @@ -42,7 +42,6 @@ pub struct StorageTransaction { pub received_at: NaiveDateTime, pub in_mempool: bool, - pub l1_block_number: Option, pub l1_batch_number: Option, pub l1_batch_tx_index: Option, pub miniblock_number: Option, @@ -66,6 +65,9 @@ pub struct StorageTransaction { pub created_at: NaiveDateTime, pub updated_at: NaiveDateTime, + + // DEPRECATED. + pub l1_block_number: Option, } impl From for L1TxCommonData { @@ -137,10 +139,9 @@ impl From for L1TxCommonData { .gas_per_pubdata_limit .map(bigdecimal_to_u256) .unwrap_or_else(|| U256::from(1u32)), - deadline_block: 0, - eth_hash: Default::default(), - eth_block: tx.l1_block_number.unwrap_or_default() as u64, canonical_tx_hash, + // DEPRECATED. + eth_block: tx.l1_block_number.unwrap_or_default() as u64, } } } @@ -282,7 +283,7 @@ impl From for ProtocolUpgradeTxCommonData { .gas_per_pubdata_limit .map(bigdecimal_to_u256) .expect("gas_per_pubdata_limit field is missing for protocol upgrade tx"), - eth_hash: Default::default(), + // DEPRECATED. 
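+            // `eth_block` is still populated from the legacy `l1_block_number` column.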
eth_block: tx.l1_block_number.unwrap_or_default() as u64, canonical_tx_hash, } diff --git a/core/lib/dal/src/models/tests.rs b/core/lib/dal/src/models/tests.rs index 6ed3d0844315..373fbf3a7b48 100644 --- a/core/lib/dal/src/models/tests.rs +++ b/core/lib/dal/src/models/tests.rs @@ -145,8 +145,6 @@ fn storage_tx_to_l1_tx() { .unwrap(), l1_data.gas_per_pubdata_limit ); - assert_eq!(0, l1_data.deadline_block); - assert_eq!(l1_data.eth_hash, Default::default()); assert_eq!(stx.l1_block_number.unwrap() as u64, l1_data.eth_block); assert_eq!(stx.hash.as_slice(), l1_data.canonical_tx_hash.as_bytes()); } else { @@ -211,7 +209,6 @@ fn storage_tx_to_protocol_upgrade_tx() { .unwrap(), l1_data.gas_per_pubdata_limit ); - assert_eq!(l1_data.eth_hash, Default::default()); assert_eq!(stx.l1_block_number.unwrap() as u64, l1_data.eth_block); assert_eq!(stx.hash.as_slice(), l1_data.canonical_tx_hash.as_bytes()); } else { diff --git a/core/lib/dal/src/tests/mod.rs b/core/lib/dal/src/tests/mod.rs index 246578f4584b..500da25ace8e 100644 --- a/core/lib/dal/src/tests/mod.rs +++ b/core/lib/dal/src/tests/mod.rs @@ -81,7 +81,6 @@ pub(crate) fn mock_l1_execute() -> L1Tx { sender: H160::random(), canonical_tx_hash: H256::from_low_u64_be(serial_id), serial_id: PriorityOpId(serial_id), - deadline_block: 100000, layer_2_tip_fee: U256::zero(), full_fee: U256::zero(), gas_limit: U256::from(100_100), @@ -89,10 +88,10 @@ pub(crate) fn mock_l1_execute() -> L1Tx { gas_per_pubdata_limit: 100.into(), op_processing_type: OpProcessingType::Common, priority_queue_type: PriorityQueueType::Deque, - eth_hash: H256::random(), to_mint: U256::zero(), refund_recipient: Address::random(), - eth_block: 1, + // DEPRECATED. + eth_block: 0, }; let execute = Execute { @@ -118,7 +117,6 @@ pub(crate) fn mock_protocol_upgrade_transaction() -> ProtocolUpgradeTx { gas_limit: U256::from(100_100), max_fee_per_gas: U256::from(1u32), gas_per_pubdata_limit: 100.into(), - eth_hash: H256::random(), to_mint: U256::zero(), refund_recipient: Address::random(), eth_block: 1, diff --git a/core/lib/dal/src/vm_runner_dal.rs b/core/lib/dal/src/vm_runner_dal.rs new file mode 100644 index 000000000000..3693f78a6a7a --- /dev/null +++ b/core/lib/dal/src/vm_runner_dal.rs @@ -0,0 +1,83 @@ +use zksync_db_connection::{connection::Connection, error::DalResult, instrument::InstrumentExt}; +use zksync_types::L1BatchNumber; + +use crate::Core; + +#[derive(Debug)] +pub struct VmRunnerDal<'c, 'a> { + pub(crate) storage: &'c mut Connection<'a, Core>, +} + +impl VmRunnerDal<'_, '_> { + pub async fn get_protective_reads_latest_processed_batch( + &mut self, + ) -> DalResult { + let row = sqlx::query!( + r#" + SELECT + COALESCE(MAX(l1_batch_number), 0) AS "last_processed_l1_batch!" + FROM + vm_runner_protective_reads + "# + ) + .instrument("get_protective_reads_latest_processed_batch") + .report_latency() + .fetch_one(self.storage) + .await?; + Ok(L1BatchNumber(row.last_processed_l1_batch as u32)) + } + + pub async fn get_protective_reads_last_ready_batch( + &mut self, + window_size: u32, + ) -> DalResult { + let row = sqlx::query!( + r#" + WITH + available_batches AS ( + SELECT + MAX(number) AS "last_batch" + FROM + l1_batches + ), + processed_batches AS ( + SELECT + COALESCE(MAX(l1_batch_number), 0) + $1 AS "last_ready_batch" + FROM + vm_runner_protective_reads + ) + SELECT + LEAST(last_batch, last_ready_batch) AS "last_ready_batch!" 
+ FROM + available_batches + FULL JOIN processed_batches ON TRUE + "#, + window_size as i32 + ) + .instrument("get_protective_reads_last_ready_batch") + .report_latency() + .fetch_one(self.storage) + .await?; + Ok(L1BatchNumber(row.last_ready_batch as u32)) + } + + pub async fn mark_protective_reads_batch_as_completed( + &mut self, + l1_batch_number: L1BatchNumber, + ) -> DalResult<()> { + sqlx::query!( + r#" + INSERT INTO + vm_runner_protective_reads (l1_batch_number, created_at, updated_at) + VALUES + ($1, NOW(), NOW()) + "#, + i64::from(l1_batch_number.0), + ) + .instrument("mark_protective_reads_batch_as_completed") + .report_latency() + .execute(self.storage) + .await?; + Ok(()) + } +} diff --git a/core/lib/env_config/src/lib.rs b/core/lib/env_config/src/lib.rs index f6290020f38d..9218467fdaba 100644 --- a/core/lib/env_config/src/lib.rs +++ b/core/lib/env_config/src/lib.rs @@ -24,6 +24,7 @@ mod utils; mod genesis; #[cfg(test)] mod test_utils; +mod vm_runner; mod wallets; pub trait FromEnv: Sized { diff --git a/core/lib/env_config/src/vm_runner.rs b/core/lib/env_config/src/vm_runner.rs new file mode 100644 index 000000000000..8a99ea2dc8e2 --- /dev/null +++ b/core/lib/env_config/src/vm_runner.rs @@ -0,0 +1,9 @@ +use zksync_config::configs::ProtectiveReadsWriterConfig; + +use crate::{envy_load, FromEnv}; + +impl FromEnv for ProtectiveReadsWriterConfig { + fn from_env() -> anyhow::Result { + envy_load("vm_runner.protective_reads", "VM_RUNNER_PROTECTIVE_READS_") + } +} diff --git a/core/lib/eth_client/src/clients/http/query.rs b/core/lib/eth_client/src/clients/http/query.rs index 984804953f68..33d9838dc735 100644 --- a/core/lib/eth_client/src/clients/http/query.rs +++ b/core/lib/eth_client/src/clients/http/query.rs @@ -3,11 +3,11 @@ use std::fmt; use async_trait::async_trait; use jsonrpsee::core::ClientError; use zksync_types::{web3, Address, L1ChainId, H256, U256, U64}; -use zksync_web3_decl::error::{ClientRpcContext, EnrichedClientError}; +use zksync_web3_decl::error::{ClientRpcContext, EnrichedClientError, EnrichedClientResult}; use super::{decl::L1EthNamespaceClient, Method, COUNTERS, LATENCIES}; use crate::{ - types::{Error, ExecutedTxStatus, FailureInfo}, + types::{ExecutedTxStatus, FailureInfo}, EthInterface, RawTransactionBytes, }; @@ -16,15 +16,14 @@ impl EthInterface for T where T: L1EthNamespaceClient + fmt::Debug + Send + Sync, { - async fn fetch_chain_id(&self) -> Result { + async fn fetch_chain_id(&self) -> EnrichedClientResult { COUNTERS.call[&(Method::ChainId, self.component())].inc(); let latency = LATENCIES.direct[&Method::ChainId].start(); let raw_chain_id = self.chain_id().rpc_context("chain_id").await?; latency.observe(); let chain_id = u64::try_from(raw_chain_id).map_err(|err| { let err = ClientError::Custom(format!("invalid chainId: {err}")); - let err = EnrichedClientError::new(err, "chain_id").with_arg("chain_id", &raw_chain_id); - Error::EthereumGateway(err) + EnrichedClientError::new(err, "chain_id").with_arg("chain_id", &raw_chain_id) })?; Ok(L1ChainId(chain_id)) } @@ -33,7 +32,7 @@ where &self, account: Address, block: web3::BlockNumber, - ) -> Result { + ) -> EnrichedClientResult { COUNTERS.call[&(Method::NonceAtForAccount, self.component())].inc(); let latency = LATENCIES.direct[&Method::NonceAtForAccount].start(); let nonce = self @@ -46,7 +45,7 @@ where Ok(nonce) } - async fn block_number(&self) -> Result { + async fn block_number(&self) -> EnrichedClientResult { COUNTERS.call[&(Method::BlockNumber, self.component())].inc(); let latency = 
LATENCIES.direct[&Method::BlockNumber].start(); let block_number = self @@ -57,7 +56,7 @@ where Ok(block_number) } - async fn get_gas_price(&self) -> Result { + async fn get_gas_price(&self) -> EnrichedClientResult { COUNTERS.call[&(Method::GetGasPrice, self.component())].inc(); let latency = LATENCIES.direct[&Method::GetGasPrice].start(); let network_gas_price = self.gas_price().rpc_context("gas_price").await?; @@ -65,7 +64,7 @@ where Ok(network_gas_price) } - async fn send_raw_tx(&self, tx: RawTransactionBytes) -> Result { + async fn send_raw_tx(&self, tx: RawTransactionBytes) -> EnrichedClientResult { let latency = LATENCIES.direct[&Method::SendRawTx].start(); let tx = self .send_raw_transaction(web3::Bytes(tx.0)) @@ -79,7 +78,7 @@ where &self, upto_block: usize, block_count: usize, - ) -> Result, Error> { + ) -> EnrichedClientResult> { const MAX_REQUEST_CHUNK: usize = 1024; COUNTERS.call[&(Method::BaseFeeHistory, self.component())].inc(); @@ -111,7 +110,7 @@ where Ok(history.into_iter().map(|fee| fee.as_u64()).collect()) } - async fn get_pending_block_base_fee_per_gas(&self) -> Result { + async fn get_pending_block_base_fee_per_gas(&self) -> EnrichedClientResult { COUNTERS.call[&(Method::PendingBlockBaseFee, self.component())].inc(); let latency = LATENCIES.direct[&Method::PendingBlockBaseFee].start(); @@ -140,7 +139,7 @@ where Ok(block.base_fee_per_gas.unwrap()) } - async fn get_tx_status(&self, hash: H256) -> Result, Error> { + async fn get_tx_status(&self, hash: H256) -> EnrichedClientResult> { COUNTERS.call[&(Method::GetTxStatus, self.component())].inc(); let latency = LATENCIES.direct[&Method::GetTxStatus].start(); @@ -162,7 +161,7 @@ where Ok(res) } - async fn failure_reason(&self, tx_hash: H256) -> Result, Error> { + async fn failure_reason(&self, tx_hash: H256) -> EnrichedClientResult> { let latency = LATENCIES.direct[&Method::FailureReason].start(); let transaction = self .get_transaction_by_hash(tx_hash) @@ -218,7 +217,7 @@ where gas_limit, })) } else { - Err(err.into()) + Err(err) } } Ok(_) => Ok(None), @@ -231,7 +230,7 @@ where } } - async fn get_tx(&self, hash: H256) -> Result, Error> { + async fn get_tx(&self, hash: H256) -> EnrichedClientResult> { COUNTERS.call[&(Method::GetTx, self.component())].inc(); let tx = self .get_transaction_by_hash(hash) @@ -245,7 +244,7 @@ where &self, request: web3::CallRequest, block: Option, - ) -> Result { + ) -> EnrichedClientResult { let latency = LATENCIES.direct[&Method::CallContractFunction].start(); let block = block.unwrap_or_else(|| web3::BlockNumber::Latest.into()); let output_bytes = self @@ -258,7 +257,10 @@ where Ok(output_bytes) } - async fn tx_receipt(&self, tx_hash: H256) -> Result, Error> { + async fn tx_receipt( + &self, + tx_hash: H256, + ) -> EnrichedClientResult> { COUNTERS.call[&(Method::TxReceipt, self.component())].inc(); let latency = LATENCIES.direct[&Method::TxReceipt].start(); let receipt = self @@ -270,7 +272,7 @@ where Ok(receipt) } - async fn eth_balance(&self, address: Address) -> Result { + async fn eth_balance(&self, address: Address) -> EnrichedClientResult { COUNTERS.call[&(Method::EthBalance, self.component())].inc(); let latency = LATENCIES.direct[&Method::EthBalance].start(); let balance = self @@ -282,19 +284,22 @@ where Ok(balance) } - async fn logs(&self, filter: web3::Filter) -> Result, Error> { + async fn logs(&self, filter: &web3::Filter) -> EnrichedClientResult> { COUNTERS.call[&(Method::Logs, self.component())].inc(); let latency = LATENCIES.direct[&Method::Logs].start(); let logs = self 
.get_logs(filter.clone()) .rpc_context("get_logs") - .with_arg("filter", &filter) + .with_arg("filter", filter) .await?; latency.observe(); Ok(logs) } - async fn block(&self, block_id: web3::BlockId) -> Result>, Error> { + async fn block( + &self, + block_id: web3::BlockId, + ) -> EnrichedClientResult>> { COUNTERS.call[&(Method::Block, self.component())].inc(); let latency = LATENCIES.direct[&Method::Block].start(); let block = match block_id { diff --git a/core/lib/eth_client/src/clients/http/signing.rs b/core/lib/eth_client/src/clients/http/signing.rs index bdb7be8aea91..2b89af97a773 100644 --- a/core/lib/eth_client/src/clients/http/signing.rs +++ b/core/lib/eth_client/src/clients/http/signing.rs @@ -10,7 +10,7 @@ use zksync_web3_decl::client::{DynClient, L1}; use super::{Method, LATENCIES}; use crate::{ - types::{encode_blob_tx_with_sidecar, Error, SignedCallResult}, + types::{encode_blob_tx_with_sidecar, ContractCallError, SignedCallResult, SigningError}, BoundEthInterface, CallFunctionArgs, EthInterface, Options, RawTransactionBytes, }; @@ -114,7 +114,7 @@ impl BoundEthInterface for SigningClient { data: Vec, contract_addr: H160, options: Options, - ) -> Result { + ) -> Result { let latency = LATENCIES.direct[&Method::SignPreparedTx].start(); // Fetch current max priority fee per gas let max_priority_fee_per_gas = match options.max_priority_fee_per_gas { @@ -124,10 +124,10 @@ impl BoundEthInterface for SigningClient { if options.transaction_type == Some(EIP_4844_TX_TYPE.into()) { if options.max_fee_per_blob_gas.is_none() { - return Err(Error::Eip4844MissingMaxFeePerBlobGas); + return Err(SigningError::Eip4844MissingMaxFeePerBlobGas); } if options.blob_versioned_hashes.is_none() { - return Err(Error::Eip4844MissingBlobVersionedHashes); + return Err(SigningError::Eip4844MissingBlobVersionedHashes); } } @@ -140,7 +140,7 @@ impl BoundEthInterface for SigningClient { }; if max_fee_per_gas < max_priority_fee_per_gas { - return Err(Error::WrongFeeProvided( + return Err(SigningError::WrongFeeProvided( max_fee_per_gas, max_priority_fee_per_gas, )); @@ -197,7 +197,7 @@ impl BoundEthInterface for SigningClient { token_address: Address, address: Address, erc20_abi: ðabi::Contract, - ) -> Result { + ) -> Result { let latency = LATENCIES.direct[&Method::Allowance].start(); let allowance: U256 = CallFunctionArgs::new("allowance", (self.inner.sender_account, address)) diff --git a/core/lib/eth_client/src/clients/mock.rs b/core/lib/eth_client/src/clients/mock.rs index a6f8f391de73..a3f9dde7c6ea 100644 --- a/core/lib/eth_client/src/clients/mock.rs +++ b/core/lib/eth_client/src/clients/mock.rs @@ -13,7 +13,7 @@ use zksync_types::{ use zksync_web3_decl::client::{DynClient, MockClient, L1}; use crate::{ - types::{Error, SignedCallResult}, + types::{ContractCallError, SignedCallResult, SigningError}, BoundEthInterface, Options, RawTransactionBytes, }; @@ -474,7 +474,7 @@ impl MockEthereum { mut raw_tx: Vec, contract_addr: Address, options: Options, - ) -> Result { + ) -> Result { let max_fee_per_gas = options.max_fee_per_gas.unwrap_or(self.max_fee_per_gas); let max_priority_fee_per_gas = options .max_priority_fee_per_gas @@ -569,7 +569,7 @@ impl BoundEthInterface for MockEthereum { data: Vec, contract_addr: H160, options: Options, - ) -> Result { + ) -> Result { self.sign_prepared_tx(data, contract_addr, options) } @@ -578,7 +578,7 @@ impl BoundEthInterface for MockEthereum { _token_address: Address, _contract_address: Address, _erc20_abi: ðabi::Contract, - ) -> Result { + ) -> Result { unimplemented!("Not 
needed right now") } } diff --git a/core/lib/eth_client/src/lib.rs b/core/lib/eth_client/src/lib.rs index b2433df9d761..2adac587b66c 100644 --- a/core/lib/eth_client/src/lib.rs +++ b/core/lib/eth_client/src/lib.rs @@ -11,11 +11,14 @@ use zksync_types::{ Address, L1ChainId, H160, H256, U256, U64, }; use zksync_web3_decl::client::{DynClient, L1}; -pub use zksync_web3_decl::{error::EnrichedClientError, jsonrpsee::core::ClientError}; +pub use zksync_web3_decl::{ + error::{EnrichedClientError, EnrichedClientResult}, + jsonrpsee::core::ClientError, +}; pub use crate::types::{ - encode_blob_tx_with_sidecar, CallFunctionArgs, ContractCall, ContractError, Error, - ExecutedTxStatus, FailureInfo, RawTransactionBytes, SignedCallResult, + encode_blob_tx_with_sidecar, CallFunctionArgs, ContractCall, ContractCallError, + ExecutedTxStatus, FailureInfo, RawTransactionBytes, SignedCallResult, SigningError, }; pub mod clients; @@ -76,14 +79,14 @@ impl Options { pub trait EthInterface: Sync + Send { /// Fetches the L1 chain ID (in contrast to [`BoundEthInterface::chain_id()`] which returns /// the *expected* L1 chain ID). - async fn fetch_chain_id(&self) -> Result; + async fn fetch_chain_id(&self) -> EnrichedClientResult; /// Returns the nonce of the provided account at the specified block. async fn nonce_at_for_account( &self, account: Address, block: BlockNumber, - ) -> Result; + ) -> EnrichedClientResult; /// Collects the base fee history for the specified block range. /// @@ -93,25 +96,25 @@ pub trait EthInterface: Sync + Send { &self, from_block: usize, block_count: usize, - ) -> Result, Error>; + ) -> EnrichedClientResult>; /// Returns the `base_fee_per_gas` value for the currently pending L1 block. - async fn get_pending_block_base_fee_per_gas(&self) -> Result; + async fn get_pending_block_base_fee_per_gas(&self) -> EnrichedClientResult; /// Returns the current gas price. - async fn get_gas_price(&self) -> Result; + async fn get_gas_price(&self) -> EnrichedClientResult; /// Returns the current block number. - async fn block_number(&self) -> Result; + async fn block_number(&self) -> EnrichedClientResult; /// Sends a transaction to the Ethereum network. - async fn send_raw_tx(&self, tx: RawTransactionBytes) -> Result; + async fn send_raw_tx(&self, tx: RawTransactionBytes) -> EnrichedClientResult; /// Fetches the transaction status for a specified transaction hash. /// /// Returns `Ok(None)` if the transaction is either not found or not executed yet. /// Returns `Err` only if the request fails (e.g. due to network issues). - async fn get_tx_status(&self, hash: H256) -> Result, Error>; + async fn get_tx_status(&self, hash: H256) -> EnrichedClientResult>; /// For a reverted transaction, attempts to recover information on the revert reason. /// @@ -119,29 +122,29 @@ pub trait EthInterface: Sync + Send { /// Returns `Ok(None)` if the transaction isn't found, wasn't executed yet, or if it was /// executed successfully. /// Returns `Err` only if the request fails (e.g. due to network issues). - async fn failure_reason(&self, tx_hash: H256) -> Result, Error>; + async fn failure_reason(&self, tx_hash: H256) -> EnrichedClientResult>; /// Returns the transaction for the specified hash. - async fn get_tx(&self, hash: H256) -> Result, Error>; + async fn get_tx(&self, hash: H256) -> EnrichedClientResult>; /// Returns the receipt for the specified transaction hash. 
- async fn tx_receipt(&self, tx_hash: H256) -> Result, Error>; + async fn tx_receipt(&self, tx_hash: H256) -> EnrichedClientResult>; /// Returns the ETH balance of the specified token for the specified address. - async fn eth_balance(&self, address: Address) -> Result; + async fn eth_balance(&self, address: Address) -> EnrichedClientResult; /// Invokes a function on a contract specified by `contract_address` / `contract_abi` using `eth_call`. async fn call_contract_function( &self, request: web3::CallRequest, block: Option, - ) -> Result; + ) -> EnrichedClientResult; /// Returns the logs for the specified filter. - async fn logs(&self, filter: Filter) -> Result, Error>; + async fn logs(&self, filter: &Filter) -> EnrichedClientResult>; /// Returns the block header for the specified block number or hash. - async fn block(&self, block_id: BlockId) -> Result>, Error>; + async fn block(&self, block_id: BlockId) -> EnrichedClientResult>>; } /// An extension of `EthInterface` trait, which is used to perform queries that are bound to @@ -187,7 +190,7 @@ pub trait BoundEthInterface: AsRef> + 'static + Sync + Send + fmt: token_address: Address, address: Address, erc20_abi: ðabi::Contract, - ) -> Result; + ) -> Result; /// Signs the transaction and sends it to the Ethereum network. /// Expected to use credentials associated with `Self::sender_account()`. @@ -196,7 +199,7 @@ pub trait BoundEthInterface: AsRef> + 'static + Sync + Send + fmt: data: Vec, contract_addr: H160, options: Options, - ) -> Result; + ) -> Result; } impl Clone for Box { @@ -207,19 +210,19 @@ impl Clone for Box { impl dyn BoundEthInterface { /// Returns the nonce of the `Self::sender_account()` at the specified block. - pub async fn nonce_at(&self, block: BlockNumber) -> Result { + pub async fn nonce_at(&self, block: BlockNumber) -> EnrichedClientResult { self.as_ref() .nonce_at_for_account(self.sender_account(), block) .await } /// Returns the current nonce of the `Self::sender_account()`. - pub async fn current_nonce(&self) -> Result { + pub async fn current_nonce(&self) -> EnrichedClientResult { self.nonce_at(BlockNumber::Latest).await } /// Returns the pending nonce of the `Self::sender_account()`. - pub async fn pending_nonce(&self) -> Result { + pub async fn pending_nonce(&self) -> EnrichedClientResult { self.nonce_at(BlockNumber::Pending).await } @@ -228,13 +231,13 @@ impl dyn BoundEthInterface { &self, data: Vec, options: Options, - ) -> Result { + ) -> Result { self.sign_prepared_tx_for_addr(data, self.contract_addr(), options) .await } /// Returns the ETH balance of `Self::sender_account()`. 
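+    /// This is a thin wrapper around [`EthInterface::eth_balance`] for the bound sender account.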
- pub async fn sender_eth_balance(&self) -> Result { + pub async fn sender_eth_balance(&self) -> EnrichedClientResult { self.as_ref().eth_balance(self.sender_account()).await } diff --git a/core/lib/eth_client/src/types.rs b/core/lib/eth_client/src/types.rs index bb1a5f4b6a20..8ac5ff427fb8 100644 --- a/core/lib/eth_client/src/types.rs +++ b/core/lib/eth_client/src/types.rs @@ -79,18 +79,21 @@ impl ContractCall<'_> { &self.inner.params } - pub async fn call(&self, client: &DynClient) -> Result { + pub async fn call( + &self, + client: &DynClient, + ) -> Result { let func = self .contract_abi .function(&self.inner.name) - .map_err(ContractError::Function)?; - let encoded_input = - func.encode_input(&self.inner.params) - .map_err(|source| ContractError::EncodeInput { - signature: func.signature(), - input: self.inner.params.clone(), - source, - })?; + .map_err(ContractCallError::Function)?; + let encoded_input = func.encode_input(&self.inner.params).map_err(|source| { + ContractCallError::EncodeInput { + signature: func.signature(), + input: self.inner.params.clone(), + source, + } + })?; let request = web3::CallRequest { from: self.inner.from, @@ -110,25 +113,28 @@ impl ContractCall<'_> { .call_contract_function(request, self.inner.block) .await?; let output_tokens = func.decode_output(&encoded_output.0).map_err(|source| { - ContractError::DecodeOutput { + ContractCallError::DecodeOutput { signature: func.signature(), output: encoded_output, source, } })?; - Ok(Res::from_tokens(output_tokens.clone()).map_err(|source| { - ContractError::DetokenizeOutput { + Res::from_tokens(output_tokens.clone()).map_err(|source| { + ContractCallError::DetokenizeOutput { signature: func.signature(), output: output_tokens, source, } - })?) + }) } } /// Contract-related subset of Ethereum client errors. #[derive(Debug, thiserror::Error)] -pub enum ContractError { +pub enum ContractCallError { + /// Problem on the Ethereum client side (e.g. bad RPC call, network issues). + #[error("Request to ethereum gateway failed: {0}")] + EthereumGateway(#[from] EnrichedClientError), /// Failed resolving a function specified for the contract call in the contract ABI. #[error("failed resolving contract function: {0}")] Function(#[source] ethabi::Error), @@ -158,15 +164,12 @@ pub enum ContractError { }, } -/// Common error type exposed by the crate, +/// Common error type exposed by the crate. #[derive(Debug, thiserror::Error)] -pub enum Error { +pub enum SigningError { /// Problem on the Ethereum client side (e.g. bad RPC call, network issues). #[error("Request to ethereum gateway failed: {0}")] EthereumGateway(#[from] EnrichedClientError), - /// Problem with a contract call. - #[error("Call to contract failed: {0}")] - Contract(#[from] ContractError), /// Problem with transaction signer. 
#[error("Transaction signing failed: {0}")] Signer(#[from] zksync_eth_signer::SignerError), diff --git a/core/lib/mempool/src/tests.rs b/core/lib/mempool/src/tests.rs index 656d90c63d14..a8c7128baa9c 100644 --- a/core/lib/mempool/src/tests.rs +++ b/core/lib/mempool/src/tests.rs @@ -394,7 +394,6 @@ fn gen_l1_tx(priority_id: PriorityOpId) -> Transaction { let op_data = L1TxCommonData { sender: Address::random(), serial_id: priority_id, - deadline_block: 100000, layer_2_tip_fee: U256::zero(), full_fee: U256::zero(), gas_limit: U256::zero(), @@ -402,8 +401,7 @@ fn gen_l1_tx(priority_id: PriorityOpId) -> Transaction { gas_per_pubdata_limit: U256::one(), op_processing_type: OpProcessingType::Common, priority_queue_type: PriorityQueueType::Deque, - eth_hash: H256::zero(), - eth_block: 1, + eth_block: 0, canonical_tx_hash: H256::zero(), to_mint: U256::zero(), refund_recipient: Address::random(), diff --git a/core/lib/multivm/src/utils.rs b/core/lib/multivm/src/utils.rs index 1f4d55ea66ae..a15fdba6b703 100644 --- a/core/lib/multivm/src/utils.rs +++ b/core/lib/multivm/src/utils.rs @@ -441,8 +441,35 @@ pub fn get_max_batch_gas_limit(version: VmVersion) -> u64 { } VmVersion::Vm1_4_1 => crate::vm_1_4_1::constants::BLOCK_GAS_LIMIT as u64, VmVersion::Vm1_4_2 => crate::vm_1_4_2::constants::BLOCK_GAS_LIMIT as u64, - VmVersion::Vm1_5_0SmallBootloaderMemory => crate::vm_latest::constants::BATCH_GAS_LIMIT, - VmVersion::Vm1_5_0IncreasedBootloaderMemory => crate::vm_latest::constants::BATCH_GAS_LIMIT, + VmVersion::Vm1_5_0SmallBootloaderMemory | VmVersion::Vm1_5_0IncreasedBootloaderMemory => { + crate::vm_latest::constants::BATCH_GAS_LIMIT + } + } +} + +pub fn get_eth_call_gas_limit(version: VmVersion) -> u64 { + match version { + VmVersion::M5WithRefunds | VmVersion::M5WithoutRefunds => { + crate::vm_m5::utils::ETH_CALL_GAS_LIMIT as u64 + } + VmVersion::M6Initial | VmVersion::M6BugWithCompressionFixed => { + crate::vm_m6::utils::ETH_CALL_GAS_LIMIT as u64 + } + VmVersion::Vm1_3_2 => crate::vm_1_3_2::utils::ETH_CALL_GAS_LIMIT as u64, + VmVersion::VmVirtualBlocks => { + crate::vm_virtual_blocks::constants::ETH_CALL_GAS_LIMIT as u64 + } + VmVersion::VmVirtualBlocksRefundsEnhancement => { + crate::vm_refunds_enhancement::constants::ETH_CALL_GAS_LIMIT as u64 + } + VmVersion::VmBoojumIntegration => { + crate::vm_boojum_integration::constants::ETH_CALL_GAS_LIMIT as u64 + } + VmVersion::Vm1_4_1 => crate::vm_1_4_1::constants::ETH_CALL_GAS_LIMIT as u64, + VmVersion::Vm1_4_2 => crate::vm_1_4_2::constants::ETH_CALL_GAS_LIMIT as u64, + VmVersion::Vm1_5_0SmallBootloaderMemory | VmVersion::Vm1_5_0IncreasedBootloaderMemory => { + crate::vm_latest::constants::ETH_CALL_GAS_LIMIT + } } } diff --git a/core/lib/multivm/src/versions/vm_latest/constants.rs b/core/lib/multivm/src/versions/vm_latest/constants.rs index 1f02162f7348..01f697ec91a2 100644 --- a/core/lib/multivm/src/versions/vm_latest/constants.rs +++ b/core/lib/multivm/src/versions/vm_latest/constants.rs @@ -3,7 +3,7 @@ use zk_evm_1_5_0::aux_structures::MemoryPage; pub use zk_evm_1_5_0::zkevm_opcode_defs::system_params::{ ERGS_PER_CIRCUIT, INITIAL_STORAGE_WRITE_PUBDATA_BYTES, }; -use zksync_system_constants::{MAX_L2_TX_GAS_LIMIT, MAX_NEW_FACTORY_DEPS}; +use zksync_system_constants::MAX_NEW_FACTORY_DEPS; use super::vm::MultiVMSubversion; use crate::vm_latest::old_vm::utils::heap_page_from_base; @@ -160,7 +160,7 @@ pub const BATCH_COMPUTATIONAL_GAS_LIMIT: u32 = pub const BATCH_GAS_LIMIT: u64 = 1 << 50; /// How many gas is allowed to spend on a single transaction in eth_call 
method -pub const ETH_CALL_GAS_LIMIT: u32 = MAX_L2_TX_GAS_LIMIT as u32; +pub const ETH_CALL_GAS_LIMIT: u64 = BATCH_GAS_LIMIT; /// ID of the transaction from L1 pub const L1_TX_TYPE: u8 = 255; diff --git a/core/lib/object_store/Cargo.toml b/core/lib/object_store/Cargo.toml index e8d5322765ec..3e33c9097153 100644 --- a/core/lib/object_store/Cargo.toml +++ b/core/lib/object_store/Cargo.toml @@ -22,9 +22,11 @@ google-cloud-auth.workspace = true http.workspace = true serde_json.workspace = true flate2.workspace = true +rand.workspace = true tokio = { workspace = true, features = ["full"] } tracing.workspace = true prost.workspace = true [dev-dependencies] +assert_matches.workspace = true tempfile.workspace = true diff --git a/core/lib/object_store/src/file.rs b/core/lib/object_store/src/file.rs index aea10cccd8e8..f641ab9c74a1 100644 --- a/core/lib/object_store/src/file.rs +++ b/core/lib/object_store/src/file.rs @@ -9,7 +9,10 @@ impl From for ObjectStoreError { fn from(err: io::Error) -> Self { match err.kind() { io::ErrorKind::NotFound => ObjectStoreError::KeyNotFound(err.into()), - _ => ObjectStoreError::Other(err.into()), + kind => ObjectStoreError::Other { + is_transient: matches!(kind, io::ErrorKind::Interrupted | io::ErrorKind::TimedOut), + source: err.into(), + }, } } } @@ -20,7 +23,7 @@ pub(crate) struct FileBackedObjectStore { } impl FileBackedObjectStore { - pub async fn new(base_dir: String) -> Self { + pub async fn new(base_dir: String) -> Result { for bucket in &[ Bucket::ProverJobs, Bucket::WitnessInput, @@ -36,13 +39,9 @@ impl FileBackedObjectStore { Bucket::TeeVerifierInput, ] { let bucket_path = format!("{base_dir}/{bucket}"); - fs::create_dir_all(&bucket_path) - .await - .unwrap_or_else(|err| { - panic!("failed creating bucket `{bucket_path}`: {err}"); - }); + fs::create_dir_all(&bucket_path).await?; } - FileBackedObjectStore { base_dir } + Ok(FileBackedObjectStore { base_dir }) } fn filename(&self, bucket: Bucket, key: &str) -> String { @@ -87,12 +86,12 @@ mod test { async fn test_get() { let dir = TempDir::new().unwrap(); let path = dir.into_path().into_os_string().into_string().unwrap(); - let object_store = FileBackedObjectStore::new(path).await; + let object_store = FileBackedObjectStore::new(path).await.unwrap(); let expected = vec![9, 0, 8, 9, 0, 7]; - let result = object_store + object_store .put_raw(Bucket::ProverJobs, "test-key.bin", expected.clone()) - .await; - assert!(result.is_ok(), "result must be OK"); + .await + .unwrap(); let bytes = object_store .get_raw(Bucket::ProverJobs, "test-key.bin") .await @@ -104,26 +103,26 @@ mod test { async fn test_put() { let dir = TempDir::new().unwrap(); let path = dir.into_path().into_os_string().into_string().unwrap(); - let object_store = FileBackedObjectStore::new(path).await; + let object_store = FileBackedObjectStore::new(path).await.unwrap(); let bytes = vec![9, 0, 8, 9, 0, 7]; - let result = object_store + object_store .put_raw(Bucket::ProverJobs, "test-key.bin", bytes) - .await; - assert!(result.is_ok(), "result must be OK"); + .await + .unwrap(); } #[tokio::test] async fn test_remove() { let dir = TempDir::new().unwrap(); let path = dir.into_path().into_os_string().into_string().unwrap(); - let object_store = FileBackedObjectStore::new(path).await; - let result = object_store + let object_store = FileBackedObjectStore::new(path).await.unwrap(); + object_store .put_raw(Bucket::ProverJobs, "test-key.bin", vec![0, 1]) - .await; - assert!(result.is_ok(), "result must be OK"); - let result = object_store + .await + 
.unwrap(); + object_store .remove_raw(Bucket::ProverJobs, "test-key.bin") - .await; - assert!(result.is_ok(), "result must be OK"); + .await + .unwrap(); } } diff --git a/core/lib/object_store/src/gcs.rs b/core/lib/object_store/src/gcs.rs index d2650a48ea50..8cd7b982a058 100644 --- a/core/lib/object_store/src/gcs.rs +++ b/core/lib/object_store/src/gcs.rs @@ -3,7 +3,7 @@ use std::{fmt, future::Future, time::Duration}; use async_trait::async_trait; -use google_cloud_auth::{credentials::CredentialsFile, error::Error}; +use google_cloud_auth::{credentials::CredentialsFile, error::Error as AuthError}; use google_cloud_storage::{ client::{Client, ClientConfig}, http::{ @@ -17,37 +17,45 @@ use google_cloud_storage::{ }, }; use http::StatusCode; +use rand::Rng; use crate::{ metrics::GCS_METRICS, raw::{Bucket, ObjectStore, ObjectStoreError}, }; -async fn retry(max_retries: u16, mut f: F) -> Result +async fn retry(max_retries: u16, mut f: F) -> Result where - E: fmt::Display, - Fut: Future>, + Fut: Future>, F: FnMut() -> Fut, { let mut retries = 1; - let mut backoff = 1; + let mut backoff_secs = 1; loop { match f().await { Ok(result) => return Ok(result), - Err(err) => { - tracing::warn!(%err, "Failed GCS request {retries}/{max_retries}, retrying."); + Err(err) if err.is_transient() => { if retries > max_retries { + tracing::warn!(%err, "Exhausted {max_retries} retries performing GCS request; returning last error"); return Err(err); } + tracing::info!(%err, "Failed GCS request {retries}/{max_retries}, retrying."); retries += 1; - tokio::time::sleep(Duration::from_secs(backoff)).await; - backoff *= 2; + // Randomize sleep duration to prevent stampeding the server if multiple requests are initiated at the same time. + let sleep_duration = Duration::from_secs(backoff_secs) + .mul_f32(rand::thread_rng().gen_range(0.8..1.2)); + tokio::time::sleep(sleep_duration).await; + backoff_secs *= 2; + } + Err(err) => { + tracing::warn!(%err, "Failed GCS request with a fatal error"); + return Err(err); } } } } -pub struct GoogleCloudStorage { +pub(crate) struct GoogleCloudStorage { bucket_prefix: String, max_retries: u16, client: Client, @@ -64,7 +72,7 @@ impl fmt::Debug for GoogleCloudStorage { } #[derive(Debug, Clone)] -pub enum GoogleCloudStorageAuthMode { +pub(crate) enum GoogleCloudStorageAuthMode { AuthenticatedWithCredentialFile(String), Authenticated, Anonymous, @@ -75,26 +83,27 @@ impl GoogleCloudStorage { auth_mode: GoogleCloudStorageAuthMode, bucket_prefix: String, max_retries: u16, - ) -> Self { - let client_config = retry(max_retries, || Self::get_client_config(auth_mode.clone())) - .await - .expect("failed fetching GCS client config after retries"); + ) -> Result { + let client_config = retry(max_retries, || async { + Self::get_client_config(auth_mode.clone()) + .await + .map_err(Into::into) + }) + .await?; - Self { + Ok(Self { client: Client::new(client_config), bucket_prefix, max_retries, - } + }) } async fn get_client_config( auth_mode: GoogleCloudStorageAuthMode, - ) -> Result { + ) -> Result { match auth_mode { GoogleCloudStorageAuthMode::AuthenticatedWithCredentialFile(path) => { - let cred_file = CredentialsFile::new_from_file(path) - .await - .expect("failed loading GCS credential file"); + let cred_file = CredentialsFile::new_from_file(path).await?; ClientConfig::default().with_credentials(cred_file).await } GoogleCloudStorageAuthMode::Authenticated => ClientConfig::default().with_auth().await, @@ -127,9 +136,24 @@ impl GoogleCloudStorage { ..DeleteObjectRequest::default() }; async move { - 
retry(self.max_retries, || self.client.delete_object(&request)) - .await - .map_err(ObjectStoreError::from) + retry(self.max_retries, || async { + self.client + .delete_object(&request) + .await + .map_err(ObjectStoreError::from) + }) + .await + } + } +} + +impl From for ObjectStoreError { + fn from(err: AuthError) -> Self { + let is_transient = + matches!(&err, AuthError::HttpError(err) if err.is_timeout() || err.is_connect()); + Self::Initialization { + source: err.into(), + is_transient, } } } @@ -147,7 +171,12 @@ impl From for ObjectStoreError { if is_not_found { ObjectStoreError::KeyNotFound(err.into()) } else { - ObjectStoreError::Other(err.into()) + let is_transient = + matches!(&err, HttpError::HttpClient(err) if err.is_timeout() || err.is_connect()); + ObjectStoreError::Other { + is_transient, + source: err.into(), + } } } } @@ -168,8 +197,11 @@ impl ObjectStore for GoogleCloudStorage { ..GetObjectRequest::default() }; let range = Range::default(); - let blob = retry(self.max_retries, || { - self.client.download_object(&request, &range) + let blob = retry(self.max_retries, || async { + self.client + .download_object(&request, &range) + .await + .map_err(Into::into) }) .await; @@ -177,7 +209,7 @@ impl ObjectStore for GoogleCloudStorage { tracing::trace!( "Fetched data from GCS for key {key} from bucket {bucket} and it took: {elapsed:?}" ); - blob.map_err(ObjectStoreError::from) + blob } async fn put_raw( @@ -198,9 +230,11 @@ impl ObjectStore for GoogleCloudStorage { bucket: self.bucket_prefix.clone(), ..Default::default() }; - let object = retry(self.max_retries, || { + let object = retry(self.max_retries, || async { self.client .upload_object(&request, value.clone(), &upload_type) + .await + .map_err(Into::into) }) .await; @@ -208,7 +242,7 @@ impl ObjectStore for GoogleCloudStorage { tracing::trace!( "Stored data to GCS for key {key} from bucket {bucket} and it took: {elapsed:?}" ); - object.map(drop).map_err(ObjectStoreError::from) + object.map(drop) } async fn remove_raw(&self, bucket: Bucket, key: &str) -> Result<(), ObjectStoreError> { @@ -228,38 +262,47 @@ impl ObjectStore for GoogleCloudStorage { mod test { use std::sync::atomic::{AtomicU16, Ordering}; + use assert_matches::assert_matches; + use super::*; + fn transient_error() -> ObjectStoreError { + ObjectStoreError::Other { + is_transient: true, + source: "oops".into(), + } + } + #[tokio::test] async fn test_retry_success_immediate() { - let result = retry(2, || async { Ok::<_, &'static str>(42) }).await; - assert_eq!(result, Ok(42)); + let result = retry(2, || async { Ok(42) }).await.unwrap(); + assert_eq!(result, 42); } #[tokio::test] async fn test_retry_failure_exhausted() { - let result = retry(2, || async { Err::("oops") }).await; - assert_eq!(result, Err("oops")); + let err = retry(2, || async { Err::(transient_error()) }) + .await + .unwrap_err(); + assert_matches!(err, ObjectStoreError::Other { .. 
}); } - async fn retry_success_after_n_retries(n: u16) -> Result { + async fn retry_success_after_n_retries(n: u16) -> Result { let retries = AtomicU16::new(0); - let result = retry(n, || async { + retry(n, || async { let retries = retries.fetch_add(1, Ordering::Relaxed); if retries + 1 == n { Ok(42) } else { - Err("oops") + Err(transient_error()) } }) - .await; - - result.map_err(|_| "Retry failed".to_string()) + .await } #[tokio::test] async fn test_retry_success_after_retry() { - let result = retry(2, || retry_success_after_n_retries(2)).await; - assert_eq!(result, Ok(42)); + let result = retry(2, || retry_success_after_n_retries(2)).await.unwrap(); + assert_eq!(result, 42); } } diff --git a/core/lib/object_store/src/raw.rs b/core/lib/object_store/src/raw.rs index 6bc1a61c9885..d415ae431aaa 100644 --- a/core/lib/object_store/src/raw.rs +++ b/core/lib/object_store/src/raw.rs @@ -57,21 +57,58 @@ pub type BoxedError = Box; /// Errors during [`ObjectStore`] operations. #[derive(Debug)] +#[non_exhaustive] pub enum ObjectStoreError { + /// Object store initialization failed. + Initialization { + source: BoxedError, + is_transient: bool, + }, /// An object with the specified key is not found. KeyNotFound(BoxedError), /// Object (de)serialization failed. Serialization(BoxedError), /// Other error has occurred when accessing the store (e.g., a network error). - Other(BoxedError), + Other { + source: BoxedError, + is_transient: bool, + }, +} + +impl ObjectStoreError { + /// Gives a best-effort estimate whether this error is transient. + pub fn is_transient(&self) -> bool { + match self { + Self::Initialization { is_transient, .. } | Self::Other { is_transient, .. } => { + *is_transient + } + Self::KeyNotFound(_) | Self::Serialization(_) => false, + } + } } impl fmt::Display for ObjectStoreError { fn fmt(&self, formatter: &mut fmt::Formatter<'_>) -> fmt::Result { match self { + Self::Initialization { + source, + is_transient, + } => { + let kind = if *is_transient { "transient" } else { "fatal" }; + write!( + formatter, + "{kind} error initializing object store: {source}" + ) + } Self::KeyNotFound(err) => write!(formatter, "key not found: {err}"), Self::Serialization(err) => write!(formatter, "serialization error: {err}"), - Self::Other(err) => write!(formatter, "other error: {err}"), + Self::Other { + source, + is_transient, + } => { + let kind = if *is_transient { "transient" } else { "fatal" }; + write!(formatter, "{kind} error accessing object store: {source}") + } } } } @@ -79,9 +116,10 @@ impl fmt::Display for ObjectStoreError { impl error::Error for ObjectStoreError { fn source(&self) -> Option<&(dyn error::Error + 'static)> { match self { - Self::KeyNotFound(err) | Self::Serialization(err) | Self::Other(err) => { - Some(err.as_ref()) + Self::Initialization { source, .. } | Self::Other { source, .. } => { + Some(source.as_ref()) } + Self::KeyNotFound(err) | Self::Serialization(err) => Some(err.as_ref()), } } } @@ -184,14 +222,26 @@ impl ObjectStoreFactory { } /// Creates an [`ObjectStore`]. + /// + /// # Panics + /// + /// Panics if store initialization fails (e.g., because of incorrect configuration). 
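+    /// The panic message includes the offending configuration and the underlying error.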
pub async fn create_store(&self) -> Arc { match &self.origin { - ObjectStoreOrigin::Config(config) => Self::create_from_config(config).await, + ObjectStoreOrigin::Config(config) => Self::create_from_config(config) + .await + .unwrap_or_else(|err| { + panic!( + "failed creating object store factory with configuration {config:?}: {err}" + ) + }), ObjectStoreOrigin::Mock(store) => Arc::new(Arc::clone(store)), } } - async fn create_from_config(config: &ObjectStoreConfig) -> Arc { + async fn create_from_config( + config: &ObjectStoreConfig, + ) -> Result, ObjectStoreError> { match &config.mode { ObjectStoreMode::GCS { bucket_base_url } => { tracing::trace!( @@ -202,8 +252,8 @@ impl ObjectStoreFactory { bucket_base_url.clone(), config.max_retries, ) - .await; - Arc::new(store) + .await?; + Ok(Arc::new(store)) } ObjectStoreMode::GCSWithCredentialFile { bucket_base_url, @@ -217,15 +267,15 @@ impl ObjectStoreFactory { bucket_base_url.clone(), config.max_retries, ) - .await; - Arc::new(store) + .await?; + Ok(Arc::new(store)) } ObjectStoreMode::FileBacked { file_backed_base_path, } => { tracing::trace!("Initialized FileBacked Object store"); - let store = FileBackedObjectStore::new(file_backed_base_path.clone()).await; - Arc::new(store) + let store = FileBackedObjectStore::new(file_backed_base_path.clone()).await?; + Ok(Arc::new(store)) } ObjectStoreMode::GCSAnonymousReadOnly { bucket_base_url } => { tracing::trace!("Initialized GoogleCloudStoragePublicReadOnly store"); @@ -234,8 +284,8 @@ impl ObjectStoreFactory { bucket_base_url.clone(), config.max_retries, ) - .await; - Arc::new(store) + .await?; + Ok(Arc::new(store)) } } } diff --git a/core/lib/protobuf_config/src/general.rs b/core/lib/protobuf_config/src/general.rs index ccd55a71c2ec..ba2076a09a14 100644 --- a/core/lib/protobuf_config/src/general.rs +++ b/core/lib/protobuf_config/src/general.rs @@ -37,6 +37,8 @@ impl ProtoRepr for proto::GeneralConfig { snapshot_creator: read_optional_repr(&self.snapshot_creator) .context("snapshot_creator")?, observability: read_optional_repr(&self.observability).context("observability")?, + protective_reads_writer_config: read_optional_repr(&self.protective_reads_writer) + .context("vm_runner")?, }) } @@ -68,6 +70,10 @@ impl ProtoRepr for proto::GeneralConfig { eth: this.eth.as_ref().map(ProtoRepr::build), snapshot_creator: this.snapshot_creator.as_ref().map(ProtoRepr::build), observability: this.observability.as_ref().map(ProtoRepr::build), + protective_reads_writer: this + .protective_reads_writer_config + .as_ref() + .map(ProtoRepr::build), } } } diff --git a/core/lib/protobuf_config/src/lib.rs b/core/lib/protobuf_config/src/lib.rs index 25d5662b9ddb..2fd9bbd9e059 100644 --- a/core/lib/protobuf_config/src/lib.rs +++ b/core/lib/protobuf_config/src/lib.rs @@ -27,6 +27,7 @@ pub mod testonly; #[cfg(test)] mod tests; mod utils; +mod vm_runner; mod wallets; use std::str::FromStr; diff --git a/core/lib/protobuf_config/src/proto/config/general.proto b/core/lib/protobuf_config/src/proto/config/general.proto index fdc60c57cfdd..b606417d129a 100644 --- a/core/lib/protobuf_config/src/proto/config/general.proto +++ b/core/lib/protobuf_config/src/proto/config/general.proto @@ -13,6 +13,7 @@ import "zksync/config/house_keeper.proto"; import "zksync/config/observability.proto"; import "zksync/config/snapshots_creator.proto"; import "zksync/config/utils.proto"; +import "zksync/config/vm_runner.proto"; message GeneralConfig { optional config.database.Postgres postgres = 1; @@ -35,4 +36,5 @@ message GeneralConfig { optional 
config.prover.ProverGateway prover_gateway = 30; optional config.snapshot_creator.SnapshotsCreator snapshot_creator = 31; optional config.observability.Observability observability = 32; + optional config.vm_runner.ProtectiveReadsWriter protective_reads_writer = 33; } diff --git a/core/lib/protobuf_config/src/proto/config/vm_runner.proto b/core/lib/protobuf_config/src/proto/config/vm_runner.proto new file mode 100644 index 000000000000..a7c829f05869 --- /dev/null +++ b/core/lib/protobuf_config/src/proto/config/vm_runner.proto @@ -0,0 +1,8 @@ +syntax = "proto3"; + +package zksync.config.vm_runner; + +message ProtectiveReadsWriter { + optional string protective_reads_db_path = 1; // required; fs path + optional uint64 protective_reads_window_size = 2; // required +} diff --git a/core/lib/protobuf_config/src/vm_runner.rs b/core/lib/protobuf_config/src/vm_runner.rs new file mode 100644 index 000000000000..227e22cd5d22 --- /dev/null +++ b/core/lib/protobuf_config/src/vm_runner.rs @@ -0,0 +1,27 @@ +use anyhow::Context; +use zksync_config::configs::{self}; +use zksync_protobuf::{required, ProtoRepr}; + +use crate::proto::vm_runner as proto; + +impl ProtoRepr for proto::ProtectiveReadsWriter { + type Type = configs::ProtectiveReadsWriterConfig; + + fn read(&self) -> anyhow::Result { + Ok(Self::Type { + protective_reads_db_path: required(&self.protective_reads_db_path) + .context("protective_reads_db_path")? + .clone(), + protective_reads_window_size: *required(&self.protective_reads_window_size) + .context("protective_reads_window_size")? + as u32, + }) + } + + fn build(this: &Self::Type) -> Self { + Self { + protective_reads_db_path: Some(this.protective_reads_db_path.clone()), + protective_reads_window_size: Some(this.protective_reads_window_size as u64), + } + } +} diff --git a/core/lib/snapshots_applier/src/lib.rs b/core/lib/snapshots_applier/src/lib.rs index 8e6543a80958..bcf4b3c14329 100644 --- a/core/lib/snapshots_applier/src/lib.rs +++ b/core/lib/snapshots_applier/src/lib.rs @@ -78,13 +78,10 @@ enum SnapshotsApplierError { impl SnapshotsApplierError { fn object_store(err: ObjectStoreError, context: String) -> Self { - match err { - ObjectStoreError::KeyNotFound(_) | ObjectStoreError::Serialization(_) => { - Self::Fatal(anyhow::Error::from(err).context(context)) - } - ObjectStoreError::Other(_) => { - Self::Retryable(anyhow::Error::from(err).context(context)) - } + if err.is_transient() { + Self::Retryable(anyhow::Error::from(err).context(context)) + } else { + Self::Fatal(anyhow::Error::from(err).context(context)) } } } diff --git a/core/lib/snapshots_applier/src/tests/mod.rs b/core/lib/snapshots_applier/src/tests/mod.rs index 33ba37b55771..59a95792c1ca 100644 --- a/core/lib/snapshots_applier/src/tests/mod.rs +++ b/core/lib/snapshots_applier/src/tests/mod.rs @@ -50,7 +50,10 @@ async fn snapshots_creator_can_successfully_recover_db( if error_counter.fetch_add(1, Ordering::SeqCst) >= 3 { Ok(()) // "recover" after 3 retries } else { - Err(ObjectStoreError::Other("transient error".into())) + Err(ObjectStoreError::Other { + is_transient: true, + source: "transient error".into(), + }) } }); Arc::new(object_store_with_errors) @@ -315,7 +318,10 @@ async fn applier_returns_error_after_too_many_object_store_retries() { let storage_logs = random_storage_logs(expected_status.l1_batch_number, 100); let (object_store, client) = prepare_clients(&expected_status, &storage_logs).await; let object_store = ObjectStoreWithErrors::new(object_store, |_| { - Err(ObjectStoreError::Other("service not 
available".into())) + Err(ObjectStoreError::Other { + is_transient: true, + source: "service not available".into(), + }) }); let task = SnapshotsApplierTask::new( @@ -328,7 +334,7 @@ async fn applier_returns_error_after_too_many_object_store_retries() { assert!(err.chain().any(|cause| { matches!( cause.downcast_ref::(), - Some(ObjectStoreError::Other(_)) + Some(ObjectStoreError::Other { .. }) ) })); } diff --git a/core/lib/types/src/l1/mod.rs b/core/lib/types/src/l1/mod.rs index e94be684f9d7..615574278d29 100644 --- a/core/lib/types/src/l1/mod.rs +++ b/core/lib/types/src/l1/mod.rs @@ -73,15 +73,39 @@ pub fn is_l1_tx_type(tx_type: u8) -> bool { tx_type == PRIORITY_OPERATION_L2_TX_TYPE || tx_type == PROTOCOL_UPGRADE_TX_TYPE } -#[derive(Default, Debug, Clone, PartialEq, Serialize, Deserialize)] +// TODO(PLA-962): remove once all nodes start treating the deprecated fields as optional. +#[derive(Serialize, Deserialize)] #[serde(rename_all = "camelCase")] +struct L1TxCommonDataSerde { + pub sender: Address, + pub serial_id: PriorityOpId, + pub layer_2_tip_fee: U256, + pub full_fee: U256, + pub max_fee_per_gas: U256, + pub gas_limit: U256, + pub gas_per_pubdata_limit: U256, + pub op_processing_type: OpProcessingType, + pub priority_queue_type: PriorityQueueType, + pub canonical_tx_hash: H256, + pub to_mint: U256, + pub refund_recipient: Address, + + /// DEPRECATED. + #[serde(default)] + pub deadline_block: u64, + #[serde(default)] + pub eth_hash: H256, + #[serde(default)] + pub eth_block: u64, +} + +#[derive(Default, Debug, Clone, PartialEq)] pub struct L1TxCommonData { /// Sender of the transaction. pub sender: Address, /// Unique ID of the priority operation. pub serial_id: PriorityOpId, - /// Ethereum deadline block until which operation must be processed. - pub deadline_block: u64, + /// Additional payment to the operator as an incentive to perform the operation. The contract uses a value of 192 bits. pub layer_2_tip_fee: U256, /// The total cost the sender paid for the transaction. @@ -96,16 +120,63 @@ pub struct L1TxCommonData { pub op_processing_type: OpProcessingType, /// Priority operations queue type. pub priority_queue_type: PriorityQueueType, - /// Hash of the corresponding Ethereum transaction. Size should be 32 bytes. - pub eth_hash: H256, - /// Block in which Ethereum transaction was included. - pub eth_block: u64, /// Tx hash of the transaction in the zkSync network. Calculated as the encoded transaction data hash. pub canonical_tx_hash: H256, /// The amount of ETH that should be minted with this transaction pub to_mint: U256, /// The recipient of the refund of the transaction pub refund_recipient: Address, + + // DEPRECATED. + pub eth_block: u64, +} + +impl serde::Serialize for L1TxCommonData { + fn serialize(&self, s: S) -> Result { + L1TxCommonDataSerde { + sender: self.sender, + serial_id: self.serial_id, + layer_2_tip_fee: self.layer_2_tip_fee, + full_fee: self.full_fee, + max_fee_per_gas: self.max_fee_per_gas, + gas_limit: self.gas_limit, + gas_per_pubdata_limit: self.gas_per_pubdata_limit, + op_processing_type: self.op_processing_type, + priority_queue_type: self.priority_queue_type, + canonical_tx_hash: self.canonical_tx_hash, + to_mint: self.to_mint, + refund_recipient: self.refund_recipient, + + /// DEPRECATED. 
+ deadline_block: 0, + eth_hash: H256::default(), + eth_block: self.eth_block, + } + .serialize(s) + } +} + +impl<'de> serde::Deserialize<'de> for L1TxCommonData { + fn deserialize>(d: D) -> Result { + let x = L1TxCommonDataSerde::deserialize(d)?; + Ok(Self { + sender: x.sender, + serial_id: x.serial_id, + layer_2_tip_fee: x.layer_2_tip_fee, + full_fee: x.full_fee, + max_fee_per_gas: x.max_fee_per_gas, + gas_limit: x.gas_limit, + gas_per_pubdata_limit: x.gas_per_pubdata_limit, + op_processing_type: x.op_processing_type, + priority_queue_type: x.priority_queue_type, + canonical_tx_hash: x.canonical_tx_hash, + to_mint: x.to_mint, + refund_recipient: x.refund_recipient, + + // DEPRECATED. + eth_block: x.eth_block, + }) + } } impl L1TxCommonData { @@ -229,9 +300,6 @@ impl TryFrom for L1Tx { &event.data.0, )?; - let eth_hash = event - .transaction_hash - .expect("Event transaction hash is missing"); let eth_block = event .block_number .expect("Event block number is missing") @@ -248,7 +316,8 @@ impl TryFrom for L1Tx { let canonical_tx_hash = H256::from_slice(&dec_ev.remove(0).into_fixed_bytes().unwrap()); - let deadline_block = dec_ev.remove(0).into_uint().unwrap().as_u64(); + // DEPRECATED. + let _deadline_block = dec_ev.remove(0).into_uint().unwrap().as_u64(); // Decoding transaction bytes let mut transaction = match dec_ev.remove(0) { @@ -325,7 +394,6 @@ impl TryFrom for L1Tx { serial_id, canonical_tx_hash, sender, - deadline_block, layer_2_tip_fee: U256::zero(), to_mint, refund_recipient, @@ -335,7 +403,8 @@ impl TryFrom for L1Tx { gas_per_pubdata_limit, op_processing_type: OpProcessingType::Common, priority_queue_type: PriorityQueueType::Deque, - eth_hash, + // DEPRECATED. + // TODO (PLA-962): start setting it to 0 for all new transactions. eth_block, }; diff --git a/core/lib/types/src/protocol_upgrade.rs b/core/lib/types/src/protocol_upgrade.rs index d374854b8139..804a4083a82a 100644 --- a/core/lib/types/src/protocol_upgrade.rs +++ b/core/lib/types/src/protocol_upgrade.rs @@ -102,11 +102,7 @@ fn get_transaction_param_type() -> ParamType { } impl ProtocolUpgrade { - fn try_from_decoded_tokens( - tokens: Vec, - transaction_hash: H256, - transaction_block_number: u64, - ) -> Result { + fn try_from_decoded_tokens(tokens: Vec) -> Result { let init_calldata = tokens[2].clone().into_bytes().unwrap(); let transaction_param_type: ParamType = get_transaction_param_type(); @@ -144,12 +140,7 @@ impl ProtocolUpgrade { let factory_deps = decoded.remove(0).into_array().unwrap(); - let tx = ProtocolUpgradeTx::decode_tx( - transaction, - transaction_hash, - transaction_block_number, - factory_deps, - ); + let tx = ProtocolUpgradeTx::decode_tx(transaction, factory_deps); let bootloader_code_hash = H256::from_slice(&decoded.remove(0).into_fixed_bytes().unwrap()); let default_account_code_hash = H256::from_slice(&decoded.remove(0).into_fixed_bytes().unwrap()); @@ -205,18 +196,10 @@ pub fn decode_set_chain_id_event( let protocol_version = ProtocolVersionId::try_from_packed_semver(full_version_id) .unwrap_or_else(|_| panic!("Version is not supported, packed version: {full_version_id}")); - let eth_hash = event - .transaction_hash - .expect("Event transaction hash is missing"); - let eth_block = event - .block_number - .expect("Event block number is missing") - .as_u64(); - let factory_deps: Vec = Vec::new(); - let upgrade_tx = ProtocolUpgradeTx::decode_tx(transaction, eth_hash, eth_block, factory_deps) - .expect("Upgrade tx is missing"); + let upgrade_tx = + ProtocolUpgradeTx::decode_tx(transaction, 
factory_deps).expect("Upgrade tx is missing"); Ok((protocol_version, upgrade_tx)) } @@ -224,8 +207,6 @@ pub fn decode_set_chain_id_event( impl ProtocolUpgradeTx { pub fn decode_tx( mut transaction: Vec, - eth_hash: H256, - eth_block: u64, factory_deps: Vec, ) -> Option { let canonical_tx_hash = H256(keccak256(&encode(&[Token::Tuple(transaction.clone())]))); @@ -308,8 +289,7 @@ impl ProtocolUpgradeTx { gas_limit, max_fee_per_gas, gas_per_pubdata_limit, - eth_hash, - eth_block, + eth_block: 0, }; let factory_deps = factory_deps @@ -336,12 +316,7 @@ impl TryFrom for ProtocolUpgrade { type Error = crate::ethabi::Error; fn try_from(call: Call) -> Result { - let Call { - data, - eth_hash, - eth_block, - .. - } = call; + let Call { data, .. } = call; if data.len() < 4 { return Err(crate::ethabi::Error::InvalidData); @@ -376,7 +351,7 @@ impl TryFrom for ProtocolUpgrade { return Err(crate::ethabi::Error::InvalidData); }; - ProtocolUpgrade::try_from_decoded_tokens(diamond_cut_tokens, eth_hash, eth_block) + ProtocolUpgrade::try_from_decoded_tokens(diamond_cut_tokens) } } @@ -492,8 +467,27 @@ impl ProtocolVersion { } } -#[derive(Default, Debug, Clone, PartialEq, Serialize, Deserialize)] +// TODO(PLA-962): remove once all nodes start treating the deprecated fields as optional. +#[derive(Serialize, Deserialize)] #[serde(rename_all = "camelCase")] +struct ProtocolUpgradeTxCommonDataSerde { + pub sender: Address, + pub upgrade_id: ProtocolVersionId, + pub max_fee_per_gas: U256, + pub gas_limit: U256, + pub gas_per_pubdata_limit: U256, + pub canonical_tx_hash: H256, + pub to_mint: U256, + pub refund_recipient: Address, + + /// DEPRECATED. + #[serde(default)] + pub eth_hash: H256, + #[serde(default)] + pub eth_block: u64, +} + +#[derive(Default, Debug, Clone, PartialEq)] pub struct ProtocolUpgradeTxCommonData { /// Sender of the transaction. pub sender: Address, @@ -505,8 +499,6 @@ pub struct ProtocolUpgradeTxCommonData { pub gas_limit: U256, /// The maximum number of gas per 1 byte of pubdata. pub gas_per_pubdata_limit: U256, - /// Hash of the corresponding Ethereum transaction. Size should be 32 bytes. - pub eth_hash: H256, /// Block in which Ethereum transaction was included. pub eth_block: u64, /// Tx hash of the transaction in the zkSync network. Calculated as the encoded transaction data hash. @@ -527,6 +519,45 @@ impl ProtocolUpgradeTxCommonData { } } +impl serde::Serialize for ProtocolUpgradeTxCommonData { + fn serialize(&self, s: S) -> Result { + ProtocolUpgradeTxCommonDataSerde { + sender: self.sender, + upgrade_id: self.upgrade_id, + max_fee_per_gas: self.max_fee_per_gas, + gas_limit: self.gas_limit, + gas_per_pubdata_limit: self.gas_per_pubdata_limit, + canonical_tx_hash: self.canonical_tx_hash, + to_mint: self.to_mint, + refund_recipient: self.refund_recipient, + + /// DEPRECATED. + eth_hash: H256::default(), + eth_block: self.eth_block, + } + .serialize(s) + } +} + +impl<'de> serde::Deserialize<'de> for ProtocolUpgradeTxCommonData { + fn deserialize>(d: D) -> Result { + let x = ProtocolUpgradeTxCommonDataSerde::deserialize(d)?; + Ok(Self { + sender: x.sender, + upgrade_id: x.upgrade_id, + max_fee_per_gas: x.max_fee_per_gas, + gas_limit: x.gas_limit, + gas_per_pubdata_limit: x.gas_per_pubdata_limit, + canonical_tx_hash: x.canonical_tx_hash, + to_mint: x.to_mint, + refund_recipient: x.refund_recipient, + + // DEPRECATED. 
+ eth_block: x.eth_block, + }) + } +} + #[derive(Debug, Clone, PartialEq, Serialize, Deserialize)] pub struct ProtocolUpgradeTx { pub execute: Execute, diff --git a/core/lib/types/src/transaction_request.rs b/core/lib/types/src/transaction_request.rs index 72551d762d18..c2526cc3ed6f 100644 --- a/core/lib/types/src/transaction_request.rs +++ b/core/lib/types/src/transaction_request.rs @@ -66,11 +66,32 @@ pub struct CallRequest { pub eip712_meta: Option, } +/// While some default parameters are usually provided for the `eth_call` methods, +/// sometimes users may want to override those. +pub struct CallOverrides { + pub enforced_base_fee: Option, +} + impl CallRequest { /// Function to return a builder for a Call Request pub fn builder() -> CallRequestBuilder { CallRequestBuilder::default() } + + pub fn get_call_overrides(&self) -> Result { + let provided_gas_price = self.max_fee_per_gas.or(self.gas_price); + let enforced_base_fee = if let Some(provided_gas_price) = provided_gas_price { + Some( + provided_gas_price + .try_into() + .map_err(|_| SerializationTransactionError::MaxFeePerGasNotU64)?, + ) + } else { + None + }; + + Ok(CallOverrides { enforced_base_fee }) + } } /// Call Request Builder @@ -183,10 +204,16 @@ pub enum SerializationTransactionError { AccessListsNotSupported, #[error("nonce has max value")] TooBigNonce, - /// Sanity check error to avoid extremely big numbers specified + + /// Sanity checks to avoid extremely big numbers specified /// to gas and pubdata price. - #[error("{0}")] - TooHighGas(String), + #[error("max fee per gas higher than 2^64-1")] + MaxFeePerGasNotU64, + #[error("max fee per pubdata byte higher than 2^64-1")] + MaxFeePerPubdataByteNotU64, + #[error("max priority fee per gas higher than 2^64-1")] + MaxPriorityFeePerGasNotU64, + /// OversizedData is returned if the raw tx size is greater /// than some meaningful limit a user might use. This is not a consensus error /// making the transaction invalid, rather a DOS protection. 
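The hunk above replaces the stringly-typed `TooHighGas(String)` variant with dedicated error variants and introduces `CallOverrides::get_call_overrides()`, which boils down to a checked `U256` -> `u64` conversion of the provided gas price. A minimal standalone sketch of that pattern, with `u128` standing in for `U256` and all names being illustrative rather than the actual zksync-era types:

// A checked conversion of a user-provided gas price into an enforced base fee.
// `u128` stands in for the real `U256`; the names here are illustrative only.
#[derive(Debug, PartialEq)]
enum FeeError {
    MaxFeePerGasNotU64,
}

#[derive(Debug)]
struct CallOverrides {
    enforced_base_fee: Option<u64>,
}

fn get_call_overrides(
    max_fee_per_gas: Option<u128>,
    gas_price: Option<u128>,
) -> Result<CallOverrides, FeeError> {
    // Prefer the EIP-1559 field, fall back to the legacy one.
    let provided_gas_price = max_fee_per_gas.or(gas_price);
    let enforced_base_fee = match provided_gas_price {
        Some(value) => Some(u64::try_from(value).map_err(|_| FeeError::MaxFeePerGasNotU64)?),
        None => None,
    };
    Ok(CallOverrides { enforced_base_fee })
}

fn main() {
    assert_eq!(
        get_call_overrides(None, Some(100)).unwrap().enforced_base_fee,
        Some(100)
    );
    assert!(matches!(
        get_call_overrides(Some(u128::MAX), None),
        Err(FeeError::MaxFeePerGasNotU64)
    ));
}

Typed variants let tests match on the exact failure (as the updated assertions in this patch do) instead of comparing error strings.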
@@ -736,16 +763,12 @@ impl TransactionRequest { fn get_fee_data_checked(&self) -> Result { if self.gas_price > u64::MAX.into() { - return Err(SerializationTransactionError::TooHighGas( - "max fee per gas higher than 2^64-1".to_string(), - )); + return Err(SerializationTransactionError::MaxFeePerGasNotU64); } let gas_per_pubdata_limit = if let Some(meta) = &self.eip712_meta { if meta.gas_per_pubdata > u64::MAX.into() { - return Err(SerializationTransactionError::TooHighGas( - "max fee per pubdata byte higher than 2^64-1".to_string(), - )); + return Err(SerializationTransactionError::MaxFeePerPubdataByteNotU64); } else if meta.gas_per_pubdata == U256::zero() { return Err(SerializationTransactionError::GasPerPubDataLimitZero); } @@ -757,9 +780,7 @@ impl TransactionRequest { let max_priority_fee_per_gas = self.max_priority_fee_per_gas.unwrap_or(self.gas_price); if max_priority_fee_per_gas > u64::MAX.into() { - return Err(SerializationTransactionError::TooHighGas( - "max priority fee per gas higher than 2^64-1".to_string(), - )); + return Err(SerializationTransactionError::MaxPriorityFeePerGasNotU64); } Ok(Fee { @@ -1316,9 +1337,7 @@ mod tests { L2Tx::from_request(tx1, usize::MAX); assert_eq!( execute_tx1.unwrap_err(), - SerializationTransactionError::TooHighGas( - "max fee per gas higher than 2^64-1".to_string() - ) + SerializationTransactionError::MaxFeePerGasNotU64 ); let tx2 = TransactionRequest { @@ -1332,9 +1351,7 @@ mod tests { L2Tx::from_request(tx2, usize::MAX); assert_eq!( execute_tx2.unwrap_err(), - SerializationTransactionError::TooHighGas( - "max priority fee per gas higher than 2^64-1".to_string() - ) + SerializationTransactionError::MaxPriorityFeePerGasNotU64 ); let tx3 = TransactionRequest { @@ -1352,9 +1369,7 @@ mod tests { L2Tx::from_request(tx3, usize::MAX); assert_eq!( execute_tx3.unwrap_err(), - SerializationTransactionError::TooHighGas( - "max fee per pubdata byte higher than 2^64-1".to_string() - ) + SerializationTransactionError::MaxFeePerPubdataByteNotU64 ); } diff --git a/core/lib/utils/src/env.rs b/core/lib/utils/src/env.rs index fec413927929..0eddc6c2cd64 100644 --- a/core/lib/utils/src/env.rs +++ b/core/lib/utils/src/env.rs @@ -52,10 +52,12 @@ pub fn locate_workspace() -> Option<&'static Path> { WORKSPACE .get_or_init(|| { let result = locate_workspace_inner(); - if let Err(err) = &result { + if result.is_err() { // `get_or_init()` is guaranteed to call the provided closure once per `OnceCell`; // i.e., we won't spam logs here. - tracing::warn!("locate_workspace() failed: {err:?}"); + tracing::info!( + "locate_workspace() failed. You are using an already compiled version" + ); } result.ok() }) diff --git a/core/lib/zksync_core_leftovers/src/lib.rs b/core/lib/zksync_core_leftovers/src/lib.rs index b0104cc795e3..4f8664ab74dc 100644 --- a/core/lib/zksync_core_leftovers/src/lib.rs +++ b/core/lib/zksync_core_leftovers/src/lib.rs @@ -154,6 +154,8 @@ pub enum Component { Consensus, /// Component generating commitment for L1 batches. CommitmentGenerator, + /// VM runner-based component that saves protective reads to Postgres. 
+ VmRunnerProtectiveReads, } #[derive(Debug)] @@ -190,6 +192,9 @@ impl FromStr for Components { "proof_data_handler" => Ok(Components(vec![Component::ProofDataHandler])), "consensus" => Ok(Components(vec![Component::Consensus])), "commitment_generator" => Ok(Components(vec![Component::CommitmentGenerator])), + "vm_runner_protective_reads" => { + Ok(Components(vec![Component::VmRunnerProtectiveReads])) + } other => Err(format!("{} is not a valid component name", other)), } } diff --git a/core/lib/zksync_core_leftovers/src/temp_config_store/mod.rs b/core/lib/zksync_core_leftovers/src/temp_config_store/mod.rs index cfac1df27cd0..68389228861a 100644 --- a/core/lib/zksync_core_leftovers/src/temp_config_store/mod.rs +++ b/core/lib/zksync_core_leftovers/src/temp_config_store/mod.rs @@ -10,7 +10,7 @@ use zksync_config::{ wallets::{AddressWallet, EthSender, StateKeeper, Wallet, Wallets}, FriProofCompressorConfig, FriProverConfig, FriProverGatewayConfig, FriWitnessGeneratorConfig, FriWitnessVectorGeneratorConfig, GeneralConfig, - ObservabilityConfig, PrometheusConfig, ProofDataHandlerConfig, + ObservabilityConfig, PrometheusConfig, ProofDataHandlerConfig, ProtectiveReadsWriterConfig, }, ApiConfig, ContractVerifierConfig, DBConfig, EthConfig, EthWatchConfig, GasAdjusterConfig, ObjectStoreConfig, PostgresConfig, SnapshotsCreatorConfig, @@ -61,6 +61,7 @@ pub struct TempConfigStore { pub object_store_config: Option, pub observability: Option, pub snapshot_creator: Option, + pub protective_reads_writer_config: Option, } impl TempConfigStore { @@ -86,6 +87,7 @@ impl TempConfigStore { eth: self.eth_sender_config.clone(), snapshot_creator: self.snapshot_creator.clone(), observability: self.observability.clone(), + protective_reads_writer_config: self.protective_reads_writer_config.clone(), } } diff --git a/core/node/api_server/src/execution_sandbox/apply.rs b/core/node/api_server/src/execution_sandbox/apply.rs index d3af1a5c9dd4..dc8b56f41967 100644 --- a/core/node/api_server/src/execution_sandbox/apply.rs +++ b/core/node/api_server/src/execution_sandbox/apply.rs @@ -403,12 +403,12 @@ impl StoredL2BlockInfo { } #[derive(Debug)] -struct ResolvedBlockInfo { +pub(crate) struct ResolvedBlockInfo { state_l2_block_number: L2BlockNumber, state_l2_block_hash: H256, vm_l1_batch_number: L1BatchNumber, l1_batch_timestamp: u64, - protocol_version: ProtocolVersionId, + pub(crate) protocol_version: ProtocolVersionId, historical_fee_input: Option, } @@ -429,7 +429,7 @@ impl BlockArgs { ) } - async fn resolve_block_info( + pub(crate) async fn resolve_block_info( &self, connection: &mut Connection<'_, Core>, ) -> anyhow::Result { diff --git a/core/node/api_server/src/execution_sandbox/execute.rs b/core/node/api_server/src/execution_sandbox/execute.rs index 2fd5b376acb3..72c94e2a428c 100644 --- a/core/node/api_server/src/execution_sandbox/execute.rs +++ b/core/node/api_server/src/execution_sandbox/execute.rs @@ -4,14 +4,13 @@ use anyhow::Context as _; use multivm::{ interface::{TxExecutionMode, VmExecutionResultAndLogs, VmInterface}, tracers::StorageInvocations, - vm_latest::constants::ETH_CALL_GAS_LIMIT, MultiVMTracer, }; use tracing::{span, Level}; use zksync_dal::{ConnectionPool, Core}; use zksync_types::{ - fee::TransactionExecutionMetrics, l2::L2Tx, ExecuteTransactionCommon, Nonce, - PackedEthSignature, Transaction, U256, + fee::TransactionExecutionMetrics, l2::L2Tx, transaction_request::CallOverrides, + ExecuteTransactionCommon, Nonce, PackedEthSignature, Transaction, U256, }; use super::{ @@ -40,7 +39,7 @@ impl 
TxExecutionArgs { } fn for_eth_call( - enforced_base_fee: u64, + enforced_base_fee: Option, vm_execution_cache_misses_limit: Option, ) -> Self { let missed_storage_invocation_limit = vm_execution_cache_misses_limit.unwrap_or(usize::MAX); @@ -48,7 +47,7 @@ impl TxExecutionArgs { execution_mode: TxExecutionMode::EthCall, enforced_nonce: None, added_balance: U256::zero(), - enforced_base_fee: Some(enforced_base_fee), + enforced_base_fee, missed_storage_invocation_limit, } } @@ -170,23 +169,21 @@ impl TransactionExecutor { vm_permit: VmPermit, shared_args: TxSharedArgs, connection_pool: ConnectionPool, + call_overrides: CallOverrides, mut tx: L2Tx, block_args: BlockArgs, vm_execution_cache_misses_limit: Option, custom_tracers: Vec, ) -> anyhow::Result { - let enforced_base_fee = tx.common_data.fee.max_fee_per_gas.as_u64(); - let execution_args = - TxExecutionArgs::for_eth_call(enforced_base_fee, vm_execution_cache_misses_limit); + let execution_args = TxExecutionArgs::for_eth_call( + call_overrides.enforced_base_fee, + vm_execution_cache_misses_limit, + ); if tx.common_data.signature.is_empty() { tx.common_data.signature = PackedEthSignature::default().serialize_packed().into(); } - // Protection against infinite-loop eth_calls and alike: - // limiting the amount of gas the call can use. - // We can't use `BLOCK_ERGS_LIMIT` here since the VM itself has some overhead. - tx.common_data.fee.gas_limit = ETH_CALL_GAS_LIMIT.into(); let output = self .execute_tx_in_sandbox( vm_permit, diff --git a/core/node/api_server/src/tx_sender/mod.rs b/core/node/api_server/src/tx_sender/mod.rs index 9e6bd86415f9..1b13e50b410f 100644 --- a/core/node/api_server/src/tx_sender/mod.rs +++ b/core/node/api_server/src/tx_sender/mod.rs @@ -7,7 +7,7 @@ use multivm::{ interface::VmExecutionResultAndLogs, utils::{ adjust_pubdata_price_for_tx, derive_base_fee_and_gas_per_pubdata, derive_overhead, - get_max_batch_gas_limit, + get_eth_call_gas_limit, get_max_batch_gas_limit, }, vm_latest::constants::BATCH_COMPUTATIONAL_GAS_LIMIT, }; @@ -28,6 +28,7 @@ use zksync_types::{ fee_model::BatchFeeInput, get_code_key, get_intrinsic_constants, l2::{error::TxCheckError::TxDuplication, L2Tx}, + transaction_request::CallOverrides, utils::storage_key_for_eth_balance, AccountTreeId, Address, ExecuteTransactionCommon, L2ChainId, Nonce, PackedEthSignature, ProtocolVersionId, Transaction, VmVersion, H160, H256, MAX_L2_TX_GAS_LIMIT, @@ -965,6 +966,7 @@ impl TxSender { pub(super) async fn eth_call( &self, block_args: BlockArgs, + call_overrides: CallOverrides, tx: L2Tx, ) -> Result, SubmitTxError> { let vm_permit = self.0.vm_concurrency_limiter.acquire().await; @@ -977,6 +979,7 @@ impl TxSender { vm_permit, self.shared_args().await?, self.0.replica_connection_pool.clone(), + call_overrides, tx, block_args, vm_execution_cache_misses_limit, @@ -1036,4 +1039,19 @@ impl TxSender { } Ok(()) } + + pub(crate) async fn get_default_eth_call_gas( + &self, + block_args: BlockArgs, + ) -> anyhow::Result { + let mut connection = self.acquire_replica_connection().await?; + + let protocol_version = block_args + .resolve_block_info(&mut connection) + .await + .context("failed to resolve block info")? 
+ .protocol_version; + + Ok(get_eth_call_gas_limit(protocol_version.into())) + } } diff --git a/core/node/api_server/src/web3/namespaces/debug.rs b/core/node/api_server/src/web3/namespaces/debug.rs index 4b998adcfeb8..400711de8593 100644 --- a/core/node/api_server/src/web3/namespaces/debug.rs +++ b/core/node/api_server/src/web3/namespaces/debug.rs @@ -125,7 +125,7 @@ impl DebugNamespace { pub async fn debug_trace_call_impl( &self, - request: CallRequest, + mut request: CallRequest, block_id: Option, options: Option, ) -> Result { @@ -148,6 +148,19 @@ impl DebugNamespace { .last_sealed_l2_block .diff_with_block_args(&block_args), ); + + if request.gas.is_none() { + request.gas = Some( + self.state + .tx_sender + .get_default_eth_call_gas(block_args) + .await + .map_err(Web3Error::InternalError)? + .into(), + ) + } + + let call_overrides = request.get_call_overrides()?; let tx = L2Tx::from_request(request.into(), MAX_ENCODED_TX_SIZE)?; let shared_args = self.shared_args().await; @@ -173,6 +186,7 @@ impl DebugNamespace { vm_permit, shared_args, self.state.connection_pool.clone(), + call_overrides, tx.clone(), block_args, self.sender_config().vm_execution_cache_misses_limit, diff --git a/core/node/api_server/src/web3/namespaces/eth.rs b/core/node/api_server/src/web3/namespaces/eth.rs index ff2403051de0..b1541f7261bf 100644 --- a/core/node/api_server/src/web3/namespaces/eth.rs +++ b/core/node/api_server/src/web3/namespaces/eth.rs @@ -52,7 +52,7 @@ impl EthNamespace { pub async fn call_impl( &self, - request: CallRequest, + mut request: CallRequest, block_id: Option, ) -> Result { let block_id = block_id.unwrap_or(BlockId::Number(BlockNumber::Pending)); @@ -70,8 +70,25 @@ impl EthNamespace { ); drop(connection); + if request.gas.is_none() { + request.gas = Some( + self.state + .tx_sender + .get_default_eth_call_gas(block_args) + .await + .map_err(Web3Error::InternalError)? + .into(), + ) + } + let call_overrides = request.get_call_overrides()?; let tx = L2Tx::from_request(request.into(), self.state.api_config.max_tx_size)?; - let call_result = self.state.tx_sender.eth_call(block_args, tx).await?; + + // It is assumed that the previous checks has already enforced that the `max_fee_per_gas` is at most u64. 
+ let call_result: Vec = self + .state + .tx_sender + .eth_call(block_args, call_overrides, tx) + .await?; Ok(call_result.into()) } diff --git a/core/node/block_reverter/Cargo.toml b/core/node/block_reverter/Cargo.toml index 178e3da6c58a..68fdf72acd83 100644 --- a/core/node/block_reverter/Cargo.toml +++ b/core/node/block_reverter/Cargo.toml @@ -21,11 +21,13 @@ zksync_state.workspace = true zksync_merkle_tree.workspace = true anyhow.workspace = true +futures.workspace = true tokio = { workspace = true, features = ["time", "fs"] } serde.workspace = true tracing.workspace = true [dev-dependencies] assert_matches.workspace = true +async-trait.workspace = true tempfile.workspace = true test-casing.workspace = true diff --git a/core/node/block_reverter/src/lib.rs b/core/node/block_reverter/src/lib.rs index f9f8858a7b1c..baba02a559f0 100644 --- a/core/node/block_reverter/src/lib.rs +++ b/core/node/block_reverter/src/lib.rs @@ -2,7 +2,7 @@ use std::{path::Path, sync::Arc, time::Duration}; use anyhow::Context as _; use serde::Serialize; -use tokio::fs; +use tokio::{fs, sync::Semaphore}; use zksync_config::{configs::chain::NetworkConfig, ContractsConfig, EthConfig}; use zksync_contracts::hyperchain_contract; use zksync_dal::{ConnectionPool, Core, CoreDal}; @@ -382,6 +382,8 @@ impl BlockReverter { object_store: &dyn ObjectStore, deleted_snapshots: &[SnapshotMetadata], ) -> anyhow::Result<()> { + const CONCURRENT_REMOVE_REQUESTS: usize = 20; + fn ignore_not_found_errors(err: ObjectStoreError) -> Result<(), ObjectStoreError> { match err { ObjectStoreError::KeyNotFound(err) => { @@ -421,18 +423,46 @@ impl BlockReverter { }); combine_results(&mut overall_result, result); - for chunk_id in 0..snapshot.storage_logs_filepaths.len() as u64 { + let mut is_incomplete_snapshot = false; + let chunk_ids_iter = (0_u64..) + .zip(&snapshot.storage_logs_filepaths) + .filter_map(|(chunk_id, path)| { + if path.is_none() { + if !is_incomplete_snapshot { + is_incomplete_snapshot = true; + tracing::warn!( + "Snapshot for L1 batch #{} is incomplete (misses al least storage logs chunk ID {chunk_id}). \ + It is probable that it's currently being created, in which case you'll need to clean up produced files \ + manually afterwards", + snapshot.l1_batch_number + ); + } + return None; + } + Some(chunk_id) + }); + + let remove_semaphore = &Semaphore::new(CONCURRENT_REMOVE_REQUESTS); + let remove_futures = chunk_ids_iter.map(|chunk_id| async move { + let _permit = remove_semaphore + .acquire() + .await + .context("semaphore is never closed")?; + let key = SnapshotStorageLogsStorageKey { l1_batch_number: snapshot.l1_batch_number, chunk_id, }; tracing::info!("Removing storage logs chunk {key:?}"); - - let result = object_store + object_store .remove::(key) .await .or_else(ignore_not_found_errors) - .with_context(|| format!("failed removing storage logs chunk {key:?}")); + .with_context(|| format!("failed removing storage logs chunk {key:?}")) + }); + let remove_results = futures::future::join_all(remove_futures).await; + + for result in remove_results { combine_results(&mut overall_result, result); } } diff --git a/core/node/block_reverter/src/tests.rs b/core/node/block_reverter/src/tests.rs index d5510aac3bed..30ff24fa175b 100644 --- a/core/node/block_reverter/src/tests.rs +++ b/core/node/block_reverter/src/tests.rs @@ -1,11 +1,14 @@ //! Tests for block reverter. 
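The `BlockReverter` change in lib.rs above bounds object-store deletions to 20 concurrent requests by gating each removal future on a `tokio` semaphore and joining the futures afterwards, while skipping chunks whose file path is missing. A self-contained sketch of that concurrency pattern, assuming the `tokio`, `futures`, and `anyhow` crates and a hypothetical `remove_object` helper in place of `ObjectStore::remove()`:

use anyhow::Context as _;
use tokio::sync::Semaphore;

// Hypothetical removal routine standing in for the real object store call.
async fn remove_object(chunk_id: u64) -> anyhow::Result<()> {
    println!("removing chunk {chunk_id}");
    Ok(())
}

async fn remove_chunks(chunk_ids: Vec<u64>) -> anyhow::Result<()> {
    const CONCURRENT_REMOVE_REQUESTS: usize = 20;
    // Borrowing the semaphore is enough here: `join_all` completes before it is dropped.
    let semaphore = &Semaphore::new(CONCURRENT_REMOVE_REQUESTS);

    let remove_futures = chunk_ids.into_iter().map(|chunk_id| async move {
        let _permit = semaphore
            .acquire()
            .await
            .context("semaphore is never closed")?;
        remove_object(chunk_id)
            .await
            .with_context(|| format!("failed removing chunk {chunk_id}"))
    });

    // Run all removals (at most 20 in flight) and keep the first error, if any.
    let mut overall: anyhow::Result<()> = Ok(());
    for result in futures::future::join_all(remove_futures).await {
        overall = overall.and(result);
    }
    overall
}

#[tokio::main]
async fn main() -> anyhow::Result<()> {
    remove_chunks((0..100).collect()).await
}

Collecting the results after `join_all` (rather than bailing on the first error) is what lets the reverter attempt every removal and still surface a failure, which the `reverting_snapshot_propagates_fatal_errors` test below relies on.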
+use std::{collections::HashSet, sync::Mutex}; + use assert_matches::assert_matches; +use async_trait::async_trait; use test_casing::test_casing; use tokio::sync::watch; use zksync_dal::Connection; use zksync_merkle_tree::TreeInstruction; -use zksync_object_store::ObjectStoreFactory; +use zksync_object_store::{Bucket, ObjectStoreFactory}; use zksync_state::ReadStorage; use zksync_types::{ block::{L1BatchHeader, L2BlockHeader}, @@ -201,8 +204,13 @@ async fn create_mock_snapshot( storage: &mut Connection<'_, Core>, object_store: &dyn ObjectStore, l1_batch_number: L1BatchNumber, + chunk_ids: impl Iterator + Clone, ) { - let storage_logs_chunk_count = 5; + let storage_logs_chunk_count = chunk_ids + .clone() + .max() + .expect("`chunk_ids` cannot be empty") + + 1; let factory_deps_key = object_store .put( @@ -224,7 +232,7 @@ async fn create_mock_snapshot( .await .unwrap(); - for chunk_id in 0..storage_logs_chunk_count { + for chunk_id in chunk_ids { let key = SnapshotStorageLogsStorageKey { l1_batch_number, chunk_id, @@ -255,7 +263,7 @@ async fn reverting_snapshot(remove_objects: bool) { setup_storage(&mut storage, &storage_logs).await; let object_store = ObjectStoreFactory::mock().create_store().await; - create_mock_snapshot(&mut storage, &object_store, L1BatchNumber(7)).await; + create_mock_snapshot(&mut storage, &object_store, L1BatchNumber(7), 0..5).await; // Sanity check: snapshot should be visible. let all_snapshots = storage .snapshots_dal() @@ -304,3 +312,160 @@ async fn reverting_snapshot(remove_objects: bool) { } } } + +#[tokio::test] +async fn reverting_snapshot_ignores_not_found_object_store_errors() { + let storage_logs = gen_storage_logs(); + let pool = ConnectionPool::::test_pool().await; + let mut storage = pool.connection().await.unwrap(); + setup_storage(&mut storage, &storage_logs).await; + + let object_store = ObjectStoreFactory::mock().create_store().await; + create_mock_snapshot(&mut storage, &object_store, L1BatchNumber(7), 0..5).await; + + // Manually remove some data from the store. + object_store + .remove::(L1BatchNumber(7)) + .await + .unwrap(); + let key = SnapshotStorageLogsStorageKey { + l1_batch_number: L1BatchNumber(7), + chunk_id: 1, + }; + object_store + .remove::(key) + .await + .unwrap(); + + let mut block_reverter = BlockReverter::new(NodeRole::External, pool.clone()); + block_reverter.enable_rolling_back_postgres(); + block_reverter.enable_rolling_back_snapshot_objects(object_store); + block_reverter.roll_back(L1BatchNumber(5)).await.unwrap(); + + // Check that snapshot metadata has been removed. 
+ let all_snapshots = storage + .snapshots_dal() + .get_all_complete_snapshots() + .await + .unwrap(); + assert_eq!(all_snapshots.snapshots_l1_batch_numbers, []); +} + +#[derive(Debug, Default)] +struct ErroneousStore { + object_keys: Mutex>, +} + +#[async_trait] +impl ObjectStore for ErroneousStore { + async fn get_raw(&self, _bucket: Bucket, _key: &str) -> Result, ObjectStoreError> { + unreachable!("not called by reverter") + } + + async fn put_raw( + &self, + bucket: Bucket, + key: &str, + _value: Vec, + ) -> Result<(), ObjectStoreError> { + self.object_keys + .lock() + .unwrap() + .insert((bucket, key.to_owned())); + Ok(()) + } + + async fn remove_raw(&self, bucket: Bucket, key: &str) -> Result<(), ObjectStoreError> { + self.object_keys + .lock() + .unwrap() + .remove(&(bucket, key.to_owned())); + Err(ObjectStoreError::Other { + is_transient: false, + source: "fatal error".into(), + }) + } + + fn storage_prefix_raw(&self, bucket: Bucket) -> String { + bucket.to_string() + } +} + +#[tokio::test] +async fn reverting_snapshot_propagates_fatal_errors() { + let storage_logs = gen_storage_logs(); + let pool = ConnectionPool::::test_pool().await; + let mut storage = pool.connection().await.unwrap(); + setup_storage(&mut storage, &storage_logs).await; + + let object_store = Arc::new(ErroneousStore::default()); + create_mock_snapshot(&mut storage, &object_store, L1BatchNumber(7), 0..5).await; + + let mut block_reverter = BlockReverter::new(NodeRole::External, pool.clone()); + block_reverter.enable_rolling_back_postgres(); + block_reverter.enable_rolling_back_snapshot_objects(object_store.clone()); + let err = block_reverter + .roll_back(L1BatchNumber(5)) + .await + .unwrap_err(); + assert!(err.chain().any(|source| { + if let Some(err) = source.downcast_ref::() { + matches!(err, ObjectStoreError::Other { .. }) + } else { + false + } + })); + + // Check that snapshot metadata has been removed (it's not atomic with snapshot removal). + let all_snapshots = storage + .snapshots_dal() + .get_all_complete_snapshots() + .await + .unwrap(); + assert_eq!(all_snapshots.snapshots_l1_batch_numbers, []); + + // Check that removal was called for all objects (i.e., the reverter doesn't bail early). + let retained_object_keys = object_store.object_keys.lock().unwrap(); + assert!(retained_object_keys.is_empty(), "{retained_object_keys:?}"); +} + +#[tokio::test] +async fn reverter_handles_incomplete_snapshot() { + let storage_logs = gen_storage_logs(); + let pool = ConnectionPool::::test_pool().await; + let mut storage = pool.connection().await.unwrap(); + setup_storage(&mut storage, &storage_logs).await; + + let object_store = ObjectStoreFactory::mock().create_store().await; + let chunk_ids = [0, 1, 4].into_iter(); + create_mock_snapshot( + &mut storage, + &object_store, + L1BatchNumber(7), + chunk_ids.clone(), + ) + .await; + + let mut block_reverter = BlockReverter::new(NodeRole::External, pool.clone()); + block_reverter.enable_rolling_back_postgres(); + block_reverter.enable_rolling_back_snapshot_objects(object_store.clone()); + block_reverter.roll_back(L1BatchNumber(5)).await.unwrap(); + + // Check that snapshot metadata has been removed. + let all_snapshots = storage + .snapshots_dal() + .get_all_complete_snapshots() + .await + .unwrap(); + assert_eq!(all_snapshots.snapshots_l1_batch_numbers, []); + + // Check that chunk files have been removed. 
+ for chunk_id in chunk_ids { + let key = SnapshotStorageLogsStorageKey { + l1_batch_number: L1BatchNumber(7), + chunk_id, + }; + let chunk_result = object_store.get::(key).await; + assert_matches!(chunk_result.unwrap_err(), ObjectStoreError::KeyNotFound(_)); + } +} diff --git a/core/node/commitment_generator/src/validation_task.rs b/core/node/commitment_generator/src/validation_task.rs index 4488e0c2c56e..cf93a4899b89 100644 --- a/core/node/commitment_generator/src/validation_task.rs +++ b/core/node/commitment_generator/src/validation_task.rs @@ -3,7 +3,7 @@ use std::time::Duration; use tokio::sync::watch; use zksync_eth_client::{ clients::{DynClient, L1}, - CallFunctionArgs, ClientError, Error as EthClientError, + CallFunctionArgs, ClientError, ContractCallError, }; use zksync_types::{commitment::L1BatchCommitmentMode, Address}; @@ -66,14 +66,14 @@ impl L1BatchCommitmentModeValidationTask { // Getters contract does not support `getPubdataPricingMode` method. // This case is accepted for backwards compatibility with older contracts, but emits a // warning in case the wrong contract address was passed by the caller. - Err(EthClientError::EthereumGateway(err)) + Err(ContractCallError::EthereumGateway(err)) if matches!(err.as_ref(), ClientError::Call(_)) => { tracing::warn!("Contract {diamond_proxy_address:?} does not support getPubdataPricingMode method: {err}"); return Ok(()); } - Err(EthClientError::EthereumGateway(err)) if err.is_transient() => { + Err(ContractCallError::EthereumGateway(err)) if err.is_transient() => { tracing::warn!( "Transient error validating commitment mode, will retry after {:?}: {err}", self.retry_interval @@ -92,7 +92,7 @@ impl L1BatchCommitmentModeValidationTask { async fn get_pubdata_pricing_mode( diamond_proxy_address: Address, eth_client: &DynClient, - ) -> Result { + ) -> Result { CallFunctionArgs::new("getPubdataPricingMode", ()) .for_contract( diamond_proxy_address, diff --git a/core/node/consistency_checker/src/lib.rs b/core/node/consistency_checker/src/lib.rs index eb7eea42007c..ae092b2d1c11 100644 --- a/core/node/consistency_checker/src/lib.rs +++ b/core/node/consistency_checker/src/lib.rs @@ -7,7 +7,7 @@ use zksync_contracts::PRE_BOOJUM_COMMIT_FUNCTION; use zksync_dal::{Connection, ConnectionPool, Core, CoreDal}; use zksync_eth_client::{ clients::{DynClient, L1}, - CallFunctionArgs, Error as L1ClientError, EthInterface, + CallFunctionArgs, ContractCallError, EnrichedClientError, EthInterface, }; use zksync_health_check::{Health, HealthStatus, HealthUpdater, ReactiveHealthCheck}; use zksync_l1_contract_interface::{ @@ -29,7 +29,9 @@ mod tests; #[derive(Debug, thiserror::Error)] enum CheckError { #[error("Web3 error communicating with L1")] - Web3(#[from] L1ClientError), + Web3(#[from] EnrichedClientError), + #[error("error calling L1 contract")] + ContractCall(#[from] ContractCallError), /// Error that is caused by the main node providing incorrect information etc. 
#[error("failed validating commit transaction")] Validation(anyhow::Error), @@ -42,7 +44,7 @@ impl CheckError { fn is_transient(&self) -> bool { matches!( self, - Self::Web3(L1ClientError::EthereumGateway(err)) if err.is_transient() + Self::Web3(err) if err.is_transient() ) } } diff --git a/core/node/eth_sender/src/error.rs b/core/node/eth_sender/src/error.rs index 206bbf2d583a..61d92bcbe132 100644 --- a/core/node/eth_sender/src/error.rs +++ b/core/node/eth_sender/src/error.rs @@ -1,9 +1,12 @@ +use zksync_eth_client::{ContractCallError, EnrichedClientError}; use zksync_types::web3::contract; #[derive(Debug, thiserror::Error)] -pub enum ETHSenderError { - #[error("Ethereum gateway Error {0}")] - EthereumGateWayError(#[from] zksync_eth_client::Error), - #[error("Token parsing Error: {0}")] - ParseError(#[from] contract::Error), +pub enum EthSenderError { + #[error("Ethereum gateway error: {0}")] + EthereumGateway(#[from] EnrichedClientError), + #[error("Contract call error: {0}")] + ContractCall(#[from] ContractCallError), + #[error("Token parsing error: {0}")] + Parse(#[from] contract::Error), } diff --git a/core/node/eth_sender/src/eth_tx_aggregator.rs b/core/node/eth_sender/src/eth_tx_aggregator.rs index 11c4f6362b7b..ee5806c72f54 100644 --- a/core/node/eth_sender/src/eth_tx_aggregator.rs +++ b/core/node/eth_sender/src/eth_tx_aggregator.rs @@ -29,7 +29,7 @@ use crate::{ metrics::{PubdataKind, METRICS}, utils::agg_l1_batch_base_cost, zksync_functions::ZkSyncFunctions, - Aggregator, ETHSenderError, + Aggregator, EthSenderError, }; /// Data queried from L1 using multicall contract. @@ -134,7 +134,7 @@ impl EthTxAggregator { Ok(()) } - pub(super) async fn get_multicall_data(&mut self) -> Result { + pub(super) async fn get_multicall_data(&mut self) -> Result { let calldata = self.generate_calldata_for_multicall(); let args = CallFunctionArgs::new(&self.functions.aggregate3.name, calldata).for_contract( self.l1_multicall3_address, @@ -221,14 +221,11 @@ impl EthTxAggregator { pub(super) fn parse_multicall_data( &self, token: Token, - ) -> Result { + ) -> Result { let parse_error = |tokens: &[Token]| { - Err(ETHSenderError::ParseError( - Web3ContractError::InvalidOutputType(format!( - "Failed to parse multicall token: {:?}", - tokens - )), - )) + Err(EthSenderError::Parse(Web3ContractError::InvalidOutputType( + format!("Failed to parse multicall token: {:?}", tokens), + ))) }; if let Token::Array(call_results) = token { @@ -242,24 +239,24 @@ impl EthTxAggregator { Multicall3Result::from_token(call_results_iterator.next().unwrap())?.return_data; if multicall3_bootloader.len() != 32 { - return Err(ETHSenderError::ParseError( - Web3ContractError::InvalidOutputType(format!( + return Err(EthSenderError::Parse(Web3ContractError::InvalidOutputType( + format!( "multicall3 bootloader hash data is not of the len of 32: {:?}", multicall3_bootloader - )), - )); + ), + ))); } let bootloader = H256::from_slice(&multicall3_bootloader); let multicall3_default_aa = Multicall3Result::from_token(call_results_iterator.next().unwrap())?.return_data; if multicall3_default_aa.len() != 32 { - return Err(ETHSenderError::ParseError( - Web3ContractError::InvalidOutputType(format!( + return Err(EthSenderError::Parse(Web3ContractError::InvalidOutputType( + format!( "multicall3 default aa hash data is not of the len of 32: {:?}", multicall3_default_aa - )), - )); + ), + ))); } let default_aa = H256::from_slice(&multicall3_default_aa); let base_system_contracts_hashes = BaseSystemContractsHashes { @@ -270,12 +267,12 @@ impl 
EthTxAggregator { let multicall3_verifier_params = Multicall3Result::from_token(call_results_iterator.next().unwrap())?.return_data; if multicall3_verifier_params.len() != 96 { - return Err(ETHSenderError::ParseError( - Web3ContractError::InvalidOutputType(format!( + return Err(EthSenderError::Parse(Web3ContractError::InvalidOutputType( + format!( "multicall3 verifier params data is not of the len of 96: {:?}", multicall3_default_aa - )), - )); + ), + ))); } let recursion_node_level_vk_hash = H256::from_slice(&multicall3_verifier_params[..32]); let recursion_leaf_level_vk_hash = @@ -291,24 +288,24 @@ impl EthTxAggregator { let multicall3_verifier_address = Multicall3Result::from_token(call_results_iterator.next().unwrap())?.return_data; if multicall3_verifier_address.len() != 32 { - return Err(ETHSenderError::ParseError( - Web3ContractError::InvalidOutputType(format!( + return Err(EthSenderError::Parse(Web3ContractError::InvalidOutputType( + format!( "multicall3 verifier address data is not of the len of 32: {:?}", multicall3_verifier_address - )), - )); + ), + ))); } let verifier_address = Address::from_slice(&multicall3_verifier_address[12..]); let multicall3_protocol_version = Multicall3Result::from_token(call_results_iterator.next().unwrap())?.return_data; if multicall3_protocol_version.len() != 32 { - return Err(ETHSenderError::ParseError( - Web3ContractError::InvalidOutputType(format!( + return Err(EthSenderError::Parse(Web3ContractError::InvalidOutputType( + format!( "multicall3 protocol version data is not of the len of 32: {:?}", multicall3_protocol_version - )), - )); + ), + ))); } let protocol_version = U256::from_big_endian(&multicall3_protocol_version); @@ -334,7 +331,7 @@ impl EthTxAggregator { async fn get_recursion_scheduler_level_vk_hash( &mut self, verifier_address: Address, - ) -> Result { + ) -> Result { let get_vk_hash = &self.functions.verification_key_hash; let vk_hash: H256 = CallFunctionArgs::new(&get_vk_hash.name, ()) .for_contract(verifier_address, &self.functions.verifier_contract) @@ -347,7 +344,7 @@ impl EthTxAggregator { async fn loop_iteration( &mut self, storage: &mut Connection<'_, Core>, - ) -> Result<(), ETHSenderError> { + ) -> Result<(), EthSenderError> { let MulticallData { base_system_contracts_hashes, verifier_params, @@ -546,7 +543,7 @@ impl EthTxAggregator { storage: &mut Connection<'_, Core>, aggregated_op: &AggregatedOperation, contracts_are_pre_shared_bridge: bool, - ) -> Result { + ) -> Result { let mut transaction = storage.start_transaction().await.unwrap(); let op_type = aggregated_op.get_action_type(); // We may be using a custom sender for commit transactions, so use this @@ -595,7 +592,7 @@ impl EthTxAggregator { &self, storage: &mut Connection<'_, Core>, from_addr: Option
, - ) -> Result { + ) -> Result { let db_nonce = storage .eth_sender_dal() .get_next_nonce(from_addr) diff --git a/core/node/eth_sender/src/eth_tx_manager.rs b/core/node/eth_sender/src/eth_tx_manager.rs index 09b1f3885551..7958aad6d78f 100644 --- a/core/node/eth_sender/src/eth_tx_manager.rs +++ b/core/node/eth_sender/src/eth_tx_manager.rs @@ -6,8 +6,8 @@ use zksync_config::configs::eth_sender::SenderConfig; use zksync_dal::{Connection, ConnectionPool, Core, CoreDal}; use zksync_eth_client::{ clients::{DynClient, L1}, - encode_blob_tx_with_sidecar, BoundEthInterface, ClientError, EnrichedClientError, Error, - EthInterface, ExecutedTxStatus, Options, RawTransactionBytes, SignedCallResult, + encode_blob_tx_with_sidecar, BoundEthInterface, ClientError, EnrichedClientError, EthInterface, + ExecutedTxStatus, Options, RawTransactionBytes, SignedCallResult, }; use zksync_node_fee_model::l1_gas_price::L1TxParamsProvider; use zksync_shared_metrics::BlockL1Stage; @@ -19,7 +19,7 @@ use zksync_types::{ }; use zksync_utils::time::seconds_since_epoch; -use super::{metrics::METRICS, ETHSenderError}; +use super::{metrics::METRICS, EthSenderError}; #[derive(Debug)] struct EthFee { @@ -85,7 +85,7 @@ impl EthTxManager { async fn get_tx_status( &self, tx_hash: H256, - ) -> Result, ETHSenderError> { + ) -> Result, EthSenderError> { self.query_client() .get_tx_status(tx_hash) .await @@ -125,7 +125,7 @@ impl EthTxManager { storage: &mut Connection<'_, Core>, tx: &EthTx, time_in_mempool: u32, - ) -> Result { + ) -> Result { let base_fee_per_gas = self.gas_adjuster.get_base_fee(0); let priority_fee_per_gas = self.gas_adjuster.get_priority_fee(); let blob_base_fee_per_gas = Some(self.gas_adjuster.get_blob_base_fee()); @@ -200,7 +200,7 @@ impl EthTxManager { storage: &mut Connection<'_, Core>, eth_tx_id: u32, base_fee_per_gas: u64, - ) -> Result { + ) -> Result { let previous_sent_tx = storage .eth_sender_dal() .get_last_sent_eth_tx(eth_tx_id) @@ -228,7 +228,7 @@ impl EthTxManager { .with_arg("base_fee_per_gas", &base_fee_per_gas) .with_arg("previous_base_fee", &previous_base_fee) .with_arg("next_block_minimal_base_fee", &next_block_minimal_base_fee); - return Err(ETHSenderError::from(Error::EthereumGateway(err))); + return Err(err.into()); } // Increase `priority_fee_per_gas` by at least 20% to prevent "replacement transaction under-priced" error. 
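The comment above summarizes the resend fee policy: a replacement transaction is only worth sending once the new base fee exceeds both the previously used base fee and the next block's minimal base fee, and its priority fee is bumped by at least 20% to avoid "replacement transaction under-priced" errors. A rough standalone sketch of that rule (an approximation with illustrative names, not the exact `EthTxManager` formula):

// Approximate resend fee rules: reject resends whose base fee has not overtaken the
// previous attempt or the next block's minimum, then bump the priority fee by >= 20%.
fn fee_for_resend(
    new_base_fee: u64,
    previous_base_fee: u64,
    next_block_minimal_base_fee: u64,
    previous_priority_fee: u64,
) -> Result<(u64, u64), &'static str> {
    if new_base_fee <= previous_base_fee || new_base_fee < next_block_minimal_base_fee {
        // Too early to resend: the fee market has not moved enough yet.
        return Err("base fee too low to replace the previous transaction");
    }
    // 20% bump, rounding up so that small fees still strictly increase.
    let bumped_priority_fee = previous_priority_fee + previous_priority_fee.div_ceil(5);
    Ok((new_base_fee, bumped_priority_fee))
}

fn main() {
    // New base fee 120 beats both thresholds; priority fee 10 is bumped to 12.
    assert_eq!(fee_for_resend(120, 100, 110, 10), Ok((120, 12)));
}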
@@ -242,7 +242,7 @@ impl EthTxManager { tx: &EthTx, time_in_mempool: u32, current_block: L1BlockNumber, - ) -> Result { + ) -> Result { let EthFee { base_fee_per_gas, priority_fee_per_gas, @@ -310,7 +310,7 @@ impl EthTxManager { tx_history_id: u32, raw_tx: RawTransactionBytes, current_block: L1BlockNumber, - ) -> Result { + ) -> Result { match self.query_client().send_raw_tx(raw_tx).await { Ok(tx_hash) => { storage @@ -334,7 +334,7 @@ impl EthTxManager { async fn get_operator_nonce( &self, block_numbers: L1BlockNumbers, - ) -> Result { + ) -> Result { let finalized = self .ethereum_gateway .nonce_at(block_numbers.finalized.0.into()) @@ -354,7 +354,7 @@ impl EthTxManager { async fn get_blobs_operator_nonce( &self, block_numbers: L1BlockNumbers, - ) -> Result, ETHSenderError> { + ) -> Result, EthSenderError> { match &self.ethereum_gateway_blobs { None => Ok(None), Some(gateway) => { @@ -374,7 +374,7 @@ impl EthTxManager { } } - async fn get_l1_block_numbers(&self) -> Result { + async fn get_l1_block_numbers(&self) -> Result { let (finalized, safe) = if let Some(confirmations) = self.config.wait_confirmations { let latest_block_number = self.query_client().block_number().await?.as_u64(); @@ -418,7 +418,7 @@ impl EthTxManager { &mut self, storage: &mut Connection<'_, Core>, l1_block_numbers: L1BlockNumbers, - ) -> Result, ETHSenderError> { + ) -> Result, EthSenderError> { METRICS.track_block_numbers(&l1_block_numbers); let operator_nonce = self.get_operator_nonce(l1_block_numbers).await?; let blobs_operator_nonce = self.get_blobs_operator_nonce(l1_block_numbers).await?; @@ -458,7 +458,7 @@ impl EthTxManager { l1_block_numbers: L1BlockNumbers, operator_nonce: OperatorNonce, operator_address: Option
, - ) -> Result, ETHSenderError> { + ) -> Result, EthSenderError> { let inflight_txs = storage.eth_sender_dal().get_inflight_txs().await.unwrap(); METRICS.number_of_inflight_txs.set(inflight_txs.len()); @@ -799,7 +799,7 @@ impl EthTxManager { &mut self, storage: &mut Connection<'_, Core>, previous_block: L1BlockNumber, - ) -> Result { + ) -> Result { let l1_block_numbers = self.get_l1_block_numbers().await?; self.send_new_eth_txs(storage, l1_block_numbers.latest) diff --git a/core/node/eth_sender/src/lib.rs b/core/node/eth_sender/src/lib.rs index c0a4a892e52a..3ae29a520030 100644 --- a/core/node/eth_sender/src/lib.rs +++ b/core/node/eth_sender/src/lib.rs @@ -12,6 +12,6 @@ mod zksync_functions; mod tests; pub use self::{ - aggregator::Aggregator, error::ETHSenderError, eth_tx_aggregator::EthTxAggregator, + aggregator::Aggregator, error::EthSenderError, eth_tx_aggregator::EthTxAggregator, eth_tx_manager::EthTxManager, }; diff --git a/core/node/eth_sender/src/tests.rs b/core/node/eth_sender/src/tests.rs index 5090af08cf86..cd00f3af0883 100644 --- a/core/node/eth_sender/src/tests.rs +++ b/core/node/eth_sender/src/tests.rs @@ -29,7 +29,7 @@ use zksync_types::{ use crate::{ aggregated_operations::AggregatedOperation, eth_tx_manager::L1BlockNumbers, Aggregator, - ETHSenderError, EthTxAggregator, EthTxManager, + EthSenderError, EthTxAggregator, EthTxManager, }; // Alias to conveniently call static methods of `ETHSender`. @@ -1104,7 +1104,7 @@ async fn test_parse_multicall_data(commitment_mode: L1BatchCommitmentMode) { tester .aggregator .parse_multicall_data(wrong_data_instance.clone()), - Err(ETHSenderError::ParseError(Error::InvalidOutputType(_))) + Err(EthSenderError::Parse(Error::InvalidOutputType(_))) ); } } diff --git a/core/node/eth_watch/src/client.rs b/core/node/eth_watch/src/client.rs index 4e3e8e997361..604ea2f471cc 100644 --- a/core/node/eth_watch/src/client.rs +++ b/core/node/eth_watch/src/client.rs @@ -1,10 +1,10 @@ use std::fmt; use zksync_contracts::verifier_contract; -pub(super) use zksync_eth_client::Error as EthClientError; use zksync_eth_client::{ clients::{DynClient, L1}, - CallFunctionArgs, ClientError, EnrichedClientError, EthInterface, + CallFunctionArgs, ClientError, ContractCallError, EnrichedClientError, EnrichedClientResult, + EthInterface, }; use zksync_types::{ ethabi::Contract, @@ -21,11 +21,12 @@ pub trait EthClient: 'static + fmt::Debug + Send + Sync { from: BlockNumber, to: BlockNumber, retries_left: usize, - ) -> Result, EthClientError>; + ) -> EnrichedClientResult>; /// Returns finalized L1 block number. - async fn finalized_block_number(&self) -> Result; + async fn finalized_block_number(&self) -> EnrichedClientResult; /// Returns scheduler verification key hash by verifier address. - async fn scheduler_vk_hash(&self, verifier_address: Address) -> Result; + async fn scheduler_vk_hash(&self, verifier_address: Address) + -> Result; /// Sets list of topics to return events for. 
fn set_topics(&mut self, topics: Vec); } @@ -76,7 +77,7 @@ impl EthHttpQueryClient { from: BlockNumber, to: BlockNumber, topics: Vec, - ) -> Result, EthClientError> { + ) -> EnrichedClientResult> { let filter = FilterBuilder::default() .address( [ @@ -92,13 +93,16 @@ impl EthHttpQueryClient { .to_block(to) .topics(Some(topics), None, None, None) .build(); - self.client.logs(filter).await + self.client.logs(&filter).await } } #[async_trait::async_trait] impl EthClient for EthHttpQueryClient { - async fn scheduler_vk_hash(&self, verifier_address: Address) -> Result { + async fn scheduler_vk_hash( + &self, + verifier_address: Address, + ) -> Result { // New verifier returns the hash of the verification key. CallFunctionArgs::new("verificationKeyHash", ()) .for_contract(verifier_address, &self.verifier_contract_abi) @@ -111,12 +115,12 @@ impl EthClient for EthHttpQueryClient { from: BlockNumber, to: BlockNumber, retries_left: usize, - ) -> Result, EthClientError> { + ) -> EnrichedClientResult> { let mut result = self.get_filter_logs(from, to, self.topics.clone()).await; // This code is compatible with both Infura and Alchemy API providers. // Note: we don't handle rate-limits here - assumption is that we're never going to hit them. - if let Err(EthClientError::EthereumGateway(err)) = &result { + if let Err(err) = &result { tracing::warn!("Provider returned error message: {err}"); let err_message = err.as_ref().to_string(); let err_code = if let ClientError::Call(err) = err.as_ref() { @@ -181,7 +185,7 @@ impl EthClient for EthHttpQueryClient { result } - async fn finalized_block_number(&self) -> Result { + async fn finalized_block_number(&self) -> EnrichedClientResult { if let Some(confirmations) = self.confirmations_for_eth_event { let latest_block_number = self.client.block_number().await?.as_u64(); Ok(latest_block_number.saturating_sub(confirmations)) diff --git a/core/node/eth_watch/src/event_processors/governance_upgrades.rs b/core/node/eth_watch/src/event_processors/governance_upgrades.rs index ddd74440cecb..d26cfe6dbd9b 100644 --- a/core/node/eth_watch/src/event_processors/governance_upgrades.rs +++ b/core/node/eth_watch/src/event_processors/governance_upgrades.rs @@ -102,13 +102,6 @@ impl EventProcessor for GovernanceUpgradesEventProcessor { .context("expected some version to be present in DB")?; if upgrade.version > latest_semantic_version { - if upgrade.version.minor == latest_semantic_version.minor { - // Only verification parameters may change if only patch is bumped. - assert!(upgrade.bootloader_code_hash.is_none()); - assert!(upgrade.default_account_code_hash.is_none()); - assert!(upgrade.tx.is_none()); - } - let latest_version = storage .protocol_versions_dal() .get_protocol_version_with_latest_patch(latest_semantic_version.minor) @@ -122,6 +115,14 @@ impl EventProcessor for GovernanceUpgradesEventProcessor { })?; let new_version = latest_version.apply_upgrade(upgrade, scheduler_vk_hash); + if new_version.version.minor == latest_semantic_version.minor { + // Only verification parameters may change if only patch is bumped. 
+ assert_eq!( + new_version.base_system_contracts_hashes, + latest_version.base_system_contracts_hashes + ); + assert!(new_version.tx.is_none()); + } storage .protocol_versions_dal() .save_protocol_version_with_tx(&new_version) diff --git a/core/node/eth_watch/src/event_processors/mod.rs b/core/node/eth_watch/src/event_processors/mod.rs index 2a3a6344bdbc..396bcc2e1ca5 100644 --- a/core/node/eth_watch/src/event_processors/mod.rs +++ b/core/node/eth_watch/src/event_processors/mod.rs @@ -1,12 +1,13 @@ use std::fmt; use zksync_dal::{Connection, Core}; +use zksync_eth_client::{ContractCallError, EnrichedClientError}; use zksync_types::{web3::Log, H256}; pub(crate) use self::{ governance_upgrades::GovernanceUpgradesEventProcessor, priority_ops::PriorityOpsEventProcessor, }; -use crate::client::{EthClient, EthClientError}; +use crate::client::EthClient; mod governance_upgrades; mod priority_ops; @@ -21,7 +22,9 @@ pub(super) enum EventProcessorError { source: anyhow::Error, }, #[error("Eth client error: {0}")] - Client(#[from] EthClientError), + Client(#[from] EnrichedClientError), + #[error("Contract call error: {0}")] + ContractCall(#[from] ContractCallError), /// Internal errors are considered fatal (i.e., they bubble up and lead to the watcher termination). #[error("internal processing error: {0:?}")] Internal(#[from] anyhow::Error), diff --git a/core/node/eth_watch/src/tests.rs b/core/node/eth_watch/src/tests.rs index f6abe93b35f0..870c2b858a55 100644 --- a/core/node/eth_watch/src/tests.rs +++ b/core/node/eth_watch/src/tests.rs @@ -3,6 +3,7 @@ use std::{collections::HashMap, convert::TryInto, sync::Arc}; use tokio::sync::RwLock; use zksync_contracts::{governance_contract, hyperchain_contract}; use zksync_dal::{Connection, ConnectionPool, Core, CoreDal}; +use zksync_eth_client::{ContractCallError, EnrichedClientResult}; use zksync_types::{ ethabi::{encode, Hash, Token}, l1::{L1Tx, OpProcessingType, PriorityQueueType}, @@ -13,10 +14,7 @@ use zksync_types::{ ProtocolVersionId, Transaction, H256, U256, }; -use crate::{ - client::{EthClient, EthClientError}, - EthWatch, -}; +use crate::{client::EthClient, EthWatch}; #[derive(Debug)] struct FakeEthClientData { @@ -106,7 +104,7 @@ impl EthClient for MockEthClient { from: BlockNumber, to: BlockNumber, _retries_left: usize, - ) -> Result, EthClientError> { + ) -> EnrichedClientResult> { let from = self.block_to_number(from).await; let to = self.block_to_number(to).await; let mut logs = vec![]; @@ -126,11 +124,14 @@ impl EthClient for MockEthClient { fn set_topics(&mut self, _topics: Vec) {} - async fn scheduler_vk_hash(&self, _verifier_address: Address) -> Result { + async fn scheduler_vk_hash( + &self, + _verifier_address: Address, + ) -> Result { Ok(H256::zero()) } - async fn finalized_block_number(&self) -> Result { + async fn finalized_block_number(&self) -> EnrichedClientResult { Ok(self.inner.read().await.last_finalized_block_number) } } @@ -146,8 +147,6 @@ fn build_l1_tx(serial_id: u64, eth_block: u64) -> L1Tx { common_data: L1TxCommonData { serial_id: PriorityOpId(serial_id), sender: [1u8; 20].into(), - deadline_block: 0, - eth_hash: [2; 32].into(), eth_block, gas_limit: Default::default(), max_fee_per_gas: Default::default(), @@ -175,7 +174,6 @@ fn build_upgrade_tx(id: ProtocolVersionId, eth_block: u64) -> ProtocolUpgradeTx common_data: ProtocolUpgradeTxCommonData { upgrade_id: id, sender: [1u8; 20].into(), - eth_hash: [2; 32].into(), eth_block, gas_limit: Default::default(), max_fee_per_gas: Default::default(), diff --git 
a/core/node/fee_model/src/l1_gas_price/gas_adjuster/mod.rs b/core/node/fee_model/src/l1_gas_price/gas_adjuster/mod.rs index 12bb87c43431..9e553ba47bf2 100644 --- a/core/node/fee_model/src/l1_gas_price/gas_adjuster/mod.rs +++ b/core/node/fee_model/src/l1_gas_price/gas_adjuster/mod.rs @@ -8,7 +8,7 @@ use std::{ use tokio::sync::watch; use zksync_config::{configs::eth_sender::PubdataSendingMode, GasAdjusterConfig}; -use zksync_eth_client::{Error, EthInterface}; +use zksync_eth_client::EthInterface; use zksync_types::{commitment::L1BatchCommitmentMode, L1_GAS_PER_PUBDATA_BYTE, U256, U64}; use zksync_web3_decl::client::{DynClient, L1}; @@ -41,7 +41,7 @@ impl GasAdjuster { config: GasAdjusterConfig, pubdata_sending_mode: PubdataSendingMode, commitment_mode: L1BatchCommitmentMode, - ) -> Result { + ) -> anyhow::Result { let eth_client = eth_client.for_component("gas_adjuster"); // Subtracting 1 from the "latest" block number to prevent errors in case @@ -81,7 +81,7 @@ impl GasAdjuster { /// Performs an actualization routine for `GasAdjuster`. /// This method is intended to be invoked periodically. - pub async fn keep_updated(&self) -> Result<(), Error> { + pub async fn keep_updated(&self) -> anyhow::Result<()> { // Subtracting 1 from the "latest" block number to prevent errors in case // the info about the latest block is not yet present on the node. // This sometimes happens on Infura. @@ -229,7 +229,7 @@ impl GasAdjuster { async fn get_base_fees_history( eth_client: &DynClient, block_range: RangeInclusive, - ) -> Result<(Vec, Vec), Error> { + ) -> anyhow::Result<(Vec, Vec)> { let mut base_fee_history = Vec::new(); let mut blob_base_fee_history = Vec::new(); for block_number in block_range { diff --git a/core/node/genesis/src/lib.rs b/core/node/genesis/src/lib.rs index 12dd6afc68b3..bfa6b77cbfef 100644 --- a/core/node/genesis/src/lib.rs +++ b/core/node/genesis/src/lib.rs @@ -432,7 +432,7 @@ pub async fn save_set_chain_id_tx( .from_block(from.into()) .to_block(BlockNumber::Latest) .build(); - let mut logs = query_client.logs(filter).await?; + let mut logs = query_client.logs(&filter).await?; anyhow::ensure!( logs.len() == 1, "Expected a single set_chain_id event, got these {}: {:?}", diff --git a/core/node/house_keeper/src/prover/metrics.rs b/core/node/house_keeper/src/prover/metrics.rs index 4af13b61b0c5..7711c9c04a6b 100644 --- a/core/node/house_keeper/src/prover/metrics.rs +++ b/core/node/house_keeper/src/prover/metrics.rs @@ -1,6 +1,5 @@ use vise::{Counter, EncodeLabelSet, EncodeLabelValue, Family, Gauge, LabeledFamily, Metrics}; -use zksync_types::ProtocolVersionId; - +use zksync_types::protocol_version::ProtocolSemanticVersion; #[derive(Debug, Metrics)] #[metrics(prefix = "house_keeper")] pub(crate) struct HouseKeeperMetrics { @@ -63,7 +62,7 @@ impl FriProverMetrics { circuit_id: u8, aggregation_round: u8, prover_group_id: u8, - protocol_version: ProtocolVersionId, + protocol_version: ProtocolSemanticVersion, amount: u64, ) { self.prover_jobs[&ProverJobsLabels { diff --git a/core/node/house_keeper/src/prover/queue_reporter/fri_proof_compressor_queue_reporter.rs b/core/node/house_keeper/src/prover/queue_reporter/fri_proof_compressor_queue_reporter.rs index ce7d7467bcc9..886a4c116b89 100644 --- a/core/node/house_keeper/src/prover/queue_reporter/fri_proof_compressor_queue_reporter.rs +++ b/core/node/house_keeper/src/prover/queue_reporter/fri_proof_compressor_queue_reporter.rs @@ -3,7 +3,7 @@ use std::collections::HashMap; use async_trait::async_trait; use prover_dal::{Prover, ProverDal}; use 
zksync_dal::ConnectionPool; -use zksync_types::{prover_dal::JobCountStatistics, ProtocolVersionId}; +use zksync_types::{protocol_version::ProtocolSemanticVersion, prover_dal::JobCountStatistics}; use crate::{ periodic_job::PeriodicJob, @@ -28,7 +28,7 @@ impl FriProofCompressorQueueReporter { async fn get_job_statistics( pool: &ConnectionPool, - ) -> HashMap { + ) -> HashMap { pool.connection() .await .unwrap() diff --git a/core/node/house_keeper/src/prover/queue_reporter/fri_prover_queue_reporter.rs b/core/node/house_keeper/src/prover/queue_reporter/fri_prover_queue_reporter.rs index b3b04a519b29..1ae03c74b45e 100644 --- a/core/node/house_keeper/src/prover/queue_reporter/fri_prover_queue_reporter.rs +++ b/core/node/house_keeper/src/prover/queue_reporter/fri_prover_queue_reporter.rs @@ -2,7 +2,6 @@ use async_trait::async_trait; use prover_dal::{Prover, ProverDal}; use zksync_config::configs::fri_prover_group::FriProverGroupConfig; use zksync_dal::{ConnectionPool, Core, CoreDal}; -use zksync_types::ProtocolVersionId; use crate::{periodic_job::PeriodicJob, prover::metrics::FRI_PROVER_METRICS}; @@ -67,7 +66,7 @@ impl PeriodicJob for FriProverQueueReporter { circuit_id, job_identifiers.aggregation_round, group_id, - ProtocolVersionId::try_from(job_identifiers.protocol_version).unwrap(), + job_identifiers.get_semantic_protocol_version(), stats.queued as u64, ); @@ -76,7 +75,7 @@ impl PeriodicJob for FriProverQueueReporter { circuit_id, job_identifiers.aggregation_round, group_id, - ProtocolVersionId::try_from(job_identifiers.protocol_version).unwrap(), + job_identifiers.get_semantic_protocol_version(), stats.in_progress as u64, ); } diff --git a/core/node/house_keeper/src/prover/queue_reporter/fri_witness_generator_queue_reporter.rs b/core/node/house_keeper/src/prover/queue_reporter/fri_witness_generator_queue_reporter.rs index da44a34f145a..487b28491c43 100644 --- a/core/node/house_keeper/src/prover/queue_reporter/fri_witness_generator_queue_reporter.rs +++ b/core/node/house_keeper/src/prover/queue_reporter/fri_witness_generator_queue_reporter.rs @@ -4,7 +4,8 @@ use async_trait::async_trait; use prover_dal::{Prover, ProverDal}; use zksync_dal::ConnectionPool; use zksync_types::{ - basic_fri_types::AggregationRound, prover_dal::JobCountStatistics, ProtocolVersionId, + basic_fri_types::AggregationRound, protocol_version::ProtocolSemanticVersion, + prover_dal::JobCountStatistics, }; use crate::{periodic_job::PeriodicJob, prover::metrics::SERVER_METRICS}; @@ -27,7 +28,7 @@ impl FriWitnessGeneratorQueueReporter { async fn get_job_statistics( &self, - ) -> HashMap<(AggregationRound, ProtocolVersionId), JobCountStatistics> { + ) -> HashMap<(AggregationRound, ProtocolSemanticVersion), JobCountStatistics> { let mut conn = self.pool.connection().await.unwrap(); let mut result = HashMap::new(); result.extend( @@ -62,7 +63,7 @@ impl FriWitnessGeneratorQueueReporter { fn emit_metrics_for_round( round: AggregationRound, - protocol_version: ProtocolVersionId, + protocol_version: ProtocolSemanticVersion, stats: &JobCountStatistics, ) { if stats.queued > 0 || stats.in_progress > 0 { @@ -95,7 +96,7 @@ impl PeriodicJob for FriWitnessGeneratorQueueReporter { async fn run_routine_task(&mut self) -> anyhow::Result<()> { let stats_for_all_rounds = self.get_job_statistics().await; - let mut aggregated = HashMap::::new(); + let mut aggregated = HashMap::::new(); for ((round, protocol_version), stats) in stats_for_all_rounds { emit_metrics_for_round(round, protocol_version, &stats); diff --git 
a/core/node/node_framework/Cargo.toml b/core/node/node_framework/Cargo.toml index f95500a3836d..8e2c915d5749 100644 --- a/core/node/node_framework/Cargo.toml +++ b/core/node/node_framework/Cargo.toml @@ -33,6 +33,7 @@ zksync_commitment_generator.workspace = true zksync_house_keeper.workspace = true zksync_node_fee_model.workspace = true zksync_eth_sender.workspace = true +zksync_block_reverter.workspace = true zksync_state_keeper.workspace = true zksync_consistency_checker.workspace = true zksync_metadata_calculator.workspace = true @@ -42,6 +43,8 @@ zksync_node_consensus.workspace = true zksync_contract_verification_server.workspace = true zksync_tee_verifier_input_producer.workspace = true zksync_queued_job_processor.workspace = true +zksync_reorg_detector.workspace = true +zksync_vm_runner.workspace = true tracing.workspace = true thiserror.workspace = true diff --git a/core/node/node_framework/examples/showcase.rs b/core/node/node_framework/examples/showcase.rs index 0a1552f33501..98baa5bc9683 100644 --- a/core/node/node_framework/examples/showcase.rs +++ b/core/node/node_framework/examples/showcase.rs @@ -10,7 +10,7 @@ use std::{ use zksync_node_framework::{ resource::Resource, service::{ServiceContext, StopReceiver, ZkStackServiceBuilder}, - task::Task, + task::{Task, TaskId}, wiring_layer::{WiringError, WiringLayer}, }; @@ -96,14 +96,14 @@ impl PutTask { #[async_trait::async_trait] impl Task for PutTask { - fn name(&self) -> &'static str { + fn id(&self) -> TaskId { // Task names simply have to be unique. They are used for logging and debugging. - "put_task" + "put_task".into() } /// This method will be invoked by the framework when the task is started. async fn run(self: Box, mut stop_receiver: StopReceiver) -> anyhow::Result<()> { - tracing::info!("Starting the task {}", self.name()); + tracing::info!("Starting the task {}", self.id()); // We have to respect the stop receiver and should exit as soon as we receive // a stop signal. @@ -138,12 +138,12 @@ impl CheckTask { #[async_trait::async_trait] impl Task for CheckTask { - fn name(&self) -> &'static str { - "check_task" + fn id(&self) -> TaskId { + "check_task".into() } async fn run(self: Box, mut stop_receiver: StopReceiver) -> anyhow::Result<()> { - tracing::info!("Starting the task {}", self.name()); + tracing::info!("Starting the task {}", self.id()); tokio::select! 
{ _ = self.run_inner() => {}, diff --git a/core/node/node_framework/src/implementations/layers/circuit_breaker_checker.rs b/core/node/node_framework/src/implementations/layers/circuit_breaker_checker.rs index f493d8081ef9..b8fff34b7e92 100644 --- a/core/node/node_framework/src/implementations/layers/circuit_breaker_checker.rs +++ b/core/node/node_framework/src/implementations/layers/circuit_breaker_checker.rs @@ -4,7 +4,7 @@ use zksync_config::configs::chain::CircuitBreakerConfig; use crate::{ implementations::resources::circuit_breakers::CircuitBreakersResource, service::{ServiceContext, StopReceiver}, - task::UnconstrainedTask, + task::{TaskId, UnconstrainedTask}, wiring_layer::{WiringError, WiringLayer}, }; @@ -43,8 +43,8 @@ struct CircuitBreakerCheckerTask { #[async_trait::async_trait] impl UnconstrainedTask for CircuitBreakerCheckerTask { - fn name(&self) -> &'static str { - "circuit_breaker_checker" + fn id(&self) -> TaskId { + "circuit_breaker_checker".into() } async fn run_unconstrained( diff --git a/core/node/node_framework/src/implementations/layers/commitment_generator.rs b/core/node/node_framework/src/implementations/layers/commitment_generator.rs index aeb668dca178..5d2f63931295 100644 --- a/core/node/node_framework/src/implementations/layers/commitment_generator.rs +++ b/core/node/node_framework/src/implementations/layers/commitment_generator.rs @@ -7,7 +7,7 @@ use crate::{ pools::{MasterPool, PoolResource}, }, service::{ServiceContext, StopReceiver}, - task::Task, + task::{Task, TaskId}, wiring_layer::{WiringError, WiringLayer}, }; @@ -55,8 +55,8 @@ struct CommitmentGeneratorTask { #[async_trait::async_trait] impl Task for CommitmentGeneratorTask { - fn name(&self) -> &'static str { - "commitment_generator" + fn id(&self) -> TaskId { + "commitment_generator".into() } async fn run(self: Box, stop_receiver: StopReceiver) -> anyhow::Result<()> { diff --git a/core/node/node_framework/src/implementations/layers/consensus.rs b/core/node/node_framework/src/implementations/layers/consensus.rs index 5a91e796eb5f..06bca1bba3ae 100644 --- a/core/node/node_framework/src/implementations/layers/consensus.rs +++ b/core/node/node_framework/src/implementations/layers/consensus.rs @@ -14,7 +14,7 @@ use crate::{ sync_state::SyncStateResource, }, service::{ServiceContext, StopReceiver}, - task::Task, + task::{Task, TaskId}, wiring_layer::{WiringError, WiringLayer}, }; @@ -110,8 +110,8 @@ pub struct MainNodeConsensusTask { #[async_trait::async_trait] impl Task for MainNodeConsensusTask { - fn name(&self) -> &'static str { - "consensus" + fn id(&self) -> TaskId { + "consensus".into() } async fn run(self: Box, mut stop_receiver: StopReceiver) -> anyhow::Result<()> { @@ -147,8 +147,8 @@ pub struct FetcherTask { #[async_trait::async_trait] impl Task for FetcherTask { - fn name(&self) -> &'static str { - "consensus_fetcher" + fn id(&self) -> TaskId { + "consensus_fetcher".into() } async fn run(self: Box, mut stop_receiver: StopReceiver) -> anyhow::Result<()> { diff --git a/core/node/node_framework/src/implementations/layers/consistency_checker.rs b/core/node/node_framework/src/implementations/layers/consistency_checker.rs index 4f2ec2ededcc..a387fc19ead1 100644 --- a/core/node/node_framework/src/implementations/layers/consistency_checker.rs +++ b/core/node/node_framework/src/implementations/layers/consistency_checker.rs @@ -8,7 +8,7 @@ use crate::{ pools::{MasterPool, PoolResource}, }, service::{ServiceContext, StopReceiver}, - task::Task, + task::{Task, TaskId}, wiring_layer::{WiringError, 
WiringLayer}, }; @@ -75,8 +75,8 @@ pub struct ConsistencyCheckerTask { #[async_trait::async_trait] impl Task for ConsistencyCheckerTask { - fn name(&self) -> &'static str { - "consistency_checker" + fn id(&self) -> TaskId { + "consistency_checker".into() } async fn run(self: Box, stop_receiver: StopReceiver) -> anyhow::Result<()> { diff --git a/core/node/node_framework/src/implementations/layers/contract_verification_api.rs b/core/node/node_framework/src/implementations/layers/contract_verification_api.rs index 2e0dcf540ea6..5e76c32ddd53 100644 --- a/core/node/node_framework/src/implementations/layers/contract_verification_api.rs +++ b/core/node/node_framework/src/implementations/layers/contract_verification_api.rs @@ -4,7 +4,7 @@ use zksync_dal::{ConnectionPool, Core}; use crate::{ implementations::resources::pools::{MasterPool, PoolResource, ReplicaPool}, service::{ServiceContext, StopReceiver}, - task::Task, + task::{Task, TaskId}, wiring_layer::{WiringError, WiringLayer}, }; @@ -46,8 +46,8 @@ pub struct ContractVerificationApiTask { #[async_trait::async_trait] impl Task for ContractVerificationApiTask { - fn name(&self) -> &'static str { - "contract_verification_api" + fn id(&self) -> TaskId { + "contract_verification_api".into() } async fn run(self: Box, stop_receiver: StopReceiver) -> anyhow::Result<()> { diff --git a/core/node/node_framework/src/implementations/layers/eth_sender.rs b/core/node/node_framework/src/implementations/layers/eth_sender.rs index ed27fe863214..3cf2cf597c31 100644 --- a/core/node/node_framework/src/implementations/layers/eth_sender.rs +++ b/core/node/node_framework/src/implementations/layers/eth_sender.rs @@ -14,7 +14,7 @@ use crate::{ pools::{MasterPool, PoolResource, ReplicaPool}, }, service::{ServiceContext, StopReceiver}, - task::Task, + task::{Task, TaskId}, wiring_layer::{WiringError, WiringLayer}, }; @@ -173,8 +173,8 @@ struct EthTxAggregatorTask { #[async_trait::async_trait] impl Task for EthTxAggregatorTask { - fn name(&self) -> &'static str { - "eth_tx_aggregator" + fn id(&self) -> TaskId { + "eth_tx_aggregator".into() } async fn run(self: Box, stop_receiver: StopReceiver) -> anyhow::Result<()> { @@ -189,8 +189,8 @@ struct EthTxManagerTask { #[async_trait::async_trait] impl Task for EthTxManagerTask { - fn name(&self) -> &'static str { - "eth_tx_manager" + fn id(&self) -> TaskId { + "eth_tx_manager".into() } async fn run(self: Box, stop_receiver: StopReceiver) -> anyhow::Result<()> { diff --git a/core/node/node_framework/src/implementations/layers/eth_watch.rs b/core/node/node_framework/src/implementations/layers/eth_watch.rs index c12d92907534..df9319013112 100644 --- a/core/node/node_framework/src/implementations/layers/eth_watch.rs +++ b/core/node/node_framework/src/implementations/layers/eth_watch.rs @@ -12,7 +12,7 @@ use crate::{ pools::{MasterPool, PoolResource}, }, service::{ServiceContext, StopReceiver}, - task::Task, + task::{Task, TaskId}, wiring_layer::{WiringError, WiringLayer}, }; @@ -75,8 +75,8 @@ struct EthWatchTask { #[async_trait::async_trait] impl Task for EthWatchTask { - fn name(&self) -> &'static str { - "eth_watch" + fn id(&self) -> TaskId { + "eth_watch".into() } async fn run(self: Box, stop_receiver: StopReceiver) -> anyhow::Result<()> { diff --git a/core/node/node_framework/src/implementations/layers/healtcheck_server.rs b/core/node/node_framework/src/implementations/layers/healtcheck_server.rs index 34c41fd70a97..c6138c711083 100644 --- a/core/node/node_framework/src/implementations/layers/healtcheck_server.rs +++ 
b/core/node/node_framework/src/implementations/layers/healtcheck_server.rs @@ -7,7 +7,7 @@ use zksync_node_api_server::healthcheck::HealthCheckHandle; use crate::{ implementations::resources::healthcheck::AppHealthCheckResource, service::{ServiceContext, StopReceiver}, - task::UnconstrainedTask, + task::{TaskId, UnconstrainedTask}, wiring_layer::{WiringError, WiringLayer}, }; @@ -53,8 +53,8 @@ struct HealthCheckTask { #[async_trait::async_trait] impl UnconstrainedTask for HealthCheckTask { - fn name(&self) -> &'static str { - "healthcheck_server" + fn id(&self) -> TaskId { + "healthcheck_server".into() } async fn run_unconstrained( diff --git a/core/node/node_framework/src/implementations/layers/house_keeper.rs b/core/node/node_framework/src/implementations/layers/house_keeper.rs index 1eb559ea5e1f..7b3e52c7ed5d 100644 --- a/core/node/node_framework/src/implementations/layers/house_keeper.rs +++ b/core/node/node_framework/src/implementations/layers/house_keeper.rs @@ -19,7 +19,7 @@ use zksync_house_keeper::{ use crate::{ implementations::resources::pools::{PoolResource, ProverPool, ReplicaPool}, service::{ServiceContext, StopReceiver}, - task::Task, + task::{Task, TaskId}, wiring_layer::{WiringError, WiringLayer}, }; @@ -179,8 +179,8 @@ struct PostgresMetricsScrapingTask { #[async_trait::async_trait] impl Task for PostgresMetricsScrapingTask { - fn name(&self) -> &'static str { - "postgres_metrics_scraping" + fn id(&self) -> TaskId { + "postgres_metrics_scraping".into() } async fn run(self: Box, mut stop_receiver: StopReceiver) -> anyhow::Result<()> { @@ -203,8 +203,8 @@ struct L1BatchMetricsReporterTask { #[async_trait::async_trait] impl Task for L1BatchMetricsReporterTask { - fn name(&self) -> &'static str { - "l1_batch_metrics_reporter" + fn id(&self) -> TaskId { + "l1_batch_metrics_reporter".into() } async fn run(self: Box, stop_receiver: StopReceiver) -> anyhow::Result<()> { @@ -219,8 +219,8 @@ struct FriProverJobRetryManagerTask { #[async_trait::async_trait] impl Task for FriProverJobRetryManagerTask { - fn name(&self) -> &'static str { - "fri_prover_job_retry_manager" + fn id(&self) -> TaskId { + "fri_prover_job_retry_manager".into() } async fn run(self: Box, stop_receiver: StopReceiver) -> anyhow::Result<()> { @@ -235,8 +235,8 @@ struct FriWitnessGeneratorJobRetryManagerTask { #[async_trait::async_trait] impl Task for FriWitnessGeneratorJobRetryManagerTask { - fn name(&self) -> &'static str { - "fri_witness_generator_job_retry_manager" + fn id(&self) -> TaskId { + "fri_witness_generator_job_retry_manager".into() } async fn run(self: Box, stop_receiver: StopReceiver) -> anyhow::Result<()> { @@ -253,8 +253,8 @@ struct WaitingToQueuedFriWitnessJobMoverTask { #[async_trait::async_trait] impl Task for WaitingToQueuedFriWitnessJobMoverTask { - fn name(&self) -> &'static str { - "waiting_to_queued_fri_witness_job_mover" + fn id(&self) -> TaskId { + "waiting_to_queued_fri_witness_job_mover".into() } async fn run(self: Box, stop_receiver: StopReceiver) -> anyhow::Result<()> { @@ -271,8 +271,8 @@ struct FriWitnessGeneratorStatsReporterTask { #[async_trait::async_trait] impl Task for FriWitnessGeneratorStatsReporterTask { - fn name(&self) -> &'static str { - "fri_witness_generator_stats_reporter" + fn id(&self) -> TaskId { + "fri_witness_generator_stats_reporter".into() } async fn run(self: Box, stop_receiver: StopReceiver) -> anyhow::Result<()> { @@ -289,8 +289,8 @@ struct FriProverStatsReporterTask { #[async_trait::async_trait] impl Task for FriProverStatsReporterTask { - fn name(&self) -> 
&'static str { - "fri_prover_stats_reporter" + fn id(&self) -> TaskId { + "fri_prover_stats_reporter".into() } async fn run(self: Box, stop_receiver: StopReceiver) -> anyhow::Result<()> { @@ -305,8 +305,8 @@ struct FriProofCompressorStatsReporterTask { #[async_trait::async_trait] impl Task for FriProofCompressorStatsReporterTask { - fn name(&self) -> &'static str { - "fri_proof_compressor_stats_reporter" + fn id(&self) -> TaskId { + "fri_proof_compressor_stats_reporter".into() } async fn run(self: Box, stop_receiver: StopReceiver) -> anyhow::Result<()> { @@ -323,8 +323,8 @@ struct FriProofCompressorJobRetryManagerTask { #[async_trait::async_trait] impl Task for FriProofCompressorJobRetryManagerTask { - fn name(&self) -> &'static str { - "fri_proof_compressor_job_retry_manager" + fn id(&self) -> TaskId { + "fri_proof_compressor_job_retry_manager".into() } async fn run(self: Box, stop_receiver: StopReceiver) -> anyhow::Result<()> { @@ -341,8 +341,8 @@ struct FriProverJobArchiverTask { #[async_trait::async_trait] impl Task for FriProverJobArchiverTask { - fn name(&self) -> &'static str { - "fri_prover_job_archiver" + fn id(&self) -> TaskId { + "fri_prover_job_archiver".into() } async fn run(self: Box, stop_receiver: StopReceiver) -> anyhow::Result<()> { @@ -356,8 +356,8 @@ struct FriProverGpuArchiverTask { #[async_trait::async_trait] impl Task for FriProverGpuArchiverTask { - fn name(&self) -> &'static str { - "fri_prover_gpu_archiver" + fn id(&self) -> TaskId { + "fri_prover_gpu_archiver".into() } async fn run(self: Box, stop_receiver: StopReceiver) -> anyhow::Result<()> { diff --git a/core/node/node_framework/src/implementations/layers/l1_gas.rs b/core/node/node_framework/src/implementations/layers/l1_gas.rs index d9e554aad04e..8deafd4e2949 100644 --- a/core/node/node_framework/src/implementations/layers/l1_gas.rs +++ b/core/node/node_framework/src/implementations/layers/l1_gas.rs @@ -14,7 +14,7 @@ use crate::{ l1_tx_params::L1TxParamsResource, }, service::{ServiceContext, StopReceiver}, - task::Task, + task::{Task, TaskId}, wiring_layer::{WiringError, WiringLayer}, }; @@ -80,8 +80,8 @@ struct GasAdjusterTask { #[async_trait::async_trait] impl Task for GasAdjusterTask { - fn name(&self) -> &'static str { - "gas_adjuster" + fn id(&self) -> TaskId { + "gas_adjuster".into() } async fn run(self: Box, stop_receiver: StopReceiver) -> anyhow::Result<()> { diff --git a/core/node/node_framework/src/implementations/layers/main_node_client.rs b/core/node/node_framework/src/implementations/layers/main_node_client.rs new file mode 100644 index 000000000000..80e5d44c350f --- /dev/null +++ b/core/node/node_framework/src/implementations/layers/main_node_client.rs @@ -0,0 +1,48 @@ +use std::num::NonZeroUsize; + +use anyhow::Context; +use zksync_types::{url::SensitiveUrl, L2ChainId}; +use zksync_web3_decl::client::{Client, DynClient, L2}; + +use crate::{ + implementations::resources::main_node_client::MainNodeClientResource, + service::ServiceContext, + wiring_layer::{WiringError, WiringLayer}, +}; + +#[derive(Debug)] +pub struct MainNodeClientLayer { + url: SensitiveUrl, + rate_limit_rps: NonZeroUsize, + l2_chain_id: L2ChainId, +} + +impl MainNodeClientLayer { + pub fn new(url: SensitiveUrl, rate_limit_rps: NonZeroUsize, l2_chain_id: L2ChainId) -> Self { + Self { + url, + rate_limit_rps, + l2_chain_id, + } + } +} + +#[async_trait::async_trait] +impl WiringLayer for MainNodeClientLayer { + fn layer_name(&self) -> &'static str { + "main_node_client_layer" + } + + async fn wire(self: Box, mut context: 
ServiceContext<'_>) -> Result<(), WiringError> { + let main_node_client = Client::http(self.url) + .context("failed creating JSON-RPC client for main node")? + .for_network(self.l2_chain_id.into()) + .with_allowed_requests_per_second(self.rate_limit_rps) + .build(); + + context.insert_resource(MainNodeClientResource( + Box::new(main_node_client) as Box> + ))?; + Ok(()) + } +} diff --git a/core/node/node_framework/src/implementations/layers/metadata_calculator.rs b/core/node/node_framework/src/implementations/layers/metadata_calculator.rs index 4b1e1d00cb5e..935bb283fe81 100644 --- a/core/node/node_framework/src/implementations/layers/metadata_calculator.rs +++ b/core/node/node_framework/src/implementations/layers/metadata_calculator.rs @@ -18,7 +18,7 @@ use crate::{ web3_api::TreeApiClientResource, }, service::{ServiceContext, StopReceiver}, - task::Task, + task::{Task, TaskId}, wiring_layer::{WiringError, WiringLayer}, }; @@ -118,8 +118,8 @@ pub struct MetadataCalculatorTask { #[async_trait::async_trait] impl Task for MetadataCalculatorTask { - fn name(&self) -> &'static str { - "metadata_calculator" + fn id(&self) -> TaskId { + "metadata_calculator".into() } async fn run(self: Box, stop_receiver: StopReceiver) -> anyhow::Result<()> { @@ -141,8 +141,8 @@ pub struct TreeApiTask { #[async_trait::async_trait] impl Task for TreeApiTask { - fn name(&self) -> &'static str { - "tree_api" + fn id(&self) -> TaskId { + "tree_api".into() } async fn run(self: Box, stop_receiver: StopReceiver) -> anyhow::Result<()> { diff --git a/core/node/node_framework/src/implementations/layers/mod.rs b/core/node/node_framework/src/implementations/layers/mod.rs index cee9a0b6906d..1c171e84b5ba 100644 --- a/core/node/node_framework/src/implementations/layers/mod.rs +++ b/core/node/node_framework/src/implementations/layers/mod.rs @@ -8,6 +8,7 @@ pub mod eth_watch; pub mod healtcheck_server; pub mod house_keeper; pub mod l1_gas; +pub mod main_node_client; pub mod metadata_calculator; pub mod object_store; pub mod pk_signing_eth_client; @@ -15,7 +16,10 @@ pub mod pools_layer; pub mod prometheus_exporter; pub mod proof_data_handler; pub mod query_eth_client; +pub mod reorg_detector_checker; +pub mod reorg_detector_runner; pub mod sigint; pub mod state_keeper; pub mod tee_verifier_input_producer; +pub mod vm_runner; pub mod web3_api; diff --git a/core/node/node_framework/src/implementations/layers/prometheus_exporter.rs b/core/node/node_framework/src/implementations/layers/prometheus_exporter.rs index 95477291e432..6c7d4f915df4 100644 --- a/core/node/node_framework/src/implementations/layers/prometheus_exporter.rs +++ b/core/node/node_framework/src/implementations/layers/prometheus_exporter.rs @@ -4,7 +4,7 @@ use zksync_health_check::{HealthStatus, HealthUpdater, ReactiveHealthCheck}; use crate::{ implementations::resources::healthcheck::AppHealthCheckResource, service::{ServiceContext, StopReceiver}, - task::Task, + task::{Task, TaskId}, wiring_layer::{WiringError, WiringLayer}, }; @@ -50,8 +50,8 @@ impl WiringLayer for PrometheusExporterLayer { #[async_trait::async_trait] impl Task for PrometheusExporterTask { - fn name(&self) -> &'static str { - "prometheus_exporter" + fn id(&self) -> TaskId { + "prometheus_exporter".into() } async fn run(self: Box, stop_receiver: StopReceiver) -> anyhow::Result<()> { diff --git a/core/node/node_framework/src/implementations/layers/proof_data_handler.rs b/core/node/node_framework/src/implementations/layers/proof_data_handler.rs index f9960036cec4..7952ca6a585f 100644 --- 
a/core/node/node_framework/src/implementations/layers/proof_data_handler.rs +++ b/core/node/node_framework/src/implementations/layers/proof_data_handler.rs @@ -11,7 +11,7 @@ use crate::{ pools::{MasterPool, PoolResource}, }, service::{ServiceContext, StopReceiver}, - task::Task, + task::{Task, TaskId}, wiring_layer::{WiringError, WiringLayer}, }; @@ -73,8 +73,8 @@ struct ProofDataHandlerTask { #[async_trait::async_trait] impl Task for ProofDataHandlerTask { - fn name(&self) -> &'static str { - "proof_data_handler" + fn id(&self) -> TaskId { + "proof_data_handler".into() } async fn run(self: Box, stop_receiver: StopReceiver) -> anyhow::Result<()> { diff --git a/core/node/node_framework/src/implementations/layers/reorg_detector_checker.rs b/core/node/node_framework/src/implementations/layers/reorg_detector_checker.rs new file mode 100644 index 000000000000..64454b63998b --- /dev/null +++ b/core/node/node_framework/src/implementations/layers/reorg_detector_checker.rs @@ -0,0 +1,71 @@ +use std::time::Duration; + +use anyhow::Context; +use zksync_reorg_detector::{self, ReorgDetector}; + +use crate::{ + implementations::resources::{ + main_node_client::MainNodeClientResource, + pools::{MasterPool, PoolResource}, + }, + precondition::Precondition, + service::{ServiceContext, StopReceiver}, + task::TaskId, + wiring_layer::{WiringError, WiringLayer}, +}; + +const REORG_DETECTED_SLEEP_INTERVAL: Duration = Duration::from_secs(1); + +/// The layer is responsible for integrating reorg checking into the system. +/// When a reorg is detected, the system will not start running until it is fixed. +#[derive(Debug)] +pub struct ReorgDetectorCheckerLayer; + +#[async_trait::async_trait] +impl WiringLayer for ReorgDetectorCheckerLayer { + fn layer_name(&self) -> &'static str { + "reorg_detector_checker_layer" + } + + async fn wire(self: Box, mut context: ServiceContext<'_>) -> Result<(), WiringError> { + // Get resources. + let main_node_client = context.get_resource::().await?.0; + + let pool_resource = context.get_resource::>().await?; + let pool = pool_resource.get().await?; + + // Create and insert precondition. + context.add_precondition(Box::new(CheckerPrecondition { + reorg_detector: ReorgDetector::new(main_node_client, pool), + })); + + Ok(()) + } +} + +pub struct CheckerPrecondition { + reorg_detector: ReorgDetector, +} + +#[async_trait::async_trait] +impl Precondition for CheckerPrecondition { + fn id(&self) -> TaskId { + "reorg_detector_checker".into() + } + + async fn check(mut self: Box, stop_receiver: StopReceiver) -> anyhow::Result<()> { + loop { + match self.reorg_detector.run_once(stop_receiver.0.clone()).await { + Ok(()) => return Ok(()), + Err(zksync_reorg_detector::Error::ReorgDetected(last_correct_l1_batch)) => { + tracing::warn!( + "Reorg detected, last correct L1 batch #{}. Waiting till it will be resolved. 
Sleep for {} seconds and retry", + last_correct_l1_batch, REORG_DETECTED_SLEEP_INTERVAL.as_secs() + ); + tokio::time::sleep(REORG_DETECTED_SLEEP_INTERVAL).await; + } + Err(err) => return Err(err).context("reorg_detector.check_consistency()"), + } + } + } +} diff --git a/core/node/node_framework/src/implementations/layers/reorg_detector_runner.rs b/core/node/node_framework/src/implementations/layers/reorg_detector_runner.rs new file mode 100644 index 000000000000..55ee621c15b0 --- /dev/null +++ b/core/node/node_framework/src/implementations/layers/reorg_detector_runner.rs @@ -0,0 +1,73 @@ +use std::sync::Arc; + +use anyhow::Context; +use zksync_block_reverter::BlockReverter; +use zksync_reorg_detector::{self, ReorgDetector}; + +use crate::{ + implementations::resources::{ + main_node_client::MainNodeClientResource, + pools::{MasterPool, PoolResource}, + reverter::BlockReverterResource, + }, + service::{ServiceContext, StopReceiver}, + task::{TaskId, UnconstrainedOneshotTask}, + wiring_layer::{WiringError, WiringLayer}, +}; + +/// Layer responsible for detecting reorg and reverting blocks in case it was found. +#[derive(Debug)] +pub struct ReorgDetectorRunnerLayer; + +#[async_trait::async_trait] +impl WiringLayer for ReorgDetectorRunnerLayer { + fn layer_name(&self) -> &'static str { + "reorg_detector_runner_layer" + } + + async fn wire(self: Box, mut context: ServiceContext<'_>) -> Result<(), WiringError> { + // Get resources. + let main_node_client = context.get_resource::().await?.0; + + let pool_resource = context.get_resource::>().await?; + let pool = pool_resource.get().await?; + + let reverter = context.get_resource::().await?.0; + + // Create and insert task. + context.add_unconstrained_oneshot_task(Box::new(RunnerUnconstrainedOneshotTask { + reorg_detector: ReorgDetector::new(main_node_client, pool), + reverter, + })); + + Ok(()) + } +} + +pub struct RunnerUnconstrainedOneshotTask { + reorg_detector: ReorgDetector, + reverter: Arc, +} + +#[async_trait::async_trait] +impl UnconstrainedOneshotTask for RunnerUnconstrainedOneshotTask { + fn id(&self) -> TaskId { + "reorg_detector_runner".into() + } + + async fn run_unconstrained_oneshot( + mut self: Box, + stop_receiver: StopReceiver, + ) -> anyhow::Result<()> { + match self.reorg_detector.run_once(stop_receiver.0.clone()).await { + Ok(()) => {} + Err(zksync_reorg_detector::Error::ReorgDetected(last_correct_l1_batch)) => { + tracing::info!("Reverting to l1 batch number {last_correct_l1_batch}"); + self.reverter.roll_back(last_correct_l1_batch).await?; + tracing::info!("Revert successfully completed"); + } + Err(err) => return Err(err).context("reorg_detector.check_consistency()"), + } + Ok(()) + } +} diff --git a/core/node/node_framework/src/implementations/layers/sigint.rs b/core/node/node_framework/src/implementations/layers/sigint.rs index a028be97995d..2d11f1525370 100644 --- a/core/node/node_framework/src/implementations/layers/sigint.rs +++ b/core/node/node_framework/src/implementations/layers/sigint.rs @@ -2,7 +2,7 @@ use tokio::sync::oneshot; use crate::{ service::{ServiceContext, StopReceiver}, - task::UnconstrainedTask, + task::{TaskId, UnconstrainedTask}, wiring_layer::{WiringError, WiringLayer}, }; @@ -29,8 +29,8 @@ struct SigintHandlerTask; #[async_trait::async_trait] impl UnconstrainedTask for SigintHandlerTask { - fn name(&self) -> &'static str { - "sigint_handler" + fn id(&self) -> TaskId { + "sigint_handler".into() } async fn run_unconstrained( diff --git 
a/core/node/node_framework/src/implementations/layers/state_keeper/mempool_io.rs b/core/node/node_framework/src/implementations/layers/state_keeper/mempool_io.rs index 91be11ea8a8e..65e86bef5204 100644 --- a/core/node/node_framework/src/implementations/layers/state_keeper/mempool_io.rs +++ b/core/node/node_framework/src/implementations/layers/state_keeper/mempool_io.rs @@ -22,7 +22,7 @@ use crate::{ }, resource::Unique, service::{ServiceContext, StopReceiver}, - task::Task, + task::{Task, TaskId}, wiring_layer::{WiringError, WiringLayer}, }; @@ -142,8 +142,8 @@ struct L2BlockSealerTask(zksync_state_keeper::L2BlockSealerTask); #[async_trait::async_trait] impl Task for L2BlockSealerTask { - fn name(&self) -> &'static str { - "state_keeper/l2_block_sealer" + fn id(&self) -> TaskId { + "state_keeper/l2_block_sealer".into() } async fn run(self: Box, _stop_receiver: StopReceiver) -> anyhow::Result<()> { @@ -157,8 +157,8 @@ struct MempoolFetcherTask(MempoolFetcher); #[async_trait::async_trait] impl Task for MempoolFetcherTask { - fn name(&self) -> &'static str { - "state_keeper/mempool_fetcher" + fn id(&self) -> TaskId { + "state_keeper/mempool_fetcher".into() } async fn run(self: Box, stop_receiver: StopReceiver) -> anyhow::Result<()> { diff --git a/core/node/node_framework/src/implementations/layers/state_keeper/mod.rs b/core/node/node_framework/src/implementations/layers/state_keeper/mod.rs index 8d56bdd671a4..edbe1d6e12f7 100644 --- a/core/node/node_framework/src/implementations/layers/state_keeper/mod.rs +++ b/core/node/node_framework/src/implementations/layers/state_keeper/mod.rs @@ -21,7 +21,7 @@ use crate::{ }, }, service::{ServiceContext, StopReceiver}, - task::Task, + task::{Task, TaskId}, wiring_layer::{WiringError, WiringLayer}, }; @@ -105,8 +105,8 @@ struct StateKeeperTask { #[async_trait::async_trait] impl Task for StateKeeperTask { - fn name(&self) -> &'static str { - "state_keeper" + fn id(&self) -> TaskId { + "state_keeper".into() } async fn run(self: Box, stop_receiver: StopReceiver) -> anyhow::Result<()> { @@ -134,8 +134,8 @@ struct RocksdbCatchupTask(AsyncCatchupTask); #[async_trait::async_trait] impl Task for RocksdbCatchupTask { - fn name(&self) -> &'static str { - "state_keeper/rocksdb_catchup_task" + fn id(&self) -> TaskId { + "state_keeper/rocksdb_catchup_task".into() } async fn run(self: Box, mut stop_receiver: StopReceiver) -> anyhow::Result<()> { diff --git a/core/node/node_framework/src/implementations/layers/tee_verifier_input_producer.rs b/core/node/node_framework/src/implementations/layers/tee_verifier_input_producer.rs index a595e2eeb20b..76ae0b26971f 100644 --- a/core/node/node_framework/src/implementations/layers/tee_verifier_input_producer.rs +++ b/core/node/node_framework/src/implementations/layers/tee_verifier_input_producer.rs @@ -8,7 +8,7 @@ use crate::{ pools::{MasterPool, PoolResource}, }, service::{ServiceContext, StopReceiver}, - task::Task, + task::{Task, TaskId}, wiring_layer::{WiringError, WiringLayer}, }; @@ -52,8 +52,8 @@ pub struct TeeVerifierInputProducerTask { #[async_trait::async_trait] impl Task for TeeVerifierInputProducerTask { - fn name(&self) -> &'static str { - "tee_verifier_input_producer" + fn id(&self) -> TaskId { + "tee_verifier_input_producer".into() } async fn run(self: Box, stop_receiver: StopReceiver) -> anyhow::Result<()> { diff --git a/core/node/node_framework/src/implementations/layers/vm_runner/mod.rs b/core/node/node_framework/src/implementations/layers/vm_runner/mod.rs new file mode 100644 index 000000000000..a105ad81ee60 
--- /dev/null +++ b/core/node/node_framework/src/implementations/layers/vm_runner/mod.rs @@ -0,0 +1,34 @@ +use zksync_vm_runner::{ConcurrentOutputHandlerFactoryTask, StorageSyncTask, VmRunnerIo}; + +use crate::{ + service::StopReceiver, + task::{Task, TaskId}, +}; + +pub mod protective_reads; + +#[async_trait::async_trait] +impl Task for StorageSyncTask { + fn id(&self) -> TaskId { + format!("vm_runner/{}/storage_sync", self.io().name()).into() + } + + async fn run(self: Box, mut stop_receiver: StopReceiver) -> anyhow::Result<()> { + StorageSyncTask::run(*self, stop_receiver.0.clone()).await?; + stop_receiver.0.changed().await?; + Ok(()) + } +} + +#[async_trait::async_trait] +impl Task for ConcurrentOutputHandlerFactoryTask { + fn id(&self) -> TaskId { + format!("vm_runner/{}/output_handler", self.io().name()).into() + } + + async fn run(self: Box, mut stop_receiver: StopReceiver) -> anyhow::Result<()> { + ConcurrentOutputHandlerFactoryTask::run(*self, stop_receiver.0.clone()).await?; + stop_receiver.0.changed().await?; + Ok(()) + } +} diff --git a/core/node/node_framework/src/implementations/layers/vm_runner/protective_reads.rs b/core/node/node_framework/src/implementations/layers/vm_runner/protective_reads.rs new file mode 100644 index 000000000000..332793031fa5 --- /dev/null +++ b/core/node/node_framework/src/implementations/layers/vm_runner/protective_reads.rs @@ -0,0 +1,86 @@ +use zksync_config::configs::vm_runner::ProtectiveReadsWriterConfig; +use zksync_types::L2ChainId; +use zksync_vm_runner::ProtectiveReadsWriter; + +use crate::{ + implementations::resources::pools::{MasterPool, PoolResource}, + service::{ServiceContext, StopReceiver}, + task::{Task, TaskId}, + wiring_layer::{WiringError, WiringLayer}, +}; + +#[derive(Debug)] +pub struct ProtectiveReadsWriterLayer { + protective_reads_writer_config: ProtectiveReadsWriterConfig, + zksync_network_id: L2ChainId, +} + +impl ProtectiveReadsWriterLayer { + pub fn new( + protective_reads_writer_config: ProtectiveReadsWriterConfig, + zksync_network_id: L2ChainId, + ) -> Self { + Self { + protective_reads_writer_config, + zksync_network_id, + } + } +} + +#[async_trait::async_trait] +impl WiringLayer for ProtectiveReadsWriterLayer { + fn layer_name(&self) -> &'static str { + "vm_runner_protective_reads" + } + + async fn wire(self: Box, mut context: ServiceContext<'_>) -> Result<(), WiringError> { + let master_pool = context.get_resource::>().await?; + + let (protective_reads_writer, tasks) = ProtectiveReadsWriter::new( + // One for `StorageSyncTask` which can hold a long-term connection in case it needs to + // catch up cache. + // + // One for `ConcurrentOutputHandlerFactoryTask`/`VmRunner` as they need occasional access + // to DB for querying last processed batch and last ready to be loaded batch. + // + // `self.protective_reads_writer_config` connections for `ProtectiveReadsOutputHandlerFactory` + // as there can be multiple output handlers holding multi-second connections to write + // large amount of protective reads. 
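            // For illustration only (not part of this patch): with a hypothetical
            // `protective_reads_window_size` of 3, the custom pool requested below would hold
            // 3 + 2 = 5 connections: one per window slot for `ProtectiveReadsOutputHandlerFactory`,
            // plus one for `StorageSyncTask` and one for `ConcurrentOutputHandlerFactoryTask`/`VmRunner`.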
+ master_pool + .get_custom( + self.protective_reads_writer_config + .protective_reads_window_size + + 2, + ) + .await?, + self.protective_reads_writer_config.protective_reads_db_path, + self.zksync_network_id, + self.protective_reads_writer_config + .protective_reads_window_size, + ) + .await?; + + context.add_task(Box::new(tasks.loader_task)); + context.add_task(Box::new(tasks.output_handler_factory_task)); + context.add_task(Box::new(ProtectiveReadsWriterTask { + protective_reads_writer, + })); + Ok(()) + } +} + +#[derive(Debug)] +struct ProtectiveReadsWriterTask { + protective_reads_writer: ProtectiveReadsWriter, +} + +#[async_trait::async_trait] +impl Task for ProtectiveReadsWriterTask { + fn id(&self) -> TaskId { + "vm_runner/protective_reads_writer".into() + } + + async fn run(self: Box, stop_receiver: StopReceiver) -> anyhow::Result<()> { + self.protective_reads_writer.run(&stop_receiver.0).await + } +} diff --git a/core/node/node_framework/src/implementations/layers/web3_api/caches.rs b/core/node/node_framework/src/implementations/layers/web3_api/caches.rs index 7c6d160c3339..c01a62748fa4 100644 --- a/core/node/node_framework/src/implementations/layers/web3_api/caches.rs +++ b/core/node/node_framework/src/implementations/layers/web3_api/caches.rs @@ -8,7 +8,7 @@ use crate::{ web3_api::MempoolCacheResource, }, service::{ServiceContext, StopReceiver}, - task::Task, + task::{Task, TaskId}, wiring_layer::{WiringError, WiringLayer}, }; @@ -49,8 +49,8 @@ pub struct MempoolCacheUpdateTask(mempool_cache::MempoolCacheUpdateTask); #[async_trait::async_trait] impl Task for MempoolCacheUpdateTask { - fn name(&self) -> &'static str { - "mempool_cache_update_task" + fn id(&self) -> TaskId { + "mempool_cache_update_task".into() } async fn run(self: Box, stop_receiver: StopReceiver) -> anyhow::Result<()> { diff --git a/core/node/node_framework/src/implementations/layers/web3_api/server.rs b/core/node/node_framework/src/implementations/layers/web3_api/server.rs index 08eaa4b80444..c81b475c3ec4 100644 --- a/core/node/node_framework/src/implementations/layers/web3_api/server.rs +++ b/core/node/node_framework/src/implementations/layers/web3_api/server.rs @@ -14,7 +14,7 @@ use crate::{ web3_api::{MempoolCacheResource, TreeApiClientResource, TxSenderResource}, }, service::{ServiceContext, StopReceiver}, - task::Task, + task::{Task, TaskId}, wiring_layer::{WiringError, WiringLayer}, }; @@ -206,10 +206,10 @@ type ApiJoinHandle = JoinHandle>; #[async_trait::async_trait] impl Task for Web3ApiTask { - fn name(&self) -> &'static str { + fn id(&self) -> TaskId { match self.transport { - Transport::Http => "web3_http_server", - Transport::Ws => "web3_ws_server", + Transport::Http => "web3_http_server".into(), + Transport::Ws => "web3_ws_server".into(), } } @@ -232,8 +232,8 @@ struct ApiTaskGarbageCollector { #[async_trait::async_trait] impl Task for ApiTaskGarbageCollector { - fn name(&self) -> &'static str { - "api_task_garbage_collector" + fn id(&self) -> TaskId { + "api_task_garbage_collector".into() } async fn run(self: Box, _stop_receiver: StopReceiver) -> anyhow::Result<()> { diff --git a/core/node/node_framework/src/implementations/layers/web3_api/tx_sender.rs b/core/node/node_framework/src/implementations/layers/web3_api/tx_sender.rs index eea9148f6a6e..c7a568e5cb4d 100644 --- a/core/node/node_framework/src/implementations/layers/web3_api/tx_sender.rs +++ b/core/node/node_framework/src/implementations/layers/web3_api/tx_sender.rs @@ -14,7 +14,7 @@ use crate::{ web3_api::{TxSenderResource, 
TxSinkResource}, }, service::{ServiceContext, StopReceiver}, - task::Task, + task::{Task, TaskId}, wiring_layer::{WiringError, WiringLayer}, }; @@ -123,8 +123,8 @@ impl fmt::Debug for PostgresStorageCachesTask { #[async_trait::async_trait] impl Task for PostgresStorageCachesTask { - fn name(&self) -> &'static str { - "postgres_storage_caches" + fn id(&self) -> TaskId { + "postgres_storage_caches".into() } async fn run(self: Box, stop_receiver: StopReceiver) -> anyhow::Result<()> { @@ -138,8 +138,8 @@ struct VmConcurrencyBarrierTask { #[async_trait::async_trait] impl Task for VmConcurrencyBarrierTask { - fn name(&self) -> &'static str { - "vm_concurrency_barrier_task" + fn id(&self) -> TaskId { + "vm_concurrency_barrier_task".into() } async fn run(mut self: Box, mut stop_receiver: StopReceiver) -> anyhow::Result<()> { diff --git a/core/node/node_framework/src/implementations/resources/mod.rs b/core/node/node_framework/src/implementations/resources/mod.rs index 17c939419985..edfb280d4db7 100644 --- a/core/node/node_framework/src/implementations/resources/mod.rs +++ b/core/node/node_framework/src/implementations/resources/mod.rs @@ -7,6 +7,7 @@ pub mod l1_tx_params; pub mod main_node_client; pub mod object_store; pub mod pools; +pub mod reverter; pub mod state_keeper; pub mod sync_state; pub mod web3_api; diff --git a/core/node/node_framework/src/implementations/resources/reverter.rs b/core/node/node_framework/src/implementations/resources/reverter.rs new file mode 100644 index 000000000000..2a2bdb142a85 --- /dev/null +++ b/core/node/node_framework/src/implementations/resources/reverter.rs @@ -0,0 +1,15 @@ +use std::sync::Arc; + +use zksync_block_reverter::BlockReverter; + +use crate::resource::Resource; + +/// Wrapper for the block reverter. +#[derive(Debug, Clone)] +pub struct BlockReverterResource(pub Arc); + +impl Resource for BlockReverterResource { + fn name() -> String { + "common/block_reverter".into() + } +} diff --git a/core/node/node_framework/src/precondition.rs b/core/node/node_framework/src/precondition.rs index 0e47da6a631e..a612c5b90a8b 100644 --- a/core/node/node_framework/src/precondition.rs +++ b/core/node/node_framework/src/precondition.rs @@ -2,12 +2,12 @@ use std::sync::Arc; use tokio::sync::Barrier; -use crate::service::StopReceiver; +use crate::{service::StopReceiver, task::TaskId}; #[async_trait::async_trait] pub trait Precondition: 'static + Send + Sync { /// Unique name of the precondition. - fn name(&self) -> &'static str; + fn id(&self) -> TaskId; async fn check(self: Box, stop_receiver: StopReceiver) -> anyhow::Result<()>; } diff --git a/core/node/node_framework/src/service/context.rs b/core/node/node_framework/src/service/context.rs index 4ec76ca1d2a3..81d094630c32 100644 --- a/core/node/node_framework/src/service/context.rs +++ b/core/node/node_framework/src/service/context.rs @@ -39,7 +39,7 @@ impl<'a> ServiceContext<'a> { /// Added tasks will be launched after the wiring process will be finished and all the preconditions /// are met. 
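The `Precondition` trait above now identifies itself via `TaskId` as well. As a minimal sketch (a hypothetical implementor written in the crate-internal style the layers in this patch use, not code taken from the patch itself):

use crate::{precondition::Precondition, service::StopReceiver, task::TaskId};

/// Hypothetical precondition used only to illustrate the new `id()` signature;
/// it is considered met immediately.
#[derive(Debug)]
struct AlwaysReadyPrecondition;

#[async_trait::async_trait]
impl Precondition for AlwaysReadyPrecondition {
    fn id(&self) -> TaskId {
        "always_ready".into()
    }

    async fn check(self: Box<Self>, _stop_receiver: StopReceiver) -> anyhow::Result<()> {
        // Nothing to wait for: report the precondition as satisfied right away.
        Ok(())
    }
}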
pub fn add_task(&mut self, task: Box) -> &mut Self { - tracing::info!("Layer {} has added a new task: {}", self.layer, task.name()); + tracing::info!("Layer {} has added a new task: {}", self.layer, task.id()); self.service.runnables.tasks.push(task); self } @@ -50,7 +50,7 @@ impl<'a> ServiceContext<'a> { tracing::info!( "Layer {} has added a new unconstrained task: {}", self.layer, - task.name() + task.id() ); self.service.runnables.unconstrained_tasks.push(task); self @@ -61,7 +61,7 @@ impl<'a> ServiceContext<'a> { tracing::info!( "Layer {} has added a new precondition: {}", self.layer, - precondition.name() + precondition.id() ); self.service.runnables.preconditions.push(precondition); self @@ -72,7 +72,7 @@ impl<'a> ServiceContext<'a> { tracing::info!( "Layer {} has added a new oneshot task: {}", self.layer, - task.name() + task.id() ); self.service.runnables.oneshot_tasks.push(task); self @@ -86,7 +86,7 @@ impl<'a> ServiceContext<'a> { tracing::info!( "Layer {} has added a new unconstrained oneshot task: {}", self.layer, - task.name() + task.id() ); self.service .runnables diff --git a/core/node/node_framework/src/service/runnables.rs b/core/node/node_framework/src/service/runnables.rs index 7b3e3f7f43b7..7f35e384d6cc 100644 --- a/core/node/node_framework/src/service/runnables.rs +++ b/core/node/node_framework/src/service/runnables.rs @@ -27,22 +27,22 @@ pub(super) struct Runnables { impl fmt::Debug for Runnables { fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { - // Macro that iterates over a `Vec`, invokes `.name()` method and collects the results into a `Vec`. + // Macro that iterates over a `Vec`, invokes `.id()` method and collects the results into a `Vec`. // Returns a reference to created `Vec` to satisfy the `.field` method signature. - macro_rules! names { + macro_rules! 
ids { ($vec:expr) => { - &$vec.iter().map(|x| x.name()).collect::>() + &$vec.iter().map(|x| x.id()).collect::>() }; } f.debug_struct("Runnables") - .field("preconditions", names!(self.preconditions)) - .field("tasks", names!(self.tasks)) - .field("oneshot_tasks", names!(self.oneshot_tasks)) - .field("unconstrained_tasks", names!(self.unconstrained_tasks)) + .field("preconditions", ids!(self.preconditions)) + .field("tasks", ids!(self.tasks)) + .field("oneshot_tasks", ids!(self.oneshot_tasks)) + .field("unconstrained_tasks", ids!(self.unconstrained_tasks)) .field( "unconstrained_oneshot_tasks", - names!(self.unconstrained_oneshot_tasks), + ids!(self.unconstrained_oneshot_tasks), ) .finish() } @@ -127,7 +127,7 @@ impl Runnables { stop_receiver: StopReceiver, ) { for task in std::mem::take(&mut self.unconstrained_tasks) { - let name = task.name(); + let name = task.id(); let stop_receiver = stop_receiver.clone(); let task_future = Box::pin(async move { task.run_unconstrained(stop_receiver) @@ -145,7 +145,7 @@ impl Runnables { stop_receiver: StopReceiver, ) { for task in std::mem::take(&mut self.tasks) { - let name = task.name(); + let name = task.id(); let stop_receiver = stop_receiver.clone(); let task_barrier = task_barrier.clone(); let task_future = Box::pin(async move { @@ -164,7 +164,7 @@ impl Runnables { stop_receiver: StopReceiver, ) { for precondition in std::mem::take(&mut self.preconditions) { - let name = precondition.name(); + let name = precondition.id(); let stop_receiver = stop_receiver.clone(); let task_barrier = task_barrier.clone(); let task_future = Box::pin(async move { @@ -184,7 +184,7 @@ impl Runnables { stop_receiver: StopReceiver, ) { for oneshot_task in std::mem::take(&mut self.oneshot_tasks) { - let name = oneshot_task.name(); + let name = oneshot_task.id(); let stop_receiver = stop_receiver.clone(); let task_barrier = task_barrier.clone(); let task_future = Box::pin(async move { @@ -203,7 +203,7 @@ impl Runnables { stop_receiver: StopReceiver, ) { for unconstrained_oneshot_task in std::mem::take(&mut self.unconstrained_oneshot_tasks) { - let name = unconstrained_oneshot_task.name(); + let name = unconstrained_oneshot_task.id(); let stop_receiver = stop_receiver.clone(); let task_future = Box::pin(async move { unconstrained_oneshot_task diff --git a/core/node/node_framework/src/service/tests.rs b/core/node/node_framework/src/service/tests.rs index 81a7eaabdc6d..b5bcc3aaa255 100644 --- a/core/node/node_framework/src/service/tests.rs +++ b/core/node/node_framework/src/service/tests.rs @@ -9,7 +9,7 @@ use crate::{ ServiceContext, StopReceiver, WiringError, WiringLayer, ZkStackServiceBuilder, ZkStackServiceError, }, - task::Task, + task::{Task, TaskId}, }; // `ZkStack` Service's `new()` method has to have a check for nested runtime. 
@@ -127,8 +127,8 @@ struct ErrorTask; #[async_trait::async_trait] impl Task for ErrorTask { - fn name(&self) -> &'static str { - "error_task" + fn id(&self) -> TaskId { + "error_task".into() } async fn run(self: Box, _stop_receiver: StopReceiver) -> anyhow::Result<()> { anyhow::bail!("error task") @@ -178,8 +178,8 @@ struct SuccessfulTask(Arc, Arc>); #[async_trait::async_trait] impl Task for SuccessfulTask { - fn name(&self) -> &'static str { - "successful_task" + fn id(&self) -> TaskId { + "successful_task".into() } async fn run(self: Box, _stop_receiver: StopReceiver) -> anyhow::Result<()> { self.0.wait().await; @@ -196,8 +196,8 @@ struct RemainingTask(Arc, Arc>); #[async_trait::async_trait] impl Task for RemainingTask { - fn name(&self) -> &'static str { - "remaining_task" + fn id(&self) -> TaskId { + "remaining_task".into() } async fn run(self: Box, mut stop_receiver: StopReceiver) -> anyhow::Result<()> { diff --git a/core/node/node_framework/src/task.rs b/core/node/node_framework/src/task.rs index f5ba08de193a..8ff73d75d8fa 100644 --- a/core/node/node_framework/src/task.rs +++ b/core/node/node_framework/src/task.rs @@ -28,12 +28,52 @@ //! - A task that must be started as soon as possible, e.g. healthcheck server. //! - A task that may be a driving force for some precondition to be met. -use std::sync::Arc; +use std::{ + fmt::{Display, Formatter}, + ops::Deref, + sync::Arc, +}; use tokio::sync::Barrier; use crate::service::StopReceiver; +/// A unique human-readable identifier of a task. +#[derive(Debug, Clone, PartialEq, Eq, Hash)] +pub struct TaskId(String); + +impl TaskId { + pub fn new(value: String) -> Self { + TaskId(value) + } +} + +impl Display for TaskId { + fn fmt(&self, f: &mut Formatter<'_>) -> std::fmt::Result { + f.write_str(&self.0) + } +} + +impl From<&str> for TaskId { + fn from(value: &str) -> Self { + TaskId(value.to_owned()) + } +} + +impl From for TaskId { + fn from(value: String) -> Self { + TaskId(value) + } +} + +impl Deref for TaskId { + type Target = str; + + fn deref(&self) -> &Self::Target { + &self.0 + } +} + /// A task implementation. /// /// Note: any `Task` added to the service will only start after all the [preconditions](crate::precondition::Precondition) @@ -41,7 +81,7 @@ use crate::service::StopReceiver; #[async_trait::async_trait] pub trait Task: 'static + Send { /// Unique name of the task. - fn name(&self) -> &'static str; + fn id(&self) -> TaskId; /// Runs the task. /// @@ -85,7 +125,7 @@ impl dyn Task { #[async_trait::async_trait] pub trait OneshotTask: 'static + Send { /// Unique name of the task. - fn name(&self) -> &'static str; + fn id(&self) -> TaskId; /// Runs the task. /// @@ -130,7 +170,7 @@ impl dyn OneshotTask { #[async_trait::async_trait] pub trait UnconstrainedTask: 'static + Send { /// Unique name of the task. - fn name(&self) -> &'static str; + fn id(&self) -> TaskId; /// Runs the task without waiting for any precondition to be met. async fn run_unconstrained(self: Box, stop_receiver: StopReceiver) -> anyhow::Result<()>; @@ -141,7 +181,7 @@ pub trait UnconstrainedTask: 'static + Send { #[async_trait::async_trait] pub trait UnconstrainedOneshotTask: 'static + Send { /// Unique name of the task. - fn name(&self) -> &'static str; + fn id(&self) -> TaskId; /// Runs the task without waiting for any precondition to be met. 
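Since `TaskId` (defined in `task.rs` above) is a thin newtype over `String` with `From`, `Display`, and `Deref` implementations, identifiers can be built and inspected cheaply. A small illustrative sketch, not part of the patch:

use zksync_node_framework::task::TaskId;

fn main() {
    // `From<&str>` and `From<String>` cover both static and dynamically built names.
    let static_id: TaskId = "eth_tx_manager".into();
    let dynamic_id = TaskId::from(format!("vm_runner/{}/storage_sync", "protective_reads"));

    // `Display` makes the identifiers directly usable in log messages...
    println!("tasks: {static_id}, {dynamic_id}");

    // ...and `Deref<Target = str>` lets them be compared as plain strings.
    assert_eq!(&*static_id, "eth_tx_manager");
}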
async fn run_unconstrained_oneshot( diff --git a/core/node/node_sync/Cargo.toml b/core/node/node_sync/Cargo.toml index 248478abddf5..9fd0aad73094 100644 --- a/core/node/node_sync/Cargo.toml +++ b/core/node/node_sync/Cargo.toml @@ -38,4 +38,5 @@ thiserror.workspace = true zksync_node_test_utils.workspace = true assert_matches.workspace = true +once_cell.workspace = true test-casing.workspace = true diff --git a/core/node/node_sync/src/tree_data_fetcher/metrics.rs b/core/node/node_sync/src/tree_data_fetcher/metrics.rs index 5d063312f4ca..f0fb342b69b1 100644 --- a/core/node/node_sync/src/tree_data_fetcher/metrics.rs +++ b/core/node/node_sync/src/tree_data_fetcher/metrics.rs @@ -7,18 +7,22 @@ use vise::{ Info, Metrics, Unit, }; -use super::{StepOutcome, TreeDataFetcher, TreeDataFetcherError}; +use super::{provider::TreeDataProviderSource, StepOutcome, TreeDataFetcher, TreeDataFetcherError}; #[derive(Debug, EncodeLabelSet)] struct TreeDataFetcherInfo { #[metrics(unit = Unit::Seconds)] poll_interval: DurationAsSecs, + diamond_proxy_address: Option, } impl From<&TreeDataFetcher> for TreeDataFetcherInfo { fn from(fetcher: &TreeDataFetcher) -> Self { Self { poll_interval: fetcher.poll_interval.into(), + diamond_proxy_address: fetcher + .diamond_proxy_address + .map(|addr| format!("{addr:?}")), } } } @@ -39,6 +43,10 @@ pub(super) enum StepOutcomeLabel { TransientError, } +const BLOCK_DIFF_BUCKETS: Buckets = Buckets::values(&[ + 10.0, 20.0, 50.0, 100.0, 200.0, 500.0, 1_000.0, 2_000.0, 5_000.0, 10_000.0, 20_000.0, 50_000.0, +]); + #[derive(Debug, Metrics)] #[metrics(prefix = "external_node_tree_data_fetcher")] pub(super) struct TreeDataFetcherMetrics { @@ -51,6 +59,15 @@ pub(super) struct TreeDataFetcherMetrics { /// Latency of a particular stage of processing a single L1 batch. #[metrics(buckets = Buckets::LATENCIES, unit = Unit::Seconds)] pub stage_latency: Family>, + /// Number of steps during binary search of the L1 commit block number. + #[metrics(buckets = Buckets::linear(0.0..=32.0, 2.0))] + pub l1_commit_block_number_binary_search_steps: Histogram, + /// Difference between the "from" block specified in the event filter and the L1 block number of the fetched event. + /// Large values here can signal that fetching data from L1 can break because the filter won't get necessary events. + #[metrics(buckets = BLOCK_DIFF_BUCKETS)] + pub l1_commit_block_number_from_diff: Histogram, + /// Number of root hashes fetched from a particular source. + pub root_hash_sources: Family, } impl TreeDataFetcherMetrics { diff --git a/core/node/node_sync/src/tree_data_fetcher/mod.rs b/core/node/node_sync/src/tree_data_fetcher/mod.rs index dfa1f8ffa2cc..912952a8d144 100644 --- a/core/node/node_sync/src/tree_data_fetcher/mod.rs +++ b/core/node/node_sync/src/tree_data_fetcher/mod.rs @@ -1,51 +1,32 @@ //! Fetcher responsible for getting Merkle tree outputs from the main node. 
-use std::{fmt, time::Duration}; +use std::time::Duration; use anyhow::Context as _; -use async_trait::async_trait; use serde::Serialize; #[cfg(test)] use tokio::sync::mpsc; use tokio::sync::watch; use zksync_dal::{ConnectionPool, Core, CoreDal, DalError}; use zksync_health_check::{Health, HealthStatus, HealthUpdater, ReactiveHealthCheck}; -use zksync_types::{api, block::L1BatchTreeData, L1BatchNumber}; +use zksync_types::{block::L1BatchTreeData, Address, L1BatchNumber}; use zksync_web3_decl::{ - client::{DynClient, L2}, - error::{ClientRpcContext, EnrichedClientError, EnrichedClientResult}, - namespaces::ZksNamespaceClient, + client::{DynClient, L1, L2}, + error::EnrichedClientError, }; -use self::metrics::{ProcessingStage, TreeDataFetcherMetrics, METRICS}; +use self::{ + metrics::{ProcessingStage, TreeDataFetcherMetrics, METRICS}, + provider::{L1DataProvider, MissingData, TreeDataProvider}, +}; mod metrics; +mod provider; #[cfg(test)] mod tests; -#[async_trait] -trait MainNodeClient: fmt::Debug + Send + Sync + 'static { - async fn batch_details( - &self, - number: L1BatchNumber, - ) -> EnrichedClientResult>; -} - -#[async_trait] -impl MainNodeClient for Box> { - async fn batch_details( - &self, - number: L1BatchNumber, - ) -> EnrichedClientResult> { - self.get_l1_batch_details(number) - .rpc_context("get_l1_batch_details") - .with_arg("number", &number) - .await - } -} - #[derive(Debug, thiserror::Error)] -enum TreeDataFetcherError { +pub(crate) enum TreeDataFetcherError { #[error("error fetching data from main node")] Rpc(#[from] EnrichedClientError), #[error("internal error")] @@ -67,6 +48,8 @@ impl TreeDataFetcherError { } } +type TreeDataFetcherResult = Result; + #[derive(Debug, Serialize)] #[serde(untagged)] enum TreeDataFetcherHealth { @@ -108,7 +91,9 @@ enum StepOutcome { /// by Consistency checker. #[derive(Debug)] pub struct TreeDataFetcher { - main_node_client: Box, + data_provider: Box, + // Used in the Info metric + diamond_proxy_address: Option
, pool: ConnectionPool, metrics: &'static TreeDataFetcherMetrics, health_updater: HealthUpdater, @@ -123,7 +108,8 @@ impl TreeDataFetcher { /// Creates a new fetcher connected to the main node. pub fn new(client: Box>, pool: ConnectionPool) -> Self { Self { - main_node_client: Box::new(client.for_component("tree_data_fetcher")), + data_provider: Box::new(client.for_component("tree_data_fetcher")), + diamond_proxy_address: None, pool, metrics: &METRICS, health_updater: ReactiveHealthCheck::new("tree_data_fetcher").1, @@ -133,6 +119,29 @@ impl TreeDataFetcher { } } + /// Attempts to fetch root hashes from L1 (namely, `BlockCommit` events emitted by the diamond proxy) if possible. + /// The main node will still be used as a fallback in case communicating with L1 fails, or for newer batches, + /// which may not be committed on L1. + pub fn with_l1_data( + mut self, + eth_client: Box>, + diamond_proxy_address: Address, + ) -> anyhow::Result { + anyhow::ensure!( + self.diamond_proxy_address.is_none(), + "L1 tree data provider is already set up" + ); + + let l1_provider = L1DataProvider::new( + self.pool.clone(), + eth_client.for_component("tree_data_fetcher"), + diamond_proxy_address, + )?; + self.data_provider = Box::new(l1_provider.with_fallback(self.data_provider)); + self.diamond_proxy_address = Some(diamond_proxy_address); + Ok(self) + } + /// Returns a health check for this fetcher. pub fn health_check(&self) -> ReactiveHealthCheck { self.health_updater.subscribe() @@ -169,29 +178,38 @@ impl TreeDataFetcher { }) } - async fn step(&self) -> Result { + async fn step(&mut self) -> Result { let Some(l1_batch_to_fetch) = self.get_batch_to_fetch().await? else { return Ok(StepOutcome::NoProgress); }; tracing::debug!("Fetching tree data for L1 batch #{l1_batch_to_fetch} from main node"); let stage_latency = self.metrics.stage_latency[&ProcessingStage::Fetch].start(); - let batch_details = self - .main_node_client - .batch_details(l1_batch_to_fetch) - .await? - .with_context(|| { - format!( + let root_hash_result = self.data_provider.batch_details(l1_batch_to_fetch).await?; + stage_latency.observe(); + let root_hash = match root_hash_result { + Ok(output) => { + tracing::debug!( + "Received root hash for L1 batch #{l1_batch_to_fetch} from {source:?}: {root_hash:?}", + source = output.source, + root_hash = output.root_hash + ); + self.metrics.root_hash_sources[&output.source].inc(); + output.root_hash + } + Err(MissingData::Batch) => { + let err = anyhow::anyhow!( "L1 batch #{l1_batch_to_fetch} is sealed locally, but is not present on the main node, \ which is assumed to store batch info indefinitely" - ) - })?; - stage_latency.observe(); - let Some(root_hash) = batch_details.base.root_hash else { - tracing::debug!( - "L1 batch #{l1_batch_to_fetch} does not have root hash computed on the main node" - ); - return Ok(StepOutcome::RemoteHashMissing); + ); + return Err(err.into()); + } + Err(MissingData::RootHash) => { + tracing::debug!( + "L1 batch #{l1_batch_to_fetch} does not have root hash computed on the main node" + ); + return Ok(StepOutcome::RemoteHashMissing); + } }; let stage_latency = self.metrics.stage_latency[&ProcessingStage::Persistence].start(); @@ -224,7 +242,7 @@ impl TreeDataFetcher { /// Runs this component until a fatal error occurs or a stop signal is received. Transient errors /// (e.g., no network connection) are handled gracefully by retrying after a delay. 
- pub async fn run(self, mut stop_receiver: watch::Receiver) -> anyhow::Result<()> { + pub async fn run(mut self, mut stop_receiver: watch::Receiver) -> anyhow::Result<()> { self.metrics.observe_info(&self); self.health_updater .update(Health::from(HealthStatus::Ready)); diff --git a/core/node/node_sync/src/tree_data_fetcher/provider/mod.rs b/core/node/node_sync/src/tree_data_fetcher/provider/mod.rs new file mode 100644 index 000000000000..27cd040677d6 --- /dev/null +++ b/core/node/node_sync/src/tree_data_fetcher/provider/mod.rs @@ -0,0 +1,347 @@ +use std::fmt; + +use anyhow::Context; +use async_trait::async_trait; +use vise::{EncodeLabelSet, EncodeLabelValue}; +use zksync_dal::{ConnectionPool, Core, CoreDal}; +use zksync_eth_client::EthInterface; +use zksync_types::{web3, Address, L1BatchNumber, H256, U256, U64}; +use zksync_web3_decl::{ + client::{DynClient, L1, L2}, + error::{ClientRpcContext, EnrichedClientError, EnrichedClientResult}, + jsonrpsee::core::ClientError, + namespaces::ZksNamespaceClient, +}; + +use super::{metrics::METRICS, TreeDataFetcherResult}; + +#[cfg(test)] +mod tests; + +#[derive(Debug, thiserror::Error)] +pub(super) enum MissingData { + /// The provider lacks a requested L1 batch. + #[error("no requested L1 batch")] + Batch, + /// The provider lacks a root hash for a requested L1 batch; the batch itself is present on the provider. + #[error("no root hash for L1 batch")] + RootHash, +} + +#[derive(Debug, Clone, Copy, PartialEq, Eq, Hash, EncodeLabelValue, EncodeLabelSet)] +#[metrics(label = "source", rename_all = "snake_case")] +pub(super) enum TreeDataProviderSource { + L1CommitEvent, + BatchDetailsRpc, +} + +#[derive(Debug)] +pub(super) struct TreeDataProviderOutput { + pub root_hash: H256, + pub source: TreeDataProviderSource, +} + +pub(super) type TreeDataProviderResult = + TreeDataFetcherResult>; + +/// External provider of tree data, such as main node (via JSON-RPC). +#[async_trait] +pub(super) trait TreeDataProvider: fmt::Debug + Send + Sync + 'static { + /// Fetches a state root hash for the L1 batch with the specified number. + /// + /// It is guaranteed that this method will be called with monotonically increasing `number`s (although not necessarily sequential ones). + async fn batch_details(&mut self, number: L1BatchNumber) -> TreeDataProviderResult; +} + +#[async_trait] +impl TreeDataProvider for Box> { + async fn batch_details(&mut self, number: L1BatchNumber) -> TreeDataProviderResult { + let Some(batch_details) = self + .get_l1_batch_details(number) + .rpc_context("get_l1_batch_details") + .with_arg("number", &number) + .await? + else { + return Ok(Err(MissingData::Batch)); + }; + Ok(batch_details + .base + .root_hash + .ok_or(MissingData::RootHash) + .map(|root_hash| TreeDataProviderOutput { + root_hash, + source: TreeDataProviderSource::BatchDetailsRpc, + })) + } +} + +#[derive(Debug, Clone, Copy)] +struct PastL1BatchInfo { + number: L1BatchNumber, + l1_commit_block_number: U64, + l1_commit_block_timestamp: U256, +} + +/// Provider of tree data loading it from L1 `BlockCommit` events emitted by the diamond proxy contract. +/// Should be used together with an L2 provider because L1 data can be missing for latest batches, +/// and the provider implementation uses assumptions that can break in some corner cases. +/// +/// # Implementation details +/// +/// To limit the range of L1 blocks for `eth_getLogs` calls, the provider assumes that an L1 block with a `BlockCommit` event +/// for a certain L1 batch is relatively close to L1 batch sealing. 
Thus, the provider finds an approximate L1 block number +/// for the event using binary search, or uses an L1 block number of the `BlockCommit` event for the previously queried L1 batch +/// (provided it's not too far behind the seal timestamp of the batch). +#[derive(Debug)] +pub(super) struct L1DataProvider { + pool: ConnectionPool, + eth_client: Box>, + diamond_proxy_address: Address, + block_commit_signature: H256, + past_l1_batch: Option, +} + +impl L1DataProvider { + /// Accuracy when guessing L1 block number by L1 batch timestamp. + const L1_BLOCK_ACCURACY: U64 = U64([1_000]); + /// Range of L1 blocks queried via `eth_getLogs`. Should be at least several times greater than + /// `L1_BLOCK_ACCURACY`, but not large enough to trigger request limiting on the L1 RPC provider. + const L1_BLOCK_RANGE: U64 = U64([20_000]); + + pub fn new( + pool: ConnectionPool, + eth_client: Box>, + diamond_proxy_address: Address, + ) -> anyhow::Result { + let block_commit_signature = zksync_contracts::hyperchain_contract() + .event("BlockCommit") + .context("missing `BlockCommit` event")? + .signature(); + Ok(Self { + pool, + eth_client, + diamond_proxy_address, + block_commit_signature, + past_l1_batch: None, + }) + } + + async fn l1_batch_seal_timestamp(&self, number: L1BatchNumber) -> anyhow::Result { + let mut storage = self.pool.connection_tagged("tree_data_fetcher").await?; + let (_, last_l2_block_number) = storage + .blocks_dal() + .get_l2_block_range_of_l1_batch(number) + .await? + .with_context(|| format!("L1 batch #{number} does not have L2 blocks"))?; + let block_header = storage + .blocks_dal() + .get_l2_block_header(last_l2_block_number) + .await? + .with_context(|| format!("L2 block #{last_l2_block_number} (last block in L1 batch #{number}) disappeared"))?; + Ok(block_header.timestamp) + } + + /// Guesses the number of an L1 block with a `BlockCommit` event for the specified L1 batch. + /// The guess is based on the L1 batch seal timestamp. + async fn guess_l1_commit_block_number( + eth_client: &DynClient, + l1_batch_seal_timestamp: u64, + ) -> EnrichedClientResult<(U64, usize)> { + let l1_batch_seal_timestamp = U256::from(l1_batch_seal_timestamp); + let (latest_number, latest_timestamp) = + Self::get_block(eth_client, web3::BlockNumber::Latest).await?; + if latest_timestamp < l1_batch_seal_timestamp { + return Ok((latest_number, 0)); // No better estimate at this point + } + let (earliest_number, earliest_timestamp) = + Self::get_block(eth_client, web3::BlockNumber::Earliest).await?; + if earliest_timestamp > l1_batch_seal_timestamp { + return Ok((earliest_number, 0)); // No better estimate at this point + } + + // At this point, we have `earliest_timestamp <= l1_batch_seal_timestamp <= latest_timestamp`. + // Binary-search the range until we're sort of accurate. + let mut steps = 0; + let mut left = earliest_number; + let mut right = latest_number; + while left + Self::L1_BLOCK_ACCURACY < right { + let middle = (left + right) / 2; + let (_, middle_timestamp) = + Self::get_block(eth_client, web3::BlockNumber::Number(middle)).await?; + if middle_timestamp <= l1_batch_seal_timestamp { + left = middle; + } else { + right = middle; + } + steps += 1; + } + Ok((left, steps)) + } + + /// Gets a block that should be present on L1. 
+ async fn get_block( + eth_client: &DynClient, + number: web3::BlockNumber, + ) -> EnrichedClientResult<(U64, U256)> { + let block = eth_client.block(number.into()).await?.ok_or_else(|| { + let err = "block is missing on L1 RPC provider"; + EnrichedClientError::new(ClientError::Custom(err.into()), "get_block") + .with_arg("number", &number) + })?; + let number = block.number.ok_or_else(|| { + let err = "block is missing a number"; + EnrichedClientError::new(ClientError::Custom(err.into()), "get_block") + .with_arg("number", &number) + })?; + Ok((number, block.timestamp)) + } + + pub fn with_fallback(self, fallback: Box) -> CombinedDataProvider { + CombinedDataProvider { + l1: Some(self), + fallback, + } + } +} + +#[async_trait] +impl TreeDataProvider for L1DataProvider { + async fn batch_details(&mut self, number: L1BatchNumber) -> TreeDataProviderResult { + let l1_batch_seal_timestamp = self.l1_batch_seal_timestamp(number).await?; + let from_block = self.past_l1_batch.and_then(|info| { + assert!( + info.number < number, + "`batch_details()` must be called with monotonically increasing numbers" + ); + let threshold_timestamp = info.l1_commit_block_timestamp + Self::L1_BLOCK_RANGE.as_u64() / 2; + if U256::from(l1_batch_seal_timestamp) > threshold_timestamp { + tracing::debug!( + number = number.0, + "L1 batch #{number} seal timestamp ({l1_batch_seal_timestamp}) is too far ahead \ + of the previous processed L1 batch ({info:?}); not using L1 batch info" + ); + None + } else { + // This is an exact lower boundary: L1 batches are committed in order + Some(info.l1_commit_block_number) + } + }); + + let from_block = match from_block { + Some(number) => number, + None => { + let (approximate_block, steps) = Self::guess_l1_commit_block_number( + self.eth_client.as_ref(), + l1_batch_seal_timestamp, + ) + .await?; + tracing::debug!( + number = number.0, + "Guessed L1 block number for L1 batch #{number} commit in {steps} binary search steps: {approximate_block}" + ); + METRICS + .l1_commit_block_number_binary_search_steps + .observe(steps); + // Subtract to account for imprecise L1 and L2 timestamps etc. 
+ approximate_block.saturating_sub(Self::L1_BLOCK_ACCURACY) + } + }; + + let number_topic = H256::from_low_u64_be(number.0.into()); + let filter = web3::FilterBuilder::default() + .address(vec![self.diamond_proxy_address]) + .from_block(web3::BlockNumber::Number(from_block)) + .to_block(web3::BlockNumber::Number(from_block + Self::L1_BLOCK_RANGE)) + .topics( + Some(vec![self.block_commit_signature]), + Some(vec![number_topic]), + None, + None, + ) + .build(); + let mut logs = self.eth_client.logs(&filter).await?; + logs.retain(|log| !log.is_removed() && log.block_number.is_some()); + + match logs.as_slice() { + [] => Ok(Err(MissingData::Batch)), + [log] => { + let root_hash = log.topics.get(2).copied().ok_or_else(|| { + let err = "Bogus `BlockCommit` event, does not have the root hash topic"; + EnrichedClientError::new(ClientError::Custom(err.into()), "batch_details") + .with_arg("filter", &filter) + .with_arg("log", &log) + })?; + // `unwrap()` is safe due to the filtering above + let l1_commit_block_number = log.block_number.unwrap(); + let diff = l1_commit_block_number.saturating_sub(from_block).as_u64(); + METRICS.l1_commit_block_number_from_diff.observe(diff); + tracing::debug!( + "`BlockCommit` event for L1 batch #{number} is at block #{l1_commit_block_number}, \ + {diff} block(s) after the `from` block from the filter" + ); + + let l1_commit_block = self.eth_client.block(l1_commit_block_number.into()).await?; + let l1_commit_block = l1_commit_block.ok_or_else(|| { + let err = "Block disappeared from L1 RPC provider"; + EnrichedClientError::new(ClientError::Custom(err.into()), "batch_details") + .with_arg("number", &l1_commit_block_number) + })?; + self.past_l1_batch = Some(PastL1BatchInfo { + number, + l1_commit_block_number, + l1_commit_block_timestamp: l1_commit_block.timestamp, + }); + Ok(Ok(TreeDataProviderOutput { + root_hash, + source: TreeDataProviderSource::L1CommitEvent, + })) + } + _ => { + tracing::warn!("Non-unique `BlockCommit` event for L1 batch #{number} queried using {filter:?}: {logs:?}"); + Ok(Err(MissingData::RootHash)) + } + } + } +} + +/// Data provider combining [`L1DataProvider`] with a fallback provider. +#[derive(Debug)] +pub(super) struct CombinedDataProvider { + l1: Option, + fallback: Box, +} + +#[async_trait] +impl TreeDataProvider for CombinedDataProvider { + async fn batch_details(&mut self, number: L1BatchNumber) -> TreeDataProviderResult { + if let Some(l1) = &mut self.l1 { + match l1.batch_details(number).await { + Err(err) => { + if err.is_transient() { + tracing::info!( + number = number.0, + "Transient error calling L1 data provider: {err}" + ); + } else { + tracing::warn!( + number = number.0, + "Fatal error calling L1 data provider: {err}" + ); + self.l1 = None; + } + } + Ok(Ok(root_hash)) => return Ok(Ok(root_hash)), + Ok(Err(missing_data)) => { + tracing::debug!( + number = number.0, + "L1 data provider misses batch data: {missing_data}" + ); + // No sense of calling the L1 provider in the future; the L2 provider will very likely get information + // about batches significantly faster. + self.l1 = None; + } + } + } + self.fallback.batch_details(number).await + } +} diff --git a/core/node/node_sync/src/tree_data_fetcher/provider/tests.rs b/core/node/node_sync/src/tree_data_fetcher/provider/tests.rs new file mode 100644 index 000000000000..90b912b8816a --- /dev/null +++ b/core/node/node_sync/src/tree_data_fetcher/provider/tests.rs @@ -0,0 +1,249 @@ +//! Tests for tree data providers. 
+ +use assert_matches::assert_matches; +use once_cell::sync::Lazy; +use test_casing::test_casing; +use zksync_node_genesis::{insert_genesis_batch, GenesisParams}; +use zksync_web3_decl::client::MockClient; + +use super::*; +use crate::tree_data_fetcher::tests::{seal_l1_batch_with_timestamp, MockMainNodeClient}; + +const DIAMOND_PROXY_ADDRESS: Address = Address::repeat_byte(0x22); + +static BLOCK_COMMIT_SIGNATURE: Lazy = Lazy::new(|| { + zksync_contracts::hyperchain_contract() + .event("BlockCommit") + .expect("missing `BlockCommit` event") + .signature() +}); + +struct EthereumParameters { + block_number: U64, + // L1 block numbers in which L1 batches are committed starting from L1 batch #1 + l1_blocks_for_commits: Vec, +} + +impl EthereumParameters { + fn new(block_number: u64) -> Self { + Self { + block_number: block_number.into(), + l1_blocks_for_commits: vec![], + } + } + + fn push_commit(&mut self, l1_block_number: u64) { + assert!(l1_block_number <= self.block_number.as_u64()); + + let l1_block_number = U64::from(l1_block_number); + let last_commit = self.l1_blocks_for_commits.last().copied(); + let is_increasing = last_commit.map_or(true, |last_number| last_number <= l1_block_number); + assert!(is_increasing, "Invalid L1 block number for commit"); + + self.l1_blocks_for_commits.push(l1_block_number); + } + + fn filter_logs(logs: &[web3::Log], filter: web3::Filter) -> Vec { + let Some(web3::BlockNumber::Number(filter_from)) = filter.from_block else { + panic!("Unexpected filter: {filter:?}"); + }; + let Some(web3::BlockNumber::Number(filter_to)) = filter.to_block else { + panic!("Unexpected filter: {filter:?}"); + }; + let filter_block_range = filter_from..=filter_to; + + let filter_addresses = filter.address.unwrap().flatten(); + let filter_topics = filter.topics.unwrap(); + let filter_topics: Vec<_> = filter_topics + .into_iter() + .map(|topic| topic.map(web3::ValueOrArray::flatten)) + .collect(); + + let filtered_logs = logs.iter().filter(|log| { + if !filter_addresses.contains(&log.address) { + return false; + } + if !filter_block_range.contains(&log.block_number.unwrap()) { + return false; + } + filter_topics + .iter() + .zip(&log.topics) + .all(|(filter_topics, actual_topic)| match filter_topics { + Some(topics) => topics.contains(actual_topic), + None => true, + }) + }); + filtered_logs.cloned().collect() + } + + fn client(&self) -> MockClient { + let logs = self + .l1_blocks_for_commits + .iter() + .enumerate() + .map(|(i, &l1_block_number)| { + let l1_batch_number = H256::from_low_u64_be(i as u64 + 1); + let root_hash = H256::repeat_byte(i as u8 + 1); + web3::Log { + address: DIAMOND_PROXY_ADDRESS, + topics: vec![ + *BLOCK_COMMIT_SIGNATURE, + l1_batch_number, + root_hash, + H256::zero(), // commitment hash; not used + ], + block_number: Some(l1_block_number), + ..web3::Log::default() + } + }); + let logs: Vec<_> = logs.collect(); + let block_number = self.block_number; + + MockClient::builder(L1::default()) + .method("eth_blockNumber", move || Ok(block_number)) + .method( + "eth_getBlockByNumber", + move |number: web3::BlockNumber, with_txs: bool| { + assert!(!with_txs); + + let number = match number { + web3::BlockNumber::Number(number) => number, + web3::BlockNumber::Latest => block_number, + web3::BlockNumber::Earliest => U64::zero(), + _ => panic!("Unexpected number: {number:?}"), + }; + if number > block_number { + return Ok(None); + } + Ok(Some(web3::Block:: { + number: Some(number), + timestamp: U256::from(number.as_u64()), // timestamp == number + 
..web3::Block::default() + })) + }, + ) + .method("eth_getLogs", move |filter: web3::Filter| { + Ok(Self::filter_logs(&logs, filter)) + }) + .build() + } +} + +#[tokio::test] +async fn guessing_l1_commit_block_number() { + let eth_params = EthereumParameters::new(100_000); + let eth_client = eth_params.client(); + + for timestamp in [0, 100, 1_000, 5_000, 10_000, 100_000] { + let (guessed_block_number, step_count) = + L1DataProvider::guess_l1_commit_block_number(ð_client, timestamp) + .await + .unwrap(); + + assert!( + guessed_block_number.abs_diff(timestamp.into()) <= L1DataProvider::L1_BLOCK_ACCURACY, + "timestamp={timestamp}, guessed={guessed_block_number}" + ); + assert!(step_count > 0); + assert!(step_count < 100); + } +} + +async fn test_using_l1_data_provider(l1_batch_timestamps: &[u64]) { + let pool = ConnectionPool::::test_pool().await; + let mut storage = pool.connection().await.unwrap(); + insert_genesis_batch(&mut storage, &GenesisParams::mock()) + .await + .unwrap(); + + let mut eth_params = EthereumParameters::new(1_000_000); + for (number, &ts) in l1_batch_timestamps.iter().enumerate() { + let number = L1BatchNumber(number as u32 + 1); + seal_l1_batch_with_timestamp(&mut storage, number, ts).await; + eth_params.push_commit(ts + 1_000); // have a reasonable small diff between batch generation and commitment + } + drop(storage); + + let mut provider = + L1DataProvider::new(pool, Box::new(eth_params.client()), DIAMOND_PROXY_ADDRESS).unwrap(); + for i in 0..l1_batch_timestamps.len() { + let number = L1BatchNumber(i as u32 + 1); + let output = provider + .batch_details(number) + .await + .unwrap() + .expect("no root hash"); + assert_eq!(output.root_hash, H256::repeat_byte(number.0 as u8)); + assert_matches!(output.source, TreeDataProviderSource::L1CommitEvent); + + let past_l1_batch = provider.past_l1_batch.unwrap(); + assert_eq!(past_l1_batch.number, number); + let expected_l1_block_number = eth_params.l1_blocks_for_commits[i]; + assert_eq!( + past_l1_batch.l1_commit_block_number, + expected_l1_block_number + ); + assert_eq!( + past_l1_batch.l1_commit_block_timestamp, + expected_l1_block_number.as_u64().into() + ); + } +} + +#[test_casing(4, [500, 1_500, 10_000, 30_000])] +#[tokio::test] +async fn using_l1_data_provider(batch_spacing: u64) { + let l1_batch_timestamps: Vec<_> = (0..10).map(|i| 50_000 + batch_spacing * i).collect(); + test_using_l1_data_provider(&l1_batch_timestamps).await; +} + +#[tokio::test] +async fn combined_data_provider_errors() { + let pool = ConnectionPool::::test_pool().await; + let mut storage = pool.connection().await.unwrap(); + insert_genesis_batch(&mut storage, &GenesisParams::mock()) + .await + .unwrap(); + + let mut eth_params = EthereumParameters::new(1_000_000); + seal_l1_batch_with_timestamp(&mut storage, L1BatchNumber(1), 50_000).await; + eth_params.push_commit(51_000); + seal_l1_batch_with_timestamp(&mut storage, L1BatchNumber(2), 52_000).await; + drop(storage); + + let mut main_node_client = MockMainNodeClient::default(); + main_node_client.insert_batch(L1BatchNumber(2), H256::repeat_byte(2)); + let mut provider = + L1DataProvider::new(pool, Box::new(eth_params.client()), DIAMOND_PROXY_ADDRESS) + .unwrap() + .with_fallback(Box::new(main_node_client)); + + // L1 batch #1 should be obtained from L1 + let output = provider + .batch_details(L1BatchNumber(1)) + .await + .unwrap() + .expect("no root hash"); + assert_eq!(output.root_hash, H256::repeat_byte(1)); + assert_matches!(output.source, TreeDataProviderSource::L1CommitEvent); + 
assert!(provider.l1.is_some()); + + // L1 batch #2 should be obtained from L2 + let output = provider + .batch_details(L1BatchNumber(2)) + .await + .unwrap() + .expect("no root hash"); + assert_eq!(output.root_hash, H256::repeat_byte(2)); + assert_matches!(output.source, TreeDataProviderSource::BatchDetailsRpc); + assert!(provider.l1.is_none()); + + // L1 batch #3 is not present anywhere. + let missing = provider + .batch_details(L1BatchNumber(3)) + .await + .unwrap() + .unwrap_err(); + assert_matches!(missing, MissingData::Batch); +} diff --git a/core/node/node_sync/src/tree_data_fetcher/tests.rs b/core/node/node_sync/src/tree_data_fetcher/tests.rs index d1192e3ea942..35671861bb29 100644 --- a/core/node/node_sync/src/tree_data_fetcher/tests.rs +++ b/core/node/node_sync/src/tree_data_fetcher/tests.rs @@ -8,64 +8,82 @@ use std::{ }; use assert_matches::assert_matches; +use async_trait::async_trait; use test_casing::test_casing; use zksync_dal::Connection; use zksync_node_genesis::{insert_genesis_batch, GenesisParams}; -use zksync_node_test_utils::{create_l1_batch, prepare_recovery_snapshot}; +use zksync_node_test_utils::{create_l1_batch, create_l2_block, prepare_recovery_snapshot}; use zksync_types::{AccountTreeId, Address, L2BlockNumber, StorageKey, StorageLog, H256}; use zksync_web3_decl::jsonrpsee::core::ClientError; -use super::{metrics::StepOutcomeLabel, *}; +use super::{ + metrics::StepOutcomeLabel, + provider::{TreeDataProviderOutput, TreeDataProviderResult, TreeDataProviderSource}, + *, +}; #[derive(Debug, Default)] -struct MockMainNodeClient { +pub(super) struct MockMainNodeClient { transient_error: Arc, - batch_details_responses: HashMap, + batch_details_responses: HashMap, +} + +impl MockMainNodeClient { + pub fn insert_batch(&mut self, number: L1BatchNumber, root_hash: H256) { + self.batch_details_responses.insert(number, root_hash); + } } #[async_trait] -impl MainNodeClient for MockMainNodeClient { - async fn batch_details( - &self, - number: L1BatchNumber, - ) -> EnrichedClientResult> { +impl TreeDataProvider for MockMainNodeClient { + async fn batch_details(&mut self, number: L1BatchNumber) -> TreeDataProviderResult { if self.transient_error.fetch_and(false, Ordering::Relaxed) { let err = ClientError::RequestTimeout; - return Err(EnrichedClientError::new(err, "batch_details")); + return Err(EnrichedClientError::new(err, "batch_details").into()); } - Ok(self.batch_details_responses.get(&number).cloned()) + Ok(self + .batch_details_responses + .get(&number) + .map(|&root_hash| TreeDataProviderOutput { + root_hash, + source: TreeDataProviderSource::BatchDetailsRpc, + }) + .ok_or(MissingData::Batch)) } } -fn mock_l1_batch_details(number: L1BatchNumber, root_hash: Option) -> api::L1BatchDetails { - api::L1BatchDetails { - number, - base: api::BlockDetailsBase { - timestamp: number.0.into(), - l1_tx_count: 0, - l2_tx_count: 10, - root_hash, - status: api::BlockStatus::Sealed, - commit_tx_hash: None, - committed_at: None, - prove_tx_hash: None, - proven_at: None, - execute_tx_hash: None, - executed_at: None, - l1_gas_price: 123, - l2_fair_gas_price: 456, - base_system_contracts_hashes: Default::default(), - }, - } +async fn seal_l1_batch(storage: &mut Connection<'_, Core>, number: L1BatchNumber) { + seal_l1_batch_with_timestamp(storage, number, number.0.into()).await; } -async fn seal_l1_batch(storage: &mut Connection<'_, Core>, number: L1BatchNumber) { +pub(super) async fn seal_l1_batch_with_timestamp( + storage: &mut Connection<'_, Core>, + number: L1BatchNumber, + timestamp: u64, 
+) { let mut transaction = storage.start_transaction().await.unwrap(); + // Insert a single L2 block belonging to the batch. + let mut block_header = create_l2_block(number.0); + block_header.timestamp = timestamp; transaction .blocks_dal() - .insert_mock_l1_batch(&create_l1_batch(number.0)) + .insert_l2_block(&block_header) .await .unwrap(); + + let mut batch_header = create_l1_batch(number.0); + batch_header.timestamp = timestamp; + transaction + .blocks_dal() + .insert_mock_l1_batch(&batch_header) + .await + .unwrap(); + transaction + .blocks_dal() + .mark_l2_blocks_as_executed_in_l1_batch(batch_header.number) + .await + .unwrap(); + // One initial write per L1 batch let initial_writes = [StorageKey::new( AccountTreeId::new(Address::repeat_byte(1)), @@ -87,11 +105,12 @@ struct FetcherHarness { } impl FetcherHarness { - fn new(client: impl MainNodeClient, pool: ConnectionPool) -> Self { + fn new(client: impl TreeDataProvider, pool: ConnectionPool) -> Self { let (updates_sender, updates_receiver) = mpsc::unbounded_channel(); let metrics = &*Box::leak(Box::::default()); let fetcher = TreeDataFetcher { - main_node_client: Box::new(client), + data_provider: Box::new(client), + diamond_proxy_address: None, pool: pool.clone(), metrics, health_updater: ReactiveHealthCheck::new("tree_data_fetcher").1, @@ -117,12 +136,13 @@ async fn tree_data_fetcher_steps() { let mut client = MockMainNodeClient::default(); for number in 1..=5 { let number = L1BatchNumber(number); - let details = mock_l1_batch_details(number, Some(H256::from_low_u64_be(number.0.into()))); - client.batch_details_responses.insert(number, details); + client + .batch_details_responses + .insert(number, H256::from_low_u64_be(number.0.into())); seal_l1_batch(&mut storage, number).await; } - let fetcher = FetcherHarness::new(client, pool.clone()).fetcher; + let mut fetcher = FetcherHarness::new(client, pool.clone()).fetcher; for number in 1..=5 { let step_outcome = fetcher.step().await.unwrap(); assert_matches!( @@ -181,12 +201,13 @@ async fn tree_data_fetcher_steps_after_snapshot_recovery() { let mut client = MockMainNodeClient::default(); for i in 1..=5 { let number = snapshot.l1_batch_number + i; - let details = mock_l1_batch_details(number, Some(H256::from_low_u64_be(number.0.into()))); - client.batch_details_responses.insert(number, details); + client + .batch_details_responses + .insert(number, H256::from_low_u64_be(number.0.into())); seal_l1_batch(&mut storage, number).await; } - let fetcher = FetcherHarness::new(client, pool.clone()).fetcher; + let mut fetcher = FetcherHarness::new(client, pool.clone()).fetcher; for i in 1..=5 { let step_outcome = fetcher.step().await.unwrap(); assert_matches!( @@ -212,8 +233,9 @@ async fn tree_data_fetcher_recovers_from_transient_errors() { let mut client = MockMainNodeClient::default(); for number in 1..=5 { let number = L1BatchNumber(number); - let details = mock_l1_batch_details(number, Some(H256::from_low_u64_be(number.0.into()))); - client.batch_details_responses.insert(number, details); + client + .batch_details_responses + .insert(number, H256::from_low_u64_be(number.0.into())); } let transient_error = client.transient_error.clone(); @@ -278,21 +300,20 @@ impl SlowMainNode { } #[async_trait] -impl MainNodeClient for SlowMainNode { - async fn batch_details( - &self, - number: L1BatchNumber, - ) -> EnrichedClientResult> { +impl TreeDataProvider for SlowMainNode { + async fn batch_details(&mut self, number: L1BatchNumber) -> TreeDataProviderResult { if number != L1BatchNumber(1) { - return 
Ok(None); + return Ok(Err(MissingData::Batch)); } let request_count = self.request_count.fetch_add(1, Ordering::Relaxed); - let root_hash = if request_count >= self.compute_root_hash_after { - Some(H256::repeat_byte(1)) + Ok(if request_count >= self.compute_root_hash_after { + Ok(TreeDataProviderOutput { + root_hash: H256::repeat_byte(1), + source: TreeDataProviderSource::BatchDetailsRpc, + }) } else { - None - }; - Ok(Some(mock_l1_batch_details(number, root_hash))) + Err(MissingData::RootHash) + }) } } diff --git a/core/node/state_keeper/src/io/seal_logic/l2_block_seal_subtasks.rs b/core/node/state_keeper/src/io/seal_logic/l2_block_seal_subtasks.rs index e4c4ea39506e..48d4696c57ae 100644 --- a/core/node/state_keeper/src/io/seal_logic/l2_block_seal_subtasks.rs +++ b/core/node/state_keeper/src/io/seal_logic/l2_block_seal_subtasks.rs @@ -211,8 +211,8 @@ impl L2BlockSealSubtask for InsertFactoryDepsSubtask { .factory_deps_dal() .insert_factory_deps(command.l2_block.number, &command.l2_block.new_factory_deps) .await?; - progress.observe(command.l2_block.new_factory_deps.len()); } + progress.observe(command.l2_block.new_factory_deps.len()); Ok(()) } @@ -250,12 +250,11 @@ impl L2BlockSealSubtask for InsertTokensSubtask { extract_added_tokens(command.l2_shared_bridge_addr, &command.l2_block.events); progress.observe(added_tokens.len()); + let progress = L2_BLOCK_METRICS.start(L2BlockSealStage::InsertTokens, is_fictive); if !added_tokens.is_empty() { - let progress = L2_BLOCK_METRICS.start(L2BlockSealStage::InsertTokens, is_fictive); - let added_tokens_len = added_tokens.len(); connection.tokens_dal().add_tokens(&added_tokens).await?; - progress.observe(added_tokens_len); } + progress.observe(added_tokens.len()); Ok(()) } @@ -342,10 +341,12 @@ impl L2BlockSealSubtask for InsertL2ToL1LogsSubtask { progress.observe(user_l2_to_l1_log_count); let progress = L2_BLOCK_METRICS.start(L2BlockSealStage::InsertL2ToL1Logs, is_fictive); - connection - .events_dal() - .save_user_l2_to_l1_logs(command.l2_block.number, &user_l2_to_l1_logs) - .await?; + if !user_l2_to_l1_logs.is_empty() { + connection + .events_dal() + .save_user_l2_to_l1_logs(command.l2_block.number, &user_l2_to_l1_logs) + .await?; + } progress.observe(user_l2_to_l1_log_count); Ok(()) } diff --git a/core/node/state_keeper/src/updates/mod.rs b/core/node/state_keeper/src/updates/mod.rs index bb33a6f58678..772ee71641a0 100644 --- a/core/node/state_keeper/src/updates/mod.rs +++ b/core/node/state_keeper/src/updates/mod.rs @@ -123,7 +123,7 @@ impl UpdatesManager { ); } - pub(crate) fn finish_batch(&mut self, finished_batch: FinishedL1Batch) { + pub fn finish_batch(&mut self, finished_batch: FinishedL1Batch) { assert!( self.l1_batch.finished.is_none(), "Cannot finish already finished batch" diff --git a/core/node/vm_runner/Cargo.toml b/core/node/vm_runner/Cargo.toml index 67de95f60cb0..b3ede5a796be 100644 --- a/core/node/vm_runner/Cargo.toml +++ b/core/node/vm_runner/Cargo.toml @@ -17,6 +17,7 @@ zksync_contracts.workspace = true zksync_state.workspace = true zksync_storage.workspace = true zksync_state_keeper.workspace = true +zksync_utils.workspace = true vm_utils.workspace = true tokio = { workspace = true, features = ["time"] } @@ -30,7 +31,6 @@ dashmap.workspace = true zksync_node_test_utils.workspace = true zksync_node_genesis.workspace = true zksync_test_account.workspace = true -zksync_utils.workspace = true backon.workspace = true futures = { workspace = true, features = ["compat"] } rand.workspace = true diff --git 
a/core/node/vm_runner/src/impls/mod.rs b/core/node/vm_runner/src/impls/mod.rs new file mode 100644 index 000000000000..70d01f6932ef --- /dev/null +++ b/core/node/vm_runner/src/impls/mod.rs @@ -0,0 +1,3 @@ +mod protective_reads; + +pub use protective_reads::{ProtectiveReadsWriter, ProtectiveReadsWriterTasks}; diff --git a/core/node/vm_runner/src/impls/protective_reads.rs b/core/node/vm_runner/src/impls/protective_reads.rs new file mode 100644 index 000000000000..03a5f1254aa6 --- /dev/null +++ b/core/node/vm_runner/src/impls/protective_reads.rs @@ -0,0 +1,193 @@ +use std::sync::Arc; + +use anyhow::Context; +use async_trait::async_trait; +use tokio::sync::watch; +use zksync_dal::{Connection, ConnectionPool, Core, CoreDal}; +use zksync_state_keeper::{MainBatchExecutor, StateKeeperOutputHandler, UpdatesManager}; +use zksync_types::{zk_evm_types::LogQuery, AccountTreeId, L1BatchNumber, L2ChainId, StorageKey}; +use zksync_utils::u256_to_h256; + +use crate::{ + storage::StorageSyncTask, ConcurrentOutputHandlerFactory, ConcurrentOutputHandlerFactoryTask, + OutputHandlerFactory, VmRunner, VmRunnerIo, VmRunnerStorage, +}; + +/// A standalone component that writes protective reads asynchronously to state keeper. +#[derive(Debug)] +pub struct ProtectiveReadsWriter { + vm_runner: VmRunner, +} + +impl ProtectiveReadsWriter { + /// Create a new protective reads writer from the provided DB parameters and window size which + /// regulates how many batches this component can handle at the same time. + pub async fn new( + pool: ConnectionPool, + rocksdb_path: String, + chain_id: L2ChainId, + window_size: u32, + ) -> anyhow::Result<(Self, ProtectiveReadsWriterTasks)> { + let io = ProtectiveReadsIo { window_size }; + let (loader, loader_task) = + VmRunnerStorage::new(pool.clone(), rocksdb_path, io.clone(), chain_id).await?; + let output_handler_factory = ProtectiveReadsOutputHandlerFactory { pool: pool.clone() }; + let (output_handler_factory, output_handler_factory_task) = + ConcurrentOutputHandlerFactory::new(pool.clone(), io.clone(), output_handler_factory); + let batch_processor = MainBatchExecutor::new(false, false); + let vm_runner = VmRunner::new( + pool, + Box::new(io), + Arc::new(loader), + Box::new(output_handler_factory), + Box::new(batch_processor), + ); + Ok(( + Self { vm_runner }, + ProtectiveReadsWriterTasks { + loader_task, + output_handler_factory_task, + }, + )) + } + + /// Continuously loads new available batches and writes the corresponding protective reads + /// produced by that batch. + /// + /// # Errors + /// + /// Propagates RocksDB and Postgres errors. + pub async fn run(self, stop_receiver: &watch::Receiver) -> anyhow::Result<()> { + self.vm_runner.run(stop_receiver).await + } +} + +/// A collections of tasks that need to be run in order for protective reads writer to work as +/// intended. +#[derive(Debug)] +pub struct ProtectiveReadsWriterTasks { + /// Task that synchronizes storage with new available batches. + pub loader_task: StorageSyncTask, + /// Task that handles output from processed batches. + pub output_handler_factory_task: ConcurrentOutputHandlerFactoryTask, +} + +#[derive(Debug, Clone)] +pub struct ProtectiveReadsIo { + window_size: u32, +} + +#[async_trait] +impl VmRunnerIo for ProtectiveReadsIo { + fn name(&self) -> &'static str { + "protective_reads_writer" + } + + async fn latest_processed_batch( + &self, + conn: &mut Connection<'_, Core>, + ) -> anyhow::Result { + Ok(conn + .vm_runner_dal() + .get_protective_reads_latest_processed_batch() + .await?) 
+ } + + async fn last_ready_to_be_loaded_batch( + &self, + conn: &mut Connection<'_, Core>, + ) -> anyhow::Result { + Ok(conn + .vm_runner_dal() + .get_protective_reads_last_ready_batch(self.window_size) + .await?) + } + + async fn mark_l1_batch_as_completed( + &self, + conn: &mut Connection<'_, Core>, + l1_batch_number: L1BatchNumber, + ) -> anyhow::Result<()> { + Ok(conn + .vm_runner_dal() + .mark_protective_reads_batch_as_completed(l1_batch_number) + .await?) + } +} + +#[derive(Debug)] +struct ProtectiveReadsOutputHandler { + pool: ConnectionPool, +} + +#[async_trait] +impl StateKeeperOutputHandler for ProtectiveReadsOutputHandler { + async fn handle_l2_block(&mut self, _updates_manager: &UpdatesManager) -> anyhow::Result<()> { + Ok(()) + } + + async fn handle_l1_batch( + &mut self, + updates_manager: Arc, + ) -> anyhow::Result<()> { + let finished_batch = updates_manager + .l1_batch + .finished + .as_ref() + .context("L1 batch is not actually finished")?; + let (_, protective_reads): (Vec, Vec) = finished_batch + .final_execution_state + .deduplicated_storage_log_queries + .iter() + .partition(|log_query| log_query.rw_flag); + + let mut connection = self + .pool + .connection_tagged("protective_reads_writer") + .await?; + let mut expected_protective_reads = connection + .storage_logs_dedup_dal() + .get_protective_reads_for_l1_batch(updates_manager.l1_batch.number) + .await?; + + for protective_read in protective_reads { + let address = AccountTreeId::new(protective_read.address); + let key = u256_to_h256(protective_read.key); + if !expected_protective_reads.remove(&StorageKey::new(address, key)) { + tracing::error!( + l1_batch_number = %updates_manager.l1_batch.number, + address = %protective_read.address, + key = %key, + "VM runner produced a protective read that did not happen in state keeper" + ); + } + } + for remaining_read in expected_protective_reads { + tracing::error!( + l1_batch_number = %updates_manager.l1_batch.number, + address = %remaining_read.address(), + key = %remaining_read.key(), + "State keeper produced a protective read that did not happen in VM runner" + ); + } + + Ok(()) + } +} + +#[derive(Debug)] +struct ProtectiveReadsOutputHandlerFactory { + pool: ConnectionPool, +} + +#[async_trait] +impl OutputHandlerFactory for ProtectiveReadsOutputHandlerFactory { + async fn create_handler( + &mut self, + _l1_batch_number: L1BatchNumber, + ) -> anyhow::Result> { + Ok(Box::new(ProtectiveReadsOutputHandler { + pool: self.pool.clone(), + })) + } +} diff --git a/core/node/vm_runner/src/lib.rs b/core/node/vm_runner/src/lib.rs index 4664d4eb8e11..ca9f8bdc0eb4 100644 --- a/core/node/vm_runner/src/lib.rs +++ b/core/node/vm_runner/src/lib.rs @@ -3,6 +3,7 @@ #![warn(missing_debug_implementations, missing_docs)] +mod impls; mod io; mod output_handler; mod process; @@ -11,9 +12,10 @@ mod storage; #[cfg(test)] mod tests; +pub use impls::{ProtectiveReadsWriter, ProtectiveReadsWriterTasks}; pub use io::VmRunnerIo; pub use output_handler::{ ConcurrentOutputHandlerFactory, ConcurrentOutputHandlerFactoryTask, OutputHandlerFactory, }; pub use process::VmRunner; -pub use storage::{BatchExecuteData, VmRunnerStorage}; +pub use storage::{BatchExecuteData, StorageSyncTask, VmRunnerStorage}; diff --git a/core/node/vm_runner/src/output_handler.rs b/core/node/vm_runner/src/output_handler.rs index 30fe9e0c9010..49bed83cd96e 100644 --- a/core/node/vm_runner/src/output_handler.rs +++ b/core/node/vm_runner/src/output_handler.rs @@ -203,6 +203,11 @@ impl Debug for ConcurrentOutputHandlerFactoryTask { } 
impl ConcurrentOutputHandlerFactoryTask { + /// Access the underlying [`VmRunnerIo`]. + pub fn io(&self) -> &Io { + &self.io + } + /// Starts running the task which is supposed to last until the end of the node's lifetime. /// /// # Errors diff --git a/core/node/vm_runner/src/process.rs b/core/node/vm_runner/src/process.rs index 5ff7d7cc0b87..5e51b5e658f7 100644 --- a/core/node/vm_runner/src/process.rs +++ b/core/node/vm_runner/src/process.rs @@ -109,10 +109,11 @@ impl VmRunner { .await .context("VM runner failed to handle L2 block")?; } - batch_executor + let finished_batch = batch_executor .finish_batch() .await .context("failed finishing L1 batch in executor")?; + updates_manager.finish_batch(finished_batch); output_handler .handle_l1_batch(Arc::new(updates_manager)) .await diff --git a/core/node/vm_runner/src/storage.rs b/core/node/vm_runner/src/storage.rs index 5ffd1d11e70d..e7a8b147c76f 100644 --- a/core/node/vm_runner/src/storage.rs +++ b/core/node/vm_runner/src/storage.rs @@ -271,6 +271,17 @@ impl StorageSyncTask { }) } + /// Access the underlying [`VmRunnerIo`]. + pub fn io(&self) -> &Io { + &self.io + } + + /// Block until RocksDB cache instance is caught up with Postgres and then continuously makes + /// sure that the new ready batches are loaded into the cache. + /// + /// # Errors + /// + /// Propagates RocksDB and Postgres errors. pub async fn run(self, stop_receiver: watch::Receiver) -> anyhow::Result<()> { const SLEEP_INTERVAL: Duration = Duration::from_millis(50); @@ -289,10 +300,10 @@ impl StorageSyncTask { if rocksdb_builder.l1_batch_number().await == Some(latest_processed_batch + 1) { // RocksDB is already caught up, we might not need to do anything. // Just need to check that the memory diff is up-to-date in case this is a fresh start. + let last_ready_batch = self.io.last_ready_to_be_loaded_batch(&mut conn).await?; let state = self.state.read().await; - if state - .storage - .contains_key(&self.io.last_ready_to_be_loaded_batch(&mut conn).await?) + if last_ready_batch == latest_processed_batch + || state.storage.contains_key(&last_ready_batch) { // No need to do anything, killing time until last processed batch is updated. 
drop(conn); diff --git a/core/tests/loadnext/src/sdk/ethereum/mod.rs b/core/tests/loadnext/src/sdk/ethereum/mod.rs index 1c45d8b5b56a..6800fb75a7d3 100644 --- a/core/tests/loadnext/src/sdk/ethereum/mod.rs +++ b/core/tests/loadnext/src/sdk/ethereum/mod.rs @@ -4,7 +4,8 @@ use std::time::{Duration, Instant}; use serde_json::{Map, Value}; use zksync_eth_client::{ - clients::SigningClient, BoundEthInterface, CallFunctionArgs, Error, EthInterface, Options, + clients::SigningClient, BoundEthInterface, CallFunctionArgs, ContractCallError, EthInterface, + Options, }; use zksync_eth_signer::EthereumSigner; use zksync_types::{ @@ -158,7 +159,9 @@ impl EthereumProvider { .call(self.query_client()) .await .map_err(|err| match err { - Error::EthereumGateway(err) => ClientError::NetworkError(err.to_string()), + ContractCallError::EthereumGateway(err) => { + ClientError::NetworkError(err.to_string()) + } _ => ClientError::MalformedResponse(err.to_string()), }) } @@ -193,7 +196,9 @@ impl EthereumProvider { .call(self.query_client()) .await .map_err(|err| match err { - Error::EthereumGateway(err) => ClientError::NetworkError(err.to_string()), + ContractCallError::EthereumGateway(err) => { + ClientError::NetworkError(err.to_string()) + } _ => ClientError::MalformedResponse(err.to_string()), }) } @@ -360,7 +365,7 @@ impl EthereumProvider { gas_limit: U256, gas_per_pubdata_byte: u32, gas_price: Option, - ) -> Result { + ) -> Result { let gas_price = if let Some(gas_price) = gas_price { gas_price } else { diff --git a/core/tests/test_account/src/lib.rs b/core/tests/test_account/src/lib.rs index 37da054f53bf..089e3b69b3ed 100644 --- a/core/tests/test_account/src/lib.rs +++ b/core/tests/test_account/src/lib.rs @@ -169,12 +169,10 @@ impl Account { serial_id: PriorityOpId(serial_id), max_fee_per_gas, canonical_tx_hash: H256::from_low_u64_be(serial_id), - deadline_block: 100000, layer_2_tip_fee: Default::default(), op_processing_type: OpProcessingType::Common, priority_queue_type: PriorityQueueType::Deque, - eth_hash: H256::random(), - eth_block: 1, + eth_block: 0, refund_recipient: self.address, full_fee: Default::default(), }), diff --git a/core/tests/ts-integration/contracts/custom-account/interfaces/ISystemContext.sol b/core/tests/ts-integration/contracts/custom-account/interfaces/ISystemContext.sol new file mode 100644 index 000000000000..6b83f6d6ada4 --- /dev/null +++ b/core/tests/ts-integration/contracts/custom-account/interfaces/ISystemContext.sol @@ -0,0 +1,61 @@ +// SPDX-License-Identifier: MIT + +pragma solidity ^0.8.0; + +/** + * @author Matter Labs + * @custom:security-contact security@matterlabs.dev + * @notice Contract that stores some of the context variables, that may be either + * block-scoped, tx-scoped or system-wide. + */ +interface ISystemContext { + struct BlockInfo { + uint128 timestamp; + uint128 number; + } + + /// @notice A structure representing the timeline for the upgrade from the batch numbers to the L2 block numbers. + /// @dev It will be used for the L1 batch -> L2 block migration in Q3 2023 only. + struct VirtualBlockUpgradeInfo { + /// @notice In order to maintain consistent results for `blockhash` requests, we'll + /// have to remember the number of the batch when the upgrade to the virtual blocks has been done. + /// The hashes for virtual blocks before the upgrade are identical to the hashes of the corresponding batches. + uint128 virtualBlockStartBatch; + /// @notice L2 block when the virtual blocks have caught up with the L2 blocks. 
Starting from this block, + /// all the information returned to users for block.timestamp/number, etc should be the information about the L2 blocks and + /// not virtual blocks. + uint128 virtualBlockFinishL2Block; + } + + function chainId() external view returns (uint256); + + function origin() external view returns (address); + + function gasPrice() external view returns (uint256); + + function blockGasLimit() external view returns (uint256); + + function coinbase() external view returns (address); + + function difficulty() external view returns (uint256); + + function baseFee() external view returns (uint256); + + function txNumberInBlock() external view returns (uint16); + + function getBlockHashEVM(uint256 _block) external view returns (bytes32); + + function getBatchHash(uint256 _batchNumber) external view returns (bytes32 hash); + + function getBlockNumber() external view returns (uint128); + + function getBlockTimestamp() external view returns (uint128); + + function getBatchNumberAndTimestamp() external view returns (uint128 blockNumber, uint128 blockTimestamp); + + function getL2BlockNumberAndTimestamp() external view returns (uint128 blockNumber, uint128 blockTimestamp); + + function gasPerPubdataByte() external view returns (uint256 gasPerPubdataByte); + + function getCurrentPubdataSpent() external view returns (uint256 currentPubdataSpent); +} diff --git a/core/tests/ts-integration/src/env.ts b/core/tests/ts-integration/src/env.ts index 60175d621da9..c440e6b08ea6 100644 --- a/core/tests/ts-integration/src/env.ts +++ b/core/tests/ts-integration/src/env.ts @@ -61,6 +61,7 @@ async function loadTestEnvironmentFromFile(chain: string): Promise { const baseTokenAddressL2 = L2_BASE_TOKEN_ADDRESS; const l2ChainId = parseInt(process.env.CHAIN_ETH_ZKSYNC_NETWORK_ID!); - const l1BatchCommitDataGeneratorMode = process.env - .CHAIN_STATE_KEEPER_L1_BATCH_COMMIT_DATA_GENERATOR_MODE! as DataAvailabityMode; + // If the `CHAIN_STATE_KEEPER_L1_BATCH_COMMIT_DATA_GENERATOR_MODE` is not set, the default value is `Rollup`. 
+ const l1BatchCommitDataGeneratorMode = (process.env.CHAIN_STATE_KEEPER_L1_BATCH_COMMIT_DATA_GENERATOR_MODE || + process.env.EN_L1_BATCH_COMMIT_DATA_GENERATOR_MODE || + 'Rollup') as DataAvailabityMode; let minimalL2GasPrice; if (process.env.CHAIN_STATE_KEEPER_MINIMAL_L2_GAS_PRICE !== undefined) { minimalL2GasPrice = ethers.BigNumber.from(process.env.CHAIN_STATE_KEEPER_MINIMAL_L2_GAS_PRICE!); @@ -355,7 +358,6 @@ function loadEcosystem(pathToHome: string): any { function loadConfig(pathToHome: string, chainName: string, config: string): any { const configPath = path.join(pathToHome, `/chains/${chainName}/configs/${config}`); if (!fs.existsSync(configPath)) { - console.log('no existe pa'); return []; } return yaml.parse( diff --git a/core/tests/ts-integration/src/helpers.ts b/core/tests/ts-integration/src/helpers.ts index 966a77b3fb8e..d3464bc84bdd 100644 --- a/core/tests/ts-integration/src/helpers.ts +++ b/core/tests/ts-integration/src/helpers.ts @@ -4,6 +4,8 @@ import * as ethers from 'ethers'; import * as hre from 'hardhat'; import { ZkSyncArtifact } from '@matterlabs/hardhat-zksync-solc/dist/src/types'; +export const SYSTEM_CONTEXT_ADDRESS = '0x000000000000000000000000000000000000800b'; + /** * Loads the test contract * diff --git a/core/tests/ts-integration/tests/fees.test.ts b/core/tests/ts-integration/tests/fees.test.ts index a2a72cfa5be3..699b9e5e886b 100644 --- a/core/tests/ts-integration/tests/fees.test.ts +++ b/core/tests/ts-integration/tests/fees.test.ts @@ -16,8 +16,11 @@ import { TestMaster } from '../src/index'; import * as zksync from 'zksync-ethers'; import { BigNumber, ethers } from 'ethers'; import { DataAvailabityMode, Token } from '../src/types'; +import { keccak256 } from 'ethers/lib/utils'; +import { SYSTEM_CONTEXT_ADDRESS, getTestContract } from '../src/helpers'; const UINT32_MAX = BigNumber.from(2).pow(32).sub(1); +const MAX_GAS_PER_PUBDATA = 50_000; const logs = fs.createWriteStream('fees.log', { flags: 'a' }); @@ -168,6 +171,15 @@ testFees('Test fees', () => { const receipt = await tx.wait(); expect(receipt.gasUsed.gt(UINT32_MAX)).toBeTruthy(); + // Let's also check that the same transaction would work as eth_call + const systemContextArtifact = getTestContract('ISystemContext'); + const systemContext = new ethers.Contract(SYSTEM_CONTEXT_ADDRESS, systemContextArtifact.abi, alice.provider); + const systemContextGasPerPubdataByte = await systemContext.gasPerPubdataByte(); + expect(systemContextGasPerPubdataByte.toNumber()).toEqual(MAX_GAS_PER_PUBDATA); + + const dataHash = await l1Messenger.callStatic.sendToL1(largeData, { type: 0 }); + expect(dataHash).toEqual(keccak256(largeData)); + // Secondly, let's test an unsuccessful transaction with large refund. // The size of the data has increased, so the previous gas limit is not enough. 
diff --git a/core/tests/ts-integration/tests/system.test.ts b/core/tests/ts-integration/tests/system.test.ts
index c46916c4ec67..2934226eed8f 100644
--- a/core/tests/ts-integration/tests/system.test.ts
+++ b/core/tests/ts-integration/tests/system.test.ts
@@ -13,7 +13,8 @@ import * as zksync from 'zksync-ethers';
 import * as ethers from 'ethers';
 import { BigNumberish, BytesLike } from 'ethers';
 import { hashBytecode, serialize } from 'zksync-ethers/build/utils';
-import { getTestContract } from '../src/helpers';
+import { SYSTEM_CONTEXT_ADDRESS, getTestContract } from '../src/helpers';
+import { DataAvailabityMode } from '../src/types';

 const contracts = {
     counter: getTestContract('Counter'),
@@ -311,6 +312,20 @@ describe('System behavior checks', () => {
         ).toBeAccepted();
     });

+    test('Gas per pubdata byte getter should work', async () => {
+        const systemContextArtifact = getTestContract('ISystemContext');
+        const systemContext = new ethers.Contract(SYSTEM_CONTEXT_ADDRESS, systemContextArtifact.abi, alice.provider);
+        const currentGasPerPubdata = await systemContext.gasPerPubdataByte();
+
+        // The current gas per pubdata depends on a lot of factors, so it wouldn't be sustainable to check the exact value.
+        // We just check that it is greater than zero for rollups and equal to zero otherwise.
+        if (testMaster.environment().l1BatchCommitDataGeneratorMode === DataAvailabityMode.Rollup) {
+            expect(currentGasPerPubdata.toNumber()).toBeGreaterThan(0);
+        } else {
+            expect(currentGasPerPubdata.toNumber()).toEqual(0);
+        }
+    });
+
     it('should reject transaction with huge gas limit', async () => {
         await expect(
             alice.sendTransaction({ to: alice.address, gasLimit: ethers.BigNumber.from(2).pow(51) })
diff --git a/docs/guides/advanced/pubdata-with-blobs.md b/docs/guides/advanced/pubdata-with-blobs.md
new file mode 100644
index 000000000000..e27372e934ef
--- /dev/null
+++ b/docs/guides/advanced/pubdata-with-blobs.md
@@ -0,0 +1,300 @@
+# Pubdata Post 4844
+
+## Motivation
+
+EIP-4844, commonly known as Proto-Danksharding, is an upgrade to the Ethereum protocol that introduces a new data
+availability solution embedded in layer 1. More information about it can be found
+[here](https://ethereum.org/en/roadmap/danksharding/). With proto-danksharding we can utilize the new blob data
+availability for cheaper storage of pubdata when we commit batches, resulting in more transactions per batch and
+cheaper batches/transactions. We want to ensure we have the flexibility at the contract level to process pubdata both
+via calldata and via blobs. A quick callout here: while 4844 introduces blobs as a new DA layer, it is only the first
+step toward full Danksharding. With full Danksharding, Ethereum will be able to handle a total of 64 blobs per block,
+unlike 4844 which supports just 6 per block.
+
+> 💡 Given the nature of 4844 development from a Solidity viewpoint, we’ve had to create a temporary contract
+> `BlobVersionedHash.yul` which acts in place of the eventual `BLOBHASH` opcode.
+
+## Technical Approach
+
+The approach spans both L2 system contracts and L1 zkSync contracts (namely `Executor.sol`). When a batch is sealed on
+L2 we will chunk it into blob-sized pieces (4096 elements \* 31 bytes, as required by our circuits), take the hash of
+each chunk, and send them to L1 via system logs. Within `Executor.sol`, when we are dealing with blob-based
+commitments, we verify that the blob contains the correct data with the point evaluation precompile. If the batch
If the batch +utilizes calldata instead, the processing should remain the same as in a pre-4844 zkSync. Regardless of if pubdata is in +calldata or blobs are used, the batch’s commitment changes as we include new data within the auxiliary output. + +Given that this is the first step to a longer-term solution, and the restrictions of proto-danksharding that get lifted +for full danksharding, we impose the following constraints: + +1. we will support a maximum of 2 blobs per batch +2. only 1 batch will be committed in a given transaction +3. we will always send 2 system logs (one for each potential blob commitment) even if the batch only uses 1 blob. + +This simplifies the processing logic on L1 and stops us from increasing the blob base fee (increases when there 3 or +more blobs in a given block). + +## Backward-compatibility + +While some of the parameter formatting changes, we maintain the same function signature for `commitBatches` and still +allow for pubdata to be submitted via calldata: + +```solidity +struct StoredBatchInfo { + uint64 batchNumber; + bytes32 batchHash; + uint64 indexRepeatedStorageChanges; + uint256 numberOfLayer1Txs; + bytes32 priorityOperationsHash; + bytes32 l2LogsTreeRoot; + uint256 timestamp; + bytes32 commitment; +} + +struct CommitBatchInfo { + uint64 batchNumber; + uint64 timestamp; + uint64 indexRepeatedStorageChanges; + bytes32 newStateRoot; + uint256 numberOfLayer1Txs; + bytes32 priorityOperationsHash; + bytes32 bootloaderHeapInitialContentsHash; + bytes32 eventsQueueStateHash; + bytes systemLogs; + bytes pubdataCommitments; +} + +function commitBatches(StoredBatchInfo calldata _lastCommittedBatchData, CommitBatchInfo[] calldata _newBatchesData) + external; + +``` + +## Implementation + +### Bootloader Memory + +With the increase in the amount of pubdata due to blobs, changes can be made to the bootloader memory to facilitate more +l2 to l1 logs, compressed bytecodes, and pubdata. We take the naive approach for l2 to l1 logs and the compressed +bytecode, doubling their previous constraints from `2048` logs and `32768 slots` to `4096 logs` and `65536 slots` +respectively. We then increase the number of slots for pubdata from `208000` to `411900`. Copying the comment around +pubdata slot calculation from our code: + +```solidity +One of "worst case" scenarios for the number of state diffs in a batch is when 240kb of pubdata is spent +on repeated writes, that are all zeroed out. In this case, the number of diffs is 240k / 5 = 48k. This means that they will have +accommodate 13056000 bytes of calldata for the uncompressed state diffs. Adding 120k on top leaves us with +roughly 13176000 bytes needed for calldata. 411750 slots are needed to accommodate this amount of data. +We round up to 411900 slots just in case. +``` + +The overall bootloader max memory is increased from `24000000` to `30000000` bytes to accommodate the increases. + +### L2 System Contracts + +We introduce a new system contract PubdataChunkPublisher that takes the full pubdata, creates chunks that are each +126,976 bytes in length (this is calculated as 4096 elements per blob each of which has 31 bytes), and commits them in +the form of 2 system logs. 
+
+We have the following keys for system logs:
+
+```solidity
+enum SystemLogKey {
+    L2_TO_L1_LOGS_TREE_ROOT_KEY,
+    TOTAL_L2_TO_L1_PUBDATA_KEY,
+    STATE_DIFF_HASH_KEY,
+    PACKED_BATCH_AND_L2_BLOCK_TIMESTAMP_KEY,
+    PREV_BATCH_HASH_KEY,
+    CHAINED_PRIORITY_TXN_HASH_KEY,
+    NUMBER_OF_LAYER_1_TXS_KEY,
+    BLOB_ONE_HASH_KEY,
+    BLOB_TWO_HASH_KEY,
+    EXPECTED_SYSTEM_CONTRACT_UPGRADE_TX_HASH_KEY
+}
+
+```
+
+In addition to the blob commitments, the hash of the total pubdata is still sent and is used if a batch is committed
+with pubdata as calldata rather than as blob data. As stated earlier, even when we only have enough pubdata for a
+single blob, 2 system logs are sent. The hash value in the second log in this case will be `bytes32(0)`.
+
+One important thing is that we don’t try to reason about the data here; that is done in the L1Messenger and Compressor
+contracts. The main purpose of this is to commit to blobs and have those commitments travel to L1 via system logs.
+
+### L1 Executor Facet
+
+While the function signature for `commitBatches` and the structure of `CommitBatchInfo` stay the same, the format of
+`CommitBatchInfo.pubdataCommitments` changes. Before 4844, this field held a byte array of pubdata; now it can hold
+either the total pubdata as before, or a list of concatenated info for kzg blob commitments. To differentiate between
+the two, a header byte is prepended to the byte array. At the moment we only support 2 values:
+
+```solidity
+/// @dev Enum used to determine the source of pubdata. At first we will support calldata and blobs but this can be extended.
+enum PubdataSource {
+    Calldata = 0,
+    Blob = 1
+}
+```
+
+We reject all other values in the first byte.
+
+### Calldata Based Pubdata Processing
+
+When using calldata, we want to operate on `pubdataCommitments[1:pubdataCommitments.length - 32]` as this is the full
+pubdata that was committed to via system logs. The reason we don’t operate on the last 32 bytes is that we also include
+what the blob commitment for this data would be, as a way to make our witness generation more generic. Only a single
+blob commitment is needed for this as the max size of calldata is the same size as a single blob. When processing the
+system logs in this context, we will check the hash of the supplied pubdata, without the 1-byte header for the pubdata
+source, against the value in the corresponding system log with key `TOTAL_L2_TO_L1_PUBDATA_KEY`. We still require logs
+for the 2 blob commitments; even if these logs contain values, we will substitute them with `bytes32(0)` when
+constructing the batch commitment.
+
+### Blob Based Pubdata Processing
+
+The format for `pubdataCommitments` changes when we send pubdata as blobs, containing data we need to verify the blob
+contents via the newly introduced point evaluation precompile. The data in `pubdataCommitments[1:]` is the
+concatenation of `opening point (16 bytes) || claimed value (32 bytes) || commitment (48 bytes) || proof (48 bytes)`
+for each blob attached to the transaction, lowering our calldata from N → 144 bytes per blob. More on how this is used
+later on.
+
+Utilizing blobs causes us to process logs in a slightly different way. Similar to how it's done when pubdata is sent
+via calldata, we require a system log with a key of `TOTAL_L2_TO_L1_PUBDATA_KEY` (although its value is ignored), and
+we extract the 2 blob hashes from the `BLOB_ONE_HASH_KEY` and `BLOB_TWO_HASH_KEY` system logs to be used in the batch
+commitment.
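+
+To make the blob-mode layout concrete, here is a small illustrative sketch (ours, not contract code) that splits the
+payload, after the 1-byte pubdata source header has been stripped, into its per-blob fields using the sizes given
+above:
+
+```python
+# opening point (16) || claimed value (32) || commitment (48) || proof (48), per blob
+PUBDATA_COMMITMENT_SIZE = 16 + 32 + 48 + 48  # 144 bytes
+
+def split_blob_commitments(pubdata_commitments: bytes) -> list[dict]:
+    # `pubdata_commitments` is assumed to already have the 1-byte source header removed.
+    assert len(pubdata_commitments) % PUBDATA_COMMITMENT_SIZE == 0
+    blobs = []
+    for i in range(0, len(pubdata_commitments), PUBDATA_COMMITMENT_SIZE):
+        chunk = pubdata_commitments[i : i + PUBDATA_COMMITMENT_SIZE]
+        blobs.append({
+            "opening_point": chunk[:16],
+            "claimed_value": chunk[16:48],
+            "commitment": chunk[48:96],
+            "proof": chunk[96:144],
+        })
+    return blobs
+```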
+
+While calldata verification is simple, comparing the hash of the supplied calldata against the value in the system log,
+we need to take a few extra steps to verify that the blobs attached to the transaction contain the correct data. After
+processing the logs and getting the 2 blob linear hashes, we will have all the data we need to call the
+[point evaluation precompile](https://eips.ethereum.org/EIPS/eip-4844#point-evaluation-precompile). Recall that the
+contents of `pubdataCommitments` hold the opening point (in its 16-byte form), the claimed value, the commitment, and
+the proof of this claimed value. The last piece of information we need is the blob’s versioned hash (obtained via the
+`BLOBHASH` opcode).
+
+There are checks within `_verifyBlobInformation` that ensure that we have the correct blob linear hashes and that, if
+we aren’t expecting a second blob, the linear hash is equal to `bytes32(0)`. This is how we signal to our circuits that
+we didn’t publish any information in the second blob.
+
+Verifying the commitment via the point evaluation precompile goes as follows (note that we assume the header byte for
+the pubdata source has already been removed by this point):
+
+```solidity
+// The opening point is passed as 16 bytes as that is what our circuits expect and use when verifying the new batch commitment
+// PUBDATA_COMMITMENT_SIZE = 144 bytes
+pubdata_commitments <- [opening point (16 bytes) || claimed value (32 bytes) || commitment (48 bytes) || proof (48 bytes)] from calldata
+opening_point = bytes32(pubdata_commitments[:16])
+versioned_hash <- from BLOBHASH opcode
+
+// Given that we needed to pad the opening point for the precompile, append the data after.
+point_eval_input = versioned_hash || opening_point || pubdata_commitments[16:PUBDATA_COMMITMENT_SIZE]
+
+// this part handles the following:
+// verify versioned_hash == hash(commitment)
+// verify P(z) = y
+res <- point_evaluation_precompile(point_eval_input)
+
+assert uint256(res[32:]) == BLS_MODULUS
+```
+
+Correctness is validated by checking that the latter 32 bytes of output from the point evaluation call are equal to
+`BLS_MODULUS`.
+
+### Batch Commitment and Proof of Equivalence
+
+With the contents of the blob verified, we need to add this information to the batch commitment so that it can further
+be part of the verification of the overall batch by our proof system. Our batch commitment is the hash of a few
+different values: passthrough data (holding our new state root and the next enumeration index to be used), meta
+parameters (a flag for whether zk porter is available, the bootloader bytecode hash, and the default account bytecode
+hash), and the auxiliary output. The auxiliary output changes with 4844, adding 4 new fields and the new corresponding
+encoding:
+
+- 2 `bytes32` fields for linear hashes
+  - These are the hashes of the blobs’ preimages
+- 2 `bytes32` for 4844 output commitment hashes
+  - These are `(versioned hash || opening point || evaluation value)`
+  - The format of the opening point here is expected to be the 16-byte value passed via calldata
+- We encode an additional 28 `bytes32(0)` at the end because, with the inclusion of vm 1.5.0, our circuits support a
+  total of 16 blobs that will be used once the total number of blobs supported by Ethereum increases.
+
+```solidity
+abi.encode(
+    l2ToL1LogsHash,
+    _stateDiffHash,
+    _batch.bootloaderHeapInitialContentsHash,
+    _batch.eventsQueueStateHash,
+    _blob1LinearHash,
+    _blob1OutputCommitment,
+    _blob2LinearHash,
+    _blob2OutputCommitment,
+    _encode28Bytes32Zeroes()
+);
+```
+
+There are 3 different scenarios that change the values posted here:
+
+1. We submit pubdata via calldata
+2. We only utilize a single blob
+3. We use both blobs
+
+When we use calldata, the values `_blob1LinearHash`, `_blob1OutputCommitment`, `_blob2LinearHash`, and
+`_blob2OutputCommitment` should all be `bytes32(0)`. If we are using blobs but only have a single blob,
+`_blob1LinearHash` and `_blob1OutputCommitment` should correspond to that blob, while `_blob2LinearHash` and
+`_blob2OutputCommitment` will be `bytes32(0)`. Finally, when we use both blobs, the data should be present in all of
+these values.
+
+Our circuits will then handle the proof of equivalence, following a method similar to the moderate approach mentioned
+[here](https://notes.ethereum.org/@vbuterin/proto_danksharding_faq#Moderate-approach-works-with-any-ZK-SNARK), verifying
+that the total pubdata can be repackaged as the blobs we submitted and that the commitments in fact evaluate to the
+given value at the computed opening point.
+
+## Pubdata Contents and Blobs
+
+Given how data representation changes on the consensus layer (where blobs live) versus on the execution layer (where
+calldata is found), there is some preprocessing that takes place to make it compatible. When calldata is used for
+pubdata, we keep it as is and no additional processing is required to transform it. Recalling the above section, when
+pubdata is sent via calldata it has the format `source byte (1 byte) || pubdata || blob commitment (32 bytes)`, so we
+must first trim off the source byte and blob commitment before decoding it, as sketched below. A more detailed guide on
+the format can be found in our documentation.
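+
+A rough sketch of that trimming step in Python (the function name is made up for illustration; the actual
+reconstruction tooling does not necessarily look like this):
+
+```python
+PUBDATA_SOURCE_CALLDATA = 0
+
+def strip_calldata_pubdata(raw: bytes) -> bytes:
+    # Calldata-mode layout: source byte (1 byte) || pubdata || blob commitment (32 bytes)
+    assert raw[0] == PUBDATA_SOURCE_CALLDATA
+    # Drop the 1-byte source header and the trailing 32-byte blob commitment,
+    # leaving only the pubdata to be decoded.
+    return raw[1:-32]
+```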
Using blobs requires a few more steps:
+
+```python
+ZKSYNC_BLOB_SIZE = 31 * 4096
+
+# First we pad the pubdata with the required amount of zeroes to fill
+# the nearest blobs
+padding_amount = ZKSYNC_BLOB_SIZE - (len(pubdata) % ZKSYNC_BLOB_SIZE)
+padded_pubdata = pad_right_with_zeroes(pubdata, padding_amount)
+
+# We then chunk them into `ZKSYNC_BLOB_SIZE` sized arrays
+blobs = chunk(padded_pubdata, ZKSYNC_BLOB_SIZE)
+
+# Each blob is then encoded to be compatible with the CL
+encoded_blobs = []
+for blob in blobs:
+    encoded_blobs.append(zksync_pubdata_into_ethereum_4844_data(blob))
+```
+
+Now we can apply the encoding formula, with some of the data from the blob commit transaction, to move from encoded blobs
+back into decodable zksync pubdata:
+
+```python
+# opening point (16 bytes) || claimed value (32 bytes) || commitment (48 bytes) || proof (48 bytes)
+BLOB_PUBDATA_COMMITMENT_SIZE = 144
+
+# Parse the kzg commitments from the commit calldata
+commit_calldata_without_source = commit_calldata[1:]
+kzg_commitments = []
+for i in range(0, len(commit_calldata_without_source), BLOB_PUBDATA_COMMITMENT_SIZE):
+    # We can skip the opening point and claimed value, ignoring the proof
+    kzg_commitments.append(commit_calldata_without_source[i + 48 : i + 96])
+
+# We then need to pull the blobs in the correct order; this can be found by matching
+# each blob with its kzg_commitment, keeping the order from the calldata
+encoded_blobs = pull_blob_for_each_kzg_commitment(kzg_commitments)
+
+# Decode each blob into the zksync specific format
+decoded_blobs = []
+for encoded_blob in encoded_blobs:
+    decoded_blobs.append(ethereum_4844_data_into_zksync_pubdata(encoded_blob))
+
+reconstructed_pubdata = concat(decoded_blobs)
+```
+
+The last step depends on the strategy taken; the two approaches are:
+
+- Remove all trailing zeroes after concatenation
+- Parse the data and ignore the extra zeroes at the end
+
+The second option is a bit messier, so going with the first, we can decode the pubdata; when we get to the last
+state diff, if the number of bytes is less than specified, we know that the remaining data are zeroes. The needed
+functions can be found within the
+[zkevm_circuits code](https://github.com/matter-labs/era-zkevm_circuits/blob/3a973afb3cf2b50b7138c1af61cc6ac3d7d0189f/src/eip_4844/mod.rs#L358).
diff --git a/docs/guides/advanced/pubdata.md b/docs/guides/advanced/pubdata.md index f0e159a8010c..cc0c82497cab 100644 --- a/docs/guides/advanced/pubdata.md +++ b/docs/guides/advanced/pubdata.md @@ -12,14 +12,14 @@ One thing to note is that the way that the data is represented changes in a pre- level, in a pre-boojum era these are represented as separate fields while in boojum they are packed into a single bytes array. -> Note: Once 4844 gets integrated this bytes array will move from being part of the calldata to blob data. +> Note: When 4844 was integrated this bytes array was moved from being part of the calldata to blob data. While the structure of the pubdata changes, we can use the same strategy to pull the relevant information. First, we -need to filter all of the transactions to the L1 zkSync contract for only the `commitBlocks` transactions where the -proposed block has been referenced by a corresponding `executeBlocks` call (the reason for this is that a committed or -even proven block can be reverted but an executed one cannot). Once we have all the committed blocks that have been -executed, we then will pull the transaction input and the relevant fields, applying them in order to reconstruct the -current state of L2.
+need to filter all of the transactions to the L1 zkSync contract for only the `commitBlocks/commitBatches` transactions +where the proposed block has been referenced by a corresponding `executeBlocks/executeBatches` call (the reason for this +is that a committed or even proven block can be reverted but an executed one cannot). Once we have all the committed +blocks that have been executed, we then will pull the transaction input and the relevant fields, applying them in order +to reconstruct the current state of L2. One thing to note is that in both systems some of the contract bytecode is compressed into an array of indices where each 2 byte index corresponds to an 8 byte word in a dictionary. More on how that is done [here](./compression.md). Once @@ -90,35 +90,44 @@ id generated as part of a batch will be in the `indexRepeatedStorageChanges` fie ### Post-Boojum Era ```solidity -/// @notice Data needed to commit new block -/// @param blockNumber Number of the committed block -/// @param timestamp Unix timestamp denoting the start of the block execution +/// @notice Data needed to commit new batch +/// @param batchNumber Number of the committed batch +/// @param timestamp Unix timestamp denoting the start of the batch execution /// @param indexRepeatedStorageChanges The serial number of the shortcut index that's used as a unique identifier for storage keys that were used twice or more /// @param newStateRoot The state root of the full state tree /// @param numberOfLayer1Txs Number of priority operations to be processed -/// @param priorityOperationsHash Hash of all priority operations from this block -/// @param systemLogs concatenation of all L2 -> L1 system logs in the block -/// @param totalL2ToL1Pubdata Total pubdata committed to as part of bootloader run. Contents are: l2Tol1Logs <> l2Tol1Messages <> publishedBytecodes <> stateDiffs -struct CommitBlockInfo { - uint64 blockNumber; +/// @param priorityOperationsHash Hash of all priority operations from this batch +/// @param bootloaderHeapInitialContentsHash Hash of the initial contents of the bootloader heap. In practice it serves as the commitment to the transactions in the batch. +/// @param eventsQueueStateHash Hash of the events queue state. In practice it serves as the commitment to the events in the batch. +/// @param systemLogs concatenation of all L2 -> L1 system logs in the batch +/// @param pubdataCommitments Packed pubdata commitments/data. +/// @dev pubdataCommitments format: This will always start with a 1 byte pubdataSource flag. Current allowed values are 0 (calldata) or 1 (blobs) +/// kzg: list of: opening point (16 bytes) || claimed value (32 bytes) || commitment (48 bytes) || proof (48 bytes) = 144 bytes +/// calldata: pubdataCommitments.length - 1 - 32 bytes of pubdata +/// and 32 bytes appended to serve as the blob commitment part for the aux output part of the batch commitment +/// @dev For 2 blobs we will be sending 288 bytes of calldata instead of the full amount for pubdata. 
+/// @dev When using calldata, we only need to send one blob commitment since the max number of bytes in calldata fits in a single blob and we can pull the +/// linear hash from the system logs +struct CommitBatchInfo { + uint64 batchNumber; uint64 timestamp; uint64 indexRepeatedStorageChanges; bytes32 newStateRoot; uint256 numberOfLayer1Txs; bytes32 priorityOperationsHash; + bytes32 bootloaderHeapInitialContentsHash; + bytes32 eventsQueueStateHash; bytes systemLogs; - bytes totalL2ToL1Pubdata; + bytes pubdataCommitments; } ``` -The main difference between the two `CommitBlockInfo` structs is that we have taken a few of the fields and merged them -into a single bytes array called `totalL2ToL1Pubdata`. The contents of pubdata include: - -1. L2 to L1 Logs -2. L2 to L1 Messages -3. Published Bytecodes -4. Compressed State Diffs +The main difference between the two `CommitBatchInfo` and `CommitBlockInfo` structs is that we have taken a few of the +fields and merged them into a single bytes array called `pubdataCommitments`. In the `calldata` mode, the pubdata is +being passed using that field. In the `blobs` mode, that field is used to store the KZG commitments and proofs. More on +EIP-4844 blobs [here](./pubdata-with-blobs.md). In the Validium mode, the field will either be empty or store the +inclusion proof for the DA blob. The 2 main fields needed for state reconstruction are the bytecodes and the state diffs. The bytecodes follow the same structure and reasoning in the old system (as explained above). The state diffs will follow the compression illustrated diff --git a/docs/guides/setup-dev.md b/docs/guides/setup-dev.md index f096a2f8a270..7b2879ff04a4 100644 --- a/docs/guides/setup-dev.md +++ b/docs/guides/setup-dev.md @@ -221,7 +221,7 @@ SQLx is a Rust library we use to interact with Postgres, and its CLI is used to features of the library. ```bash -cargo install sqlx-cli --version 0.7.3 +cargo install --locked sqlx-cli --version 0.7.3 ``` ## Solidity compiler `solc` diff --git a/etc/env/base/vm_runner.toml b/etc/env/base/vm_runner.toml new file mode 100644 index 000000000000..d9e10e8b357d --- /dev/null +++ b/etc/env/base/vm_runner.toml @@ -0,0 +1,9 @@ +# Configuration for the VM runner crate + +[vm_runner] + +[vm_runner.protective_reads] +# Path to the directory that contains RocksDB with protective reads writer cache. +protective_reads_db_path = "./db/main/protective_reads" +# Amount of batches that can be processed in parallel. 
+protective_reads_window_size = 3 diff --git a/etc/env/file_based/general.yaml b/etc/env/file_based/general.yaml index d59da18d1266..fdccdf03b5f7 100644 --- a/etc/env/file_based/general.yaml +++ b/etc/env/file_based/general.yaml @@ -321,3 +321,7 @@ observability: opentelemetry: endpoint: unset level: debug + +protective_reads_writer: + protective_reads_db_path: "./db/main/protective_reads" + protective_reads_window_size: 3 diff --git a/infrastructure/protocol-upgrade/src/transaction.ts b/infrastructure/protocol-upgrade/src/transaction.ts index 38f4ed1e91bd..dc9d5d190512 100644 --- a/infrastructure/protocol-upgrade/src/transaction.ts +++ b/infrastructure/protocol-upgrade/src/transaction.ts @@ -1,5 +1,5 @@ import { BigNumberish } from '@ethersproject/bignumber'; -import { Bytes, BytesLike, ethers } from 'ethers'; +import { BytesLike, ethers } from 'ethers'; import { ForceDeployUpgraderFactory as ForceDeployUpgraderFactoryL2 } from 'l2-contracts/typechain'; import { DefaultUpgradeFactory as DefaultUpgradeFactoryL1, diff --git a/infrastructure/zk/src/utils.ts b/infrastructure/zk/src/utils.ts index 96fd7674e000..38d980cb1509 100644 --- a/infrastructure/zk/src/utils.ts +++ b/infrastructure/zk/src/utils.ts @@ -25,8 +25,7 @@ const IGNORED_DIRS = [ 'artifacts-zk', 'cache-zk', // Ignore directories with OZ and forge submodules. - 'contracts/l1-contracts/lib', - 'contracts/l1-contracts-foundry/lib' + 'contracts/l1-contracts/lib' ]; const IGNORED_FILES = ['KeysWithPlonkVerifier.sol', 'TokenInit.sol', '.tslintrc.js', '.prettierrc.js']; diff --git a/prover/CHANGELOG.md b/prover/CHANGELOG.md index 2e6ea787f814..8306f2e02d7b 100644 --- a/prover/CHANGELOG.md +++ b/prover/CHANGELOG.md @@ -1,5 +1,19 @@ # Changelog +## [14.5.0](https://github.com/matter-labs/zksync-era/compare/prover-v14.4.0...prover-v14.5.0) (2024-06-04) + + +### Features + +* update VKs and bump cargo.lock ([#2112](https://github.com/matter-labs/zksync-era/issues/2112)) ([6510317](https://github.com/matter-labs/zksync-era/commit/65103173085a0b500a626cb8179fad77ee97fadd)) +* use semver for metrics, move constants to prover workspace ([#2098](https://github.com/matter-labs/zksync-era/issues/2098)) ([7a50a9f](https://github.com/matter-labs/zksync-era/commit/7a50a9f79e516ec150d1f30b9f1c781a5523375b)) + + +### Bug Fixes + +* **block-reverter:** Fix reverting snapshot files ([#2064](https://github.com/matter-labs/zksync-era/issues/2064)) ([17a7e78](https://github.com/matter-labs/zksync-era/commit/17a7e782d9e35eaf38acf920c2326d4037c7781e)) +* **house-keeper:** Fix queue size queries ([#2106](https://github.com/matter-labs/zksync-era/issues/2106)) ([183502a](https://github.com/matter-labs/zksync-era/commit/183502a17eb47a747f50b6a9d38ab78de984f80e)) + ## [14.4.0](https://github.com/matter-labs/zksync-era/compare/prover-v14.3.0...prover-v14.4.0) (2024-05-30) diff --git a/prover/Cargo.lock b/prover/Cargo.lock index f0191b835453..733fdab19265 100644 --- a/prover/Cargo.lock +++ b/prover/Cargo.lock @@ -673,7 +673,7 @@ dependencies = [ "blake2 0.10.6 (registry+https://github.com/rust-lang/crates.io-index)", "const_format", "convert_case", - "crossbeam 0.8.4", + "crossbeam 0.7.3", "crypto-bigint 0.5.5", "cs_derive 0.1.0 (git+https://github.com/matter-labs/era-boojum?branch=main)", "derivative", @@ -2526,7 +2526,7 @@ dependencies = [ "crossbeam 0.7.3", "franklin-crypto 0.0.5 (git+https://github.com/matter-labs/franklin-crypto?branch=snark_wrapper)", "gpu-ffi", - "itertools 0.10.5", + "itertools 0.11.0", "num_cpus", "rand 0.4.6", "serde", @@ -4670,7 +4670,7 
@@ dependencies = [ name = "prover_version" version = "0.1.0" dependencies = [ - "zksync_types", + "zksync_prover_fri_types", ] [[package]] @@ -7596,7 +7596,7 @@ dependencies = [ [[package]] name = "zk_evm" version = "1.5.0" -source = "git+https://github.com/matter-labs/era-zk_evm.git?branch=v1.5.0#9bbf7ffd2c38ee8b9667e96eaf0c111037fe976f" +source = "git+https://github.com/matter-labs/era-zk_evm.git?branch=v1.5.0#0c5cdca00cca4fa0a8c49147a11048c24f8a4b12" dependencies = [ "anyhow", "lazy_static", @@ -7747,7 +7747,7 @@ dependencies = [ [[package]] name = "zkevm_circuits" version = "1.5.0" -source = "git+https://github.com/matter-labs/era-zkevm_circuits.git?branch=v1.5.0#a93a3a5c34ec1ec31d73191d11ab00b4d8215a3f" +source = "git+https://github.com/matter-labs/era-zkevm_circuits.git?branch=v1.5.0#b7a86c739e8a8f88e788e90893c6e7496f6d7dfc" dependencies = [ "arrayvec 0.7.4", "boojum", @@ -7805,7 +7805,7 @@ dependencies = [ [[package]] name = "zkevm_opcode_defs" version = "1.5.0" -source = "git+https://github.com/matter-labs/era-zkevm_opcode_defs.git?branch=v1.5.0#109d9f734804a8b9dc0531c0b576e2a0f55a40de" +source = "git+https://github.com/matter-labs/era-zkevm_opcode_defs.git?branch=v1.5.0#28d2edabf902ea9b08f6a26a4506831fd89346b9" dependencies = [ "bitflags 2.4.2", "blake2 0.10.6 (registry+https://github.com/rust-lang/crates.io-index)", @@ -8179,6 +8179,7 @@ dependencies = [ "google-cloud-storage", "http", "prost 0.12.3", + "rand 0.8.5", "serde_json", "tokio", "tracing", diff --git a/prover/proof_fri_compressor/src/main.rs b/prover/proof_fri_compressor/src/main.rs index ec66515b6a35..9786170874ec 100644 --- a/prover/proof_fri_compressor/src/main.rs +++ b/prover/proof_fri_compressor/src/main.rs @@ -10,8 +10,8 @@ use tokio::sync::{oneshot, watch}; use zksync_config::configs::{DatabaseSecrets, FriProofCompressorConfig, ObservabilityConfig}; use zksync_env_config::{object_store::ProverObjectStoreConfig, FromEnv}; use zksync_object_store::ObjectStoreFactory; +use zksync_prover_fri_types::PROVER_PROTOCOL_SEMANTIC_VERSION; use zksync_queued_job_processor::JobProcessor; -use zksync_types::protocol_version::ProtocolSemanticVersion; use zksync_utils::wait_for_tasks::ManagedTasks; use crate::{ @@ -73,7 +73,7 @@ async fn main() -> anyhow::Result<()> { .create_store() .await; - let protocol_version = ProtocolSemanticVersion::current_prover_version(); + let protocol_version = PROVER_PROTOCOL_SEMANTIC_VERSION; let proof_compressor = ProofCompressor::new( blob_store, diff --git a/prover/prover_dal/.sqlx/query-7496a9df12e409162338266085ce27807ede6b4db9541198cee2861b874b52f9.json b/prover/prover_dal/.sqlx/query-7496a9df12e409162338266085ce27807ede6b4db9541198cee2861b874b52f9.json deleted file mode 100644 index 1a8ebf4e4253..000000000000 --- a/prover/prover_dal/.sqlx/query-7496a9df12e409162338266085ce27807ede6b4db9541198cee2861b874b52f9.json +++ /dev/null @@ -1,32 +0,0 @@ -{ - "db_name": "PostgreSQL", - "query": "\n SELECT\n protocol_version,\n COUNT(*) FILTER (\n WHERE\n status = 'queued'\n ) AS queued,\n COUNT(*) FILTER (\n WHERE\n status = 'in_progress'\n ) AS in_progress\n FROM\n proof_compression_jobs_fri\n WHERE\n protocol_version IS NOT NULL\n GROUP BY\n protocol_version\n ", - "describe": { - "columns": [ - { - "ordinal": 0, - "name": "protocol_version", - "type_info": "Int4" - }, - { - "ordinal": 1, - "name": "queued", - "type_info": "Int8" - }, - { - "ordinal": 2, - "name": "in_progress", - "type_info": "Int8" - } - ], - "parameters": { - "Left": [] - }, - "nullable": [ - true, - null, - null - ] - }, - 
"hash": "7496a9df12e409162338266085ce27807ede6b4db9541198cee2861b874b52f9" -} diff --git a/prover/prover_dal/.sqlx/query-5449963638944edc62b5ec293bcda37e6c0c6c85fe5f701ebc9919fcad749a23.json b/prover/prover_dal/.sqlx/query-a94fffdbc1827dc5df908ea1e99ef3ad13840d2c497760e9bd0513f68dc4271c.json similarity index 52% rename from prover/prover_dal/.sqlx/query-5449963638944edc62b5ec293bcda37e6c0c6c85fe5f701ebc9919fcad749a23.json rename to prover/prover_dal/.sqlx/query-a94fffdbc1827dc5df908ea1e99ef3ad13840d2c497760e9bd0513f68dc4271c.json index 01d32127608e..20db1e57aeb8 100644 --- a/prover/prover_dal/.sqlx/query-5449963638944edc62b5ec293bcda37e6c0c6c85fe5f701ebc9919fcad749a23.json +++ b/prover/prover_dal/.sqlx/query-a94fffdbc1827dc5df908ea1e99ef3ad13840d2c497760e9bd0513f68dc4271c.json @@ -1,6 +1,6 @@ { "db_name": "PostgreSQL", - "query": "\n SELECT\n COUNT(*) AS \"count!\",\n circuit_id AS \"circuit_id!\",\n aggregation_round AS \"aggregation_round!\",\n status AS \"status!\",\n protocol_version AS \"protocol_version!\"\n FROM\n prover_jobs_fri\n WHERE\n (\n status = 'queued'\n OR status = 'in_progress'\n )\n AND protocol_version IS NOT NULL\n GROUP BY\n circuit_id,\n aggregation_round,\n status,\n protocol_version\n ", + "query": "\n SELECT\n COUNT(*) AS \"count!\",\n circuit_id AS \"circuit_id!\",\n aggregation_round AS \"aggregation_round!\",\n status AS \"status!\",\n protocol_version AS \"protocol_version!\",\n protocol_version_patch AS \"protocol_version_patch!\"\n FROM\n prover_jobs_fri\n WHERE\n (\n status = 'queued'\n OR status = 'in_progress'\n )\n AND protocol_version IS NOT NULL\n GROUP BY\n circuit_id,\n aggregation_round,\n status,\n protocol_version,\n protocol_version_patch\n ", "describe": { "columns": [ { @@ -27,6 +27,11 @@ "ordinal": 4, "name": "protocol_version!", "type_info": "Int4" + }, + { + "ordinal": 5, + "name": "protocol_version_patch!", + "type_info": "Int4" } ], "parameters": { @@ -37,8 +42,9 @@ false, false, false, - true + true, + false ] }, - "hash": "5449963638944edc62b5ec293bcda37e6c0c6c85fe5f701ebc9919fcad749a23" + "hash": "a94fffdbc1827dc5df908ea1e99ef3ad13840d2c497760e9bd0513f68dc4271c" } diff --git a/prover/prover_dal/.sqlx/query-e9c9b69852fa68f463f17b6d63ab99cf505662036f2dd7a9f1807c4c1bad7c7b.json b/prover/prover_dal/.sqlx/query-e9c9b69852fa68f463f17b6d63ab99cf505662036f2dd7a9f1807c4c1bad7c7b.json new file mode 100644 index 000000000000..160eb31bf953 --- /dev/null +++ b/prover/prover_dal/.sqlx/query-e9c9b69852fa68f463f17b6d63ab99cf505662036f2dd7a9f1807c4c1bad7c7b.json @@ -0,0 +1,38 @@ +{ + "db_name": "PostgreSQL", + "query": "\n SELECT\n protocol_version,\n protocol_version_patch,\n COUNT(*) FILTER (\n WHERE\n status = 'queued'\n ) AS queued,\n COUNT(*) FILTER (\n WHERE\n status = 'in_progress'\n ) AS in_progress\n FROM\n proof_compression_jobs_fri\n WHERE\n protocol_version IS NOT NULL\n GROUP BY\n protocol_version,\n protocol_version_patch\n ", + "describe": { + "columns": [ + { + "ordinal": 0, + "name": "protocol_version", + "type_info": "Int4" + }, + { + "ordinal": 1, + "name": "protocol_version_patch", + "type_info": "Int4" + }, + { + "ordinal": 2, + "name": "queued", + "type_info": "Int8" + }, + { + "ordinal": 3, + "name": "in_progress", + "type_info": "Int8" + } + ], + "parameters": { + "Left": [] + }, + "nullable": [ + true, + false, + null, + null + ] + }, + "hash": "e9c9b69852fa68f463f17b6d63ab99cf505662036f2dd7a9f1807c4c1bad7c7b" +} diff --git a/prover/prover_dal/src/fri_proof_compressor_dal.rs b/prover/prover_dal/src/fri_proof_compressor_dal.rs 
index 35bb6329bdb8..38f09114f2bf 100644 --- a/prover/prover_dal/src/fri_proof_compressor_dal.rs +++ b/prover/prover_dal/src/fri_proof_compressor_dal.rs @@ -251,11 +251,12 @@ impl FriProofCompressorDal<'_, '_> { .unwrap(); } - pub async fn get_jobs_stats(&mut self) -> HashMap { + pub async fn get_jobs_stats(&mut self) -> HashMap { sqlx::query!( r#" SELECT protocol_version, + protocol_version_patch, COUNT(*) FILTER ( WHERE status = 'queued' @@ -269,7 +270,8 @@ impl FriProofCompressorDal<'_, '_> { WHERE protocol_version IS NOT NULL GROUP BY - protocol_version + protocol_version, + protocol_version_patch "#, ) .fetch_all(self.storage.conn()) @@ -277,7 +279,10 @@ impl FriProofCompressorDal<'_, '_> { .unwrap() .into_iter() .map(|row| { - let key = ProtocolVersionId::try_from(row.protocol_version.unwrap() as u16).unwrap(); + let key = ProtocolSemanticVersion::new( + ProtocolVersionId::try_from(row.protocol_version.unwrap() as u16).unwrap(), + VersionPatch(row.protocol_version_patch as u32), + ); let value = JobCountStatistics { queued: row.queued.unwrap() as usize, in_progress: row.in_progress.unwrap() as usize, diff --git a/prover/prover_dal/src/fri_prover_dal.rs b/prover/prover_dal/src/fri_prover_dal.rs index 18d9ec9e14fa..35fb46e8aff3 100644 --- a/prover/prover_dal/src/fri_prover_dal.rs +++ b/prover/prover_dal/src/fri_prover_dal.rs @@ -409,7 +409,8 @@ impl FriProverDal<'_, '_> { circuit_id AS "circuit_id!", aggregation_round AS "aggregation_round!", status AS "status!", - protocol_version AS "protocol_version!" + protocol_version AS "protocol_version!", + protocol_version_patch AS "protocol_version_patch!" FROM prover_jobs_fri WHERE @@ -422,7 +423,8 @@ impl FriProverDal<'_, '_> { circuit_id, aggregation_round, status, - protocol_version + protocol_version, + protocol_version_patch "# ) .fetch_all(self.storage.conn()) @@ -437,6 +439,7 @@ impl FriProverDal<'_, '_> { circuit_id: row.circuit_id as u8, aggregation_round: row.aggregation_round as u8, protocol_version: row.protocol_version as u16, + protocol_version_patch: row.protocol_version_patch as u32, }) .or_default(); match row.status.as_ref() { diff --git a/prover/prover_dal/src/fri_witness_generator_dal.rs b/prover/prover_dal/src/fri_witness_generator_dal.rs index 4ce0122d7143..3c733623e477 100644 --- a/prover/prover_dal/src/fri_witness_generator_dal.rs +++ b/prover/prover_dal/src/fri_witness_generator_dal.rs @@ -1365,19 +1365,21 @@ impl FriWitnessGeneratorDal<'_, '_> { pub async fn get_witness_jobs_stats( &mut self, aggregation_round: AggregationRound, - ) -> HashMap<(AggregationRound, ProtocolVersionId), JobCountStatistics> { + ) -> HashMap<(AggregationRound, ProtocolSemanticVersion), JobCountStatistics> { let table_name = Self::input_table_name_for(aggregation_round); let sql = format!( r#" SELECT protocol_version, + protocol_version_patch, COUNT(*) FILTER (WHERE status = 'queued') as queued, COUNT(*) FILTER (WHERE status = 'in_progress') as in_progress FROM {} WHERE protocol_version IS NOT NULL GROUP BY - protocol_version + protocol_version, + protocol_version_patch "#, table_name, ); @@ -1387,11 +1389,12 @@ impl FriWitnessGeneratorDal<'_, '_> { .unwrap() .into_iter() .map(|row| { - let key = ( - aggregation_round, + let protocol_semantic_version = ProtocolSemanticVersion::new( ProtocolVersionId::try_from(row.get::("protocol_version") as u16) .unwrap(), + VersionPatch(row.get::("protocol_version_patch") as u32), ); + let key = (aggregation_round, protocol_semantic_version); let value = JobCountStatistics { queued: row.get::("queued") as 
usize, in_progress: row.get::("in_progress") as usize, diff --git a/prover/prover_fri/src/main.rs b/prover/prover_fri/src/main.rs index 4caceae13e9d..7bd658868258 100644 --- a/prover/prover_fri/src/main.rs +++ b/prover/prover_fri/src/main.rs @@ -18,11 +18,11 @@ use zksync_env_config::{ FromEnv, }; use zksync_object_store::{ObjectStore, ObjectStoreFactory}; +use zksync_prover_fri_types::PROVER_PROTOCOL_SEMANTIC_VERSION; use zksync_prover_fri_utils::{get_all_circuit_id_round_tuples_for, region_fetcher::get_zone}; use zksync_queued_job_processor::JobProcessor; use zksync_types::{ basic_fri_types::CircuitIdRoundTuple, - protocol_version::ProtocolSemanticVersion, prover_dal::{GpuProverInstanceStatus, SocketAddress}, }; use zksync_utils::wait_for_tasks::ManagedTasks; @@ -195,7 +195,7 @@ async fn get_prover_tasks( ) -> anyhow::Result>>> { use crate::prover_job_processor::{load_setup_data_cache, Prover}; - let protocol_version = ProtocolSemanticVersion::current_prover_version(); + let protocol_version = PROVER_PROTOCOL_SEMANTIC_VERSION; tracing::info!( "Starting CPU FRI proof generation for with protocol_version: {:?}", @@ -247,7 +247,7 @@ async fn get_prover_tasks( port: prover_config.witness_vector_receiver_port, }; - let protocol_version = ProtocolSemanticVersion::current_prover_version(); + let protocol_version = PROVER_PROTOCOL_SEMANTIC_VERSION; let prover = gpu_prover::Prover::new( store_factory.create_store().await, diff --git a/prover/prover_fri_types/src/lib.rs b/prover/prover_fri_types/src/lib.rs index 611702cd34f1..0c6557c27ffc 100644 --- a/prover/prover_fri_types/src/lib.rs +++ b/prover/prover_fri_types/src/lib.rs @@ -14,7 +14,11 @@ use circuit_definitions::{ }, }; use zksync_object_store::{serialize_using_bincode, Bucket, StoredObject}; -use zksync_types::{basic_fri_types::AggregationRound, L1BatchNumber}; +use zksync_types::{ + basic_fri_types::AggregationRound, + protocol_version::{ProtocolSemanticVersion, VersionPatch}, + L1BatchNumber, ProtocolVersionId, +}; use crate::keys::FriCircuitKey; @@ -23,6 +27,14 @@ pub mod queue; pub const EIP_4844_CIRCUIT_ID: u8 = 255; +// THESE VALUES SHOULD BE UPDATED ON ANY PROTOCOL UPGRADE OF PROVERS +pub const PROVER_PROTOCOL_VERSION: ProtocolVersionId = ProtocolVersionId::Version24; +pub const PROVER_PROTOCOL_PATCH: VersionPatch = VersionPatch(1); +pub const PROVER_PROTOCOL_SEMANTIC_VERSION: ProtocolSemanticVersion = ProtocolSemanticVersion { + minor: PROVER_PROTOCOL_VERSION, + patch: PROVER_PROTOCOL_PATCH, +}; + #[derive(serde::Serialize, serde::Deserialize, Clone)] #[allow(clippy::large_enum_variant)] pub enum CircuitWrapper { diff --git a/prover/prover_version/Cargo.toml b/prover/prover_version/Cargo.toml index af2c9936ec7f..0275b4169b72 100644 --- a/prover/prover_version/Cargo.toml +++ b/prover/prover_version/Cargo.toml @@ -4,4 +4,4 @@ version = "0.1.0" edition.workspace = true [dependencies] -zksync_types.workspace = true +zksync_prover_fri_types.workspace = true diff --git a/prover/prover_version/src/main.rs b/prover/prover_version/src/main.rs index 3ed931240d9f..f4b52801820a 100644 --- a/prover/prover_version/src/main.rs +++ b/prover/prover_version/src/main.rs @@ -1,5 +1,5 @@ -use zksync_types::ProtocolVersionId; +use zksync_prover_fri_types::PROVER_PROTOCOL_SEMANTIC_VERSION; fn main() { - println!("{}", ProtocolVersionId::current_prover_version()); + println!("{}", PROVER_PROTOCOL_SEMANTIC_VERSION); } diff --git a/prover/setup-data-gpu-keys.json b/prover/setup-data-gpu-keys.json index 600427385c79..4acc51b9add0 100644 --- 
a/prover/setup-data-gpu-keys.json +++ b/prover/setup-data-gpu-keys.json @@ -1,5 +1,5 @@ { - "us": "gs://matterlabs-setup-data-us/744b4e8-gpu/", - "europe": "gs://matterlabs-setup-data-europe/744b4e8-gpu/", - "asia": "gs://matterlabs-setup-data-asia/744b4e8-gpu/" + "us": "gs://matterlabs-setup-data-us/ffc5da2-gpu/", + "europe": "gs://matterlabs-setup-data-europe/ffc5da2-gpu/", + "asia": "gs://matterlabs-setup-data-asia/ffc5da2-gpu/" } diff --git a/prover/vk_setup_data_generator_server_fri/data/commitments.json b/prover/vk_setup_data_generator_server_fri/data/commitments.json index 00161454a9a2..086609a5822b 100644 --- a/prover/vk_setup_data_generator_server_fri/data/commitments.json +++ b/prover/vk_setup_data_generator_server_fri/data/commitments.json @@ -1,6 +1,6 @@ { - "leaf": "0xcc4ac1853353538a166f5c2dde2c24e7e6c461dce8e3dc47d81e9139e1719456", + "leaf": "0xf9664f4324c1400fa5c3822d667f30e873f53f1b8033180cd15fe41c1e2355c6", "node": "0xf520cd5b37e74e19fdb369c8d676a04dce8a19457497ac6686d2bb95d94109c8", - "scheduler": "0x8e58ecfdb4d987f32c45ed50f72a47dc5c46c262d83549c426a8fa6edacbc4dd", - "snark_wrapper": "0xb45190a52235abe353afd606a9144728f807804f5282df9247e27c56e817ccd6" + "scheduler": "0xe6ba9d6b042440c480fa1c7182be32387db6e90281e82f37398d3f98f63f098a", + "snark_wrapper": "0x14f97b81e54b35fe673d8708cc1a19e1ea5b5e348e12d31e39824ed4f42bbca2" } \ No newline at end of file diff --git a/prover/vk_setup_data_generator_server_fri/data/finalization_hints_basic_1.bin b/prover/vk_setup_data_generator_server_fri/data/finalization_hints_basic_1.bin index eeaee8f8a3b46870699f01aed8405bcd84329268..b1623bfe3ef1d593a5eb321903de9daafddce42f 100644 GIT binary patch delta 69 mcmbQjG=*ux9;P6+iTmU%*8b#Y00BlY$-n^PGokUB(f9!ICJRyk delta 69 mcmbQjG=*ux9;Rhn6ZgqkM1JLG00BlY$-n^PGokUB(f9z`f(l9i diff --git a/prover/vk_setup_data_generator_server_fri/data/snark_verification_scheduler_key.json b/prover/vk_setup_data_generator_server_fri/data/snark_verification_scheduler_key.json index 4313abe7616b..acb7e3fe8969 100644 --- a/prover/vk_setup_data_generator_server_fri/data/snark_verification_scheduler_key.json +++ b/prover/vk_setup_data_generator_server_fri/data/snark_verification_scheduler_key.json @@ -6,16 +6,16 @@ "gate_setup_commitments": [ { "x": [ - 3639645538835826981, - 13358681319193882915, - 14654814390686320869, - 2265744977747292559 + 14543631136906534221, + 11532161447842416044, + 11114175029926010938, + 1228896787564295039 ], "y": [ - 5699456119250210464, - 11698616611432786025, - 15205083455076303537, - 793062898509501988 + 13293602262342424489, + 8897930584356943159, + 13256028170406220369, + 3214939367598363288 ], "infinity": false }, @@ -96,16 +96,16 @@ }, { "x": [ - 8181305420893527265, - 8023799216002703714, - 15496213284243332216, - 770710052375668551 + 9586697317366528906, + 2325800863365957883, + 1243781259615311278, + 3048012003267036960 ], "y": [ - 1173987788591134762, - 3283714838474547428, - 15288445962933699259, - 953799583719157434 + 612821620743617231, + 1510385666449513894, + 9368337288452385056, + 2949736812933507034 ], "infinity": false }, diff --git a/prover/vk_setup_data_generator_server_fri/data/verification_basic_1_key.json b/prover/vk_setup_data_generator_server_fri/data/verification_basic_1_key.json index 1f219f9e876d..8459e87826ac 100644 --- a/prover/vk_setup_data_generator_server_fri/data/verification_basic_1_key.json +++ b/prover/vk_setup_data_generator_server_fri/data/verification_basic_1_key.json @@ -19,19 +19,19 @@ "public_inputs_locations": [ [ 0, - 1045849 + 1046957 ], [ 1, - 
1045849 + 1046957 ], [ 2, - 1045849 + 1046957 ], [ 3, - 1045849 + 1046957 ] ], "extra_constant_polys_for_selectors": 3, @@ -183,100 +183,100 @@ }, "setup_merkle_tree_cap": [ [ - 7045554076696889632, - 16529088100684214116, - 6290514233821252509, - 3001343423260616923 + 9473487953399898748, + 16270419805909860203, + 7335367583540379607, + 18438161812709418982 ], [ - 2940766705131855345, - 4555670488918609622, - 5753494248126846134, - 6256617137189379231 + 12967681057814187922, + 15701035168973396898, + 11259967584839810575, + 10571912581839654023 ], [ - 11827587136011675723, - 10889029680830982431, - 13439167774157155113, - 2734855668043648738 + 5264981558950918922, + 7322263530084687711, + 17011319323793220700, + 14479065901870485923 ], [ - 15389434355711868094, - 11598886769225733235, - 8482571407659321701, - 1997900333773344820 + 15574099641370951434, + 17000829784989701584, + 15964436826107516267, + 11346203353481465805 ], [ - 4548024410962672141, - 4394433224146674864, - 13832051321856375918, - 18445586359141413559 + 5474255527556252767, + 16570571942564149566, + 11428025503403431038, + 6617585440243326997 ], [ - 3613486671466248529, - 8630760380746238913, - 14296646559228531904, - 9397645087732339531 + 308081994977850819, + 8729962239283422104, + 14597407866734738386, + 14829347258931409833 ], [ - 840865276850212173, - 16736429831088322497, - 14611332307377976471, - 3907807757864441481 + 9980505926358439430, + 4909215529832368544, + 8351461288536129828, + 1249767629546599012 ], [ - 2637545975653412188, - 3660986788535112218, - 9902405273825560113, - 7195558443610319480 + 1807216890691480940, + 8617426931824195446, + 11002408656746191939, + 2928848780068318198 ], [ - 8393139460037640371, - 10765566899430361860, - 18329680108258922867, - 741850204565671783 + 11541179157141990516, + 12173830690959139035, + 2440341332114286947, + 12109090346106141232 ], [ - 4000428793481961239, - 15763840098880028026, - 10171423830051614055, - 13386566252539583097 + 11418690736500468651, + 16634379025633469741, + 15202881082421411217, + 1933046213639751324 ], [ - 998896299132355394, - 14206990988719530146, - 8999279144001525320, - 10626686453302503838 + 7447003196248321129, + 18332700323878037759, + 9559830827790696535, + 15476899088175820878 ], [ - 17426248181155971215, - 4962517775468765428, - 7032151950452105750, - 7025431744279194673 + 9516228739964317619, + 3715247844046085602, + 3402341140845153636, + 6208479534561471430 ], [ - 12275611679628867217, - 4758528062899618473, - 14082115197178538846, - 3896427251413045084 + 13129761831635161708, + 1199200173405945178, + 2225893329254814674, + 11792586660360798317 ], [ - 15483865238199990360, - 5691435570314737886, - 14756340954295671676, - 17828994026924671768 + 11807698182439073980, + 7978262413534788419, + 11140621065717310105, + 1380069160672719340 ], [ - 17160835723214490721, - 7256922695144660559, - 4901345145374519964, - 1493120881299167685 + 347840206922472862, + 10448076973761280929, + 6823062094681347787, + 15218544951788424466 ], [ - 1740794570609564600, - 609320811844141042, - 426822094057894482, - 6559582870374070860 + 13614576575170767970, + 7218359081103672230, + 15716723129949529907, + 15097061601049280170 ] ] } diff --git a/prover/vk_setup_data_generator_server_fri/data/verification_leaf_3_key.json b/prover/vk_setup_data_generator_server_fri/data/verification_leaf_3_key.json index 70823d429afd..a44d59cd38ec 100644 --- a/prover/vk_setup_data_generator_server_fri/data/verification_leaf_3_key.json +++ 
b/prover/vk_setup_data_generator_server_fri/data/verification_leaf_3_key.json @@ -162,100 +162,100 @@ }, "setup_merkle_tree_cap": [ [ - 14888709561675676412, - 9216741205039404929, - 9684149635019531913, - 13880860109035593219 + 17855141276447231405, + 7822266582101144460, + 13588292742840523493, + 6469182181208683317 ], [ - 15104809072293329963, - 1896126018678273430, - 12116942096160132903, - 7145610089866937425 + 4232699233227875249, + 16903438402968182485, + 6943950277201482792, + 2110689468668186473 ], [ - 5938467841458718442, - 13853503804678923615, - 9221120555920683684, - 15112098065915315318 + 7707237321810352304, + 6515546920961633488, + 12952446233485170717, + 15066548759710591627 ], [ - 10492005768294435976, - 10245537693158081259, - 17481852070620274887, - 9681223495665222888 + 4639470535288257573, + 9977204060471305820, + 13620252730672745323, + 13906174107064885101 ], [ - 2330970386857215037, - 4019699060591160553, - 1410714382025032836, - 13967465531165811113 + 3380569754818632951, + 14592200377838954179, + 4655944779251366596, + 10461459338163125811 ], [ - 2697285946544359790, - 10219469019881018060, - 4617295552426676526, - 4165342253057202206 + 9505371692898482313, + 17672643349055132324, + 10968459678378506342, + 7203066191514731188 ], [ - 7573986049996963514, - 7859751312783523495, - 6058686987847329688, - 17050513781000134964 + 6361719037117192382, + 14180108541189529084, + 6222651441291357456, + 992683928102460932 ], [ - 7848395666220166703, - 1808170012978044134, - 12886183437176343290, - 9247906664812684040 + 533421257849918809, + 11687478703243746707, + 17923492118938261966, + 3240289105687966878 ], [ - 4758224957823408119, - 18390374702861572456, - 12054973031816727956, - 9964456186628666135 + 10537826768508055055, + 12735025794843706714, + 12285680957016823071, + 10987522679748444515 ], [ - 9913247106175321276, - 1133994713615747518, - 15467305915923599881, - 14137150334296727741 + 13934405620933279246, + 3346346012923536354, + 13038612823504141140, + 5021904630472945213 ], [ - 519510401159554954, - 671623465327617337, - 6946618752566126355, - 14839792343867641685 + 4317559511773342187, + 9030560588429997541, + 4631410576253261376, + 9787322710458812055 ], [ - 15769588697424611648, - 2044484567072981120, - 9195524138415042973, - 17683243399640174941 + 6546515965342993735, + 14693131313122528660, + 17792579751764566634, + 8313761089615939487 ], [ - 12667910057570482067, - 5348170454137185946, - 13596174350294476632, - 10205751496630857536 + 3974680093533741999, + 14912060828934556038, + 1881259422671526373, + 12651251867986376553 ], [ - 6454065087063181969, - 6868636153285926242, - 15096145533308286351, - 5607823324493271199 + 4700501802410133974, + 13415065184486663986, + 2400366378830519355, + 16672949145027127976 ], [ - 9258544726611497878, - 10424111256988796050, - 6681130502078897352, - 7923029268540343473 + 14532304468096502099, + 8898488667664282945, + 421877734780369270, + 18139574494023430530 ], [ - 1072638076145855116, - 5751602392190609095, - 10716732206422190696, - 12121400551621687065 + 2695266391937250139, + 8565247931723474329, + 8596490620847451819, + 2058702883352054572 ] ] } diff --git a/prover/vk_setup_data_generator_server_fri/data/verification_scheduler_key.json b/prover/vk_setup_data_generator_server_fri/data/verification_scheduler_key.json index 4c328cbfd819..8a52cc244bac 100644 --- a/prover/vk_setup_data_generator_server_fri/data/verification_scheduler_key.json +++ 
b/prover/vk_setup_data_generator_server_fri/data/verification_scheduler_key.json @@ -170,100 +170,100 @@ }, "setup_merkle_tree_cap": [ [ - 2680192913777199386, - 7877900777764568562, - 7967270885539056261, - 11491786516879257714 + 9887208323851505217, + 1123001217986730435, + 343259880253311786, + 2151140818520262118 ], [ - 1576848689219001454, - 2538042691131197824, - 16789498574115229290, - 3214129711903181558 + 12495904531249642919, + 17232615797756148395, + 3335544159309667561, + 6261962261160675850 ], [ - 856301905705619734, - 4331213335266799158, - 15267490766684530921, - 3265714654258242220 + 3290174806954782361, + 3957604867997030178, + 12129129725630125865, + 1636089896333385634 ], [ - 8865784570897245270, - 2362765988103793581, - 6943670874402562853, - 14632996114278721596 + 14645858759272203991, + 11653487901803110416, + 2499237237036147984, + 1841727833267838231 ], [ - 63247458005995468, - 12539771084927052853, - 13041512411442114569, - 9742813247561592554 + 18193008520821522692, + 14508611443656176962, + 15201308762805005611, + 16051075400380887227 ], [ - 16743936557271219178, - 14841453989210747254, - 12724413787690930702, - 10592542358880202219 + 4504987266706704494, + 7397695837427186224, + 10067172051000661467, + 5044520361343796759 ], [ - 16695338323889693576, - 8527536001711027994, - 13212045085202022064, - 11071462626939596790 + 9408005523417633181, + 14924548137262927482, + 8927260223716946348, + 25087104176919469 ], [ - 18060750313558946749, - 15824434706098663517, - 775292596891170912, - 18445377984966327048 + 11857324568001808264, + 5783626311717767938, + 10769426771780222703, + 8523712547334248178 ], [ - 3549745875383468285, - 2238890537215251462, - 4591889095789072384, - 13012706980710418598 + 18394924697039022030, + 3773697459649116941, + 6013511991919985339, + 17810626771729638933 ], [ - 14771394899136640222, - 13143304103596416048, - 14456129193020560275, - 5740433968684323698 + 13290121767754155136, + 11225142773614876536, + 4764911669339622945, + 17476639133556434478 ], [ - 11651473654699970526, - 4694969877986805556, - 7029204199916750383, - 6916614362901685796 + 11822797557540925718, + 17521847674855164779, + 18126641713175128985, + 3215884914057380988 ], [ - 4368206191480113515, - 9562279231528697429, - 1907048590194817686, - 13209277185471975687 + 15220380051263546850, + 7948573237324556416, + 264360501330239312, + 16455579027557250339 ], [ - 14438342866286439870, - 383769026263703315, - 1077241575478137065, - 1158227982301730574 + 17738768733790921549, + 4021891743990340907, + 17352941271057641152, + 15584530612705924787 ], [ - 10868817472877525981, - 11920954565057859026, - 10684659491915725994, - 15343028344024922569 + 7157587680183062137, + 8837818432071888650, + 16467824236289155049, + 17557580094049845697 ], [ - 4969179907509861760, - 3560160134545277440, - 11797495979614319546, - 13436348584120593030 + 15526977922222496027, + 5885713491624121557, + 8813450728670527813, + 10234120825800411733 ], [ - 8873263215018682993, - 13828390019511310487, - 12329030402425507188, - 18004618114160314165 + 12554317685609787988, + 4789370247234643566, + 16370523223191414986, + 9108687955872827734 ] ] } diff --git a/prover/vk_setup_data_generator_server_fri/src/keystore.rs b/prover/vk_setup_data_generator_server_fri/src/keystore.rs index d1ba66e1fd2a..25aedeb089ff 100644 --- a/prover/vk_setup_data_generator_server_fri/src/keystore.rs +++ b/prover/vk_setup_data_generator_server_fri/src/keystore.rs @@ -44,7 +44,18 @@ pub struct Keystore { } fn get_base_path() -> 
PathBuf { - core_workspace_dir_or_current_dir().join("prover/vk_setup_data_generator_server_fri/data") + let path = core_workspace_dir_or_current_dir(); + + let new_path = path.join("prover/vk_setup_data_generator_server_fri/data"); + if new_path.exists() { + return new_path; + } + + let mut components = path.components(); + components.next_back().unwrap(); + components + .as_path() + .join("prover/vk_setup_data_generator_server_fri/data") } impl Default for Keystore { diff --git a/prover/witness_generator/src/main.rs b/prover/witness_generator/src/main.rs index e176347acafe..e0e39b442a83 100644 --- a/prover/witness_generator/src/main.rs +++ b/prover/witness_generator/src/main.rs @@ -41,7 +41,7 @@ mod utils; #[cfg(not(target_env = "msvc"))] use jemallocator::Jemalloc; use zksync_dal::Core; -use zksync_types::protocol_version::ProtocolSemanticVersion; +use zksync_prover_fri_types::PROVER_PROTOCOL_SEMANTIC_VERSION; #[cfg(not(target_env = "msvc"))] #[global_allocator] @@ -126,7 +126,7 @@ async fn main() -> anyhow::Result<()> { .context("failed to build a prover_connection_pool")?; let (stop_sender, stop_receiver) = watch::channel(false); - let protocol_version = ProtocolSemanticVersion::current_prover_version(); + let protocol_version = PROVER_PROTOCOL_SEMANTIC_VERSION; let vk_commitments_in_db = match prover_connection_pool .connection() .await diff --git a/prover/witness_vector_generator/src/main.rs b/prover/witness_vector_generator/src/main.rs index 843ae02530d2..2b8134d09e58 100644 --- a/prover/witness_vector_generator/src/main.rs +++ b/prover/witness_vector_generator/src/main.rs @@ -13,9 +13,9 @@ use zksync_config::configs::{ }; use zksync_env_config::{object_store::ProverObjectStoreConfig, FromEnv}; use zksync_object_store::ObjectStoreFactory; +use zksync_prover_fri_types::PROVER_PROTOCOL_SEMANTIC_VERSION; use zksync_prover_fri_utils::{get_all_circuit_id_round_tuples_for, region_fetcher::get_zone}; use zksync_queued_job_processor::JobProcessor; -use zksync_types::protocol_version::ProtocolSemanticVersion; use zksync_utils::wait_for_tasks::ManagedTasks; use crate::generator::WitnessVectorGenerator; @@ -87,7 +87,7 @@ async fn main() -> anyhow::Result<()> { let zone_url = &fri_prover_config.zone_read_url; let zone = get_zone(zone_url).await.context("get_zone()")?; - let protocol_version = ProtocolSemanticVersion::current_prover_version(); + let protocol_version = PROVER_PROTOCOL_SEMANTIC_VERSION; let witness_vector_generator = WitnessVectorGenerator::new( blob_store, diff --git a/zk_toolbox/.gitignore b/zk_toolbox/.gitignore deleted file mode 100644 index a7a33979c0b4..000000000000 --- a/zk_toolbox/.gitignore +++ /dev/null @@ -1 +0,0 @@ -eco diff --git a/zk_toolbox/Cargo.lock b/zk_toolbox/Cargo.lock index 927ef514f324..7679313e9d68 100644 --- a/zk_toolbox/Cargo.lock +++ b/zk_toolbox/Cargo.lock @@ -545,6 +545,7 @@ dependencies = [ "clap", "common", "ethers", + "path-absolutize", "rand", "serde", "serde_json", @@ -2301,6 +2302,24 @@ version = "1.0.14" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "de3145af08024dea9fa9914f381a17b8fc6034dfb00f3a84013f7ff43f29ed4c" +[[package]] +name = "path-absolutize" +version = "3.1.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "e4af381fe79fa195b4909485d99f73a80792331df0625188e707854f0b3383f5" +dependencies = [ + "path-dedot", +] + +[[package]] +name = "path-dedot" +version = "3.1.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = 
"07ba0ad7e047712414213ff67533e6dd477af0a4e1d14fb52343e53d30ea9397" +dependencies = [ + "once_cell", +] + [[package]] name = "path-slash" version = "0.2.1" diff --git a/zk_toolbox/Cargo.toml b/zk_toolbox/Cargo.toml index ae4b40fa435e..6f9c288438ed 100644 --- a/zk_toolbox/Cargo.toml +++ b/zk_toolbox/Cargo.toml @@ -36,6 +36,7 @@ futures = "0.3.30" human-panic = "2.0" lazy_static = "1.4.0" once_cell = "1.19.0" +path-absolutize = "3.1.1" rand = "0.8.5" serde = { version = "1.0", features = ["derive"] } serde_json = "1.0" diff --git a/zk_toolbox/crates/config/Cargo.toml b/zk_toolbox/crates/config/Cargo.toml index 936cf57498f5..a1fb10760b45 100644 --- a/zk_toolbox/crates/config/Cargo.toml +++ b/zk_toolbox/crates/config/Cargo.toml @@ -15,6 +15,7 @@ anyhow.workspace = true clap.workspace = true common.workspace = true ethers.workspace = true +path-absolutize.workspace = true rand.workspace = true serde.workspace = true serde_json.workspace = true diff --git a/zk_toolbox/crates/config/src/consts.rs b/zk_toolbox/crates/config/src/consts.rs index 9082a17abb24..90645ff19acf 100644 --- a/zk_toolbox/crates/config/src/consts.rs +++ b/zk_toolbox/crates/config/src/consts.rs @@ -31,7 +31,7 @@ pub(crate) const LOCAL_DB_PATH: &str = "db/"; pub(crate) const ECOSYSTEM_PATH: &str = "etc/ecosystem"; /// Path to l1 contracts foundry folder inside zksync-era -pub(crate) const L1_CONTRACTS_FOUNDRY: &str = "contracts/l1-contracts-foundry"; +pub(crate) const L1_CONTRACTS_FOUNDRY: &str = "contracts/l1-contracts"; pub(crate) const ERA_CHAIN_ID: ChainId = ChainId(270); diff --git a/zk_toolbox/crates/config/src/ecosystem.rs b/zk_toolbox/crates/config/src/ecosystem.rs index a76e6a5858a5..1557ab21646f 100644 --- a/zk_toolbox/crates/config/src/ecosystem.rs +++ b/zk_toolbox/crates/config/src/ecosystem.rs @@ -1,5 +1,6 @@ use std::{cell::OnceCell, path::PathBuf}; +use path_absolutize::Absolutize; use serde::{Deserialize, Deserializer, Serialize, Serializer}; use thiserror::Error; use types::{ChainId, L1Network, ProverMode, WalletCreation}; @@ -66,7 +67,11 @@ impl<'de> Deserialize<'de> for EcosystemConfig { Ok(EcosystemConfig { name: config.name.clone(), l1_network: config.l1_network, - link_to_code: config.link_to_code.clone(), + link_to_code: config + .link_to_code + .absolutize() + .expect("Failed to parse zksync-era path") + .to_path_buf(), chains: config.chains.clone(), config: config.config.clone(), default_chain: config.default_chain.clone(), @@ -117,7 +122,11 @@ impl EcosystemConfig { configs: config.configs, l1_batch_commit_data_generator_mode: config.l1_batch_commit_data_generator_mode, l1_network: self.l1_network, - link_to_code: self.link_to_code.clone(), + link_to_code: self + .link_to_code + .absolutize() + .expect("Failed to parse zksync-era path") + .into(), base_token: config.base_token, rocks_db_path: config.rocks_db_path, wallet_creation: config.wallet_creation, @@ -187,7 +196,11 @@ impl EcosystemConfig { EcosystemConfigInternal { name: self.name.clone(), l1_network: self.l1_network, - link_to_code: self.link_to_code.clone(), + link_to_code: self + .link_to_code + .absolutize() + .expect("Failed to parse zksync-era path") + .into(), chains: self.chains.clone(), config: self.config.clone(), default_chain: self.default_chain.clone(), diff --git a/zk_toolbox/crates/config/src/forge_interface/script_params.rs b/zk_toolbox/crates/config/src/forge_interface/script_params.rs index a01a15be2a01..70ed08ec5651 100644 --- a/zk_toolbox/crates/config/src/forge_interface/script_params.rs +++ 
b/zk_toolbox/crates/config/src/forge_interface/script_params.rs @@ -29,35 +29,35 @@ impl ForgeScriptParams { pub const DEPLOY_ECOSYSTEM_SCRIPT_PARAMS: ForgeScriptParams = ForgeScriptParams { input: "script-config/config-deploy-l1.toml", output: "script-out/output-deploy-l1.toml", - script_path: "script/DeployL1.s.sol", + script_path: "deploy-scripts/DeployL1.s.sol", }; pub const INITIALIZE_BRIDGES_SCRIPT_PARAMS: ForgeScriptParams = ForgeScriptParams { input: "script-config/config-initialize-shared-bridges.toml", output: "script-out/output-initialize-shared-bridges.toml", - script_path: "script/InitializeSharedBridgeOnL2.sol", + script_path: "deploy-scripts/InitializeSharedBridgeOnL2.sol", }; pub const REGISTER_CHAIN_SCRIPT_PARAMS: ForgeScriptParams = ForgeScriptParams { input: "script-config/register-hyperchain.toml", output: "script-out/output-register-hyperchain.toml", - script_path: "script/RegisterHyperchain.s.sol", + script_path: "deploy-scripts/RegisterHyperchain.s.sol", }; pub const DEPLOY_ERC20_SCRIPT_PARAMS: ForgeScriptParams = ForgeScriptParams { input: "script-config/config-deploy-erc20.toml", output: "script-out/output-deploy-erc20.toml", - script_path: "script/DeployErc20.s.sol", + script_path: "deploy-scripts/DeployErc20.s.sol", }; pub const DEPLOY_PAYMASTER_SCRIPT_PARAMS: ForgeScriptParams = ForgeScriptParams { input: "script-config/config-deploy-paymaster.toml", output: "script-out/output-deploy-paymaster.toml", - script_path: "script/DeployPaymaster.s.sol", + script_path: "deploy-scripts/DeployPaymaster.s.sol", }; pub const ACCEPT_GOVERNANCE_SCRIPT_PARAMS: ForgeScriptParams = ForgeScriptParams { input: "script-config/config-accept-admin.toml", output: "script-out/output-accept-admin.toml", - script_path: "script/AcceptAdmin.s.sol", + script_path: "deploy-scripts/AcceptAdmin.s.sol", }; diff --git a/zk_toolbox/crates/types/src/protocol_version.rs b/zk_toolbox/crates/types/src/protocol_version.rs index 35ac74d3b5f8..5b619c883a3e 100644 --- a/zk_toolbox/crates/types/src/protocol_version.rs +++ b/zk_toolbox/crates/types/src/protocol_version.rs @@ -25,13 +25,7 @@ impl ProtocolSemanticVersion { impl fmt::Display for ProtocolSemanticVersion { fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { - write!( - f, - "{}.{}.{}", - Self::MAJOR_VERSION, - self.minor as u16, - self.patch - ) + write!(f, "{}.{}.{}", Self::MAJOR_VERSION, self.minor, self.patch) } } diff --git a/zk_toolbox/crates/zk_inception/src/commands/server.rs b/zk_toolbox/crates/zk_inception/src/commands/server.rs index 20ab0f3e32a4..e2d35dd9b792 100644 --- a/zk_toolbox/crates/zk_inception/src/commands/server.rs +++ b/zk_toolbox/crates/zk_inception/src/commands/server.rs @@ -1,11 +1,11 @@ use anyhow::Context; -use common::{config::global_config, logger}; +use common::{cmd::Cmd, config::global_config, logger, spinner::Spinner}; use config::{ChainConfig, EcosystemConfig}; -use xshell::Shell; +use xshell::{cmd, Shell}; use crate::{ commands::args::RunServerArgs, - messages::{MSG_CHAIN_NOT_INITIALIZED, MSG_STARTING_SERVER}, + messages::{MSG_BUILDING_L1_CONTRACTS, MSG_CHAIN_NOT_INITIALIZED, MSG_STARTING_SERVER}, server::{RunServer, ServerMode}, }; @@ -18,11 +18,21 @@ pub fn run(shell: &Shell, args: RunServerArgs) -> anyhow::Result<()> { .context(MSG_CHAIN_NOT_INITIALIZED)?; logger::info(MSG_STARTING_SERVER); + + build_l1_contracts(shell, &ecosystem_config)?; run_server(args, &chain_config, shell)?; Ok(()) } +fn build_l1_contracts(shell: &Shell, ecosystem_config: &EcosystemConfig) -> anyhow::Result<()> { + let 
_dir_guard = shell.push_dir(ecosystem_config.path_to_foundry()); + let spinner = Spinner::new(MSG_BUILDING_L1_CONTRACTS); + Cmd::new(cmd!(shell, "yarn build")).run()?; + spinner.finish(); + Ok(()) +} + fn run_server( args: RunServerArgs, chain_config: &ChainConfig, diff --git a/zk_toolbox/crates/zk_inception/src/messages.rs b/zk_toolbox/crates/zk_inception/src/messages.rs index 799f1a5e2d7a..2e328baa3a58 100644 --- a/zk_toolbox/crates/zk_inception/src/messages.rs +++ b/zk_toolbox/crates/zk_inception/src/messages.rs @@ -164,6 +164,7 @@ pub(super) const MSG_FAILED_TO_FIND_ECOSYSTEM_ERR: &str = "Failed to find ecosys /// Server related messages pub(super) const MSG_STARTING_SERVER: &str = "Starting server"; pub(super) const MSG_FAILED_TO_RUN_SERVER_ERR: &str = "Failed to start server"; +pub(super) const MSG_BUILDING_L1_CONTRACTS: &str = "Building L1 contracts..."; /// Forge utils related messages pub(super) const MSG_DEPLOYER_PK_NOT_SET_ERR: &str = "Deployer private key is not set"; diff --git a/zk_toolbox/crates/zk_supervisor/src/commands/integration_tests.rs b/zk_toolbox/crates/zk_supervisor/src/commands/integration_tests.rs index d807c82730d0..525f476c179a 100644 --- a/zk_toolbox/crates/zk_supervisor/src/commands/integration_tests.rs +++ b/zk_toolbox/crates/zk_supervisor/src/commands/integration_tests.rs @@ -1,17 +1,23 @@ -use common::{cmd::Cmd, logger}; +use common::{cmd::Cmd, logger, spinner::Spinner}; use config::EcosystemConfig; use xshell::{cmd, Shell}; -use crate::messages::{MSG_INTEGRATION_TESTS_RUN_INFO, MSG_INTEGRATION_TESTS_RUN_SUCCESS}; +use crate::messages::{ + MSG_INTEGRATION_TESTS_BUILDING_CONTRACTS, MSG_INTEGRATION_TESTS_RUN_INFO, + MSG_INTEGRATION_TESTS_RUN_SUCCESS, +}; const TS_INTEGRATION_PATH: &str = "core/tests/ts-integration"; +const CONTRACTS_TEST_DATA_PATH: &str = "etc/contracts-test-data"; pub fn run(shell: &Shell) -> anyhow::Result<()> { let ecosystem_config = EcosystemConfig::from_file(shell)?; - let _dir_guard = shell.push_dir(ecosystem_config.link_to_code.join(TS_INTEGRATION_PATH)); + shell.change_dir(ecosystem_config.link_to_code.join(TS_INTEGRATION_PATH)); logger::info(MSG_INTEGRATION_TESTS_RUN_INFO); + build_test_contracts(shell, &ecosystem_config)?; + Cmd::new( cmd!(shell, "yarn jest --forceExit --testTimeout 60000") .env("CHAIN_NAME", ecosystem_config.default_chain), @@ -23,3 +29,16 @@ pub fn run(shell: &Shell) -> anyhow::Result<()> { Ok(()) } + +fn build_test_contracts(shell: &Shell, ecosystem_config: &EcosystemConfig) -> anyhow::Result<()> { + let spinner = Spinner::new(MSG_INTEGRATION_TESTS_BUILDING_CONTRACTS); + + Cmd::new(cmd!(shell, "yarn build")).run()?; + Cmd::new(cmd!(shell, "yarn build-yul")).run()?; + + let _dir_guard = shell.push_dir(ecosystem_config.link_to_code.join(CONTRACTS_TEST_DATA_PATH)); + Cmd::new(cmd!(shell, "yarn build")).run()?; + + spinner.finish(); + Ok(()) +} diff --git a/zk_toolbox/crates/zk_supervisor/src/messages.rs b/zk_toolbox/crates/zk_supervisor/src/messages.rs index fa3c1ae19206..6858d5197a06 100644 --- a/zk_toolbox/crates/zk_supervisor/src/messages.rs +++ b/zk_toolbox/crates/zk_supervisor/src/messages.rs @@ -63,6 +63,6 @@ pub(super) fn msg_database_new_migration_loading(dal: &str) -> String { pub(super) const MSG_DATABASE_NEW_MIGRATION_SUCCESS: &str = "Migration created successfully"; // Integration tests related messages -pub(super) const MSG_INTEGRATION_TESTS_RUN_ABOUT: &str = "Run integration tests"; pub(super) const MSG_INTEGRATION_TESTS_RUN_INFO: &str = "Running integration tests"; pub(super) const 
MSG_INTEGRATION_TESTS_RUN_SUCCESS: &str = "Integration tests ran successfully"; +pub(super) const MSG_INTEGRATION_TESTS_BUILDING_CONTRACTS: &str = "Building test contracts..."; From ef12716e71de70537fa7bee82cd6360091798668 Mon Sep 17 00:00:00 2001 From: aon <21188659+aon@users.noreply.github.com> Date: Wed, 5 Jun 2024 12:11:59 -0300 Subject: [PATCH 3/8] fix: remove --- .github/workflows/ci-zk-toolbox-reusable.yml | 4 ---- 1 file changed, 4 deletions(-) diff --git a/.github/workflows/ci-zk-toolbox-reusable.yml b/.github/workflows/ci-zk-toolbox-reusable.yml index 05df367d821a..c3ef46453f13 100644 --- a/.github/workflows/ci-zk-toolbox-reusable.yml +++ b/.github/workflows/ci-zk-toolbox-reusable.yml @@ -76,10 +76,6 @@ jobs: run: | ci_localnet_up - - name: Setup zk - run: | - ci_run zk - - name: Initialize ecosystem run: | ci_run zk_inception ecosystem init --deploy-paymaster --deploy-erc20 \ From 2dcb3d8cb5277fd4da8933c180ecfcfa2e5c3e4c Mon Sep 17 00:00:00 2001 From: aon <21188659+aon@users.noreply.github.com> Date: Wed, 5 Jun 2024 14:17:19 -0300 Subject: [PATCH 4/8] fix: remove zk util dependency --- .../tests/revert-and-restart-en.test.ts | 2 +- .../tests/revert-and-restart.test.ts | 2 +- core/tests/ts-integration/tests/fees.test.ts | 6 +- core/tests/upgrade-test/tests/upgrade.test.ts | 2 +- etc/utils/package.json | 12 ++ etc/utils/src/index.ts | 178 ++++++++++++++++++ .../protocol-upgrade/src/crypto/deployer.ts | 2 +- .../src/hyperchain-upgrade.ts | 2 +- .../src/l1upgrade/deployer.ts | 2 +- .../protocol-upgrade/src/l1upgrade/facets.ts | 2 +- .../src/l2upgrade/deployer.ts | 2 +- .../protocol-upgrade/src/transaction.ts | 2 +- package.json | 1 + .../src/commands/integration_tests.rs | 15 +- .../crates/zk_supervisor/src/messages.rs | 2 + 15 files changed, 218 insertions(+), 14 deletions(-) create mode 100644 etc/utils/package.json create mode 100644 etc/utils/src/index.ts diff --git a/core/tests/revert-test/tests/revert-and-restart-en.test.ts b/core/tests/revert-test/tests/revert-and-restart-en.test.ts index 7e5931ac8ad3..6edf40a8d2d4 100644 --- a/core/tests/revert-test/tests/revert-and-restart-en.test.ts +++ b/core/tests/revert-test/tests/revert-and-restart-en.test.ts @@ -3,7 +3,7 @@ // NOTE: // main_contract.getTotalBatchesCommitted actually checks the number of batches committed. // main_contract.getTotalBatchesExecuted actually checks the number of batches executed. -import * as utils from 'zk/build/utils'; +import * as utils from 'utils'; import { Tester } from './tester'; import * as zksync from 'zksync-ethers'; import { BigNumber, ethers } from 'ethers'; diff --git a/core/tests/revert-test/tests/revert-and-restart.test.ts b/core/tests/revert-test/tests/revert-and-restart.test.ts index 6381f696283b..92869ab45c8c 100644 --- a/core/tests/revert-test/tests/revert-and-restart.test.ts +++ b/core/tests/revert-test/tests/revert-and-restart.test.ts @@ -1,4 +1,4 @@ -import * as utils from 'zk/build/utils'; +import * as utils from 'utils'; import { Tester } from './tester'; import * as zksync from 'zksync-ethers'; import { BigNumber, Contract, ethers } from 'ethers'; diff --git a/core/tests/ts-integration/tests/fees.test.ts b/core/tests/ts-integration/tests/fees.test.ts index 699b9e5e886b..796ff6d7daff 100644 --- a/core/tests/ts-integration/tests/fees.test.ts +++ b/core/tests/ts-integration/tests/fees.test.ts @@ -9,7 +9,7 @@ * sure that the test is maintained does not get broken. 
* */ -import * as utils from 'zk/build/utils'; +import * as utils from 'utils'; import * as fs from 'fs'; import { TestMaster } from '../src/index'; @@ -258,10 +258,10 @@ async function updateReport( const l2EstimatedPriceAsNumber = +ethers.utils.formatEther(estimatedPrice); const gasReport = `Gas price ${newL1GasPrice / 1000000000} gwei: - L1 cost ${expectedL1Price}, + L1 cost ${expectedL1Price}, L2 estimated cost: ${l2EstimatedPriceAsNumber} Estimated Gain: ${expectedL1Price / l2EstimatedPriceAsNumber} - L2 cost: ${l2PriceAsNumber}, + L2 cost: ${l2PriceAsNumber}, Gain: ${expectedL1Price / l2PriceAsNumber}\n`; console.log(gasReport); diff --git a/core/tests/upgrade-test/tests/upgrade.test.ts b/core/tests/upgrade-test/tests/upgrade.test.ts index 9d4ff8f05f75..2da6acab18e6 100644 --- a/core/tests/upgrade-test/tests/upgrade.test.ts +++ b/core/tests/upgrade-test/tests/upgrade.test.ts @@ -1,4 +1,4 @@ -import * as utils from 'zk/build/utils'; +import * as utils from 'utils'; import { Tester } from './tester'; import * as zksync from 'zksync-ethers'; import { BigNumber, BigNumberish, ethers } from 'ethers'; diff --git a/etc/utils/package.json b/etc/utils/package.json new file mode 100644 index 000000000000..47f2b9f238d1 --- /dev/null +++ b/etc/utils/package.json @@ -0,0 +1,12 @@ +{ + "name": "utils", + "version": "0.1.0", + "main": "src/index.ts", + "license": "MIT", + "scripts": { + "build": "tsc" + }, + "dependencies": { + "chalk": "^4.0.0" + } +} diff --git a/etc/utils/src/index.ts b/etc/utils/src/index.ts new file mode 100644 index 000000000000..38d980cb1509 --- /dev/null +++ b/etc/utils/src/index.ts @@ -0,0 +1,178 @@ +import { exec as _exec, spawn as _spawn } from 'child_process'; +import { promisify } from 'util'; +import fs from 'fs'; +import readline from 'readline'; +import chalk from 'chalk'; + +export type { ChildProcess } from 'child_process'; + +const IGNORED_DIRS = [ + 'target', + 'node_modules', + 'volumes', + 'build', + 'dist', + '.git', + 'generated', + 'grafonnet-lib', + 'prettier-config', + 'lint-config', + 'cache', + 'artifacts', + 'typechain', + 'binaryen', + 'system-contracts', + 'artifacts-zk', + 'cache-zk', + // Ignore directories with OZ and forge submodules. + 'contracts/l1-contracts/lib' +]; +const IGNORED_FILES = ['KeysWithPlonkVerifier.sol', 'TokenInit.sol', '.tslintrc.js', '.prettierrc.js']; + +// async executor of shell commands +// spawns a new shell and can execute arbitrary commands, like "ls -la | grep .env" +// returns { stdout, stderr } +const promisified = promisify(_exec); +export function exec(command: string) { + command = command.replace(/\n/g, ' '); + return promisified(command); +} + +// executes a command in a new shell +// but pipes data to parent's stdout/stderr +export function spawn(command: string) { + command = command.replace(/\n/g, ' '); + const child = _spawn(command, { stdio: 'inherit', shell: true }); + return new Promise((resolve, reject) => { + child.on('error', reject); + child.on('close', (code) => { + code == 0 ? 
resolve(code) : reject(`Child process exited with code ${code}`); + }); + }); +} + +// executes a command in background and returns a child process handle +// by default pipes data to parent's stdio but this can be overridden +export function background(command: string, stdio: any = 'inherit') { + command = command.replace(/\n/g, ' '); + return _spawn(command, { stdio: stdio, shell: true, detached: true }); +} + +export async function confirmAction() { + if (process.env.ZKSYNC_ACTION == 'dont_ask') return; + const rl = readline.createInterface({ + input: process.stdin, + output: process.stdout + }); + const input = await new Promise((resolve) => { + rl.question( + 'Dangerous action! (set ZKSYNC_ACTION=dont_ask to always allow)\n' + + `Type environment name (${process.env.ZKSYNC_ENV}) to confirm: `, + (input) => { + rl.close(); + resolve(input); + } + ); + }); + if (input !== process.env.ZKSYNC_ENV) { + throw new Error('[aborted] action was not confirmed'); + } +} + +export async function sleep(seconds: number) { + return new Promise((resolve) => setTimeout(resolve, seconds * 1000)); +} + +// the sync version of sleep is needed +// for process.on('exit') hook, which MUST be synchronous. +// no idea why it has to be so ugly, though +export function sleepSync(seconds: number) { + Atomics.wait(new Int32Array(new SharedArrayBuffer(4)), 0, 0, seconds * 1000); +} + +export async function allowFail<T>(promise: Promise<T>) { + try { + return await promise; + } catch { + return null; + } +} + +export function allowFailSync<T>(func: () => T) { + try { + return func(); + } catch { + return null; + } +} + +export function replaceInFile(filename: string, before: string | RegExp, after: string) { + before = new RegExp(before, 'g'); + modifyFile(filename, (source) => source.replace(before, after)); +} + +// performs an operation on the content of `filename` +export function modifyFile(filename: string, modifier: (s: string) => string) { + const source = fs.readFileSync(filename).toString(); + fs.writeFileSync(filename, modifier(source)); +} + +// If you wonder why this is written so obscurely through find and not through .prettierignore and globs, +// it's because prettier *first* expands globs and *then* applies ignore rules, which leads to an error +// because it can't expand into volumes folder with not enough access rights, even if it is ignored. +// +// And if we let the shell handle glob expansion instead of prettier, `shopt -s globstar` will be +// disabled (because yarn spawns its own shell that does not load .bashrc) and thus glob patterns +// with double-stars will not work +export async function getUnignoredFiles(extension: string) { + const root = extension == 'sol' ? 'contracts' : '.'; + const ignored_dirs = IGNORED_DIRS.map((dir) => `-o -path '*/${dir}' -prune`).join(' '); + const ignored_files = IGNORED_FILES.map((file) => `-a ! 
-name '${file}'`).join(' '); + const { stdout: files } = await exec( + `find ${root} -type f -name '*.${extension}' ${ignored_files} -print ${ignored_dirs}` + ); + + return files; +} + +export function web3Url() { + return process.env.ETH_CLIENT_WEB3_URL!; +} + +export async function readZkSyncAbi() { + const zksync = process.env.ZKSYNC_HOME; + const path = `${zksync}/contracts/l1-contracts/artifacts/contracts/state-transition/chain-interfaces/IZkSyncHyperchain.sol/IZkSyncHyperchain.json`; + + const fileContent = (await fs.promises.readFile(path)).toString(); + + const abi = JSON.parse(fileContent).abi; + + return abi; +} + +const entry = chalk.bold.yellow; +const announce = chalk.yellow; +const success = chalk.green; +const timestamp = chalk.grey; + +// Wrapper that writes an announcement and completion notes for each executed task. +export const announced = async (fn: string, promise: Promise<void> | void) => { + const announceLine = `${entry('>')} ${announce(fn)}`; + const separator = '-'.repeat(fn.length + 2); // 2 is the length of "> ". + console.log(`\n` + separator); // So it's easier to see each individual step in the console. + console.log(announceLine); + + const start = new Date().getTime(); + // The actual execution part + await promise; + + const time = new Date().getTime() - start; + const successLine = `${success('✔')} ${fn} done`; + const timestampLine = timestamp(`(${time}ms)`); + console.log(`${successLine} ${timestampLine}`); +}; + +export function unpackStringSemVer(semver: string): [number, number, number] { + const [major, minor, patch] = semver.split('.'); + return [parseInt(major), parseInt(minor), parseInt(patch)]; +} diff --git a/infrastructure/protocol-upgrade/src/crypto/deployer.ts b/infrastructure/protocol-upgrade/src/crypto/deployer.ts index 98043d0c4498..685f64c46656 100644 --- a/infrastructure/protocol-upgrade/src/crypto/deployer.ts +++ b/infrastructure/protocol-upgrade/src/crypto/deployer.ts @@ -1,4 +1,4 @@ -import { spawn } from 'zk/build/utils'; +import { spawn } from 'utils'; export async function deployVerifier( l1Rpc: string, diff --git a/infrastructure/protocol-upgrade/src/hyperchain-upgrade.ts b/infrastructure/protocol-upgrade/src/hyperchain-upgrade.ts index 84bae2e891ca..102c67e509fe 100644 --- a/infrastructure/protocol-upgrade/src/hyperchain-upgrade.ts +++ b/infrastructure/protocol-upgrade/src/hyperchain-upgrade.ts @@ -1,5 +1,5 @@ import { Command } from 'commander'; -import { spawn } from 'zk/build/utils'; +import { spawn } from 'utils'; import fs from 'fs'; import { ethers } from 'ethers'; diff --git a/infrastructure/protocol-upgrade/src/l1upgrade/deployer.ts b/infrastructure/protocol-upgrade/src/l1upgrade/deployer.ts index ba639cfd89b8..b7595aaea9d3 100644 --- a/infrastructure/protocol-upgrade/src/l1upgrade/deployer.ts +++ b/infrastructure/protocol-upgrade/src/l1upgrade/deployer.ts @@ -1,4 +1,4 @@ -import { spawn } from 'zk/build/utils'; +import { spawn } from 'utils'; export async function callFacetDeployer( l1RpcProvider: string, diff --git a/infrastructure/protocol-upgrade/src/l1upgrade/facets.ts b/infrastructure/protocol-upgrade/src/l1upgrade/facets.ts index 4f89ec477a7b..5aec166968f0 100644 --- a/infrastructure/protocol-upgrade/src/l1upgrade/facets.ts +++ b/infrastructure/protocol-upgrade/src/l1upgrade/facets.ts @@ -1,6 +1,6 @@ import fs from 'fs'; import { Command } from 'commander'; -import { spawn } from 'zk/build/utils'; +import { spawn } from 'utils'; import { getFacetCutsFileName, getFacetsFileName, getUpgradePath } from '../utils'; import 
{ callFacetDeployer } from './deployer'; diff --git a/infrastructure/protocol-upgrade/src/l2upgrade/deployer.ts b/infrastructure/protocol-upgrade/src/l2upgrade/deployer.ts index d72c8c8e46f4..e3b5f364efd9 100644 --- a/infrastructure/protocol-upgrade/src/l2upgrade/deployer.ts +++ b/infrastructure/protocol-upgrade/src/l2upgrade/deployer.ts @@ -1,4 +1,4 @@ -import { spawn } from 'zk/build/utils'; +import { spawn } from 'utils'; export async function callSystemContractDeployer( l1RpcProvider: string, diff --git a/infrastructure/protocol-upgrade/src/transaction.ts b/infrastructure/protocol-upgrade/src/transaction.ts index dc9d5d190512..604e98ee3bf6 100644 --- a/infrastructure/protocol-upgrade/src/transaction.ts +++ b/infrastructure/protocol-upgrade/src/transaction.ts @@ -23,7 +23,7 @@ import { } from './utils'; import fs from 'fs'; import { Command } from 'commander'; -import { web3Url } from 'zk/build/utils'; +import { web3Url } from 'utils'; import * as path from 'path'; const testConfigPath = path.join(process.env.ZKSYNC_HOME as string, `etc/test_config/constant`); diff --git a/package.json b/package.json index cdbc8acee00e..51f7a8c22453 100644 --- a/package.json +++ b/package.json @@ -11,6 +11,7 @@ "contracts/system-contracts", "etc/contracts-test-data", "etc/ERC20", + "etc/utils", "infrastructure/zk", "infrastructure/local-setup-preparation", "core/tests/revert-test", diff --git a/zk_toolbox/crates/zk_supervisor/src/commands/integration_tests.rs b/zk_toolbox/crates/zk_supervisor/src/commands/integration_tests.rs index 525f476c179a..47a886983759 100644 --- a/zk_toolbox/crates/zk_supervisor/src/commands/integration_tests.rs +++ b/zk_toolbox/crates/zk_supervisor/src/commands/integration_tests.rs @@ -3,8 +3,8 @@ use config::EcosystemConfig; use xshell::{cmd, Shell}; use crate::messages::{ - MSG_INTEGRATION_TESTS_BUILDING_CONTRACTS, MSG_INTEGRATION_TESTS_RUN_INFO, - MSG_INTEGRATION_TESTS_RUN_SUCCESS, + MSG_INTEGRATION_TESTS_BUILDING_CONTRACTS, MSG_INTEGRATION_TESTS_BUILDING_DEPENDENCIES, + MSG_INTEGRATION_TESTS_RUN_INFO, MSG_INTEGRATION_TESTS_RUN_SUCCESS, }; const TS_INTEGRATION_PATH: &str = "core/tests/ts-integration"; @@ -16,6 +16,7 @@ pub fn run(shell: &Shell) -> anyhow::Result<()> { logger::info(MSG_INTEGRATION_TESTS_RUN_INFO); + build_repository(shell, &ecosystem_config)?; build_test_contracts(shell, &ecosystem_config)?; Cmd::new( @@ -30,6 +31,16 @@ pub fn run(shell: &Shell) -> anyhow::Result<()> { Ok(()) } +fn build_repository(shell: &Shell, ecosystem_config: &EcosystemConfig) -> anyhow::Result<()> { + let _dir_guard = shell.push_dir(&ecosystem_config.link_to_code); + let spinner = Spinner::new(MSG_INTEGRATION_TESTS_BUILDING_DEPENDENCIES); + + Cmd::new(cmd!(shell, "yarn install --frozen-lockfile")).run()?; + + spinner.finish(); + Ok(()) +} + fn build_test_contracts(shell: &Shell, ecosystem_config: &EcosystemConfig) -> anyhow::Result<()> { let spinner = Spinner::new(MSG_INTEGRATION_TESTS_BUILDING_CONTRACTS); diff --git a/zk_toolbox/crates/zk_supervisor/src/messages.rs b/zk_toolbox/crates/zk_supervisor/src/messages.rs index 6858d5197a06..31bdb0eb9b1d 100644 --- a/zk_toolbox/crates/zk_supervisor/src/messages.rs +++ b/zk_toolbox/crates/zk_supervisor/src/messages.rs @@ -65,4 +65,6 @@ pub(super) const MSG_DATABASE_NEW_MIGRATION_SUCCESS: &str = "Migration created s // Integration tests related messages pub(super) const MSG_INTEGRATION_TESTS_RUN_INFO: &str = "Running integration tests"; pub(super) const MSG_INTEGRATION_TESTS_RUN_SUCCESS: &str = "Integration tests ran successfully"; +pub(super) 
const MSG_INTEGRATION_TESTS_BUILDING_DEPENDENCIES: &str = + "Building repository dependencies..."; pub(super) const MSG_INTEGRATION_TESTS_BUILDING_CONTRACTS: &str = "Building test contracts..."; From 3e9f40d0d213ce8a2c9c0d02b5462bf3ef04d7e1 Mon Sep 17 00:00:00 2001 From: aon <21188659+aon@users.noreply.github.com> Date: Wed, 5 Jun 2024 14:21:02 -0300 Subject: [PATCH 5/8] fix: remove utils from zk infrastructure --- infrastructure/zk/src/clean.ts | 2 +- infrastructure/zk/src/compiler.ts | 2 +- infrastructure/zk/src/config.ts | 2 +- infrastructure/zk/src/contract.ts | 2 +- infrastructure/zk/src/contract_verifier.ts | 2 +- infrastructure/zk/src/database.ts | 2 +- infrastructure/zk/src/docker.ts | 2 +- infrastructure/zk/src/down.ts | 2 +- infrastructure/zk/src/env.ts | 2 +- infrastructure/zk/src/fmt.ts | 2 +- infrastructure/zk/src/format_sql.ts | 2 +- infrastructure/zk/src/hyperchain_wizard.ts | 9 +- infrastructure/zk/src/init.ts | 31 +++- infrastructure/zk/src/lint.ts | 2 +- infrastructure/zk/src/prover_setup.ts | 2 +- infrastructure/zk/src/reinit.ts | 7 +- infrastructure/zk/src/run.ts | 2 +- infrastructure/zk/src/server.ts | 2 +- infrastructure/zk/src/setup_en.ts | 2 +- infrastructure/zk/src/spellcheck.ts | 2 +- infrastructure/zk/src/test/integration.ts | 2 +- infrastructure/zk/src/test/test.ts | 2 +- infrastructure/zk/src/up.ts | 10 +- infrastructure/zk/src/utils.ts | 178 --------------------- 24 files changed, 63 insertions(+), 210 deletions(-) delete mode 100644 infrastructure/zk/src/utils.ts diff --git a/infrastructure/zk/src/clean.ts b/infrastructure/zk/src/clean.ts index 6d86b0daa620..be62de372bac 100644 --- a/infrastructure/zk/src/clean.ts +++ b/infrastructure/zk/src/clean.ts @@ -1,7 +1,7 @@ import { Command } from 'commander'; import * as fs from 'fs'; import * as path from 'path'; -import { confirmAction } from './utils'; +import { confirmAction } from 'utils'; import * as down from './down'; export function clean(path: string) { diff --git a/infrastructure/zk/src/compiler.ts b/infrastructure/zk/src/compiler.ts index 271bfdcd0be3..9a90154909ba 100644 --- a/infrastructure/zk/src/compiler.ts +++ b/infrastructure/zk/src/compiler.ts @@ -1,5 +1,5 @@ import { Command } from 'commander'; -import * as utils from './utils'; +import * as utils from 'utils'; export async function compileTestContracts() { await utils.spawn('yarn workspace contracts-test-data build'); diff --git a/infrastructure/zk/src/config.ts b/infrastructure/zk/src/config.ts index 3aa331a752be..5ca7fb1ce59f 100644 --- a/infrastructure/zk/src/config.ts +++ b/infrastructure/zk/src/config.ts @@ -5,7 +5,7 @@ import deepExtend from 'deep-extend'; import * as env from './env'; import path from 'path'; import dotenv from 'dotenv'; -import { unpackStringSemVer } from './utils'; +import { unpackStringSemVer } from 'utils'; function loadConfigFile(configPath: string, stack: string[] = []) { if (stack.includes(configPath)) { diff --git a/infrastructure/zk/src/contract.ts b/infrastructure/zk/src/contract.ts index bd6173dd6c8c..a76da74b01ef 100644 --- a/infrastructure/zk/src/contract.ts +++ b/infrastructure/zk/src/contract.ts @@ -1,5 +1,5 @@ import { Command } from 'commander'; -import * as utils from './utils'; +import * as utils from 'utils'; import * as env from './env'; import fs from 'fs'; diff --git a/infrastructure/zk/src/contract_verifier.ts b/infrastructure/zk/src/contract_verifier.ts index 507834866bb3..a70296acc1b8 100644 --- a/infrastructure/zk/src/contract_verifier.ts +++ b/infrastructure/zk/src/contract_verifier.ts @@ -1,5 
+1,5 @@ import { Command } from 'commander'; -import * as utils from './utils'; +import * as utils from 'utils'; export async function contractVerifier() { await utils.spawn(`cargo run --bin zksync_contract_verifier --release`); diff --git a/infrastructure/zk/src/database.ts b/infrastructure/zk/src/database.ts index 8a1e43af40d6..2d11bca447d2 100644 --- a/infrastructure/zk/src/database.ts +++ b/infrastructure/zk/src/database.ts @@ -1,5 +1,5 @@ import { Command } from 'commander'; -import * as utils from './utils'; +import * as utils from 'utils'; export async function reset(opts: DbOpts) { await utils.confirmAction(); diff --git a/infrastructure/zk/src/docker.ts b/infrastructure/zk/src/docker.ts index 85bc0dbc72e0..7f42fca1d022 100644 --- a/infrastructure/zk/src/docker.ts +++ b/infrastructure/zk/src/docker.ts @@ -1,5 +1,5 @@ import { Command } from 'commander'; -import * as utils from './utils'; +import * as utils from 'utils'; const IMAGES = [ 'server-v2', diff --git a/infrastructure/zk/src/down.ts b/infrastructure/zk/src/down.ts index 24dd98f8d5f5..64b77673395f 100644 --- a/infrastructure/zk/src/down.ts +++ b/infrastructure/zk/src/down.ts @@ -1,5 +1,5 @@ import { Command } from 'commander'; -import * as utils from './utils'; +import * as utils from 'utils'; import * as fs from 'fs'; export async function down() { diff --git a/infrastructure/zk/src/env.ts b/infrastructure/zk/src/env.ts index 3fecf466f12a..d6852640619e 100644 --- a/infrastructure/zk/src/env.ts +++ b/infrastructure/zk/src/env.ts @@ -1,7 +1,7 @@ import { Command } from 'commander'; import fs from 'fs'; import dotenv from 'dotenv'; -import * as utils from './utils'; +import * as utils from 'utils'; import * as config from './config'; export const getAvailableEnvsFromFiles = () => { diff --git a/infrastructure/zk/src/fmt.ts b/infrastructure/zk/src/fmt.ts index 97be5c571d6f..e58cdbc8e547 100644 --- a/infrastructure/zk/src/fmt.ts +++ b/infrastructure/zk/src/fmt.ts @@ -1,6 +1,6 @@ import { Command } from 'commander'; import { formatSqlxQueries } from './format_sql'; -import * as utils from './utils'; +import * as utils from 'utils'; const EXTENSIONS = ['ts', 'md', 'js']; const CONFIG_PATH = 'etc/prettier-config'; diff --git a/infrastructure/zk/src/format_sql.ts b/infrastructure/zk/src/format_sql.ts index ba1bf263e4cc..7f18d4a46388 100644 --- a/infrastructure/zk/src/format_sql.ts +++ b/infrastructure/zk/src/format_sql.ts @@ -1,5 +1,5 @@ import * as fs from 'fs'; -import * as utils from './utils'; +import * as utils from 'utils'; import { format } from 'sql-formatter'; function formatQuery(query: string) { diff --git a/infrastructure/zk/src/hyperchain_wizard.ts b/infrastructure/zk/src/hyperchain_wizard.ts index 04e9db2a4147..ba4c85454563 100644 --- a/infrastructure/zk/src/hyperchain_wizard.ts +++ b/infrastructure/zk/src/hyperchain_wizard.ts @@ -13,7 +13,7 @@ import fetch from 'node-fetch'; import { up } from './up'; import * as Handlebars from 'handlebars'; import { ProverType, setupProver } from './prover_setup'; -import { announced } from './utils'; +import { announced } from 'utils'; import { DeploymentMode } from './contract'; const title = chalk.blueBright; @@ -49,7 +49,12 @@ export interface BasePromptOptions { async function initHyperchain(envName: string, runObservability: boolean, validiumMode: boolean) { await announced('Initializing hyperchain creation', setupConfiguration(envName, runObservability)); let deploymentMode = validiumMode !== undefined ? 
DeploymentMode.Validium : DeploymentMode.Rollup; - await init.initHyperCmdAction({ skipSetupCompletely: false, bumpChainId: true, runObservability, deploymentMode }); + await init.initHyperCmdAction({ + skipSetupCompletely: false, + bumpChainId: true, + runObservability, + deploymentMode + }); // TODO: EVM:577 fix hyperchain wizard env.mergeInitToEnv(); diff --git a/infrastructure/zk/src/init.ts b/infrastructure/zk/src/init.ts index d6e30e415e6f..9ed6e178e51e 100644 --- a/infrastructure/zk/src/init.ts +++ b/infrastructure/zk/src/init.ts @@ -1,7 +1,7 @@ import { Command } from 'commander'; -import * as utils from './utils'; -import { announced } from './utils'; +import * as utils from 'utils'; +import { announced } from 'utils'; import { clean } from './clean'; import * as compiler from './compiler'; @@ -161,7 +161,12 @@ export const initDevCmdAction = async ({ await makeEraChainIdSameAsCurrent(); } let deploymentMode = validiumMode !== undefined ? contract.DeploymentMode.Validium : contract.DeploymentMode.Rollup; - await initSetup({ skipEnvSetup, skipSubmodulesCheckout, runObservability, deploymentMode }); + await initSetup({ + skipEnvSetup, + skipSubmodulesCheckout, + runObservability, + deploymentMode + }); if (!skipVerifier) { await deployVerifier(); } @@ -170,7 +175,12 @@ export const initDevCmdAction = async ({ } await initBridgehubStateTransition(); await initDatabase(); - await initHyperchain({ includePaymaster: true, baseTokenName, localLegacyBridgeTesting, deploymentMode }); + await initHyperchain({ + includePaymaster: true, + baseTokenName, + localLegacyBridgeTesting, + deploymentMode + }); if (localLegacyBridgeTesting) { await makeEraAddressSameAsCurrent(); } @@ -214,10 +224,19 @@ export const initHyperCmdAction = async ({ config.bumpChainId(); } if (!skipSetupCompletely) { - await initSetup({ skipEnvSetup: false, skipSubmodulesCheckout: false, runObservability, deploymentMode }); + await initSetup({ + skipEnvSetup: false, + skipSubmodulesCheckout: false, + runObservability, + deploymentMode + }); } await initDatabase(); - await initHyperchain({ includePaymaster: true, baseTokenName, deploymentMode }); + await initHyperchain({ + includePaymaster: true, + baseTokenName, + deploymentMode + }); }; // ########################### Command Definitions ########################### diff --git a/infrastructure/zk/src/lint.ts b/infrastructure/zk/src/lint.ts index fcba41110fb4..84c2c4535c59 100644 --- a/infrastructure/zk/src/lint.ts +++ b/infrastructure/zk/src/lint.ts @@ -1,5 +1,5 @@ import { Command } from 'commander'; -import * as utils from './utils'; +import * as utils from 'utils'; // Note that `rust` is not noted here, as clippy isn't run via `yarn`. // `rust` option is still supported though. 
diff --git a/infrastructure/zk/src/prover_setup.ts b/infrastructure/zk/src/prover_setup.ts index 361ae44b8fa0..5a17c9683742 100644 --- a/infrastructure/zk/src/prover_setup.ts +++ b/infrastructure/zk/src/prover_setup.ts @@ -1,5 +1,5 @@ import { Command } from 'commander'; -import * as utils from './utils'; +import * as utils from 'utils'; import fs from 'fs'; import enquirer from 'enquirer'; import { BasePromptOptions } from './hyperchain_wizard'; diff --git a/infrastructure/zk/src/reinit.ts b/infrastructure/zk/src/reinit.ts index 8535af8e05a8..65f0b73d6540 100644 --- a/infrastructure/zk/src/reinit.ts +++ b/infrastructure/zk/src/reinit.ts @@ -1,7 +1,7 @@ import { Command } from 'commander'; import { up } from './up'; -import { announced } from './utils'; +import { announced } from 'utils'; import { initDevCmdAction, initHyperCmdAction } from './init'; import { DeploymentMode } from './contract'; @@ -20,7 +20,10 @@ const reinitDevCmdAction = async (): Promise<void> => { }); }; -type ReinitHyperCmdActionOptions = { baseTokenName?: string; validiumMode: boolean }; +type ReinitHyperCmdActionOptions = { + baseTokenName?: string; + validiumMode: boolean; +}; const reinitHyperCmdAction = async ({ baseTokenName, validiumMode }: ReinitHyperCmdActionOptions): Promise<void> => { // skipSetupCompletely, because we only want to compile // bumpChainId, because we want to reinitialize hyperchain with a new chain id diff --git a/infrastructure/zk/src/run.ts b/infrastructure/zk/src/run.ts index f0c4994756c1..02e3a15e3c46 100644 --- a/infrastructure/zk/src/run.ts +++ b/infrastructure/zk/src/run.ts @@ -1,5 +1,5 @@ import { Command } from 'commander'; -import * as utils from './utils'; +import * as utils from 'utils'; import { Wallet } from 'ethers'; import fs from 'fs'; import * as path from 'path'; diff --git a/infrastructure/zk/src/server.ts b/infrastructure/zk/src/server.ts index 923097f5c604..872aff2eb5c3 100644 --- a/infrastructure/zk/src/server.ts +++ b/infrastructure/zk/src/server.ts @@ -1,5 +1,5 @@ import { Command } from 'commander'; -import * as utils from './utils'; +import * as utils from 'utils'; import { clean } from './clean'; import fs from 'fs'; import * as path from 'path'; diff --git a/infrastructure/zk/src/setup_en.ts b/infrastructure/zk/src/setup_en.ts index 81185ad0cc6b..3d92b326251f 100644 --- a/infrastructure/zk/src/setup_en.ts +++ b/infrastructure/zk/src/setup_en.ts @@ -6,7 +6,7 @@ import fs from 'fs'; import path from 'path'; import { set as setEnv } from './env'; import { setup as setupDb } from './database'; -import * as utils from './utils'; +import * as utils from 'utils'; enum Environment { Mainnet = 'mainnet', diff --git a/infrastructure/zk/src/spellcheck.ts b/infrastructure/zk/src/spellcheck.ts index 4f6553e2c654..8bf78869788d 100644 --- a/infrastructure/zk/src/spellcheck.ts +++ b/infrastructure/zk/src/spellcheck.ts @@ -1,5 +1,5 @@ import { Command } from 'commander'; -import * as utils from './utils'; +import * as utils from 'utils'; export async function runSpellCheck(pattern: string, useCargo: boolean, useCSpell: boolean) { // Default commands for cSpell and cargo spellcheck diff --git a/infrastructure/zk/src/test/integration.ts b/infrastructure/zk/src/test/integration.ts index 08582a553c79..386ffbef6304 100644 --- a/infrastructure/zk/src/test/integration.ts +++ b/infrastructure/zk/src/test/integration.ts @@ -1,5 +1,5 @@ import { Command } from 'commander'; -import * as utils from '../utils'; +import * as utils from 'utils'; import * as config from '../config'; import deepExtend from 
'deep-extend'; diff --git a/infrastructure/zk/src/test/test.ts b/infrastructure/zk/src/test/test.ts index 2aa6fa971d47..2e3202051917 100644 --- a/infrastructure/zk/src/test/test.ts +++ b/infrastructure/zk/src/test/test.ts @@ -1,6 +1,6 @@ import chalk from 'chalk'; import { Command } from 'commander'; -import * as utils from '../utils'; +import * as utils from 'utils'; import * as integration from './integration'; import * as db from '../database'; diff --git a/infrastructure/zk/src/up.ts b/infrastructure/zk/src/up.ts index 2e917b3bea26..6f49dd7d05e1 100644 --- a/infrastructure/zk/src/up.ts +++ b/infrastructure/zk/src/up.ts @@ -1,11 +1,15 @@ import { Command } from 'commander'; -import * as utils from './utils'; +import * as utils from 'utils'; import fs from 'fs'; // Make sure that the volumes exists before starting the containers. export function createVolumes() { - fs.mkdirSync(`${process.env.ZKSYNC_HOME}/volumes/reth/data`, { recursive: true }); - fs.mkdirSync(`${process.env.ZKSYNC_HOME}/volumes/postgres`, { recursive: true }); + fs.mkdirSync(`${process.env.ZKSYNC_HOME}/volumes/reth/data`, { + recursive: true + }); + fs.mkdirSync(`${process.env.ZKSYNC_HOME}/volumes/postgres`, { + recursive: true + }); } export async function up(runObservability: boolean, composeFile?: string) { diff --git a/infrastructure/zk/src/utils.ts b/infrastructure/zk/src/utils.ts deleted file mode 100644 index 38d980cb1509..000000000000 --- a/infrastructure/zk/src/utils.ts +++ /dev/null @@ -1,178 +0,0 @@ -import { exec as _exec, spawn as _spawn } from 'child_process'; -import { promisify } from 'util'; -import fs from 'fs'; -import readline from 'readline'; -import chalk from 'chalk'; - -export type { ChildProcess } from 'child_process'; - -const IGNORED_DIRS = [ - 'target', - 'node_modules', - 'volumes', - 'build', - 'dist', - '.git', - 'generated', - 'grafonnet-lib', - 'prettier-config', - 'lint-config', - 'cache', - 'artifacts', - 'typechain', - 'binaryen', - 'system-contracts', - 'artifacts-zk', - 'cache-zk', - // Ignore directories with OZ and forge submodules. - 'contracts/l1-contracts/lib' -]; -const IGNORED_FILES = ['KeysWithPlonkVerifier.sol', 'TokenInit.sol', '.tslintrc.js', '.prettierrc.js']; - -// async executor of shell commands -// spawns a new shell and can execute arbitrary commands, like "ls -la | grep .env" -// returns { stdout, stderr } -const promisified = promisify(_exec); -export function exec(command: string) { - command = command.replace(/\n/g, ' '); - return promisified(command); -} - -// executes a command in a new shell -// but pipes data to parent's stdout/stderr -export function spawn(command: string) { - command = command.replace(/\n/g, ' '); - const child = _spawn(command, { stdio: 'inherit', shell: true }); - return new Promise((resolve, reject) => { - child.on('error', reject); - child.on('close', (code) => { - code == 0 ? 
resolve(code) : reject(`Child process exited with code ${code}`); - }); - }); -} - -// executes a command in background and returns a child process handle -// by default pipes data to parent's stdio but this can be overridden -export function background(command: string, stdio: any = 'inherit') { - command = command.replace(/\n/g, ' '); - return _spawn(command, { stdio: stdio, shell: true, detached: true }); -} - -export async function confirmAction() { - if (process.env.ZKSYNC_ACTION == 'dont_ask') return; - const rl = readline.createInterface({ - input: process.stdin, - output: process.stdout - }); - const input = await new Promise((resolve) => { - rl.question( - 'Dangerous action! (set ZKSYNC_ACTION=dont_ask to always allow)\n' + - `Type environment name (${process.env.ZKSYNC_ENV}) to confirm: `, - (input) => { - rl.close(); - resolve(input); - } - ); - }); - if (input !== process.env.ZKSYNC_ENV) { - throw new Error('[aborted] action was not confirmed'); - } -} - -export async function sleep(seconds: number) { - return new Promise((resolve) => setTimeout(resolve, seconds * 1000)); -} - -// the sync version of sleep is needed -// for process.on('exit') hook, which MUST be synchronous. -// no idea why it has to be so ugly, though -export function sleepSync(seconds: number) { - Atomics.wait(new Int32Array(new SharedArrayBuffer(4)), 0, 0, seconds * 1000); -} - -export async function allowFail<T>(promise: Promise<T>) { - try { - return await promise; - } catch { - return null; - } -} - -export function allowFailSync<T>(func: () => T) { - try { - return func(); - } catch { - return null; - } -} - -export function replaceInFile(filename: string, before: string | RegExp, after: string) { - before = new RegExp(before, 'g'); - modifyFile(filename, (source) => source.replace(before, after)); -} - -// performs an operation on the content of `filename` -export function modifyFile(filename: string, modifier: (s: string) => string) { - const source = fs.readFileSync(filename).toString(); - fs.writeFileSync(filename, modifier(source)); -} - -// If you wonder why this is written so obscurely through find and not through .prettierignore and globs, -// it's because prettier *first* expands globs and *then* applies ignore rules, which leads to an error -// because it can't expand into volumes folder with not enough access rights, even if it is ignored. -// -// And if we let the shell handle glob expansion instead of prettier, `shopt -s globstar` will be -// disabled (because yarn spawns its own shell that does not load .bashrc) and thus glob patterns -// with double-stars will not work -export async function getUnignoredFiles(extension: string) { - const root = extension == 'sol' ? 'contracts' : '.'; - const ignored_dirs = IGNORED_DIRS.map((dir) => `-o -path '*/${dir}' -prune`).join(' '); - const ignored_files = IGNORED_FILES.map((file) => `-a ! 
-name '${file}'`).join(' '); - const { stdout: files } = await exec( - `find ${root} -type f -name '*.${extension}' ${ignored_files} -print ${ignored_dirs}` - ); - - return files; -} - -export function web3Url() { - return process.env.ETH_CLIENT_WEB3_URL!; -} - -export async function readZkSyncAbi() { - const zksync = process.env.ZKSYNC_HOME; - const path = `${zksync}/contracts/l1-contracts/artifacts/contracts/state-transition/chain-interfaces/IZkSyncHyperchain.sol/IZkSyncHyperchain.json`; - - const fileContent = (await fs.promises.readFile(path)).toString(); - - const abi = JSON.parse(fileContent).abi; - - return abi; -} - -const entry = chalk.bold.yellow; -const announce = chalk.yellow; -const success = chalk.green; -const timestamp = chalk.grey; - -// Wrapper that writes an announcement and completion notes for each executed task. -export const announced = async (fn: string, promise: Promise<void> | void) => { - const announceLine = `${entry('>')} ${announce(fn)}`; - const separator = '-'.repeat(fn.length + 2); // 2 is the length of "> ". - console.log(`\n` + separator); // So it's easier to see each individual step in the console. - console.log(announceLine); - - const start = new Date().getTime(); - // The actual execution part - await promise; - - const time = new Date().getTime() - start; - const successLine = `${success('✔')} ${fn} done`; - const timestampLine = timestamp(`(${time}ms)`); - console.log(`${successLine} ${timestampLine}`); -}; - -export function unpackStringSemVer(semver: string): [number, number, number] { - const [major, minor, patch] = semver.split('.'); - return [parseInt(major), parseInt(minor), parseInt(patch)]; -} From ad53750f5f992a94a0175c52e2e684b34a6a4b74 Mon Sep 17 00:00:00 2001 From: aon <21188659+aon@users.noreply.github.com> Date: Wed, 5 Jun 2024 14:46:05 -0300 Subject: [PATCH 6/8] fix: add utils build workflow --- bin/zk | 2 +- etc/utils/.gitignore | 1 + etc/utils/package.json | 3 ++- etc/utils/tsconfig.json | 15 +++++++++++++++ .../src/commands/integration_tests.rs | 1 + 5 files changed, 20 insertions(+), 2 deletions(-) create mode 100644 etc/utils/.gitignore create mode 100644 etc/utils/tsconfig.json diff --git a/bin/zk b/bin/zk index fec96763b788..868c4e338cdf 100755 --- a/bin/zk +++ b/bin/zk @@ -41,7 +41,7 @@ check_subdirectory check_yarn_version if [ -z "$1" ]; then cd $ZKSYNC_HOME - run_retried yarn install --frozen-lockfile && yarn zk build + run_retried yarn install --frozen-lockfile && yarn utils build && yarn zk build else # can't start this with yarn since it has quirks with `--` as an argument node -- $ZKSYNC_HOME/infrastructure/zk/build/index.js "$@" diff --git a/etc/utils/.gitignore b/etc/utils/.gitignore new file mode 100644 index 000000000000..796b96d1c402 --- /dev/null +++ b/etc/utils/.gitignore @@ -0,0 +1 @@ +/build diff --git a/etc/utils/package.json b/etc/utils/package.json index 47f2b9f238d1..6ce76330c8ea 100644 --- a/etc/utils/package.json +++ b/etc/utils/package.json @@ -1,8 +1,9 @@ { "name": "utils", "version": "0.1.0", - "main": "src/index.ts", "license": "MIT", + "main": "build/index.js", + "types": "build/index.d.ts", "scripts": { "build": "tsc" }, diff --git a/etc/utils/tsconfig.json b/etc/utils/tsconfig.json new file mode 100644 index 000000000000..f96df8d60edb --- /dev/null +++ b/etc/utils/tsconfig.json @@ -0,0 +1,15 @@ +{ + "compilerOptions": { + "target": "es2019", + "module": "commonjs", + "outDir": "build", + "strict": true, + "esModuleInterop": true, + "noEmitOnError": true, + "skipLibCheck": true, + "declaration": true + 
}, + "files": [ + "src/index.ts" + ] +} diff --git a/zk_toolbox/crates/zk_supervisor/src/commands/integration_tests.rs b/zk_toolbox/crates/zk_supervisor/src/commands/integration_tests.rs index 47a886983759..c5b1229dd2ce 100644 --- a/zk_toolbox/crates/zk_supervisor/src/commands/integration_tests.rs +++ b/zk_toolbox/crates/zk_supervisor/src/commands/integration_tests.rs @@ -36,6 +36,7 @@ fn build_repository(shell: &Shell, ecosystem_config: &EcosystemConfig) -> anyhow let spinner = Spinner::new(MSG_INTEGRATION_TESTS_BUILDING_DEPENDENCIES); Cmd::new(cmd!(shell, "yarn install --frozen-lockfile")).run()?; + Cmd::new(cmd!(shell, "yarn utils build")).run()?; spinner.finish(); Ok(()) From 2933e29092d62bdaf77c18c1b6efa425101c620f Mon Sep 17 00:00:00 2001 From: aon <21188659+aon@users.noreply.github.com> Date: Wed, 5 Jun 2024 14:51:19 -0300 Subject: [PATCH 7/8] fix: add missing script --- package.json | 1 + 1 file changed, 1 insertion(+) diff --git a/package.json b/package.json index 51f7a8c22453..b15675264d3e 100644 --- a/package.json +++ b/package.json @@ -33,6 +33,7 @@ "upgrade-test": "yarn workspace upgrade-test", "recovery-test": "yarn workspace recovery-test", "ts-integration": "yarn workspace ts-integration", + "utils": "yarn workspace utils", "zk": "yarn workspace zk" }, "devDependencies": { From bccdb3557f099f77299e81c9cb386a24d2c5d51e Mon Sep 17 00:00:00 2001 From: aon <21188659+aon@users.noreply.github.com> Date: Wed, 5 Jun 2024 15:21:40 -0300 Subject: [PATCH 8/8] fix: run zk_toolbox ci on core changes as well --- .github/workflows/ci.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.github/workflows/ci.yml b/.github/workflows/ci.yml index 31b388b4cfa1..881af2367d31 100644 --- a/.github/workflows/ci.yml +++ b/.github/workflows/ci.yml @@ -98,7 +98,7 @@ jobs: ci-for-zk-toolbox: needs: changed_files - if: ${{ (needs.changed_files.outputs.zk_toolbox == 'true' || needs.changed_files.outputs.all == 'true') && !contains(github.ref_name, 'release-please--branches') }} + if: ${{ (needs.changed_files.outputs.core == 'true' || needs.changed_files.outputs.zk_toolbox == 'true' || needs.changed_files.outputs.all == 'true') && !contains(github.ref_name, 'release-please--branches') }} name: CI for zk_toolbox uses: ./.github/workflows/ci-zk-toolbox-reusable.yml