diff --git a/.github/release-please/manifest.json b/.github/release-please/manifest.json index ffd9838d6c31..e714062266ea 100644 --- a/.github/release-please/manifest.json +++ b/.github/release-please/manifest.json @@ -1,5 +1,5 @@ { - "core": "24.20.0", + "core": "24.22.0", "prover": "16.4.0", "zk_toolbox": "0.1.2" } diff --git a/.github/workflows/build-prover-template.yml b/.github/workflows/build-prover-template.yml index 25bf14728dd6..7591c45b49e4 100644 --- a/.github/workflows/build-prover-template.yml +++ b/.github/workflows/build-prover-template.yml @@ -53,6 +53,7 @@ jobs: - prover-gpu-fri - witness-vector-generator - prover-fri-gateway + - prover-job-monitor - proof-fri-gpu-compressor outputs: protocol_version: ${{ steps.protocolversion.outputs.protocol_version }} @@ -163,7 +164,7 @@ jobs: run: | ci_run sccache --show-stats || true ci_run cat /tmp/sccache_log.txt || true - + copy-images: name: Copy images between docker registries needs: build-images @@ -197,4 +198,3 @@ jobs: docker buildx imagetools create \ --tag europe-docker.pkg.dev/matterlabs-infra/matterlabs-docker/${{ matrix.component }}:2.0-${{ needs.build-images.outputs.protocol_version }}-${{ inputs.image_tag_suffix }} \ us-docker.pkg.dev/matterlabs-infra/matterlabs-docker/${{ matrix.component }}:2.0-${{ needs.build-images.outputs.protocol_version }}-${{ inputs.image_tag_suffix }} - diff --git a/.github/workflows/build-witness-generator-template.yml b/.github/workflows/build-witness-generator-template.yml index a7139e5e0a8c..9c29297460d9 100644 --- a/.github/workflows/build-witness-generator-template.yml +++ b/.github/workflows/build-witness-generator-template.yml @@ -169,38 +169,4 @@ jobs: if: always() run: | ci_run sccache --show-stats || true - ci_run cat /tmp/sccache_log.txt || true - - copy-images: - name: Copy images between docker registries - needs: build-images - env: - IMAGE_TAG_SUFFIX: ${{ inputs.image_tag_suffix }} - PROTOCOL_VERSION: ${{ needs.build-images.outputs.protocol_version }} - runs-on: matterlabs-ci-runner - if: ${{ inputs.action == 'push' }} - strategy: - matrix: - component: - - witness-vector-generator - steps: - - name: Set up Docker Buildx - uses: docker/setup-buildx-action@v3 - - - name: Login to us-central1 GAR - run: | - gcloud auth print-access-token --lifetime=7200 --impersonate-service-account=gha-ci-runners@matterlabs-infra.iam.gserviceaccount.com | docker login -u oauth2accesstoken --password-stdin https://us-docker.pkg.dev - - - name: Login and push to Asia GAR - run: | - gcloud auth print-access-token --lifetime=7200 --impersonate-service-account=gha-ci-runners@matterlabs-infra.iam.gserviceaccount.com | docker login -u oauth2accesstoken --password-stdin https://asia-docker.pkg.dev - docker buildx imagetools create \ - --tag asia-docker.pkg.dev/matterlabs-infra/matterlabs-docker/${{ matrix.component }}:2.0-${{ needs.build-images.outputs.protocol_version }}-${{ inputs.image_tag_suffix }} \ - us-docker.pkg.dev/matterlabs-infra/matterlabs-docker/${{ matrix.component }}:2.0-${{ needs.build-images.outputs.protocol_version }}-${{ inputs.image_tag_suffix }} - - - name: Login and push to Europe GAR - run: | - gcloud auth print-access-token --lifetime=7200 --impersonate-service-account=gha-ci-runners@matterlabs-infra.iam.gserviceaccount.com | docker login -u oauth2accesstoken --password-stdin https://europe-docker.pkg.dev - docker buildx imagetools create \ - --tag europe-docker.pkg.dev/matterlabs-infra/matterlabs-docker/${{ matrix.component }}:2.0-${{ needs.build-images.outputs.protocol_version 
}}-${{ inputs.image_tag_suffix }} \ - us-docker.pkg.dev/matterlabs-infra/matterlabs-docker/${{ matrix.component }}:2.0-${{ needs.build-images.outputs.protocol_version }}-${{ inputs.image_tag_suffix }} + ci_run cat /tmp/sccache_log.txt || true \ No newline at end of file diff --git a/.github/workflows/ci-core-lint-reusable.yml b/.github/workflows/ci-core-lint-reusable.yml index 2fa6cde5fdeb..e7c8b5340194 100644 --- a/.github/workflows/ci-core-lint-reusable.yml +++ b/.github/workflows/ci-core-lint-reusable.yml @@ -19,23 +19,20 @@ jobs: - name: Start services run: | - mkdir -p ./volumes/postgres - run_retried docker compose pull zk postgres - docker compose up -d zk postgres + ci_localnet_up ci_run sccache --start-server - - name: Setup db + - name: Build run: | - ci_run zk - ci_run run_retried rustup show - ci_run zk db migrate + ci_run ./bin/zkt + ci_run yarn install + ci_run git config --global --add safe.directory /usr/src/zksync - name: Lints run: | - ci_run zk fmt --check - ci_run zk lint rust --check - ci_run zk lint toolbox --check - ci_run zk lint js --check - ci_run zk lint ts --check - ci_run zk lint md --check - ci_run zk db check-sqlx-data + ci_run zk_supervisor fmt --check + ci_run zk_supervisor lint -t md --check + ci_run zk_supervisor lint -t sol --check + ci_run zk_supervisor lint -t js --check + ci_run zk_supervisor lint -t ts --check + ci_run zk_supervisor lint -t rs --check diff --git a/.github/workflows/ci-core-reusable.yml b/.github/workflows/ci-core-reusable.yml index 1374b75515a3..56de305ca4b2 100644 --- a/.github/workflows/ci-core-reusable.yml +++ b/.github/workflows/ci-core-reusable.yml @@ -68,7 +68,7 @@ jobs: ci_run zk test rust # Benchmarks are not tested by `cargo nextest` unless specified explicitly, and even then `criterion` harness is incompatible # with how `cargo nextest` runs tests. Thus, we run criterion-based benchmark tests manually. - ci_run zk f cargo test --release -p vm-benchmark --bench criterion --bench fill_bootloader + ci_run zk f cargo test --release -p vm-benchmark --bench oneshot --bench batch # FIXME: support loadtest together with sync layer. 
loadtest: diff --git a/.github/workflows/ci-zk-toolbox-reusable.yml b/.github/workflows/ci-zk-toolbox-reusable.yml index 41b8a7d5dbc7..9470d074ce16 100644 --- a/.github/workflows/ci-zk-toolbox-reusable.yml +++ b/.github/workflows/ci-zk-toolbox-reusable.yml @@ -115,6 +115,10 @@ jobs: # --prover-db-url=postgres://postgres:notsecurepassword@postgres:5432 \ # --prover-db-name=zksync_prover_localhost_rollup + # - name: Check Database + # run: | + # ci_run zk_supervisor database check-sqlx-data + # - name: Run server # run: | # ci_run zk_inception server --ignore-prerequisites &>server.log & diff --git a/.github/workflows/release-test-stage.yml b/.github/workflows/release-test-stage.yml index 3f83d208f66c..9f921be78292 100644 --- a/.github/workflows/release-test-stage.yml +++ b/.github/workflows/release-test-stage.yml @@ -106,6 +106,20 @@ jobs: DOCKERHUB_USER: ${{ secrets.DOCKERHUB_USER }} DOCKERHUB_TOKEN: ${{ secrets.DOCKERHUB_TOKEN }} + build-push-witness-generator-image-avx512: + name: Build and push prover images with avx512 instructions + needs: [setup, changed_files] + uses: ./.github/workflows/build-witness-generator-template.yml + if: needs.changed_files.outputs.prover == 'true' || needs.changed_files.outputs.all == 'true' + with: + image_tag_suffix: ${{ needs.setup.outputs.image_tag_suffix }}-avx512 + ERA_BELLMAN_CUDA_RELEASE: ${{ vars.ERA_BELLMAN_CUDA_RELEASE }} + CUDA_ARCH: "60;70;75;89" + WITNESS_GENERATOR_RUST_FLAGS: "-Ctarget_feature=+avx512bw,+avx512cd,+avx512dq,+avx512f,+avx512vl" + secrets: + DOCKERHUB_USER: ${{ secrets.DOCKERHUB_USER }} + DOCKERHUB_TOKEN: ${{ secrets.DOCKERHUB_TOKEN }} + build-gar-prover-fri-gpu: name: Build GAR prover FRI GPU needs: [setup, build-push-prover-images] diff --git a/.github/workflows/vm-perf-comparison.yml b/.github/workflows/vm-perf-comparison.yml index 6dbaca40ae8b..7a2705f5bf18 100644 --- a/.github/workflows/vm-perf-comparison.yml +++ b/.github/workflows/vm-perf-comparison.yml @@ -1,4 +1,4 @@ -name: Compare VM perfomance to base branch +name: Compare VM performance to base branch on: pull_request: @@ -47,7 +47,7 @@ jobs: # ci_run zk # ci_run zk compiler system-contracts # ci_run cargo bench --package vm-benchmark --bench iai | tee base-iai - # ci_run cargo run --package vm-benchmark --release --bin instruction-counts | tee base-opcodes || touch base-opcodes + # ci_run cargo run --package vm-benchmark --release --bin instruction_counts | tee base-opcodes || touch base-opcodes # ci_run yarn workspace system-contracts clean # - name: checkout PR @@ -59,7 +59,7 @@ jobs: # ci_run zk # ci_run zk compiler system-contracts # ci_run cargo bench --package vm-benchmark --bench iai | tee pr-iai - # ci_run cargo run --package vm-benchmark --release --bin instruction-counts | tee pr-opcodes || touch pr-opcodes + # ci_run cargo run --package vm-benchmark --release --bin instruction_counts | tee pr-opcodes || touch pr-opcodes # EOF=$(dd if=/dev/urandom bs=15 count=1 status=none | base64) # echo "speedup<<$EOF" >> $GITHUB_OUTPUT diff --git a/.github/workflows/vm-perf-to-prometheus.yml b/.github/workflows/vm-perf-to-prometheus.yml index fce7ead2d696..3cfd4e4deb87 100644 --- a/.github/workflows/vm-perf-to-prometheus.yml +++ b/.github/workflows/vm-perf-to-prometheus.yml @@ -21,7 +21,7 @@ jobs: - name: setup-env run: | - echo PUSH_VM_BENCHMARKS_TO_PROMETHEUS=1 >> .env + echo BENCHMARK_PROMETHEUS_PUSHGATEWAY_URL=${{ secrets.BENCHMARK_PROMETHEUS_PUSHGATEWAY_URL }} >> .env echo ZKSYNC_HOME=$(pwd) >> $GITHUB_ENV echo $(pwd)/bin >> $GITHUB_PATH @@ -31,10 +31,12 @@ jobs: 
run_retried docker compose pull zk docker compose up -d zk ci_run zk - ci_run zk compiler system-contracts + ci_run zk compiler all - name: run benchmarks run: | - ci_run cargo bench --package vm-benchmark --bench diy_benchmark + ci_run cargo bench --package vm-benchmark --bench oneshot + # Run only benches with 1,000 transactions per batch to not spend too much time + ci_run cargo bench --package vm-benchmark --bench batch '/1000$' ci_run cargo bench --package vm-benchmark --bench iai | tee iai-result ci_run cargo run --package vm-benchmark --bin iai_results_to_prometheus --release < iai-result diff --git a/CODEOWNERS b/CODEOWNERS index 63094b333057..813cd396d2c2 100644 --- a/CODEOWNERS +++ b/CODEOWNERS @@ -1,4 +1,4 @@ -.github/release-please/** @RomanBrodetski @perekopskiy @Deniallugo @popzxc @EmilLuta -**/CHANGELOG.md @RomanBrodetski @perekopskiy @Deniallugo @popzxc @EmilLuta +.github/release-please/** @matter-labs/core-release-managers +**/CHANGELOG.md @matter-labs/core-release-managers CODEOWNERS @RomanBrodetski @perekopskiy @Deniallugo @popzxc .github/workflows/** @matter-labs/devops diff --git a/Cargo.lock b/Cargo.lock index 61c1f7285e66..10f89d30974b 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -881,12 +881,13 @@ checksum = "37b2a672a2cb129a2e41c10b1224bb368f9f37a2b16b612598138befd7b37eb5" [[package]] name = "cc" -version = "1.1.6" +version = "1.1.14" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2aba8f4e9906c7ce3c73463f62a7f0c65183ada1a2d47e397cc8810827f9694f" +checksum = "50d2eb3cd3d1bf4529e31c215ee6f93ec5a3d536d9f578f93d9d33ee19562932" dependencies = [ "jobserver", "libc", + "shlex", ] [[package]] @@ -3484,9 +3485,9 @@ dependencies = [ [[package]] name = "libsqlite3-sys" -version = "0.28.0" +version = "0.30.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0c10584274047cb335c23d3e61bcef8e323adae7c5c8c760540f73610177fc3f" +checksum = "2e99fb7a497b1e3339bc746195567ed8d3e24945ecd636e3619d20b9de9e9149" dependencies = [ "cc", "pkg-config", @@ -6091,9 +6092,9 @@ dependencies = [ [[package]] name = "sqlx" -version = "0.8.0" +version = "0.8.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "27144619c6e5802f1380337a209d2ac1c431002dd74c6e60aebff3c506dc4f0c" +checksum = "fcfa89bea9500db4a0d038513d7a060566bfc51d46d1c014847049a45cce85e8" dependencies = [ "sqlx-core", "sqlx-macros", @@ -6104,9 +6105,9 @@ dependencies = [ [[package]] name = "sqlx-core" -version = "0.8.0" +version = "0.8.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a999083c1af5b5d6c071d34a708a19ba3e02106ad82ef7bbd69f5e48266b613b" +checksum = "d06e2f2bd861719b1f3f0c7dbe1d80c30bf59e76cf019f07d9014ed7eefb8e08" dependencies = [ "atoi", "bigdecimal", @@ -6148,9 +6149,9 @@ dependencies = [ [[package]] name = "sqlx-macros" -version = "0.8.0" +version = "0.8.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a23217eb7d86c584b8cbe0337b9eacf12ab76fe7673c513141ec42565698bb88" +checksum = "2f998a9defdbd48ed005a89362bd40dd2117502f15294f61c8d47034107dbbdc" dependencies = [ "proc-macro2 1.0.86", "quote 1.0.36", @@ -6161,9 +6162,9 @@ dependencies = [ [[package]] name = "sqlx-macros-core" -version = "0.8.0" +version = "0.8.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1a099220ae541c5db479c6424bdf1b200987934033c2584f79a0e1693601e776" +checksum = "3d100558134176a2629d46cec0c8891ba0be8910f7896abfdb75ef4ab6f4e7ce" dependencies = [ "dotenvy", "either", @@ 
-6187,9 +6188,9 @@ dependencies = [ [[package]] name = "sqlx-mysql" -version = "0.8.0" +version = "0.8.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5afe4c38a9b417b6a9a5eeffe7235d0a106716495536e7727d1c7f4b1ff3eba6" +checksum = "936cac0ab331b14cb3921c62156d913e4c15b74fb6ec0f3146bd4ef6e4fb3c12" dependencies = [ "atoi", "base64 0.22.1", @@ -6232,9 +6233,9 @@ dependencies = [ [[package]] name = "sqlx-postgres" -version = "0.8.0" +version = "0.8.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b1dbb157e65f10dbe01f729339c06d239120221c9ad9fa0ba8408c4cc18ecf21" +checksum = "9734dbce698c67ecf67c442f768a5e90a49b2a4d61a9f1d59f73874bd4cf0710" dependencies = [ "atoi", "base64 0.22.1", @@ -6275,9 +6276,9 @@ dependencies = [ [[package]] name = "sqlx-sqlite" -version = "0.8.0" +version = "0.8.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9b2cdd83c008a622d94499c0006d8ee5f821f36c89b7d625c900e5dc30b5c5ee" +checksum = "a75b419c3c1b1697833dd927bdc4c6545a620bc1bbafabd44e1efbe9afcd337e" dependencies = [ "atoi", "chrono", @@ -7305,20 +7306,24 @@ dependencies = [ name = "vm-benchmark" version = "0.1.0" dependencies = [ + "assert_matches", "criterion", "iai", + "once_cell", "rand 0.8.5", "tokio", "vise", + "zksync_contracts", + "zksync_multivm", "zksync_types", + "zksync_utils", "zksync_vlog", - "zksync_vm_benchmark_harness", ] [[package]] name = "vm2" version = "0.1.0" -source = "git+https://github.com/matter-labs/vm2.git?rev=9a38900d7af9b1d72b47ce3be980e77c1239a61d#9a38900d7af9b1d72b47ce3be980e77c1239a61d" +source = "git+https://github.com/matter-labs/vm2.git?rev=2276b7b5af520fca0477bdafe43781b51896d235#2276b7b5af520fca0477bdafe43781b51896d235" dependencies = [ "enum_dispatch", "primitive-types", @@ -8019,6 +8024,7 @@ dependencies = [ "rand 0.8.5", "tokio", "tracing", + "vise", "zksync_config", "zksync_contracts", "zksync_dal", @@ -8026,6 +8032,7 @@ dependencies = [ "zksync_external_price_api", "zksync_node_fee_model", "zksync_types", + "zksync_utils", ] [[package]] @@ -8644,7 +8651,7 @@ dependencies = [ [[package]] name = "zksync_external_node" -version = "24.20.0" +version = "24.22.0" dependencies = [ "anyhow", "assert_matches", @@ -8726,6 +8733,7 @@ dependencies = [ "bincode", "tokio", "tracing", + "vise", "zksync_basic_types", "zksync_config", "zksync_dal", @@ -8899,6 +8907,7 @@ name = "zksync_multivm" version = "0.1.0" dependencies = [ "anyhow", + "assert_matches", "circuit_sequencer_api 0.133.0", "circuit_sequencer_api 0.140.0", "circuit_sequencer_api 0.141.1", @@ -9726,21 +9735,6 @@ dependencies = [ "vise-exporter", ] -[[package]] -name = "zksync_vm_benchmark_harness" -version = "0.1.0" -dependencies = [ - "assert_matches", - "once_cell", - "zk_evm 0.133.0", - "zksync_contracts", - "zksync_multivm", - "zksync_state", - "zksync_system_constants", - "zksync_types", - "zksync_utils", -] - [[package]] name = "zksync_vm_interface" version = "0.1.0" diff --git a/Cargo.toml b/Cargo.toml index d4855a34b9de..6faea57fa1a0 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -79,7 +79,6 @@ members = [ "core/tests/test_account", "core/tests/loadnext", "core/tests/vm-benchmark", - "core/tests/vm-benchmark/harness", # Parts of prover workspace that are needed for Core workspace "prover/crates/lib/prover_dal", ] @@ -167,7 +166,7 @@ serde_with = "1" serde_yaml = "0.9" sha2 = "0.10.8" sha3 = "0.10.8" -sqlx = "0.8.0" +sqlx = "0.8.1" static_assertions = "1.1" structopt = "0.3.20" strum = "0.26" @@ -218,7 +217,7 @@ zk_evm_1_4_1 = { 
package = "zk_evm", version = "0.141.0" } zk_evm_1_5_0 = { package = "zk_evm", version = "=0.150.4" } # New VM; pinned to a specific commit because of instability -vm2 = { git = "https://github.com/matter-labs/vm2.git", rev = "9a38900d7af9b1d72b47ce3be980e77c1239a61d" } +vm2 = { git = "https://github.com/matter-labs/vm2.git", rev = "2276b7b5af520fca0477bdafe43781b51896d235" } # Consensus dependencies. zksync_concurrency = "=0.1.0-rc.11" @@ -238,7 +237,6 @@ zksync_prover_dal = { version = "0.1.0", path = "prover/crates/lib/prover_dal" } zksync_vlog = { version = "0.1.0", path = "core/lib/vlog" } zksync_vm_interface = { version = "0.1.0", path = "core/lib/vm_interface" } zksync_vm_utils = { version = "0.1.0", path = "core/lib/vm_utils" } -zksync_vm_benchmark_harness = { version = "0.1.0", path = "core/tests/vm-benchmark/harness" } zksync_basic_types = { version = "0.1.0", path = "core/lib/basic_types" } zksync_circuit_breaker = { version = "0.1.0", path = "core/lib/circuit_breaker" } zksync_config = { version = "0.1.0", path = "core/lib/config" } diff --git a/contracts b/contracts index d273ebf5fc7d..10ec8ba28f3d 160000 --- a/contracts +++ b/contracts @@ -1 +1 @@ -Subproject commit d273ebf5fc7d85ff59a6db4d93ac1a4719462599 +Subproject commit 10ec8ba28f3de36ab6d8f73d63496f59b37654e3 diff --git a/core/CHANGELOG.md b/core/CHANGELOG.md index e727a8326603..5464a8b10098 100644 --- a/core/CHANGELOG.md +++ b/core/CHANGELOG.md @@ -1,5 +1,31 @@ # Changelog +## [24.22.0](https://github.com/matter-labs/zksync-era/compare/core-v24.21.0...core-v24.22.0) (2024-08-27) + + +### Features + +* add flag to enable/disable DA inclusion verification ([#2647](https://github.com/matter-labs/zksync-era/issues/2647)) ([b425561](https://github.com/matter-labs/zksync-era/commit/b4255618708349c51f60f5c7fc26f9356d32b6ff)) +* **Base token:** add cbt metrics ([#2720](https://github.com/matter-labs/zksync-era/issues/2720)) ([58438eb](https://github.com/matter-labs/zksync-era/commit/58438eb174c30edf62e2ff8abb74567de2a4bea8)) +* Change default_protective_reads_persistence_enabled to false ([#2716](https://github.com/matter-labs/zksync-era/issues/2716)) ([8d0eee7](https://github.com/matter-labs/zksync-era/commit/8d0eee7ca8fe117b2ee286c6080bfa0057ee31ae)) +* **vm:** Extract oneshot VM executor interface ([#2671](https://github.com/matter-labs/zksync-era/issues/2671)) ([951d5f2](https://github.com/matter-labs/zksync-era/commit/951d5f208e5d16a5d95878dd345a8bd2a4144aa7)) +* **zk_toolbox:** Add holesky testnet as layer1 network ([#2632](https://github.com/matter-labs/zksync-era/issues/2632)) ([d9266e5](https://github.com/matter-labs/zksync-era/commit/d9266e5ef3910732666c00c1324256fb5b54452d)) + + +### Bug Fixes + +* **api:** `tx.gas_price` field ([#2734](https://github.com/matter-labs/zksync-era/issues/2734)) ([aea3726](https://github.com/matter-labs/zksync-era/commit/aea3726c88b4e881bcd0f4a60ff32a730f200938)) +* **base_token_adjuster:** bug with a wrong metrics namespace ([#2744](https://github.com/matter-labs/zksync-era/issues/2744)) ([64b2ff8](https://github.com/matter-labs/zksync-era/commit/64b2ff8b81dcc146cd0535eb0d2d898c18ad5f7f)) +* **eth-sender:** missing Gateway migration changes ([#2732](https://github.com/matter-labs/zksync-era/issues/2732)) ([a4170e9](https://github.com/matter-labs/zksync-era/commit/a4170e9e7f321a1062495ec586e0ce9186269088)) +* **proof_data_handler:** TEE blob fetching error handling ([#2674](https://github.com/matter-labs/zksync-era/issues/2674)) 
([c162510](https://github.com/matter-labs/zksync-era/commit/c162510598b45dc062c2c91085868f8aa966360e)) + +## [24.21.0](https://github.com/matter-labs/zksync-era/compare/core-v24.20.0...core-v24.21.0) (2024-08-22) + + +### Features + +* External prover API metrics, refactoring ([#2630](https://github.com/matter-labs/zksync-era/issues/2630)) ([c83cca8](https://github.com/matter-labs/zksync-era/commit/c83cca8fe7fa105ec6b1491e4efb9f9e4bd66d41)) + ## [24.20.0](https://github.com/matter-labs/zksync-era/compare/core-v24.19.0...core-v24.20.0) (2024-08-21) diff --git a/core/bin/external_node/Cargo.toml b/core/bin/external_node/Cargo.toml index 29b839c6a1fe..558de140628a 100644 --- a/core/bin/external_node/Cargo.toml +++ b/core/bin/external_node/Cargo.toml @@ -1,7 +1,7 @@ [package] name = "zksync_external_node" description = "Non-validator ZKsync node" -version = "24.20.0" # x-release-please-version +version = "24.22.0" # x-release-please-version edition.workspace = true authors.workspace = true homepage.workspace = true diff --git a/core/bin/external_node/src/config/mod.rs b/core/bin/external_node/src/config/mod.rs index 5430e82a1164..e2d7962430b2 100644 --- a/core/bin/external_node/src/config/mod.rs +++ b/core/bin/external_node/src/config/mod.rs @@ -408,8 +408,7 @@ pub(crate) struct OptionalENConfig { /// Configures whether to persist protective reads when persisting L1 batches in the state keeper. /// Protective reads are never required by full nodes so far, not until such a node runs a full Merkle tree /// (presumably, to participate in L1 batch proving). - /// By default, set to `true` as a temporary safety measure. - #[serde(default = "OptionalENConfig::default_protective_reads_persistence_enabled")] + #[serde(default)] pub protective_reads_persistence_enabled: bool, /// Address of the L1 diamond proxy contract used by the consistency checker to match with the origin of logs emitted /// by commit transactions. If not set, it will not be verified. @@ -662,7 +661,7 @@ impl OptionalENConfig { .db_config .as_ref() .map(|a| a.experimental.protective_reads_persistence_enabled) - .unwrap_or(true), + .unwrap_or_default(), merkle_tree_processing_delay_ms: load_config_or_default!( general_config.db_config, experimental.processing_delay_ms, @@ -786,10 +785,6 @@ impl OptionalENConfig { 10 } - const fn default_protective_reads_persistence_enabled() -> bool { - true - } - const fn default_mempool_cache_update_interval_ms() -> u64 { 50 } diff --git a/core/bin/zksync_tee_prover/src/tee_prover.rs b/core/bin/zksync_tee_prover/src/tee_prover.rs index 64a3a9c5749d..7f874533b4b3 100644 --- a/core/bin/zksync_tee_prover/src/tee_prover.rs +++ b/core/bin/zksync_tee_prover/src/tee_prover.rs @@ -201,8 +201,8 @@ impl Task for TeeProver { if !err.is_retriable() || retries > self.config.max_retries { return Err(err.into()); } - retries += 1; tracing::warn!(%err, "Failed TEE prover step function {retries}/{}, retrying in {} milliseconds.", self.config.max_retries, backoff.as_millis()); + retries += 1; backoff = std::cmp::min( backoff.mul_f32(self.config.retry_backoff_multiplier), self.config.max_backoff, diff --git a/core/lib/basic_types/src/network.rs b/core/lib/basic_types/src/network.rs index 436282baae8c..3403ec404738 100644 --- a/core/lib/basic_types/src/network.rs +++ b/core/lib/basic_types/src/network.rs @@ -26,6 +26,8 @@ pub enum Network { Goerli, /// Ethereum Sepolia testnet. Sepolia, + /// Ethereum Holešky testnet. + Holesky, /// Self-hosted Ethereum network. Localhost, /// Self-hosted L2 network. 
@@ -48,6 +50,7 @@ impl FromStr for Network { "localhost" => Self::Localhost, "localhostL2" => Self::LocalhostL2, "sepolia" => Self::Sepolia, + "holesky" => Self::Holesky, "test" => Self::Test, another => return Err(another.to_owned()), }) @@ -64,6 +67,7 @@ impl fmt::Display for Network { Self::Localhost => write!(f, "localhost"), Self::LocalhostL2 => write!(f, "localhostL2"), Self::Sepolia => write!(f, "sepolia"), + Self::Holesky => write!(f, "holesky"), Self::Unknown => write!(f, "unknown"), Self::Test => write!(f, "test"), } @@ -79,8 +83,9 @@ impl Network { 4 => Self::Rinkeby, 5 => Self::Goerli, 9 => Self::Localhost, - 270 => Self::LocalhostL2, 11155111 => Self::Sepolia, + 17000 => Self::Holesky, + 270 => Self::LocalhostL2, _ => Self::Unknown, } } @@ -94,6 +99,7 @@ impl Network { Self::Goerli => SLChainId(5), Self::Localhost => SLChainId(9), Self::Sepolia => SLChainId(11155111), + Self::Holesky => SLChainId(17000), Self::LocalhostL2 => SLChainId(270), Self::Unknown => panic!("Unknown chain ID"), Self::Test => panic!("Test chain ID"), diff --git a/core/lib/config/src/configs/base_token_adjuster.rs b/core/lib/config/src/configs/base_token_adjuster.rs index 0ae451a62d9c..c8a0fe6312e3 100644 --- a/core/lib/config/src/configs/base_token_adjuster.rs +++ b/core/lib/config/src/configs/base_token_adjuster.rs @@ -26,6 +26,12 @@ const DEFAULT_L1_TX_SENDING_MAX_ATTEMPTS: u32 = 3; /// Default number of milliseconds to sleep between receipt checking attempts const DEFAULT_L1_RECEIPT_CHECKING_SLEEP_MS: u64 = 30_000; +/// Default maximum number of attempts to fetch price from a remote API +const DEFAULT_PRICE_FETCHING_MAX_ATTEMPTS: u32 = 3; + +/// Default number of milliseconds to sleep between price fetching attempts +const DEFAULT_PRICE_FETCHING_SLEEP_MS: u64 = 5_000; + /// Default number of milliseconds to sleep between transaction sending attempts const DEFAULT_L1_TX_SENDING_SLEEP_MS: u64 = 30_000; @@ -73,6 +79,14 @@ pub struct BaseTokenAdjusterConfig { #[serde(default = "BaseTokenAdjusterConfig::default_l1_tx_sending_sleep_ms")] pub l1_tx_sending_sleep_ms: u64, + /// Maximum number of attempts to fetch quote from a remote API before failing over + #[serde(default = "BaseTokenAdjusterConfig::default_price_fetching_max_attempts")] + pub price_fetching_max_attempts: u32, + + /// Number of seconds to sleep between price fetching attempts + #[serde(default = "BaseTokenAdjusterConfig::default_price_fetching_sleep_ms")] + pub price_fetching_sleep_ms: u64, + /// Defines whether base_token_adjuster should halt the process if there was an error while /// fetching or persisting the quote. Generally that should be set to false to not to halt /// the server process if an external api is not available or if L1 is congested. 
@@ -93,6 +107,8 @@ impl Default for BaseTokenAdjusterConfig { l1_receipt_checking_sleep_ms: Self::default_l1_receipt_checking_sleep_ms(), l1_tx_sending_max_attempts: Self::default_l1_tx_sending_max_attempts(), l1_tx_sending_sleep_ms: Self::default_l1_tx_sending_sleep_ms(), + price_fetching_sleep_ms: Self::default_price_fetching_sleep_ms(), + price_fetching_max_attempts: Self::default_price_fetching_max_attempts(), halt_on_error: Self::default_halt_on_error(), } } @@ -135,6 +151,10 @@ impl BaseTokenAdjusterConfig { Duration::from_millis(self.l1_tx_sending_sleep_ms) } + pub fn price_fetching_sleep_duration(&self) -> Duration { + Duration::from_millis(self.price_fetching_sleep_ms) + } + pub fn default_l1_receipt_checking_max_attempts() -> u32 { DEFAULT_L1_RECEIPT_CHECKING_MAX_ATTEMPTS } @@ -151,6 +171,14 @@ impl BaseTokenAdjusterConfig { DEFAULT_L1_TX_SENDING_SLEEP_MS } + pub fn default_price_fetching_sleep_ms() -> u64 { + DEFAULT_PRICE_FETCHING_SLEEP_MS + } + + pub fn default_price_fetching_max_attempts() -> u32 { + DEFAULT_PRICE_FETCHING_MAX_ATTEMPTS + } + pub fn default_max_tx_gas() -> u64 { DEFAULT_MAX_TX_GAS } diff --git a/core/lib/config/src/configs/chain.rs b/core/lib/config/src/configs/chain.rs index 6ac70b27b84a..7e33f6964bb7 100644 --- a/core/lib/config/src/configs/chain.rs +++ b/core/lib/config/src/configs/chain.rs @@ -127,8 +127,9 @@ pub struct StateKeeperConfig { /// Configures whether to persist protective reads when persisting L1 batches in the state keeper. /// Protective reads can be written asynchronously in VM runner instead. - /// By default, set to `true` as a temporary safety measure. - #[serde(default = "StateKeeperConfig::default_protective_reads_persistence_enabled")] + /// By default, set to `false` as it is expected that a separate `vm_runner_protective_reads` component + /// which is capable of saving protective reads is run. + #[serde(default)] pub protective_reads_persistence_enabled: bool, // Base system contract hashes, required only for generating genesis config. @@ -143,10 +144,6 @@ pub struct StateKeeperConfig { } impl StateKeeperConfig { - fn default_protective_reads_persistence_enabled() -> bool { - true - } - /// Creates a config object suitable for use in unit tests. /// Values mostly repeat the values used in the localhost environment. pub fn for_tests() -> Self { diff --git a/core/lib/config/src/configs/da_dispatcher.rs b/core/lib/config/src/configs/da_dispatcher.rs index 303a2c0b54c1..e9ad6bd3c074 100644 --- a/core/lib/config/src/configs/da_dispatcher.rs +++ b/core/lib/config/src/configs/da_dispatcher.rs @@ -5,6 +5,7 @@ use serde::Deserialize; pub const DEFAULT_POLLING_INTERVAL_MS: u32 = 5000; pub const DEFAULT_MAX_ROWS_TO_DISPATCH: u32 = 100; pub const DEFAULT_MAX_RETRIES: u16 = 5; +pub const DEFAULT_USE_DUMMY_INCLUSION_DATA: bool = false; #[derive(Debug, Clone, PartialEq, Deserialize)] pub struct DADispatcherConfig { @@ -14,6 +15,10 @@ pub struct DADispatcherConfig { pub max_rows_to_dispatch: Option, /// The maximum number of retries for the dispatch of a blob. pub max_retries: Option, + /// Use dummy value as inclusion proof instead of getting it from the client. + // TODO: run a verification task to check if the L1 contract expects the inclusion proofs to + // avoid the scenario where contracts expect real proofs, and server is using dummy proofs. 
+ pub use_dummy_inclusion_data: Option, } impl DADispatcherConfig { @@ -22,6 +27,7 @@ impl DADispatcherConfig { polling_interval_ms: Some(DEFAULT_POLLING_INTERVAL_MS), max_rows_to_dispatch: Some(DEFAULT_MAX_ROWS_TO_DISPATCH), max_retries: Some(DEFAULT_MAX_RETRIES), + use_dummy_inclusion_data: Some(DEFAULT_USE_DUMMY_INCLUSION_DATA), } } @@ -40,4 +46,9 @@ impl DADispatcherConfig { pub fn max_retries(&self) -> u16 { self.max_retries.unwrap_or(DEFAULT_MAX_RETRIES) } + + pub fn use_dummy_inclusion_data(&self) -> bool { + self.use_dummy_inclusion_data + .unwrap_or(DEFAULT_USE_DUMMY_INCLUSION_DATA) + } } diff --git a/core/lib/config/src/configs/experimental.rs b/core/lib/config/src/configs/experimental.rs index 8309b36e7f22..097f3c4112b3 100644 --- a/core/lib/config/src/configs/experimental.rs +++ b/core/lib/config/src/configs/experimental.rs @@ -16,8 +16,9 @@ pub struct ExperimentalDBConfig { /// Configures whether to persist protective reads when persisting L1 batches in the state keeper. /// Protective reads are never required by full nodes so far, not until such a node runs a full Merkle tree /// (presumably, to participate in L1 batch proving). - /// By default, set to `true` as a temporary safety measure. - #[serde(default = "ExperimentalDBConfig::default_protective_reads_persistence_enabled")] + /// By default, set to `false` as it is expected that a separate `vm_runner_protective_reads` component + /// which is capable of saving protective reads is run. + #[serde(default)] pub protective_reads_persistence_enabled: bool, // Merkle tree config /// Processing delay between processing L1 batches in the Merkle tree. @@ -36,8 +37,7 @@ impl Default for ExperimentalDBConfig { state_keeper_db_block_cache_capacity_mb: Self::default_state_keeper_db_block_cache_capacity_mb(), state_keeper_db_max_open_files: None, - protective_reads_persistence_enabled: - Self::default_protective_reads_persistence_enabled(), + protective_reads_persistence_enabled: false, processing_delay_ms: Self::default_merkle_tree_processing_delay_ms(), include_indices_and_filters_in_block_cache: false, } @@ -53,10 +53,6 @@ impl ExperimentalDBConfig { self.state_keeper_db_block_cache_capacity_mb * super::BYTES_IN_MEGABYTE } - const fn default_protective_reads_persistence_enabled() -> bool { - true - } - const fn default_merkle_tree_processing_delay_ms() -> u64 { 100 } diff --git a/core/lib/config/src/testonly.rs b/core/lib/config/src/testonly.rs index e23600802612..911451a5a348 100644 --- a/core/lib/config/src/testonly.rs +++ b/core/lib/config/src/testonly.rs @@ -946,6 +946,7 @@ impl Distribution for EncodeDist { polling_interval_ms: self.sample(rng), max_rows_to_dispatch: self.sample(rng), max_retries: self.sample(rng), + use_dummy_inclusion_data: self.sample(rng), } } } @@ -1051,6 +1052,8 @@ impl Distribution for Enc l1_receipt_checking_sleep_ms: self.sample(rng), l1_tx_sending_max_attempts: self.sample(rng), l1_tx_sending_sleep_ms: self.sample(rng), + price_fetching_max_attempts: self.sample(rng), + price_fetching_sleep_ms: self.sample(rng), halt_on_error: self.sample(rng), } } diff --git a/core/lib/dal/.sqlx/query-286f27e32a152c293d07e7c22e893c6f5a43386d4183745a4668507cf672b3f6.json b/core/lib/dal/.sqlx/query-47975cc0b5e4f3a6b5224cb452b8fee3209a950943dc2b4da82c324e1c09132f.json similarity index 75% rename from core/lib/dal/.sqlx/query-286f27e32a152c293d07e7c22e893c6f5a43386d4183745a4668507cf672b3f6.json rename to core/lib/dal/.sqlx/query-47975cc0b5e4f3a6b5224cb452b8fee3209a950943dc2b4da82c324e1c09132f.json index 
540660bddf34..7e5f9e1713c4 100644 --- a/core/lib/dal/.sqlx/query-286f27e32a152c293d07e7c22e893c6f5a43386d4183745a4668507cf672b3f6.json +++ b/core/lib/dal/.sqlx/query-47975cc0b5e4f3a6b5224cb452b8fee3209a950943dc2b4da82c324e1c09132f.json @@ -1,6 +1,6 @@ { "db_name": "PostgreSQL", - "query": "\n UPDATE tee_proof_generation_details\n SET\n status = 'picked_by_prover',\n updated_at = NOW(),\n prover_taken_at = NOW()\n WHERE\n tee_type = $1\n AND l1_batch_number = (\n SELECT\n proofs.l1_batch_number\n FROM\n tee_proof_generation_details AS proofs\n JOIN tee_verifier_input_producer_jobs AS inputs ON proofs.l1_batch_number = inputs.l1_batch_number\n WHERE\n inputs.status = $2\n AND (\n proofs.status = 'ready_to_be_proven'\n OR (\n proofs.status = 'picked_by_prover'\n AND proofs.prover_taken_at < NOW() - $3::INTERVAL\n )\n )\n ORDER BY\n l1_batch_number ASC\n LIMIT\n 1\n FOR UPDATE\n SKIP LOCKED\n )\n RETURNING\n tee_proof_generation_details.l1_batch_number\n ", + "query": "\n UPDATE tee_proof_generation_details\n SET\n status = 'picked_by_prover',\n updated_at = NOW(),\n prover_taken_at = NOW()\n WHERE\n tee_type = $1\n AND l1_batch_number = (\n SELECT\n proofs.l1_batch_number\n FROM\n tee_proof_generation_details AS proofs\n JOIN tee_verifier_input_producer_jobs AS inputs ON proofs.l1_batch_number = inputs.l1_batch_number\n WHERE\n inputs.status = $2\n AND (\n proofs.status = 'ready_to_be_proven'\n OR (\n proofs.status = 'picked_by_prover'\n AND proofs.prover_taken_at < NOW() - $3::INTERVAL\n )\n )\n AND proofs.l1_batch_number >= $4\n ORDER BY\n l1_batch_number ASC\n LIMIT\n 1\n FOR UPDATE\n SKIP LOCKED\n )\n RETURNING\n tee_proof_generation_details.l1_batch_number\n ", "describe": { "columns": [ { @@ -26,12 +26,13 @@ } } }, - "Interval" + "Interval", + "Int8" ] }, "nullable": [ false ] }, - "hash": "286f27e32a152c293d07e7c22e893c6f5a43386d4183745a4668507cf672b3f6" + "hash": "47975cc0b5e4f3a6b5224cb452b8fee3209a950943dc2b4da82c324e1c09132f" } diff --git a/core/lib/dal/.sqlx/query-5c5bdb0e419049f9fb4d8b3bbec468765628fd2c3b7c2a408d18b5aba0df9a30.json b/core/lib/dal/.sqlx/query-5c5bdb0e419049f9fb4d8b3bbec468765628fd2c3b7c2a408d18b5aba0df9a30.json new file mode 100644 index 000000000000..2d9a24d6d79c --- /dev/null +++ b/core/lib/dal/.sqlx/query-5c5bdb0e419049f9fb4d8b3bbec468765628fd2c3b7c2a408d18b5aba0df9a30.json @@ -0,0 +1,15 @@ +{ + "db_name": "PostgreSQL", + "query": "\n UPDATE tee_proof_generation_details\n SET\n status = 'unpicked',\n updated_at = NOW()\n WHERE\n l1_batch_number = $1\n AND tee_type = $2\n ", + "describe": { + "columns": [], + "parameters": { + "Left": [ + "Int8", + "Text" + ] + }, + "nullable": [] + }, + "hash": "5c5bdb0e419049f9fb4d8b3bbec468765628fd2c3b7c2a408d18b5aba0df9a30" +} diff --git a/core/lib/dal/doc/ProofGenerationDal.md b/core/lib/dal/doc/ProofGenerationDal.md index 618fdfba13b0..40ee31a4b1a2 100644 --- a/core/lib/dal/doc/ProofGenerationDal.md +++ b/core/lib/dal/doc/ProofGenerationDal.md @@ -11,9 +11,10 @@ proof_generation_details title: Status Diagram --- stateDiagram-v2 -[*] --> ready_to_be_proven : insert_proof_generation_details -ready_to_be_proven --> picked_by_prover : get_next_block_to_be_proven +[*] --> unpicked : insert_proof_generation_details +unpicked --> picked_by_prover : lock_batch_for_proving picked_by_prover --> generated : save_proof_artifacts_metadata +picked_by_prover --> unpicked : unlock_batch generated --> [*] [*] --> skipped : mark_proof_generation_job_as_skipped diff --git a/core/lib/dal/doc/TeeProofGenerationDal.md 
b/core/lib/dal/doc/TeeProofGenerationDal.md index 23474d5cb5c5..167e6b3c42ce 100644 --- a/core/lib/dal/doc/TeeProofGenerationDal.md +++ b/core/lib/dal/doc/TeeProofGenerationDal.md @@ -12,8 +12,10 @@ title: Status Diagram --- stateDiagram-v2 [*] --> ready_to_be_proven : insert_tee_proof_generation_job -ready_to_be_proven --> picked_by_prover : get_next_batch_to_be_proven +ready_to_be_proven --> picked_by_prover : lock_batch_for_proving picked_by_prover --> generated : save_proof_artifacts_metadata generated --> [*] +picked_by_prover --> unpicked : unlock_batch +unpicked --> [*] ``` diff --git a/core/lib/dal/src/models/storage_transaction.rs b/core/lib/dal/src/models/storage_transaction.rs index aca93ee8c5a9..9f67e9025e0c 100644 --- a/core/lib/dal/src/models/storage_transaction.rs +++ b/core/lib/dal/src/models/storage_transaction.rs @@ -508,6 +508,19 @@ impl StorageApiTransaction { .signature .and_then(|signature| PackedEthSignature::deserialize_packed(&signature).ok()); + // For legacy and EIP-2930 transactions it is gas price willing to be paid by the sender in wei. + // For other transactions it should be the effective gas price if transaction is included in block, + // otherwise this value should be set equal to the max fee per gas. + let gas_price = match self.tx_format { + None | Some(0) | Some(1) => self + .max_fee_per_gas + .clone() + .unwrap_or_else(BigDecimal::zero), + _ => self + .effective_gas_price + .or_else(|| self.max_fee_per_gas.clone()) + .unwrap_or_else(BigDecimal::zero), + }; let mut tx = api::Transaction { hash: H256::from_slice(&self.tx_hash), nonce: U256::from(self.nonce.unwrap_or(0) as u64), @@ -517,11 +530,7 @@ impl StorageApiTransaction { from: Some(Address::from_slice(&self.initiator_address)), to: Some(serde_json::from_value(self.execute_contract_address).unwrap()), value: bigdecimal_to_u256(self.value), - gas_price: Some(bigdecimal_to_u256( - self.effective_gas_price - .or_else(|| self.max_fee_per_gas.clone()) - .unwrap_or_else(BigDecimal::zero), - )), + gas_price: Some(bigdecimal_to_u256(gas_price)), gas: bigdecimal_to_u256(self.gas_limit.unwrap_or_else(BigDecimal::zero)), input: serde_json::from_value(self.calldata).expect("incorrect calldata in Postgres"), v: signature.as_ref().map(|s| U64::from(s.v())), diff --git a/core/lib/dal/src/proof_generation_dal.rs b/core/lib/dal/src/proof_generation_dal.rs index f83f026073e6..dada6c69ed34 100644 --- a/core/lib/dal/src/proof_generation_dal.rs +++ b/core/lib/dal/src/proof_generation_dal.rs @@ -88,7 +88,7 @@ impl ProofGenerationDal<'_, '_> { Ok(result) } - pub async fn get_available_batch(&mut self) -> DalResult { + pub async fn get_latest_proven_batch(&mut self) -> DalResult { let result = sqlx::query!( r#" SELECT diff --git a/core/lib/dal/src/tee_proof_generation_dal.rs b/core/lib/dal/src/tee_proof_generation_dal.rs index 2bd73323eb10..80e364273f69 100644 --- a/core/lib/dal/src/tee_proof_generation_dal.rs +++ b/core/lib/dal/src/tee_proof_generation_dal.rs @@ -2,7 +2,9 @@ use std::time::Duration; use zksync_db_connection::{ - connection::Connection, error::DalResult, instrument::Instrumented, + connection::Connection, + error::DalResult, + instrument::{InstrumentExt, Instrumented}, utils::pg_interval_from_duration, }; use zksync_types::{tee_types::TeeType, L1BatchNumber}; @@ -18,12 +20,14 @@ pub struct TeeProofGenerationDal<'a, 'c> { } impl TeeProofGenerationDal<'_, '_> { - pub async fn get_next_batch_to_be_proven( + pub async fn lock_batch_for_proving( &mut self, tee_type: TeeType, processing_timeout: Duration, + 
min_batch_number: Option, ) -> DalResult> { let processing_timeout = pg_interval_from_duration(processing_timeout); + let min_batch_number = min_batch_number.map_or(0, |num| i64::from(num.0)); let query = sqlx::query!( r#" UPDATE tee_proof_generation_details @@ -48,6 +52,7 @@ impl TeeProofGenerationDal<'_, '_> { AND proofs.prover_taken_at < NOW() - $3::INTERVAL ) ) + AND proofs.l1_batch_number >= $4 ORDER BY l1_batch_number ASC LIMIT @@ -58,13 +63,16 @@ impl TeeProofGenerationDal<'_, '_> { RETURNING tee_proof_generation_details.l1_batch_number "#, - &tee_type.to_string(), + tee_type.to_string(), TeeVerifierInputProducerJobStatus::Successful as TeeVerifierInputProducerJobStatus, - &processing_timeout, + processing_timeout, + min_batch_number ); - let batch_number = Instrumented::new("get_next_batch_to_be_proven") + + let batch_number = Instrumented::new("lock_batch_for_proving") .with_arg("tee_type", &tee_type) .with_arg("processing_timeout", &processing_timeout) + .with_arg("l1_batch_number", &min_batch_number) .with(query) .fetch_optional(self.storage) .await? @@ -73,6 +81,34 @@ impl TeeProofGenerationDal<'_, '_> { Ok(batch_number) } + pub async fn unlock_batch( + &mut self, + l1_batch_number: L1BatchNumber, + tee_type: TeeType, + ) -> DalResult<()> { + let batch_number = i64::from(l1_batch_number.0); + sqlx::query!( + r#" + UPDATE tee_proof_generation_details + SET + status = 'unpicked', + updated_at = NOW() + WHERE + l1_batch_number = $1 + AND tee_type = $2 + "#, + batch_number, + tee_type.to_string() + ) + .instrument("unlock_batch") + .with_arg("l1_batch_number", &batch_number) + .with_arg("tee_type", &tee_type) + .execute(self.storage) + .await?; + + Ok(()) + } + pub async fn save_proof_artifacts_metadata( &mut self, batch_number: L1BatchNumber, diff --git a/core/lib/env_config/src/base_token_adjuster.rs b/core/lib/env_config/src/base_token_adjuster.rs index 67cdef9425cd..f94e9c8f92a2 100644 --- a/core/lib/env_config/src/base_token_adjuster.rs +++ b/core/lib/env_config/src/base_token_adjuster.rs @@ -26,6 +26,8 @@ mod tests { l1_receipt_checking_sleep_ms: 20_000, l1_tx_sending_max_attempts: 10, l1_tx_sending_sleep_ms: 30_000, + price_fetching_max_attempts: 20, + price_fetching_sleep_ms: 10_000, halt_on_error: true, } } @@ -41,6 +43,8 @@ mod tests { l1_receipt_checking_sleep_ms: 30_000, l1_tx_sending_max_attempts: 3, l1_tx_sending_sleep_ms: 30_000, + price_fetching_max_attempts: 3, + price_fetching_sleep_ms: 5_000, halt_on_error: false, } } @@ -58,6 +62,8 @@ mod tests { BASE_TOKEN_ADJUSTER_L1_RECEIPT_CHECKING_SLEEP_MS=20000 BASE_TOKEN_ADJUSTER_L1_TX_SENDING_MAX_ATTEMPTS=10 BASE_TOKEN_ADJUSTER_L1_TX_SENDING_SLEEP_MS=30000 + BASE_TOKEN_ADJUSTER_PRICE_FETCHING_MAX_ATTEMPTS=20 + BASE_TOKEN_ADJUSTER_PRICE_FETCHING_SLEEP_MS=10000 BASE_TOKEN_ADJUSTER_HALT_ON_ERROR=true "#; lock.set_env(config); @@ -79,6 +85,8 @@ mod tests { "BASE_TOKEN_ADJUSTER_L1_RECEIPT_CHECKING_SLEEP_MS", "BASE_TOKEN_ADJUSTER_L1_TX_SENDING_MAX_ATTEMPTS", "BASE_TOKEN_ADJUSTER_L1_TX_SENDING_SLEEP_MS", + "BASE_TOKEN_ADJUSTER_PRICE_FETCHING_MAX_ATTEMPTS", + "BASE_TOKEN_ADJUSTER_PRICE_FETCHING_SLEEP_MS", "BASE_TOKEN_ADJUSTER_HALT_ON_ERROR", ]); diff --git a/core/lib/env_config/src/da_dispatcher.rs b/core/lib/env_config/src/da_dispatcher.rs index 194e4185b286..246752db91ac 100644 --- a/core/lib/env_config/src/da_dispatcher.rs +++ b/core/lib/env_config/src/da_dispatcher.rs @@ -26,6 +26,7 @@ mod tests { polling_interval_ms: Some(interval), max_rows_to_dispatch: Some(rows_limit), max_retries: Some(max_retries), + 
use_dummy_inclusion_data: Some(true), } } @@ -36,6 +37,7 @@ mod tests { DA_DISPATCHER_POLLING_INTERVAL_MS=5000 DA_DISPATCHER_MAX_ROWS_TO_DISPATCH=60 DA_DISPATCHER_MAX_RETRIES=7 + DA_DISPATCHER_USE_DUMMY_INCLUSION_DATA="true" "#; lock.set_env(config); let actual = DADispatcherConfig::from_env().unwrap(); diff --git a/core/lib/multivm/Cargo.toml b/core/lib/multivm/Cargo.toml index e3ea58a7ae5a..f56277779818 100644 --- a/core/lib/multivm/Cargo.toml +++ b/core/lib/multivm/Cargo.toml @@ -41,6 +41,7 @@ tracing.workspace = true vise.workspace = true [dev-dependencies] +assert_matches.workspace = true tokio = { workspace = true, features = ["time"] } zksync_test_account.workspace = true ethabi.workspace = true diff --git a/core/lib/multivm/src/tracers/mod.rs b/core/lib/multivm/src/tracers/mod.rs index 0a6517a6cd2f..69501cf39882 100644 --- a/core/lib/multivm/src/tracers/mod.rs +++ b/core/lib/multivm/src/tracers/mod.rs @@ -3,7 +3,9 @@ pub use self::{ multivm_dispatcher::TracerDispatcher, prestate_tracer::PrestateTracer, storage_invocation::StorageInvocations, - validator::{ValidationError, ValidationTracer, ValidationTracerParams}, + validator::{ + ValidationError, ValidationTracer, ValidationTracerParams, ViolatedValidationRule, + }, }; mod call_tracer; diff --git a/core/lib/multivm/src/tracers/validator/mod.rs b/core/lib/multivm/src/tracers/validator/mod.rs index a91006368b6a..307256792cf7 100644 --- a/core/lib/multivm/src/tracers/validator/mod.rs +++ b/core/lib/multivm/src/tracers/validator/mod.rs @@ -10,13 +10,11 @@ use zksync_types::{ }; use zksync_utils::{be_bytes_to_safe_address, u256_to_account_address, u256_to_h256}; -pub use crate::tracers::validator::types::{ValidationError, ValidationTracerParams}; +use self::types::{NewTrustedValidationItems, ValidationTracerMode}; +pub use self::types::{ValidationError, ValidationTracerParams, ViolatedValidationRule}; use crate::{ glue::tracers::IntoOldVmTracer, interface::storage::{StoragePtr, WriteStorage}, - tracers::validator::types::{ - NewTrustedValidationItems, ValidationTracerMode, ViolatedValidationRule, - }, }; mod types; diff --git a/core/lib/multivm/src/versions/vm_fast/tests/get_used_contracts.rs b/core/lib/multivm/src/versions/vm_fast/tests/get_used_contracts.rs index 1bfc2f8ff11f..5524bd3edde9 100644 --- a/core/lib/multivm/src/versions/vm_fast/tests/get_used_contracts.rs +++ b/core/lib/multivm/src/versions/vm_fast/tests/get_used_contracts.rs @@ -1,17 +1,23 @@ -use std::collections::HashSet; +use std::{collections::HashSet, iter}; +use assert_matches::assert_matches; +use ethabi::Token; use itertools::Itertools; +use zk_evm_1_3_1::zkevm_opcode_defs::decoding::{EncodingModeProduction, VmEncodingMode}; use zksync_system_constants::CONTRACT_DEPLOYER_ADDRESS; use zksync_test_account::Account; -use zksync_types::{Execute, U256}; +use zksync_types::{Address, Execute, U256}; use zksync_utils::{bytecode::hash_bytecode, h256_to_u256}; use crate::{ - interface::{storage::ReadStorage, TxExecutionMode, VmExecutionMode, VmInterface}, + interface::{ + storage::ReadStorage, ExecutionResult, TxExecutionMode, VmExecutionMode, + VmExecutionResultAndLogs, VmInterface, + }, vm_fast::{ tests::{ - tester::{TxType, VmTesterBuilder}, - utils::{read_test_contract, BASE_SYSTEM_CONTRACTS}, + tester::{TxType, VmTester, VmTesterBuilder}, + utils::{read_proxy_counter_contract, read_test_contract, BASE_SYSTEM_CONTRACTS}, }, vm::Vm, }, @@ -88,8 +94,90 @@ fn known_bytecodes_without_aa_code(vm: &Vm) -> HashSet .keys() .cloned() .collect::>(); - 
known_bytecodes_without_aa_code.remove(&h256_to_u256(BASE_SYSTEM_CONTRACTS.default_aa.hash)); - known_bytecodes_without_aa_code } + +/// Counter test contract bytecode inflated by appending lots of `NOP` opcodes at the end. This leads to non-trivial +/// decommitment cost (>10,000 gas). +fn inflated_counter_bytecode() -> Vec { + let mut counter_bytecode = read_test_contract(); + counter_bytecode.extend( + iter::repeat(EncodingModeProduction::nop_encoding().to_be_bytes()) + .take(10_000) + .flatten(), + ); + counter_bytecode +} + +fn execute_proxy_counter(gas: u32) -> (VmTester, U256, VmExecutionResultAndLogs) { + let counter_bytecode = inflated_counter_bytecode(); + let counter_bytecode_hash = h256_to_u256(hash_bytecode(&counter_bytecode)); + let counter_address = Address::repeat_byte(0x23); + + let mut vm = VmTesterBuilder::new() + .with_empty_in_memory_storage() + .with_custom_contracts(vec![(counter_bytecode, counter_address, false)]) + .with_execution_mode(TxExecutionMode::VerifyExecute) + .with_random_rich_accounts(1) + .build(); + + let (proxy_counter_bytecode, proxy_counter_abi) = read_proxy_counter_contract(); + let account = &mut vm.rich_accounts[0]; + let deploy_tx = account.get_deploy_tx( + &proxy_counter_bytecode, + Some(&[Token::Address(counter_address)]), + TxType::L2, + ); + let (compression_result, exec_result) = vm + .vm + .execute_transaction_with_bytecode_compression(deploy_tx.tx, true); + compression_result.unwrap(); + assert!(!exec_result.result.is_failed(), "{exec_result:#?}"); + + let decommitted_hashes = vm.vm.decommitted_hashes().collect::>(); + assert!( + !decommitted_hashes.contains(&counter_bytecode_hash), + "{decommitted_hashes:?}" + ); + + let increment = proxy_counter_abi.function("increment").unwrap(); + let increment_tx = account.get_l2_tx_for_execute( + Execute { + contract_address: deploy_tx.address, + calldata: increment + .encode_input(&[Token::Uint(1.into()), Token::Uint(gas.into())]) + .unwrap(), + value: 0.into(), + factory_deps: vec![], + }, + None, + ); + let (compression_result, exec_result) = vm + .vm + .execute_transaction_with_bytecode_compression(increment_tx, true); + compression_result.unwrap(); + (vm, counter_bytecode_hash, exec_result) +} + +#[test] +fn get_used_contracts_with_far_call() { + let (vm, counter_bytecode_hash, exec_result) = execute_proxy_counter(100_000); + assert!(!exec_result.result.is_failed(), "{exec_result:#?}"); + let decommitted_hashes = vm.vm.decommitted_hashes().collect::>(); + assert!( + decommitted_hashes.contains(&counter_bytecode_hash), + "{decommitted_hashes:?}" + ); +} + +#[test] +fn get_used_contracts_with_out_of_gas_far_call() { + let (vm, counter_bytecode_hash, exec_result) = execute_proxy_counter(10_000); + assert_matches!(exec_result.result, ExecutionResult::Revert { .. }); + let decommitted_hashes = vm.vm.decommitted_hashes().collect::>(); + assert!( + decommitted_hashes.contains(&counter_bytecode_hash), + "{decommitted_hashes:?}" + ); +} diff --git a/core/lib/multivm/src/versions/vm_fast/tests/l1_tx_execution.rs b/core/lib/multivm/src/versions/vm_fast/tests/l1_tx_execution.rs index f1411497c24c..a4dbcfbe2ee8 100644 --- a/core/lib/multivm/src/versions/vm_fast/tests/l1_tx_execution.rs +++ b/core/lib/multivm/src/versions/vm_fast/tests/l1_tx_execution.rs @@ -117,9 +117,8 @@ fn test_l1_tx_execution() { let res = vm.vm.execute(VmExecutionMode::OneTx); let storage_logs = res.logs.storage_logs; let res = StorageWritesDeduplicator::apply_on_empty_state(&storage_logs); - // We changed one slot inside contract. 
However, the rewrite of the `basePubdataSpent` didn't happen, since it was the same - // as the start of the previous tx. Thus we have `+1` slot for the changed counter and `-1` slot for base pubdata spent - assert_eq!(res.initial_storage_writes, basic_initial_writes); + // We changed one slot inside contract. + assert_eq!(res.initial_storage_writes - basic_initial_writes, 1); // No repeated writes let repeated_writes = res.repeated_storage_writes; @@ -146,7 +145,7 @@ fn test_l1_tx_execution() { assert!(result.result.is_failed(), "The transaction should fail"); let res = StorageWritesDeduplicator::apply_on_empty_state(&result.logs.storage_logs); - assert_eq!(res.initial_storage_writes, basic_initial_writes); + assert_eq!(res.initial_storage_writes, basic_initial_writes + 1); assert_eq!(res.repeated_storage_writes, 1); } diff --git a/core/lib/multivm/src/versions/vm_fast/tests/nonce_holder.rs b/core/lib/multivm/src/versions/vm_fast/tests/nonce_holder.rs index b18676cf2ba6..7b75d2b8bb72 100644 --- a/core/lib/multivm/src/versions/vm_fast/tests/nonce_holder.rs +++ b/core/lib/multivm/src/versions/vm_fast/tests/nonce_holder.rs @@ -36,6 +36,7 @@ impl From for u8 { #[test] fn test_nonce_holder() { let mut account = Account::random(); + let hex_addr = hex::encode(account.address.to_fixed_bytes()); let mut vm = VmTesterBuilder::new() .with_empty_in_memory_storage() @@ -92,7 +93,7 @@ fn test_nonce_holder() { run_nonce_test( 1u32, NonceHolderTestMode::SetValueUnderNonce, - Some("Previous nonce has not been used".to_string()), + Some("Error function_selector = 0x13595475, data = 0x13595475".to_string()), "Allowed to set value under non sequential value", ); @@ -133,7 +134,7 @@ fn test_nonce_holder() { run_nonce_test( 10u32, NonceHolderTestMode::IncreaseMinNonceBy5, - Some("Reusing the same nonce twice".to_string()), + Some(format!("Error function_selector = 0xe90aded4, data = 0xe90aded4000000000000000000000000{hex_addr}000000000000000000000000000000000000000000000000000000000000000a")), "Allowed to reuse nonce below the minimal one", ); @@ -149,7 +150,7 @@ fn test_nonce_holder() { run_nonce_test( 13u32, NonceHolderTestMode::IncreaseMinNonceBy5, - Some("Reusing the same nonce twice".to_string()), + Some(format!("Error function_selector = 0xe90aded4, data = 0xe90aded4000000000000000000000000{hex_addr}000000000000000000000000000000000000000000000000000000000000000d")), "Allowed to reuse the same nonce twice", ); @@ -165,7 +166,7 @@ fn test_nonce_holder() { run_nonce_test( 16u32, NonceHolderTestMode::IncreaseMinNonceTooMuch, - Some("The value for incrementing the nonce is too high".to_string()), + Some("Error function_selector = 0x45ac24a6, data = 0x45ac24a600000000000000000000000000000000000000000000000000000001000000000000000000000000000000000000000000000000040000000000000000000000".to_string()), "Allowed for incrementing min nonce too much", ); @@ -173,7 +174,7 @@ fn test_nonce_holder() { run_nonce_test( 16u32, NonceHolderTestMode::LeaveNonceUnused, - Some("The nonce was not set as used".to_string()), + Some(format!("Error function_selector = 0x1f2f8478, data = 0x1f2f8478000000000000000000000000{hex_addr}0000000000000000000000000000000000000000000000000000000000000010")), "Allowed to leave nonce as unused", ); } diff --git a/core/lib/multivm/src/versions/vm_fast/tests/rollbacks.rs b/core/lib/multivm/src/versions/vm_fast/tests/rollbacks.rs index c530c5af18ea..fd5dc495435d 100644 --- a/core/lib/multivm/src/versions/vm_fast/tests/rollbacks.rs +++ b/core/lib/multivm/src/versions/vm_fast/tests/rollbacks.rs 
@@ -1,6 +1,6 @@ use ethabi::Token; use zksync_contracts::{get_loadnext_contract, test_contracts::LoadnextContractExecutionParams}; -use zksync_types::{Execute, U256}; +use zksync_types::{Execute, Nonce, U256}; use crate::{ interface::TxExecutionMode, @@ -38,22 +38,40 @@ fn test_vm_rollbacks() { TransactionTestInfo::new_rejected(tx_0.clone(), TxModifier::WrongMagicValue.into()), TransactionTestInfo::new_rejected(tx_0.clone(), TxModifier::WrongSignature.into()), // The correct nonce is 0, this tx will fail - TransactionTestInfo::new_rejected(tx_2.clone(), TxModifier::WrongNonce.into()), + TransactionTestInfo::new_rejected( + tx_2.clone(), + TxModifier::WrongNonce(tx_2.nonce().unwrap(), Nonce(0)).into(), + ), // This tx will succeed TransactionTestInfo::new_processed(tx_0.clone(), false), // The correct nonce is 1, this tx will fail - TransactionTestInfo::new_rejected(tx_0.clone(), TxModifier::NonceReused.into()), + TransactionTestInfo::new_rejected( + tx_0.clone(), + TxModifier::NonceReused(tx_0.initiator_account(), tx_0.nonce().unwrap()).into(), + ), // The correct nonce is 1, this tx will fail - TransactionTestInfo::new_rejected(tx_2.clone(), TxModifier::WrongNonce.into()), + TransactionTestInfo::new_rejected( + tx_2.clone(), + TxModifier::WrongNonce(tx_2.nonce().unwrap(), Nonce(1)).into(), + ), // This tx will succeed TransactionTestInfo::new_processed(tx_1, false), // The correct nonce is 2, this tx will fail - TransactionTestInfo::new_rejected(tx_0.clone(), TxModifier::NonceReused.into()), + TransactionTestInfo::new_rejected( + tx_0.clone(), + TxModifier::NonceReused(tx_0.initiator_account(), tx_0.nonce().unwrap()).into(), + ), // This tx will succeed TransactionTestInfo::new_processed(tx_2.clone(), false), // This tx will fail - TransactionTestInfo::new_rejected(tx_2, TxModifier::NonceReused.into()), - TransactionTestInfo::new_rejected(tx_0, TxModifier::NonceReused.into()), + TransactionTestInfo::new_rejected( + tx_2.clone(), + TxModifier::NonceReused(tx_2.initiator_account(), tx_2.nonce().unwrap()).into(), + ), + TransactionTestInfo::new_rejected( + tx_0.clone(), + TxModifier::NonceReused(tx_0.initiator_account(), tx_0.nonce().unwrap()).into(), + ), ]); assert_eq!(result_without_rollbacks, result_with_rollbacks); @@ -131,12 +149,23 @@ fn test_vm_loadnext_rollbacks() { TransactionTestInfo::new_processed(loadnext_tx_1.clone(), true), TransactionTestInfo::new_rejected( loadnext_deploy_tx.clone(), - TxModifier::NonceReused.into(), + TxModifier::NonceReused( + loadnext_deploy_tx.initiator_account(), + loadnext_deploy_tx.nonce().unwrap(), + ) + .into(), ), TransactionTestInfo::new_processed(loadnext_tx_1, false), TransactionTestInfo::new_processed(loadnext_tx_2.clone(), true), TransactionTestInfo::new_processed(loadnext_tx_2.clone(), true), - TransactionTestInfo::new_rejected(loadnext_deploy_tx, TxModifier::NonceReused.into()), + TransactionTestInfo::new_rejected( + loadnext_deploy_tx.clone(), + TxModifier::NonceReused( + loadnext_deploy_tx.initiator_account(), + loadnext_deploy_tx.nonce().unwrap(), + ) + .into(), + ), TransactionTestInfo::new_processed(loadnext_tx_2, false), ]); diff --git a/core/lib/multivm/src/versions/vm_fast/tests/tester/transaction_test_info.rs b/core/lib/multivm/src/versions/vm_fast/tests/tester/transaction_test_info.rs index 562a8a6a6bdd..c3c1736902c9 100644 --- a/core/lib/multivm/src/versions/vm_fast/tests/tester/transaction_test_info.rs +++ b/core/lib/multivm/src/versions/vm_fast/tests/tester/transaction_test_info.rs @@ -1,4 +1,4 @@ -use 
zksync_types::{ExecuteTransactionCommon, Transaction, H160, U256}; +use zksync_types::{ExecuteTransactionCommon, Nonce, Transaction, H160, U256}; use super::VmTester; use crate::{ @@ -15,8 +15,8 @@ pub(crate) enum TxModifier { WrongSignatureLength, WrongSignature, WrongMagicValue, - WrongNonce, - NonceReused, + WrongNonce(Nonce, Nonce), + NonceReused(H160, Nonce), } #[derive(Debug, Clone)] @@ -41,15 +41,9 @@ impl From for ExpectedError { fn from(value: TxModifier) -> Self { let revert_reason = match value { TxModifier::WrongSignatureLength => { - Halt::ValidationFailed(VmRevertReason::General { - msg: "Signature length is incorrect".to_string(), - data: vec![ - 8, 195, 121, 160, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 32, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 29, 83, 105, 103, 110, 97, 116, 117, 114, 101, 32, - 108, 101, 110, 103, 116, 104, 32, 105, 115, 32, 105, 110, 99, 111, 114, 114, 101, 99, - 116, 0, 0, 0, - ], + Halt::ValidationFailed(VmRevertReason::Unknown { + function_selector: vec![144, 240, 73, 201], + data: vec![144, 240, 73, 201, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 45], }) } TxModifier::WrongSignature => { @@ -59,38 +53,35 @@ impl From for ExpectedError { }) } TxModifier::WrongMagicValue => { - Halt::ValidationFailed(VmRevertReason::General { - msg: "v is neither 27 nor 28".to_string(), - data: vec![ - 8, 195, 121, 160, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 32, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 22, 118, 32, 105, 115, 32, 110, 101, 105, 116, 104, - 101, 114, 32, 50, 55, 32, 110, 111, 114, 32, 50, 56, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - ], + Halt::ValidationFailed(VmRevertReason::Unknown { + function_selector: vec![144, 240, 73, 201], + data: vec![144, 240, 73, 201, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1], }) } - TxModifier::WrongNonce => { - Halt::ValidationFailed(VmRevertReason::General { - msg: "Incorrect nonce".to_string(), - data: vec![ - 8, 195, 121, 160, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 32, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 15, 73, 110, 99, 111, 114, 114, 101, 99, 116, 32, 110, - 111, 110, 99, 101, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - ], + TxModifier::WrongNonce(expected, actual) => { + let function_selector = vec![98, 106, 222, 48]; + let expected_nonce_bytes = expected.0.to_be_bytes().to_vec(); + let actual_nonce_bytes = actual.0.to_be_bytes().to_vec(); + // padding is 28 because an address takes up 4 bytes and we need it to fill a 32 byte field + let nonce_padding = vec![0u8; 28]; + let data = [function_selector.clone(), nonce_padding.clone(), expected_nonce_bytes, nonce_padding.clone(), actual_nonce_bytes].concat(); + Halt::ValidationFailed(VmRevertReason::Unknown { + function_selector, + data }) } - TxModifier::NonceReused => { - Halt::ValidationFailed(VmRevertReason::General { - msg: "Reusing the same nonce twice".to_string(), - data: vec![ - 8, 195, 121, 160, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 
0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 32, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 28, 82, 101, 117, 115, 105, 110, 103, 32, 116, 104, - 101, 32, 115, 97, 109, 101, 32, 110, 111, 110, 99, 101, 32, 116, 119, 105, 99, 101, 0, - 0, 0, 0, - ], + TxModifier::NonceReused(addr, nonce) => { + let function_selector = vec![233, 10, 222, 212]; + let addr = addr.as_bytes().to_vec(); + // padding is 12 because an address takes up 20 bytes and we need it to fill a 32 byte field + let addr_padding = vec![0u8; 12]; + // padding is 28 because an address takes up 4 bytes and we need it to fill a 32 byte field + let nonce_padding = vec![0u8; 28]; + let data = [function_selector.clone(), addr_padding, addr, nonce_padding, nonce.0.to_be_bytes().to_vec()].concat(); + Halt::ValidationFailed(VmRevertReason::Unknown { + function_selector, + data, }) } }; @@ -116,10 +107,10 @@ impl TransactionTestInfo { } TxModifier::WrongSignature => data.signature = vec![27u8; 65], TxModifier::WrongMagicValue => data.signature = vec![1u8; 65], - TxModifier::WrongNonce => { + TxModifier::WrongNonce(_, _) => { // Do not need to modify signature for nonce error } - TxModifier::NonceReused => { + TxModifier::NonceReused(_, _) => { // Do not need to modify signature for nonce error } } diff --git a/core/lib/multivm/src/versions/vm_fast/tests/utils.rs b/core/lib/multivm/src/versions/vm_fast/tests/utils.rs index 6b17e66f2616..d696aa582d64 100644 --- a/core/lib/multivm/src/versions/vm_fast/tests/utils.rs +++ b/core/lib/multivm/src/versions/vm_fast/tests/utils.rs @@ -127,3 +127,8 @@ pub(crate) fn read_expensive_contract() -> (Vec, Contract) { "etc/contracts-test-data/artifacts-zk/contracts/expensive/expensive.sol/Expensive.json"; (read_bytecode(PATH), load_contract(PATH)) } + +pub(crate) fn read_proxy_counter_contract() -> (Vec, Contract) { + const PATH: &str = "etc/contracts-test-data/artifacts-zk/contracts/counter/proxy_counter.sol/ProxyCounter.json"; + (read_bytecode(PATH), load_contract(PATH)) +} diff --git a/core/lib/multivm/src/versions/vm_latest/tests/get_used_contracts.rs b/core/lib/multivm/src/versions/vm_latest/tests/get_used_contracts.rs index 752fd1a9087d..a77b8c97b425 100644 --- a/core/lib/multivm/src/versions/vm_latest/tests/get_used_contracts.rs +++ b/core/lib/multivm/src/versions/vm_latest/tests/get_used_contracts.rs @@ -1,9 +1,13 @@ use std::{ collections::{HashMap, HashSet}, + iter, str::FromStr, }; +use assert_matches::assert_matches; +use ethabi::Token; use itertools::Itertools; +use zk_evm_1_3_1::zkevm_opcode_defs::decoding::{EncodingModeProduction, VmEncodingMode}; use zk_evm_1_5_0::{ abstractions::DecommittmentProcessor, aux_structures::{DecommittmentQuery, MemoryPage, Timestamp}, @@ -11,15 +15,18 @@ use zk_evm_1_5_0::{ }; use zksync_system_constants::CONTRACT_DEPLOYER_ADDRESS; use zksync_test_account::Account; -use zksync_types::{Execute, U256}; +use zksync_types::{Address, Execute, U256}; use zksync_utils::{bytecode::hash_bytecode, h256_to_u256}; +use zksync_vm_interface::VmExecutionResultAndLogs; use crate::{ - interface::{storage::WriteStorage, TxExecutionMode, VmExecutionMode, VmInterface}, + interface::{ + storage::WriteStorage, ExecutionResult, TxExecutionMode, VmExecutionMode, VmInterface, + }, vm_latest::{ tests::{ - tester::{TxType, VmTesterBuilder}, - utils::{read_test_contract, BASE_SYSTEM_CONTRACTS}, + tester::{TxType, VmTester, VmTesterBuilder}, + utils::{read_proxy_counter_contract, read_test_contract, BASE_SYSTEM_CONTRACTS}, }, 
HistoryDisabled, Vm, }, @@ -148,10 +155,92 @@ fn known_bytecodes_without_aa_code( .known_bytecodes .inner() .clone(); - known_bytecodes_without_aa_code .remove(&h256_to_u256(BASE_SYSTEM_CONTRACTS.default_aa.hash)) .unwrap(); - known_bytecodes_without_aa_code } + +/// Counter test contract bytecode inflated by appending lots of `NOP` opcodes at the end. This leads to non-trivial +/// decommitment cost (>10,000 gas). +fn inflated_counter_bytecode() -> Vec { + let mut counter_bytecode = read_test_contract(); + counter_bytecode.extend( + iter::repeat(EncodingModeProduction::nop_encoding().to_be_bytes()) + .take(10_000) + .flatten(), + ); + counter_bytecode +} + +fn execute_proxy_counter(gas: u32) -> (VmTester, U256, VmExecutionResultAndLogs) { + let counter_bytecode = inflated_counter_bytecode(); + let counter_bytecode_hash = h256_to_u256(hash_bytecode(&counter_bytecode)); + let counter_address = Address::repeat_byte(0x23); + + let mut vm = VmTesterBuilder::new(HistoryDisabled) + .with_empty_in_memory_storage() + .with_custom_contracts(vec![(counter_bytecode, counter_address, false)]) + .with_execution_mode(TxExecutionMode::VerifyExecute) + .with_random_rich_accounts(1) + .build(); + + let (proxy_counter_bytecode, proxy_counter_abi) = read_proxy_counter_contract(); + let account = &mut vm.rich_accounts[0]; + let deploy_tx = account.get_deploy_tx( + &proxy_counter_bytecode, + Some(&[Token::Address(counter_address)]), + TxType::L2, + ); + let (compression_result, exec_result) = vm + .vm + .execute_transaction_with_bytecode_compression(deploy_tx.tx, true); + compression_result.unwrap(); + assert!(!exec_result.result.is_failed(), "{exec_result:#?}"); + + let decommitted_hashes = vm.vm.get_used_contracts(); + assert!( + !decommitted_hashes.contains(&counter_bytecode_hash), + "{decommitted_hashes:?}" + ); + + let increment = proxy_counter_abi.function("increment").unwrap(); + let increment_tx = account.get_l2_tx_for_execute( + Execute { + contract_address: deploy_tx.address, + calldata: increment + .encode_input(&[Token::Uint(1.into()), Token::Uint(gas.into())]) + .unwrap(), + value: 0.into(), + factory_deps: vec![], + }, + None, + ); + let (compression_result, exec_result) = vm + .vm + .execute_transaction_with_bytecode_compression(increment_tx, true); + compression_result.unwrap(); + (vm, counter_bytecode_hash, exec_result) +} + +#[test] +fn get_used_contracts_with_far_call() { + let (vm, counter_bytecode_hash, exec_result) = execute_proxy_counter(100_000); + assert!(!exec_result.result.is_failed(), "{exec_result:#?}"); + let decommitted_hashes = vm.vm.get_used_contracts(); + assert!( + decommitted_hashes.contains(&counter_bytecode_hash), + "{decommitted_hashes:?}" + ); +} + +#[test] +fn get_used_contracts_with_out_of_gas_far_call() { + let (vm, counter_bytecode_hash, exec_result) = execute_proxy_counter(10_000); + assert_matches!(exec_result.result, ExecutionResult::Revert { .. 
}); + let decommitted_hashes = vm.vm.get_used_contracts(); + assert!( + decommitted_hashes.contains(&counter_bytecode_hash), + "{decommitted_hashes:?}" + ); +} diff --git a/core/lib/multivm/src/versions/vm_latest/tests/l1_tx_execution.rs b/core/lib/multivm/src/versions/vm_latest/tests/l1_tx_execution.rs index 4d42bb96cc96..3e76fbf12723 100644 --- a/core/lib/multivm/src/versions/vm_latest/tests/l1_tx_execution.rs +++ b/core/lib/multivm/src/versions/vm_latest/tests/l1_tx_execution.rs @@ -112,9 +112,8 @@ fn test_l1_tx_execution() { let res = vm.vm.execute(VmExecutionMode::OneTx); let storage_logs = res.logs.storage_logs; let res = StorageWritesDeduplicator::apply_on_empty_state(&storage_logs); - // We changed one slot inside contract. However, the rewrite of the `basePubdataSpent` didn't happen, since it was the same - // as the start of the previous tx. Thus we have `+1` slot for the changed counter and `-1` slot for base pubdata spent - assert_eq!(res.initial_storage_writes - basic_initial_writes, 0); + // We changed one slot inside contract. + assert_eq!(res.initial_storage_writes - basic_initial_writes, 1); // No repeated writes let repeated_writes = res.repeated_storage_writes; @@ -142,7 +141,7 @@ fn test_l1_tx_execution() { let res = StorageWritesDeduplicator::apply_on_empty_state(&result.logs.storage_logs); // There are only basic initial writes - assert_eq!(res.initial_storage_writes - basic_initial_writes, 1); + assert_eq!(res.initial_storage_writes - basic_initial_writes, 2); } #[test] diff --git a/core/lib/multivm/src/versions/vm_latest/tests/migration.rs b/core/lib/multivm/src/versions/vm_latest/tests/migration.rs deleted file mode 100644 index 6bd0e87615ed..000000000000 --- a/core/lib/multivm/src/versions/vm_latest/tests/migration.rs +++ /dev/null @@ -1,51 +0,0 @@ -use zksync_types::{get_code_key, H256, SYSTEM_CONTEXT_ADDRESS}; - -use crate::{ - interface::{TxExecutionMode, VmExecutionMode, VmInterface}, - vm_latest::{ - tests::{ - tester::{get_empty_storage, DeployContractsTx, TxType, VmTesterBuilder}, - utils::read_test_contract, - }, - HistoryEnabled, - }, -}; - -/// This test checks that the new bootloader will work fine even if the previous system context contract is not -/// compatible with it, i.e. the bootloader will upgrade it before starting any transaction. -#[test] -fn test_migration_for_system_context_aa_interaction() { - let mut storage = get_empty_storage(); - // We will set the system context bytecode to zero. - storage.set_value(get_code_key(&SYSTEM_CONTEXT_ADDRESS), H256::zero()); - - // In this test, we aim to test whether a simple account interaction (without any fee logic) - // will work. The account will try to deploy a simple contract from integration tests. - let mut vm = VmTesterBuilder::new(HistoryEnabled) - .with_storage(storage) - .with_execution_mode(TxExecutionMode::VerifyExecute) - .with_random_rich_accounts(1) - .build(); - - // Now, we will just proceed with standard transaction execution. - // The bootloader should be able to update system context regardless of whether - // the upgrade transaction is there or not. - let account = &mut vm.rich_accounts[0]; - let counter = read_test_contract(); - let DeployContractsTx { tx, .. 
} = account.get_deploy_tx(&counter, None, TxType::L2); - - vm.vm.push_transaction(tx); - let result = vm.vm.execute(VmExecutionMode::OneTx); - assert!( - !result.result.is_failed(), - "Transaction wasn't successful {:#?}", - result.result - ); - - let batch_result = vm.vm.execute(VmExecutionMode::Batch); - assert!( - !batch_result.result.is_failed(), - "Batch transaction wasn't successful {:#?}", - batch_result.result - ); -} diff --git a/core/lib/multivm/src/versions/vm_latest/tests/mod.rs b/core/lib/multivm/src/versions/vm_latest/tests/mod.rs index aececf7fea88..cc370f3906ea 100644 --- a/core/lib/multivm/src/versions/vm_latest/tests/mod.rs +++ b/core/lib/multivm/src/versions/vm_latest/tests/mod.rs @@ -14,7 +14,6 @@ mod is_write_initial; mod l1_messenger; mod l1_tx_execution; mod l2_blocks; -mod migration; mod nonce_holder; mod precompiles; mod prestate_tracer; diff --git a/core/lib/multivm/src/versions/vm_latest/tests/nonce_holder.rs b/core/lib/multivm/src/versions/vm_latest/tests/nonce_holder.rs index a8ce1665e6fe..55121debf9d1 100644 --- a/core/lib/multivm/src/versions/vm_latest/tests/nonce_holder.rs +++ b/core/lib/multivm/src/versions/vm_latest/tests/nonce_holder.rs @@ -40,6 +40,7 @@ impl From for u8 { #[test] fn test_nonce_holder() { let mut account = Account::random(); + // let hex_addr = hex::encode(account.address.to_fixed_bytes()); let mut vm = VmTesterBuilder::new(HistoryEnabled) .with_empty_in_memory_storage() @@ -103,7 +104,7 @@ fn test_nonce_holder() { // run_nonce_test( // 1u32, // NonceHolderTestMode::SetValueUnderNonce, - // Some("Previous nonce has not been used".to_string()), + // Some("Error function_selector = 0x13595475, data = 0x13595475".to_string()), // "Allowed to set value under non sequential value", // ); @@ -144,7 +145,7 @@ fn test_nonce_holder() { // run_nonce_test( // 10u32, // NonceHolderTestMode::IncreaseMinNonceBy5, - // Some("Reusing the same nonce twice".to_string()), + // Some(format!("Error function_selector = 0xe90aded4, data = 0xe90aded4000000000000000000000000{hex_addr}000000000000000000000000000000000000000000000000000000000000000a")), // "Allowed to reuse nonce below the minimal one", // ); @@ -160,7 +161,7 @@ fn test_nonce_holder() { // run_nonce_test( // 13u32, // NonceHolderTestMode::IncreaseMinNonceBy5, - // Some("Reusing the same nonce twice".to_string()), + // Some(format!("Error function_selector = 0xe90aded4, data = 0xe90aded4000000000000000000000000{hex_addr}000000000000000000000000000000000000000000000000000000000000000d")), // "Allowed to reuse the same nonce twice", // ); @@ -176,7 +177,7 @@ fn test_nonce_holder() { // run_nonce_test( // 16u32, // NonceHolderTestMode::IncreaseMinNonceTooMuch, - // Some("The value for incrementing the nonce is too high".to_string()), + // Some("Error function_selector = 0x45ac24a6, data = 0x45ac24a600000000000000000000000000000000000000000000000000000001000000000000000000000000000000000000000000000000040000000000000000000000".to_string()), // "Allowed for incrementing min nonce too much", // ); @@ -184,7 +185,7 @@ fn test_nonce_holder() { // run_nonce_test( // 16u32, // NonceHolderTestMode::LeaveNonceUnused, - // Some("The nonce was not set as used".to_string()), + // Some(format!("Error function_selector = 0x1f2f8478, data = 0x1f2f8478000000000000000000000000{hex_addr}0000000000000000000000000000000000000000000000000000000000000010")), // "Allowed to leave nonce as unused", // ); } diff --git a/core/lib/multivm/src/versions/vm_latest/tests/rollbacks.rs 
b/core/lib/multivm/src/versions/vm_latest/tests/rollbacks.rs index 72ab2cf3c76e..cfc6fbce9e44 100644 --- a/core/lib/multivm/src/versions/vm_latest/tests/rollbacks.rs +++ b/core/lib/multivm/src/versions/vm_latest/tests/rollbacks.rs @@ -1,6 +1,6 @@ use ethabi::Token; use zksync_contracts::{get_loadnext_contract, test_contracts::LoadnextContractExecutionParams}; -use zksync_types::{get_nonce_key, Execute, U256}; +use zksync_types::{get_nonce_key, U256}; use crate::{ interface::{ @@ -10,160 +10,173 @@ use crate::{ }, tracers::dynamic::vm_1_5_0::DynTracer, vm_latest::{ - tests::{ - tester::{DeployContractsTx, TransactionTestInfo, TxModifier, TxType, VmTesterBuilder}, - utils::read_test_contract, - }, + tests::tester::{DeployContractsTx, TxType, VmTesterBuilder}, types::internals::ZkSyncVmState, BootloaderState, HistoryEnabled, HistoryMode, SimpleMemory, ToTracerPointer, VmTracer, }, }; -#[test] -fn test_vm_rollbacks() { - let mut vm = VmTesterBuilder::new(HistoryEnabled) - .with_empty_in_memory_storage() - .with_execution_mode(TxExecutionMode::VerifyExecute) - .with_random_rich_accounts(1) - .build(); +// #[test] +// fn test_vm_rollbacks() { +// let mut vm = VmTesterBuilder::new(HistoryEnabled) +// .with_empty_in_memory_storage() +// .with_execution_mode(TxExecutionMode::VerifyExecute) +// .with_random_rich_accounts(1) +// .build(); - let mut account = vm.rich_accounts[0].clone(); - let counter = read_test_contract(); - let tx_0 = account.get_deploy_tx(&counter, None, TxType::L2).tx; - let _tx_1 = account.get_deploy_tx(&counter, None, TxType::L2).tx; - let tx_2 = account.get_deploy_tx(&counter, None, TxType::L2).tx; - let _ = TransactionTestInfo::new_processed(tx_0.clone(), false); - let _ = - TransactionTestInfo::new_rejected(tx_0.clone(), TxModifier::WrongSignatureLength.into()); - TransactionTestInfo::new_rejected(tx_0.clone(), TxModifier::WrongMagicValue.into()); - TransactionTestInfo::new_rejected(tx_0.clone(), TxModifier::WrongSignature.into()); - // The correct nonce is 0, this tx will fail - TransactionTestInfo::new_rejected(tx_2.clone(), TxModifier::WrongNonce.into()); - // This tx will succeed - TransactionTestInfo::new_processed(tx_0.clone(), false); - // The correct nonce is 1, this tx will fail - TransactionTestInfo::new_rejected(tx_0.clone(), TxModifier::NonceReused.into()); - let _result_without_rollbacks = vm.execute_and_verify_txs( - &[], // &vec![ - // TransactionTestInfo::new_processed(tx_0.clone(), false), - // TransactionTestInfo::new_processed(tx_1.clone(), false), - // TransactionTestInfo::new_processed(tx_2.clone(), false), - // ] - ); +// let mut account = vm.rich_accounts[0].clone(); +// let counter = read_test_contract(); +// let tx_0 = account.get_deploy_tx(&counter, None, TxType::L2).tx; +// let tx_1 = account.get_deploy_tx(&counter, None, TxType::L2).tx; +// let tx_2 = account.get_deploy_tx(&counter, None, TxType::L2).tx; - // reset vm - vm.reset_with_empty_storage(); - // TODO - // let _result_with_rollbacks = vm.execute_and_verify_txs(&vec![ - // TransactionTestInfo::new_rejected(tx_0.clone(), TxModifier::WrongSignatureLength.into()), - // TransactionTestInfo::new_rejected(tx_0.clone(), TxModifier::WrongMagicValue.into()), - // TransactionTestInfo::new_rejected(tx_0.clone(), TxModifier::WrongSignature.into()), - // // The correct nonce is 0, this tx will fail - // TransactionTestInfo::new_rejected(tx_2.clone(), TxModifier::WrongNonce.into()), - // // This tx will succeed - // TransactionTestInfo::new_processed(tx_0.clone(), false), - // // The correct nonce is 1, 
this tx will fail - // TransactionTestInfo::new_rejected(tx_0.clone(), TxModifier::NonceReused.into()), - // // The correct nonce is 1, this tx will fail - // TransactionTestInfo::new_rejected(tx_2.clone(), TxModifier::WrongNonce.into()), - // // This tx will succeed - // TransactionTestInfo::new_processed(tx_1, false), - // // The correct nonce is 2, this tx will fail - // TransactionTestInfo::new_rejected(tx_0.clone(), TxModifier::NonceReused.into()), - // // This tx will succeed - // TransactionTestInfo::new_processed(tx_2.clone(), false), - // // This tx will fail - // TransactionTestInfo::new_rejected(tx_2, TxModifier::NonceReused.into()), - // TransactionTestInfo::new_rejected(tx_0, TxModifier::NonceReused.into()), - // ]); - // TODO - // assert_eq!(result_without_rollbacks, result_with_rollbacks); -} +// let result_without_rollbacks = vm.execute_and_verify_txs(&vec![ +// TransactionTestInfo::new_processed(tx_0.clone(), false), +// TransactionTestInfo::new_processed(tx_1.clone(), false), +// TransactionTestInfo::new_processed(tx_2.clone(), false), +// ]); -#[test] -fn test_vm_loadnext_rollbacks() { - // TODO add mut - let vm = VmTesterBuilder::new(HistoryEnabled) - .with_empty_in_memory_storage() - .with_execution_mode(TxExecutionMode::VerifyExecute) - .with_random_rich_accounts(1) - .build(); - let mut account = vm.rich_accounts[0].clone(); +// // reset vm +// vm.reset_with_empty_storage(); - let loadnext_contract = get_loadnext_contract(); - let loadnext_constructor_data = &[Token::Uint(U256::from(100))]; - let DeployContractsTx { - tx: _loadnext_deploy_tx, - address, - .. - } = account.get_deploy_tx_with_factory_deps( - &loadnext_contract.bytecode, - Some(loadnext_constructor_data), - loadnext_contract.factory_deps.clone(), - TxType::L2, - ); +// let result_with_rollbacks = vm.execute_and_verify_txs(&vec![ +// TransactionTestInfo::new_rejected(tx_0.clone(), TxModifier::WrongSignatureLength.into()), +// TransactionTestInfo::new_rejected(tx_0.clone(), TxModifier::WrongMagicValue.into()), +// TransactionTestInfo::new_rejected(tx_0.clone(), TxModifier::WrongSignature.into()), +// // The correct nonce is 0, this tx will fail +// TransactionTestInfo::new_rejected( +// tx_2.clone(), +// TxModifier::WrongNonce(tx_2.nonce().unwrap(), Nonce(0)).into(), +// ), +// // This tx will succeed +// TransactionTestInfo::new_processed(tx_0.clone(), false), +// // The correct nonce is 1, this tx will fail +// TransactionTestInfo::new_rejected( +// tx_0.clone(), +// TxModifier::NonceReused(tx_0.initiator_account(), tx_0.nonce().unwrap()).into(), +// ), +// // The correct nonce is 1, this tx will fail +// TransactionTestInfo::new_rejected( +// tx_2.clone(), +// TxModifier::WrongNonce(tx_2.nonce().unwrap(), Nonce(1)).into(), +// ), +// // This tx will succeed +// TransactionTestInfo::new_processed(tx_1, false), +// // The correct nonce is 2, this tx will fail +// TransactionTestInfo::new_rejected( +// tx_0.clone(), +// TxModifier::NonceReused(tx_0.initiator_account(), tx_0.nonce().unwrap()).into(), +// ), +// // This tx will succeed +// TransactionTestInfo::new_processed(tx_2.clone(), false), +// // This tx will fail +// TransactionTestInfo::new_rejected( +// tx_2.clone(), +// TxModifier::NonceReused(tx_2.initiator_account(), tx_2.nonce().unwrap()).into(), +// ), +// TransactionTestInfo::new_rejected( +// tx_0.clone(), +// TxModifier::NonceReused(tx_0.initiator_account(), tx_0.nonce().unwrap()).into(), +// ), +// ]); - let _loadnext_tx_1 = account.get_l2_tx_for_execute( - Execute { - contract_address: 
address, - calldata: LoadnextContractExecutionParams { - reads: 100, - writes: 100, - events: 100, - hashes: 500, - recursive_calls: 10, - deploys: 60, - } - .to_bytes(), - value: Default::default(), - factory_deps: vec![], - }, - None, - ); +// // assert_eq!(result_without_rollbacks, result_with_rollbacks); +// } - let _loadnext_tx_2 = account.get_l2_tx_for_execute( - Execute { - contract_address: address, - calldata: LoadnextContractExecutionParams { - reads: 100, - writes: 100, - events: 100, - hashes: 500, - recursive_calls: 10, - deploys: 60, - } - .to_bytes(), - value: Default::default(), - factory_deps: vec![], - }, - None, - ); - // TODO - // let _result_without_rollbacks = vm.execute_and_verify_txs(&vec![ - // TransactionTestInfo::new_processed(loadnext_deploy_tx.clone(), false), - // TransactionTestInfo::new_processed(loadnext_tx_1.clone(), false), - // TransactionTestInfo::new_processed(loadnext_tx_2.clone(), false), - // ]); - - // // reset vm - // vm.reset_with_empty_storage(); - // todo - // let _result_with_rollbacks = vm.execute_and_verify_txs(&vec![ - // TransactionTestInfo::new_processed(loadnext_deploy_tx.clone(), false), - // TransactionTestInfo::new_processed(loadnext_tx_1.clone(), true), - // TransactionTestInfo::new_rejected( - // loadnext_deploy_tx.clone(), - // TxModifier::NonceReused.into(), - // ), - // TransactionTestInfo::new_processed(loadnext_tx_1, false), - // TransactionTestInfo::new_processed(loadnext_tx_2.clone(), true), - // TransactionTestInfo::new_processed(loadnext_tx_2.clone(), true), - // TransactionTestInfo::new_rejected(loadnext_deploy_tx, TxModifier::NonceReused.into()), - // TransactionTestInfo::new_processed(loadnext_tx_2, false), - // ]); - // TODO - // assert_eq!(result_without_rollbacks, result_with_rollbacks); -} +// #[test] +// fn test_vm_loadnext_rollbacks() { +// let mut vm = VmTesterBuilder::new(HistoryEnabled) +// .with_empty_in_memory_storage() +// .with_execution_mode(TxExecutionMode::VerifyExecute) +// .with_random_rich_accounts(1) +// .build(); +// let mut account = vm.rich_accounts[0].clone(); + +// let loadnext_contract = get_loadnext_contract(); +// let loadnext_constructor_data = &[Token::Uint(U256::from(100))]; +// let DeployContractsTx { +// tx: loadnext_deploy_tx, +// address, +// .. 
+// } = account.get_deploy_tx_with_factory_deps( +// &loadnext_contract.bytecode, +// Some(loadnext_constructor_data), +// loadnext_contract.factory_deps.clone(), +// TxType::L2, +// ); + +// let loadnext_tx_1 = account.get_l2_tx_for_execute( +// Execute { +// contract_address: address, +// calldata: LoadnextContractExecutionParams { +// reads: 100, +// writes: 100, +// events: 100, +// hashes: 500, +// recursive_calls: 10, +// deploys: 60, +// } +// .to_bytes(), +// value: Default::default(), +// factory_deps: vec![], +// }, +// None, +// ); + +// let loadnext_tx_2 = account.get_l2_tx_for_execute( +// Execute { +// contract_address: address, +// calldata: LoadnextContractExecutionParams { +// reads: 100, +// writes: 100, +// events: 100, +// hashes: 500, +// recursive_calls: 10, +// deploys: 60, +// } +// .to_bytes(), +// value: Default::default(), +// factory_deps: vec![], +// }, +// None, +// ); + +// // let result_without_rollbacks = vm.execute_and_verify_txs(&vec![ +// // TransactionTestInfo::new_processed(loadnext_deploy_tx.clone(), false), +// // TransactionTestInfo::new_processed(loadnext_tx_1.clone(), false), +// // TransactionTestInfo::new_processed(loadnext_tx_2.clone(), false), +// // ]); + +// // TODO: reset vm +// // vm.reset_with_empty_storage(); + +// // let result_with_rollbacks = vm.execute_and_verify_txs(&vec![ +// // TransactionTestInfo::new_processed(loadnext_deploy_tx.clone(), false), +// // TransactionTestInfo::new_processed(loadnext_tx_1.clone(), true), +// // TransactionTestInfo::new_rejected( +// // loadnext_deploy_tx.clone(), +// // TxModifier::NonceReused( +// // loadnext_deploy_tx.initiator_account(), +// // loadnext_deploy_tx.nonce().unwrap(), +// // ) +// // .into(), +// // ), +// // TransactionTestInfo::new_processed(loadnext_tx_1, false), +// // TransactionTestInfo::new_processed(loadnext_tx_2.clone(), true), +// // TransactionTestInfo::new_processed(loadnext_tx_2.clone(), true), +// // TransactionTestInfo::new_rejected( +// // loadnext_deploy_tx.clone(), +// // TxModifier::NonceReused( +// // loadnext_deploy_tx.initiator_account(), +// // loadnext_deploy_tx.nonce().unwrap(), +// // ) +// // .into(), +// // ), +// // TransactionTestInfo::new_processed(loadnext_tx_2, false), +// // ]); + +// // assert_eq!(result_without_rollbacks, result_with_rollbacks); +// } // Testing tracer that does not allow the recursion to go deeper than a certain limit struct MaxRecursionTracer { diff --git a/core/lib/multivm/src/versions/vm_latest/tests/tester/mod.rs b/core/lib/multivm/src/versions/vm_latest/tests/tester/mod.rs index c3cc5d8d9803..d55d1fd6a69b 100644 --- a/core/lib/multivm/src/versions/vm_latest/tests/tester/mod.rs +++ b/core/lib/multivm/src/versions/vm_latest/tests/tester/mod.rs @@ -1,4 +1,4 @@ -pub(crate) use transaction_test_info::{ExpectedError, TransactionTestInfo, TxModifier}; +pub(crate) use transaction_test_info::{ExpectedError, TransactionTestInfo}; pub(crate) use vm_tester::{ default_l1_batch, get_empty_storage, InMemoryStorageView, VmTester, VmTesterBuilder, }; diff --git a/core/lib/multivm/src/versions/vm_latest/tests/tester/transaction_test_info.rs b/core/lib/multivm/src/versions/vm_latest/tests/tester/transaction_test_info.rs index 114f80d1a217..c6cc2823a04b 100644 --- a/core/lib/multivm/src/versions/vm_latest/tests/tester/transaction_test_info.rs +++ b/core/lib/multivm/src/versions/vm_latest/tests/tester/transaction_test_info.rs @@ -1,4 +1,4 @@ -use zksync_types::{ExecuteTransactionCommon, Transaction}; +use zksync_types::{ExecuteTransactionCommon, 
Nonce, Transaction, H160}; use crate::{ interface::{ @@ -8,15 +8,21 @@ use crate::{ vm_latest::{tests::tester::vm_tester::VmTester, HistoryEnabled}, }; +// FIXME: remove the dead code allow +#[allow(unused_variables)] +#[allow(dead_code)] #[derive(Debug, Clone)] pub(crate) enum TxModifier { WrongSignatureLength, WrongSignature, WrongMagicValue, - WrongNonce, - NonceReused, + WrongNonce(Nonce, Nonce), + NonceReused(H160, Nonce), } +// FIXME: remove the dead code allow +#[allow(unused_variables)] +#[allow(dead_code)] #[derive(Debug, Clone)] pub(crate) enum TxExpectedResult { Rejected { error: ExpectedError }, @@ -39,14 +45,11 @@ impl From for ExpectedError { fn from(value: TxModifier) -> Self { let revert_reason = match value { TxModifier::WrongSignatureLength => { - Halt::ValidationFailed(VmRevertReason::General { - msg: "Signature length is incorrect".to_string(), + Halt::ValidationFailed(VmRevertReason::Unknown { + function_selector: vec![144, 240, 73, 201], data: vec![ - 8, 195, 121, 160, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 32, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 29, 83, 105, 103, 110, 97, 116, 117, 114, 101, 32, - 108, 101, 110, 103, 116, 104, 32, 105, 115, 32, 105, 110, 99, 111, 114, 114, 101, 99, - 116, 0, 0, 0, + 144, 240, 73, 201, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 45 ], }) } @@ -57,38 +60,35 @@ impl From for ExpectedError { }) } TxModifier::WrongMagicValue => { - Halt::ValidationFailed(VmRevertReason::General { - msg: "v is neither 27 nor 28".to_string(), - data: vec![ - 8, 195, 121, 160, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 32, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 22, 118, 32, 105, 115, 32, 110, 101, 105, 116, 104, - 101, 114, 32, 50, 55, 32, 110, 111, 114, 32, 50, 56, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - ], + Halt::ValidationFailed(VmRevertReason::Unknown { + function_selector: vec![144, 240, 73, 201], + data: vec![144, 240, 73, 201, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1], }) } - TxModifier::WrongNonce => { - Halt::ValidationFailed(VmRevertReason::General { - msg: "Incorrect nonce".to_string(), - data: vec![ - 8, 195, 121, 160, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 32, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 15, 73, 110, 99, 111, 114, 114, 101, 99, 116, 32, 110, - 111, 110, 99, 101, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - ], + TxModifier::WrongNonce(expected, actual) => { + let function_selector = vec![98, 106, 222, 48]; + let expected_nonce_bytes = expected.0.to_be_bytes().to_vec(); + let actual_nonce_bytes = actual.0.to_be_bytes().to_vec(); + // padding is 28 because an address takes up 4 bytes and we need it to fill a 32 byte field + let nonce_padding = vec![0u8; 28]; + let data = [function_selector.clone(), nonce_padding.clone(), expected_nonce_bytes, nonce_padding.clone(), actual_nonce_bytes].concat(); + Halt::ValidationFailed(VmRevertReason::Unknown { + function_selector, + data }) } - TxModifier::NonceReused => { - 
Halt::ValidationFailed(VmRevertReason::General { - msg: "Reusing the same nonce twice".to_string(), - data: vec![ - 8, 195, 121, 160, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 32, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 28, 82, 101, 117, 115, 105, 110, 103, 32, 116, 104, - 101, 32, 115, 97, 109, 101, 32, 110, 111, 110, 99, 101, 32, 116, 119, 105, 99, 101, 0, - 0, 0, 0, - ], + TxModifier::NonceReused(addr, nonce) => { + let function_selector = vec![233, 10, 222, 212]; + let addr = addr.as_bytes().to_vec(); + // padding is 12 because an address takes up 20 bytes and we need it to fill a 32 byte field + let addr_padding = vec![0u8; 12]; + // padding is 28 because an address takes up 4 bytes and we need it to fill a 32 byte field + let nonce_padding = vec![0u8; 28]; + let data = [function_selector.clone(), addr_padding, addr, nonce_padding, nonce.0.to_be_bytes().to_vec()].concat(); + Halt::ValidationFailed(VmRevertReason::Unknown { + function_selector, + data, }) } }; @@ -114,10 +114,10 @@ impl TransactionTestInfo { } TxModifier::WrongSignature => data.signature = vec![27u8; 65], TxModifier::WrongMagicValue => data.signature = vec![1u8; 65], - TxModifier::WrongNonce => { + TxModifier::WrongNonce(_, _) => { // Do not need to modify signature for nonce error } - TxModifier::NonceReused => { + TxModifier::NonceReused(_, _) => { // Do not need to modify signature for nonce error } } @@ -135,6 +135,8 @@ impl TransactionTestInfo { } } + // FIXME: remove allow dead code + #[allow(dead_code)] pub(crate) fn new_processed(transaction: Transaction, should_be_rollbacked: bool) -> Self { Self { tx: transaction, @@ -182,6 +184,8 @@ impl TransactionTestInfo { } impl VmTester { + // FIXME: remove allow dead code + #[allow(dead_code)] pub(crate) fn execute_and_verify_txs( &mut self, txs: &[TransactionTestInfo], diff --git a/core/lib/multivm/src/versions/vm_latest/tests/tester/vm_tester.rs b/core/lib/multivm/src/versions/vm_latest/tests/tester/vm_tester.rs index f0739c48c649..270433166655 100644 --- a/core/lib/multivm/src/versions/vm_latest/tests/tester/vm_tester.rs +++ b/core/lib/multivm/src/versions/vm_latest/tests/tester/vm_tester.rs @@ -60,10 +60,10 @@ impl VmTester { self.test_contract = Some(deployed_address); } - pub(crate) fn reset_with_empty_storage(&mut self) { - self.storage = StorageView::new(get_empty_storage()).to_rc_ptr(); - self.reset_state(false); - } + // pub(crate) fn reset_with_empty_storage(&mut self) { + // self.storage = StorageView::new(get_empty_storage()).to_rc_ptr(); + // self.reset_state(false); + // } /// Reset the state of the VM to the initial state. 
/// If `use_latest_l2_block` is true, then the VM will use the latest L2 block from storage, diff --git a/core/lib/multivm/src/versions/vm_latest/tests/utils.rs b/core/lib/multivm/src/versions/vm_latest/tests/utils.rs index 988e6256a499..c8e06b2c261a 100644 --- a/core/lib/multivm/src/versions/vm_latest/tests/utils.rs +++ b/core/lib/multivm/src/versions/vm_latest/tests/utils.rs @@ -143,3 +143,8 @@ pub(crate) fn read_expensive_contract() -> (Vec, Contract) { "etc/contracts-test-data/artifacts-zk/contracts/expensive/expensive.sol/Expensive.json"; (read_bytecode(PATH), load_contract(PATH)) } + +pub(crate) fn read_proxy_counter_contract() -> (Vec, Contract) { + const PATH: &str = "etc/contracts-test-data/artifacts-zk/contracts/counter/proxy_counter.sol/ProxyCounter.json"; + (read_bytecode(PATH), load_contract(PATH)) +} diff --git a/core/lib/protobuf_config/src/base_token_adjuster.rs b/core/lib/protobuf_config/src/base_token_adjuster.rs index d68db5fd9796..951feac16533 100644 --- a/core/lib/protobuf_config/src/base_token_adjuster.rs +++ b/core/lib/protobuf_config/src/base_token_adjuster.rs @@ -30,6 +30,12 @@ impl ProtoRepr for proto::BaseTokenAdjuster { l1_receipt_checking_max_attempts: self .l1_receipt_checking_max_attempts .unwrap_or(Self::Type::default_l1_receipt_checking_max_attempts()), + price_fetching_sleep_ms: self + .price_fetching_sleep_ms + .unwrap_or(Self::Type::default_price_fetching_sleep_ms()), + price_fetching_max_attempts: self + .price_fetching_max_attempts + .unwrap_or(Self::Type::default_price_fetching_max_attempts()), l1_tx_sending_max_attempts: self .l1_tx_sending_max_attempts .unwrap_or(Self::Type::default_l1_tx_sending_max_attempts()), @@ -47,6 +53,8 @@ impl ProtoRepr for proto::BaseTokenAdjuster { l1_receipt_checking_max_attempts: Some(this.l1_receipt_checking_max_attempts), l1_tx_sending_max_attempts: Some(this.l1_tx_sending_max_attempts), l1_tx_sending_sleep_ms: Some(this.l1_tx_sending_sleep_ms), + price_fetching_max_attempts: Some(this.price_fetching_max_attempts), + price_fetching_sleep_ms: Some(this.price_fetching_sleep_ms), max_tx_gas: Some(this.max_tx_gas), default_priority_fee_per_gas: Some(this.default_priority_fee_per_gas), max_acceptable_priority_fee_in_gwei: Some(this.max_acceptable_priority_fee_in_gwei), diff --git a/core/lib/protobuf_config/src/chain.rs b/core/lib/protobuf_config/src/chain.rs index fafecc0131cd..f91bf07e43f8 100644 --- a/core/lib/protobuf_config/src/chain.rs +++ b/core/lib/protobuf_config/src/chain.rs @@ -78,10 +78,9 @@ impl ProtoRepr for proto::StateKeeper { max_circuits_per_batch: required(&self.max_circuits_per_batch) .and_then(|x| Ok((*x).try_into()?)) .context("max_circuits_per_batch")?, - protective_reads_persistence_enabled: *required( - &self.protective_reads_persistence_enabled, - ) - .context("protective_reads_persistence_enabled")?, + protective_reads_persistence_enabled: self + .protective_reads_persistence_enabled + .unwrap_or_default(), // We need these values only for instantiating configs from environmental variables, so it's not // needed during the initialization from files diff --git a/core/lib/protobuf_config/src/da_dispatcher.rs b/core/lib/protobuf_config/src/da_dispatcher.rs index 1cafa37a1e19..d77073bd32cf 100644 --- a/core/lib/protobuf_config/src/da_dispatcher.rs +++ b/core/lib/protobuf_config/src/da_dispatcher.rs @@ -11,6 +11,7 @@ impl ProtoRepr for proto::DataAvailabilityDispatcher { polling_interval_ms: self.polling_interval_ms, max_rows_to_dispatch: self.max_rows_to_dispatch, max_retries: self.max_retries.map(|x| 
x as u16), + use_dummy_inclusion_data: self.use_dummy_inclusion_data, }) } @@ -19,6 +20,7 @@ impl ProtoRepr for proto::DataAvailabilityDispatcher { polling_interval_ms: this.polling_interval_ms, max_rows_to_dispatch: this.max_rows_to_dispatch, max_retries: this.max_retries.map(Into::into), + use_dummy_inclusion_data: this.use_dummy_inclusion_data, } } } diff --git a/core/lib/protobuf_config/src/proto/config/base_token_adjuster.proto b/core/lib/protobuf_config/src/proto/config/base_token_adjuster.proto index 1132858bfa6f..396bd400c04b 100644 --- a/core/lib/protobuf_config/src/proto/config/base_token_adjuster.proto +++ b/core/lib/protobuf_config/src/proto/config/base_token_adjuster.proto @@ -13,4 +13,6 @@ message BaseTokenAdjuster { optional uint32 l1_tx_sending_max_attempts = 8; optional uint64 l1_tx_sending_sleep_ms = 9; optional bool halt_on_error = 10; + optional uint32 price_fetching_max_attempts = 11; + optional uint64 price_fetching_sleep_ms = 12; } diff --git a/core/lib/protobuf_config/src/proto/config/da_dispatcher.proto b/core/lib/protobuf_config/src/proto/config/da_dispatcher.proto index d1d913498a4e..dd366bd5b925 100644 --- a/core/lib/protobuf_config/src/proto/config/da_dispatcher.proto +++ b/core/lib/protobuf_config/src/proto/config/da_dispatcher.proto @@ -2,10 +2,9 @@ syntax = "proto3"; package zksync.config.da_dispatcher; -import "zksync/config/object_store.proto"; - message DataAvailabilityDispatcher { optional uint32 polling_interval_ms = 1; optional uint32 max_rows_to_dispatch = 2; optional uint32 max_retries = 3; + optional bool use_dummy_inclusion_data = 4; } diff --git a/core/lib/vm_interface/src/lib.rs b/core/lib/vm_interface/src/lib.rs index b2b7d6484dad..120812842ad0 100644 --- a/core/lib/vm_interface/src/lib.rs +++ b/core/lib/vm_interface/src/lib.rs @@ -23,7 +23,10 @@ pub use crate::{ BytecodeCompressionError, Halt, TxRevertReason, VmRevertReason, VmRevertReasonParsingError, }, - inputs::{L1BatchEnv, L2BlockEnv, SystemEnv, TxExecutionMode, VmExecutionMode}, + inputs::{ + L1BatchEnv, L2BlockEnv, OneshotEnv, StoredL2BlockEnv, SystemEnv, TxExecutionMode, + VmExecutionMode, + }, outputs::{ BootloaderMemory, Call, CallType, CircuitStatistic, CompressedBytecodeInfo, CurrentExecutionState, DeduplicatedWritesMetrics, ExecutionResult, FinishedL1Batch, diff --git a/core/lib/vm_interface/src/types/inputs/l2_block.rs b/core/lib/vm_interface/src/types/inputs/l2_block.rs index 7c9a028bbad7..b081dfbdeacc 100644 --- a/core/lib/vm_interface/src/types/inputs/l2_block.rs +++ b/core/lib/vm_interface/src/types/inputs/l2_block.rs @@ -10,12 +10,21 @@ pub struct L2BlockEnv { } impl L2BlockEnv { - pub fn from_l2_block_data(miniblock_execution_data: &L2BlockExecutionData) -> Self { + pub fn from_l2_block_data(execution_data: &L2BlockExecutionData) -> Self { Self { - number: miniblock_execution_data.number.0, - timestamp: miniblock_execution_data.timestamp, - prev_block_hash: miniblock_execution_data.prev_block_hash, - max_virtual_blocks_to_create: miniblock_execution_data.virtual_blocks, + number: execution_data.number.0, + timestamp: execution_data.timestamp, + prev_block_hash: execution_data.prev_block_hash, + max_virtual_blocks_to_create: execution_data.virtual_blocks, } } } + +/// Current block information stored in the system context contract. Can be used to set up +/// oneshot transaction / call execution. 
+#[derive(Debug, Clone, Copy)] +pub struct StoredL2BlockEnv { + pub number: u32, + pub timestamp: u64, + pub txs_rolling_hash: H256, +} diff --git a/core/lib/vm_interface/src/types/inputs/mod.rs b/core/lib/vm_interface/src/types/inputs/mod.rs index 1d2c49cdfa11..4801c4d88b55 100644 --- a/core/lib/vm_interface/src/types/inputs/mod.rs +++ b/core/lib/vm_interface/src/types/inputs/mod.rs @@ -1,7 +1,7 @@ pub use self::{ execution_mode::VmExecutionMode, l1_batch_env::L1BatchEnv, - l2_block::L2BlockEnv, + l2_block::{L2BlockEnv, StoredL2BlockEnv}, system_env::{SystemEnv, TxExecutionMode}, }; @@ -9,3 +9,15 @@ mod execution_mode; mod l1_batch_env; mod l2_block; mod system_env; + +/// Full environment for oneshot transaction / call execution. +#[derive(Debug)] +pub struct OneshotEnv { + /// System environment. + pub system: SystemEnv, + /// Part of the environment specific to an L1 batch. + pub l1_batch: L1BatchEnv, + /// Part of the environment representing the current L2 block. Can be used to override storage slots + /// in the system context contract, which are set from `L1BatchEnv.first_l2_block` by default. + pub current_block: Option, +} diff --git a/core/node/api_server/src/execution_sandbox/apply.rs b/core/node/api_server/src/execution_sandbox/apply.rs index 4d9282b2331c..58c953236836 100644 --- a/core/node/api_server/src/execution_sandbox/apply.rs +++ b/core/node/api_server/src/execution_sandbox/apply.rs @@ -9,16 +9,19 @@ use std::time::{Duration, Instant}; use anyhow::Context as _; +use async_trait::async_trait; use tokio::runtime::Handle; -use zksync_dal::{Connection, ConnectionPool, Core, CoreDal, DalError}; +use zksync_dal::{Connection, Core, CoreDal, DalError}; use zksync_multivm::{ interface::{ storage::{ReadStorage, StoragePtr, StorageView, WriteStorage}, - L1BatchEnv, L2BlockEnv, SystemEnv, VmInterface, + BytecodeCompressionError, L1BatchEnv, L2BlockEnv, OneshotEnv, StoredL2BlockEnv, SystemEnv, + TxExecutionMode, VmExecutionMode, VmExecutionResultAndLogs, VmInterface, }, - utils::adjust_pubdata_price_for_tx, + tracers::StorageInvocations, + utils::{adjust_pubdata_price_for_tx, get_eth_call_gas_limit}, vm_latest::{constants::BATCH_COMPUTATIONAL_GAS_LIMIT, HistoryDisabled}, - VmInstance, + MultiVMTracer, MultiVmTracerPointer, VmInstance, }; use zksync_state::PostgresStorage; use zksync_system_constants::{ @@ -26,7 +29,7 @@ use zksync_system_constants::{ SYSTEM_CONTEXT_CURRENT_TX_ROLLING_HASH_POSITION, ZKPORTER_IS_AVAILABLE, }; use zksync_types::{ - api::{self, state_override::StateOverride}, + api, block::{pack_block_info, unpack_block_info, L2BlockHasher}, commitment::PubdataParams, fee_model::BatchFeeInput, @@ -38,179 +41,251 @@ use zksync_types::{ use zksync_utils::{h256_to_u256, time::seconds_since_epoch, u256_to_h256}; use super::{ - storage::StorageWithOverrides, vm_metrics::{self, SandboxStage, SANDBOX_METRICS}, - BlockArgs, TxExecutionArgs, TxSharedArgs, VmPermit, + ApiTracer, BlockArgs, OneshotExecutor, TxExecutionArgs, TxSetupArgs, }; -type VmStorageView<'a> = StorageView>>; -type BoxedVm<'a> = Box>, HistoryDisabled>>; +pub(super) async fn prepare_env_and_storage( + mut connection: Connection<'static, Core>, + setup_args: TxSetupArgs, + block_args: &BlockArgs, +) -> anyhow::Result<(OneshotEnv, PostgresStorage<'static>)> { + let initialization_stage = SANDBOX_METRICS.sandbox[&SandboxStage::Initialization].start(); -#[derive(Debug)] -struct Sandbox<'a> { - system_env: SystemEnv, - l1_batch_env: L1BatchEnv, - execution_args: &'a TxExecutionArgs, - l2_block_info_to_reset: Option, 
- storage_view: VmStorageView<'a>, -} - -impl<'a> Sandbox<'a> { - async fn new( - mut connection: Connection<'a, Core>, - shared_args: TxSharedArgs, - execution_args: &'a TxExecutionArgs, - block_args: BlockArgs, - state_override: &StateOverride, - ) -> anyhow::Result> { - let resolve_started_at = Instant::now(); - let resolved_block_info = block_args - .resolve_block_info(&mut connection) - .await - .with_context(|| format!("cannot resolve block numbers for {block_args:?}"))?; - let resolve_time = resolve_started_at.elapsed(); - // We don't want to emit too many logs. - if resolve_time > Duration::from_millis(10) { - tracing::debug!("Resolved block numbers (took {resolve_time:?})"); - } - - if block_args.resolves_to_latest_sealed_l2_block() { - shared_args - .caches - .schedule_values_update(resolved_block_info.state_l2_block_number); - } - - let (next_l2_block_info, l2_block_info_to_reset) = Self::load_l2_block_info( - &mut connection, - block_args.is_pending_l2_block(), - &resolved_block_info, - ) - .await?; - - let storage = PostgresStorage::new_async( - Handle::current(), - connection, - resolved_block_info.state_l2_block_number, - false, - ) + let resolve_started_at = Instant::now(); + let resolved_block_info = block_args + .resolve_block_info(&mut connection) .await - .context("cannot create `PostgresStorage`")? - .with_caches(shared_args.caches.clone()); - - let storage_with_overrides = StorageWithOverrides::new(storage, state_override); - let storage_view = StorageView::new(storage_with_overrides); - let (system_env, l1_batch_env) = Self::prepare_env( - shared_args, - execution_args, - &resolved_block_info, - next_l2_block_info, - ); + .with_context(|| format!("cannot resolve block numbers for {block_args:?}"))?; + let resolve_time = resolve_started_at.elapsed(); + // We don't want to emit too many logs. + if resolve_time > Duration::from_millis(10) { + tracing::debug!("Resolved block numbers (took {resolve_time:?})"); + } - Ok(Self { - system_env, - l1_batch_env, - storage_view, - execution_args, - l2_block_info_to_reset, - }) + if block_args.resolves_to_latest_sealed_l2_block() { + setup_args + .caches + .schedule_values_update(resolved_block_info.state_l2_block_number); } - async fn load_l2_block_info( - connection: &mut Connection<'_, Core>, - is_pending_block: bool, - resolved_block_info: &ResolvedBlockInfo, - ) -> anyhow::Result<(L2BlockEnv, Option)> { - let mut l2_block_info_to_reset = None; - let current_l2_block_info = StoredL2BlockInfo::new( - connection, - resolved_block_info.state_l2_block_number, - Some(resolved_block_info.state_l2_block_hash), - ) + let (next_block, current_block) = load_l2_block_info( + &mut connection, + block_args.is_pending_l2_block(), + &resolved_block_info, + ) + .await?; + + let storage = PostgresStorage::new_async( + Handle::current(), + connection, + resolved_block_info.state_l2_block_number, + false, + ) + .await + .context("cannot create `PostgresStorage`")? 
+ .with_caches(setup_args.caches.clone()); + + let (system, l1_batch) = prepare_env(setup_args, &resolved_block_info, next_block); + + let env = OneshotEnv { + system, + l1_batch, + current_block, + }; + initialization_stage.observe(); + Ok((env, storage)) +} + +async fn load_l2_block_info( + connection: &mut Connection<'_, Core>, + is_pending_block: bool, + resolved_block_info: &ResolvedBlockInfo, +) -> anyhow::Result<(L2BlockEnv, Option)> { + let mut current_block = None; + let next_block = read_stored_l2_block(connection, resolved_block_info.state_l2_block_number) .await .context("failed reading L2 block info")?; - let next_l2_block_info = if is_pending_block { - L2BlockEnv { - number: current_l2_block_info.l2_block_number + 1, - timestamp: resolved_block_info.l1_batch_timestamp, - prev_block_hash: current_l2_block_info.l2_block_hash, - // For simplicity, we assume each L2 block create one virtual block. - // This may be wrong only during transition period. - max_virtual_blocks_to_create: 1, - } - } else if current_l2_block_info.l2_block_number == 0 { - // Special case: - // - For environments, where genesis block was created before virtual block upgrade it doesn't matter what we put here. - // - Otherwise, we need to put actual values here. We cannot create next L2 block with block_number=0 and `max_virtual_blocks_to_create=0` - // because of SystemContext requirements. But, due to intrinsics of SystemContext, block.number still will be resolved to 0. - L2BlockEnv { - number: 1, - timestamp: 0, - prev_block_hash: L2BlockHasher::legacy_hash(L2BlockNumber(0)), - max_virtual_blocks_to_create: 1, - } - } else { - // We need to reset L2 block info in storage to process transaction in the current block context. - // Actual resetting will be done after `storage_view` is created. - let prev_l2_block_info = StoredL2BlockInfo::new( - connection, - resolved_block_info.state_l2_block_number - 1, - None, - ) + let next_block = if is_pending_block { + L2BlockEnv { + number: next_block.number + 1, + timestamp: resolved_block_info.l1_batch_timestamp, + prev_block_hash: resolved_block_info.state_l2_block_hash, + // For simplicity, we assume each L2 block create one virtual block. + // This may be wrong only during transition period. + max_virtual_blocks_to_create: 1, + } + } else if next_block.number == 0 { + // Special case: + // - For environments, where genesis block was created before virtual block upgrade it doesn't matter what we put here. + // - Otherwise, we need to put actual values here. We cannot create next L2 block with block_number=0 and `max_virtual_blocks_to_create=0` + // because of SystemContext requirements. But, due to intrinsics of SystemContext, block.number still will be resolved to 0. + L2BlockEnv { + number: 1, + timestamp: 0, + prev_block_hash: L2BlockHasher::legacy_hash(L2BlockNumber(0)), + max_virtual_blocks_to_create: 1, + } + } else { + // We need to reset L2 block info in storage to process transaction in the current block context. + // Actual resetting will be done after `storage_view` is created. 
+ let prev_block_number = resolved_block_info.state_l2_block_number - 1; + let prev_l2_block = read_stored_l2_block(connection, prev_block_number) .await .context("failed reading previous L2 block info")?; - l2_block_info_to_reset = Some(prev_l2_block_info); - L2BlockEnv { - number: current_l2_block_info.l2_block_number, - timestamp: current_l2_block_info.l2_block_timestamp, - prev_block_hash: prev_l2_block_info.l2_block_hash, - max_virtual_blocks_to_create: 1, - } + let mut prev_block_hash = connection + .blocks_web3_dal() + .get_l2_block_hash(prev_block_number) + .await + .map_err(DalError::generalize)?; + if prev_block_hash.is_none() { + // We might need to load the previous block hash from the snapshot recovery metadata + let snapshot_recovery = connection + .snapshot_recovery_dal() + .get_applied_snapshot_status() + .await + .map_err(DalError::generalize)?; + prev_block_hash = snapshot_recovery.and_then(|recovery| { + (recovery.l2_block_number == prev_block_number).then_some(recovery.l2_block_hash) + }); + } + + current_block = Some(prev_l2_block); + L2BlockEnv { + number: next_block.number, + timestamp: next_block.timestamp, + prev_block_hash: prev_block_hash.with_context(|| { + format!("missing hash for previous L2 block #{prev_block_number}") + })?, + max_virtual_blocks_to_create: 1, + } + }; + + Ok((next_block, current_block)) +} + +fn prepare_env( + setup_args: TxSetupArgs, + resolved_block_info: &ResolvedBlockInfo, + next_block: L2BlockEnv, +) -> (SystemEnv, L1BatchEnv) { + let TxSetupArgs { + execution_mode, + operator_account, + fee_input, + base_system_contracts, + validation_computational_gas_limit, + chain_id, + enforced_base_fee, + .. + } = setup_args; + + // In case we are executing in a past block, we'll use the historical fee data. + let fee_input = resolved_block_info + .historical_fee_input + .unwrap_or(fee_input); + let system_env = SystemEnv { + zk_porter_available: ZKPORTER_IS_AVAILABLE, + version: resolved_block_info.protocol_version, + base_system_smart_contracts: base_system_contracts + .get_by_protocol_version(resolved_block_info.protocol_version), + bootloader_gas_limit: BATCH_COMPUTATIONAL_GAS_LIMIT, + execution_mode, + default_validation_computational_gas_limit: validation_computational_gas_limit, + chain_id, + pubdata_params: resolved_block_info.pubdata_params, + }; + let l1_batch_env = L1BatchEnv { + previous_batch_hash: None, + number: resolved_block_info.vm_l1_batch_number, + timestamp: resolved_block_info.l1_batch_timestamp, + fee_input, + fee_account: *operator_account.address(), + enforced_base_fee, + first_l2_block: next_block, + }; + (system_env, l1_batch_env) +} + +// public for testing purposes +#[derive(Debug)] +pub(super) struct VmSandbox { + vm: Box>, + storage_view: StoragePtr>, + transaction: Transaction, +} + +impl VmSandbox { + /// This method is blocking. 
+ pub fn new(storage: S, mut env: OneshotEnv, execution_args: TxExecutionArgs) -> Self { + let mut storage_view = StorageView::new(storage); + Self::setup_storage_view(&mut storage_view, &execution_args, env.current_block); + + let protocol_version = env.system.version; + if execution_args.adjust_pubdata_price { + env.l1_batch.fee_input = adjust_pubdata_price_for_tx( + env.l1_batch.fee_input, + execution_args.transaction.gas_per_pubdata_byte_limit(), + env.l1_batch.enforced_base_fee.map(U256::from), + protocol_version.into(), + ); }; - Ok((next_l2_block_info, l2_block_info_to_reset)) + let storage_view = storage_view.to_rc_ptr(); + let vm = Box::new(VmInstance::new_with_specific_version( + env.l1_batch, + env.system, + storage_view.clone(), + protocol_version.into_api_vm_version(), + )); + + Self { + vm, + storage_view, + transaction: execution_args.transaction, + } } /// This method is blocking. - fn setup_storage_view(&mut self, tx: &Transaction) { + fn setup_storage_view( + storage_view: &mut StorageView, + execution_args: &TxExecutionArgs, + current_block: Option, + ) { let storage_view_setup_started_at = Instant::now(); - if let Some(nonce) = self.execution_args.enforced_nonce { - let nonce_key = get_nonce_key(&tx.initiator_account()); - let full_nonce = self.storage_view.read_value(&nonce_key); + if let Some(nonce) = execution_args.enforced_nonce { + let nonce_key = get_nonce_key(&execution_args.transaction.initiator_account()); + let full_nonce = storage_view.read_value(&nonce_key); let (_, deployment_nonce) = decompose_full_nonce(h256_to_u256(full_nonce)); let enforced_full_nonce = nonces_to_full_nonce(U256::from(nonce.0), deployment_nonce); - self.storage_view - .set_value(nonce_key, u256_to_h256(enforced_full_nonce)); + storage_view.set_value(nonce_key, u256_to_h256(enforced_full_nonce)); } - let payer = tx.payer(); + let payer = execution_args.transaction.payer(); let balance_key = storage_key_for_eth_balance(&payer); - let mut current_balance = h256_to_u256(self.storage_view.read_value(&balance_key)); - current_balance += self.execution_args.added_balance; - self.storage_view - .set_value(balance_key, u256_to_h256(current_balance)); + let mut current_balance = h256_to_u256(storage_view.read_value(&balance_key)); + current_balance += execution_args.added_balance; + storage_view.set_value(balance_key, u256_to_h256(current_balance)); // Reset L2 block info if necessary. 
- if let Some(l2_block_info_to_reset) = self.l2_block_info_to_reset { + if let Some(current_block) = current_block { let l2_block_info_key = StorageKey::new( AccountTreeId::new(SYSTEM_CONTEXT_ADDRESS), SYSTEM_CONTEXT_CURRENT_L2_BLOCK_INFO_POSITION, ); - let l2_block_info = pack_block_info( - l2_block_info_to_reset.l2_block_number as u64, - l2_block_info_to_reset.l2_block_timestamp, - ); - self.storage_view - .set_value(l2_block_info_key, u256_to_h256(l2_block_info)); + let l2_block_info = + pack_block_info(current_block.number.into(), current_block.timestamp); + storage_view.set_value(l2_block_info_key, u256_to_h256(l2_block_info)); let l2_block_txs_rolling_hash_key = StorageKey::new( AccountTreeId::new(SYSTEM_CONTEXT_ADDRESS), SYSTEM_CONTEXT_CURRENT_TX_ROLLING_HASH_POSITION, ); - self.storage_view.set_value( + storage_view.set_value( l2_block_txs_rolling_hash_key, - l2_block_info_to_reset.txs_rolling_hash, + current_block.txs_rolling_hash, ); } @@ -221,202 +296,155 @@ impl<'a> Sandbox<'a> { } } - fn prepare_env( - shared_args: TxSharedArgs, - execution_args: &TxExecutionArgs, - resolved_block_info: &ResolvedBlockInfo, - next_l2_block_info: L2BlockEnv, - ) -> (SystemEnv, L1BatchEnv) { - let TxSharedArgs { - operator_account, - fee_input, - base_system_contracts, - validation_computational_gas_limit, - chain_id, - .. - } = shared_args; - - // In case we are executing in a past block, we'll use the historical fee data. - let fee_input = resolved_block_info - .historical_fee_input - .unwrap_or(fee_input); - let system_env = SystemEnv { - zk_porter_available: ZKPORTER_IS_AVAILABLE, - version: resolved_block_info.protocol_version, - base_system_smart_contracts: base_system_contracts - .get_by_protocol_version(resolved_block_info.protocol_version), - bootloader_gas_limit: BATCH_COMPUTATIONAL_GAS_LIMIT, - execution_mode: execution_args.execution_mode, - default_validation_computational_gas_limit: validation_computational_gas_limit, - chain_id, - pubdata_params: resolved_block_info.pubdata_params, - }; - let l1_batch_env = L1BatchEnv { - previous_batch_hash: None, - number: resolved_block_info.vm_l1_batch_number, - timestamp: resolved_block_info.l1_batch_timestamp, - fee_input, - fee_account: *operator_account.address(), - enforced_base_fee: execution_args.enforced_base_fee, - first_l2_block: next_l2_block_info, - }; - (system_env, l1_batch_env) + fn wrap_tracers( + tracers: Vec, + env: &OneshotEnv, + missed_storage_invocation_limit: usize, + ) -> Vec, HistoryDisabled>> { + let storage_invocation_tracer = StorageInvocations::new(missed_storage_invocation_limit); + let protocol_version = env.system.version; + tracers + .into_iter() + .map(|tracer| tracer.into_boxed(protocol_version)) + .chain([storage_invocation_tracer.into_tracer_pointer()]) + .collect() } - /// This method is blocking. 
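// Scaled-down sketch of the `pack_block_info` / `unpack_block_info` pair used above when resetting
// the SYSTEM_CONTEXT L2 block info slot, assuming the block number occupies the upper half of the
// packed word and the timestamp the lower half (the real helpers pack into a U256; u128 stands in).
const BLOCK_INFO_MULTIPLIER: u128 = 1 << 64;

fn pack_block_info(number: u64, timestamp: u64) -> u128 {
    u128::from(number) * BLOCK_INFO_MULTIPLIER + u128::from(timestamp)
}

fn unpack_block_info(packed: u128) -> (u64, u64) {
    (
        (packed / BLOCK_INFO_MULTIPLIER) as u64,
        (packed % BLOCK_INFO_MULTIPLIER) as u64,
    )
}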
- fn into_vm( - mut self, - tx: &Transaction, - adjust_pubdata_price: bool, - ) -> (BoxedVm<'a>, StoragePtr>) { - self.setup_storage_view(tx); - let protocol_version = self.system_env.version; - if adjust_pubdata_price { - self.l1_batch_env.fee_input = adjust_pubdata_price_for_tx( - self.l1_batch_env.fee_input, - tx.gas_per_pubdata_byte_limit(), - self.l1_batch_env.enforced_base_fee.map(U256::from), - protocol_version.into(), - ); - }; + pub(super) fn apply(mut self, apply_fn: F) -> T + where + F: FnOnce(&mut VmInstance, Transaction) -> T, + { + let tx_id = format!( + "{:?}-{}", + self.transaction.initiator_account(), + self.transaction.nonce().unwrap_or(Nonce(0)) + ); - let storage_view = self.storage_view.to_rc_ptr(); - let vm = Box::new(VmInstance::new_with_specific_version( - self.l1_batch_env, - self.system_env, - storage_view.clone(), - protocol_version.into_api_vm_version(), - )); + let execution_latency = SANDBOX_METRICS.sandbox[&SandboxStage::Execution].start(); + let result = apply_fn(&mut *self.vm, self.transaction); + let vm_execution_took = execution_latency.observe(); - (vm, storage_view) + let memory_metrics = self.vm.record_vm_memory_metrics(); + vm_metrics::report_vm_memory_metrics( + &tx_id, + &memory_metrics, + vm_execution_took, + self.storage_view.as_ref().borrow_mut().metrics(), + ); + result } } -#[allow(clippy::too_many_arguments)] -pub(super) fn apply_vm_in_sandbox( - vm_permit: VmPermit, - shared_args: TxSharedArgs, - // If `true`, then the batch's L1/pubdata gas price will be adjusted so that the transaction's gas per pubdata limit is <= - // to the one in the block. This is often helpful in case we want the transaction validation to work regardless of the - // current L1 prices for gas or pubdata. - adjust_pubdata_price: bool, - execution_args: &TxExecutionArgs, - connection_pool: &ConnectionPool, - tx: Transaction, - block_args: BlockArgs, // Block arguments for the transaction. - state_override: Option, - apply: impl FnOnce( - &mut VmInstance>, HistoryDisabled>, - Transaction, - ProtocolVersionId, - ) -> T, -) -> anyhow::Result { - let stage_started_at = Instant::now(); - let span = tracing::debug_span!("initialization").entered(); - - let rt_handle = vm_permit.rt_handle(); - let connection = rt_handle - .block_on(connection_pool.connection_tagged("api")) - .context("failed acquiring DB connection")?; - let connection_acquire_time = stage_started_at.elapsed(); - // We don't want to emit too many logs. 
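// Sketch of the `VmSandbox::apply` pattern above: run a caller-supplied closure against the VM and
// measure how long it took, so that latency and memory metrics can be reported in one place once
// the closure returns. `FakeVm` is a stand-in for the real VM instance.
use std::time::{Duration, Instant};

struct FakeVm;

fn apply_with_metrics<T, F>(vm: &mut FakeVm, apply_fn: F) -> (T, Duration)
where
    F: FnOnce(&mut FakeVm) -> T,
{
    let started_at = Instant::now();
    let result = apply_fn(vm);
    // The real code feeds this duration, together with the VM memory stats, into SANDBOX_METRICS.
    (result, started_at.elapsed())
}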
- if connection_acquire_time > Duration::from_millis(10) { - tracing::debug!("Obtained connection (took {connection_acquire_time:?})"); - } - - let sandbox = rt_handle.block_on(Sandbox::new( - connection, - shared_args, - execution_args, - block_args, - state_override.as_ref().unwrap_or(&StateOverride::default()), - ))?; - let protocol_version = sandbox.system_env.version; - let (mut vm, storage_view) = sandbox.into_vm(&tx, adjust_pubdata_price); - - SANDBOX_METRICS.sandbox[&SandboxStage::Initialization].observe(stage_started_at.elapsed()); - span.exit(); - - let tx_id = format!( - "{:?}-{}", - tx.initiator_account(), - tx.nonce().unwrap_or(Nonce(0)) - ); - - let execution_latency = SANDBOX_METRICS.sandbox[&SandboxStage::Execution].start(); - let result = apply(&mut vm, tx, protocol_version); - let vm_execution_took = execution_latency.observe(); - - let memory_metrics = vm.record_vm_memory_metrics(); - vm_metrics::report_vm_memory_metrics( - &tx_id, - &memory_metrics, - vm_execution_took, - storage_view.as_ref().borrow_mut().metrics(), - ); - Ok(result) +/// Main [`OneshotExecutor`] implementation used by the API server. +#[derive(Debug, Default)] +pub struct MainOneshotExecutor { + missed_storage_invocation_limit: usize, } -#[derive(Debug, Clone, Copy)] -struct StoredL2BlockInfo { - l2_block_number: u32, - l2_block_timestamp: u64, - l2_block_hash: H256, - txs_rolling_hash: H256, +impl MainOneshotExecutor { + /// Creates a new executor with the specified limit of cache misses for storage read operations (an anti-DoS measure). + /// The limit is applied for calls and gas estimations, but not during transaction validation. + pub fn new(missed_storage_invocation_limit: usize) -> Self { + Self { + missed_storage_invocation_limit, + } + } } -impl StoredL2BlockInfo { - /// If `l2_block_hash` is `None`, it needs to be fetched from the storage. 
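// Sketch of the anti-DoS limit selection described for `MainOneshotExecutor::new` above: validation
// runs without a cap on storage cache misses, while `eth_call` and gas estimation use the
// configured limit. The enum mirrors `TxExecutionMode` for illustration only.
#[derive(Clone, Copy)]
enum ExecMode {
    VerifyExecute,
    EthCall,
    EstimateFee,
}

fn missed_storage_invocation_limit(mode: ExecMode, configured_limit: usize) -> usize {
    match mode {
        // Storage accesses are not limited for transaction validation.
        ExecMode::VerifyExecute => usize::MAX,
        ExecMode::EthCall | ExecMode::EstimateFee => configured_limit,
    }
}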
- async fn new( - connection: &mut Connection<'_, Core>, - l2_block_number: L2BlockNumber, - l2_block_hash: Option, - ) -> anyhow::Result { - let l2_block_info_key = StorageKey::new( - AccountTreeId::new(SYSTEM_CONTEXT_ADDRESS), - SYSTEM_CONTEXT_CURRENT_L2_BLOCK_INFO_POSITION, - ); - let l2_block_info = connection - .storage_web3_dal() - .get_historical_value_unchecked(l2_block_info_key.hashed_key(), l2_block_number) - .await - .context("failed reading L2 block info from VM state")?; - let (l2_block_number_from_state, l2_block_timestamp) = - unpack_block_info(h256_to_u256(l2_block_info)); +#[async_trait] +impl OneshotExecutor for MainOneshotExecutor +where + S: ReadStorage + Send + 'static, +{ + type Tracers = Vec; - let l2_block_txs_rolling_hash_key = StorageKey::new( - AccountTreeId::new(SYSTEM_CONTEXT_ADDRESS), - SYSTEM_CONTEXT_CURRENT_TX_ROLLING_HASH_POSITION, - ); - let txs_rolling_hash = connection - .storage_web3_dal() - .get_historical_value_unchecked( - l2_block_txs_rolling_hash_key.hashed_key(), - l2_block_number, - ) - .await - .context("failed reading transaction rolling hash from VM state")?; + async fn inspect_transaction( + &self, + storage: S, + env: OneshotEnv, + args: TxExecutionArgs, + tracers: Self::Tracers, + ) -> anyhow::Result { + let missed_storage_invocation_limit = match env.system.execution_mode { + // storage accesses are not limited for tx validation + TxExecutionMode::VerifyExecute => usize::MAX, + TxExecutionMode::EthCall | TxExecutionMode::EstimateFee => { + self.missed_storage_invocation_limit + } + }; - let l2_block_hash = if let Some(hash) = l2_block_hash { - hash - } else { - connection - .blocks_web3_dal() - .get_l2_block_hash(l2_block_number) - .await - .map_err(DalError::generalize)? - .with_context(|| format!("L2 block #{l2_block_number} not present in storage"))? 
+ tokio::task::spawn_blocking(move || { + let tracers = VmSandbox::wrap_tracers(tracers, &env, missed_storage_invocation_limit); + let executor = VmSandbox::new(storage, env, args); + executor.apply(|vm, transaction| { + vm.push_transaction(transaction); + vm.inspect(tracers.into(), VmExecutionMode::OneTx) + }) + }) + .await + .context("VM execution panicked") + } + + async fn inspect_transaction_with_bytecode_compression( + &self, + storage: S, + env: OneshotEnv, + args: TxExecutionArgs, + tracers: Self::Tracers, + ) -> anyhow::Result<( + Result<(), BytecodeCompressionError>, + VmExecutionResultAndLogs, + )> { + let missed_storage_invocation_limit = match env.system.execution_mode { + // storage accesses are not limited for tx validation + TxExecutionMode::VerifyExecute => usize::MAX, + TxExecutionMode::EthCall | TxExecutionMode::EstimateFee => { + self.missed_storage_invocation_limit + } }; - Ok(Self { - l2_block_number: l2_block_number_from_state as u32, - l2_block_timestamp, - l2_block_hash, - txs_rolling_hash, + tokio::task::spawn_blocking(move || { + let tracers = VmSandbox::wrap_tracers(tracers, &env, missed_storage_invocation_limit); + let executor = VmSandbox::new(storage, env, args); + executor.apply(|vm, transaction| { + vm.inspect_transaction_with_bytecode_compression(tracers.into(), transaction, true) + }) }) + .await + .context("VM execution panicked") } } +async fn read_stored_l2_block( + connection: &mut Connection<'_, Core>, + l2_block_number: L2BlockNumber, +) -> anyhow::Result { + let l2_block_info_key = StorageKey::new( + AccountTreeId::new(SYSTEM_CONTEXT_ADDRESS), + SYSTEM_CONTEXT_CURRENT_L2_BLOCK_INFO_POSITION, + ); + let l2_block_info = connection + .storage_web3_dal() + .get_historical_value_unchecked(l2_block_info_key.hashed_key(), l2_block_number) + .await?; + let (l2_block_number_from_state, timestamp) = unpack_block_info(h256_to_u256(l2_block_info)); + + let l2_block_txs_rolling_hash_key = StorageKey::new( + AccountTreeId::new(SYSTEM_CONTEXT_ADDRESS), + SYSTEM_CONTEXT_CURRENT_TX_ROLLING_HASH_POSITION, + ); + let txs_rolling_hash = connection + .storage_web3_dal() + .get_historical_value_unchecked(l2_block_txs_rolling_hash_key.hashed_key(), l2_block_number) + .await?; + + Ok(StoredL2BlockEnv { + number: l2_block_number_from_state as u32, + timestamp, + txs_rolling_hash, + }) +} + #[derive(Debug)] pub(crate) struct ResolvedBlockInfo { state_l2_block_number: L2BlockNumber, @@ -445,7 +473,19 @@ impl BlockArgs { ) } - pub(crate) async fn resolve_block_info( + pub(crate) async fn default_eth_call_gas( + &self, + connection: &mut Connection<'_, Core>, + ) -> anyhow::Result { + let protocol_version = self + .resolve_block_info(connection) + .await + .context("failed to resolve block info")? + .protocol_version; + Ok(get_eth_call_gas_limit(protocol_version.into()).into()) + } + + async fn resolve_block_info( &self, connection: &mut Connection<'_, Core>, ) -> anyhow::Result { diff --git a/core/node/api_server/src/execution_sandbox/execute.rs b/core/node/api_server/src/execution_sandbox/execute.rs index 741bcaea18f4..086a75c81de9 100644 --- a/core/node/api_server/src/execution_sandbox/execute.rs +++ b/core/node/api_server/src/execution_sandbox/execute.rs @@ -1,80 +1,80 @@ //! Implementation of "executing" methods, e.g. `eth_call`. 
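// Sketch of the `spawn_blocking` wrapper used by the executor methods above, assuming the `tokio`
// and `anyhow` crates already present in this diff: the synchronous VM run is moved onto the
// blocking thread pool, and a panic inside it surfaces as an error instead of unwinding the API task.
use anyhow::Context as _;

async fn run_vm_blocking<T, F>(vm_work: F) -> anyhow::Result<T>
where
    F: FnOnce() -> T + Send + 'static,
    T: Send + 'static,
{
    tokio::task::spawn_blocking(vm_work)
        .await
        // A `JoinError` here means the closure panicked (or the runtime shut down), hence the
        // "VM execution panicked" context used in the diff.
        .context("VM execution panicked")
}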
-use anyhow::Context as _; -use tracing::{span, Level}; -use zksync_dal::{ConnectionPool, Core}; -use zksync_multivm::{ - interface::{ - TransactionExecutionMetrics, TxExecutionMode, VmExecutionResultAndLogs, VmInterface, - }, - tracers::StorageInvocations, - MultiVMTracer, +use async_trait::async_trait; +use zksync_dal::{Connection, Core}; +use zksync_multivm::interface::{ + storage::ReadStorage, BytecodeCompressionError, OneshotEnv, TransactionExecutionMetrics, + VmExecutionResultAndLogs, }; use zksync_types::{ - l2::L2Tx, transaction_request::CallOverrides, ExecuteTransactionCommon, Nonce, + api::state_override::StateOverride, l2::L2Tx, ExecuteTransactionCommon, Nonce, PackedEthSignature, Transaction, U256, }; use super::{ - apply, testonly::MockTransactionExecutor, vm_metrics, ApiTracer, BlockArgs, TxSharedArgs, - VmPermit, + apply::{self, MainOneshotExecutor}, + storage::StorageWithOverrides, + testonly::MockOneshotExecutor, + vm_metrics, ApiTracer, BlockArgs, OneshotExecutor, TxSetupArgs, VmPermit, }; -use crate::execution_sandbox::api::state_override::StateOverride; +/// Executor-independent arguments necessary for oneshot transaction execution. +/// +/// # Developer guidelines +/// +/// Please don't add fields that duplicate `SystemEnv` or `L1BatchEnv` information, since both of these +/// are also provided to an executor. #[derive(Debug)] pub(crate) struct TxExecutionArgs { - pub execution_mode: TxExecutionMode, + /// Transaction / call itself. + pub transaction: Transaction, + /// Nonce override for the initiator account. pub enforced_nonce: Option<Nonce>, + /// Balance added to the initiator account. pub added_balance: U256, - pub enforced_base_fee: Option<u64>, - pub missed_storage_invocation_limit: usize, + /// If `true`, then the batch's L1 / pubdata gas price will be adjusted so that the transaction's gas per pubdata limit is <= + /// the one in the block. This is often helpful in case we want the transaction validation to work regardless of the + /// current L1 prices for gas or pubdata.
+ pub adjust_pubdata_price: bool, } impl TxExecutionArgs { - pub fn for_validation(tx: &L2Tx) -> Self { + pub fn for_validation(tx: L2Tx) -> Self { Self { - execution_mode: TxExecutionMode::VerifyExecute, enforced_nonce: Some(tx.nonce()), added_balance: U256::zero(), - enforced_base_fee: Some(tx.common_data.fee.max_fee_per_gas.as_u64()), - missed_storage_invocation_limit: usize::MAX, + adjust_pubdata_price: true, + transaction: tx.into(), } } - fn for_eth_call( - enforced_base_fee: Option, - vm_execution_cache_misses_limit: Option, - ) -> Self { - let missed_storage_invocation_limit = vm_execution_cache_misses_limit.unwrap_or(usize::MAX); + pub fn for_eth_call(mut call: L2Tx) -> Self { + if call.common_data.signature.is_empty() { + call.common_data.signature = PackedEthSignature::default().serialize_packed().into(); + } + Self { - execution_mode: TxExecutionMode::EthCall, enforced_nonce: None, added_balance: U256::zero(), - enforced_base_fee, - missed_storage_invocation_limit, + adjust_pubdata_price: false, + transaction: call.into(), } } - pub fn for_gas_estimate( - vm_execution_cache_misses_limit: Option, - tx: &Transaction, - base_fee: u64, - ) -> Self { - let missed_storage_invocation_limit = vm_execution_cache_misses_limit.unwrap_or(usize::MAX); + pub fn for_gas_estimate(transaction: Transaction) -> Self { // For L2 transactions we need to explicitly put enough balance into the account of the users // while for L1->L2 transactions the `to_mint` field plays this role - let added_balance = match &tx.common_data { + let added_balance = match &transaction.common_data { ExecuteTransactionCommon::L2(data) => data.fee.gas_limit * data.fee.max_fee_per_gas, ExecuteTransactionCommon::L1(_) => U256::zero(), ExecuteTransactionCommon::ProtocolUpgrade(_) => U256::zero(), }; Self { - execution_mode: TxExecutionMode::EstimateFee, - missed_storage_invocation_limit, - enforced_nonce: tx.nonce(), + enforced_nonce: transaction.nonce(), added_balance, - enforced_base_fee: Some(base_fee), + adjust_pubdata_price: true, + transaction, } } } @@ -92,68 +92,40 @@ pub(crate) struct TransactionExecutionOutput { /// Executor of transactions. #[derive(Debug)] pub(crate) enum TransactionExecutor { - Real, + Real(MainOneshotExecutor), #[doc(hidden)] // Intended for tests only - Mock(MockTransactionExecutor), + Mock(MockOneshotExecutor), } impl TransactionExecutor { + pub fn real(missed_storage_invocation_limit: usize) -> Self { + Self::Real(MainOneshotExecutor::new(missed_storage_invocation_limit)) + } + /// This method assumes that (block with number `resolved_block_number` is present in DB) /// or (`block_id` is `pending` and block with number `resolved_block_number - 1` is present in DB) #[allow(clippy::too_many_arguments)] - #[tracing::instrument(skip_all)] + #[tracing::instrument(level = "debug", skip_all)] pub async fn execute_tx_in_sandbox( &self, vm_permit: VmPermit, - shared_args: TxSharedArgs, - // If `true`, then the batch's L1/pubdata gas price will be adjusted so that the transaction's gas per pubdata limit is <= - // to the one in the block. This is often helpful in case we want the transaction validation to work regardless of the - // current L1 prices for gas or pubdata. 
- adjust_pubdata_price: bool, + setup_args: TxSetupArgs, execution_args: TxExecutionArgs, - connection_pool: ConnectionPool, - tx: Transaction, + connection: Connection<'static, Core>, block_args: BlockArgs, state_override: Option, - custom_tracers: Vec, + tracers: Vec, ) -> anyhow::Result { - if let Self::Mock(mock_executor) = self { - return mock_executor.execute_tx(&tx, &block_args); - } - - let total_factory_deps = tx.execute.factory_deps.len() as u16; - - let (published_bytecodes, execution_result) = tokio::task::spawn_blocking(move || { - let span = span!(Level::DEBUG, "execute_in_sandbox").entered(); - let result = apply::apply_vm_in_sandbox( - vm_permit, - shared_args, - adjust_pubdata_price, - &execution_args, - &connection_pool, - tx, - block_args, - state_override, - |vm, tx, _| { - let storage_invocation_tracer = - StorageInvocations::new(execution_args.missed_storage_invocation_limit); - let custom_tracers: Vec<_> = custom_tracers - .into_iter() - .map(|tracer| tracer.into_boxed()) - .chain(vec![storage_invocation_tracer.into_tracer_pointer()]) - .collect(); - vm.inspect_transaction_with_bytecode_compression( - custom_tracers.into(), - tx, - true, - ) - }, - ); - span.exit(); - result - }) - .await - .context("transaction execution panicked")??; + let total_factory_deps = execution_args.transaction.execute.factory_deps.len() as u16; + let (env, storage) = + apply::prepare_env_and_storage(connection, setup_args, &block_args).await?; + let state_override = state_override.unwrap_or_default(); + let storage = StorageWithOverrides::new(storage, &state_override); + + let (published_bytecodes, execution_result) = self + .inspect_transaction_with_bytecode_compression(storage, env, execution_args, tracers) + .await?; + drop(vm_permit); let metrics = vm_metrics::collect_tx_execution_metrics(total_factory_deps, &execution_result); @@ -163,42 +135,53 @@ impl TransactionExecutor { are_published_bytecodes_ok: published_bytecodes.is_ok(), }) } +} - #[allow(clippy::too_many_arguments)] - pub async fn execute_tx_eth_call( +#[async_trait] +impl OneshotExecutor for TransactionExecutor +where + S: ReadStorage + Send + 'static, +{ + type Tracers = Vec; + + async fn inspect_transaction( &self, - vm_permit: VmPermit, - shared_args: TxSharedArgs, - connection_pool: ConnectionPool, - call_overrides: CallOverrides, - mut tx: L2Tx, - block_args: BlockArgs, - vm_execution_cache_misses_limit: Option, - custom_tracers: Vec, - state_override: Option, + storage: S, + env: OneshotEnv, + args: TxExecutionArgs, + tracers: Self::Tracers, ) -> anyhow::Result { - let execution_args = TxExecutionArgs::for_eth_call( - call_overrides.enforced_base_fee, - vm_execution_cache_misses_limit, - ); - - if tx.common_data.signature.is_empty() { - tx.common_data.signature = PackedEthSignature::default().serialize_packed().into(); + match self { + Self::Real(executor) => { + executor + .inspect_transaction(storage, env, args, tracers) + .await + } + Self::Mock(executor) => executor.inspect_transaction(storage, env, args, ()).await, } + } - let output = self - .execute_tx_in_sandbox( - vm_permit, - shared_args, - false, - execution_args, - connection_pool, - tx.into(), - block_args, - state_override, - custom_tracers, - ) - .await?; - Ok(output.vm) + async fn inspect_transaction_with_bytecode_compression( + &self, + storage: S, + env: OneshotEnv, + args: TxExecutionArgs, + tracers: Self::Tracers, + ) -> anyhow::Result<( + Result<(), BytecodeCompressionError>, + VmExecutionResultAndLogs, + )> { + match self { + 
Self::Real(executor) => { + executor + .inspect_transaction_with_bytecode_compression(storage, env, args, tracers) + .await + } + Self::Mock(executor) => { + executor + .inspect_transaction_with_bytecode_compression(storage, env, args, ()) + .await + } + } } } diff --git a/core/node/api_server/src/execution_sandbox/mod.rs b/core/node/api_server/src/execution_sandbox/mod.rs index f7c876679cb0..f2a3f0e5f8c3 100644 --- a/core/node/api_server/src/execution_sandbox/mod.rs +++ b/core/node/api_server/src/execution_sandbox/mod.rs @@ -4,9 +4,13 @@ use std::{ }; use anyhow::Context as _; +use async_trait::async_trait; use rand::{thread_rng, Rng}; -use tokio::runtime::Handle; use zksync_dal::{pruning_dal::PruningInfo, Connection, Core, CoreDal, DalError}; +use zksync_multivm::interface::{ + storage::ReadStorage, BytecodeCompressionError, OneshotEnv, TxExecutionMode, + VmExecutionResultAndLogs, +}; use zksync_state::PostgresStorageCaches; use zksync_types::{ api, fee_model::BatchFeeInput, AccountTreeId, Address, L1BatchNumber, L2BlockNumber, L2ChainId, @@ -40,17 +44,9 @@ mod vm_metrics; /// as a proof that the caller obtained a token from `VmConcurrencyLimiter`, #[derive(Debug, Clone)] pub struct VmPermit { - /// A handle to the runtime that is used to query the VM storage. - rt_handle: Handle, _permit: Arc, } -impl VmPermit { - fn rt_handle(&self) -> &Handle { - &self.rt_handle - } -} - /// Barrier-like synchronization primitive allowing to close a [`VmConcurrencyLimiter`] it's attached to /// so that it doesn't issue new permits, and to wait for all permits to drop. #[derive(Debug, Clone)] @@ -103,7 +99,6 @@ impl VmConcurrencyBarrier { pub struct VmConcurrencyLimiter { /// Semaphore that limits the number of concurrent VM executions. limiter: Arc, - rt_handle: Handle, } impl VmConcurrencyLimiter { @@ -116,7 +111,6 @@ impl VmConcurrencyLimiter { let this = Self { limiter: Arc::clone(&limiter), - rt_handle: Handle::current(), }; let barrier = VmConcurrencyBarrier { limiter, @@ -144,7 +138,6 @@ impl VmConcurrencyLimiter { } Some(VmPermit { - rt_handle: self.rt_handle.clone(), _permit: Arc::new(permit), }) } @@ -163,9 +156,10 @@ async fn get_pending_state( Ok((block_id, resolved_block_number)) } -/// Arguments for VM execution not specific to a particular transaction. +/// Arguments for VM execution necessary to set up storage and environment. #[derive(Debug, Clone)] -pub(crate) struct TxSharedArgs { +pub(crate) struct TxSetupArgs { + pub execution_mode: TxExecutionMode, pub operator_account: AccountTreeId, pub fee_input: BatchFeeInput, pub base_system_contracts: MultiVMBaseSystemContracts, @@ -173,12 +167,17 @@ pub(crate) struct TxSharedArgs { pub validation_computational_gas_limit: u32, pub chain_id: L2ChainId, pub whitelisted_tokens_for_aa: Vec
, + pub enforced_base_fee: Option, } -impl TxSharedArgs { +impl TxSetupArgs { #[cfg(test)] - pub fn mock(base_system_contracts: MultiVMBaseSystemContracts) -> Self { + pub fn mock( + execution_mode: TxExecutionMode, + base_system_contracts: MultiVMBaseSystemContracts, + ) -> Self { Self { + execution_mode, operator_account: AccountTreeId::default(), fee_input: BatchFeeInput::l1_pegged(55, 555), base_system_contracts, @@ -186,6 +185,7 @@ impl TxSharedArgs { validation_computational_gas_limit: u32::MAX, chain_id: L2ChainId::default(), whitelisted_tokens_for_aa: Vec::new(), + enforced_base_fee: None, } } } @@ -417,3 +417,28 @@ impl BlockArgs { ) } } + +/// VM executor capable of executing isolated transactions / calls (as opposed to batch execution). +#[async_trait] +trait OneshotExecutor { + type Tracers: Default; + + async fn inspect_transaction( + &self, + storage: S, + env: OneshotEnv, + args: TxExecutionArgs, + tracers: Self::Tracers, + ) -> anyhow::Result; + + async fn inspect_transaction_with_bytecode_compression( + &self, + storage: S, + env: OneshotEnv, + args: TxExecutionArgs, + tracers: Self::Tracers, + ) -> anyhow::Result<( + Result<(), BytecodeCompressionError>, + VmExecutionResultAndLogs, + )>; +} diff --git a/core/node/api_server/src/execution_sandbox/testonly.rs b/core/node/api_server/src/execution_sandbox/testonly.rs index 59fa2e38db7a..d9d60f52415a 100644 --- a/core/node/api_server/src/execution_sandbox/testonly.rs +++ b/core/node/api_server/src/execution_sandbox/testonly.rs @@ -1,24 +1,24 @@ use std::fmt; +use async_trait::async_trait; +#[cfg(test)] +use zksync_multivm::interface::ExecutionResult; use zksync_multivm::interface::{ - ExecutionResult, TransactionExecutionMetrics, VmExecutionResultAndLogs, + storage::ReadStorage, BytecodeCompressionError, OneshotEnv, TxExecutionMode, + VmExecutionResultAndLogs, }; -use zksync_types::{l2::L2Tx, ExecuteTransactionCommon, Transaction}; +use zksync_types::Transaction; -use super::{ - execute::{TransactionExecutionOutput, TransactionExecutor}, - validate::ValidationError, - BlockArgs, -}; +use super::{execute::TransactionExecutor, OneshotExecutor, TxExecutionArgs}; -type TxResponseFn = dyn Fn(&Transaction, &BlockArgs) -> VmExecutionResultAndLogs + Send + Sync; +type TxResponseFn = dyn Fn(&Transaction, &OneshotEnv) -> VmExecutionResultAndLogs + Send + Sync; -pub struct MockTransactionExecutor { +pub struct MockOneshotExecutor { call_responses: Box, tx_responses: Box, } -impl fmt::Debug for MockTransactionExecutor { +impl fmt::Debug for MockOneshotExecutor { fn fmt(&self, formatter: &mut fmt::Formatter<'_>) -> fmt::Result { formatter .debug_struct("MockTransactionExecutor") @@ -26,7 +26,7 @@ impl fmt::Debug for MockTransactionExecutor { } } -impl Default for MockTransactionExecutor { +impl Default for MockOneshotExecutor { fn default() -> Self { Self { call_responses: Box::new(|tx, _| { @@ -42,11 +42,11 @@ impl Default for MockTransactionExecutor { } } -impl MockTransactionExecutor { +impl MockOneshotExecutor { #[cfg(test)] pub(crate) fn set_call_responses(&mut self, responses: F) where - F: Fn(&Transaction, &BlockArgs) -> ExecutionResult + 'static + Send + Sync, + F: Fn(&Transaction, &OneshotEnv) -> ExecutionResult + 'static + Send + Sync, { self.call_responses = self.wrap_responses(responses); } @@ -54,7 +54,7 @@ impl MockTransactionExecutor { #[cfg(test)] pub(crate) fn set_tx_responses(&mut self, responses: F) where - F: Fn(&Transaction, &BlockArgs) -> ExecutionResult + 'static + Send + Sync, + F: Fn(&Transaction, &OneshotEnv) -> 
ExecutionResult + 'static + Send + Sync, { self.tx_responses = self.wrap_responses(responses); } @@ -62,12 +62,12 @@ impl MockTransactionExecutor { #[cfg(test)] fn wrap_responses(&mut self, responses: F) -> Box where - F: Fn(&Transaction, &BlockArgs) -> ExecutionResult + 'static + Send + Sync, + F: Fn(&Transaction, &OneshotEnv) -> ExecutionResult + 'static + Send + Sync, { Box::new( - move |tx: &Transaction, ba: &BlockArgs| -> VmExecutionResultAndLogs { + move |tx: &Transaction, env: &OneshotEnv| -> VmExecutionResultAndLogs { VmExecutionResultAndLogs { - result: responses(tx, ba), + result: responses(tx, env), logs: Default::default(), statistics: Default::default(), refunds: Default::default(), @@ -79,56 +79,54 @@ impl MockTransactionExecutor { #[cfg(test)] pub(crate) fn set_tx_responses_with_logs(&mut self, responses: F) where - F: Fn(&Transaction, &BlockArgs) -> VmExecutionResultAndLogs + 'static + Send + Sync, + F: Fn(&Transaction, &OneshotEnv) -> VmExecutionResultAndLogs + 'static + Send + Sync, { self.tx_responses = Box::new(responses); } - pub(crate) fn validate_tx( - &self, - tx: L2Tx, - block_args: &BlockArgs, - ) -> Result<(), ValidationError> { - let result = (self.tx_responses)(&tx.into(), block_args); - match result.result { - ExecutionResult::Success { .. } => Ok(()), - other => Err(ValidationError::Internal(anyhow::anyhow!( - "transaction validation failed: {other:?}" - ))), + fn mock_inspect(&self, env: OneshotEnv, args: TxExecutionArgs) -> VmExecutionResultAndLogs { + match env.system.execution_mode { + TxExecutionMode::EthCall => (self.call_responses)(&args.transaction, &env), + TxExecutionMode::VerifyExecute | TxExecutionMode::EstimateFee => { + (self.tx_responses)(&args.transaction, &env) + } } } +} - pub(crate) fn execute_tx( - &self, - tx: &Transaction, - block_args: &BlockArgs, - ) -> anyhow::Result { - let result = self.get_execution_result(tx, block_args); - let output = TransactionExecutionOutput { - vm: result, - metrics: TransactionExecutionMetrics::default(), - are_published_bytecodes_ok: true, - }; +#[async_trait] +impl OneshotExecutor for MockOneshotExecutor +where + S: ReadStorage + Send + 'static, +{ + type Tracers = (); - Ok(output) + async fn inspect_transaction( + &self, + _storage: S, + env: OneshotEnv, + args: TxExecutionArgs, + (): Self::Tracers, + ) -> anyhow::Result { + Ok(self.mock_inspect(env, args)) } - fn get_execution_result( + async fn inspect_transaction_with_bytecode_compression( &self, - tx: &Transaction, - block_args: &BlockArgs, - ) -> VmExecutionResultAndLogs { - if let ExecuteTransactionCommon::L2(data) = &tx.common_data { - if data.input.is_none() { - return (self.call_responses)(tx, block_args); - } - } - (self.tx_responses)(tx, block_args) + _storage: S, + env: OneshotEnv, + args: TxExecutionArgs, + (): Self::Tracers, + ) -> anyhow::Result<( + Result<(), BytecodeCompressionError>, + VmExecutionResultAndLogs, + )> { + Ok((Ok(()), self.mock_inspect(env, args))) } } -impl From for TransactionExecutor { - fn from(executor: MockTransactionExecutor) -> Self { +impl From for TransactionExecutor { + fn from(executor: MockOneshotExecutor) -> Self { Self::Mock(executor) } } diff --git a/core/node/api_server/src/execution_sandbox/tests.rs b/core/node/api_server/src/execution_sandbox/tests.rs index 0a8af35597b3..da593292e2e1 100644 --- a/core/node/api_server/src/execution_sandbox/tests.rs +++ b/core/node/api_server/src/execution_sandbox/tests.rs @@ -4,9 +4,13 @@ use assert_matches::assert_matches; use zksync_dal::ConnectionPool; use 
zksync_node_genesis::{insert_genesis_batch, GenesisParams}; use zksync_node_test_utils::{create_l2_block, create_l2_transaction, prepare_recovery_snapshot}; +use zksync_types::{api::state_override::StateOverride, Transaction}; use super::*; -use crate::{execution_sandbox::apply::apply_vm_in_sandbox, tx_sender::ApiContracts}; +use crate::{ + execution_sandbox::{apply::VmSandbox, storage::StorageWithOverrides}, + tx_sender::ApiContracts, +}; #[tokio::test] async fn creating_block_args() { @@ -165,43 +169,43 @@ async fn creating_block_args_after_snapshot_recovery() { #[tokio::test] async fn instantiating_vm() { let pool = ConnectionPool::::test_pool().await; - let mut storage = pool.connection().await.unwrap(); - insert_genesis_batch(&mut storage, &GenesisParams::mock()) + let mut connection = pool.connection().await.unwrap(); + insert_genesis_batch(&mut connection, &GenesisParams::mock()) .await .unwrap(); - let block_args = BlockArgs::pending(&mut storage).await.unwrap(); - test_instantiating_vm(pool.clone(), block_args).await; - let start_info = BlockStartInfo::new(&mut storage, Duration::MAX) + let block_args = BlockArgs::pending(&mut connection).await.unwrap(); + test_instantiating_vm(connection, block_args).await; + + let mut connection = pool.connection().await.unwrap(); + let start_info = BlockStartInfo::new(&mut connection, Duration::MAX) .await .unwrap(); - let block_args = BlockArgs::new(&mut storage, api::BlockId::Number(0.into()), &start_info) + let block_args = BlockArgs::new(&mut connection, api::BlockId::Number(0.into()), &start_info) .await .unwrap(); - test_instantiating_vm(pool.clone(), block_args).await; + test_instantiating_vm(connection, block_args).await; } -async fn test_instantiating_vm(pool: ConnectionPool, block_args: BlockArgs) { - let (vm_concurrency_limiter, _) = VmConcurrencyLimiter::new(1); - let vm_permit = vm_concurrency_limiter.acquire().await.unwrap(); - let transaction = create_l2_transaction(10, 100).into(); +async fn test_instantiating_vm(connection: Connection<'static, Core>, block_args: BlockArgs) { + let transaction = Transaction::from(create_l2_transaction(10, 100)); let estimate_gas_contracts = ApiContracts::load_from_disk().await.unwrap().estimate_gas; + + let execution_args = TxExecutionArgs::for_gas_estimate(transaction.clone()); + let (env, storage) = apply::prepare_env_and_storage( + connection, + TxSetupArgs::mock(TxExecutionMode::EstimateFee, estimate_gas_contracts), + &block_args, + ) + .await + .unwrap(); + let storage = StorageWithOverrides::new(storage, &StateOverride::default()); + tokio::task::spawn_blocking(move || { - apply_vm_in_sandbox( - vm_permit, - TxSharedArgs::mock(estimate_gas_contracts), - true, - &TxExecutionArgs::for_gas_estimate(None, &transaction, 123), - &pool, - transaction.clone(), - block_args, - None, - |_, received_tx, _| { - assert_eq!(received_tx, transaction); - }, - ) + VmSandbox::new(storage, env, execution_args).apply(|_, received_tx| { + assert_eq!(received_tx, transaction); + }); }) .await - .expect("VM instantiation panicked") - .expect("VM instantiation errored"); + .expect("VM execution panicked") } diff --git a/core/node/api_server/src/execution_sandbox/tracers.rs b/core/node/api_server/src/execution_sandbox/tracers.rs index 8d61d896a362..31384b7a0898 100644 --- a/core/node/api_server/src/execution_sandbox/tracers.rs +++ b/core/node/api_server/src/execution_sandbox/tracers.rs @@ -3,26 +3,49 @@ use std::sync::Arc; use once_cell::sync::OnceCell; use zksync_multivm::{ interface::{storage::WriteStorage, 
Call}, - tracers::CallTracer, - vm_latest::HistoryMode, + tracers::{CallTracer, ValidationTracer, ValidationTracerParams, ViolatedValidationRule}, + vm_latest::HistoryDisabled, MultiVMTracer, MultiVmTracerPointer, }; +use zksync_types::ProtocolVersionId; -/// Custom tracers supported by our API +/// Custom tracers supported by the API sandbox. #[derive(Debug)] pub(crate) enum ApiTracer { CallTracer(Arc>>), + Validation { + params: ValidationTracerParams, + result: Arc>, + }, } impl ApiTracer { - pub fn into_boxed< - S: WriteStorage, - H: HistoryMode + zksync_multivm::HistoryMode + 'static, - >( + pub fn validation( + params: ValidationTracerParams, + ) -> (Self, Arc>) { + let result = Arc::>::default(); + let this = Self::Validation { + params, + result: result.clone(), + }; + (this, result) + } + + pub(super) fn into_boxed( self, - ) -> MultiVmTracerPointer { + protocol_version: ProtocolVersionId, + ) -> MultiVmTracerPointer + where + S: WriteStorage, + { match self { - ApiTracer::CallTracer(tracer) => CallTracer::new(tracer.clone()).into_tracer_pointer(), + Self::CallTracer(traces) => CallTracer::new(traces).into_tracer_pointer(), + Self::Validation { params, result } => { + let (mut tracer, _) = + ValidationTracer::::new(params, protocol_version.into()); + tracer.result = result; + tracer.into_tracer_pointer() + } } } } diff --git a/core/node/api_server/src/execution_sandbox/validate.rs b/core/node/api_server/src/execution_sandbox/validate.rs index a856386b4562..a95cf6c3a91e 100644 --- a/core/node/api_server/src/execution_sandbox/validate.rs +++ b/core/node/api_server/src/execution_sandbox/validate.rs @@ -1,23 +1,23 @@ use std::collections::HashSet; use anyhow::Context as _; -use zksync_dal::{Connection, ConnectionPool, Core, CoreDal}; +use tracing::Instrument; +use zksync_dal::{Connection, Core, CoreDal}; use zksync_multivm::{ - interface::{ExecutionResult, VmExecutionMode, VmInterface}, - tracers::{ - StorageInvocations, ValidationError as RawValidationError, ValidationTracer, - ValidationTracerParams, - }, - vm_latest::HistoryDisabled, - MultiVMTracer, + interface::ExecutionResult, + tracers::{ValidationError as RawValidationError, ValidationTracerParams}, +}; +use zksync_types::{ + api::state_override::StateOverride, l2::L2Tx, Address, TRUSTED_ADDRESS_SLOTS, + TRUSTED_TOKEN_SLOTS, }; -use zksync_types::{l2::L2Tx, Address, Transaction, TRUSTED_ADDRESS_SLOTS, TRUSTED_TOKEN_SLOTS}; use super::{ apply, execute::TransactionExecutor, + storage::StorageWithOverrides, vm_metrics::{SandboxStage, EXECUTION_METRICS, SANDBOX_METRICS}, - BlockArgs, TxExecutionArgs, TxSharedArgs, VmPermit, + ApiTracer, BlockArgs, OneshotExecutor, TxExecutionArgs, TxSetupArgs, VmPermit, }; /// Validation error used by the sandbox. 
Besides validation errors returned by VM, it also includes an internal error @@ -31,88 +31,46 @@ pub(crate) enum ValidationError { } impl TransactionExecutor { + #[tracing::instrument(level = "debug", skip_all)] pub(crate) async fn validate_tx_in_sandbox( &self, - connection_pool: ConnectionPool, + mut connection: Connection<'static, Core>, vm_permit: VmPermit, tx: L2Tx, - shared_args: TxSharedArgs, + setup_args: TxSetupArgs, block_args: BlockArgs, computational_gas_limit: u32, ) -> Result<(), ValidationError> { - if let Self::Mock(mock) = self { - return mock.validate_tx(tx, &block_args); - } - - let stage_latency = SANDBOX_METRICS.sandbox[&SandboxStage::ValidateInSandbox].start(); - let mut connection = connection_pool - .connection_tagged("api") - .await - .context("failed acquiring DB connection")?; - let validation_params = get_validation_params( + let total_latency = SANDBOX_METRICS.sandbox[&SandboxStage::ValidateInSandbox].start(); + let params = get_validation_params( &mut connection, &tx, computational_gas_limit, - &shared_args.whitelisted_tokens_for_aa, + &setup_args.whitelisted_tokens_for_aa, ) .await .context("failed getting validation params")?; - drop(connection); - - let execution_args = TxExecutionArgs::for_validation(&tx); - let tx: Transaction = tx.into(); - - let validation_result = tokio::task::spawn_blocking(move || { - let span = tracing::debug_span!("validate_in_sandbox").entered(); - let result = apply::apply_vm_in_sandbox( - vm_permit, - shared_args, - true, - &execution_args, - &connection_pool, - tx, - block_args, - None, - |vm, tx, protocol_version| { - let stage_latency = SANDBOX_METRICS.sandbox[&SandboxStage::Validation].start(); - let span = tracing::debug_span!("validation").entered(); - vm.push_transaction(tx); - - let (tracer, validation_result) = ValidationTracer::::new( - validation_params, - protocol_version.into(), - ); - - let result = vm.inspect( - vec![ - tracer.into_tracer_pointer(), - StorageInvocations::new(execution_args.missed_storage_invocation_limit) - .into_tracer_pointer(), - ] - .into(), - VmExecutionMode::OneTx, - ); - - let result = match (result.result, validation_result.get()) { - (_, Some(err)) => Err(RawValidationError::ViolatedRule(err.clone())), - (ExecutionResult::Halt { reason }, _) => { - Err(RawValidationError::FailedTx(reason)) - } - (_, None) => Ok(()), - }; - - stage_latency.observe(); - span.exit(); - result - }, - ); - span.exit(); - result - }) - .await - .context("transaction validation panicked")??; + let (env, storage) = + apply::prepare_env_and_storage(connection, setup_args, &block_args).await?; + let storage = StorageWithOverrides::new(storage, &StateOverride::default()); + + let execution_args = TxExecutionArgs::for_validation(tx); + let (tracer, validation_result) = ApiTracer::validation(params); + let stage_latency = SANDBOX_METRICS.sandbox[&SandboxStage::Validation].start(); + let result = self + .inspect_transaction(storage, env, execution_args, vec![tracer]) + .instrument(tracing::debug_span!("validation")) + .await?; + drop(vm_permit); stage_latency.observe(); + + let validation_result = match (result.result, validation_result.get()) { + (_, Some(rule)) => Err(RawValidationError::ViolatedRule(rule.clone())), + (ExecutionResult::Halt { reason }, _) => Err(RawValidationError::FailedTx(reason)), + (_, None) => Ok(()), + }; + total_latency.observe(); validation_result.map_err(ValidationError::Vm) } } diff --git a/core/node/api_server/src/tx_sender/mod.rs b/core/node/api_server/src/tx_sender/mod.rs index 
2b3f674b6dac..f847776850e7 100644 --- a/core/node/api_server/src/tx_sender/mod.rs +++ b/core/node/api_server/src/tx_sender/mod.rs @@ -10,10 +10,10 @@ use zksync_dal::{ transactions_dal::L2TxSubmissionResult, Connection, ConnectionPool, Core, CoreDal, }; use zksync_multivm::{ - interface::{TransactionExecutionMetrics, VmExecutionResultAndLogs}, + interface::{TransactionExecutionMetrics, TxExecutionMode, VmExecutionResultAndLogs}, utils::{ adjust_pubdata_price_for_tx, derive_base_fee_and_gas_per_pubdata, derive_overhead, - get_eth_call_gas_limit, get_max_batch_gas_limit, + get_max_batch_gas_limit, }, vm_latest::constants::BATCH_COMPUTATIONAL_GAS_LIMIT, }; @@ -41,7 +41,7 @@ pub(super) use self::result::SubmitTxError; use self::{master_pool_sink::MasterPoolSink, tx_sink::TxSink}; use crate::{ execution_sandbox::{ - BlockArgs, SubmitTxStage, TransactionExecutor, TxExecutionArgs, TxSharedArgs, + BlockArgs, SubmitTxStage, TransactionExecutor, TxExecutionArgs, TxSetupArgs, VmConcurrencyBarrier, VmConcurrencyLimiter, VmPermit, SANDBOX_METRICS, }, tx_sender::result::ApiCallResult, @@ -255,6 +255,10 @@ impl TxSenderBuilder { self.whitelisted_tokens_for_aa_cache.unwrap_or_else(|| { Arc::new(RwLock::new(self.config.whitelisted_tokens_for_aa.clone())) }); + let missed_storage_invocation_limit = self + .config + .vm_execution_cache_misses_limit + .unwrap_or(usize::MAX); TxSender(Arc::new(TxSenderInner { sender_config: self.config, @@ -266,7 +270,7 @@ impl TxSenderBuilder { storage_caches, whitelisted_tokens_for_aa_cache, sealer, - executor: TransactionExecutor::Real, + executor: TransactionExecutor::real(missed_storage_invocation_limit), })) } } @@ -323,7 +327,7 @@ pub struct TxSenderInner { // Cache for white-listed tokens. pub(super) whitelisted_tokens_for_aa_cache: Arc>>, /// Batch sealer used to check whether transaction can be executed by the sequencer. 
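// Sketch of the executor wiring in the `TxSenderBuilder` changes above: an unset
// `vm_execution_cache_misses_limit` collapses to "unlimited", and the resulting limit is baked into
// the real executor once instead of being threaded through every sandbox call. Simplified stand-ins.
struct RealExecutor {
    missed_storage_invocation_limit: usize,
}

#[allow(dead_code)]
enum Executor {
    Real(RealExecutor),
    Mock, // test-only variant, analogous to `TransactionExecutor::Mock`
}

fn build_executor(vm_execution_cache_misses_limit: Option<usize>) -> Executor {
    let missed_storage_invocation_limit = vm_execution_cache_misses_limit.unwrap_or(usize::MAX);
    Executor::Real(RealExecutor {
        missed_storage_invocation_limit,
    })
}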
- sealer: Arc, + pub(super) sealer: Arc, pub(super) executor: TransactionExecutor, } @@ -349,7 +353,7 @@ impl TxSender { self.0.whitelisted_tokens_for_aa_cache.read().await.clone() } - async fn acquire_replica_connection(&self) -> anyhow::Result> { + async fn acquire_replica_connection(&self) -> anyhow::Result> { self.0 .replica_connection_pool .connection_tagged("api") @@ -371,23 +375,20 @@ impl TxSender { stage_latency.observe(); let stage_latency = SANDBOX_METRICS.start_tx_submit_stage(tx_hash, SubmitTxStage::DryRun); - let shared_args = self.shared_args().await?; + let setup_args = self.call_args(&tx, None).await?; let vm_permit = self.0.vm_concurrency_limiter.acquire().await; let vm_permit = vm_permit.ok_or(SubmitTxError::ServerShuttingDown)?; let mut connection = self.acquire_replica_connection().await?; let block_args = BlockArgs::pending(&mut connection).await?; - drop(connection); let execution_output = self .0 .executor .execute_tx_in_sandbox( vm_permit.clone(), - shared_args.clone(), - true, - TxExecutionArgs::for_validation(&tx), - self.0.replica_connection_pool.clone(), - tx.clone().into(), + setup_args.clone(), + TxExecutionArgs::for_validation(tx.clone()), + connection, block_args, None, vec![], @@ -401,15 +402,16 @@ impl TxSender { let stage_latency = SANDBOX_METRICS.start_tx_submit_stage(tx_hash, SubmitTxStage::VerifyExecute); + let connection = self.acquire_replica_connection().await?; let computational_gas_limit = self.0.sender_config.validation_computational_gas_limit; let validation_result = self .0 .executor .validate_tx_in_sandbox( - self.0.replica_connection_pool.clone(), + connection, vm_permit, tx.clone(), - shared_args, + setup_args, block_args, computational_gas_limit, ) @@ -465,14 +467,23 @@ impl TxSender { /// **Important.** For the main node, this method acquires a DB connection inside `get_batch_fee_input()`. /// Thus, you shouldn't call it if you're holding a DB connection already. 
- async fn shared_args(&self) -> anyhow::Result { + async fn call_args( + &self, + tx: &L2Tx, + call_overrides: Option<&CallOverrides>, + ) -> anyhow::Result { let fee_input = self .0 .batch_fee_input_provider .get_batch_fee_input() .await .context("cannot get batch fee input")?; - Ok(TxSharedArgs { + Ok(TxSetupArgs { + execution_mode: if call_overrides.is_some() { + TxExecutionMode::EthCall + } else { + TxExecutionMode::VerifyExecute + }, operator_account: AccountTreeId::new(self.0.sender_config.fee_account_addr), fee_input, base_system_contracts: self.0.api_contracts.eth_call.clone(), @@ -483,6 +494,11 @@ impl TxSender { .validation_computational_gas_limit, chain_id: self.0.sender_config.chain_id, whitelisted_tokens_for_aa: self.read_whitelisted_tokens_for_aa_cache().await, + enforced_base_fee: if let Some(overrides) = call_overrides { + overrides.enforced_base_fee + } else { + Some(tx.common_data.fee.max_fee_per_gas.as_u64()) + }, }) } @@ -699,20 +715,17 @@ impl TxSender { } } - let shared_args = self.shared_args_for_gas_estimate(fee_model_params).await; - let vm_execution_cache_misses_limit = self.0.sender_config.vm_execution_cache_misses_limit; - let execution_args = - TxExecutionArgs::for_gas_estimate(vm_execution_cache_misses_limit, &tx, base_fee); + let setup_args = self.args_for_gas_estimate(fee_model_params, base_fee).await; + let execution_args = TxExecutionArgs::for_gas_estimate(tx); + let connection = self.acquire_replica_connection().await?; let execution_output = self .0 .executor .execute_tx_in_sandbox( vm_permit, - shared_args, - true, + setup_args, execution_args, - self.0.replica_connection_pool.clone(), - tx.clone(), + connection, block_args, state_override, vec![], @@ -721,10 +734,10 @@ impl TxSender { Ok((execution_output.vm, execution_output.metrics)) } - async fn shared_args_for_gas_estimate(&self, fee_input: BatchFeeInput) -> TxSharedArgs { + async fn args_for_gas_estimate(&self, fee_input: BatchFeeInput, base_fee: u64) -> TxSetupArgs { let config = &self.0.sender_config; - - TxSharedArgs { + TxSetupArgs { + execution_mode: TxExecutionMode::EstimateFee, operator_account: AccountTreeId::new(config.fee_account_addr), fee_input, // We want to bypass the computation gas limit check for gas estimation @@ -733,6 +746,7 @@ impl TxSender { caches: self.storage_caches(), chain_id: config.chain_id, whitelisted_tokens_for_aa: self.read_whitelisted_tokens_for_aa_cache().await, + enforced_base_fee: Some(base_fee), } } @@ -1002,22 +1016,21 @@ impl TxSender { let vm_permit = self.0.vm_concurrency_limiter.acquire().await; let vm_permit = vm_permit.ok_or(SubmitTxError::ServerShuttingDown)?; - let vm_execution_cache_misses_limit = self.0.sender_config.vm_execution_cache_misses_limit; - self.0 + let connection = self.acquire_replica_connection().await?; + let result = self + .0 .executor - .execute_tx_eth_call( + .execute_tx_in_sandbox( vm_permit, - self.shared_args().await?, - self.0.replica_connection_pool.clone(), - call_overrides, - tx, + self.call_args(&tx, Some(&call_overrides)).await?, + TxExecutionArgs::for_eth_call(tx), + connection, block_args, - vm_execution_cache_misses_limit, - vec![], state_override, + vec![], ) - .await? 
- .into_api_call_result() + .await?; + result.vm.into_api_call_result() } pub async fn gas_price(&self) -> anyhow::Result { @@ -1070,19 +1083,4 @@ impl TxSender { } Ok(()) } - - pub(crate) async fn get_default_eth_call_gas( - &self, - block_args: BlockArgs, - ) -> anyhow::Result { - let mut connection = self.acquire_replica_connection().await?; - - let protocol_version = block_args - .resolve_block_info(&mut connection) - .await - .context("failed to resolve block info")? - .protocol_version; - - Ok(get_eth_call_gas_limit(protocol_version.into())) - } } diff --git a/core/node/api_server/src/tx_sender/tests.rs b/core/node/api_server/src/tx_sender/tests.rs index 06b6b7a1301b..5f0f0dc925a2 100644 --- a/core/node/api_server/src/tx_sender/tests.rs +++ b/core/node/api_server/src/tx_sender/tests.rs @@ -10,7 +10,7 @@ use zksync_utils::u256_to_h256; use super::*; use crate::{ - execution_sandbox::testonly::MockTransactionExecutor, web3::testonly::create_test_tx_sender, + execution_sandbox::testonly::MockOneshotExecutor, web3::testonly::create_test_tx_sender, }; #[tokio::test] @@ -31,7 +31,7 @@ async fn getting_nonce_for_account() { .await .unwrap(); - let tx_executor = MockTransactionExecutor::default().into(); + let tx_executor = MockOneshotExecutor::default().into(); let (tx_sender, _) = create_test_tx_sender(pool.clone(), l2_chain_id, tx_executor).await; let nonce = tx_sender.get_expected_nonce(test_address).await.unwrap(); @@ -81,7 +81,7 @@ async fn getting_nonce_for_account_after_snapshot_recovery() { .await; let l2_chain_id = L2ChainId::default(); - let tx_executor = MockTransactionExecutor::default().into(); + let tx_executor = MockOneshotExecutor::default().into(); let (tx_sender, _) = create_test_tx_sender(pool.clone(), l2_chain_id, tx_executor).await; storage @@ -136,7 +136,7 @@ async fn submitting_tx_requires_one_connection() { .unwrap(); drop(storage); - let mut tx_executor = MockTransactionExecutor::default(); + let mut tx_executor = MockOneshotExecutor::default(); tx_executor.set_tx_responses(move |received_tx, _| { assert_eq!(received_tx.hash(), tx_hash); ExecutionResult::Success { output: vec![] } diff --git a/core/node/api_server/src/web3/namespaces/debug.rs b/core/node/api_server/src/web3/namespaces/debug.rs index e71f4bd1e1ef..473391476a3b 100644 --- a/core/node/api_server/src/web3/namespaces/debug.rs +++ b/core/node/api_server/src/web3/namespaces/debug.rs @@ -4,7 +4,7 @@ use anyhow::Context as _; use once_cell::sync::OnceCell; use zksync_dal::{CoreDal, DalError}; use zksync_multivm::{ - interface::{Call, CallType, ExecutionResult}, + interface::{Call, CallType, ExecutionResult, TxExecutionMode}, vm_latest::constants::BATCH_COMPUTATIONAL_GAS_LIMIT, }; use zksync_system_constants::MAX_ENCODED_TX_SIZE; @@ -19,7 +19,7 @@ use zksync_types::{ use zksync_web3_decl::error::Web3Error; use crate::{ - execution_sandbox::{ApiTracer, TxSharedArgs}, + execution_sandbox::{ApiTracer, TxExecutionArgs, TxSetupArgs}, tx_sender::{ApiContracts, TxSenderConfig}, web3::{backend_jsonrpsee::MethodTracer, state::RpcState}, }; @@ -167,29 +167,20 @@ impl DebugNamespace { .state .resolve_block_args(&mut connection, block_id) .await?; - drop(connection); - self.current_method().set_block_diff( self.state .last_sealed_l2_block .diff_with_block_args(&block_args), ); - if request.gas.is_none() { - request.gas = Some( - self.state - .tx_sender - .get_default_eth_call_gas(block_args) - .await - .map_err(Web3Error::InternalError)? 
- .into(), - ) + request.gas = Some(block_args.default_eth_call_gas(&mut connection).await?); } + drop(connection); let call_overrides = request.get_call_overrides()?; let tx = L2Tx::from_request(request.into(), MAX_ENCODED_TX_SIZE)?; - let shared_args = self.shared_args().await; + let setup_args = self.call_args(call_overrides.enforced_base_fee).await; let vm_permit = self .state .tx_sender @@ -206,20 +197,20 @@ impl DebugNamespace { vec![ApiTracer::CallTracer(call_tracer_result.clone())] }; + let connection = self.state.acquire_connection().await?; let executor = &self.state.tx_sender.0.executor; let result = executor - .execute_tx_eth_call( + .execute_tx_in_sandbox( vm_permit, - shared_args, - self.state.connection_pool.clone(), - call_overrides, - tx.clone(), + setup_args, + TxExecutionArgs::for_eth_call(tx.clone()), + connection, block_args, - self.sender_config().vm_execution_cache_misses_limit, - custom_tracers, None, + custom_tracers, ) - .await?; + .await? + .vm; let (output, revert_reason) = match result.result { ExecutionResult::Success { output, .. } => (output, None), @@ -249,9 +240,10 @@ impl DebugNamespace { Ok(Self::map_call(call, false)) } - async fn shared_args(&self) -> TxSharedArgs { + async fn call_args(&self, enforced_base_fee: Option) -> TxSetupArgs { let sender_config = self.sender_config(); - TxSharedArgs { + TxSetupArgs { + execution_mode: TxExecutionMode::EthCall, operator_account: AccountTreeId::default(), fee_input: self.batch_fee_input, base_system_contracts: self.api_contracts.eth_call.clone(), @@ -263,6 +255,7 @@ impl DebugNamespace { .tx_sender .read_whitelisted_tokens_for_aa_cache() .await, + enforced_base_fee, } } } diff --git a/core/node/api_server/src/web3/namespaces/eth.rs b/core/node/api_server/src/web3/namespaces/eth.rs index 1da4aaffbd0b..3d18e4279de6 100644 --- a/core/node/api_server/src/web3/namespaces/eth.rs +++ b/core/node/api_server/src/web3/namespaces/eth.rs @@ -70,18 +70,11 @@ impl EthNamespace { .last_sealed_l2_block .diff_with_block_args(&block_args), ); - drop(connection); - if request.gas.is_none() { - request.gas = Some( - self.state - .tx_sender - .get_default_eth_call_gas(block_args) - .await - .map_err(Web3Error::InternalError)? - .into(), - ) + request.gas = Some(block_args.default_eth_call_gas(&mut connection).await?); } + drop(connection); + let call_overrides = request.get_call_overrides()?; let tx = L2Tx::from_request(request.into(), self.state.api_config.max_tx_size)?; // It is assumed that the previous checks has already enforced that the `max_fee_per_gas` is at most u64. 
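// Sketch of the gas-defaulting step in the `eth_call` / `debug_traceCall` handlers above: when the
// caller leaves `gas` unset, it is filled via `BlockArgs::default_eth_call_gas` (a DB lookup in the
// real code) while the connection is still held. Types are simplified stand-ins.
struct CallRequest {
    gas: Option<u64>,
}

fn fill_default_gas(request: &mut CallRequest, default_eth_call_gas: impl FnOnce() -> u64) {
    if request.gas.is_none() {
        // Only compute the default when it is actually needed.
        request.gas = Some(default_eth_call_gas());
    }
}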
diff --git a/core/node/api_server/src/web3/testonly.rs b/core/node/api_server/src/web3/testonly.rs index 0f8c71aa6281..d8e7d0b65393 100644 --- a/core/node/api_server/src/web3/testonly.rs +++ b/core/node/api_server/src/web3/testonly.rs @@ -8,11 +8,12 @@ use zksync_dal::ConnectionPool; use zksync_health_check::CheckHealth; use zksync_node_fee_model::MockBatchFeeParamsProvider; use zksync_state::PostgresStorageCaches; +use zksync_state_keeper::seal_criteria::NoopSealer; use zksync_types::L2ChainId; use super::{metrics::ApiTransportLabel, *}; use crate::{ - execution_sandbox::{testonly::MockTransactionExecutor, TransactionExecutor}, + execution_sandbox::{testonly::MockOneshotExecutor, TransactionExecutor}, tx_sender::TxSenderConfig, }; @@ -48,7 +49,9 @@ pub(crate) async fn create_test_tx_sender( .await .expect("failed building transaction sender"); - Arc::get_mut(&mut tx_sender.0).unwrap().executor = tx_executor; + let tx_sender_inner = Arc::get_mut(&mut tx_sender.0).unwrap(); + tx_sender_inner.executor = tx_executor; + tx_sender_inner.sealer = Arc::new(NoopSealer); // prevents "unexecutable transaction" errors (tx_sender, vm_barrier) } @@ -99,7 +102,7 @@ impl ApiServerHandles { pub async fn spawn_http_server( api_config: InternalApiConfig, pool: ConnectionPool, - tx_executor: MockTransactionExecutor, + tx_executor: MockOneshotExecutor, method_tracer: Arc, stop_receiver: watch::Receiver, ) -> ApiServerHandles { @@ -127,7 +130,7 @@ pub async fn spawn_ws_server( api_config, pool, websocket_requests_per_minute_limit, - MockTransactionExecutor::default(), + MockOneshotExecutor::default(), Arc::default(), stop_receiver, ) @@ -139,7 +142,7 @@ async fn spawn_server( api_config: InternalApiConfig, pool: ConnectionPool, websocket_requests_per_minute_limit: Option, - tx_executor: MockTransactionExecutor, + tx_executor: MockOneshotExecutor, method_tracer: Arc, stop_receiver: watch::Receiver, ) -> (ApiServerHandles, mpsc::UnboundedReceiver) { diff --git a/core/node/api_server/src/web3/tests/mod.rs b/core/node/api_server/src/web3/tests/mod.rs index 409eb2004d17..5617b097c0c1 100644 --- a/core/node/api_server/src/web3/tests/mod.rs +++ b/core/node/api_server/src/web3/tests/mod.rs @@ -26,9 +26,12 @@ use zksync_node_test_utils::{ create_l1_batch, create_l1_batch_metadata, create_l2_block, create_l2_transaction, l1_batch_metadata_to_commitment_artifacts, prepare_recovery_snapshot, }; +use zksync_system_constants::{ + SYSTEM_CONTEXT_ADDRESS, SYSTEM_CONTEXT_CURRENT_L2_BLOCK_INFO_POSITION, +}; use zksync_types::{ api, - block::L2BlockHeader, + block::{pack_block_info, L2BlockHeader}, get_nonce_key, l2::L2Tx, storage::get_code_key, @@ -55,7 +58,7 @@ use zksync_web3_decl::{ use super::*; use crate::{ - execution_sandbox::testonly::MockTransactionExecutor, + execution_sandbox::testonly::MockOneshotExecutor, web3::testonly::{spawn_http_server, spawn_ws_server}, }; @@ -135,8 +138,8 @@ trait HttpTest: Send + Sync { StorageInitialization::Genesis } - fn transaction_executor(&self) -> MockTransactionExecutor { - MockTransactionExecutor::default() + fn transaction_executor(&self) -> MockOneshotExecutor { + MockOneshotExecutor::default() } fn method_tracer(&self) -> Arc { @@ -174,7 +177,7 @@ impl StorageInitialization { } async fn prepare_storage( - &self, + self, network_config: &NetworkConfig, storage: &mut Connection<'_, Core>, ) -> anyhow::Result<()> { @@ -189,17 +192,33 @@ impl StorageInitialization { insert_genesis_batch(storage, ¶ms).await?; } } - Self::Recovery { logs, factory_deps } => { + Self::Recovery { + mut 
logs, + factory_deps, + } => { + let l2_block_info_key = StorageKey::new( + AccountTreeId::new(SYSTEM_CONTEXT_ADDRESS), + SYSTEM_CONTEXT_CURRENT_L2_BLOCK_INFO_POSITION, + ); + let block_info = pack_block_info( + Self::SNAPSHOT_RECOVERY_BLOCK.0.into(), + Self::SNAPSHOT_RECOVERY_BLOCK.0.into(), + ); + logs.push(StorageLog::new_write_log( + l2_block_info_key, + u256_to_h256(block_info), + )); + prepare_recovery_snapshot( storage, Self::SNAPSHOT_RECOVERY_BATCH, Self::SNAPSHOT_RECOVERY_BLOCK, - logs, + &logs, ) .await; storage .factory_deps_dal() - .insert_factory_deps(Self::SNAPSHOT_RECOVERY_BLOCK, factory_deps) + .insert_factory_deps(Self::SNAPSHOT_RECOVERY_BLOCK, &factory_deps) .await?; // Insert the next L1 batch in the storage so that the API server doesn't hang up. @@ -282,7 +301,7 @@ fn execute_l2_transaction(transaction: L2Tx) -> TransactionExecutionResult { } } -/// Stores L2 block with a single transaction and returns the L2 block header + transaction hash. +/// Stores L2 block and returns the L2 block header. async fn store_l2_block( storage: &mut Connection<'_, Core>, number: L2BlockNumber, @@ -298,6 +317,18 @@ async fn store_l2_block( assert_matches!(tx_submission_result, L2TxSubmissionResult::Added); } + // Record L2 block info which is read by the VM sandbox logic + let l2_block_info_key = StorageKey::new( + AccountTreeId::new(SYSTEM_CONTEXT_ADDRESS), + SYSTEM_CONTEXT_CURRENT_L2_BLOCK_INFO_POSITION, + ); + let block_info = pack_block_info(number.0.into(), number.0.into()); + let l2_block_log = StorageLog::new_write_log(l2_block_info_key, u256_to_h256(block_info)); + storage + .storage_logs_dal() + .append_storage_logs(number, &[l2_block_log]) + .await?; + let new_l2_block = create_l2_block(number.0); storage.blocks_dal().insert_l2_block(&new_l2_block).await?; storage diff --git a/core/node/api_server/src/web3/tests/vm.rs b/core/node/api_server/src/web3/tests/vm.rs index 90e1373a5cc6..5b04250eebf4 100644 --- a/core/node/api_server/src/web3/tests/vm.rs +++ b/core/node/api_server/src/web3/tests/vm.rs @@ -30,15 +30,15 @@ impl CallTest { } } - fn create_executor(only_block: L2BlockNumber) -> MockTransactionExecutor { - let mut tx_executor = MockTransactionExecutor::default(); - tx_executor.set_call_responses(move |tx, block_args| { + fn create_executor(latest_block: L2BlockNumber) -> MockOneshotExecutor { + let mut tx_executor = MockOneshotExecutor::default(); + tx_executor.set_call_responses(move |tx, env| { let expected_block_number = match tx.execute.calldata() { - b"pending" => only_block + 1, - b"first" => only_block, + b"pending" => latest_block + 1, + b"latest" => latest_block, data => panic!("Unexpected calldata: {data:?}"), }; - assert_eq!(block_args.resolved_block_number(), expected_block_number); + assert_eq!(env.l1_batch.first_l2_block.number, expected_block_number.0); ExecutionResult::Success { output: b"output".to_vec(), @@ -50,15 +50,20 @@ impl CallTest { #[async_trait] impl HttpTest for CallTest { - fn transaction_executor(&self) -> MockTransactionExecutor { - Self::create_executor(L2BlockNumber(0)) + fn transaction_executor(&self) -> MockOneshotExecutor { + Self::create_executor(L2BlockNumber(1)) } async fn test( &self, client: &DynClient, - _pool: &ConnectionPool, + pool: &ConnectionPool, ) -> anyhow::Result<()> { + // Store an additional L2 block because L2 block #0 has some special processing making it work incorrectly. 
+ let mut connection = pool.connection().await?; + store_l2_block(&mut connection, L2BlockNumber(1), &[]).await?; + drop(connection); + let call_result = client .call(Self::call_request(b"pending"), None, None) .await?; @@ -66,8 +71,8 @@ impl HttpTest for CallTest { let valid_block_numbers_and_calldata = [ (api::BlockNumber::Pending, b"pending" as &[_]), - (api::BlockNumber::Latest, b"first"), - (0.into(), b"first"), + (api::BlockNumber::Latest, b"latest"), + (0.into(), b"latest"), ]; for (number, calldata) in valid_block_numbers_and_calldata { let number = api::BlockIdVariant::BlockNumber(number); @@ -107,7 +112,7 @@ impl HttpTest for CallTestAfterSnapshotRecovery { StorageInitialization::empty_recovery() } - fn transaction_executor(&self) -> MockTransactionExecutor { + fn transaction_executor(&self) -> MockOneshotExecutor { let first_local_l2_block = StorageInitialization::SNAPSHOT_RECOVERY_BLOCK + 1; CallTest::create_executor(first_local_l2_block) } @@ -146,7 +151,7 @@ impl HttpTest for CallTestAfterSnapshotRecovery { for number in first_l2_block_numbers { let number = api::BlockIdVariant::BlockNumber(number); let call_result = client - .call(CallTest::call_request(b"first"), Some(number), None) + .call(CallTest::call_request(b"latest"), Some(number), None) .await?; assert_eq!(call_result.0, b"output"); } @@ -213,16 +218,16 @@ impl HttpTest for SendRawTransactionTest { } } - fn transaction_executor(&self) -> MockTransactionExecutor { - let mut tx_executor = MockTransactionExecutor::default(); + fn transaction_executor(&self) -> MockOneshotExecutor { + let mut tx_executor = MockOneshotExecutor::default(); let pending_block = if self.snapshot_recovery { StorageInitialization::SNAPSHOT_RECOVERY_BLOCK + 2 } else { L2BlockNumber(1) }; - tx_executor.set_tx_responses(move |tx, block_args| { + tx_executor.set_tx_responses(move |tx, env| { assert_eq!(tx.hash(), Self::transaction_bytes_and_hash().1); - assert_eq!(block_args.resolved_block_number(), pending_block); + assert_eq!(env.l1_batch.first_l2_block.number, pending_block.0); ExecutionResult::Success { output: vec![] } }); tx_executor @@ -311,8 +316,8 @@ impl SendTransactionWithDetailedOutputTest { } #[async_trait] impl HttpTest for SendTransactionWithDetailedOutputTest { - fn transaction_executor(&self) -> MockTransactionExecutor { - let mut tx_executor = MockTransactionExecutor::default(); + fn transaction_executor(&self) -> MockOneshotExecutor { + let mut tx_executor = MockOneshotExecutor::default(); let tx_bytes_and_hash = SendRawTransactionTest::transaction_bytes_and_hash(); let vm_execution_logs = VmExecutionLogs { storage_logs: self.storage_logs(), @@ -322,9 +327,9 @@ impl HttpTest for SendTransactionWithDetailedOutputTest { total_log_queries_count: 0, }; - tx_executor.set_tx_responses_with_logs(move |tx, block_args| { + tx_executor.set_tx_responses_with_logs(move |tx, env| { assert_eq!(tx.hash(), tx_bytes_and_hash.1); - assert_eq!(block_args.resolved_block_number(), L2BlockNumber(1)); + assert_eq!(env.l1_batch.first_l2_block.number, 1); VmExecutionResultAndLogs { result: ExecutionResult::Success { output: vec![] }, @@ -406,15 +411,20 @@ impl TraceCallTest { #[async_trait] impl HttpTest for TraceCallTest { - fn transaction_executor(&self) -> MockTransactionExecutor { - CallTest::create_executor(L2BlockNumber(0)) + fn transaction_executor(&self) -> MockOneshotExecutor { + CallTest::create_executor(L2BlockNumber(1)) } async fn test( &self, client: &DynClient, - _pool: &ConnectionPool, + pool: &ConnectionPool, ) -> anyhow::Result<()> { + 
// Store an additional L2 block because L2 block #0 has some special processing making it work incorrectly. + let mut connection = pool.connection().await?; + store_l2_block(&mut connection, L2BlockNumber(1), &[]).await?; + drop(connection); + let call_request = CallTest::call_request(b"pending"); let call_result = client.trace_call(call_request.clone(), None, None).await?; Self::assert_debug_call(&call_request, &call_result); @@ -424,13 +434,9 @@ impl HttpTest for TraceCallTest { .await?; Self::assert_debug_call(&call_request, &call_result); - let genesis_block_numbers = [ - api::BlockNumber::Earliest, - api::BlockNumber::Latest, - 0.into(), - ]; - let call_request = CallTest::call_request(b"first"); - for number in genesis_block_numbers { + let latest_block_numbers = [api::BlockNumber::Latest, 1.into()]; + let call_request = CallTest::call_request(b"latest"); + for number in latest_block_numbers { let call_result = client .trace_call( call_request.clone(), @@ -474,7 +480,7 @@ impl HttpTest for TraceCallTestAfterSnapshotRecovery { StorageInitialization::empty_recovery() } - fn transaction_executor(&self) -> MockTransactionExecutor { + fn transaction_executor(&self) -> MockOneshotExecutor { let number = StorageInitialization::SNAPSHOT_RECOVERY_BLOCK + 1; CallTest::create_executor(number) } @@ -504,7 +510,7 @@ impl HttpTest for TraceCallTestAfterSnapshotRecovery { assert_pruned_block_error(&error, first_local_l2_block); } - let call_request = CallTest::call_request(b"first"); + let call_request = CallTest::call_request(b"latest"); let first_l2_block_numbers = [api::BlockNumber::Latest, first_local_l2_block.0.into()]; for number in first_l2_block_numbers { let number = api::BlockId::Number(number); @@ -544,18 +550,18 @@ impl HttpTest for EstimateGasTest { SendRawTransactionTest { snapshot_recovery }.storage_initialization() } - fn transaction_executor(&self) -> MockTransactionExecutor { - let mut tx_executor = MockTransactionExecutor::default(); + fn transaction_executor(&self) -> MockOneshotExecutor { + let mut tx_executor = MockOneshotExecutor::default(); let pending_block_number = if self.snapshot_recovery { StorageInitialization::SNAPSHOT_RECOVERY_BLOCK + 2 } else { L2BlockNumber(1) }; let gas_limit_threshold = self.gas_limit_threshold.clone(); - tx_executor.set_call_responses(move |tx, block_args| { + tx_executor.set_tx_responses(move |tx, env| { assert_eq!(tx.execute.calldata(), [] as [u8; 0]); assert_eq!(tx.nonce(), Some(Nonce(0))); - assert_eq!(block_args.resolved_block_number(), pending_block_number); + assert_eq!(env.l1_batch.first_l2_block.number, pending_block_number.0); let gas_limit_threshold = gas_limit_threshold.load(Ordering::SeqCst); if tx.gas_limit() >= U256::from(gas_limit_threshold) { @@ -637,49 +643,17 @@ async fn estimate_gas_after_snapshot_recovery() { #[derive(Debug)] struct EstimateGasWithStateOverrideTest { - gas_limit_threshold: Arc, - snapshot_recovery: bool, -} - -impl EstimateGasWithStateOverrideTest { - fn new(snapshot_recovery: bool) -> Self { - Self { - gas_limit_threshold: Arc::default(), - snapshot_recovery, - } - } + inner: EstimateGasTest, } #[async_trait] impl HttpTest for EstimateGasWithStateOverrideTest { fn storage_initialization(&self) -> StorageInitialization { - let snapshot_recovery = self.snapshot_recovery; - SendRawTransactionTest { snapshot_recovery }.storage_initialization() + self.inner.storage_initialization() } - fn transaction_executor(&self) -> MockTransactionExecutor { - let mut tx_executor = MockTransactionExecutor::default(); - let 
pending_block_number = if self.snapshot_recovery { - StorageInitialization::SNAPSHOT_RECOVERY_BLOCK + 2 - } else { - L2BlockNumber(1) - }; - let gas_limit_threshold = self.gas_limit_threshold.clone(); - tx_executor.set_call_responses(move |tx, block_args| { - assert_eq!(tx.execute.calldata(), [] as [u8; 0]); - assert_eq!(tx.nonce(), Some(Nonce(0))); - assert_eq!(block_args.resolved_block_number(), pending_block_number); - - let gas_limit_threshold = gas_limit_threshold.load(Ordering::SeqCst); - if tx.gas_limit() >= U256::from(gas_limit_threshold) { - ExecutionResult::Success { output: vec![] } - } else { - ExecutionResult::Revert { - output: VmRevertReason::VmError, - } - } - }); - tx_executor + fn transaction_executor(&self) -> MockOneshotExecutor { + self.inner.transaction_executor() } async fn test( @@ -735,5 +709,6 @@ impl HttpTest for EstimateGasWithStateOverrideTest { #[tokio::test] async fn estimate_gas_with_state_override() { - test_http_server(EstimateGasWithStateOverrideTest::new(false)).await; + let inner = EstimateGasTest::new(false); + test_http_server(EstimateGasWithStateOverrideTest { inner }).await; } diff --git a/core/node/base_token_adjuster/Cargo.toml b/core/node/base_token_adjuster/Cargo.toml index c21576e37327..3a0beb2ea137 100644 --- a/core/node/base_token_adjuster/Cargo.toml +++ b/core/node/base_token_adjuster/Cargo.toml @@ -19,7 +19,8 @@ zksync_external_price_api.workspace = true zksync_contracts.workspace = true zksync_eth_client.workspace = true zksync_node_fee_model.workspace = true - +zksync_utils.workspace = true +vise.workspace = true tokio = { workspace = true, features = ["time"] } anyhow.workspace = true diff --git a/core/node/base_token_adjuster/src/base_token_ratio_persister.rs b/core/node/base_token_adjuster/src/base_token_ratio_persister.rs index 41796cf2197a..12cd6233efbb 100644 --- a/core/node/base_token_adjuster/src/base_token_ratio_persister.rs +++ b/core/node/base_token_adjuster/src/base_token_ratio_persister.rs @@ -1,4 +1,4 @@ -use std::{cmp::max, fmt::Debug, sync::Arc, time::Duration}; +use std::{cmp::max, fmt::Debug, sync::Arc, time::Instant}; use anyhow::Context as _; use tokio::{sync::watch, time::sleep}; @@ -14,6 +14,8 @@ use zksync_types::{ Address, U256, }; +use crate::metrics::{OperationResult, OperationResultLabels, METRICS}; + #[derive(Debug, Clone)] pub struct BaseTokenRatioPersisterL1Params { pub eth_client: Box, @@ -82,47 +84,7 @@ impl BaseTokenRatioPersister { // TODO(PE-148): Consider shifting retry upon adding external API redundancy. 
let new_ratio = self.retry_fetch_ratio().await?; self.persist_ratio(new_ratio).await?; - - let Some(l1_params) = &self.l1_params else { - return Ok(()); - }; - - let max_attempts = self.config.l1_tx_sending_max_attempts; - let sleep_duration = self.config.l1_tx_sending_sleep_duration(); - let mut result: anyhow::Result<()> = Ok(()); - let mut prev_base_fee_per_gas: Option = None; - let mut prev_priority_fee_per_gas: Option = None; - - for attempt in 0..max_attempts { - let (base_fee_per_gas, priority_fee_per_gas) = - self.get_eth_fees(l1_params, prev_base_fee_per_gas, prev_priority_fee_per_gas); - - result = self - .send_ratio_to_l1(l1_params, new_ratio, base_fee_per_gas, priority_fee_per_gas) - .await; - if let Some(err) = result.as_ref().err() { - tracing::info!( - "Failed to update base token multiplier on L1, attempt {}, base_fee_per_gas {}, priority_fee_per_gas {}: {}", - attempt + 1, - base_fee_per_gas, - priority_fee_per_gas, - err - ); - tokio::time::sleep(sleep_duration).await; - prev_base_fee_per_gas = Some(base_fee_per_gas); - prev_priority_fee_per_gas = Some(priority_fee_per_gas); - } else { - tracing::info!( - "Updated base token multiplier on L1: numerator {}, denominator {}, base_fee_per_gas {}, priority_fee_per_gas {}", - new_ratio.numerator.get(), - new_ratio.denominator.get(), - base_fee_per_gas, - priority_fee_per_gas - ); - return result; - } - } - result + self.retry_update_ratio_on_l1(new_ratio).await } fn get_eth_fees( @@ -157,36 +119,110 @@ impl BaseTokenRatioPersister { (base_fee_per_gas, priority_fee_per_gas) } + async fn retry_update_ratio_on_l1(&self, new_ratio: BaseTokenAPIRatio) -> anyhow::Result<()> { + let Some(l1_params) = &self.l1_params else { + return Ok(()); + }; + + let max_attempts = self.config.l1_tx_sending_max_attempts; + let sleep_duration = self.config.l1_tx_sending_sleep_duration(); + let mut prev_base_fee_per_gas: Option = None; + let mut prev_priority_fee_per_gas: Option = None; + let mut last_error = None; + for attempt in 0..max_attempts { + let (base_fee_per_gas, priority_fee_per_gas) = + self.get_eth_fees(l1_params, prev_base_fee_per_gas, prev_priority_fee_per_gas); + + let start_time = Instant::now(); + let result = self + .update_ratio_on_l1(l1_params, new_ratio, base_fee_per_gas, priority_fee_per_gas) + .await; + + match result { + Ok(x) => { + tracing::info!( + "Updated base token multiplier on L1: numerator {}, denominator {}, base_fee_per_gas {}, priority_fee_per_gas {}", + new_ratio.numerator.get(), + new_ratio.denominator.get(), + base_fee_per_gas, + priority_fee_per_gas + ); + METRICS + .l1_gas_used + .set(x.unwrap_or(U256::zero()).low_u128() as u64); + METRICS.l1_update_latency[&OperationResultLabels { + result: OperationResult::Success, + }] + .observe(start_time.elapsed()); + + return Ok(()); + } + Err(err) => { + tracing::info!( + "Failed to update base token multiplier on L1, attempt {}, base_fee_per_gas {}, priority_fee_per_gas {}: {}", + attempt, + base_fee_per_gas, + priority_fee_per_gas, + err + ); + METRICS.l1_update_latency[&OperationResultLabels { + result: OperationResult::Failure, + }] + .observe(start_time.elapsed()); + + tokio::time::sleep(sleep_duration).await; + prev_base_fee_per_gas = Some(base_fee_per_gas); + prev_priority_fee_per_gas = Some(priority_fee_per_gas); + last_error = Some(err) + } + } + } + + let error_message = "Failed to update base token multiplier on L1"; + Err(last_error + .map(|x| x.context(error_message)) + .unwrap_or_else(|| anyhow::anyhow!(error_message))) + } + async fn 
retry_fetch_ratio(&self) -> anyhow::Result { - let sleep_duration = Duration::from_secs(1); - let max_retries = 5; - let mut attempts = 0; + let sleep_duration = self.config.price_fetching_sleep_duration(); + let max_retries = self.config.price_fetching_max_attempts; + let mut last_error = None; - loop { + for attempt in 0..max_retries { + let start_time = Instant::now(); match self .price_api_client .fetch_ratio(self.base_token_address) .await { Ok(ratio) => { + METRICS.external_price_api_latency[&OperationResultLabels { + result: OperationResult::Success, + }] + .observe(start_time.elapsed()); return Ok(ratio); } - Err(err) if attempts < max_retries => { - attempts += 1; + Err(err) => { tracing::warn!( - "Attempt {}/{} to fetch ratio from coingecko failed with err: {}. Retrying...", - attempts, + "Attempt {}/{} to fetch ratio from external price api failed with err: {}. Retrying...", + attempt, max_retries, err ); + last_error = Some(err); + METRICS.external_price_api_latency[&OperationResultLabels { + result: OperationResult::Failure, + }] + .observe(start_time.elapsed()); sleep(sleep_duration).await; } - Err(err) => { - return Err(err) - .context("Failed to fetch base token ratio after multiple attempts"); - } } } + let error_message = "Failed to fetch base token ratio after multiple attempts"; + Err(last_error + .map(|x| x.context(error_message)) + .unwrap_or_else(|| anyhow::anyhow!(error_message))) } async fn persist_ratio(&self, api_ratio: BaseTokenAPIRatio) -> anyhow::Result { @@ -209,13 +245,13 @@ impl BaseTokenRatioPersister { Ok(id) } - async fn send_ratio_to_l1( + async fn update_ratio_on_l1( &self, l1_params: &BaseTokenRatioPersisterL1Params, api_ratio: BaseTokenAPIRatio, base_fee_per_gas: u64, priority_fee_per_gas: u64, - ) -> anyhow::Result<()> { + ) -> anyhow::Result> { let fn_set_token_multiplier = l1_params .chain_admin_contract .function("setTokenMultiplier") @@ -276,7 +312,7 @@ impl BaseTokenRatioPersister { .context("failed getting receipt for `setTokenMultiplier` transaction")?; if let Some(receipt) = maybe_receipt { if receipt.status == Some(1.into()) { - return Ok(()); + return Ok(receipt.gas_used); } return Err(anyhow::Error::msg(format!( "`setTokenMultiplier` transaction {:?} failed with status {:?}", diff --git a/core/node/base_token_adjuster/src/lib.rs b/core/node/base_token_adjuster/src/lib.rs index 332fb5f47aab..d786b440f622 100644 --- a/core/node/base_token_adjuster/src/lib.rs +++ b/core/node/base_token_adjuster/src/lib.rs @@ -5,3 +5,4 @@ pub use self::{ mod base_token_ratio_persister; mod base_token_ratio_provider; +mod metrics; diff --git a/core/node/base_token_adjuster/src/metrics.rs b/core/node/base_token_adjuster/src/metrics.rs new file mode 100644 index 000000000000..d84e4da0c0c7 --- /dev/null +++ b/core/node/base_token_adjuster/src/metrics.rs @@ -0,0 +1,28 @@ +use std::time::Duration; + +use vise::{Buckets, EncodeLabelSet, EncodeLabelValue, Family, Gauge, Histogram, Metrics}; + +#[derive(Debug, Clone, Copy, PartialEq, Eq, Hash, EncodeLabelSet, EncodeLabelValue)] +#[metrics(label = "operation_result", rename_all = "snake_case")] +pub(super) enum OperationResult { + Success, + Failure, +} + +#[derive(Debug, Clone, Copy, PartialEq, Eq, Hash, EncodeLabelSet)] +pub(crate) struct OperationResultLabels { + pub result: OperationResult, +} + +#[derive(Debug, Metrics)] +#[metrics(prefix = "base_token_adjuster")] +pub(crate) struct BaseTokenAdjusterMetrics { + pub l1_gas_used: Gauge, + #[metrics(buckets = Buckets::LATENCIES)] + pub external_price_api_latency: 
Family>, + #[metrics(buckets = Buckets::LATENCIES)] + pub l1_update_latency: Family>, +} + +#[vise::register] +pub(crate) static METRICS: vise::Global = vise::Global::new(); diff --git a/core/node/da_dispatcher/src/da_dispatcher.rs b/core/node/da_dispatcher/src/da_dispatcher.rs index ea1858da25d3..f8e6f6b31723 100644 --- a/core/node/da_dispatcher/src/da_dispatcher.rs +++ b/core/node/da_dispatcher/src/da_dispatcher.rs @@ -5,7 +5,10 @@ use chrono::Utc; use rand::Rng; use tokio::sync::watch::Receiver; use zksync_config::DADispatcherConfig; -use zksync_da_client::{types::DAError, DataAvailabilityClient}; +use zksync_da_client::{ + types::{DAError, InclusionData}, + DataAvailabilityClient, +}; use zksync_dal::{ConnectionPool, Core, CoreDal}; use zksync_types::L1BatchNumber; @@ -133,16 +136,21 @@ impl DataAvailabilityDispatcher { return Ok(()); }; - let inclusion_data = self - .client - .get_inclusion_data(blob_info.blob_id.as_str()) - .await - .with_context(|| { - format!( - "failed to get inclusion data for blob_id: {}, batch_number: {}", - blob_info.blob_id, blob_info.l1_batch_number - ) - })?; + let inclusion_data = if self.config.use_dummy_inclusion_data() { + self.client + .get_inclusion_data(blob_info.blob_id.as_str()) + .await + .with_context(|| { + format!( + "failed to get inclusion data for blob_id: {}, batch_number: {}", + blob_info.blob_id, blob_info.l1_batch_number + ) + })? + } else { + // if the inclusion verification is disabled, we don't need to wait for the inclusion + // data before committing the batch, so simply return an empty vector + Some(InclusionData { data: vec![] }) + }; let Some(inclusion_data) = inclusion_data else { return Ok(()); diff --git a/core/node/external_proof_integration_api/Cargo.toml b/core/node/external_proof_integration_api/Cargo.toml index ae7cd4c4d031..2e8176cd8832 100644 --- a/core/node/external_proof_integration_api/Cargo.toml +++ b/core/node/external_proof_integration_api/Cargo.toml @@ -21,3 +21,4 @@ zksync_dal.workspace = true tokio.workspace = true bincode.workspace = true anyhow.workspace = true +vise.workspace = true diff --git a/core/node/external_proof_integration_api/src/lib.rs b/core/node/external_proof_integration_api/src/lib.rs index 51fecf8c23fc..b1ef33b44c10 100644 --- a/core/node/external_proof_integration_api/src/lib.rs +++ b/core/node/external_proof_integration_api/src/lib.rs @@ -1,4 +1,5 @@ mod error; +mod metrics; mod processor; use std::{net::SocketAddr, sync::Arc}; diff --git a/core/node/external_proof_integration_api/src/metrics.rs b/core/node/external_proof_integration_api/src/metrics.rs new file mode 100644 index 000000000000..70815f542a05 --- /dev/null +++ b/core/node/external_proof_integration_api/src/metrics.rs @@ -0,0 +1,55 @@ +use std::time::Duration; + +use tokio::time::Instant; +use vise::{EncodeLabelSet, EncodeLabelValue, Histogram, LabeledFamily, Metrics}; + +#[derive(Debug, Clone, Copy, PartialEq, Eq, Hash, EncodeLabelSet, EncodeLabelValue)] +#[metrics(label = "outcome", rename_all = "snake_case")] +pub(crate) enum CallOutcome { + Success, + Failure, +} + +#[derive(Debug, Clone, Copy, PartialEq, Eq, Hash, EncodeLabelSet, EncodeLabelValue)] +#[metrics(label = "type", rename_all = "snake_case")] +pub(crate) enum Method { + GetLatestProofGenerationData, + GetSpecificProofGenerationData, + VerifyProof, +} + +#[derive(Debug, Metrics)] +#[metrics(prefix = "external_proof_integration_api")] +pub(crate) struct ProofIntegrationApiMetrics { + #[metrics(labels = ["method", "outcome"], buckets = vise::Buckets::LATENCIES)] + 
pub call_latency: LabeledFamily<(Method, CallOutcome), Histogram, 2>, +} + +pub(crate) struct MethodCallGuard { + method_type: Method, + outcome: CallOutcome, + started_at: Instant, +} + +impl MethodCallGuard { + pub(crate) fn new(method_type: Method) -> Self { + MethodCallGuard { + method_type, + outcome: CallOutcome::Failure, + started_at: Instant::now(), + } + } + + pub(crate) fn mark_successful(&mut self) { + self.outcome = CallOutcome::Success; + } +} + +impl Drop for MethodCallGuard { + fn drop(&mut self) { + METRICS.call_latency[&(self.method_type, self.outcome)].observe(self.started_at.elapsed()); + } +} + +#[vise::register] +pub(crate) static METRICS: vise::Global = vise::Global::new(); diff --git a/core/node/external_proof_integration_api/src/processor.rs b/core/node/external_proof_integration_api/src/processor.rs index a15e45e48037..e9e56df4a068 100644 --- a/core/node/external_proof_integration_api/src/processor.rs +++ b/core/node/external_proof_integration_api/src/processor.rs @@ -17,7 +17,10 @@ use zksync_prover_interface::{ outputs::L1BatchProofForL1, }; -use crate::error::ProcessorError; +use crate::{ + error::ProcessorError, + metrics::{Method, MethodCallGuard}, +}; #[derive(Clone)] pub(crate) struct Processor { @@ -39,6 +42,36 @@ impl Processor { } } + pub(crate) async fn verify_proof( + &self, + Path(l1_batch_number): Path, + Json(payload): Json, + ) -> Result<(), ProcessorError> { + let mut guard = MethodCallGuard::new(Method::VerifyProof); + + let l1_batch_number = L1BatchNumber(l1_batch_number); + tracing::info!( + "Received request to verify proof for batch: {:?}", + l1_batch_number + ); + + let serialized_proof = bincode::serialize(&payload.0)?; + let expected_proof = bincode::serialize( + &self + .blob_store + .get::((l1_batch_number, payload.0.protocol_version)) + .await?, + )?; + + if serialized_proof != expected_proof { + return Err(ProcessorError::InvalidProof); + } + + guard.mark_successful(); + + Ok(()) + } + #[tracing::instrument(skip_all)] pub(crate) async fn get_proof_generation_data( &mut self, @@ -46,13 +79,18 @@ impl Processor { ) -> Result, ProcessorError> { tracing::info!("Received request for proof generation data: {:?}", request); + let mut guard = match request.0 .0 { + Some(_) => MethodCallGuard::new(Method::GetSpecificProofGenerationData), + None => MethodCallGuard::new(Method::GetLatestProofGenerationData), + }; + let latest_available_batch = self .pool .connection() .await .unwrap() .proof_generation_dal() - .get_available_batch() + .get_latest_proven_batch() .await?; let l1_batch_number = if let Some(l1_batch_number) = request.0 .0 { @@ -74,9 +112,13 @@ impl Processor { .await; match proof_generation_data { - Ok(data) => Ok(Json(ProofGenerationDataResponse::Success(Some(Box::new( - data, - ))))), + Ok(data) => { + guard.mark_successful(); + + Ok(Json(ProofGenerationDataResponse::Success(Some(Box::new( + data, + ))))) + } Err(err) => Err(err), } } @@ -161,30 +203,4 @@ impl Processor { l1_verifier_config: protocol_version.l1_verifier_config, }) } - - pub(crate) async fn verify_proof( - &self, - Path(l1_batch_number): Path, - Json(payload): Json, - ) -> Result<(), ProcessorError> { - let l1_batch_number = L1BatchNumber(l1_batch_number); - tracing::info!( - "Received request to verify proof for batch: {:?}", - l1_batch_number - ); - - let serialized_proof = bincode::serialize(&payload.0)?; - let expected_proof = bincode::serialize( - &self - .blob_store - .get::((l1_batch_number, payload.0.protocol_version)) - .await?, - )?; - - if serialized_proof 
!= expected_proof { - return Err(ProcessorError::InvalidProof); - } - - Ok(()) - } } diff --git a/core/node/node_framework/src/implementations/layers/external_proof_integration_api.rs b/core/node/node_framework/src/implementations/layers/external_proof_integration_api.rs index 6f8805bc5fa3..9678c0a97932 100644 --- a/core/node/node_framework/src/implementations/layers/external_proof_integration_api.rs +++ b/core/node/node_framework/src/implementations/layers/external_proof_integration_api.rs @@ -26,7 +26,7 @@ pub struct ExternalProofIntegrationApiLayer { #[derive(Debug, FromContext)] #[context(crate = crate)] pub struct Input { - pub master_pool: PoolResource, + pub replica_pool: PoolResource, pub object_store: ObjectStoreResource, } @@ -34,7 +34,7 @@ pub struct Input { #[context(crate = crate)] pub struct Output { #[context(task)] - pub task: ProverApiTask, + pub task: ExternalProofIntegrationApiTask, } impl ExternalProofIntegrationApiLayer { @@ -59,13 +59,13 @@ impl WiringLayer for ExternalProofIntegrationApiLayer { } async fn wire(self, input: Self::Input) -> Result { - let main_pool = input.master_pool.get().await?; + let replica_pool = input.replica_pool.get().await.unwrap(); let blob_store = input.object_store.0; - let task = ProverApiTask { + let task = ExternalProofIntegrationApiTask { external_proof_integration_api_config: self.external_proof_integration_api_config, blob_store, - main_pool, + replica_pool, commitment_mode: self.commitment_mode, }; @@ -74,15 +74,15 @@ impl WiringLayer for ExternalProofIntegrationApiLayer { } #[derive(Debug)] -pub struct ProverApiTask { +pub struct ExternalProofIntegrationApiTask { external_proof_integration_api_config: ExternalProofIntegrationApiConfig, blob_store: Arc, - main_pool: ConnectionPool, + replica_pool: ConnectionPool, commitment_mode: L1BatchCommitmentMode, } #[async_trait::async_trait] -impl Task for ProverApiTask { +impl Task for ExternalProofIntegrationApiTask { fn id(&self) -> TaskId { "external_proof_integration_api".into() } @@ -91,7 +91,7 @@ impl Task for ProverApiTask { zksync_external_proof_integration_api::run_server( self.external_proof_integration_api_config, self.blob_store, - self.main_pool, + self.replica_pool, self.commitment_mode, stop_receiver.0, ) diff --git a/core/node/node_framework/src/implementations/layers/state_keeper/output_handler.rs b/core/node/node_framework/src/implementations/layers/state_keeper/output_handler.rs index 002ee9543006..967bc83b11c3 100644 --- a/core/node/node_framework/src/implementations/layers/state_keeper/output_handler.rs +++ b/core/node/node_framework/src/implementations/layers/state_keeper/output_handler.rs @@ -43,8 +43,8 @@ pub struct OutputHandlerLayer { /// before they are included into L2 blocks. pre_insert_txs: bool, /// Whether protective reads persistence is enabled. - /// Must be `true` for any node that maintains a full Merkle Tree (e.g. any instance of main node). - /// May be set to `false` for nodes that do not participate in the sequencing process (e.g. external nodes). + /// May be set to `false` for nodes that do not participate in the sequencing process (e.g. external nodes) + /// or run `vm_runner_protective_reads` component. 
protective_reads_persistence_enabled: bool, } @@ -74,7 +74,7 @@ impl OutputHandlerLayer { l2_block_seal_queue_capacity, l2_native_token_vault_proxy_addr, pre_insert_txs: false, - protective_reads_persistence_enabled: true, + protective_reads_persistence_enabled: false, } } @@ -119,9 +119,6 @@ impl WiringLayer for OutputHandlerLayer { persistence = persistence.with_tx_insertion(); } if !self.protective_reads_persistence_enabled { - // **Important:** Disabling protective reads persistence is only sound if the node will never - // run a full Merkle tree OR an accompanying protective-reads-writer is being run. - tracing::warn!("Disabling persisting protective reads; this should be safe, but is considered an experimental option at the moment"); persistence = persistence.without_protective_reads(); } diff --git a/core/node/proof_data_handler/src/errors.rs b/core/node/proof_data_handler/src/errors.rs index f170b3b53e7c..15ef393294aa 100644 --- a/core/node/proof_data_handler/src/errors.rs +++ b/core/node/proof_data_handler/src/errors.rs @@ -10,6 +10,12 @@ pub(crate) enum RequestProcessorError { Dal(DalError), } +impl From for RequestProcessorError { + fn from(err: DalError) -> Self { + RequestProcessorError::Dal(err) + } +} + impl IntoResponse for RequestProcessorError { fn into_response(self) -> Response { let (status_code, message) = match self { diff --git a/core/node/proof_data_handler/src/tee_request_processor.rs b/core/node/proof_data_handler/src/tee_request_processor.rs index d85591dd2c90..4ae1a5026f14 100644 --- a/core/node/proof_data_handler/src/tee_request_processor.rs +++ b/core/node/proof_data_handler/src/tee_request_processor.rs @@ -3,15 +3,12 @@ use std::sync::Arc; use axum::{extract::Path, Json}; use zksync_config::configs::ProofDataHandlerConfig; use zksync_dal::{ConnectionPool, Core, CoreDal}; -use zksync_object_store::ObjectStore; -use zksync_prover_interface::{ - api::{ - RegisterTeeAttestationRequest, RegisterTeeAttestationResponse, SubmitProofResponse, - SubmitTeeProofRequest, TeeProofGenerationDataRequest, TeeProofGenerationDataResponse, - }, - inputs::TeeVerifierInput, +use zksync_object_store::{ObjectStore, ObjectStoreError}; +use zksync_prover_interface::api::{ + RegisterTeeAttestationRequest, RegisterTeeAttestationResponse, SubmitProofResponse, + SubmitTeeProofRequest, TeeProofGenerationDataRequest, TeeProofGenerationDataResponse, }; -use zksync_types::L1BatchNumber; +use zksync_types::{tee_types::TeeType, L1BatchNumber}; use crate::errors::RequestProcessorError; @@ -41,32 +38,77 @@ impl TeeRequestProcessor { ) -> Result, RequestProcessorError> { tracing::info!("Received request for proof generation data: {:?}", request); - let mut connection = self - .pool - .connection() - .await - .map_err(RequestProcessorError::Dal)?; - - let l1_batch_number_result = connection - .tee_proof_generation_dal() - .get_next_batch_to_be_proven(request.tee_type, self.config.proof_generation_timeout()) - .await - .map_err(RequestProcessorError::Dal)?; - - let l1_batch_number = match l1_batch_number_result { - Some(number) => number, - None => return Ok(Json(TeeProofGenerationDataResponse(None))), + let mut min_batch_number: Option = None; + let mut missing_range: Option<(L1BatchNumber, L1BatchNumber)> = None; + + let result = loop { + let l1_batch_number = match self + .lock_batch_for_proving(request.tee_type, min_batch_number) + .await? 
+ { + Some(number) => number, + None => break Ok(Json(TeeProofGenerationDataResponse(None))), + }; + + match self.blob_store.get(l1_batch_number).await { + Ok(input) => break Ok(Json(TeeProofGenerationDataResponse(Some(Box::new(input))))), + Err(ObjectStoreError::KeyNotFound(_)) => { + missing_range = match missing_range { + Some((start, _)) => Some((start, l1_batch_number)), + None => Some((l1_batch_number, l1_batch_number)), + }; + self.unlock_batch(l1_batch_number, request.tee_type).await?; + min_batch_number = Some(min_batch_number.unwrap_or(l1_batch_number) + 1); + } + Err(err) => { + self.unlock_batch(l1_batch_number, request.tee_type).await?; + break Err(RequestProcessorError::ObjectStore(err)); + } + } }; - let tee_verifier_input: TeeVerifierInput = self - .blob_store - .get(l1_batch_number) - .await - .map_err(RequestProcessorError::ObjectStore)?; + if let Some((start, end)) = missing_range { + tracing::warn!( + "Blobs for batch numbers {} to {} not found in the object store. Marked as unpicked.", + start, + end + ); + } + + result + } - let response = TeeProofGenerationDataResponse(Some(Box::new(tee_verifier_input))); + async fn lock_batch_for_proving( + &self, + tee_type: TeeType, + min_batch_number: Option, + ) -> Result, RequestProcessorError> { + let result = self + .pool + .connection() + .await? + .tee_proof_generation_dal() + .lock_batch_for_proving( + tee_type, + self.config.proof_generation_timeout(), + min_batch_number, + ) + .await?; + Ok(result) + } - Ok(Json(response)) + async fn unlock_batch( + &self, + l1_batch_number: L1BatchNumber, + tee_type: TeeType, + ) -> Result<(), RequestProcessorError> { + self.pool + .connection() + .await? + .tee_proof_generation_dal() + .unlock_batch(l1_batch_number, tee_type) + .await?; + Ok(()) } pub(crate) async fn submit_proof( @@ -75,11 +117,7 @@ impl TeeRequestProcessor { Json(proof): Json, ) -> Result, RequestProcessorError> { let l1_batch_number = L1BatchNumber(l1_batch_number); - let mut connection = self - .pool - .connection() - .await - .map_err(RequestProcessorError::Dal)?; + let mut connection = self.pool.connection().await?; let mut dal = connection.tee_proof_generation_dal(); tracing::info!( @@ -94,8 +132,7 @@ impl TeeRequestProcessor { &proof.0.signature, &proof.0.proof, ) - .await - .map_err(RequestProcessorError::Dal)?; + .await?; Ok(Json(SubmitProofResponse::Success)) } @@ -106,16 +143,11 @@ impl TeeRequestProcessor { ) -> Result, RequestProcessorError> { tracing::info!("Received attestation: {:?}", payload); - let mut connection = self - .pool - .connection() - .await - .map_err(RequestProcessorError::Dal)?; + let mut connection = self.pool.connection().await?; let mut dal = connection.tee_proof_generation_dal(); dal.save_attestation(&payload.pubkey, &payload.attestation) - .await - .map_err(RequestProcessorError::Dal)?; + .await?; Ok(Json(RegisterTeeAttestationResponse::Success)) } diff --git a/core/tests/ts-integration/tests/api/web3.test.ts b/core/tests/ts-integration/tests/api/web3.test.ts index f9b6232d86ca..6d9cb96845c7 100644 --- a/core/tests/ts-integration/tests/api/web3.test.ts +++ b/core/tests/ts-integration/tests/api/web3.test.ts @@ -249,14 +249,16 @@ describe.skip('web3 API compatibility tests', () => { test('Should check transactions from API / Legacy tx', async () => { const LEGACY_TX_TYPE = 0; + const gasPrice = (await alice._providerL2().getGasPrice()) * 2n; const legacyTx = await alice.sendTransaction({ type: LEGACY_TX_TYPE, - to: alice.address + to: alice.address, + gasPrice }); await 
legacyTx.wait(); const legacyApiReceipt = await alice.provider.getTransaction(legacyTx.hash); - expect(legacyApiReceipt.gasPrice).toBeLessThanOrEqual(legacyTx.gasPrice!); + expect(legacyApiReceipt.gasPrice).toEqual(gasPrice); }); test('Should check transactions from API / EIP1559 tx', async () => { diff --git a/core/tests/vm-benchmark/Cargo.toml b/core/tests/vm-benchmark/Cargo.toml index 27218d79aafe..4586c637e128 100644 --- a/core/tests/vm-benchmark/Cargo.toml +++ b/core/tests/vm-benchmark/Cargo.toml @@ -6,46 +6,30 @@ license.workspace = true publish = false [dependencies] +zksync_contracts.workspace = true +zksync_multivm.workspace = true zksync_types.workspace = true +zksync_utils.workspace = true zksync_vlog.workspace = true -zksync_vm_benchmark_harness.workspace = true +criterion.workspace = true +once_cell.workspace = true rand.workspace = true vise.workspace = true tokio.workspace = true [dev-dependencies] -criterion.workspace = true +assert_matches.workspace = true iai.workspace = true [[bench]] -name = "criterion" +name = "oneshot" harness = false [[bench]] -name = "diy_benchmark" +name = "batch" harness = false [[bench]] name = "iai" harness = false - -[[bench]] -name = "fill_bootloader" -harness = false - -[[bin]] -name = "iai_results_to_prometheus" -path = "src/iai_results_to_prometheus.rs" - -[[bin]] -name = "compare_iai_results" -path = "src/compare_iai_results.rs" - -[[bin]] -name = "find-slowest" -path = "src/find_slowest.rs" - -[[bin]] -name = "instruction-counts" -path = "src/instruction_counts.rs" diff --git a/core/tests/vm-benchmark/README.md b/core/tests/vm-benchmark/README.md index cecbdb31d0cf..b7f056894e73 100644 --- a/core/tests/vm-benchmark/README.md +++ b/core/tests/vm-benchmark/README.md @@ -9,35 +9,22 @@ benchmarks, however. There are three different benchmarking tools available: ```sh -cargo bench --bench criterion -cargo bench --bench diy_benchmark +cargo bench --bench oneshot +cargo bench --bench batch cargo +nightly bench --bench iai ``` -Criterion is the de-facto microbenchmarking tool for Rust. Run it, then optimize something and run the command again to -see if your changes have made a difference. +`oneshot` and `batch` targets use Criterion, the de-facto standard micro-benchmarking tool for Rust. `oneshot` measures +VM performance on single transactions, and `batch` on entire batches of up to 5,000 transactions. Run these benches, +then optimize something and run the command again to see if your changes have made a difference. -The DIY benchmark works a bit better in noisy environments and is used to push benchmark data to Prometheus -automatically. +IAI uses cachegrind to simulate the CPU, so noise is completely irrelevant to it, but it also doesn't measure exactly +the same thing as normal benchmarks. You need valgrind to be able to run it. -IAI uses cachegrind to simulate the CPU, so noise is completely irrelevant to it but it also doesn't measure exactly the -same thing as normal benchmarks. You need valgrind to be able to run it. - -You can add your own bytecodes to be benchmarked into the folder "deployment_benchmarks". For iai, you also need to add -them to "benches/iai.rs". +You can add new bytecodes to be benchmarked into the [`bytecodes`](src/bytecodes) directory and then add them to the +`BYTECODES` constant exported by the crate. ## Profiling (Linux only) You can also use `sh perf.sh bytecode_file` to produce data that can be fed into the [firefox profiler](https://profiler.firefox.com/) for a specific bytecode. 
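The `BYTECODES` registry mentioned in the README above is, roughly, a list of named, embedded contract bytecodes that both the Criterion and IAI benches iterate over (`Bytecode::get`, `bytecode.name`, and `bytecode.bytecode()` appear in the benches further down). A minimal, hypothetical version of such a registry is sketched below; the real definitions live in the `vm_benchmark` crate and embed the files under `src/bytecodes/`, so field names and literals here are illustrative only.

```rust
/// A named contract bytecode embedded into the benchmark binary.
#[derive(Debug, Clone, Copy)]
pub struct Bytecode {
    pub name: &'static str,
    raw: &'static [u8],
}

impl Bytecode {
    /// Looks up a registered bytecode by name, panicking on unknown names.
    pub fn get(name: &str) -> Self {
        *BYTECODES
            .iter()
            .find(|bytecode| bytecode.name == name)
            .unwrap_or_else(|| panic!("unknown bytecode: {name}"))
    }

    /// Raw bytecode bytes, e.g. to build a deployment transaction from.
    pub fn bytecode(&self) -> &'static [u8] {
        self.raw
    }
}

/// Illustrative registry; the real crate would embed each file from
/// `src/bytecodes/` (e.g. via `include_bytes!`) instead of literal bytes.
pub const BYTECODES: &[Bytecode] = &[
    Bytecode { name: "access_memory", raw: &[0x00, 0x01] },
    Bytecode { name: "deploy_simple_contract", raw: &[0x02, 0x03] },
];

fn main() {
    let bytecode = Bytecode::get("access_memory");
    println!("{} is {} bytes long", bytecode.name, bytecode.bytecode().len());
}
```

With something along these lines, adding a new benchmark input amounts to dropping a file into `src/bytecodes/` and appending one entry to the constant, which matches the workflow the README describes.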
- -## Fuzzing - -There is a fuzzer using this library at core/lib/vm/fuzz. The fuzz.sh script located there starts a fuzzer which -attempts to make cover as much code as it can to ultimately produce a valid deployment bytecode. - -It has no chance of succeeding currently because the fuzzing speed drops to 10 executions/s easily. Optimizing the VM or -lowering the gas limit will help with that. - -The fuzzer has been useful for producing synthetic benchmark inputs. It may be a good tool for finding show transactions -with a certain gas limit, an empirical way of evaluating gas prices of instructions. diff --git a/core/tests/vm-benchmark/benches/fill_bootloader.rs b/core/tests/vm-benchmark/benches/batch.rs similarity index 79% rename from core/tests/vm-benchmark/benches/fill_bootloader.rs rename to core/tests/vm-benchmark/benches/batch.rs index 13fa1df0b2fc..608f6be6d089 100644 --- a/core/tests/vm-benchmark/benches/fill_bootloader.rs +++ b/core/tests/vm-benchmark/benches/batch.rs @@ -14,17 +14,15 @@ use std::{iter, time::Duration}; -use criterion::{ - black_box, criterion_group, criterion_main, measurement::WallTime, BatchSize, BenchmarkGroup, - BenchmarkId, Criterion, Throughput, -}; +use criterion::{black_box, criterion_group, criterion_main, Criterion, Throughput}; use rand::{rngs::StdRng, Rng, SeedableRng}; -use zksync_types::Transaction; -use zksync_vm_benchmark_harness::{ - cut_to_allowed_bytecode_size, get_deploy_tx_with_gas_limit, get_heavy_load_test_tx, - get_load_test_deploy_tx, get_load_test_tx, get_realistic_load_test_tx, get_transfer_tx, - BenchmarkingVm, BenchmarkingVmFactory, Fast, Legacy, LoadTestParams, +use vm_benchmark::{ + criterion::{is_test_mode, BenchmarkGroup, BenchmarkId, CriterionExt, MeteredTime}, + get_deploy_tx_with_gas_limit, get_heavy_load_test_tx, get_load_test_deploy_tx, + get_load_test_tx, get_realistic_load_test_tx, get_transfer_tx, BenchmarkingVm, + BenchmarkingVmFactory, Bytecode, Fast, Legacy, LoadTestParams, }; +use zksync_types::Transaction; /// Gas limit for deployment transactions. 
const DEPLOY_GAS_LIMIT: u32 = 30_000_000; @@ -59,7 +57,7 @@ fn bench_vm( } fn run_vm_expecting_failures( - group: &mut BenchmarkGroup<'_, WallTime>, + group: &mut BenchmarkGroup<'_>, name: &str, txs: &[Transaction], expected_failures: &[bool], @@ -70,25 +68,24 @@ fn run_vm_expecting_failures( } group.throughput(Throughput::Elements(*txs_in_batch as u64)); - group.bench_with_input( + group.bench_metered_with_input( BenchmarkId::new(name, txs_in_batch), txs_in_batch, |bencher, &txs_in_batch| { if FULL { // Include VM initialization / drop into the measured time - bencher.iter(|| { + bencher.iter(|timer| { + let _guard = timer.start(); let mut vm = BenchmarkingVm::::default(); bench_vm::<_, true>(&mut vm, &txs[..txs_in_batch], expected_failures); }); } else { - bencher.iter_batched( - BenchmarkingVm::::default, - |mut vm| { - bench_vm::<_, false>(&mut vm, &txs[..txs_in_batch], expected_failures); - vm - }, - BatchSize::LargeInput, // VM can consume significant amount of RAM, especially the new one - ); + bencher.iter(|timer| { + let mut vm = BenchmarkingVm::::default(); + let guard = timer.start(); + bench_vm::<_, false>(&mut vm, &txs[..txs_in_batch], expected_failures); + drop(guard); + }); } }, ); @@ -96,22 +93,23 @@ fn run_vm_expecting_failures( } fn run_vm( - group: &mut BenchmarkGroup<'_, WallTime>, + group: &mut BenchmarkGroup<'_>, name: &str, txs: &[Transaction], ) { run_vm_expecting_failures::(group, name, txs, &[]); } -fn bench_fill_bootloader(c: &mut Criterion) { - let is_test_mode = !std::env::args().any(|arg| arg == "--bench"); - let txs_in_batch = if is_test_mode { +fn bench_fill_bootloader( + c: &mut Criterion, +) { + let txs_in_batch = if is_test_mode() { &TXS_IN_BATCH[..3] // Reduce the number of transactions in a batch so that tests don't take long } else { TXS_IN_BATCH }; - let mut group = c.benchmark_group(if FULL { + let mut group = c.metered_group(if FULL { format!("fill_bootloader_full{}", VM::LABEL.as_suffix()) } else { format!("fill_bootloader{}", VM::LABEL.as_suffix()) @@ -121,12 +119,12 @@ fn bench_fill_bootloader(c: &mut Cr .measurement_time(Duration::from_secs(10)); // Deploying simple contract - let test_contract = - std::fs::read("deployment_benchmarks/deploy_simple_contract").expect("failed to read file"); - let code = cut_to_allowed_bytecode_size(&test_contract).unwrap(); + let test_contract = Bytecode::get("deploy_simple_contract"); let max_txs = *txs_in_batch.last().unwrap() as u32; let txs: Vec<_> = (0..max_txs) - .map(|nonce| get_deploy_tx_with_gas_limit(code, DEPLOY_GAS_LIMIT, nonce)) + .map(|nonce| { + get_deploy_tx_with_gas_limit(test_contract.bytecode(), DEPLOY_GAS_LIMIT, nonce) + }) .collect(); run_vm::(&mut group, "deploy_simple_contract", &txs); drop(txs); @@ -187,9 +185,12 @@ fn bench_fill_bootloader(c: &mut Cr } criterion_group!( - benches, - bench_fill_bootloader::, - bench_fill_bootloader::, - bench_fill_bootloader:: + name = benches; + config = Criterion::default() + .configure_from_args() + .with_measurement(MeteredTime::new("fill_bootloader")); + targets = bench_fill_bootloader::, + bench_fill_bootloader::, + bench_fill_bootloader:: ); criterion_main!(benches); diff --git a/core/tests/vm-benchmark/benches/criterion.rs b/core/tests/vm-benchmark/benches/criterion.rs deleted file mode 100644 index 9e12fc25f54c..000000000000 --- a/core/tests/vm-benchmark/benches/criterion.rs +++ /dev/null @@ -1,98 +0,0 @@ -use std::time::Duration; - -use criterion::{ - black_box, criterion_group, criterion_main, measurement::WallTime, BatchSize, BenchmarkGroup, - 
Criterion, -}; -use zksync_types::Transaction; -use zksync_vm_benchmark_harness::{ - cut_to_allowed_bytecode_size, get_deploy_tx, get_heavy_load_test_tx, get_load_test_deploy_tx, - get_load_test_tx, get_realistic_load_test_tx, BenchmarkingVm, BenchmarkingVmFactory, Fast, - Legacy, LoadTestParams, -}; - -const SAMPLE_SIZE: usize = 20; - -fn benches_in_folder(c: &mut Criterion) { - let mut group = c.benchmark_group(VM::LABEL.as_str()); - group - .sample_size(SAMPLE_SIZE) - .measurement_time(Duration::from_secs(10)); - - for path in std::fs::read_dir("deployment_benchmarks").unwrap() { - let path = path.unwrap().path(); - - let test_contract = std::fs::read(&path).expect("failed to read file"); - - let code = cut_to_allowed_bytecode_size(&test_contract).unwrap(); - let tx = get_deploy_tx(code); - let file_name = path.file_name().unwrap().to_str().unwrap(); - let full_suffix = if FULL { "/full" } else { "" }; - let bench_name = format!("{file_name}{full_suffix}"); - group.bench_function(bench_name, |bencher| { - if FULL { - // Include VM initialization / drop into the measured time - bencher.iter(|| BenchmarkingVm::::default().run_transaction(black_box(&tx))); - } else { - bencher.iter_batched( - BenchmarkingVm::::default, - |mut vm| { - let result = vm.run_transaction(black_box(&tx)); - (vm, result) - }, - BatchSize::LargeInput, // VM can consume significant amount of RAM, especially the new one - ); - } - }); - } -} - -fn bench_load_test(c: &mut Criterion) { - let mut group = c.benchmark_group(VM::LABEL.as_str()); - group - .sample_size(SAMPLE_SIZE) - .measurement_time(Duration::from_secs(10)); - - // Nonce 0 is used for the deployment transaction - let tx = get_load_test_tx(1, 10_000_000, LoadTestParams::default()); - bench_load_test_transaction::(&mut group, "load_test", &tx); - - let tx = get_realistic_load_test_tx(1); - bench_load_test_transaction::(&mut group, "load_test_realistic", &tx); - - let tx = get_heavy_load_test_tx(1); - bench_load_test_transaction::(&mut group, "load_test_heavy", &tx); -} - -fn bench_load_test_transaction( - group: &mut BenchmarkGroup<'_, WallTime>, - name: &str, - tx: &Transaction, -) { - group.bench_function(name, |bencher| { - bencher.iter_batched( - || { - let mut vm = BenchmarkingVm::::default(); - vm.run_transaction(&get_load_test_deploy_tx()); - vm - }, - |mut vm| { - let result = vm.run_transaction(black_box(tx)); - assert!(!result.result.is_failed(), "{:?}", result.result); - (vm, result) - }, - BatchSize::LargeInput, - ); - }); -} - -criterion_group!( - benches, - benches_in_folder::, - benches_in_folder::, - benches_in_folder::, - benches_in_folder::, - bench_load_test::, - bench_load_test:: -); -criterion_main!(benches); diff --git a/core/tests/vm-benchmark/benches/diy_benchmark.rs b/core/tests/vm-benchmark/benches/diy_benchmark.rs deleted file mode 100644 index 1601de5eb85f..000000000000 --- a/core/tests/vm-benchmark/benches/diy_benchmark.rs +++ /dev/null @@ -1,53 +0,0 @@ -use std::time::{Duration, Instant}; - -use criterion::black_box; -use vise::{Gauge, LabeledFamily, Metrics}; -use zksync_vm_benchmark_harness::{cut_to_allowed_bytecode_size, get_deploy_tx, BenchmarkingVm}; - -fn main() { - let mut results = vec![]; - - for path in std::fs::read_dir("deployment_benchmarks").unwrap() { - let path = path.unwrap().path(); - - let test_contract = std::fs::read(&path).expect("failed to read file"); - - let code = cut_to_allowed_bytecode_size(&test_contract).unwrap(); - let tx = get_deploy_tx(code); - - let name = 
path.file_name().unwrap().to_str().unwrap(); - - println!("benchmarking: {}", name); - - let mut timings = vec![]; - let benchmark_start = Instant::now(); - while benchmark_start.elapsed() < Duration::from_secs(5) { - let start = Instant::now(); - BenchmarkingVm::new().run_transaction(black_box(&tx)); - timings.push(start.elapsed()); - } - - println!("{:?}", timings.iter().min().unwrap()); - results.push((name.to_owned(), timings)); - } - - if option_env!("PUSH_VM_BENCHMARKS_TO_PROMETHEUS").is_some() { - vm_benchmark::with_prometheus::with_prometheus(|| { - for (name, timings) in results { - for (i, timing) in timings.into_iter().enumerate() { - VM_BENCHMARK_METRICS.timing[&(name.clone(), i.to_string())].set(timing); - } - } - }); - } -} - -#[derive(Debug, Metrics)] -#[metrics(prefix = "vm_benchmark")] -pub(crate) struct VmBenchmarkMetrics { - #[metrics(labels = ["benchmark", "run_no"])] - pub timing: LabeledFamily<(String, String), Gauge, 2>, -} - -#[vise::register] -pub(crate) static VM_BENCHMARK_METRICS: vise::Global = vise::Global::new(); diff --git a/core/tests/vm-benchmark/benches/iai.rs b/core/tests/vm-benchmark/benches/iai.rs index 2837a2345a5a..6b8965afa4f1 100644 --- a/core/tests/vm-benchmark/benches/iai.rs +++ b/core/tests/vm-benchmark/benches/iai.rs @@ -1,14 +1,8 @@ use iai::black_box; -use zksync_vm_benchmark_harness::{ - cut_to_allowed_bytecode_size, get_deploy_tx, BenchmarkingVm, BenchmarkingVmFactory, Fast, - Legacy, -}; - -fn run_bytecode(path: &str) { - let test_contract = std::fs::read(path).expect("failed to read file"); - let code = cut_to_allowed_bytecode_size(&test_contract).unwrap(); - let tx = get_deploy_tx(code); +use vm_benchmark::{BenchmarkingVm, BenchmarkingVmFactory, Bytecode, Fast, Legacy}; +fn run_bytecode(name: &str) { + let tx = Bytecode::get(name).deploy_tx(); black_box(BenchmarkingVm::::default().run_transaction(&tx)); } @@ -16,11 +10,11 @@ macro_rules! 
make_functions_and_main { ($($file:ident => $legacy_name:ident,)+) => { $( fn $file() { - run_bytecode::(concat!("deployment_benchmarks/", stringify!($file))); + run_bytecode::(stringify!($file)); } fn $legacy_name() { - run_bytecode::(concat!("deployment_benchmarks/", stringify!($file))); + run_bytecode::(stringify!($file)); } )+ diff --git a/core/tests/vm-benchmark/benches/oneshot.rs b/core/tests/vm-benchmark/benches/oneshot.rs new file mode 100644 index 000000000000..58a90af4981f --- /dev/null +++ b/core/tests/vm-benchmark/benches/oneshot.rs @@ -0,0 +1,91 @@ +use std::time::Duration; + +use criterion::{black_box, criterion_group, criterion_main, Criterion}; +use vm_benchmark::{ + criterion::{BenchmarkGroup, CriterionExt, MeteredTime}, + get_heavy_load_test_tx, get_load_test_deploy_tx, get_load_test_tx, get_realistic_load_test_tx, + BenchmarkingVm, BenchmarkingVmFactory, Fast, Legacy, LoadTestParams, BYTECODES, +}; +use zksync_types::Transaction; + +const SAMPLE_SIZE: usize = 20; + +fn benches_in_folder(c: &mut Criterion) { + let mut group = c.metered_group(VM::LABEL.as_str()); + group + .sample_size(SAMPLE_SIZE) + .measurement_time(Duration::from_secs(10)); + + for bytecode in BYTECODES { + let tx = bytecode.deploy_tx(); + let bench_name = bytecode.name; + let full_suffix = if FULL { "/full" } else { "" }; + let bench_name = format!("{bench_name}{full_suffix}"); + + group.bench_metered(bench_name, |bencher| { + if FULL { + // Include VM initialization / drop into the measured time + bencher.iter(|timer| { + let _guard = timer.start(); + BenchmarkingVm::::default().run_transaction(black_box(&tx)); + }); + } else { + bencher.iter(|timer| { + let mut vm = BenchmarkingVm::::default(); + let guard = timer.start(); + let _result = vm.run_transaction(black_box(&tx)); + drop(guard); // do not include latency of dropping `_result` + }); + } + }); + } +} + +fn bench_load_test(c: &mut Criterion) { + let mut group = c.metered_group(VM::LABEL.as_str()); + group + .sample_size(SAMPLE_SIZE) + .measurement_time(Duration::from_secs(10)); + + // Nonce 0 is used for the deployment transaction + let tx = get_load_test_tx(1, 10_000_000, LoadTestParams::default()); + bench_load_test_transaction::(&mut group, "load_test", &tx); + + let tx = get_realistic_load_test_tx(1); + bench_load_test_transaction::(&mut group, "load_test_realistic", &tx); + + let tx = get_heavy_load_test_tx(1); + bench_load_test_transaction::(&mut group, "load_test_heavy", &tx); +} + +fn bench_load_test_transaction( + group: &mut BenchmarkGroup<'_>, + name: &str, + tx: &Transaction, +) { + group.bench_metered(name, |bencher| { + bencher.iter(|timer| { + let mut vm = BenchmarkingVm::::default(); + vm.run_transaction(&get_load_test_deploy_tx()); + + let guard = timer.start(); + let result = vm.run_transaction(black_box(tx)); + drop(guard); // do not include the latency of `result` checks / drop + assert!(!result.result.is_failed(), "{:?}", result.result); + }); + }); +} + +criterion_group!( + name = benches; + config = Criterion::default() + .configure_from_args() + .with_measurement(MeteredTime::new("criterion")); + targets = benches_in_folder::, + benches_in_folder::, + benches_in_folder::, + benches_in_folder::, + bench_load_test::, + bench_load_test:: +); +criterion_main!(benches); diff --git a/core/tests/vm-benchmark/harness/Cargo.toml b/core/tests/vm-benchmark/harness/Cargo.toml deleted file mode 100644 index a24d3fa1294a..000000000000 --- a/core/tests/vm-benchmark/harness/Cargo.toml +++ /dev/null @@ -1,19 +0,0 @@ -[package] -name = 
"zksync_vm_benchmark_harness" -version.workspace = true -edition.workspace = true -license.workspace = true -publish = false - -[dependencies] -zksync_multivm.workspace = true -zksync_types.workspace = true -zksync_state.workspace = true -zksync_utils.workspace = true -zksync_system_constants.workspace = true -zksync_contracts.workspace = true -zk_evm.workspace = true -once_cell.workspace = true - -[dev-dependencies] -assert_matches.workspace = true diff --git a/core/tests/vm-benchmark/src/parse_iai.rs b/core/tests/vm-benchmark/src/bin/common/mod.rs similarity index 98% rename from core/tests/vm-benchmark/src/parse_iai.rs rename to core/tests/vm-benchmark/src/bin/common/mod.rs index 61376b429a32..a92c9d5f710c 100644 --- a/core/tests/vm-benchmark/src/parse_iai.rs +++ b/core/tests/vm-benchmark/src/bin/common/mod.rs @@ -1,5 +1,6 @@ use std::io::BufRead; +#[derive(Debug)] pub struct IaiResult { pub name: String, pub instructions: u64, diff --git a/core/tests/vm-benchmark/src/compare_iai_results.rs b/core/tests/vm-benchmark/src/bin/compare_iai_results.rs similarity index 98% rename from core/tests/vm-benchmark/src/compare_iai_results.rs rename to core/tests/vm-benchmark/src/bin/compare_iai_results.rs index d2c9d73f7e36..faf72a18f451 100644 --- a/core/tests/vm-benchmark/src/compare_iai_results.rs +++ b/core/tests/vm-benchmark/src/bin/compare_iai_results.rs @@ -4,7 +4,9 @@ use std::{ io::{BufRead, BufReader}, }; -use vm_benchmark::parse_iai::parse_iai; +pub use crate::common::parse_iai; + +mod common; fn main() { let [iai_before, iai_after, opcodes_before, opcodes_after] = std::env::args() diff --git a/core/tests/vm-benchmark/src/bin/iai_results_to_prometheus.rs b/core/tests/vm-benchmark/src/bin/iai_results_to_prometheus.rs new file mode 100644 index 000000000000..3b3aa05bf69c --- /dev/null +++ b/core/tests/vm-benchmark/src/bin/iai_results_to_prometheus.rs @@ -0,0 +1,52 @@ +use std::{env, io::BufReader, time::Duration}; + +use tokio::sync::watch; +use vise::{Gauge, LabeledFamily, Metrics}; +use zksync_vlog::prometheus::PrometheusExporterConfig; + +use crate::common::{parse_iai, IaiResult}; + +mod common; + +#[derive(Debug, Metrics)] +#[metrics(prefix = "vm_cachegrind")] +pub(crate) struct VmCachegrindMetrics { + #[metrics(labels = ["benchmark"])] + pub instructions: LabeledFamily>, + #[metrics(labels = ["benchmark"])] + pub l1_accesses: LabeledFamily>, + #[metrics(labels = ["benchmark"])] + pub l2_accesses: LabeledFamily>, + #[metrics(labels = ["benchmark"])] + pub ram_accesses: LabeledFamily>, + #[metrics(labels = ["benchmark"])] + pub cycles: LabeledFamily>, +} + +#[vise::register] +pub(crate) static VM_CACHEGRIND_METRICS: vise::Global = vise::Global::new(); + +#[tokio::main] +async fn main() { + let results: Vec = parse_iai(BufReader::new(std::io::stdin())).collect(); + + let endpoint = env::var("BENCHMARK_PROMETHEUS_PUSHGATEWAY_URL") + .expect("`BENCHMARK_PROMETHEUS_PUSHGATEWAY_URL` env var is not set"); + let (stop_sender, stop_receiver) = watch::channel(false); + let prometheus_config = + PrometheusExporterConfig::push(endpoint.to_owned(), Duration::from_millis(100)); + tokio::spawn(prometheus_config.run(stop_receiver)); + + for result in results { + let name = result.name; + VM_CACHEGRIND_METRICS.instructions[&name.clone()].set(result.instructions); + VM_CACHEGRIND_METRICS.l1_accesses[&name.clone()].set(result.l1_accesses); + VM_CACHEGRIND_METRICS.l2_accesses[&name.clone()].set(result.l2_accesses); + VM_CACHEGRIND_METRICS.ram_accesses[&name.clone()].set(result.ram_accesses); + 
VM_CACHEGRIND_METRICS.cycles[&name].set(result.cycles); + } + + println!("Waiting for push to happen..."); + tokio::time::sleep(Duration::from_secs(1)).await; + stop_sender.send_replace(true); +} diff --git a/core/tests/vm-benchmark/src/bin/instruction_counts.rs b/core/tests/vm-benchmark/src/bin/instruction_counts.rs new file mode 100644 index 000000000000..f9bb04c01bff --- /dev/null +++ b/core/tests/vm-benchmark/src/bin/instruction_counts.rs @@ -0,0 +1,11 @@ +//! Runs all benchmarks and prints out the number of zkEVM opcodes each one executed. + +use vm_benchmark::{BenchmarkingVm, BYTECODES}; + +fn main() { + for bytecode in BYTECODES { + let tx = bytecode.deploy_tx(); + let name = bytecode.name; + println!("{name} {}", BenchmarkingVm::new().instruction_count(&tx)); + } +} diff --git a/core/tests/vm-benchmark/deployment_benchmarks/access_memory b/core/tests/vm-benchmark/src/bytecodes/access_memory similarity index 100% rename from core/tests/vm-benchmark/deployment_benchmarks/access_memory rename to core/tests/vm-benchmark/src/bytecodes/access_memory diff --git a/core/tests/vm-benchmark/deployment_benchmarks/call_far b/core/tests/vm-benchmark/src/bytecodes/call_far similarity index 100% rename from core/tests/vm-benchmark/deployment_benchmarks/call_far rename to core/tests/vm-benchmark/src/bytecodes/call_far diff --git a/core/tests/vm-benchmark/deployment_benchmarks/decode_shl_sub b/core/tests/vm-benchmark/src/bytecodes/decode_shl_sub similarity index 100% rename from core/tests/vm-benchmark/deployment_benchmarks/decode_shl_sub rename to core/tests/vm-benchmark/src/bytecodes/decode_shl_sub diff --git a/core/tests/vm-benchmark/deployment_benchmarks/deploy_simple_contract b/core/tests/vm-benchmark/src/bytecodes/deploy_simple_contract similarity index 100% rename from core/tests/vm-benchmark/deployment_benchmarks/deploy_simple_contract rename to core/tests/vm-benchmark/src/bytecodes/deploy_simple_contract diff --git a/core/tests/vm-benchmark/deployment_benchmarks/event_spam b/core/tests/vm-benchmark/src/bytecodes/event_spam similarity index 100% rename from core/tests/vm-benchmark/deployment_benchmarks/event_spam rename to core/tests/vm-benchmark/src/bytecodes/event_spam diff --git a/core/tests/vm-benchmark/deployment_benchmarks/finish_eventful_frames b/core/tests/vm-benchmark/src/bytecodes/finish_eventful_frames similarity index 100% rename from core/tests/vm-benchmark/deployment_benchmarks/finish_eventful_frames rename to core/tests/vm-benchmark/src/bytecodes/finish_eventful_frames diff --git a/core/tests/vm-benchmark/deployment_benchmarks/heap_read_write b/core/tests/vm-benchmark/src/bytecodes/heap_read_write similarity index 100% rename from core/tests/vm-benchmark/deployment_benchmarks/heap_read_write rename to core/tests/vm-benchmark/src/bytecodes/heap_read_write diff --git a/core/tests/vm-benchmark/deployment_benchmarks/slot_hash_collision b/core/tests/vm-benchmark/src/bytecodes/slot_hash_collision similarity index 100% rename from core/tests/vm-benchmark/deployment_benchmarks/slot_hash_collision rename to core/tests/vm-benchmark/src/bytecodes/slot_hash_collision diff --git a/core/tests/vm-benchmark/deployment_benchmarks/write_and_decode b/core/tests/vm-benchmark/src/bytecodes/write_and_decode similarity index 100% rename from core/tests/vm-benchmark/deployment_benchmarks/write_and_decode rename to core/tests/vm-benchmark/src/bytecodes/write_and_decode diff --git a/core/tests/vm-benchmark/src/criterion.rs b/core/tests/vm-benchmark/src/criterion.rs new file mode 100644 index 
000000000000..9515ac4ef988 --- /dev/null +++ b/core/tests/vm-benchmark/src/criterion.rs @@ -0,0 +1,477 @@ +//! Criterion helpers and extensions used to record benchmark timings as Prometheus metrics. + +use std::{ + cell::RefCell, + convert::Infallible, + env, fmt, mem, + rc::Rc, + sync::Once, + thread, + time::{Duration, Instant}, +}; + +use criterion::{ + measurement::{Measurement, ValueFormatter, WallTime}, + Criterion, Throughput, +}; +use once_cell::{sync::OnceCell as SyncOnceCell, unsync::OnceCell}; +use tokio::sync::watch; +use vise::{EncodeLabelSet, Family, Gauge, Metrics, Unit}; +use zksync_vlog::prometheus::PrometheusExporterConfig; + +/// Checks whether a benchmark binary is running in the test mode (as opposed to benchmarking). +pub fn is_test_mode() -> bool { + !env::args().any(|arg| arg == "--bench") +} + +#[derive(Debug, Clone, PartialEq, Eq, Hash, EncodeLabelSet)] +struct BenchLabels { + bin: &'static str, + group: String, + benchmark: String, + arg: Option, +} + +// We don't use histograms because benchmark results are uploaded in short bursts, which leads to missing zero values. +#[derive(Debug, Metrics)] +#[metrics(prefix = "vm_benchmark")] +struct VmBenchmarkMetrics { + /// Number of samples for a benchmark. + sample_count: Family>, + + /// Mean latency for a benchmark. + #[metrics(unit = Unit::Seconds)] + mean_timing: Family>, + /// Minimum latency for a benchmark. + #[metrics(unit = Unit::Seconds)] + min_timing: Family>, + /// Maximum latency for a benchmark. + #[metrics(unit = Unit::Seconds)] + max_timing: Family>, + /// Median latency for a benchmark. + #[metrics(unit = Unit::Seconds)] + median_timing: Family>, +} + +#[vise::register] +static METRICS: vise::Global = vise::Global::new(); + +#[derive(Debug)] +struct PrometheusRuntime { + stop_sender: watch::Sender, + _runtime: tokio::runtime::Runtime, +} + +impl Drop for PrometheusRuntime { + fn drop(&mut self) { + self.stop_sender.send_replace(true); + // Metrics are pushed automatically on exit, so we wait *after* sending a stop signal + println!("Waiting for Prometheus metrics to be pushed"); + thread::sleep(Duration::from_secs(1)); + } +} + +impl PrometheusRuntime { + fn new() -> Option { + const PUSH_INTERVAL: Duration = Duration::from_millis(100); + + let gateway_url = env::var("BENCHMARK_PROMETHEUS_PUSHGATEWAY_URL").ok()?; + let runtime = tokio::runtime::Runtime::new().expect("Failed initializing Tokio runtime"); + println!("Pushing Prometheus metrics to {gateway_url} each {PUSH_INTERVAL:?}"); + let (stop_sender, stop_receiver) = watch::channel(false); + let prometheus_config = PrometheusExporterConfig::push(gateway_url, PUSH_INTERVAL); + runtime.spawn(prometheus_config.run(stop_receiver)); + Some(Self { + stop_sender, + _runtime: runtime, + }) + } +} + +/// Guard returned by [`CurrentBenchmark::set()`] that unsets the current benchmark on drop. 
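+/// Keep this guard alive for the whole benchmark run; `CurrentBenchmark::observe()` only records timings while a benchmark is set.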
+#[must_use = "Will unset the current benchmark when dropped"] +#[derive(Debug)] +struct CurrentBenchmarkGuard; + +impl Drop for CurrentBenchmarkGuard { + fn drop(&mut self) { + CURRENT_BENCH.take(); + } +} + +#[derive(Debug)] +struct CurrentBenchmark { + metrics: &'static VmBenchmarkMetrics, + labels: BenchLabels, + observations: Vec, +} + +impl CurrentBenchmark { + fn set(metrics: &'static VmBenchmarkMetrics, labels: BenchLabels) -> CurrentBenchmarkGuard { + CURRENT_BENCH.replace(Some(Self { + metrics, + labels, + observations: vec![], + })); + CurrentBenchmarkGuard + } + + fn observe(timing: Duration) { + CURRENT_BENCH.with_borrow_mut(|this| { + if let Some(this) = this { + this.observations.push(timing); + } + }); + } +} + +impl Drop for CurrentBenchmark { + fn drop(&mut self) { + let mut observations = mem::take(&mut self.observations); + if observations.is_empty() { + return; + } + + let len = observations.len(); + self.metrics.sample_count[&self.labels].set(len); + let mean = observations + .iter() + .copied() + .sum::() + .div_f32(len as f32); + self.metrics.mean_timing[&self.labels].set(mean); + + // Could use quick median algorithm, but since there aren't that many observations expected, + // sorting looks acceptable. + observations.sort_unstable(); + let (min, max) = (observations[0], *observations.last().unwrap()); + self.metrics.min_timing[&self.labels].set(min); + self.metrics.max_timing[&self.labels].set(max); + let median = if len % 2 == 0 { + (observations[len / 2 - 1] + observations[len / 2]) / 2 + } else { + observations[len / 2] + }; + self.metrics.median_timing[&self.labels].set(median); + + println!("Exported timings: min={min:?}, max={max:?}, mean={mean:?}, median={median:?}"); + } +} + +thread_local! { + static CURRENT_BENCH: RefCell> = const { RefCell::new(None) }; +} + +static BIN_NAME: SyncOnceCell<&'static str> = SyncOnceCell::new(); + +/// Measurement for criterion that exports . +#[derive(Debug)] +pub struct MeteredTime { + _prometheus: Option, +} + +impl MeteredTime { + pub fn new(bin_name: &'static str) -> Self { + static PROMETHEUS_INIT: Once = Once::new(); + + let mut prometheus = None; + if !is_test_mode() { + PROMETHEUS_INIT.call_once(|| { + prometheus = PrometheusRuntime::new(); + }); + } + + if let Err(prev_name) = BIN_NAME.set(bin_name) { + assert_eq!(prev_name, bin_name, "attempted to redefine binary name"); + } + + Self { + _prometheus: prometheus, + } + } +} + +impl Measurement for MeteredTime { + type Intermediate = Infallible; + type Value = Duration; + + fn start(&self) -> Self::Intermediate { + // All measurements must be done via `Bencher::iter()` + unreachable!("must not be invoked directly"); + } + + fn end(&self, _: Self::Intermediate) -> Self::Value { + unreachable!("must not be invoked directly"); + } + + fn add(&self, v1: &Self::Value, v2: &Self::Value) -> Self::Value { + *v1 + *v2 + } + + fn zero(&self) -> Self::Value { + Duration::ZERO + } + + fn to_f64(&self, value: &Self::Value) -> f64 { + WallTime.to_f64(value) + } + + fn formatter(&self) -> &dyn ValueFormatter { + WallTime.formatter() + } +} + +/// Drop-in replacement for `criterion::BenchmarkId`. 
+pub struct BenchmarkId { + inner: criterion::BenchmarkId, + benchmark: String, + arg: String, +} + +impl BenchmarkId { + pub fn new, P: fmt::Display>(function_name: S, parameter: P) -> Self { + let function_name = function_name.into(); + Self { + benchmark: function_name.clone(), + arg: parameter.to_string(), + inner: criterion::BenchmarkId::new(function_name, parameter), + } + } +} + +/// Drop-in replacement for `criterion::BenchmarkGroup`. +pub struct BenchmarkGroup<'a> { + name: String, + inner: criterion::BenchmarkGroup<'a, MeteredTime>, + metrics: &'static VmBenchmarkMetrics, +} + +impl BenchmarkGroup<'_> { + pub fn sample_size(&mut self, size: usize) -> &mut Self { + self.inner.sample_size(size); + self + } + + pub fn throughput(&mut self, throughput: Throughput) -> &mut Self { + self.inner.throughput(throughput); + self + } + + pub fn measurement_time(&mut self, dur: Duration) -> &mut Self { + self.inner.measurement_time(dur); + self + } + + fn start_bench(&self, benchmark: String, arg: Option) -> CurrentBenchmarkGuard { + let labels = BenchLabels { + bin: BIN_NAME.get().copied().unwrap_or(""), + group: self.name.clone(), + benchmark, + arg, + }; + CurrentBenchmark::set(self.metrics, labels) + } + + pub fn bench_metered(&mut self, id: impl Into, mut bench_fn: F) + where + F: FnMut(&mut Bencher<'_, '_>), + { + let id = id.into(); + let _guard = self.start_bench(id.clone(), None); + self.inner + .bench_function(id, |bencher| bench_fn(&mut Bencher { inner: bencher })); + } + + pub fn bench_metered_with_input(&mut self, id: BenchmarkId, input: &I, mut bench_fn: F) + where + I: ?Sized, + F: FnMut(&mut Bencher<'_, '_>, &I), + { + let _guard = self.start_bench(id.benchmark, Some(id.arg)); + self.inner + .bench_with_input(id.inner, input, |bencher, input| { + bench_fn(&mut Bencher { inner: bencher }, input) + }); + } +} + +pub struct Bencher<'a, 'r> { + inner: &'r mut criterion::Bencher<'a, MeteredTime>, +} + +impl Bencher<'_, '_> { + pub fn iter(&mut self, mut routine: impl FnMut(BenchmarkTimer)) { + self.inner.iter_custom(move |iters| { + let mut total = Duration::ZERO; + for _ in 0..iters { + let timer = BenchmarkTimer::new(); + let observation = timer.observation.clone(); + routine(timer); + let timing = observation.get().copied().unwrap_or_default(); + CurrentBenchmark::observe(timing); + total += timing; + } + total + }) + } +} + +/// Timer for benchmarks supplied to the `Bencher::iter()` closure. +#[derive(Debug)] +#[must_use = "should be started to start measurements"] +pub struct BenchmarkTimer { + observation: Rc>, +} + +impl BenchmarkTimer { + fn new() -> Self { + Self { + observation: Rc::default(), + } + } + + /// Starts the timer. The timer will remain active until the returned guard is dropped. If you drop the timer implicitly, + /// be careful with the drop order (inverse to the variable declaration order); when in doubt, drop the guard explicitly. + pub fn start(self) -> BenchmarkTimerGuard { + BenchmarkTimerGuard { + started_at: Instant::now(), + observation: self.observation, + } + } +} + +/// Guard returned from [`BenchmarkTimer::start()`]. 
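+/// Dropping the guard stops the measurement and records the elapsed time for the current iteration.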
+#[derive(Debug)] +#[must_use = "will stop the timer on drop"] +pub struct BenchmarkTimerGuard { + started_at: Instant, + observation: Rc>, +} + +impl Drop for BenchmarkTimerGuard { + fn drop(&mut self) { + let latency = self.started_at.elapsed(); + self.observation.set(latency).ok(); + } +} + +pub trait CriterionExt { + fn metered_group(&mut self, name: impl Into) -> BenchmarkGroup<'_>; +} + +impl CriterionExt for Criterion { + fn metered_group(&mut self, name: impl Into) -> BenchmarkGroup<'_> { + let name = name.into(); + BenchmarkGroup { + inner: self.benchmark_group(name.clone()), + name, + metrics: &METRICS, + } + } +} + +#[cfg(test)] +mod tests { + use std::collections::HashSet; + + use super::*; + use crate::BYTECODES; + + fn test_benchmark(c: &mut Criterion, metrics: &'static VmBenchmarkMetrics) { + let mut group = c.metered_group("single"); + group.metrics = metrics; + for bytecode in BYTECODES { + group.bench_metered(bytecode.name, |bencher| { + bencher.iter(|timer| { + let _guard = timer.start(); + thread::sleep(Duration::from_millis(1)) + }) + }); + } + drop(group); + + let mut group = c.metered_group("with_arg"); + group.metrics = metrics; + for bytecode in BYTECODES { + for arg in [1, 10, 100] { + group.bench_metered_with_input( + BenchmarkId::new(bytecode.name, arg), + &arg, + |bencher, _arg| { + bencher.iter(|timer| { + let _guard = timer.start(); + thread::sleep(Duration::from_millis(1)) + }); + }, + ) + } + } + } + + #[test] + fn recording_benchmarks() { + let metered_time = MeteredTime::new("test"); + let metrics = &*Box::leak(Box::::default()); + + let mut criterion = Criterion::default() + .warm_up_time(Duration::from_millis(10)) + .measurement_time(Duration::from_millis(10)) + .sample_size(10) + .with_measurement(metered_time); + test_benchmark(&mut criterion, metrics); + + let timing_labels: HashSet<_> = metrics.mean_timing.to_entries().into_keys().collect(); + // Check that labels are as expected. 
+ for bytecode in BYTECODES { + assert!(timing_labels.contains(&BenchLabels { + bin: "test", + group: "single".to_owned(), + benchmark: bytecode.name.to_owned(), + arg: None, + })); + assert!(timing_labels.contains(&BenchLabels { + bin: "test", + group: "with_arg".to_owned(), + benchmark: bytecode.name.to_owned(), + arg: Some("1".to_owned()), + })); + assert!(timing_labels.contains(&BenchLabels { + bin: "test", + group: "with_arg".to_owned(), + benchmark: bytecode.name.to_owned(), + arg: Some("10".to_owned()), + })); + assert!(timing_labels.contains(&BenchLabels { + bin: "test", + group: "with_arg".to_owned(), + benchmark: bytecode.name.to_owned(), + arg: Some("100".to_owned()), + })); + } + assert_eq!( + timing_labels.len(), + 4 * BYTECODES.len(), + "{timing_labels:#?}" + ); + + // Sanity-check relations among collected metrics + for label in &timing_labels { + let mean = metrics.mean_timing[label].get(); + let min = metrics.min_timing[label].get(); + let max = metrics.max_timing[label].get(); + let median = metrics.median_timing[label].get(); + assert!( + min > Duration::ZERO, + "min={min:?}, mean={mean:?}, median = {median:?}, max={max:?}" + ); + assert!( + min <= mean && min <= median, + "min={min:?}, mean={mean:?}, median = {median:?}, max={max:?}" + ); + assert!( + mean <= max && median <= max, + "min={min:?}, mean={mean:?}, median = {median:?}, max={max:?}" + ); + } + } +} diff --git a/core/tests/vm-benchmark/src/find_slowest.rs b/core/tests/vm-benchmark/src/find_slowest.rs deleted file mode 100644 index 97a6acd5acd9..000000000000 --- a/core/tests/vm-benchmark/src/find_slowest.rs +++ /dev/null @@ -1,43 +0,0 @@ -use std::{ - io::Write, - time::{Duration, Instant}, -}; - -use zksync_vm_benchmark_harness::*; - -fn main() { - let mut results = vec![]; - - let arg = std::env::args() - .nth(1) - .expect("Expected directory of contracts to rank as first argument."); - let files = std::fs::read_dir(arg).expect("Failed to list dir"); - - let mut last_progress_update = Instant::now(); - - for (i, file) in files.enumerate() { - let path = file.unwrap().path(); - - let test_contract = std::fs::read(&path).expect("failed to read file"); - - if let Some(code) = cut_to_allowed_bytecode_size(&test_contract) { - let tx = get_deploy_tx(code); - - let start_time = Instant::now(); - BenchmarkingVm::new().run_transaction(&tx); - results.push((start_time.elapsed(), path)); - } - - if last_progress_update.elapsed() > Duration::from_millis(100) { - print!("\r{}", i); - std::io::stdout().flush().unwrap(); - last_progress_update = Instant::now(); - } - } - println!(); - - results.sort(); - for (time, path) in results.iter().rev().take(30) { - println!("{} took {:?}", path.display(), time); - } -} diff --git a/core/tests/vm-benchmark/src/iai_results_to_prometheus.rs b/core/tests/vm-benchmark/src/iai_results_to_prometheus.rs deleted file mode 100644 index d419603bae87..000000000000 --- a/core/tests/vm-benchmark/src/iai_results_to_prometheus.rs +++ /dev/null @@ -1,37 +0,0 @@ -use std::io::BufReader; - -use vise::{Gauge, LabeledFamily, Metrics}; -use vm_benchmark::parse_iai::IaiResult; - -fn main() { - let results: Vec = - vm_benchmark::parse_iai::parse_iai(BufReader::new(std::io::stdin())).collect(); - - vm_benchmark::with_prometheus::with_prometheus(|| { - for r in results { - VM_CACHEGRIND_METRICS.instructions[&r.name.clone()].set(r.instructions as f64); - VM_CACHEGRIND_METRICS.l1_accesses[&r.name.clone()].set(r.l1_accesses as f64); - VM_CACHEGRIND_METRICS.l2_accesses[&r.name.clone()].set(r.l2_accesses as 
f64); - VM_CACHEGRIND_METRICS.ram_accesses[&r.name.clone()].set(r.ram_accesses as f64); - VM_CACHEGRIND_METRICS.cycles[&r.name.clone()].set(r.cycles as f64); - } - }) -} - -#[derive(Debug, Metrics)] -#[metrics(prefix = "vm_cachegrind")] -pub(crate) struct VmCachegrindMetrics { - #[metrics(labels = ["benchmark"])] - pub instructions: LabeledFamily>, - #[metrics(labels = ["benchmark"])] - pub l1_accesses: LabeledFamily>, - #[metrics(labels = ["benchmark"])] - pub l2_accesses: LabeledFamily>, - #[metrics(labels = ["benchmark"])] - pub ram_accesses: LabeledFamily>, - #[metrics(labels = ["benchmark"])] - pub cycles: LabeledFamily>, -} - -#[vise::register] -pub(crate) static VM_CACHEGRIND_METRICS: vise::Global = vise::Global::new(); diff --git a/core/tests/vm-benchmark/harness/src/instruction_counter.rs b/core/tests/vm-benchmark/src/instruction_counter.rs similarity index 100% rename from core/tests/vm-benchmark/harness/src/instruction_counter.rs rename to core/tests/vm-benchmark/src/instruction_counter.rs diff --git a/core/tests/vm-benchmark/src/instruction_counts.rs b/core/tests/vm-benchmark/src/instruction_counts.rs deleted file mode 100644 index c038c8f2bf6b..000000000000 --- a/core/tests/vm-benchmark/src/instruction_counts.rs +++ /dev/null @@ -1,28 +0,0 @@ -//! Runs all benchmarks and prints out the number of zkEVM opcodes each one executed. - -use std::path::Path; - -use zksync_vm_benchmark_harness::{cut_to_allowed_bytecode_size, get_deploy_tx, BenchmarkingVm}; - -fn main() { - // using source file location because this is just a script, the binary isn't meant to be reused - let benchmark_folder = Path::new(file!()) - .parent() - .unwrap() - .parent() - .unwrap() - .join("deployment_benchmarks"); - - for path in std::fs::read_dir(benchmark_folder).unwrap() { - let path = path.unwrap().path(); - - let test_contract = std::fs::read(&path).expect("failed to read file"); - - let code = cut_to_allowed_bytecode_size(&test_contract).unwrap(); - let tx = get_deploy_tx(code); - - let name = path.file_name().unwrap().to_str().unwrap(); - - println!("{} {}", name, BenchmarkingVm::new().instruction_count(&tx)); - } -} diff --git a/core/tests/vm-benchmark/src/lib.rs b/core/tests/vm-benchmark/src/lib.rs index 38cc311105b3..4bd008d33196 100644 --- a/core/tests/vm-benchmark/src/lib.rs +++ b/core/tests/vm-benchmark/src/lib.rs @@ -1,2 +1,72 @@ -pub mod parse_iai; -pub mod with_prometheus; +use zksync_types::Transaction; + +pub use crate::{ + transaction::{ + get_deploy_tx, get_deploy_tx_with_gas_limit, get_heavy_load_test_tx, + get_load_test_deploy_tx, get_load_test_tx, get_realistic_load_test_tx, get_transfer_tx, + LoadTestParams, + }, + vm::{BenchmarkingVm, BenchmarkingVmFactory, Fast, Legacy, VmLabel}, +}; + +pub mod criterion; +mod instruction_counter; +mod transaction; +mod vm; + +#[derive(Debug, Clone, Copy)] +pub struct Bytecode { + pub name: &'static str, + raw_bytecode: &'static [u8], +} + +impl Bytecode { + pub fn get(name: &str) -> Self { + BYTECODES + .iter() + .find(|bytecode| bytecode.name == name) + .copied() + .unwrap_or_else(|| panic!("bytecode `{name}` is not defined")) + } + + /// Bytecodes must consist of an odd number of 32 byte words. + /// This function "fixes" bytecodes of wrong length by cutting off their end. 
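+ /// For example, a 64-byte (two-word) input is truncated to its first 32 bytes, while an input already spanning an odd number of words is returned as is.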
+ fn cut_to_allowed_bytecode_size(bytes: &[u8]) -> &[u8] { + let mut words = bytes.len() / 32; + assert!(words > 0, "bytecode is empty"); + + if words & 1 == 0 { + words -= 1; + } + &bytes[..32 * words] + } + + pub fn bytecode(&self) -> &'static [u8] { + Self::cut_to_allowed_bytecode_size(self.raw_bytecode) + } + + pub fn deploy_tx(&self) -> Transaction { + get_deploy_tx(self.bytecode()) + } +} + +macro_rules! include_bytecode { + ($name:ident) => { + Bytecode { + name: stringify!($name), + raw_bytecode: include_bytes!(concat!("bytecodes/", stringify!($name))), + } + }; +} + +pub const BYTECODES: &[Bytecode] = &[ + include_bytecode!(access_memory), + include_bytecode!(call_far), + include_bytecode!(decode_shl_sub), + include_bytecode!(deploy_simple_contract), + include_bytecode!(event_spam), + include_bytecode!(finish_eventful_frames), + include_bytecode!(heap_read_write), + include_bytecode!(slot_hash_collision), + include_bytecode!(write_and_decode), +]; diff --git a/core/tests/vm-benchmark/src/main.rs b/core/tests/vm-benchmark/src/main.rs index 925ec78ceb3c..6e2b397d746d 100644 --- a/core/tests/vm-benchmark/src/main.rs +++ b/core/tests/vm-benchmark/src/main.rs @@ -1,16 +1,10 @@ -use zksync_vm_benchmark_harness::*; +use vm_benchmark::{BenchmarkingVm, Bytecode}; fn main() { - let test_contract = std::fs::read( - std::env::args() - .nth(1) - .expect("please provide an input file"), - ) - .expect("failed to read file"); - - let code = cut_to_allowed_bytecode_size(&test_contract).unwrap(); - let tx = get_deploy_tx(code); - + let bytecode_name = std::env::args() + .nth(1) + .expect("please provide bytecode name, e.g. 'access_memory'"); + let tx = Bytecode::get(&bytecode_name).deploy_tx(); for _ in 0..100 { let mut vm = BenchmarkingVm::new(); vm.run_transaction(&tx); diff --git a/core/tests/vm-benchmark/src/transaction.rs b/core/tests/vm-benchmark/src/transaction.rs new file mode 100644 index 000000000000..90e1c6360b81 --- /dev/null +++ b/core/tests/vm-benchmark/src/transaction.rs @@ -0,0 +1,194 @@ +use once_cell::sync::Lazy; +pub use zksync_contracts::test_contracts::LoadnextContractExecutionParams as LoadTestParams; +use zksync_contracts::{deployer_contract, TestContract}; +use zksync_multivm::utils::get_max_gas_per_pubdata_byte; +use zksync_types::{ + ethabi::{encode, Token}, + fee::Fee, + l2::L2Tx, + utils::deployed_address_create, + Address, K256PrivateKey, L2ChainId, Nonce, ProtocolVersionId, Transaction, + CONTRACT_DEPLOYER_ADDRESS, H256, U256, +}; +use zksync_utils::bytecode::hash_bytecode; + +const LOAD_TEST_MAX_READS: usize = 100; + +pub(crate) static PRIVATE_KEY: Lazy = + Lazy::new(|| K256PrivateKey::from_bytes(H256([42; 32])).expect("invalid key bytes")); +static LOAD_TEST_CONTRACT_ADDRESS: Lazy
= + Lazy::new(|| deployed_address_create(PRIVATE_KEY.address(), 0.into())); + +static LOAD_TEST_CONTRACT: Lazy = Lazy::new(zksync_contracts::get_loadnext_contract); + +static CREATE_FUNCTION_SIGNATURE: Lazy<[u8; 4]> = Lazy::new(|| { + deployer_contract() + .function("create") + .unwrap() + .short_signature() +}); + +pub fn get_deploy_tx(code: &[u8]) -> Transaction { + get_deploy_tx_with_gas_limit(code, 30_000_000, 0) +} + +pub fn get_deploy_tx_with_gas_limit(code: &[u8], gas_limit: u32, nonce: u32) -> Transaction { + let mut salt = vec![0_u8; 32]; + salt[28..32].copy_from_slice(&nonce.to_be_bytes()); + let params = [ + Token::FixedBytes(salt), + Token::FixedBytes(hash_bytecode(code).0.to_vec()), + Token::Bytes([].to_vec()), + ]; + let calldata = CREATE_FUNCTION_SIGNATURE + .iter() + .cloned() + .chain(encode(¶ms)) + .collect(); + + let mut signed = L2Tx::new_signed( + CONTRACT_DEPLOYER_ADDRESS, + calldata, + Nonce(nonce), + tx_fee(gas_limit), + U256::zero(), + L2ChainId::from(270), + &PRIVATE_KEY, + vec![code.to_vec()], // maybe not needed? + Default::default(), + ) + .expect("should create a signed execute transaction"); + + signed.set_input(H256::random().as_bytes().to_vec(), H256::random()); + signed.into() +} + +fn tx_fee(gas_limit: u32) -> Fee { + Fee { + gas_limit: U256::from(gas_limit), + max_fee_per_gas: U256::from(250_000_000), + max_priority_fee_per_gas: U256::from(0), + gas_per_pubdata_limit: U256::from(get_max_gas_per_pubdata_byte( + ProtocolVersionId::latest().into(), + )), + } +} + +pub fn get_transfer_tx(nonce: u32) -> Transaction { + let mut signed = L2Tx::new_signed( + PRIVATE_KEY.address(), + vec![], // calldata + Nonce(nonce), + tx_fee(1_000_000), + 1_000_000_000.into(), // value + L2ChainId::from(270), + &PRIVATE_KEY, + vec![], // factory deps + Default::default(), // paymaster params + ) + .expect("should create a signed execute transaction"); + + signed.set_input(H256::random().as_bytes().to_vec(), H256::random()); + signed.into() +} + +pub fn get_load_test_deploy_tx() -> Transaction { + let calldata = [Token::Uint(LOAD_TEST_MAX_READS.into())]; + let params = [ + Token::FixedBytes(vec![0_u8; 32]), + Token::FixedBytes(hash_bytecode(&LOAD_TEST_CONTRACT.bytecode).0.to_vec()), + Token::Bytes(encode(&calldata)), + ]; + let create_calldata = CREATE_FUNCTION_SIGNATURE + .iter() + .cloned() + .chain(encode(¶ms)) + .collect(); + + let mut factory_deps = LOAD_TEST_CONTRACT.factory_deps.clone(); + factory_deps.push(LOAD_TEST_CONTRACT.bytecode.clone()); + + let mut signed = L2Tx::new_signed( + CONTRACT_DEPLOYER_ADDRESS, + create_calldata, + Nonce(0), + tx_fee(100_000_000), + U256::zero(), + L2ChainId::from(270), + &PRIVATE_KEY, + factory_deps, + Default::default(), + ) + .expect("should create a signed execute transaction"); + + signed.set_input(H256::random().as_bytes().to_vec(), H256::random()); + signed.into() +} + +pub fn get_load_test_tx(nonce: u32, gas_limit: u32, params: LoadTestParams) -> Transaction { + assert!( + params.reads <= LOAD_TEST_MAX_READS, + "Too many reads: {params:?}, should be <={LOAD_TEST_MAX_READS}" + ); + + let execute_function = LOAD_TEST_CONTRACT + .contract + .function("execute") + .expect("no `execute` function in load test contract"); + let calldata = execute_function + .encode_input(&vec![ + Token::Uint(U256::from(params.reads)), + Token::Uint(U256::from(params.writes)), + Token::Uint(U256::from(params.hashes)), + Token::Uint(U256::from(params.events)), + Token::Uint(U256::from(params.recursive_calls)), + Token::Uint(U256::from(params.deploys)), + 
]) + .expect("cannot encode `execute` inputs"); + + let mut signed = L2Tx::new_signed( + *LOAD_TEST_CONTRACT_ADDRESS, + calldata, + Nonce(nonce), + tx_fee(gas_limit), + U256::zero(), + L2ChainId::from(270), + &PRIVATE_KEY, + LOAD_TEST_CONTRACT.factory_deps.clone(), + Default::default(), + ) + .expect("should create a signed execute transaction"); + + signed.set_input(H256::random().as_bytes().to_vec(), H256::random()); + signed.into() +} + +pub fn get_realistic_load_test_tx(nonce: u32) -> Transaction { + get_load_test_tx( + nonce, + 10_000_000, + LoadTestParams { + reads: 30, + writes: 2, + events: 5, + hashes: 10, + recursive_calls: 0, + deploys: 0, + }, + ) +} + +pub fn get_heavy_load_test_tx(nonce: u32) -> Transaction { + get_load_test_tx( + nonce, + 10_000_000, + LoadTestParams { + reads: 100, + writes: 5, + events: 20, + hashes: 100, + recursive_calls: 20, + deploys: 5, + }, + ) +} diff --git a/core/tests/vm-benchmark/harness/src/lib.rs b/core/tests/vm-benchmark/src/vm.rs similarity index 54% rename from core/tests/vm-benchmark/harness/src/lib.rs rename to core/tests/vm-benchmark/src/vm.rs index 528dd2009728..572e2d629017 100644 --- a/core/tests/vm-benchmark/harness/src/lib.rs +++ b/core/tests/vm-benchmark/src/vm.rs @@ -1,51 +1,27 @@ use std::{cell::RefCell, rc::Rc}; use once_cell::sync::Lazy; -pub use zksync_contracts::test_contracts::LoadnextContractExecutionParams as LoadTestParams; -use zksync_contracts::{deployer_contract, BaseSystemContracts, TestContract}; +use zksync_contracts::BaseSystemContracts; use zksync_multivm::{ interface::{ storage::{InMemoryStorage, StorageView}, ExecutionResult, L1BatchEnv, L2BlockEnv, SystemEnv, TxExecutionMode, VmExecutionMode, VmExecutionResultAndLogs, VmFactory, VmInterface, VmInterfaceHistoryEnabled, }, - utils::get_max_gas_per_pubdata_byte, vm_fast, vm_latest, vm_latest::{constants::BATCH_COMPUTATIONAL_GAS_LIMIT, HistoryEnabled}, + zk_evm_latest::ethereum_types::{Address, U256}, }; use zksync_types::{ - block::L2BlockHasher, - ethabi::{encode, Token}, - fee::Fee, - fee_model::BatchFeeInput, - helpers::unix_timestamp_ms, - l2::L2Tx, - utils::{deployed_address_create, storage_key_for_eth_balance}, - Address, K256PrivateKey, L1BatchNumber, L2BlockNumber, L2ChainId, Nonce, ProtocolVersionId, - Transaction, CONTRACT_DEPLOYER_ADDRESS, H256, U256, + block::L2BlockHasher, fee_model::BatchFeeInput, helpers::unix_timestamp_ms, + utils::storage_key_for_eth_balance, L1BatchNumber, L2BlockNumber, L2ChainId, ProtocolVersionId, + Transaction, }; use zksync_utils::bytecode::hash_bytecode; -mod instruction_counter; +use crate::transaction::PRIVATE_KEY; -/// Bytecodes have consist of an odd number of 32 byte words -/// This function "fixes" bytecodes of wrong length by cutting off their end. -pub fn cut_to_allowed_bytecode_size(bytes: &[u8]) -> Option<&[u8]> { - let mut words = bytes.len() / 32; - if words == 0 { - return None; - } - - if words & 1 == 0 { - words -= 1; - } - Some(&bytes[..32 * words]) -} - -const LOAD_TEST_MAX_READS: usize = 100; - -static LOAD_TEST_CONTRACT_ADDRESS: Lazy
= - Lazy::new(|| deployed_address_create(PRIVATE_KEY.address(), 0.into())); +static SYSTEM_CONTRACTS: Lazy = Lazy::new(BaseSystemContracts::load_from_disk); static STORAGE: Lazy = Lazy::new(|| { let mut storage = InMemoryStorage::with_system_contracts(hash_bytecode); @@ -56,20 +32,6 @@ static STORAGE: Lazy = Lazy::new(|| { storage }); -static SYSTEM_CONTRACTS: Lazy = Lazy::new(BaseSystemContracts::load_from_disk); - -static LOAD_TEST_CONTRACT: Lazy = Lazy::new(zksync_contracts::get_loadnext_contract); - -static CREATE_FUNCTION_SIGNATURE: Lazy<[u8; 4]> = Lazy::new(|| { - deployer_contract() - .function("create") - .unwrap() - .short_signature() -}); - -static PRIVATE_KEY: Lazy = - Lazy::new(|| K256PrivateKey::from_bytes(H256([42; 32])).expect("invalid key bytes")); - /// VM label used to name `criterion` benchmarks. #[derive(Debug, Clone, Copy)] pub enum VmLabel { @@ -230,178 +192,17 @@ impl BenchmarkingVm { } } -pub fn get_deploy_tx(code: &[u8]) -> Transaction { - get_deploy_tx_with_gas_limit(code, 30_000_000, 0) -} - -pub fn get_deploy_tx_with_gas_limit(code: &[u8], gas_limit: u32, nonce: u32) -> Transaction { - let mut salt = vec![0_u8; 32]; - salt[28..32].copy_from_slice(&nonce.to_be_bytes()); - let params = [ - Token::FixedBytes(salt), - Token::FixedBytes(hash_bytecode(code).0.to_vec()), - Token::Bytes([].to_vec()), - ]; - let calldata = CREATE_FUNCTION_SIGNATURE - .iter() - .cloned() - .chain(encode(¶ms)) - .collect(); - - let mut signed = L2Tx::new_signed( - CONTRACT_DEPLOYER_ADDRESS, - calldata, - Nonce(nonce), - tx_fee(gas_limit), - U256::zero(), - L2ChainId::from(270), - &PRIVATE_KEY, - vec![code.to_vec()], // maybe not needed? - Default::default(), - ) - .expect("should create a signed execute transaction"); - - signed.set_input(H256::random().as_bytes().to_vec(), H256::random()); - signed.into() -} - -fn tx_fee(gas_limit: u32) -> Fee { - Fee { - gas_limit: U256::from(gas_limit), - max_fee_per_gas: U256::from(250_000_000), - max_priority_fee_per_gas: U256::from(0), - gas_per_pubdata_limit: U256::from(get_max_gas_per_pubdata_byte( - ProtocolVersionId::latest().into(), - )), - } -} - -pub fn get_transfer_tx(nonce: u32) -> Transaction { - let mut signed = L2Tx::new_signed( - PRIVATE_KEY.address(), - vec![], // calldata - Nonce(nonce), - tx_fee(1_000_000), - 1_000_000_000.into(), // value - L2ChainId::from(270), - &PRIVATE_KEY, - vec![], // factory deps - Default::default(), // paymaster params - ) - .expect("should create a signed execute transaction"); - - signed.set_input(H256::random().as_bytes().to_vec(), H256::random()); - signed.into() -} - -pub fn get_load_test_deploy_tx() -> Transaction { - let calldata = [Token::Uint(LOAD_TEST_MAX_READS.into())]; - let params = [ - Token::FixedBytes(vec![0_u8; 32]), - Token::FixedBytes(hash_bytecode(&LOAD_TEST_CONTRACT.bytecode).0.to_vec()), - Token::Bytes(encode(&calldata)), - ]; - let create_calldata = CREATE_FUNCTION_SIGNATURE - .iter() - .cloned() - .chain(encode(¶ms)) - .collect(); - - let mut factory_deps = LOAD_TEST_CONTRACT.factory_deps.clone(); - factory_deps.push(LOAD_TEST_CONTRACT.bytecode.clone()); - - let mut signed = L2Tx::new_signed( - CONTRACT_DEPLOYER_ADDRESS, - create_calldata, - Nonce(0), - tx_fee(100_000_000), - U256::zero(), - L2ChainId::from(270), - &PRIVATE_KEY, - factory_deps, - Default::default(), - ) - .expect("should create a signed execute transaction"); - - signed.set_input(H256::random().as_bytes().to_vec(), H256::random()); - signed.into() -} - -pub fn get_load_test_tx(nonce: u32, gas_limit: u32, params: 
LoadTestParams) -> Transaction { - assert!( - params.reads <= LOAD_TEST_MAX_READS, - "Too many reads: {params:?}, should be <={LOAD_TEST_MAX_READS}" - ); - - let execute_function = LOAD_TEST_CONTRACT - .contract - .function("execute") - .expect("no `execute` function in load test contract"); - let calldata = execute_function - .encode_input(&vec![ - Token::Uint(U256::from(params.reads)), - Token::Uint(U256::from(params.writes)), - Token::Uint(U256::from(params.hashes)), - Token::Uint(U256::from(params.events)), - Token::Uint(U256::from(params.recursive_calls)), - Token::Uint(U256::from(params.deploys)), - ]) - .expect("cannot encode `execute` inputs"); - - let mut signed = L2Tx::new_signed( - *LOAD_TEST_CONTRACT_ADDRESS, - calldata, - Nonce(nonce), - tx_fee(gas_limit), - U256::zero(), - L2ChainId::from(270), - &PRIVATE_KEY, - LOAD_TEST_CONTRACT.factory_deps.clone(), - Default::default(), - ) - .expect("should create a signed execute transaction"); - - signed.set_input(H256::random().as_bytes().to_vec(), H256::random()); - signed.into() -} - -pub fn get_realistic_load_test_tx(nonce: u32) -> Transaction { - get_load_test_tx( - nonce, - 10_000_000, - LoadTestParams { - reads: 30, - writes: 2, - events: 5, - hashes: 10, - recursive_calls: 0, - deploys: 0, - }, - ) -} - -pub fn get_heavy_load_test_tx(nonce: u32) -> Transaction { - get_load_test_tx( - nonce, - 10_000_000, - LoadTestParams { - reads: 100, - writes: 5, - events: 20, - hashes: 100, - recursive_calls: 20, - deploys: 5, - }, - ) -} - #[cfg(test)] mod tests { use assert_matches::assert_matches; use zksync_contracts::read_bytecode; use zksync_multivm::interface::ExecutionResult; - use crate::*; + use super::*; + use crate::{ + get_deploy_tx, get_heavy_load_test_tx, get_load_test_deploy_tx, get_load_test_tx, + get_realistic_load_test_tx, get_transfer_tx, LoadTestParams, + }; #[test] fn can_deploy_contract() { diff --git a/core/tests/vm-benchmark/src/with_prometheus.rs b/core/tests/vm-benchmark/src/with_prometheus.rs deleted file mode 100644 index f9b79adedc09..000000000000 --- a/core/tests/vm-benchmark/src/with_prometheus.rs +++ /dev/null @@ -1,27 +0,0 @@ -use std::time::Duration; - -use tokio::sync::watch; -use zksync_vlog::prometheus::PrometheusExporterConfig; - -pub fn with_prometheus(f: F) { - tokio::runtime::Runtime::new() - .unwrap() - .block_on(with_prometheus_async(f)); -} - -async fn with_prometheus_async(f: F) { - println!("Pushing results to Prometheus"); - - let endpoint = - "http://vmagent.stage.matterlabs.corp/api/v1/import/prometheus/metrics/job/vm-benchmark"; - let (stop_sender, stop_receiver) = watch::channel(false); - let prometheus_config = - PrometheusExporterConfig::push(endpoint.to_owned(), Duration::from_millis(100)); - tokio::spawn(prometheus_config.run(stop_receiver)); - - f(); - - println!("Waiting for push to happen..."); - tokio::time::sleep(Duration::from_secs(1)).await; - stop_sender.send_replace(true); -} diff --git a/deny.toml b/deny.toml index 3ed6dcb74413..1e4a30ad6231 100644 --- a/deny.toml +++ b/deny.toml @@ -6,9 +6,7 @@ vulnerability = "deny" unmaintained = "warn" yanked = "warn" notice = "warn" -ignore = [ - "RUSTSEC-2024-0363", # allows sqlx@0.8.0 until fix is released, more here -- https://github.com/launchbadge/sqlx/issues/3440 -] +ignore = [] [licenses] unlicensed = "deny" diff --git a/docker/contract-verifier/Dockerfile b/docker/contract-verifier/Dockerfile index 6f7df349d66f..7ed1906b8574 100644 --- a/docker/contract-verifier/Dockerfile +++ b/docker/contract-verifier/Dockerfile @@ -34,7 +34,7 
@@ RUN mkdir -p /etc/zksolc-bin/vm-1.5.0-a167aa3 && \ chmod +x /etc/zksolc-bin/vm-1.5.0-a167aa3/zksolc # install zksolc 1.5.x -RUN for VERSION in $(seq -f "v1.5.%g" 0 2); do \ +RUN for VERSION in $(seq -f "v1.5.%g" 0 3); do \ mkdir -p /etc/zksolc-bin/$VERSION && \ wget https://github.com/matter-labs/zksolc-bin/raw/main/linux-amd64/zksolc-linux-amd64-musl-$VERSION -O /etc/zksolc-bin/$VERSION/zksolc && \ chmod +x /etc/zksolc-bin/$VERSION/zksolc; \ @@ -55,7 +55,7 @@ RUN for VERSION in $(seq -f "v1.4.%g" 0 1); do \ done # install zkvyper 1.5.x -RUN for VERSION in $(seq -f "v1.5.%g" 0 3); do \ +RUN for VERSION in $(seq -f "v1.5.%g" 0 4); do \ mkdir -p /etc/zkvyper-bin/$VERSION && \ wget https://github.com/matter-labs/zkvyper-bin/raw/main/linux-amd64/zkvyper-linux-amd64-musl-$VERSION -O /etc/zkvyper-bin/$VERSION/zkvyper && \ chmod +x /etc/zkvyper-bin/$VERSION/zkvyper; \ diff --git a/docker/prover-job-monitor/Dockerfile b/docker/prover-job-monitor/Dockerfile new file mode 100644 index 000000000000..25d5dcd3af95 --- /dev/null +++ b/docker/prover-job-monitor/Dockerfile @@ -0,0 +1,15 @@ +FROM matterlabs/zksync-build-base:latest as builder + +ARG DEBIAN_FRONTEND=noninteractive + +WORKDIR /usr/src/zksync +COPY . . + +RUN cd prover && cargo build --release --bin zksync_prover_job_monitor + +FROM debian:bookworm-slim +RUN apt-get update && apt-get install -y curl libpq5 ca-certificates && rm -rf /var/lib/apt/lists/* + +COPY --from=builder /usr/src/zksync/prover/target/release/zksync_prover_job_monitor /usr/bin/ + +ENTRYPOINT ["/usr/bin/zksync_prover_job_monitor"] diff --git a/docs/guides/setup-dev.md b/docs/guides/setup-dev.md index f656eab0fdc6..10eb329628c1 100644 --- a/docs/guides/setup-dev.md +++ b/docs/guides/setup-dev.md @@ -43,7 +43,7 @@ yarn set version 1.22.19 # For running unit tests cargo install cargo-nextest # SQL tools -cargo install sqlx-cli --version 0.8.0 +cargo install sqlx-cli --version 0.8.1 # Foundry curl -L https://foundry.paradigm.xyz | bash @@ -217,7 +217,7 @@ SQLx is a Rust library we use to interact with Postgres, and its CLI is used to features of the library. ```bash -cargo install --locked sqlx-cli --version 0.8.0 +cargo install --locked sqlx-cli --version 0.8.1 ``` ## Easier method using `nix` diff --git a/etc/contracts-test-data/contracts/counter/counter.sol b/etc/contracts-test-data/contracts/counter/counter.sol index 748ab91aa70f..c0f4bda130d0 100644 --- a/etc/contracts-test-data/contracts/counter/counter.sol +++ b/etc/contracts-test-data/contracts/counter/counter.sol @@ -5,7 +5,7 @@ pragma solidity ^0.8.0; contract Counter { uint256 value; - function increment(uint256 x) public { + function increment(uint256 x) external { value += x; } diff --git a/etc/contracts-test-data/contracts/counter/proxy_counter.sol b/etc/contracts-test-data/contracts/counter/proxy_counter.sol new file mode 100644 index 000000000000..1c1883cd4c9d --- /dev/null +++ b/etc/contracts-test-data/contracts/counter/proxy_counter.sol @@ -0,0 +1,22 @@ +// SPDX-License-Identifier: MIT OR Apache-2.0 + +pragma solidity ^0.8.0; + +interface ICounter { + function increment(uint256 x) external; +} + +contract ProxyCounter { + ICounter counter; + + constructor(ICounter _counter) { + counter = _counter; + } + + function increment(uint256 x, uint gasToPass) public { + while (gasleft() > gasToPass) { + // Burn gas so that there's about `gasToPass` left before the external call. 
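+ // The loop body is intentionally empty: re-evaluating `gasleft()` and the comparison on every iteration is what consumes the gas.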
+ } + counter.increment(x); + } +} diff --git a/etc/env/base/chain.toml b/etc/env/base/chain.toml index 57744333116e..05c2fa9729db 100644 --- a/etc/env/base/chain.toml +++ b/etc/env/base/chain.toml @@ -90,8 +90,8 @@ fee_model_version = "V2" validation_computational_gas_limit = 300000 save_call_traces = true -bootloader_hash = "0x010008ebd07a24010d2cf7f75a10a73d387b84bd026586b6502e5059f4dbc475" -default_aa_hash = "0x0100055dd4b983f1999e4591b19086b90a4c27d304424f2af57bea693526e4ca" +bootloader_hash = "0x010008c79a8fece61d5d29508af0214834522fb17f3419f7df7400cd2776a9d5" +default_aa_hash = "0x0100055da05bf3eb2d670dec0f54ebbdacdfc0dba488f0c0b57738a69127a5d0" protective_reads_persistence_enabled = false diff --git a/etc/env/base/contracts.toml b/etc/env/base/contracts.toml index 0e7ad442767e..244db08e6dbc 100644 --- a/etc/env/base/contracts.toml +++ b/etc/env/base/contracts.toml @@ -28,11 +28,13 @@ RECURSION_NODE_LEVEL_VK_HASH = "0x1186ec268d49f1905f8d9c1e9d39fc33e98c74f91d91a2 RECURSION_LEAF_LEVEL_VK_HASH = "0x101e08b00193e529145ee09823378ef51a3bc8966504064f1f6ba3f1ba863210" RECURSION_CIRCUITS_SET_VKS_HASH = "0x18c1639094f58177409186e8c48d9f577c9410901d2f1d486b3e7d6cf553ae4c" GENESIS_TX_HASH = "0xb99ebfea46cbe05a21cd80fe5597d97b204befc52a16303f579c607dc1ac2e2e" +GENESIS_ROOT = "0xdc891cfaf85ba2cab541db37d6deac74e35cdf4a7e6eacbce5c49d9fee4d059b" +GENESIS_BATCH_COMMITMENT = "0xe09426f45a55576aeafa378f9722c0c9ace5306a9e7a2d93f5a3592879571e65" PRIORITY_TX_MAX_GAS_LIMIT = 72000000 DEPLOY_L2_BRIDGE_COUNTERPART_GAS_LIMIT = 10000000 GENESIS_ROLLUP_LEAF_INDEX = "71" GENESIS_PROTOCOL_VERSION = "25" -GENESIS_PROTOCOL_SEMANTIC_VERSION = "0.25.1" +GENESIS_PROTOCOL_SEMANTIC_VERSION = "0.25.2" L1_WETH_BRIDGE_IMPL_ADDR = "0x5E6D086F5eC079ADFF4FB3774CDf3e8D6a34F7E9" L1_WETH_BRIDGE_PROXY_ADDR = "0x5E6D086F5eC079ADFF4FB3774CDf3e8D6a34F7E9" L1_WETH_TOKEN_ADDR = "0x5E6D086F5eC079ADFF4FB3774CDf3e8D6a34F7E9" @@ -41,9 +43,6 @@ L2_WETH_TOKEN_IMPL_ADDR = "0x5E6D086F5eC079ADFF4FB3774CDf3e8D6a34F7E9" L2_WETH_TOKEN_PROXY_ADDR = "0x5E6D086F5eC079ADFF4FB3774CDf3e8D6a34F7E9" BLOB_VERSIONED_HASH_RETRIEVER_ADDR = "0x0000000000000000000000000000000000000000" -GENESIS_ROOT = "0x983953c1543a88f574de41de25e932a80f11827d28613be27ad51891601640e7" -GENESIS_BATCH_COMMITMENT = "0xa3f7b359f67c752d148422434eeda761af80d7fa670f6aca9021592c3e365c92" - # Ecosystem-wide params L1_ROLLUP_DA_VALIDATOR = "0x0000000000000000000000000000000000000000" L1_VALIDIUM_DA_VALIDATOR = "0x0000000000000000000000000000000000000000" diff --git a/etc/env/file_based/general.yaml b/etc/env/file_based/general.yaml index 8e7e6eca4280..19921cf536c4 100644 --- a/etc/env/file_based/general.yaml +++ b/etc/env/file_based/general.yaml @@ -140,7 +140,7 @@ prover: file_backed: file_backed_base_path: artifacts max_retries: 10 - setup_data_path: vk_setup_data_generator_server_fri/data + setup_data_path: crates/bin/vk_setup_data_generator_server_fri/data prometheus_port: 3315 max_attempts: 10 generation_timeout_in_secs: 600 diff --git a/etc/multivm_bootloaders/vm_protocol_defense/fee_estimate.yul/fee_estimate.yul.zbin b/etc/multivm_bootloaders/vm_protocol_defense/fee_estimate.yul/fee_estimate.yul.zbin new file mode 100644 index 000000000000..f1e4fea448d4 Binary files /dev/null and b/etc/multivm_bootloaders/vm_protocol_defense/fee_estimate.yul/fee_estimate.yul.zbin differ diff --git a/etc/multivm_bootloaders/vm_protocol_defense/gas_test.yul/gas_test.yul.zbin b/etc/multivm_bootloaders/vm_protocol_defense/gas_test.yul/gas_test.yul.zbin new file mode 100644 index 
000000000000..febc7363df05 Binary files /dev/null and b/etc/multivm_bootloaders/vm_protocol_defense/gas_test.yul/gas_test.yul.zbin differ diff --git a/etc/multivm_bootloaders/vm_protocol_defense/playground_batch.yul/playground_batch.yul.zbin b/etc/multivm_bootloaders/vm_protocol_defense/playground_batch.yul/playground_batch.yul.zbin new file mode 100644 index 000000000000..8a27d4617fdb Binary files /dev/null and b/etc/multivm_bootloaders/vm_protocol_defense/playground_batch.yul/playground_batch.yul.zbin differ diff --git a/etc/multivm_bootloaders/vm_protocol_defense/proved_batch.yul/proved_batch.yul.zbin b/etc/multivm_bootloaders/vm_protocol_defense/proved_batch.yul/proved_batch.yul.zbin new file mode 100644 index 000000000000..c784db5a53e8 Binary files /dev/null and b/etc/multivm_bootloaders/vm_protocol_defense/proved_batch.yul/proved_batch.yul.zbin differ diff --git a/infrastructure/zk/src/docker.ts b/infrastructure/zk/src/docker.ts index 76576fd243cb..27de68d1d98d 100644 --- a/infrastructure/zk/src/docker.ts +++ b/infrastructure/zk/src/docker.ts @@ -12,6 +12,7 @@ const IMAGES = [ 'prover-gpu-fri', 'witness-vector-generator', 'prover-fri-gateway', + 'prover-job-monitor', 'proof-fri-gpu-compressor', 'snapshots-creator', 'verified-sources-fetcher' @@ -73,6 +74,7 @@ function defaultTagList(image: string, imageTagSha: string, imageTagShaTS: strin 'external-node', 'contract-verifier', 'prover-fri-gateway', + 'prover-job-monitor', 'snapshots-creator' ].includes(image) ? ['latest', 'latest2.0', `2.0-${imageTagSha}`, `${imageTagSha}`, `2.0-${imageTagShaTS}`, `${imageTagShaTS}`] diff --git a/prover/Cargo.lock b/prover/Cargo.lock index 7a29ce3ab561..b67e4117d78b 100644 --- a/prover/Cargo.lock +++ b/prover/Cargo.lock @@ -839,12 +839,13 @@ dependencies = [ [[package]] name = "cc" -version = "1.1.6" +version = "1.1.14" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2aba8f4e9906c7ce3c73463f62a7f0c65183ada1a2d47e397cc8810827f9694f" +checksum = "50d2eb3cd3d1bf4529e31c215ee6f93ec5a3d536d9f578f93d9d33ee19562932" dependencies = [ "jobserver", "libc", + "shlex", ] [[package]] @@ -3281,9 +3282,9 @@ dependencies = [ [[package]] name = "libsqlite3-sys" -version = "0.28.0" +version = "0.30.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0c10584274047cb335c23d3e61bcef8e323adae7c5c8c760540f73610177fc3f" +checksum = "2e99fb7a497b1e3339bc746195567ed8d3e24945ecd636e3619d20b9de9e9149" dependencies = [ "cc", "pkg-config", @@ -5743,9 +5744,9 @@ dependencies = [ [[package]] name = "sqlx" -version = "0.8.0" +version = "0.8.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "27144619c6e5802f1380337a209d2ac1c431002dd74c6e60aebff3c506dc4f0c" +checksum = "fcfa89bea9500db4a0d038513d7a060566bfc51d46d1c014847049a45cce85e8" dependencies = [ "sqlx-core", "sqlx-macros", @@ -5756,9 +5757,9 @@ dependencies = [ [[package]] name = "sqlx-core" -version = "0.8.0" +version = "0.8.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a999083c1af5b5d6c071d34a708a19ba3e02106ad82ef7bbd69f5e48266b613b" +checksum = "d06e2f2bd861719b1f3f0c7dbe1d80c30bf59e76cf019f07d9014ed7eefb8e08" dependencies = [ "atoi", "bigdecimal", @@ -5800,9 +5801,9 @@ dependencies = [ [[package]] name = "sqlx-macros" -version = "0.8.0" +version = "0.8.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a23217eb7d86c584b8cbe0337b9eacf12ab76fe7673c513141ec42565698bb88" +checksum = 
"2f998a9defdbd48ed005a89362bd40dd2117502f15294f61c8d47034107dbbdc" dependencies = [ "proc-macro2 1.0.86", "quote 1.0.36", @@ -5813,9 +5814,9 @@ dependencies = [ [[package]] name = "sqlx-macros-core" -version = "0.8.0" +version = "0.8.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1a099220ae541c5db479c6424bdf1b200987934033c2584f79a0e1693601e776" +checksum = "3d100558134176a2629d46cec0c8891ba0be8910f7896abfdb75ef4ab6f4e7ce" dependencies = [ "dotenvy", "either", @@ -5839,9 +5840,9 @@ dependencies = [ [[package]] name = "sqlx-mysql" -version = "0.8.0" +version = "0.8.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5afe4c38a9b417b6a9a5eeffe7235d0a106716495536e7727d1c7f4b1ff3eba6" +checksum = "936cac0ab331b14cb3921c62156d913e4c15b74fb6ec0f3146bd4ef6e4fb3c12" dependencies = [ "atoi", "base64 0.22.1", @@ -5884,9 +5885,9 @@ dependencies = [ [[package]] name = "sqlx-postgres" -version = "0.8.0" +version = "0.8.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b1dbb157e65f10dbe01f729339c06d239120221c9ad9fa0ba8408c4cc18ecf21" +checksum = "9734dbce698c67ecf67c442f768a5e90a49b2a4d61a9f1d59f73874bd4cf0710" dependencies = [ "atoi", "base64 0.22.1", @@ -5927,9 +5928,9 @@ dependencies = [ [[package]] name = "sqlx-sqlite" -version = "0.8.0" +version = "0.8.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9b2cdd83c008a622d94499c0006d8ee5f821f36c89b7d625c900e5dc30b5c5ee" +checksum = "a75b419c3c1b1697833dd927bdc4c6545a620bc1bbafabd44e1efbe9afcd337e" dependencies = [ "atoi", "chrono", @@ -6833,7 +6834,7 @@ dependencies = [ [[package]] name = "vm2" version = "0.1.0" -source = "git+https://github.com/matter-labs/vm2.git?rev=9a38900d7af9b1d72b47ce3be980e77c1239a61d#9a38900d7af9b1d72b47ce3be980e77c1239a61d" +source = "git+https://github.com/matter-labs/vm2.git?rev=2276b7b5af520fca0477bdafe43781b51896d235#2276b7b5af520fca0477bdafe43781b51896d235" dependencies = [ "enum_dispatch", "primitive-types", diff --git a/prover/Cargo.toml b/prover/Cargo.toml index 9a1a50a2ddb5..88b5b626704b 100644 --- a/prover/Cargo.toml +++ b/prover/Cargo.toml @@ -45,7 +45,7 @@ serde = "1.0" serde_derive = "1.0" serde_json = "1.0" sha3 = "0.10.8" -sqlx = { version = "0.8.0", default-features = false } +sqlx = { version = "0.8.1", default-features = false } structopt = "0.3.26" strum = { version = "0.26" } tempfile = "3" diff --git a/prover/crates/bin/proof_fri_compressor/src/compressor.rs b/prover/crates/bin/proof_fri_compressor/src/compressor.rs index dc5ca939d9b4..067114ca5a6c 100644 --- a/prover/crates/bin/proof_fri_compressor/src/compressor.rs +++ b/prover/crates/bin/proof_fri_compressor/src/compressor.rs @@ -35,6 +35,7 @@ pub struct ProofCompressor { compression_mode: u8, max_attempts: u32, protocol_version: ProtocolSemanticVersion, + setup_data_path: String, } impl ProofCompressor { @@ -44,6 +45,7 @@ impl ProofCompressor { compression_mode: u8, max_attempts: u32, protocol_version: ProtocolSemanticVersion, + setup_data_path: String, ) -> Self { Self { blob_store, @@ -51,6 +53,7 @@ impl ProofCompressor { compression_mode, max_attempts, protocol_version, + setup_data_path, } } @@ -59,8 +62,9 @@ impl ProofCompressor { l1_batch: L1BatchNumber, proof: ZkSyncRecursionLayerProof, _compression_mode: u8, + setup_data_path: String, ) -> anyhow::Result { - let keystore = Keystore::default(); + let keystore = Keystore::new_with_setup_data_path(setup_data_path); let scheduler_vk = keystore 
.load_recursive_layer_verification_key( ZkSyncRecursionLayerStorageType::SchedulerCircuit as u8, @@ -136,7 +140,7 @@ impl JobProcessor for ProofCompressor { .get_scheduler_proof_job_id(l1_batch_number) .await else { - return Ok(None); + anyhow::bail!("Scheduler proof is missing from database for batch {l1_batch_number}"); }; tracing::info!( "Started proof compression for L1 batch: {:?}", @@ -174,8 +178,9 @@ impl JobProcessor for ProofCompressor { ) -> JoinHandle> { let compression_mode = self.compression_mode; let block_number = *job_id; + let setup_data_path = self.setup_data_path.clone(); tokio::task::spawn_blocking(move || { - Self::compress_proof(block_number, job, compression_mode) + Self::compress_proof(block_number, job, compression_mode, setup_data_path) }) } diff --git a/prover/crates/bin/proof_fri_compressor/src/main.rs b/prover/crates/bin/proof_fri_compressor/src/main.rs index a1a8ac90253e..e2086b228b69 100644 --- a/prover/crates/bin/proof_fri_compressor/src/main.rs +++ b/prover/crates/bin/proof_fri_compressor/src/main.rs @@ -59,6 +59,7 @@ async fn main() -> anyhow::Result<()> { let object_store_config = ProverObjectStoreConfig( general_config .prover_config + .clone() .expect("ProverConfig") .prover_object_store .context("ProverObjectStoreConfig")?, @@ -75,6 +76,10 @@ async fn main() -> anyhow::Result<()> { config.compression_mode, config.max_attempts, protocol_version, + general_config + .prover_config + .expect("ProverConfig doesn't exist") + .setup_data_path, ); let (stop_sender, stop_receiver) = watch::channel(false); diff --git a/prover/crates/bin/prover_cli/src/cli.rs b/prover/crates/bin/prover_cli/src/cli.rs index 0c7022cae297..41ef94980056 100644 --- a/prover/crates/bin/prover_cli/src/cli.rs +++ b/prover/crates/bin/prover_cli/src/cli.rs @@ -2,7 +2,8 @@ use clap::{command, Args, Parser, Subcommand}; use zksync_types::url::SensitiveUrl; use crate::commands::{ - config, debug_proof, delete, get_file_info, requeue, restart, stats, status::StatusCommand, + config, debug_proof, delete, get_file_info, insert_batch, insert_version, requeue, restart, + stats, status::StatusCommand, }; pub const VERSION_STRING: &str = env!("CARGO_PKG_VERSION"); @@ -27,6 +28,8 @@ impl ProverCLI { ProverCommand::Restart(args) => restart::run(args).await?, ProverCommand::DebugProof(args) => debug_proof::run(args).await?, ProverCommand::Stats(args) => stats::run(args, self.config).await?, + ProverCommand::InsertVersion(args) => insert_version::run(args, self.config).await?, + ProverCommand::InsertBatch(args) => insert_batch::run(args, self.config).await?, }; Ok(()) } @@ -55,4 +58,6 @@ pub enum ProverCommand { Restart(restart::Args), #[command(about = "Displays L1 Batch proving stats for a given period")] Stats(stats::Options), + InsertVersion(insert_version::Args), + InsertBatch(insert_batch::Args), } diff --git a/prover/crates/bin/prover_cli/src/commands/insert_batch.rs b/prover/crates/bin/prover_cli/src/commands/insert_batch.rs new file mode 100644 index 000000000000..add1474633d7 --- /dev/null +++ b/prover/crates/bin/prover_cli/src/commands/insert_batch.rs @@ -0,0 +1,43 @@ +use anyhow::Context as _; +use clap::Args as ClapArgs; +use zksync_basic_types::{ + protocol_version::{ProtocolSemanticVersion, ProtocolVersionId, VersionPatch}, + L1BatchNumber, +}; +use zksync_db_connection::connection_pool::ConnectionPool; +use zksync_prover_dal::{Prover, ProverDal}; + +use crate::cli::ProverCLIConfig; + +#[derive(ClapArgs)] +pub struct Args { + #[clap(short, long)] + pub number: L1BatchNumber, + 
#[clap(short, long)] + pub version: u16, + #[clap(short, long)] + pub patch: u32, +} + +pub async fn run(args: Args, config: ProverCLIConfig) -> anyhow::Result<()> { + let connection = ConnectionPool::::singleton(config.db_url) + .build() + .await + .context("failed to build a prover_connection_pool")?; + let mut conn = connection.connection().await.unwrap(); + + let protocol_version = ProtocolVersionId::try_from(args.version) + .map_err(|_| anyhow::anyhow!("Invalid protocol version"))?; + + let protocol_version_patch = VersionPatch(args.patch); + + conn.fri_witness_generator_dal() + .save_witness_inputs( + args.number, + &format!("witness_inputs_{}", args.number.0), + ProtocolSemanticVersion::new(protocol_version, protocol_version_patch), + ) + .await; + + Ok(()) +} diff --git a/prover/crates/bin/prover_cli/src/commands/insert_version.rs b/prover/crates/bin/prover_cli/src/commands/insert_version.rs new file mode 100644 index 000000000000..7f30719a713b --- /dev/null +++ b/prover/crates/bin/prover_cli/src/commands/insert_version.rs @@ -0,0 +1,52 @@ +use std::str::FromStr; + +use anyhow::Context as _; +use clap::Args as ClapArgs; +use zksync_basic_types::{ + protocol_version::{ + L1VerifierConfig, ProtocolSemanticVersion, ProtocolVersionId, VersionPatch, + }, + H256, +}; +use zksync_db_connection::connection_pool::ConnectionPool; +use zksync_prover_dal::{Prover, ProverDal}; + +use crate::cli::ProverCLIConfig; + +#[derive(ClapArgs)] +pub struct Args { + #[clap(short, long)] + pub version: u16, + #[clap(short, long)] + pub patch: u32, + #[clap(short, long)] + pub snark_wrapper: String, +} + +pub async fn run(args: Args, config: ProverCLIConfig) -> anyhow::Result<()> { + let connection = ConnectionPool::::singleton(config.db_url) + .build() + .await + .context("failed to build a prover_connection_pool")?; + let mut conn = connection.connection().await.unwrap(); + + let protocol_version = ProtocolVersionId::try_from(args.version) + .map_err(|_| anyhow::anyhow!("Invalid protocol version"))?; + + let protocol_version_patch = VersionPatch(args.patch); + + let snark_wrapper = H256::from_str(&args.snark_wrapper).unwrap_or_else(|_| { + panic!("Invalid snark wrapper hash"); + }); + + conn.fri_protocol_versions_dal() + .save_prover_protocol_version( + ProtocolSemanticVersion::new(protocol_version, protocol_version_patch), + L1VerifierConfig { + recursion_scheduler_level_vk_hash: snark_wrapper, + }, + ) + .await; + + Ok(()) +} diff --git a/prover/crates/bin/prover_cli/src/commands/mod.rs b/prover/crates/bin/prover_cli/src/commands/mod.rs index d9dde52284b4..bafe229884b9 100644 --- a/prover/crates/bin/prover_cli/src/commands/mod.rs +++ b/prover/crates/bin/prover_cli/src/commands/mod.rs @@ -2,6 +2,8 @@ pub(crate) mod config; pub(crate) mod debug_proof; pub(crate) mod delete; pub(crate) mod get_file_info; +pub(crate) mod insert_batch; +pub(crate) mod insert_version; pub(crate) mod requeue; pub(crate) mod restart; pub(crate) mod stats; diff --git a/prover/crates/bin/prover_fri/src/gpu_prover_job_processor.rs b/prover/crates/bin/prover_fri/src/gpu_prover_job_processor.rs index 4407dbcd8523..dc8594cbdc1b 100644 --- a/prover/crates/bin/prover_fri/src/gpu_prover_job_processor.rs +++ b/prover/crates/bin/prover_fri/src/gpu_prover_job_processor.rs @@ -112,7 +112,8 @@ pub mod gpu_prover { .clone(), SetupLoadMode::FromDisk => { let started_at = Instant::now(); - let keystore = Keystore::default(); + let keystore = + Keystore::new_with_setup_data_path(self.config.setup_data_path.clone()); let artifact: 
GoldilocksGpuProverSetupData = keystore .load_gpu_setup_data_for_circuit_type(key.clone()) .context("load_gpu_setup_data_for_circuit_type()")?; @@ -347,7 +348,7 @@ pub mod gpu_prover { &config.specialized_group_id, prover_setup_metadata_list ); - let keystore = Keystore::default(); + let keystore = Keystore::new_with_setup_data_path(config.setup_data_path.clone()); for prover_setup_metadata in prover_setup_metadata_list { let key = setup_metadata_to_setup_data_key(&prover_setup_metadata); let setup_data = keystore diff --git a/prover/crates/bin/prover_fri/src/prover_job_processor.rs b/prover/crates/bin/prover_fri/src/prover_job_processor.rs index 09c9d38348ff..2df1b626497f 100644 --- a/prover/crates/bin/prover_fri/src/prover_job_processor.rs +++ b/prover/crates/bin/prover_fri/src/prover_job_processor.rs @@ -85,7 +85,8 @@ impl Prover { .clone(), SetupLoadMode::FromDisk => { let started_at = Instant::now(); - let keystore = Keystore::default(); + let keystore = + Keystore::new_with_setup_data_path(self.config.setup_data_path.clone()); let artifact: GoldilocksProverSetupData = keystore .load_cpu_setup_data_for_circuit_type(key.clone()) .context("get_cpu_setup_data_for_circuit_type()")?; @@ -298,7 +299,7 @@ pub fn load_setup_data_cache(config: &FriProverConfig) -> anyhow::Result anyhow::Result<()> { let general_config = load_general_config(opt.config_path).context("general config")?; - println!("general_config = {general_config:?}"); let database_secrets = load_database_secrets(opt.secrets_path).context("database secrets")?; let observability_config = general_config diff --git a/prover/crates/bin/vk_setup_data_generator_server_fri/proptest-regressions/tests.txt b/prover/crates/bin/vk_setup_data_generator_server_fri/proptest-regressions/tests.txt deleted file mode 100644 index 7e50d86cb4f8..000000000000 --- a/prover/crates/bin/vk_setup_data_generator_server_fri/proptest-regressions/tests.txt +++ /dev/null @@ -1,9 +0,0 @@ -# Seeds for failure cases proptest has generated in the past. It is -# automatically read and these particular cases re-run before any -# novel cases are generated. -# -# It is recommended to check this file in to source control so that -# everyone who runs the test benefits from these saved cases. -cc ca181a7669a6e07b68bce71c8c723efcb8fd2a4e895fc962ca1d33ce5f8188f7 # shrinks to circuit_id = 1 -cc ce71957c410fa7af30e04b3e85423555a8e1bbd26b4682b748fa67162bc5687f # shrinks to circuit_id = 1 -cc 6d3b0c60d8a5e7d7dc3bb4a2a21cce97461827583ae01b2414345175a02a1221 # shrinks to key = ProverServiceDataKey { circuit_id: 1, round: BasicCircuits } diff --git a/prover/crates/bin/witness_generator/README.md b/prover/crates/bin/witness_generator/README.md index a318a4612069..dc476ca44fc3 100644 --- a/prover/crates/bin/witness_generator/README.md +++ b/prover/crates/bin/witness_generator/README.md @@ -50,76 +50,3 @@ One round of prover generation consists of: Note that the very first input table (`witness_inputs`) is populated by the tree (as the input artifact for the `WitnessGeneratorJobType::BasicCircuits` is the merkle proofs) - -## Running BWG for custom batch - -After releases `prover-v15.1.0` and `core-v24.9.0` basic witness generator doesn't need access to core database anymore. -Database information now lives in input file, called `witness_inputs_.bin` generated by different core -components). - -This file is stored by prover gateway in GCS (or your choice of object storage -- check config). 
To access it from GCS -(assuming you have access to the bucket), run: - -```shell -gsutil cp gs://your_bucket/witness_inputs/witness_inputs_.bin -``` - -Note, that you need to have `gsutil` installed, and you need to have access to the bucket. - -Now, database needs to know about the batch and the protocol version it should use. Check the latest protocol version in -the codebase by checking const `PROVER_PROTOCOL_SEMANTIC_VERSION` or run the binary in `prover` workspace: - -```console -cargo run --bin prover_version -``` - -It will give you the latest prover protocol version in a semver format, like `0.24.2`, you need to know only minor and -patch versions. Now, go to the `prover/crates/bin/vk_setup_data_generator_server_fri/data/commitments.json` and get -`snark_wrapper` value from it. Then, you need to insert the info about protocol version into the database. First, -connect to the database, e.g. locally you can do it like that: - -```shell -psql postgres://postgres:notsecurepassword@localhost/prover_local -``` - -And run the following query: - -```shell -INSERT INTO -prover_fri_protocol_versions ( -id, -recursion_scheduler_level_vk_hash, -created_at, -protocol_version_patch -) -VALUES -(, ''::bytea, NOW(), ) -ON CONFLICT (id, protocol_version_patch) DO NOTHING - -``` - -Now, you need to insert the batch into the database. Run the following query: - -```shell -INSERT INTO -witness_inputs_fri ( -l1_batch_number, -witness_inputs_blob_url, -protocol_version, -status, -created_at, -updated_at, -protocol_version_patch -) -VALUES -(, 'witness_inputs_.bin', , 'queued', NOW(), NOW(), ) -ON CONFLICT (l1_batch_number) DO NOTHING -``` - -Finally, run the basic witness generator itself: - -```shell -API_PROMETHEUS_LISTENER_PORT=3116 zk f cargo run --release --bin zksync_witness_generator -- --round=basic_circuits -``` - -And you are good to go! 
diff --git a/prover/crates/bin/witness_generator/src/leaf_aggregation.rs b/prover/crates/bin/witness_generator/src/leaf_aggregation.rs index d8cad84e777d..2f4494187975 100644 --- a/prover/crates/bin/witness_generator/src/leaf_aggregation.rs +++ b/prover/crates/bin/witness_generator/src/leaf_aggregation.rs @@ -72,6 +72,7 @@ pub struct LeafAggregationWitnessGenerator { object_store: Arc, prover_connection_pool: ConnectionPool, protocol_version: ProtocolSemanticVersion, + setup_data_path: String, } impl LeafAggregationWitnessGenerator { @@ -80,12 +81,14 @@ impl LeafAggregationWitnessGenerator { object_store: Arc, prover_connection_pool: ConnectionPool, protocol_version: ProtocolSemanticVersion, + setup_data_path: String, ) -> Self { Self { config, object_store, prover_connection_pool, protocol_version, + setup_data_path, } } @@ -131,9 +134,13 @@ impl JobProcessor for LeafAggregationWitnessGenerator { tracing::info!("Processing leaf aggregation job {:?}", metadata.id); Ok(Some(( metadata.id, - prepare_leaf_aggregation_job(metadata, &*self.object_store) - .await - .context("prepare_leaf_aggregation_job()")?, + prepare_leaf_aggregation_job( + metadata, + &*self.object_store, + self.setup_data_path.clone(), + ) + .await + .context("prepare_leaf_aggregation_job()")?, ))) } @@ -219,6 +226,7 @@ impl JobProcessor for LeafAggregationWitnessGenerator { pub async fn prepare_leaf_aggregation_job( metadata: LeafAggregationJobMetadata, object_store: &dyn ObjectStore, + setup_data_path: String, ) -> anyhow::Result { let started_at = Instant::now(); let closed_form_input = get_artifacts(&metadata, object_store).await; @@ -227,7 +235,7 @@ pub async fn prepare_leaf_aggregation_job( .observe(started_at.elapsed()); let started_at = Instant::now(); - let keystore = Keystore::default(); + let keystore = Keystore::new_with_setup_data_path(setup_data_path); let base_vk = keystore .load_base_layer_verification_key(metadata.circuit_id) .context("get_base_layer_vk_for_circuit_type()")?; diff --git a/prover/crates/bin/witness_generator/src/main.rs b/prover/crates/bin/witness_generator/src/main.rs index a88dd8726d39..50c955168602 100644 --- a/prover/crates/bin/witness_generator/src/main.rs +++ b/prover/crates/bin/witness_generator/src/main.rs @@ -80,9 +80,10 @@ async fn main() -> anyhow::Result<()> { let store_factory = ObjectStoreFactory::new(object_store_config.0); let config = general_config .witness_generator_config - .context("witness generator config")?; + .context("witness generator config")? + .clone(); - let prometheus_config = general_config.prometheus_config; + let prometheus_config = general_config.prometheus_config.clone(); // If the prometheus listener port is not set in the witness generator config, use the one from the prometheus config. 
let prometheus_listener_port = if let Some(port) = config.prometheus_listener_port { @@ -158,6 +159,8 @@ async fn main() -> anyhow::Result<()> { let mut tasks = Vec::new(); tasks.push(tokio::spawn(prometheus_task)); + let setup_data_path = prover_config.setup_data_path.clone(); + for round in rounds { tracing::info!( "initializing the {:?} witness generator, batch size: {:?} with protocol_version: {:?}", @@ -168,8 +171,7 @@ async fn main() -> anyhow::Result<()> { let witness_generator_task = match round { AggregationRound::BasicCircuits => { - let setup_data_path = prover_config.setup_data_path.clone(); - let vk_commitments = get_cached_commitments(Some(setup_data_path)); + let vk_commitments = get_cached_commitments(Some(setup_data_path.clone())); assert_eq!( vk_commitments, vk_commitments_in_db, @@ -204,6 +206,7 @@ async fn main() -> anyhow::Result<()> { store_factory.create_store().await?, prover_connection_pool.clone(), protocol_version, + setup_data_path.clone(), ); generator.run(stop_receiver.clone(), opt.batch_size) } @@ -213,6 +216,7 @@ async fn main() -> anyhow::Result<()> { store_factory.create_store().await?, prover_connection_pool.clone(), protocol_version, + setup_data_path.clone(), ); generator.run(stop_receiver.clone(), opt.batch_size) } @@ -222,6 +226,7 @@ async fn main() -> anyhow::Result<()> { store_factory.create_store().await?, prover_connection_pool.clone(), protocol_version, + setup_data_path.clone(), ); generator.run(stop_receiver.clone(), opt.batch_size) } @@ -231,6 +236,7 @@ async fn main() -> anyhow::Result<()> { store_factory.create_store().await?, prover_connection_pool.clone(), protocol_version, + setup_data_path.clone(), ); generator.run(stop_receiver.clone(), opt.batch_size) } diff --git a/prover/crates/bin/witness_generator/src/node_aggregation.rs b/prover/crates/bin/witness_generator/src/node_aggregation.rs index a7dce2a513d8..b6fc6b8f7c65 100644 --- a/prover/crates/bin/witness_generator/src/node_aggregation.rs +++ b/prover/crates/bin/witness_generator/src/node_aggregation.rs @@ -70,6 +70,7 @@ pub struct NodeAggregationWitnessGenerator { object_store: Arc, prover_connection_pool: ConnectionPool, protocol_version: ProtocolSemanticVersion, + setup_data_path: String, } impl NodeAggregationWitnessGenerator { @@ -78,12 +79,14 @@ impl NodeAggregationWitnessGenerator { object_store: Arc, prover_connection_pool: ConnectionPool, protocol_version: ProtocolSemanticVersion, + setup_data_path: String, ) -> Self { Self { config, object_store, prover_connection_pool, protocol_version, + setup_data_path, } } @@ -241,7 +244,7 @@ impl JobProcessor for NodeAggregationWitnessGenerator { tracing::info!("Processing node aggregation job {:?}", metadata.id); Ok(Some(( metadata.id, - prepare_job(metadata, &*self.object_store) + prepare_job(metadata, &*self.object_store, self.setup_data_path.clone()) .await .context("prepare_job()")?, ))) @@ -326,6 +329,7 @@ impl JobProcessor for NodeAggregationWitnessGenerator { pub async fn prepare_job( metadata: NodeAggregationJobMetadata, object_store: &dyn ObjectStore, + setup_data_path: String, ) -> anyhow::Result { let started_at = Instant::now(); let artifacts = get_artifacts(&metadata, object_store).await; @@ -334,7 +338,7 @@ pub async fn prepare_job( .observe(started_at.elapsed()); let started_at = Instant::now(); - let keystore = Keystore::default(); + let keystore = Keystore::new_with_setup_data_path(setup_data_path); let leaf_vk = keystore .load_recursive_layer_verification_key(metadata.circuit_id) 
.context("get_recursive_layer_vk_for_circuit_type")?; diff --git a/prover/crates/bin/witness_generator/src/recursion_tip.rs b/prover/crates/bin/witness_generator/src/recursion_tip.rs index 2a57ffff85ff..e05a0cc38cf8 100644 --- a/prover/crates/bin/witness_generator/src/recursion_tip.rs +++ b/prover/crates/bin/witness_generator/src/recursion_tip.rs @@ -75,6 +75,7 @@ pub struct RecursionTipWitnessGenerator { object_store: Arc, prover_connection_pool: ConnectionPool, protocol_version: ProtocolSemanticVersion, + setup_data_path: String, } impl RecursionTipWitnessGenerator { @@ -83,12 +84,14 @@ impl RecursionTipWitnessGenerator { object_store: Arc, prover_connection_pool: ConnectionPool, protocol_version: ProtocolSemanticVersion, + setup_data_path: String, ) -> Self { Self { config, object_store, prover_connection_pool, protocol_version, + setup_data_path, } } @@ -172,6 +175,7 @@ impl JobProcessor for RecursionTipWitnessGenerator { l1_batch_number, final_node_proof_job_ids, &*self.object_store, + self.setup_data_path.clone(), ) .await .context("prepare_job()")?, @@ -284,6 +288,7 @@ pub async fn prepare_job( l1_batch_number: L1BatchNumber, final_node_proof_job_ids: Vec<(u8, u32)>, object_store: &dyn ObjectStore, + setup_data_path: String, ) -> anyhow::Result { let started_at = Instant::now(); let recursion_tip_proofs = @@ -291,7 +296,7 @@ pub async fn prepare_job( WITNESS_GENERATOR_METRICS.blob_fetch_time[&AggregationRound::RecursionTip.into()] .observe(started_at.elapsed()); - let keystore = Keystore::default(); + let keystore = Keystore::new_with_setup_data_path(setup_data_path); let node_vk = keystore .load_recursive_layer_verification_key( ZkSyncRecursionLayerStorageType::NodeLayerCircuit as u8, diff --git a/prover/crates/bin/witness_generator/src/scheduler.rs b/prover/crates/bin/witness_generator/src/scheduler.rs index f69d338061e2..c389e037ffa6 100644 --- a/prover/crates/bin/witness_generator/src/scheduler.rs +++ b/prover/crates/bin/witness_generator/src/scheduler.rs @@ -57,6 +57,7 @@ pub struct SchedulerWitnessGenerator { object_store: Arc, prover_connection_pool: ConnectionPool, protocol_version: ProtocolSemanticVersion, + setup_data_path: String, } impl SchedulerWitnessGenerator { @@ -65,12 +66,14 @@ impl SchedulerWitnessGenerator { object_store: Arc, prover_connection_pool: ConnectionPool, protocol_version: ProtocolSemanticVersion, + setup_data_path: String, ) -> Self { Self { config, object_store, prover_connection_pool, protocol_version, + setup_data_path, } } @@ -147,9 +150,14 @@ impl JobProcessor for SchedulerWitnessGenerator { Ok(Some(( l1_batch_number, - prepare_job(l1_batch_number, recursion_tip_job_id, &*self.object_store) - .await - .context("prepare_job()")?, + prepare_job( + l1_batch_number, + recursion_tip_job_id, + &*self.object_store, + self.setup_data_path.clone(), + ) + .await + .context("prepare_job()")?, ))) } @@ -258,6 +266,7 @@ pub async fn prepare_job( l1_batch_number: L1BatchNumber, recursion_tip_job_id: u32, object_store: &dyn ObjectStore, + setup_data_path: String, ) -> anyhow::Result { let started_at = Instant::now(); let wrapper = object_store.get(recursion_tip_job_id).await?; @@ -271,7 +280,7 @@ pub async fn prepare_job( .observe(started_at.elapsed()); let started_at = Instant::now(); - let keystore = Keystore::default(); + let keystore = Keystore::new_with_setup_data_path(setup_data_path); let node_vk = keystore .load_recursive_layer_verification_key( ZkSyncRecursionLayerStorageType::NodeLayerCircuit as u8, diff --git 
a/prover/crates/bin/witness_generator/tests/basic_test.rs b/prover/crates/bin/witness_generator/tests/basic_test.rs index f8a21179adb7..b034ab57d82c 100644 --- a/prover/crates/bin/witness_generator/tests/basic_test.rs +++ b/prover/crates/bin/witness_generator/tests/basic_test.rs @@ -50,9 +50,13 @@ async fn test_leaf_witness_gen() { .await .unwrap(); - let job = prepare_leaf_aggregation_job(leaf_aggregation_job_metadata, &*object_store) - .await - .unwrap(); + let job = prepare_leaf_aggregation_job( + leaf_aggregation_job_metadata, + &*object_store, + "crates/bin/vk_setup_data_generator/data".to_string(), + ) + .await + .unwrap(); let artifacts = LeafAggregationWitnessGenerator::process_job_impl( job, @@ -139,9 +143,13 @@ async fn test_node_witness_gen() { prover_job_ids_for_proofs: vec![5211320], }; - let job = node_aggregation::prepare_job(node_aggregation_job_metadata, &*object_store) - .await - .unwrap(); + let job = node_aggregation::prepare_job( + node_aggregation_job_metadata, + &*object_store, + "crates/bin/vk_setup_data_generator/data".to_string(), + ) + .await + .unwrap(); let artifacts = NodeAggregationWitnessGenerator::process_job_impl( job, diff --git a/prover/crates/lib/prover_dal/src/fri_witness_generator_dal.rs b/prover/crates/lib/prover_dal/src/fri_witness_generator_dal.rs index 65d490ee4e08..9958527a98b0 100644 --- a/prover/crates/lib/prover_dal/src/fri_witness_generator_dal.rs +++ b/prover/crates/lib/prover_dal/src/fri_witness_generator_dal.rs @@ -927,12 +927,12 @@ impl FriWitnessGeneratorDal<'_, '_> { "#, AggregationRound::RecursionTip as i64, ) - .fetch_all(self.storage.conn()) - .await - .unwrap() - .into_iter() - .map(|row| (row.l1_batch_number as u64)) - .collect() + .fetch_all(self.storage.conn()) + .await + .unwrap() + .into_iter() + .map(|row| (row.l1_batch_number as u64)) + .collect() } pub async fn requeue_stuck_leaf_jobs( diff --git a/prover/docs/01_gcp_vm.md b/prover/docs/01_gcp_vm.md index a541495e978a..8cc9f31de696 100644 --- a/prover/docs/01_gcp_vm.md +++ b/prover/docs/01_gcp_vm.md @@ -31,9 +31,8 @@ When you choose the region, set the following options: - GPU Type: NVIDIA L4 - Number of GPUs: 1 - Machine type: Preset, `g2-standard-16` -- Availability policies: Spot instances are much cheaper, but there is a chance that it will be preempted while you work - on it. If you're working on something that is not very important, spot instances are recommended. If any disruption - will be harmful, choose standard provisioning. +- Availability policies: Choose standard provisioning. Spot instances can be preempted while you work on them, which + will disrupt your flow. - Then click on "VM provisioning model advanced settings" and - Click on "Set a time limit for the VM" - Set the limit to 8 hours diff --git a/prover/docs/05_proving_batch.md b/prover/docs/05_proving_batch.md new file mode 100644 index 000000000000..441a8225f866 --- /dev/null +++ b/prover/docs/05_proving_batch.md @@ -0,0 +1,145 @@ +# Proving a batch + +If you got to this section, then most likely you are wondering how to prove and verify a batch by yourself. After +releases `prover-v15.1.0` and `core-v24.9.0`, the prover subsystem doesn't need access to the core database anymore, which means +you can run only the prover subsystem and prove batches without running the whole core system. This guide will help you with +that.
+ +## Requirements + +### Hardware + +Setup for running the whole process should be the same as described [here](./01_gcp_vm.md), except that you need 48 GB of +GPU memory, which requires an NVIDIA A100 80GB GPU. + +### Prerequisites + +First of all, you need to install CUDA drivers; everything else will be handled by the `zk_inception` and `prover_cli` +tools. For that, check the following [guide](./02_setup.md) (you can skip the bellman-cuda step). + +Install the prerequisites, which you can find +[here](https://github.com/matter-labs/zksync-era/blob/main/docs/guides/setup-dev.md). Note that if you are not using a +Google VM instance, you also need to install [gcloud](https://cloud.google.com/sdk/docs/install#deb). + +Now you can use the `zk_inception` and `prover_cli` tools to set up the environment and run the prover subsystem. + +```shell +cargo +nightly-2024-08-01 install --git https://github.com/matter-labs/zksync-era/ --locked zk_inception zk_supervisor prover_cli --force +``` + +## Initializing system + +After you have installed the tools, you can create an ecosystem (you only need to do this if you are outside of `zksync-era`) by +running: + +```shell +zk_inception ecosystem create --l1-network=localhost --prover-mode=gpu --wallet-creation=localhost --l1-batch-commit-data-generator-mode=rollup --start-containers=true +``` + +The command will create the ecosystem and all the necessary components for the prover subsystem. You can leave the default +values for all the prompts you will see. Now, initialize the prover subsystem by running: + +```shell +zk_inception prover init --shall-save-to-public-bucket=false --setup-database=true --use-default=true --dont-drop=false +``` + +For the prompts, you can leave the default values as well. + +## Proving the batch + +### Getting data needed for proving + +At this step, we need to get the witness inputs data for the batch you want to prove. The database information now lives in an +input file called `witness_inputs_.bin`, generated by various core components. + +- If the batch was produced by your system, the file is stored by the prover gateway in GCS (or your choice of object storage -- + check the config). At this point, most likely no artifacts directory has been created yet. If you have cloned the + zksync-era repo, it goes in the root of the ecosystem directory. Create the artifacts directory by running: + + ```shell + mkdir -p + ``` + + To access it from GCS (assuming you have access to the bucket), run: + + ```shell + gsutil cp gs://your_bucket/witness_inputs/witness_inputs_.bin + ``` + +- If you want to prove a batch produced by ZKsync, you can get the data from the `ExternalProofIntegrationAPI` using the + `{address}/proof_generation_data` endpoint. You need to replace `{address}` with the address of the API and provide + the batch number as the request data to get the data for a specific batch; otherwise, you will receive the latest data for a + batch that has already been proven. Example: + + ```shell + curl -H "Content-Type: application/json" -X POST {address}/proof_generation_data -d 'null' + ``` + + or + + ```shell + curl -H "Content-Type: application/json" -X POST {address}/proof_generation_data -d '1000' + ``` + +### Preparing database + +After you have the data, you need to prepare the system to run the batch. The database needs to know about the batch and +the protocol version it should use.
You can do that by running: + +```shell +zk_supervisor prover-version +``` + +Example output: + +```shell +Current protocol version found in zksync-era: 0.24.2, snark_wrapper: "0x14f97b81e54b35fe673d8708cc1a19e1ea5b5e348e12d31e39824ed4f42bbca2" +``` + +This command will provide you with information about the semantic protocol version (you only need the minor and +patch versions) and the snark wrapper value. In the example, `MINOR_VERSION` is 24, `PATCH_VERSION` is 2, and +`SNARK_WRAPPER` is `0x14f97b81e54b35fe673d8708cc1a19e1ea5b5e348e12d31e39824ed4f42bbca2`. + +Now, using the `prover_cli` tool, you can insert the data about the batch and the protocol version into the database. + +First, get the database URL (you can find it in `/chains//configs/secrets.yaml` - it is the +`prover_url` value). Then insert the information about the protocol version into the database: + +```shell +prover_cli insert-version --version= --patch= --snark-wrapper= +``` + +And finally, provide the data about the batch: + +```shell +prover_cli insert-batch --number= --version= --patch= +``` + +The provers also need to know which setup keys they should use. Generating them may take some time, but you can do it with: + +```shell +zk_inception prover generate-sk +``` + +## Running prover subsystem + +At this step, all the data is prepared and you can run the prover subsystem. To do that, run the following commands: + +```shell +zk_inception prover run --component=prover +zk_inception prover run --component=witness-generator --round=all-rounds +zk_inception prover run --component=witness-vector-generator --threads=10 +zk_inception prover run --component=compressor +zk_inception prover run --component=prover-job-monitor +``` + +And you are good to go! The prover subsystem will prove the batch, and you can check the results in the database. + +## Verifying a ZKsync batch + +Now, assuming the proof is already generated, you can verify it using the `ExternalProofIntegrationAPI`. Usually the proof is +stored in a GCS bucket (you can retrieve it with the same steps as for getting the witness inputs data +[here](#getting-data-needed-for-proving); locally, you can find it in the `/artifacts/proofs_fri` directory). Now, simply +send the data to the endpoint `{address}/verify_batch/{batch_number}`. Note that you need to pass the generated proof +as serialized JSON data when calling the endpoint. The API will respond with status 200 if the proof is valid and with an +error message otherwise.
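For illustration, a verification request might look like the sketch below. This is only a sketch under assumptions: the `{address}`, the batch number, and the proof file path are placeholders that depend on your setup, and the exact file name produced under `proofs_fri` may differ.

```shell
# Hypothetical example: replace the address, batch number, and proof path with your own values.
# The -d @file form sends the serialized JSON proof as the request body.
curl -H "Content-Type: application/json" -X POST {address}/verify_batch/1000 \
  -d @./artifacts/proofs_fri/proof_1000.json
```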
diff --git a/zk_toolbox/Cargo.lock b/zk_toolbox/Cargo.lock index 1b4f434b889f..44a82256c027 100644 --- a/zk_toolbox/Cargo.lock +++ b/zk_toolbox/Cargo.lock @@ -501,13 +501,13 @@ dependencies = [ [[package]] name = "cc" -version = "1.0.104" +version = "1.1.14" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "74b6a57f98764a267ff415d50a25e6e166f3831a5071af4995296ea97d210490" +checksum = "50d2eb3cd3d1bf4529e31c215ee6f93ec5a3d536d9f578f93d9d33ee19562932" dependencies = [ "jobserver", "libc", - "once_cell", + "shlex", ] [[package]] @@ -2573,9 +2573,9 @@ dependencies = [ [[package]] name = "libsqlite3-sys" -version = "0.28.0" +version = "0.30.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0c10584274047cb335c23d3e61bcef8e323adae7c5c8c760540f73610177fc3f" +checksum = "2e99fb7a497b1e3339bc746195567ed8d3e24945ecd636e3619d20b9de9e9149" dependencies = [ "cc", "pkg-config", @@ -4564,6 +4564,12 @@ dependencies = [ "lazy_static", ] +[[package]] +name = "shlex" +version = "1.3.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "0fda2ff0d084019ba4d7c6f371c95d8fd75ce3524c3cb8fb653a3023f6323e64" + [[package]] name = "signal-hook-registry" version = "1.4.2" @@ -4716,9 +4722,9 @@ dependencies = [ [[package]] name = "sqlx" -version = "0.8.0" +version = "0.8.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "27144619c6e5802f1380337a209d2ac1c431002dd74c6e60aebff3c506dc4f0c" +checksum = "fcfa89bea9500db4a0d038513d7a060566bfc51d46d1c014847049a45cce85e8" dependencies = [ "sqlx-core", "sqlx-macros", @@ -4729,9 +4735,9 @@ dependencies = [ [[package]] name = "sqlx-core" -version = "0.8.0" +version = "0.8.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a999083c1af5b5d6c071d34a708a19ba3e02106ad82ef7bbd69f5e48266b613b" +checksum = "d06e2f2bd861719b1f3f0c7dbe1d80c30bf59e76cf019f07d9014ed7eefb8e08" dependencies = [ "atoi", "byteorder", @@ -4768,9 +4774,9 @@ dependencies = [ [[package]] name = "sqlx-macros" -version = "0.8.0" +version = "0.8.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a23217eb7d86c584b8cbe0337b9eacf12ab76fe7673c513141ec42565698bb88" +checksum = "2f998a9defdbd48ed005a89362bd40dd2117502f15294f61c8d47034107dbbdc" dependencies = [ "proc-macro2", "quote", @@ -4781,9 +4787,9 @@ dependencies = [ [[package]] name = "sqlx-macros-core" -version = "0.8.0" +version = "0.8.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1a099220ae541c5db479c6424bdf1b200987934033c2584f79a0e1693601e776" +checksum = "3d100558134176a2629d46cec0c8891ba0be8910f7896abfdb75ef4ab6f4e7ce" dependencies = [ "dotenvy", "either", @@ -4807,9 +4813,9 @@ dependencies = [ [[package]] name = "sqlx-mysql" -version = "0.8.0" +version = "0.8.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5afe4c38a9b417b6a9a5eeffe7235d0a106716495536e7727d1c7f4b1ff3eba6" +checksum = "936cac0ab331b14cb3921c62156d913e4c15b74fb6ec0f3146bd4ef6e4fb3c12" dependencies = [ "atoi", "base64 0.22.1", @@ -4849,9 +4855,9 @@ dependencies = [ [[package]] name = "sqlx-postgres" -version = "0.8.0" +version = "0.8.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b1dbb157e65f10dbe01f729339c06d239120221c9ad9fa0ba8408c4cc18ecf21" +checksum = "9734dbce698c67ecf67c442f768a5e90a49b2a4d61a9f1d59f73874bd4cf0710" dependencies = [ "atoi", "base64 0.22.1", @@ -4887,9 +4893,9 @@ dependencies = [ [[package]] name = 
"sqlx-sqlite" -version = "0.8.0" +version = "0.8.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9b2cdd83c008a622d94499c0006d8ee5f821f36c89b7d625c900e5dc30b5c5ee" +checksum = "a75b419c3c1b1697833dd927bdc4c6545a620bc1bbafabd44e1efbe9afcd337e" dependencies = [ "atoi", "flume", @@ -6298,6 +6304,7 @@ dependencies = [ "futures", "human-panic", "serde", + "serde_json", "strum", "tokio", "url", diff --git a/zk_toolbox/Cargo.toml b/zk_toolbox/Cargo.toml index ef2aed7c99c1..4a08776558ed 100644 --- a/zk_toolbox/Cargo.toml +++ b/zk_toolbox/Cargo.toml @@ -47,7 +47,7 @@ rand = "0.8.5" serde = { version = "1.0", features = ["derive"] } serde_json = "1.0" serde_yaml = "0.9" -sqlx = { version = "0.8.0", features = [ +sqlx = { version = "0.8.1", features = [ "runtime-tokio", "migrate", "postgres", diff --git a/zk_toolbox/README.md b/zk_toolbox/README.md index debbb511df3f..b35d4c8d56f1 100644 --- a/zk_toolbox/README.md +++ b/zk_toolbox/README.md @@ -20,7 +20,7 @@ Install `zk_inception` from Git: cargo install --git https://github.com/matter-labs/zksync-era/ --locked zk_inception zk_supervisor --force ``` -Or manually build from a local copy of the [ZkSync](https://github.com/matter-labs/zksync-era/) repository: +Or manually build from a local copy of the [ZKsync](https://github.com/matter-labs/zksync-era/) repository: ```bash ./bin/zkt @@ -37,7 +37,7 @@ Foundry is used for deploying smart contracts. Pass flags for Foundry integratio ZK Stack allows you to create a new ecosystem or connect to an existing one. An ecosystem includes components like the BridgeHub, shared bridges, and state transition managers. -[Learn more](https://docs.zksync.io/zk-stack/components/shared-bridges.html). +[Learn more](https://docs.zksync.io/zk-stack/components/shared-bridges). #### Global Config @@ -260,7 +260,7 @@ needed. ## ZK Supervisor -Tools for developing zkSync. +Tools for developing ZKsync. ### Database @@ -296,7 +296,7 @@ Possible commands: ### Tests -Run zkSync tests: +Run ZKsync tests: ```bash zk_supervisor test @@ -342,7 +342,7 @@ Lint code: zks lint ``` -By default, this command runs the linter on all files. To target specific file types, use the `--extension` option. +By default, this command runs the linter on all files. To target specific file types, use the `--target` option. Supported extensions include: - `rs`: Rust files. @@ -350,3 +350,4 @@ Supported extensions include: - `sol`: Solidity files. - `js`: JavaScript files. - `ts`: TypeScript files. +- `contracts`: files in `contracts` directory. 
diff --git a/zk_toolbox/crates/config/src/secrets.rs b/zk_toolbox/crates/config/src/secrets.rs index 5bcad19ad339..f0a39148b034 100644 --- a/zk_toolbox/crates/config/src/secrets.rs +++ b/zk_toolbox/crates/config/src/secrets.rs @@ -26,6 +26,18 @@ pub fn set_databases( Ok(()) } +pub fn set_prover_database( + secrets: &mut SecretsConfig, + prover_db_config: &DatabaseConfig, +) -> anyhow::Result<()> { + let database = secrets + .database + .as_mut() + .context("Databases must be presented")?; + database.prover_url = Some(SensitiveUrl::from(prover_db_config.full_url())); + Ok(()) +} + pub fn set_l1_rpc_url(secrets: &mut SecretsConfig, l1_rpc_url: String) -> anyhow::Result<()> { secrets .l1 diff --git a/zk_toolbox/crates/types/src/l1_network.rs b/zk_toolbox/crates/types/src/l1_network.rs index 822235611a33..cc7b47147548 100644 --- a/zk_toolbox/crates/types/src/l1_network.rs +++ b/zk_toolbox/crates/types/src/l1_network.rs @@ -21,6 +21,7 @@ pub enum L1Network { #[default] Localhost, Sepolia, + Holesky, Mainnet, } @@ -30,6 +31,7 @@ impl L1Network { match self { L1Network::Localhost => 9, L1Network::Sepolia => 11_155_111, + L1Network::Holesky => 17000, L1Network::Mainnet => 1, } } diff --git a/zk_toolbox/crates/zk_inception/README.md b/zk_toolbox/crates/zk_inception/README.md index 4cb6d213688e..037a7e3fc925 100644 --- a/zk_toolbox/crates/zk_inception/README.md +++ b/zk_toolbox/crates/zk_inception/README.md @@ -33,6 +33,7 @@ This document contains the help content for the `zk_inception` command-line prog - [`zk_inception contract-verifier`↴](#zk_inception-contract-verifier) - [`zk_inception contract-verifier run`↴](#zk_inception-contract-verifier-run) - [`zk_inception contract-verifier init`↴](#zk_inception-contract-verifier-init) +- [`zk_inception portal`↴](#zk_inception-portal) - [`zk_inception update`↴](#zk_inception-update) ## `zk_inception` @@ -50,7 +51,8 @@ ZK Toolbox is a set of tools for working with zk stack. - `external-node` — External Node related commands - `containers` — Run containers for local development - `contract-verifier` — Run contract verifier -- `update` — Update zkSync +- `portal` — Run dapp-portal +- `update` — Update ZKsync ###### **Options:** @@ -76,21 +78,18 @@ Ecosystem related commands Create a new ecosystem and chain, setting necessary configurations for later initialization -**Usage:** `zk_inception ecosystem create [OPTIONS] [CHAIN_ID]` - -###### **Arguments:** - -- `` +**Usage:** `zk_inception ecosystem create [OPTIONS]` ###### **Options:** - `--ecosystem-name ` - `--l1-network ` — L1 Network - Possible values: `localhost`, `sepolia`, `mainnet` + Possible values: `localhost`, `sepolia`, `holesky`, `mainnet` - `--link-to-code ` — Code link - `--chain-name ` +- `--chain-id ` — Chain ID - `--prover-mode ` — Prover options Possible values: `no-proofs`, `gpu` @@ -166,7 +165,9 @@ Initialize ecosystem and chain, deploying necessary contracts and performing on- - `-u`, `--use-default` — Use default database urls and names - `-d`, `--dont-drop` - `--dev` — Deploy ecosystem using all defaults. 
Suitable for local development -- `-o`, `--observability` — Enable Grafana +- `-o`, `--observability ` — Enable Grafana + + Possible values: `true`, `false` ## `zk_inception ecosystem change-default-chain` @@ -199,21 +200,18 @@ Chain related commands - `deploy-l2-contracts` — Deploy all l2 contracts - `upgrader` — Deploy Default Upgrader - `deploy-paymaster` — Deploy paymaster smart contract -- `update-token-multiplier-setter` — Update Token Multiplier Setter address on l1 +- `update-token-multiplier-setter` — Update Token Multiplier Setter address on L1 ## `zk_inception chain create` Create a new chain, setting the necessary configurations for later initialization -**Usage:** `zk_inception chain create [OPTIONS] [CHAIN_ID]` - -###### **Arguments:** - -- `` +**Usage:** `zk_inception chain create [OPTIONS]` ###### **Options:** - `--chain-name ` +- `--chain-id ` — Chain ID - `--prover-mode ` — Prover options Possible values: `no-proofs`, `gpu` @@ -393,11 +391,28 @@ Deploy paymaster smart contract ## `zk_inception chain update-token-multiplier-setter` -Update Token Multiplier Setter address on l1. Token Multiplier Setter is used by chains with custom base token to -propagate the changes to numerator / denominator to the l1. Address of the Token Multiplier Setter is taken from the -wallets configuration. +Update Token Multiplier Setter address on L1 + +**Usage:** `zk_inception chain update-token-multiplier-setter [OPTIONS]` + +###### **Options:** + +- `--verify ` — Verify deployed contracts + + Possible values: `true`, `false` + +- `--verifier ` — Verifier to use + + Default value: `etherscan` -**Usage:** `zk_inception chain update-token-multiplier-setter` + Possible values: `etherscan`, `sourcify`, `blockscout`, `oklink` + +- `--verifier-url ` — Verifier URL, if using a custom provider +- `--verifier-api-key ` — Verifier API key +- `--resume` +- `-a`, `--additional-args ` — List of additional arguments that can be passed through the CLI. 
+ + e.g.: `zk_inception init -a --private-key=` ## `zk_inception prover` @@ -442,6 +457,20 @@ Initialize prover Possible values: `true`, `false` - `--setup-key-path ` +- `--setup-database ` + + Possible values: `true`, `false` + +- `--prover-db-url ` — Prover database url without database name +- `--prover-db-name ` — Prover database name +- `-u`, `--use-default ` — Use default database urls and names + + Possible values: `true`, `false` + +- `-d`, `--dont-drop ` + + Possible values: `true`, `false` + - `--cloud-type ` Possible values: `gcp`, `local` @@ -462,7 +491,8 @@ Run prover - `--component ` - Possible values: `gateway`, `witness-generator`, `witness-vector-generator`, `prover`, `compressor` + Possible values: `gateway`, `witness-generator`, `witness-vector-generator`, `prover`, `compressor`, + `prover-job-monitor` - `--round ` @@ -544,7 +574,9 @@ Run containers for local development ###### **Options:** -- `-o`, `--observability` — Enable Grafana +- `-o`, `--observability ` — Enable Grafana + + Possible values: `true`, `false` ## `zk_inception contract-verifier` @@ -576,9 +608,21 @@ Download required binaries for contract verifier - `--solc-version ` — Version of solc to install - `--vyper-version ` — Version of vyper to install +## `zk_inception portal` + +Run dapp-portal + +**Usage:** `zk_inception portal [OPTIONS]` + +###### **Options:** + +- `--port ` — The port number for the portal app + + Default value: `3030` + ## `zk_inception update` -Update zkSync +Update ZKsync **Usage:** `zk_inception update [OPTIONS]` diff --git a/zk_toolbox/crates/zk_inception/src/commands/ecosystem/init.rs b/zk_toolbox/crates/zk_inception/src/commands/ecosystem/init.rs index 101d272494a0..fc4a3c9b3201 100644 --- a/zk_toolbox/crates/zk_inception/src/commands/ecosystem/init.rs +++ b/zk_toolbox/crates/zk_inception/src/commands/ecosystem/init.rs @@ -44,7 +44,8 @@ use crate::{ }, }, messages::{ - msg_ecosystem_initialized, msg_initializing_chain, MSG_CHAIN_NOT_INITIALIZED, + msg_ecosystem_initialized, msg_ecosystem_no_found_preexisting_contract, + msg_initializing_chain, MSG_CHAIN_NOT_INITIALIZED, MSG_DEPLOYING_ECOSYSTEM_CONTRACTS_SPINNER, MSG_DEPLOYING_ERC20, MSG_DEPLOYING_ERC20_SPINNER, MSG_ECOSYSTEM_CONTRACTS_PATH_INVALID_ERR, MSG_ECOSYSTEM_CONTRACTS_PATH_PROMPT, MSG_INITIALIZING_ECOSYSTEM, @@ -242,17 +243,30 @@ async fn deploy_ecosystem( } }; + let ecosystem_preexisting_configs_path = + ecosystem_config + .get_preexisting_configs_path() + .join(format!( + "{}.yaml", + ecosystem_config.l1_network.to_string().to_lowercase() + )); + + // currently there are not some preexisting ecosystem contracts in + // chains, so we need check if this file exists. 
+ if ecosystem_contracts_path.is_none() && !ecosystem_preexisting_configs_path.exists() { + anyhow::bail!(msg_ecosystem_no_found_preexisting_contract( + &ecosystem_config.l1_network.to_string() + )) + } + let ecosystem_contracts_path = ecosystem_contracts_path.unwrap_or_else(|| match ecosystem_config.l1_network { L1Network::Localhost => { ContractsConfig::get_path_with_base_path(&ecosystem_config.config) } - L1Network::Sepolia | L1Network::Mainnet => ecosystem_config - .get_preexisting_configs_path() - .join(format!( - "{}.yaml", - ecosystem_config.l1_network.to_string().to_lowercase() - )), + L1Network::Sepolia | L1Network::Holesky | L1Network::Mainnet => { + ecosystem_preexisting_configs_path + } }); ContractsConfig::read(shell, ecosystem_contracts_path) diff --git a/zk_toolbox/crates/zk_inception/src/commands/prover/args/init.rs b/zk_toolbox/crates/zk_inception/src/commands/prover/args/init.rs index cef435625716..e8c9cf1888d5 100644 --- a/zk_toolbox/crates/zk_inception/src/commands/prover/args/init.rs +++ b/zk_toolbox/crates/zk_inception/src/commands/prover/args/init.rs @@ -1,7 +1,10 @@ use clap::{Parser, ValueEnum}; -use common::{logger, Prompt, PromptConfirm, PromptSelect}; +use common::{db::DatabaseConfig, logger, Prompt, PromptConfirm, PromptSelect}; +use config::ChainConfig; use serde::{Deserialize, Serialize}; +use slugify_rs::slugify; use strum::{EnumIter, IntoEnumIterator}; +use url::Url; use xshell::Shell; use zksync_config::configs::fri_prover::CloudConnectionMode; @@ -9,15 +12,18 @@ use super::init_bellman_cuda::InitBellmanCudaArgs; use crate::{ commands::prover::gcs::get_project_ids, consts::{DEFAULT_CREDENTIALS_FILE, DEFAULT_PROOF_STORE_DIR}, + defaults::{generate_db_names, DBNames, DATABASE_PROVER_URL}, messages::{ - MSG_CLOUD_TYPE_PROMPT, MSG_CREATE_GCS_BUCKET_LOCATION_PROMPT, - MSG_CREATE_GCS_BUCKET_NAME_PROMTP, MSG_CREATE_GCS_BUCKET_PROJECT_ID_NO_PROJECTS_PROMPT, + msg_prover_db_name_prompt, msg_prover_db_url_prompt, MSG_CLOUD_TYPE_PROMPT, + MSG_CREATE_GCS_BUCKET_LOCATION_PROMPT, MSG_CREATE_GCS_BUCKET_NAME_PROMTP, + MSG_CREATE_GCS_BUCKET_PROJECT_ID_NO_PROJECTS_PROMPT, MSG_CREATE_GCS_BUCKET_PROJECT_ID_PROMPT, MSG_CREATE_GCS_BUCKET_PROMPT, MSG_DOWNLOAD_SETUP_KEY_PROMPT, MSG_GETTING_PROOF_STORE_CONFIG, MSG_GETTING_PUBLIC_STORE_CONFIG, MSG_PROOF_STORE_CONFIG_PROMPT, MSG_PROOF_STORE_DIR_PROMPT, MSG_PROOF_STORE_GCS_BUCKET_BASE_URL_ERR, MSG_PROOF_STORE_GCS_BUCKET_BASE_URL_PROMPT, - MSG_PROOF_STORE_GCS_CREDENTIALS_FILE_PROMPT, MSG_SAVE_TO_PUBLIC_BUCKET_PROMPT, - MSG_SETUP_KEY_PATH_PROMPT, + MSG_PROOF_STORE_GCS_CREDENTIALS_FILE_PROMPT, MSG_PROVER_DB_NAME_HELP, + MSG_PROVER_DB_URL_HELP, MSG_SAVE_TO_PUBLIC_BUCKET_PROMPT, MSG_SETUP_KEY_PATH_PROMPT, + MSG_USE_DEFAULT_DATABASES_HELP, }, }; @@ -54,6 +60,17 @@ pub struct ProverInitArgs { #[serde(flatten)] pub setup_key_config: SetupKeyConfigTmp, + #[clap(long)] + pub setup_database: Option, + #[clap(long, help = MSG_PROVER_DB_URL_HELP)] + pub prover_db_url: Option, + #[clap(long, help = MSG_PROVER_DB_NAME_HELP)] + pub prover_db_name: Option, + #[clap(long, short, help = MSG_USE_DEFAULT_DATABASES_HELP)] + pub use_default: Option, + #[clap(long, short, action)] + pub dont_drop: Option, + #[clap(long)] cloud_type: Option, } @@ -160,6 +177,12 @@ pub struct SetupKeyConfig { pub setup_key_path: String, } +#[derive(Debug, Clone)] +pub struct ProverDatabaseConfig { + pub database_config: DatabaseConfig, + pub dont_drop: bool, +} + #[derive(Debug, Clone)] pub struct ProverInitArgsFinal { pub proof_store: ProofStorageConfig, @@ -167,6 +190,7 @@ 
pub struct ProverInitArgsFinal { pub setup_key_config: SetupKeyConfig, pub bellman_cuda_config: InitBellmanCudaArgs, pub cloud_type: CloudConnectionMode, + pub database_config: Option, } impl ProverInitArgs { @@ -174,12 +198,14 @@ impl ProverInitArgs { &self, shell: &Shell, setup_key_path: &str, + chain_config: &ChainConfig, ) -> anyhow::Result { let proof_store = self.fill_proof_storage_values_with_prompt(shell)?; let public_store = self.fill_public_storage_values_with_prompt(shell)?; let setup_key_config = self.fill_setup_key_values_with_prompt(setup_key_path); let bellman_cuda_config = self.fill_bellman_cuda_values_with_prompt()?; let cloud_type = self.get_cloud_type_with_prompt(); + let database_config = self.fill_database_values_with_prompt(chain_config); Ok(ProverInitArgsFinal { proof_store, @@ -187,6 +213,7 @@ impl ProverInitArgs { setup_key_config, bellman_cuda_config, cloud_type, + database_config, }) } @@ -314,7 +341,11 @@ impl ProverInitArgs { .clone() .setup_key_config .download_key - .unwrap_or_else(|| PromptConfirm::new(MSG_DOWNLOAD_SETUP_KEY_PROMPT).ask()); + .unwrap_or_else(|| { + PromptConfirm::new(MSG_DOWNLOAD_SETUP_KEY_PROMPT) + .default(true) + .ask() + }); let setup_key_path = self .clone() .setup_key_config @@ -435,9 +466,65 @@ impl ProverInitArgs { fn get_cloud_type_with_prompt(&self) -> CloudConnectionMode { let cloud_type = self.cloud_type.clone().unwrap_or_else(|| { - PromptSelect::new(MSG_CLOUD_TYPE_PROMPT, InternalCloudConnectionMode::iter()).ask() + PromptSelect::new( + MSG_CLOUD_TYPE_PROMPT, + InternalCloudConnectionMode::iter().rev(), + ) + .ask() }); cloud_type.into() } + + fn fill_database_values_with_prompt( + &self, + config: &ChainConfig, + ) -> Option { + let setup_database = self + .setup_database + .unwrap_or_else(|| PromptConfirm::new("Do you want to setup the database?").ask()); + + if setup_database { + let DBNames { prover_name, .. 
} = generate_db_names(config); + let chain_name = config.name.clone(); + + let dont_drop = self.dont_drop.unwrap_or_else(|| { + !PromptConfirm::new("Do you want to drop the database?") + .default(true) + .ask() + }); + + if self.use_default.unwrap_or_else(|| { + PromptConfirm::new(MSG_USE_DEFAULT_DATABASES_HELP) + .default(true) + .ask() + }) { + Some(ProverDatabaseConfig { + database_config: DatabaseConfig::new(DATABASE_PROVER_URL.clone(), prover_name), + dont_drop, + }) + } else { + let prover_db_url = self.prover_db_url.clone().unwrap_or_else(|| { + Prompt::new(&msg_prover_db_url_prompt(&chain_name)) + .default(DATABASE_PROVER_URL.as_str()) + .ask() + }); + + let prover_db_name: String = self.prover_db_name.clone().unwrap_or_else(|| { + Prompt::new(&msg_prover_db_name_prompt(&chain_name)) + .default(&prover_name) + .ask() + }); + + let prover_db_name = slugify!(&prover_db_name, separator = "_"); + + Some(ProverDatabaseConfig { + database_config: DatabaseConfig::new(prover_db_url, prover_db_name), + dont_drop, + }) + } + } else { + None + } + } } diff --git a/zk_toolbox/crates/zk_inception/src/commands/prover/args/run.rs b/zk_toolbox/crates/zk_inception/src/commands/prover/args/run.rs index c2d5cef26ad4..6bdd62c1d488 100644 --- a/zk_toolbox/crates/zk_inception/src/commands/prover/args/run.rs +++ b/zk_toolbox/crates/zk_inception/src/commands/prover/args/run.rs @@ -28,6 +28,8 @@ pub enum ProverComponent { Prover, #[strum(to_string = "Compressor")] Compressor, + #[strum(to_string = "ProverJobMonitor")] + ProverJobMonitor, } #[derive(Debug, Clone, Parser, Default)] diff --git a/zk_toolbox/crates/zk_inception/src/commands/prover/generate_sk.rs b/zk_toolbox/crates/zk_inception/src/commands/prover/generate_sk.rs index 1657ab2c99fb..7f678470d178 100644 --- a/zk_toolbox/crates/zk_inception/src/commands/prover/generate_sk.rs +++ b/zk_toolbox/crates/zk_inception/src/commands/prover/generate_sk.rs @@ -17,9 +17,9 @@ pub(crate) async fn run(shell: &Shell) -> anyhow::Result<()> { let cmd = Cmd::new(cmd!( shell, "cargo run --features gpu --release --bin key_generator -- - generate-sk all --recompute-if-missing - --setup-path=vk_setup_data_generator_server_fri/data - --path={link_to_prover}/vk_setup_data_generator_server_fri/data" + generate-sk-gpu all --recompute-if-missing + --setup-path=crates/bin/vk_setup_data_generator_server_fri/data + --path={link_to_prover}/crates/bin/vk_setup_data_generator_server_fri/data" )); cmd.run()?; spinner.finish(); diff --git a/zk_toolbox/crates/zk_inception/src/commands/prover/init.rs b/zk_toolbox/crates/zk_inception/src/commands/prover/init.rs index a27e5f1b0bec..803ef56df832 100644 --- a/zk_toolbox/crates/zk_inception/src/commands/prover/init.rs +++ b/zk_toolbox/crates/zk_inception/src/commands/prover/init.rs @@ -1,6 +1,15 @@ +use std::path::PathBuf; + use anyhow::Context; -use common::{check_prover_prequisites, cmd::Cmd, logger, spinner::Spinner}; -use config::EcosystemConfig; +use common::{ + check_prover_prequisites, + cmd::Cmd, + config::global_config, + db::{drop_db_if_exists, init_db, migrate_db, DatabaseConfig}, + logger, + spinner::Spinner, +}; +use config::{copy_configs, set_prover_database, traits::SaveConfigWithBasePath, EcosystemConfig}; use xshell::{cmd, Shell}; use zksync_config::{ configs::{object_store::ObjectStoreMode, GeneralConfig}, @@ -14,28 +23,36 @@ use super::{ utils::get_link_to_prover, }; use crate::{ - consts::PROVER_STORE_MAX_RETRIES, + consts::{PROVER_MIGRATIONS, PROVER_STORE_MAX_RETRIES}, messages::{ MSG_CHAIN_NOT_FOUND_ERR, 
MSG_DOWNLOADING_SETUP_KEY_SPINNER, - MSG_GENERAL_CONFIG_NOT_FOUND_ERR, MSG_PROOF_COMPRESSOR_CONFIG_NOT_FOUND_ERR, - MSG_PROVER_CONFIG_NOT_FOUND_ERR, MSG_PROVER_INITIALIZED, MSG_SETUP_KEY_PATH_ERROR, + MSG_FAILED_TO_DROP_PROVER_DATABASE_ERR, MSG_GENERAL_CONFIG_NOT_FOUND_ERR, + MSG_INITIALIZING_DATABASES_SPINNER, MSG_INITIALIZING_PROVER_DATABASE, + MSG_PROOF_COMPRESSOR_CONFIG_NOT_FOUND_ERR, MSG_PROVER_CONFIG_NOT_FOUND_ERR, + MSG_PROVER_INITIALIZED, MSG_SETUP_KEY_PATH_ERROR, }, }; pub(crate) async fn run(args: ProverInitArgs, shell: &Shell) -> anyhow::Result<()> { check_prover_prequisites(shell); + let ecosystem_config = EcosystemConfig::from_file(shell)?; + + let setup_key_path = get_default_setup_key_path(&ecosystem_config)?; + let chain_config = ecosystem_config .load_chain(Some(ecosystem_config.default_chain.clone())) .context(MSG_CHAIN_NOT_FOUND_ERR)?; + let args = args.fill_values_with_prompt(shell, &setup_key_path, &chain_config)?; + + if chain_config.get_general_config().is_err() || chain_config.get_secrets_config().is_err() { + copy_configs(shell, &ecosystem_config.link_to_code, &chain_config.configs)?; + } + let mut general_config = chain_config .get_general_config() .context(MSG_GENERAL_CONFIG_NOT_FOUND_ERR)?; - let setup_key_path = get_default_setup_key_path(&ecosystem_config)?; - - let args = args.fill_values_with_prompt(shell, &setup_key_path)?; - let proof_object_store_config = get_object_store_config(shell, Some(args.proof_store))?; let public_object_store_config = get_object_store_config(shell, args.public_store)?; @@ -72,6 +89,23 @@ pub(crate) async fn run(args: ProverInitArgs, shell: &Shell) -> anyhow::Result<( init_bellman_cuda(shell, args.bellman_cuda_config).await?; + if let Some(prover_db) = &args.database_config { + let spinner = Spinner::new(MSG_INITIALIZING_DATABASES_SPINNER); + + let mut secrets = chain_config.get_secrets_config()?; + set_prover_database(&mut secrets, &prover_db.database_config)?; + secrets.save_with_base_path(shell, &chain_config.configs)?; + initialize_prover_database( + shell, + &prover_db.database_config, + ecosystem_config.link_to_code.clone(), + prover_db.dont_drop, + ) + .await?; + + spinner.finish(); + } + logger::outro(MSG_PROVER_INITIALIZED); Ok(()) } @@ -138,3 +172,29 @@ fn get_object_store_config( Ok(object_store) } + +async fn initialize_prover_database( + shell: &Shell, + prover_db_config: &DatabaseConfig, + link_to_code: PathBuf, + dont_drop: bool, +) -> anyhow::Result<()> { + if global_config().verbose { + logger::debug(MSG_INITIALIZING_PROVER_DATABASE) + } + if !dont_drop { + drop_db_if_exists(prover_db_config) + .await + .context(MSG_FAILED_TO_DROP_PROVER_DATABASE_ERR)?; + init_db(prover_db_config).await?; + } + let path_to_prover_migration = link_to_code.join(PROVER_MIGRATIONS); + migrate_db( + shell, + path_to_prover_migration, + &prover_db_config.full_url(), + ) + .await?; + + Ok(()) +} diff --git a/zk_toolbox/crates/zk_inception/src/commands/prover/run.rs b/zk_toolbox/crates/zk_inception/src/commands/prover/run.rs index 5497db8a21e0..056723836662 100644 --- a/zk_toolbox/crates/zk_inception/src/commands/prover/run.rs +++ b/zk_toolbox/crates/zk_inception/src/commands/prover/run.rs @@ -13,9 +13,10 @@ use super::{ use crate::messages::{ MSG_BELLMAN_CUDA_DIR_ERR, MSG_CHAIN_NOT_FOUND_ERR, MSG_MISSING_COMPONENT_ERR, MSG_RUNNING_COMPRESSOR, MSG_RUNNING_COMPRESSOR_ERR, MSG_RUNNING_PROVER, MSG_RUNNING_PROVER_ERR, - MSG_RUNNING_PROVER_GATEWAY, MSG_RUNNING_PROVER_GATEWAY_ERR, MSG_RUNNING_WITNESS_GENERATOR, - 
MSG_RUNNING_WITNESS_GENERATOR_ERR, MSG_RUNNING_WITNESS_VECTOR_GENERATOR, - MSG_RUNNING_WITNESS_VECTOR_GENERATOR_ERR, MSG_WITNESS_GENERATOR_ROUND_ERR, + MSG_RUNNING_PROVER_GATEWAY, MSG_RUNNING_PROVER_GATEWAY_ERR, MSG_RUNNING_PROVER_JOB_MONITOR, + MSG_RUNNING_WITNESS_GENERATOR, MSG_RUNNING_WITNESS_GENERATOR_ERR, + MSG_RUNNING_WITNESS_VECTOR_GENERATOR, MSG_RUNNING_WITNESS_VECTOR_GENERATOR_ERR, + MSG_WITNESS_GENERATOR_ROUND_ERR, }; pub(crate) async fn run(args: ProverRunArgs, shell: &Shell) -> anyhow::Result<()> { @@ -39,6 +40,7 @@ pub(crate) async fn run(args: ProverRunArgs, shell: &Shell) -> anyhow::Result<() } Some(ProverComponent::Prover) => run_prover(shell, &chain)?, Some(ProverComponent::Compressor) => run_compressor(shell, &chain, &ecosystem_config)?, + Some(ProverComponent::ProverJobMonitor) => run_prover_job_monitor(shell, &chain)?, None => anyhow::bail!(MSG_MISSING_COMPONENT_ERR), } @@ -127,3 +129,13 @@ fn run_compressor( cmd = cmd.with_force_run(); cmd.run().context(MSG_RUNNING_COMPRESSOR_ERR) } + +fn run_prover_job_monitor(shell: &Shell, chain: &ChainConfig) -> anyhow::Result<()> { + logger::info(MSG_RUNNING_PROVER_JOB_MONITOR); + let config_path = chain.path_to_general_config(); + let secrets_path = chain.path_to_secrets_config(); + + let mut cmd = Cmd::new(cmd!(shell, "cargo run --release --bin zksync_prover_job_monitor -- --config-path={config_path} --secrets-path={secrets_path}")); + cmd = cmd.with_force_run(); + cmd.run().context(MSG_RUNNING_PROVER_JOB_MONITOR) +} diff --git a/zk_toolbox/crates/zk_inception/src/main.rs b/zk_toolbox/crates/zk_inception/src/main.rs index 8895b212a59f..cb1b5388196a 100644 --- a/zk_toolbox/crates/zk_inception/src/main.rs +++ b/zk_toolbox/crates/zk_inception/src/main.rs @@ -61,7 +61,7 @@ pub enum InceptionSubcommands { ContractVerifier(ContractVerifierCommands), /// Run dapp-portal Portal(PortalArgs), - /// Update zkSync + /// Update ZKsync #[command(alias = "u")] Update(UpdateArgs), #[command(hide = true)] diff --git a/zk_toolbox/crates/zk_inception/src/messages.rs b/zk_toolbox/crates/zk_inception/src/messages.rs index f0e46aaf4869..9975627025ac 100644 --- a/zk_toolbox/crates/zk_inception/src/messages.rs +++ b/zk_toolbox/crates/zk_inception/src/messages.rs @@ -38,11 +38,11 @@ pub(super) const MSG_ECOSYSTEM_CONFIG_INVALID_ERR: &str = "Invalid ecosystem con pub(super) const MSG_LINK_TO_CODE_SELECTION_CLONE: &str = "Clone for me (recommended)"; pub(super) const MSG_LINK_TO_CODE_SELECTION_PATH: &str = "I have the code already"; pub(super) const MSG_NOT_MAIN_REPO_OR_FORK_ERR: &str = - "It's not a zkSync Era main repository or fork"; + "It's not a ZKsync Era main repository or fork"; pub(super) const MSG_CONFIRM_STILL_USE_FOLDER: &str = "Do you still want to use this folder?"; pub(super) fn msg_path_to_zksync_does_not_exist_err(path: &str) -> String { - format!("Path to zkSync Era repo does not exist: {path:?}") + format!("Path to ZKsync Era repo does not exist: {path:?}") } /// Ecosystem and chain init related messages @@ -57,7 +57,7 @@ pub(super) const MSG_DEPLOY_ECOSYSTEM_PROMPT: &str = pub(super) const MSG_L1_RPC_URL_PROMPT: &str = "What is the RPC URL of the L1 network?"; pub(super) const MSG_DEPLOY_PAYMASTER_PROMPT: &str = "Do you want to deploy Paymaster contract?"; pub(super) const MSG_DEPLOY_ERC20_PROMPT: &str = "Do you want to deploy some test ERC20s?"; -pub(super) const MSG_ECOSYSTEM_CONTRACTS_PATH_PROMPT: &str = "Provide the path to the ecosystem contracts or keep it empty and you will use ZkSync ecosystem config. 
\ +pub(super) const MSG_ECOSYSTEM_CONTRACTS_PATH_PROMPT: &str = "Provide the path to the ecosystem contracts or keep it empty and you will use ZKsync ecosystem config. \ For using this config, you need to have governance wallet"; pub(super) const MSG_L1_RPC_URL_INVALID_ERR: &str = "Invalid RPC URL"; pub(super) const MSG_ECOSYSTEM_CONTRACTS_PATH_INVALID_ERR: &str = "Invalid path"; @@ -84,6 +84,10 @@ pub(super) const MSG_ERA_OBSERVABILITY_ALREADY_SETUP: &str = "Era observability pub(super) const MSG_DOWNLOADING_ERA_OBSERVABILITY_SPINNER: &str = "Downloading era observability..."; +pub(super) fn msg_ecosystem_no_found_preexisting_contract(chains: &str) -> String { + format!("Not found preexisting ecosystem Contracts with chains {chains}") +} + pub(super) fn msg_initializing_chain(chain_name: &str) -> String { format!("Initializing chain {chain_name}") } @@ -259,6 +263,7 @@ pub(super) const MSG_GENERATING_SK_SPINNER: &str = "Generating setup keys..."; pub(super) const MSG_SK_GENERATED: &str = "Setup keys generated successfully"; pub(super) const MSG_MISSING_COMPONENT_ERR: &str = "Missing component"; pub(super) const MSG_RUNNING_PROVER_GATEWAY: &str = "Running gateway"; +pub(super) const MSG_RUNNING_PROVER_JOB_MONITOR: &str = "Running prover job monitor"; pub(super) const MSG_RUNNING_WITNESS_GENERATOR: &str = "Running witness generator"; pub(super) const MSG_RUNNING_WITNESS_VECTOR_GENERATOR: &str = "Running witness vector generator"; pub(super) const MSG_RUNNING_PROVER: &str = "Running prover"; @@ -355,8 +360,8 @@ pub(super) fn msg_downloading_binary_spinner(name: &str, version: &str) -> Strin /// Update related messages pub(super) const MSG_UPDATE_ONLY_CONFIG_HELP: &str = "Update only the config files"; -pub(super) const MSG_UPDATING_ZKSYNC: &str = "Updating ZkSync"; -pub(super) const MSG_ZKSYNC_UPDATED: &str = "ZkSync updated successfully"; +pub(super) const MSG_UPDATING_ZKSYNC: &str = "Updating ZKsync"; +pub(super) const MSG_ZKSYNC_UPDATED: &str = "ZKsync updated successfully"; pub(super) const MSG_PULLING_ZKSYNC_CODE_SPINNER: &str = "Pulling zksync-era repo..."; pub(super) const MSG_UPDATING_SUBMODULES_SPINNER: &str = "Updating submodules..."; pub(super) const MSG_DIFF_GENERAL_CONFIG: &str = diff --git a/zk_toolbox/crates/zk_supervisor/Cargo.toml b/zk_toolbox/crates/zk_supervisor/Cargo.toml index e1225de96d32..e24c88f3ec25 100644 --- a/zk_toolbox/crates/zk_supervisor/Cargo.toml +++ b/zk_toolbox/crates/zk_supervisor/Cargo.toml @@ -23,3 +23,4 @@ xshell.workspace = true serde.workspace = true clap-markdown.workspace = true futures.workspace = true +serde_json.workspace = true diff --git a/zk_toolbox/crates/zk_supervisor/README.md b/zk_toolbox/crates/zk_supervisor/README.md index 4648fe6cb366..c3fac876ace6 100644 --- a/zk_toolbox/crates/zk_supervisor/README.md +++ b/zk_toolbox/crates/zk_supervisor/README.md @@ -18,12 +18,21 @@ This document contains the help content for the `zk_supervisor` command-line pro - [`zk_supervisor test revert`↴](#zk_supervisor-test-revert) - [`zk_supervisor test recovery`↴](#zk_supervisor-test-recovery) - [`zk_supervisor test upgrade`↴](#zk_supervisor-test-upgrade) +- [`zk_supervisor test rust`↴](#zk_supervisor-test-rust) +- [`zk_supervisor test l1-contracts`↴](#zk_supervisor-test-l1-contracts) +- [`zk_supervisor test prover`↴](#zk_supervisor-test-prover) - [`zk_supervisor clean`↴](#zk_supervisor-clean) - [`zk_supervisor clean all`↴](#zk_supervisor-clean-all) - [`zk_supervisor clean containers`↴](#zk_supervisor-clean-containers) - [`zk_supervisor clean 
diff --git a/zk_toolbox/crates/zk_supervisor/Cargo.toml b/zk_toolbox/crates/zk_supervisor/Cargo.toml
index e1225de96d32..e24c88f3ec25 100644
--- a/zk_toolbox/crates/zk_supervisor/Cargo.toml
+++ b/zk_toolbox/crates/zk_supervisor/Cargo.toml
@@ -23,3 +23,4 @@ xshell.workspace = true
 serde.workspace = true
 clap-markdown.workspace = true
 futures.workspace = true
+serde_json.workspace = true
diff --git a/zk_toolbox/crates/zk_supervisor/README.md b/zk_toolbox/crates/zk_supervisor/README.md
index 4648fe6cb366..c3fac876ace6 100644
--- a/zk_toolbox/crates/zk_supervisor/README.md
+++ b/zk_toolbox/crates/zk_supervisor/README.md
@@ -18,12 +18,21 @@ This document contains the help content for the `zk_supervisor` command-line pr
 - [`zk_supervisor test revert`↴](#zk_supervisor-test-revert)
 - [`zk_supervisor test recovery`↴](#zk_supervisor-test-recovery)
 - [`zk_supervisor test upgrade`↴](#zk_supervisor-test-upgrade)
+- [`zk_supervisor test rust`↴](#zk_supervisor-test-rust)
+- [`zk_supervisor test l1-contracts`↴](#zk_supervisor-test-l1-contracts)
+- [`zk_supervisor test prover`↴](#zk_supervisor-test-prover)
 - [`zk_supervisor clean`↴](#zk_supervisor-clean)
 - [`zk_supervisor clean all`↴](#zk_supervisor-clean-all)
 - [`zk_supervisor clean containers`↴](#zk_supervisor-clean-containers)
 - [`zk_supervisor clean contracts-cache`↴](#zk_supervisor-clean-contracts-cache)
 - [`zk_supervisor snapshot`↴](#zk_supervisor-snapshot)
 - [`zk_supervisor snapshot create`↴](#zk_supervisor-snapshot-create)
+- [`zk_supervisor lint`↴](#zk_supervisor-lint)
+- [`zk_supervisor fmt`↴](#zk_supervisor-fmt)
+- [`zk_supervisor fmt rustfmt`↴](#zk_supervisor-fmt-rustfmt)
+- [`zk_supervisor fmt contract`↴](#zk_supervisor-fmt-contract)
+- [`zk_supervisor fmt prettier`↴](#zk_supervisor-fmt-prettier)
+- [`zk_supervisor prover-version`↴](#zk_supervisor-prover-version)
 
 ## `zk_supervisor`
 
@@ -37,6 +46,9 @@ ZK Toolbox is a set of tools for working with zk stack.
 - `test` — Run tests
 - `clean` — Clean artifacts
 - `snapshot` — Snapshots creator
+- `lint` — Lint code
+- `fmt` — Format code
+- `prover-version` — Protocol version used by provers
 
 ###### **Options:**
 
@@ -182,6 +194,9 @@ Run tests
 - `revert` — Run revert tests
 - `recovery` — Run recovery tests
 - `upgrade` — Run upgrade tests
+- `rust` — Run unit-tests, accepts optional cargo test flags
+- `l1-contracts` — Run L1 contracts tests
+- `prover` — Run prover tests
 
 ## `zk_supervisor test integration`
 
@@ -220,6 +235,28 @@ Run upgrade tests
 
 **Usage:** `zk_supervisor test upgrade`
 
+## `zk_supervisor test rust`
+
+Run unit-tests, accepts optional cargo test flags
+
+**Usage:** `zk_supervisor test rust [OPTIONS]`
+
+###### **Options:**
+
+- `--options <OPTIONS>` — Cargo test flags
+
+## `zk_supervisor test l1-contracts`
+
+Run L1 contracts tests
+
+**Usage:** `zk_supervisor test l1-contracts`
+
+## `zk_supervisor test prover`
+
+Run prover tests
+
+**Usage:** `zk_supervisor test prover`
+
 ## `zk_supervisor clean`
 
 Clean artifacts
@@ -264,6 +301,59 @@ Snapshots creator
 
 **Usage:** `zk_supervisor snapshot create`
 
+## `zk_supervisor lint`
+
+Lint code
+
+**Usage:** `zk_supervisor lint [OPTIONS]`
+
+###### **Options:**
+
+- `-c`, `--check`
+- `-e`, `--extensions <EXTENSIONS>`
+
+  Possible values: `md`, `sol`, `js`, `ts`, `rs`
+
+## `zk_supervisor fmt`
+
+Format code
+
+**Usage:** `zk_supervisor fmt [OPTIONS] [COMMAND]`
+
+###### **Subcommands:**
+
+- `rustfmt` —
+- `contract` —
+- `prettier` —
+
+###### **Options:**
+
+- `-c`, `--check`
+
+## `zk_supervisor fmt rustfmt`
+
+**Usage:** `zk_supervisor fmt rustfmt`
+
+## `zk_supervisor fmt contract`
+
+**Usage:** `zk_supervisor fmt contract`
+
+## `zk_supervisor fmt prettier`
+
+**Usage:** `zk_supervisor fmt prettier [OPTIONS]`
+
+###### **Options:**
+
+- `-e`, `--extensions <EXTENSIONS>`
+
+  Possible values: `md`, `sol`, `js`, `ts`, `rs`
+
+## `zk_supervisor prover-version`
+
+Protocol version used by provers
+
+**Usage:** `zk_supervisor prover-version`
+
 This document was generated automatically by
diff --git a/zk_toolbox/crates/zk_supervisor/src/commands/database/reset.rs b/zk_toolbox/crates/zk_supervisor/src/commands/database/reset.rs
index d25f2a8cd54b..88f2069bf3ae 100644
--- a/zk_toolbox/crates/zk_supervisor/src/commands/database/reset.rs
+++ b/zk_toolbox/crates/zk_supervisor/src/commands/database/reset.rs
@@ -26,7 +26,7 @@ pub async fn run(shell: &Shell, args: DatabaseCommonArgs) -> anyhow::Result<()>
     let dals = get_dals(shell, &args.selected_dals)?;
 
     for dal in dals {
-        logger::info(&msg_database_loading(MSG_DATABASE_RESET_GERUND, &dal.path));
+        logger::info(msg_database_loading(MSG_DATABASE_RESET_GERUND, &dal.path));
         reset_database(shell, ecoseystem_config.link_to_code.clone(), dal).await?;
     }
 
diff --git a/zk_toolbox/crates/zk_supervisor/src/commands/fmt.rs b/zk_toolbox/crates/zk_supervisor/src/commands/fmt.rs
index fa0f4cef7bfe..5ee0c4efb343 100644
--- a/zk_toolbox/crates/zk_supervisor/src/commands/fmt.rs
+++ b/zk_toolbox/crates/zk_supervisor/src/commands/fmt.rs
@@ -6,16 +6,16 @@ use config::EcosystemConfig;
 use xshell::{cmd, Shell};
 
 use crate::{
-    commands::lint_utils::{get_unignored_files, Extension},
+    commands::lint_utils::{get_unignored_files, Target},
     messages::{
         msg_running_fmt_for_extension_spinner, msg_running_fmt_for_extensions_spinner,
         msg_running_rustfmt_for_dir_spinner, MSG_RUNNING_CONTRACTS_FMT_SPINNER,
     },
 };
 
-async fn prettier(shell: Shell, extension: Extension, check: bool) -> anyhow::Result<()> {
-    let spinner = Spinner::new(&msg_running_fmt_for_extension_spinner(extension));
-    let files = get_unignored_files(&shell, &extension)?;
+async fn prettier(shell: Shell, target: Target, check: bool) -> anyhow::Result<()> {
+    let spinner = Spinner::new(&msg_running_fmt_for_extension_spinner(target));
+    let files = get_unignored_files(&shell, &target)?;
 
     if files.is_empty() {
         return Ok(());
@@ -23,7 +23,7 @@ async fn prettier(shell: Shell, extension: Extension, check: bool) -> anyhow::Re
     spinner.freeze();
 
     let mode = if check { "--check" } else { "--write" };
-    let config = format!("etc/prettier-config/{extension}.js");
+    let config = format!("etc/prettier-config/{target}.js");
     Ok(
         Cmd::new(cmd!(shell, "yarn --silent prettier {mode} --config {config}").args(files))
             .run()?,
@@ -68,7 +68,7 @@ pub enum Formatter {
     Contract,
     Prettier {
         #[arg(short, long)]
-        extensions: Vec<Extension>,
+        targets: Vec<Target>,
     },
 }
 
@@ -85,8 +85,7 @@ pub async fn run(shell: Shell, args: FmtArgs) -> anyhow::Result<()> {
     match args.formatter {
         None => {
             let mut tasks = vec![];
-            let extensions: Vec<_> =
-                vec![Extension::Js, Extension::Ts, Extension::Md, Extension::Sol];
+            let extensions: Vec<_> = vec![Target::Js, Target::Ts, Target::Md, Target::Sol];
             let spinner = Spinner::new(&msg_running_fmt_for_extensions_spinner(&extensions));
             spinner.freeze();
             for ext in extensions {
@@ -108,13 +107,13 @@ pub async fn run(shell: Shell, args: FmtArgs) -> anyhow::Result<()> {
                 }
             });
         }
-        Some(Formatter::Prettier { mut extensions }) => {
-            if extensions.is_empty() {
-                extensions = vec![Extension::Js, Extension::Ts, Extension::Md, Extension::Sol];
+        Some(Formatter::Prettier { mut targets }) => {
+            if targets.is_empty() {
+                targets = vec![Target::Js, Target::Ts, Target::Md, Target::Sol];
             }
-            let spinner = Spinner::new(&msg_running_fmt_for_extensions_spinner(&extensions));
-            for ext in extensions {
-                prettier(shell.clone(), ext, args.check).await?
+            let spinner = Spinner::new(&msg_running_fmt_for_extensions_spinner(&targets));
+            for target in targets {
+                prettier(shell.clone(), target, args.check).await?
             }
             spinner.finish()
         }
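The prettier invocation in fmt.rs derives its config path purely from the `Display` name of the target: the `Target` enum introduced in lint_utils.rs further down derives `strum::Display` with `serialize_all = "lowercase"`, so `Target::Ts` resolves to `etc/prettier-config/ts.js`, `Target::Sol` to `etc/prettier-config/sol.js`, and so on. A dependency-free sketch of that mapping, with a hand-written `Display` impl standing in for the strum derive:

use std::fmt;

// Stand-in for the real `Target` in lint_utils.rs, which derives
// `strum::Display` with `serialize_all = "lowercase"`.
#[derive(Debug, Clone, Copy)]
enum Target {
    Md,
    Sol,
    Js,
    Ts,
    Rs,
    Contracts,
}

impl fmt::Display for Target {
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        let s = match self {
            Target::Md => "md",
            Target::Sol => "sol",
            Target::Js => "js",
            Target::Ts => "ts",
            Target::Rs => "rs",
            Target::Contracts => "contracts",
        };
        f.write_str(s)
    }
}

// Mirrors `format!("etc/prettier-config/{target}.js")` in fmt.rs.
fn prettier_config(target: Target) -> String {
    format!("etc/prettier-config/{target}.js")
}

fn main() {
    assert_eq!(prettier_config(Target::Ts), "etc/prettier-config/ts.js");
    assert_eq!(prettier_config(Target::Sol), "etc/prettier-config/sol.js");
}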
diff --git a/zk_toolbox/crates/zk_supervisor/src/commands/lint.rs b/zk_toolbox/crates/zk_supervisor/src/commands/lint.rs
index 17c8680f1d24..1861d164ce44 100644
--- a/zk_toolbox/crates/zk_supervisor/src/commands/lint.rs
+++ b/zk_toolbox/crates/zk_supervisor/src/commands/lint.rs
@@ -4,7 +4,7 @@ use config::EcosystemConfig;
 use xshell::{cmd, Shell};
 
 use crate::{
-    commands::lint_utils::{get_unignored_files, Extension},
+    commands::lint_utils::{get_unignored_files, Target},
     messages::{
         msg_running_linter_for_extension_spinner, msg_running_linters_for_files,
         MSG_LINT_CONFIG_PATH_ERR, MSG_RUNNING_CONTRACTS_LINTER_SPINNER,
@@ -17,31 +17,32 @@ const CONFIG_PATH: &str = "etc/lint-config";
 pub struct LintArgs {
     #[clap(long, short = 'c')]
     pub check: bool,
-    #[clap(long, short = 'e')]
-    pub extensions: Vec<Extension>,
+    #[clap(long, short = 't')]
+    pub targets: Vec<Target>,
 }
 
 pub fn run(shell: &Shell, args: LintArgs) -> anyhow::Result<()> {
-    let extensions = if args.extensions.is_empty() {
+    let targets = if args.targets.is_empty() {
         vec![
-            Extension::Rs,
-            Extension::Md,
-            Extension::Sol,
-            Extension::Js,
-            Extension::Ts,
+            Target::Rs,
+            Target::Md,
+            Target::Sol,
+            Target::Js,
+            Target::Ts,
+            Target::Contracts,
         ]
     } else {
-        args.extensions.clone()
+        args.targets.clone()
     };
 
-    logger::info(msg_running_linters_for_files(&extensions));
+    logger::info(msg_running_linters_for_files(&targets));
 
     let ecosystem = EcosystemConfig::from_file(shell)?;
 
-    for extension in extensions {
-        match extension {
-            Extension::Rs => lint_rs(shell, &ecosystem, args.check)?,
-            Extension::Sol => lint_contracts(shell, &ecosystem, args.check)?,
+    for target in targets {
+        match target {
+            Target::Rs => lint_rs(shell, &ecosystem, args.check)?,
+            Target::Contracts => lint_contracts(shell, &ecosystem, args.check)?,
             ext => lint(shell, &ecosystem, &ext, args.check)?,
         }
     }
@@ -50,7 +51,7 @@ pub fn run(shell: &Shell, args: LintArgs) -> anyhow::Result<()> {
 }
 
 fn lint_rs(shell: &Shell, ecosystem: &EcosystemConfig, check: bool) -> anyhow::Result<()> {
-    let spinner = Spinner::new(&msg_running_linter_for_extension_spinner(&Extension::Rs));
+    let spinner = Spinner::new(&msg_running_linter_for_extension_spinner(&Target::Rs));
 
     let link_to_code = &ecosystem.link_to_code;
     let lint_to_prover = &ecosystem.link_to_code.join("prover");
@@ -61,14 +62,7 @@ fn lint_rs(shell: &Shell, ecosystem: &EcosystemConfig, check: bool) -> anyhow::R
     for path in paths {
         let _dir_guard = shell.push_dir(path);
         let mut cmd = cmd!(shell, "cargo clippy");
-        let common_args = &[
-            "--locked",
-            "--",
-            "-D",
-            "warnings",
-            "-D",
-            "unstable_features",
-        ];
+        let common_args = &["--locked", "--", "-D", "warnings"];
         if !check {
             cmd = cmd.args(&["--fix", "--allow-dirty"]);
         }
@@ -79,34 +73,35 @@ fn lint_rs(shell: &Shell, ecosystem: &EcosystemConfig, check: bool) -> anyhow::R
 
     Ok(())
 }
 
-fn get_linter(extension: &Extension) -> Vec<String> {
-    match extension {
-        Extension::Rs => vec!["cargo".to_string(), "clippy".to_string()],
-        Extension::Md => vec!["markdownlint".to_string()],
-        Extension::Sol => vec!["solhint".to_string()],
-        Extension::Js => vec!["eslint".to_string()],
-        Extension::Ts => vec!["eslint".to_string(), "--ext".to_string(), "ts".to_string()],
+fn get_linter(target: &Target) -> Vec<String> {
+    match target {
+        Target::Rs => vec!["cargo".to_string(), "clippy".to_string()],
+        Target::Md => vec!["markdownlint".to_string()],
+        Target::Sol => vec!["solhint".to_string()],
+        Target::Js => vec!["eslint".to_string()],
+        Target::Ts => vec!["eslint".to_string(), "--ext".to_string(), "ts".to_string()],
+        Target::Contracts => vec![],
     }
 }
 
 fn lint(
     shell: &Shell,
     ecosystem: &EcosystemConfig,
-    extension: &Extension,
+    target: &Target,
     check: bool,
 ) -> anyhow::Result<()> {
-    let spinner = Spinner::new(&msg_running_linter_for_extension_spinner(extension));
+    let spinner = Spinner::new(&msg_running_linter_for_extension_spinner(target));
     let _dir_guard = shell.push_dir(&ecosystem.link_to_code);
-    let files = get_unignored_files(shell, extension)?;
+    let files = get_unignored_files(shell, target)?;
     let cmd = cmd!(shell, "yarn");
     let config_path = ecosystem.link_to_code.join(CONFIG_PATH);
-    let config_path = config_path.join(format!("{}.js", extension));
+    let config_path = config_path.join(format!("{}.js", target));
     let config_path = config_path
         .to_str()
         .expect(MSG_LINT_CONFIG_PATH_ERR)
         .to_string();
-    let linter = get_linter(extension);
+    let linter = get_linter(target);
 
     let fix_option = if check {
         vec![]
@@ -128,8 +123,6 @@ fn lint(
 }
 
 fn lint_contracts(shell: &Shell, ecosystem: &EcosystemConfig, check: bool) -> anyhow::Result<()> {
-    lint(shell, ecosystem, &Extension::Sol, check)?;
-
     let spinner = Spinner::new(MSG_RUNNING_CONTRACTS_LINTER_SPINNER);
     let _dir_guard = shell.push_dir(&ecosystem.link_to_code);
     let cmd = cmd!(shell, "yarn");
vec!["markdownlint".to_string()], + Target::Sol => vec!["solhint".to_string()], + Target::Js => vec!["eslint".to_string()], + Target::Ts => vec!["eslint".to_string(), "--ext".to_string(), "ts".to_string()], + Target::Contracts => vec![], } } fn lint( shell: &Shell, ecosystem: &EcosystemConfig, - extension: &Extension, + target: &Target, check: bool, ) -> anyhow::Result<()> { - let spinner = Spinner::new(&msg_running_linter_for_extension_spinner(extension)); + let spinner = Spinner::new(&msg_running_linter_for_extension_spinner(target)); let _dir_guard = shell.push_dir(&ecosystem.link_to_code); - let files = get_unignored_files(shell, extension)?; + let files = get_unignored_files(shell, target)?; let cmd = cmd!(shell, "yarn"); let config_path = ecosystem.link_to_code.join(CONFIG_PATH); - let config_path = config_path.join(format!("{}.js", extension)); + let config_path = config_path.join(format!("{}.js", target)); let config_path = config_path .to_str() .expect(MSG_LINT_CONFIG_PATH_ERR) .to_string(); - let linter = get_linter(extension); + let linter = get_linter(target); let fix_option = if check { vec![] @@ -128,8 +123,6 @@ fn lint( } fn lint_contracts(shell: &Shell, ecosystem: &EcosystemConfig, check: bool) -> anyhow::Result<()> { - lint(shell, ecosystem, &Extension::Sol, check)?; - let spinner = Spinner::new(MSG_RUNNING_CONTRACTS_LINTER_SPINNER); let _dir_guard = shell.push_dir(&ecosystem.link_to_code); let cmd = cmd!(shell, "yarn"); diff --git a/zk_toolbox/crates/zk_supervisor/src/commands/lint_utils.rs b/zk_toolbox/crates/zk_supervisor/src/commands/lint_utils.rs index 92fac6ea815f..5b5f3a91bcec 100644 --- a/zk_toolbox/crates/zk_supervisor/src/commands/lint_utils.rs +++ b/zk_toolbox/crates/zk_supervisor/src/commands/lint_utils.rs @@ -2,7 +2,7 @@ use clap::ValueEnum; use strum::EnumIter; use xshell::{cmd, Shell}; -const IGNORED_DIRS: [&str; 18] = [ +const IGNORED_DIRS: [&str; 19] = [ "target", "node_modules", "volumes", @@ -22,6 +22,7 @@ const IGNORED_DIRS: [&str; 18] = [ "cache-zk", // Ignore directories with OZ and forge submodules. 
"contracts/l1-contracts/lib", + "contracts/lib", ]; const IGNORED_FILES: [&str; 4] = [ @@ -33,15 +34,16 @@ const IGNORED_FILES: [&str; 4] = [ #[derive(Debug, ValueEnum, EnumIter, strum::Display, PartialEq, Eq, Clone, Copy)] #[strum(serialize_all = "lowercase")] -pub enum Extension { +pub enum Target { Md, Sol, Js, Ts, Rs, + Contracts, } -pub fn get_unignored_files(shell: &Shell, extension: &Extension) -> anyhow::Result> { +pub fn get_unignored_files(shell: &Shell, target: &Target) -> anyhow::Result> { let mut files = Vec::new(); let output = cmd!(shell, "git ls-files --recurse-submodules").read()?; @@ -49,7 +51,7 @@ pub fn get_unignored_files(shell: &Shell, extension: &Extension) -> anyhow::Resu let path = line.to_string(); if !IGNORED_DIRS.iter().any(|dir| path.contains(dir)) && !IGNORED_FILES.contains(&path.as_str()) - && path.ends_with(&format!(".{}", extension)) + && path.ends_with(&format!(".{}", target)) { files.push(path); } diff --git a/zk_toolbox/crates/zk_supervisor/src/commands/mod.rs b/zk_toolbox/crates/zk_supervisor/src/commands/mod.rs index 99a8fa5e0a5f..181ce50c2134 100644 --- a/zk_toolbox/crates/zk_supervisor/src/commands/mod.rs +++ b/zk_toolbox/crates/zk_supervisor/src/commands/mod.rs @@ -3,5 +3,6 @@ pub mod database; pub mod fmt; pub mod lint; pub(crate) mod lint_utils; +pub mod prover_version; pub mod snapshot; pub mod test; diff --git a/zk_toolbox/crates/zk_supervisor/src/commands/prover_version.rs b/zk_toolbox/crates/zk_supervisor/src/commands/prover_version.rs new file mode 100644 index 000000000000..479f796294fa --- /dev/null +++ b/zk_toolbox/crates/zk_supervisor/src/commands/prover_version.rs @@ -0,0 +1,41 @@ +use std::{fs, path::Path}; + +use common::logger; +use config::EcosystemConfig; +use xshell::{cmd, Shell}; + +pub async fn run(shell: &Shell) -> anyhow::Result<()> { + let link_to_code = EcosystemConfig::from_file(shell)?.link_to_code; + let link_to_prover = link_to_code.join("prover"); + + let protocol_version = get_protocol_version(shell, &link_to_prover).await?; + let snark_wrapper = get_snark_wrapper(&link_to_prover).await?; + + logger::info(format!( + "Current protocol version found in zksync-era: {}, snark_wrapper: {}", + protocol_version, snark_wrapper + )); + + Ok(()) +} + +async fn get_protocol_version(shell: &Shell, link_to_prover: &Path) -> anyhow::Result { + shell.change_dir(link_to_prover); + let protocol_version = cmd!(shell, "cargo run --release --bin prover_version").read()?; + + Ok(protocol_version) +} + +async fn get_snark_wrapper(link_to_prover: &Path) -> anyhow::Result { + let path = + link_to_prover.join("crates/bin/vk_setup_data_generator_server_fri/data/commitments.json"); + let file = fs::File::open(path).expect("Could not find commitments file in zksync-era"); + let json: serde_json::Value = + serde_json::from_reader(file).expect("Could not parse commitments.json"); + + let snark_wrapper = json + .get("snark_wrapper") + .expect("Could not find snark_wrapper in commitments.json"); + + Ok(snark_wrapper.to_string()) +} diff --git a/zk_toolbox/crates/zk_supervisor/src/commands/test/prover.rs b/zk_toolbox/crates/zk_supervisor/src/commands/test/prover.rs index 3d8131a180c3..4e9c4fc25283 100644 --- a/zk_toolbox/crates/zk_supervisor/src/commands/test/prover.rs +++ b/zk_toolbox/crates/zk_supervisor/src/commands/test/prover.rs @@ -6,7 +6,7 @@ use crate::messages::MSG_PROVER_TEST_SUCCESS; pub fn run(shell: &Shell) -> anyhow::Result<()> { let ecosystem = EcosystemConfig::from_file(shell)?; - let _dir_guard = 
diff --git a/zk_toolbox/crates/zk_supervisor/src/commands/test/prover.rs b/zk_toolbox/crates/zk_supervisor/src/commands/test/prover.rs
index 3d8131a180c3..4e9c4fc25283 100644
--- a/zk_toolbox/crates/zk_supervisor/src/commands/test/prover.rs
+++ b/zk_toolbox/crates/zk_supervisor/src/commands/test/prover.rs
@@ -6,7 +6,7 @@ use crate::messages::MSG_PROVER_TEST_SUCCESS;
 
 pub fn run(shell: &Shell) -> anyhow::Result<()> {
     let ecosystem = EcosystemConfig::from_file(shell)?;
-    let _dir_guard = shell.push_dir(&ecosystem.link_to_code.join("prover"));
+    let _dir_guard = shell.push_dir(ecosystem.link_to_code.join("prover"));
 
     Cmd::new(cmd!(shell, "cargo test --release --workspace --locked"))
         .with_force_run()
diff --git a/zk_toolbox/crates/zk_supervisor/src/main.rs b/zk_toolbox/crates/zk_supervisor/src/main.rs
index 965def9263aa..9a1c1ad74bcd 100644
--- a/zk_toolbox/crates/zk_supervisor/src/main.rs
+++ b/zk_toolbox/crates/zk_supervisor/src/main.rs
@@ -10,9 +10,9 @@ use common::{
 };
 use config::EcosystemConfig;
 use messages::{
-    msg_global_chain_does_not_exist, MSG_SUBCOMMAND_CLEAN, MSG_SUBCOMMAND_DATABASE_ABOUT,
-    MSG_SUBCOMMAND_FMT_ABOUT, MSG_SUBCOMMAND_LINT_ABOUT, MSG_SUBCOMMAND_SNAPSHOTS_CREATOR_ABOUT,
-    MSG_SUBCOMMAND_TESTS_ABOUT,
+    msg_global_chain_does_not_exist, MSG_PROVER_VERSION_ABOUT, MSG_SUBCOMMAND_CLEAN,
+    MSG_SUBCOMMAND_DATABASE_ABOUT, MSG_SUBCOMMAND_FMT_ABOUT, MSG_SUBCOMMAND_LINT_ABOUT,
+    MSG_SUBCOMMAND_SNAPSHOTS_CREATOR_ABOUT, MSG_SUBCOMMAND_TESTS_ABOUT,
 };
 use xshell::Shell;
 
@@ -47,6 +47,8 @@ enum SupervisorSubcommands {
     Fmt(FmtArgs),
     #[command(hide = true)]
     Markdown,
+    #[command(about = MSG_PROVER_VERSION_ABOUT)]
+    ProverVersion,
 }
 
 #[derive(Parser, Debug)]
@@ -103,6 +105,7 @@ async fn run_subcommand(args: Supervisor, shell: &Shell) -> anyhow::Result<()> {
         }
         SupervisorSubcommands::Lint(args) => commands::lint::run(shell, args)?,
         SupervisorSubcommands::Fmt(args) => commands::fmt::run(shell.clone(), args).await?,
+        SupervisorSubcommands::ProverVersion => commands::prover_version::run(shell).await?,
     }
     Ok(())
 }
diff --git a/zk_toolbox/crates/zk_supervisor/src/messages.rs b/zk_toolbox/crates/zk_supervisor/src/messages.rs
index df0cf0c311df..00e49131de77 100644
--- a/zk_toolbox/crates/zk_supervisor/src/messages.rs
+++ b/zk_toolbox/crates/zk_supervisor/src/messages.rs
@@ -1,4 +1,4 @@
-use crate::commands::lint_utils::Extension;
+use crate::commands::lint_utils::Target;
 
 // Ecosystem related messages
 pub(super) const MSG_CHAIN_NOT_FOUND_ERR: &str = "Chain not found";
@@ -8,6 +8,7 @@ pub(super) fn msg_global_chain_does_not_exist(chain: &str, available_chains: &st
 }
 
 // Subcommands help
+pub(super) const MSG_PROVER_VERSION_ABOUT: &str = "Protocol version used by provers";
 pub(super) const MSG_SUBCOMMAND_DATABASE_ABOUT: &str = "Database related commands";
 pub(super) const MSG_SUBCOMMAND_TESTS_ABOUT: &str = "Run tests";
 pub(super) const MSG_SUBCOMMAND_CLEAN: &str = "Clean artifacts";
@@ -151,28 +152,25 @@ pub(super) const MSG_CONTRACTS_CLEANING_FINISHED: &str =
 pub(super) const MSG_RUNNING_SNAPSHOT_CREATOR: &str = "Running snapshot creator";
 
 // Lint related messages
-pub(super) fn msg_running_linters_for_files(extensions: &[Extension]) -> String {
-    let extensions: Vec<String> = extensions.iter().map(|e| format!(".{}", e)).collect();
-    format!(
-        "Running linters for files with extensions: {:?}",
-        extensions
-    )
+pub(super) fn msg_running_linters_for_files(targets: &[Target]) -> String {
+    let targets: Vec<String> = targets.iter().map(|e| format!(".{}", e)).collect();
+    format!("Running linters for targets: {:?}", targets)
 }
 
-pub(super) fn msg_running_linter_for_extension_spinner(extension: &Extension) -> String {
-    format!("Running linter for files with extension: .{}", extension)
+pub(super) fn msg_running_linter_for_extension_spinner(target: &Target) -> String {
+    format!("Running linter for files with extension: .{}", target)
 }
 
-pub(super) fn msg_running_fmt_for_extension_spinner(extension: Extension) -> String {
-    format!("Running prettier for: {extension:?}")
+pub(super) fn msg_running_fmt_for_extension_spinner(target: Target) -> String {
+    format!("Running prettier for: {target:?}")
 }
 
 pub(super) fn msg_running_rustfmt_for_dir_spinner(dir: &str) -> String {
     format!("Running rustfmt for: {dir:?}")
 }
 
-pub(super) fn msg_running_fmt_for_extensions_spinner(extensions: &[Extension]) -> String {
-    format!("Running prettier for: {extensions:?} and rustfmt")
+pub(super) fn msg_running_fmt_for_extensions_spinner(targets: &[Target]) -> String {
+    format!("Running prettier for: {targets:?} and rustfmt")
 }
 
 pub(super) const MSG_LINT_CONFIG_PATH_ERR: &str = "Lint config path error";
diff --git a/zk_toolbox/rust-toolchain b/zk_toolbox/rust-toolchain
index 54227249d1ff..aaceec04e040 100644
--- a/zk_toolbox/rust-toolchain
+++ b/zk_toolbox/rust-toolchain
@@ -1 +1 @@
-1.78.0
+1.80.0