diff --git a/.github/release-please/manifest.json b/.github/release-please/manifest.json index 4c1d3095bc24..811c773b6f54 100644 --- a/.github/release-please/manifest.json +++ b/.github/release-please/manifest.json @@ -1,5 +1,5 @@ { - "core": "24.23.0", + "core": "24.24.0", "prover": "16.5.0", "zk_toolbox": "0.1.2" } diff --git a/.github/workflows/build-docker-from-tag.yml b/.github/workflows/build-docker-from-tag.yml index cd222a6e43bb..791f44117477 100644 --- a/.github/workflows/build-docker-from-tag.yml +++ b/.github/workflows/build-docker-from-tag.yml @@ -23,7 +23,7 @@ concurrency: docker-build jobs: setup: name: Setup - runs-on: [ubuntu-latest] + runs-on: [ ubuntu-latest ] outputs: image_tag_suffix: ${{ steps.set.outputs.image_tag_suffix }} prover_fri_gpu_key_id: ${{ steps.extract-prover-fri-setup-key-ids.outputs.gpu_short_commit_sha }} @@ -48,7 +48,7 @@ jobs: build-push-core-images: name: Build and push image - needs: [setup] + needs: [ setup ] uses: ./.github/workflows/build-core-template.yml if: contains(github.ref_name, 'core') secrets: @@ -60,7 +60,7 @@ jobs: build-push-tee-prover-images: name: Build and push images - needs: [setup] + needs: [ setup ] uses: ./.github/workflows/build-tee-prover-template.yml if: contains(github.ref_name, 'core') secrets: @@ -72,7 +72,7 @@ jobs: build-push-contract-verifier: name: Build and push image - needs: [setup] + needs: [ setup ] uses: ./.github/workflows/build-contract-verifier-template.yml if: contains(github.ref_name, 'contract_verifier') secrets: @@ -83,20 +83,20 @@ jobs: build-push-prover-images: name: Build and push image - needs: [setup] + needs: [ setup ] uses: ./.github/workflows/build-prover-template.yml if: contains(github.ref_name, 'prover') with: image_tag_suffix: ${{ needs.setup.outputs.image_tag_suffix }} ERA_BELLMAN_CUDA_RELEASE: ${{ vars.ERA_BELLMAN_CUDA_RELEASE }} - CUDA_ARCH: "60;70;75;89" + CUDA_ARCH: "60;70;75;80;89" secrets: DOCKERHUB_USER: ${{ secrets.DOCKERHUB_USER }} DOCKERHUB_TOKEN: ${{ secrets.DOCKERHUB_TOKEN }} build-push-witness-generator-image-avx512: name: Build and push image - needs: [setup] + needs: [ setup ] uses: ./.github/workflows/build-witness-generator-template.yml if: contains(github.ref_name, 'prover') with: @@ -110,7 +110,7 @@ jobs: build-gar-prover-fri-gpu: name: Build GAR prover FRI GPU - needs: [setup, build-push-prover-images] + needs: [ setup, build-push-prover-images ] uses: ./.github/workflows/build-prover-fri-gpu-gar.yml if: contains(github.ref_name, 'prover') with: diff --git a/.github/workflows/build-prover-fri-gpu-gar.yml b/.github/workflows/build-prover-fri-gpu-gar.yml index b13fca82445a..c0ea060b07e9 100644 --- a/.github/workflows/build-prover-fri-gpu-gar.yml +++ b/.github/workflows/build-prover-fri-gpu-gar.yml @@ -34,13 +34,13 @@ jobs: gcloud auth print-access-token --lifetime=7200 --impersonate-service-account=gha-ci-runners@matterlabs-infra.iam.gserviceaccount.com | docker login -u oauth2accesstoken --password-stdin https://us-docker.pkg.dev - name: Set up QEMU - uses: docker/setup-qemu-action@v2 + uses: docker/setup-qemu-action@49b3bc8e6bdd4a60e6116a5414239cba5943d3cf # v3.2.0 - name: Set up Docker Buildx - uses: docker/setup-buildx-action@v2 + uses: docker/setup-buildx-action@988b5a0280414f521da01fcc63a27aeeb4b104db # v3.6.1 - name: Build and push - uses: docker/build-push-action@v4 + uses: docker/build-push-action@5cd11c3a4ced054e52742c5fd54dca954e0edd85 # v6.7.0 with: context: docker/prover-gpu-fri-gar build-args: | diff --git a/.github/workflows/build-prover-template.yml 
b/.github/workflows/build-prover-template.yml index 84e1b4f0f5d0..4f3cad7f1d02 100644 --- a/.github/workflows/build-prover-template.yml +++ b/.github/workflows/build-prover-template.yml @@ -180,7 +180,7 @@ jobs: - witness-vector-generator steps: - name: Set up Docker Buildx - uses: docker/setup-buildx-action@v3 + uses: docker/setup-buildx-action@988b5a0280414f521da01fcc63a27aeeb4b104db # v3.6.1 - name: Login to us-central1 GAR run: | diff --git a/.github/workflows/build-tee-prover-template.yml b/.github/workflows/build-tee-prover-template.yml index 21c7f9340ba0..0e5b80d2e3a2 100644 --- a/.github/workflows/build-tee-prover-template.yml +++ b/.github/workflows/build-tee-prover-template.yml @@ -28,15 +28,15 @@ jobs: IMAGE_TAG_SUFFIX: ${{ inputs.image_tag_suffix }} runs-on: [matterlabs-ci-runner-high-performance] steps: - - uses: actions/checkout@v4 + - uses: actions/checkout@a5ac7e51b41094c92402da3b24376905380afc29 # v4 if: ${{ github.event_name == 'workflow_dispatch' }} with: ref: ${{ github.event.inputs.target_branch }} - - uses: actions/checkout@v4 + - uses: actions/checkout@a5ac7e51b41094c92402da3b24376905380afc29 # v4 if: ${{ github.event_name != 'workflow_dispatch' }} - - uses: cachix/install-nix-action@v27 + - uses: cachix/install-nix-action@ba0dd844c9180cbf77aa72a116d6fbc515d0e87b # v27 with: extra_nix_config: | access-tokens = github.com=${{ github.token }} @@ -45,7 +45,7 @@ jobs: sandbox = true - name: Setup Attic cache - uses: ryanccn/attic-action@v0 + uses: ryanccn/attic-action@618a980988d704a7709daeea88526146acd1d45f # v0.2.1 with: endpoint: https://attic.teepot.org/ cache: tee-pot diff --git a/.github/workflows/cargo-license.yaml b/.github/workflows/cargo-license.yaml index b1909fc75039..72eb8d0d865b 100644 --- a/.github/workflows/cargo-license.yaml +++ b/.github/workflows/cargo-license.yaml @@ -6,3 +6,6 @@ jobs: steps: - uses: actions/checkout@692973e3d937129bcbf40652eb9f2f61becf3332 # v4.1.7 - uses: EmbarkStudios/cargo-deny-action@8371184bd11e21dcf8ac82ebf8c9c9f74ebf7268 # v2.0.1 + with: + command: check + command-arguments: "--hide-inclusion-graph" diff --git a/.github/workflows/ci-core-reusable.yml b/.github/workflows/ci-core-reusable.yml index 62bd76dd0efc..53bd1ab7a518 100644 --- a/.github/workflows/ci-core-reusable.yml +++ b/.github/workflows/ci-core-reusable.yml @@ -52,22 +52,19 @@ jobs: - name: Init run: | - ci_run zk ci_run run_retried rustup show - ci_run zk run yarn - ci_run zk db setup - ci_run zk compiler all - ci_run zk contract build + ci_run ./bin/zkt + ci_run zk_supervisor contracts - name: Contracts unit tests run: ci_run yarn l1-contracts test - name: Rust unit tests run: | - ci_run zk test rust + ci_run zk_supervisor test rust # Benchmarks are not tested by `cargo nextest` unless specified explicitly, and even then `criterion` harness is incompatible # with how `cargo nextest` runs tests. Thus, we run criterion-based benchmark tests manually. 
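Editor's note on the comment above: `criterion` benches provide their own `main` instead of the default libtest harness, while `cargo nextest` can only drive libtest-compatible test binaries, hence the manual `cargo test --bench ...` invocation below. A minimal sketch of such a bench target (hypothetical, not taken from this repository):

```rust
// Registered in Cargo.toml with:
//   [[bench]]
//   name = "oneshot"
//   harness = false
// `harness = false` is what swaps the libtest harness for criterion's `main`,
// and is exactly the property that makes the bench invisible to `cargo nextest`.
use std::hint::black_box;

use criterion::{criterion_group, criterion_main, Criterion};

fn bench_oneshot(c: &mut Criterion) {
    // Placeholder workload; the real `vm-benchmark` benches execute VM batches.
    c.bench_function("oneshot", |b| b.iter(|| black_box(40) + black_box(2)));
}

criterion_group!(benches, bench_oneshot);
criterion_main!(benches);
```

Such a target is run exactly the way the CI line below does it: `cargo test --release -p vm-benchmark --bench oneshot --bench batch`.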
- ci_run zk f cargo test --release -p vm-benchmark --bench oneshot --bench batch + ci_run cargo test --release -p vm-benchmark --bench oneshot --bench batch loadtest: runs-on: [ matterlabs-ci-runner-high-performance ] @@ -90,8 +87,10 @@ jobs: - name: Loadtest configuration run: | - echo EXPECTED_TX_COUNT=${{ matrix.vm_mode == 'new' && 22000 || 16000 }} >> .env - echo ACCOUNTS_AMOUNT="150" >> .env + echo EXPECTED_TX_COUNT=${{ matrix.vm_mode == 'new' && 21000 || 16000 }} >> .env + echo ACCOUNTS_AMOUNT="100" >> .env + echo MAX_INFLIGHT_TXS="10" >> .env + echo SYNC_API_REQUESTS_LIMIT="15" >> .env echo FAIL_FAST=true >> .env echo IN_DOCKER=1 >> .env echo DATABASE_MERKLE_TREE_MODE=lightweight >> .env @@ -115,7 +114,8 @@ jobs: - name: Run server run: | EXPERIMENTAL_VM_STATE_KEEPER_FAST_VM_MODE=${{ matrix.vm_mode }} \ - PASSED_ENV_VARS="EXPERIMENTAL_VM_STATE_KEEPER_FAST_VM_MODE" \ + CHAIN_MEMPOOL_DELAY_INTERVAL=50 \ + PASSED_ENV_VARS="EXPERIMENTAL_VM_STATE_KEEPER_FAST_VM_MODE,CHAIN_MEMPOOL_DELAY_INTERVAL" \ ci_run zk server --uring --components api,tree,eth,state_keeper,housekeeper,commitment_generator,vm_runner_protective_reads &>server.log & ci_run sleep 60 diff --git a/.github/workflows/ci-zk-toolbox-reusable.yml b/.github/workflows/ci-zk-toolbox-reusable.yml index 21ffdc0523c9..78e1e485cafc 100644 --- a/.github/workflows/ci-zk-toolbox-reusable.yml +++ b/.github/workflows/ci-zk-toolbox-reusable.yml @@ -4,6 +4,11 @@ on: env: CLICOLOR: 1 + # We run multiple binaries in parallel, and by default they will try to utilize all the + # available CPUs. In tests, there is not much CPU-intensive work (rayon), but a lot of + # async work (tokio), so we prioritize tokio. + TOKIO_WORKER_THREADS: 4 + RAYON_NUM_THREADS: 2 jobs: lint: @@ -11,7 +16,7 @@ jobs: uses: ./.github/workflows/ci-core-lint-reusable.yml tests: - runs-on: [ matterlabs-ci-runner ] + runs-on: [ matterlabs-ci-runner-ultra-performance ] steps: - uses: actions/checkout@a5ac7e51b41094c92402da3b24376905380afc29 # v4 with: @@ -249,7 +254,7 @@ jobs: - name: Upload logs - uses: actions/upload-artifact@v4 + uses: actions/upload-artifact@50769540e7f4bd5e21e526ee35c689e35e0d6874 # v4.4.0 if: always() with: name: logs diff --git a/.github/workflows/ci.yml b/.github/workflows/ci.yml index bcafbfc0b6b1..53c169114915 100644 --- a/.github/workflows/ci.yml +++ b/.github/workflows/ci.yml @@ -31,7 +31,7 @@ jobs: - name: Get changed files id: changed-files - uses: tj-actions/changed-files@v39 + uses: tj-actions/changed-files@fea790cb660e33aef4bdf07304e28fedd77dfa13 # v39 with: files_yaml: | prover: diff --git a/.github/workflows/nodejs-license.yaml b/.github/workflows/nodejs-license.yaml index b776673e1298..642ded744021 100644 --- a/.github/workflows/nodejs-license.yaml +++ b/.github/workflows/nodejs-license.yaml @@ -47,7 +47,7 @@ jobs: uses: actions/checkout@a5ac7e51b41094c92402da3b24376905380afc29 # v4 - name: Use Node.js - uses: actions/setup-node@v3 + uses: actions/setup-node@1e60f620b9541d16bece96c5465dc8ee9832be0b # v4.0.3 with: node-version: 18 diff --git a/.github/workflows/protobuf.yaml b/.github/workflows/protobuf.yaml index d2885f613aa0..9c2c34186701 100644 --- a/.github/workflows/protobuf.yaml +++ b/.github/workflows/protobuf.yaml @@ -33,7 +33,7 @@ jobs: compatibility: runs-on: [ubuntu-22.04-github-hosted-16core] steps: - - uses: mozilla-actions/sccache-action@v0.0.3 + - uses: mozilla-actions/sccache-action@89e9040de88b577a072e3760aaf59f585da083af # v0.0.5 # before - uses: actions/checkout@a5ac7e51b41094c92402da3b24376905380afc29 # v4 @@ -69,7 +69,7 
@@ jobs: | xargs cat > ./after.binpb # compare - - uses: bufbuild/buf-setup-action@v1 + - uses: bufbuild/buf-setup-action@54abbed4fe8d8d45173eca4798b0c39a53a7b658 # v1.39.0 with: github_token: ${{ github.token }} - name: buf breaking diff --git a/.github/workflows/release-please.yml b/.github/workflows/release-please.yml index 692a420eed81..4a8f527f45c6 100644 --- a/.github/workflows/release-please.yml +++ b/.github/workflows/release-please.yml @@ -15,7 +15,7 @@ jobs: steps: - name: Run release-please id: release - uses: google-github-actions/release-please-action@v4 + uses: google-github-actions/release-please-action@e4dc86ba9405554aeba3c6bb2d169500e7d3b4ee # v4.1.1 with: token: ${{ secrets.RELEASE_TOKEN }} config-file: .github/release-please/config.json diff --git a/.github/workflows/release-test-stage.yml b/.github/workflows/release-test-stage.yml index 9f921be78292..1da5aa9ac928 100644 --- a/.github/workflows/release-test-stage.yml +++ b/.github/workflows/release-test-stage.yml @@ -23,7 +23,7 @@ jobs: - name: Get all test, doc and src files that have changed id: changed-files-yaml - uses: tj-actions/changed-files@v37 + uses: tj-actions/changed-files@fea790cb660e33aef4bdf07304e28fedd77dfa13 # v39 with: files_yaml: | # TODO: make it more granular, as already implemented in CI workflow diff --git a/.github/workflows/vm-perf-comparison.yml b/.github/workflows/vm-perf-comparison.yml index db729cbadc07..cfcfff93037f 100644 --- a/.github/workflows/vm-perf-comparison.yml +++ b/.github/workflows/vm-perf-comparison.yml @@ -68,7 +68,7 @@ jobs: id: comparison - name: Comment on PR - uses: thollander/actions-comment-pull-request@v2 + uses: thollander/actions-comment-pull-request@fabd468d3a1a0b97feee5f6b9e499eab0dd903f6 # v2.5.0 with: message: | ${{ steps.comparison.outputs.speedup == '' && '## No performance difference detected (anymore)' || '## Detected VM performance changes' }} diff --git a/.github/workflows/zk-environment-publish.yml b/.github/workflows/zk-environment-publish.yml index 7e232475b148..5a08dff178c4 100644 --- a/.github/workflows/zk-environment-publish.yml +++ b/.github/workflows/zk-environment-publish.yml @@ -36,7 +36,7 @@ jobs: - name: Get changed files id: changed-files-yaml - uses: tj-actions/changed-files@v39 + uses: tj-actions/changed-files@fea790cb660e33aef4bdf07304e28fedd77dfa13 # v39 with: files_yaml: | zk_env: diff --git a/Cargo.lock b/Cargo.lock index 7c45ba3dad99..d5abe5c3b151 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -778,9 +778,9 @@ dependencies = [ [[package]] name = "build_html" -version = "2.4.0" +version = "2.5.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3108fe6fe7ac796fb7625bdde8fa2b67b5a7731496251ca57c7b8cadd78a16a1" +checksum = "225eb82ce9e70dcc0cfa6e404d0f353326b6e163bf500ec4711cec317d11935c" [[package]] name = "bumpalo" @@ -5575,9 +5575,9 @@ dependencies = [ [[package]] name = "semver" -version = "1.0.20" +version = "1.0.23" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "836fa6a3e1e547f9a2c4040802ec865b5d85f4014efe00555d7090a3dcaa1090" +checksum = "61697e0a1c7e512e84a621326239844a24d8207b4669b41bc18b32ea5cbf988b" dependencies = [ "serde", ] @@ -5704,9 +5704,9 @@ checksum = "a3f0bf26fd526d2a95683cd0f87bf103b8539e2ca1ef48ce002d67aad59aa0b4" [[package]] name = "serde" -version = "1.0.189" +version = "1.0.208" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8e422a44e74ad4001bdc8eede9a4570ab52f71190e9c076d14369f38b9200537" +checksum = 
"cff085d2cb684faa248efb494c39b68e522822ac0de72ccf08109abde717cfb2" dependencies = [ "serde_derive", ] @@ -5723,9 +5723,9 @@ dependencies = [ [[package]] name = "serde_derive" -version = "1.0.189" +version = "1.0.208" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1e48d1f918009ce3145511378cf68d613e3b3d9137d67272562080d68a2b32d5" +checksum = "24008e81ff7613ed8e5ba0cfaf24e2c2f1e5b8a0495711e44fcd4882fca62bcf" dependencies = [ "proc-macro2 1.0.86", "quote 1.0.36", @@ -8188,9 +8188,9 @@ dependencies = [ [[package]] name = "zksync_concurrency" -version = "0.1.0-rc.11" +version = "0.1.0-rc.12" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b0e31a9fc9a390b440cd12bbe040330dc64f64697a8a8ecbc3beb98cd0747909" +checksum = "a49ad68bfaf6fb8542c68894b68b28be31514786549855aaa8a46b36defbb100" dependencies = [ "anyhow", "once_cell", @@ -8213,6 +8213,7 @@ dependencies = [ "rand 0.8.5", "secrecy", "serde", + "serde_json", "tracing", "url", "zksync_basic_types", @@ -8224,9 +8225,9 @@ dependencies = [ [[package]] name = "zksync_consensus_bft" -version = "0.1.0-rc.11" +version = "0.1.0-rc.12" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e22e3bfe96fa30a57313e774a5e8c74ffee884abff57ecacc10e8832315ee8a2" +checksum = "b1dcab481683131c093271c19602bd495b1d682f7a94f764f2227111a0a104f0" dependencies = [ "anyhow", "async-trait", @@ -8246,9 +8247,9 @@ dependencies = [ [[package]] name = "zksync_consensus_crypto" -version = "0.1.0-rc.11" +version = "0.1.0-rc.12" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "efb7ff3ec44b7b92fd4e28d9d92b83d61dc74125ccfc90bcfb27a5750d8a8580" +checksum = "ace39bdf50b8421c4d546381fe1ecc5212f953ce61cf93d4fa69172078dbe4af" dependencies = [ "anyhow", "blst", @@ -8270,13 +8271,14 @@ dependencies = [ [[package]] name = "zksync_consensus_executor" -version = "0.1.0-rc.11" +version = "0.1.0-rc.12" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a7fcde1275970a6b8a33ea2ade5cc994d6392f95509ce374e0e7a26cde4cd6db" +checksum = "216e3d9f3df8c119e037e44c41db12fa6448dafbf1eaf5015d13b22400866980" dependencies = [ "anyhow", "async-trait", "rand 0.8.5", + "semver", "tracing", "vise", "zksync_concurrency", @@ -8291,9 +8293,9 @@ dependencies = [ [[package]] name = "zksync_consensus_network" -version = "0.1.0-rc.11" +version = "0.1.0-rc.12" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e6ee48bee7dae8adb2769c7315adde1780832d05ecb6a77c08cdda53a315992a" +checksum = "19d7dd832b1bbcd0a2b977b2d85986437105fd5e1e82bd4becb2e6a9db112655" dependencies = [ "anyhow", "async-trait", @@ -8308,6 +8310,7 @@ dependencies = [ "pin-project", "prost 0.12.1", "rand 0.8.5", + "semver", "snow", "thiserror", "tls-listener", @@ -8326,9 +8329,9 @@ dependencies = [ [[package]] name = "zksync_consensus_roles" -version = "0.1.0-rc.11" +version = "0.1.0-rc.12" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "72223c0b20621775db51bcc4b043addafeaf784d444af2ad4bc8bcdee477367c" +checksum = "06277266e31efdc1465f6a27ce96c7435392a270978a91956b8a848732df2cfa" dependencies = [ "anyhow", "bit-vec", @@ -8348,9 +8351,9 @@ dependencies = [ [[package]] name = "zksync_consensus_storage" -version = "0.1.0-rc.11" +version = "0.1.0-rc.12" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "41d1750ad93f7e3a0c2f5880f9bcc1244a3b46d3e6c124c4f65f545032b87464" +checksum = 
"9099b2295f550176d824b5287f2f31b7739c4d10247faec1132f1c6e9d18059c" dependencies = [ "anyhow", "async-trait", @@ -8368,9 +8371,9 @@ dependencies = [ [[package]] name = "zksync_consensus_utils" -version = "0.1.0-rc.11" +version = "0.1.0-rc.12" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2ff679f8b5f671d887a750b8107f3b5c01fd6085f68eef37ab01de8d2bd0736b" +checksum = "4d624f55e2449f43b2c85588b5dd2a28b3c5ea629effc89df76e3254f8d9d2fb" dependencies = [ "anyhow", "rand 0.8.5", @@ -8709,7 +8712,7 @@ dependencies = [ [[package]] name = "zksync_external_node" -version = "24.23.0" +version = "24.24.0" dependencies = [ "anyhow", "assert_matches", @@ -9051,13 +9054,17 @@ version = "0.1.0" dependencies = [ "anyhow", "async-trait", + "hex", + "jsonrpsee", "rand 0.8.5", "secrecy", + "semver", "tempfile", "test-casing", "thiserror", "tokio", "tracing", + "zksync_basic_types", "zksync_concurrency", "zksync_config", "zksync_consensus_bft", @@ -9072,16 +9079,20 @@ dependencies = [ "zksync_l1_contract_interface", "zksync_merkle_tree", "zksync_metadata_calculator", + "zksync_multivm", "zksync_node_api_server", "zksync_node_genesis", "zksync_node_sync", "zksync_node_test_utils", "zksync_protobuf", + "zksync_state", "zksync_state_keeper", + "zksync_storage", "zksync_system_constants", "zksync_test_account", "zksync_types", "zksync_utils", + "zksync_vm_interface", "zksync_web3_decl", ] @@ -9138,6 +9149,7 @@ dependencies = [ "ctrlc", "futures 0.3.28", "pin-project-lite", + "semver", "thiserror", "tokio", "tracing", @@ -9340,9 +9352,9 @@ dependencies = [ [[package]] name = "zksync_protobuf" -version = "0.1.0-rc.11" +version = "0.1.0-rc.12" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f4f6ba3bf0aac20de18b4ae18a22d8c81b83f8f72e8fdec1c879525ecdacd2f5" +checksum = "d26fb2beb3aeafb5e9babf1acf6494662cc7157b893fa248bd151494f931d07f" dependencies = [ "anyhow", "bit-vec", @@ -9361,9 +9373,9 @@ dependencies = [ [[package]] name = "zksync_protobuf_build" -version = "0.1.0-rc.11" +version = "0.1.0-rc.12" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7798c248b9a64505f0586bd5fadad6b26c999be4a8dec6b1a86b10b3888169c5" +checksum = "58e86c198e056d921b4f3f1d2755c23d090e942b5a70b03bcb7e7c02445aa491" dependencies = [ "anyhow", "heck 0.5.0", diff --git a/Cargo.toml b/Cargo.toml index d244d436b9f5..075f5007be4c 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -218,16 +218,16 @@ zk_evm_1_5_0 = { package = "zk_evm", version = "=0.150.4" } vm2 = { git = "https://github.com/matter-labs/vm2.git", rev = "4ef15d46410ffc11744771a3a6c7c09dd9470c90" } # Consensus dependencies. 
-zksync_concurrency = "=0.1.0-rc.11" -zksync_consensus_bft = "=0.1.0-rc.11" -zksync_consensus_crypto = "=0.1.0-rc.11" -zksync_consensus_executor = "=0.1.0-rc.11" -zksync_consensus_network = "=0.1.0-rc.11" -zksync_consensus_roles = "=0.1.0-rc.11" -zksync_consensus_storage = "=0.1.0-rc.11" -zksync_consensus_utils = "=0.1.0-rc.11" -zksync_protobuf = "=0.1.0-rc.11" -zksync_protobuf_build = "=0.1.0-rc.11" +zksync_concurrency = "=0.1.0-rc.12" +zksync_consensus_bft = "=0.1.0-rc.12" +zksync_consensus_crypto = "=0.1.0-rc.12" +zksync_consensus_executor = "=0.1.0-rc.12" +zksync_consensus_network = "=0.1.0-rc.12" +zksync_consensus_roles = "=0.1.0-rc.12" +zksync_consensus_storage = "=0.1.0-rc.12" +zksync_consensus_utils = "=0.1.0-rc.12" +zksync_protobuf = "=0.1.0-rc.12" +zksync_protobuf_build = "=0.1.0-rc.12" # "Local" dependencies zksync_multivm = { version = "0.1.0", path = "core/lib/multivm" } diff --git a/contracts b/contracts index fd4aebcfe883..d3687694f71d 160000 --- a/contracts +++ b/contracts @@ -1 +1 @@ -Subproject commit fd4aebcfe8833b26e096e87e142a5e7e4744f3fa +Subproject commit d3687694f71d83fa286b9c186b4c3ea173028f83 diff --git a/core/CHANGELOG.md b/core/CHANGELOG.md index 4dea58651129..7d4381b09bef 100644 --- a/core/CHANGELOG.md +++ b/core/CHANGELOG.md @@ -1,5 +1,37 @@ # Changelog +## [24.24.0](https://github.com/matter-labs/zksync-era/compare/core-v24.23.0...core-v24.24.0) (2024-09-05) + + +### Features + +* conditional cbt l1 updates ([#2748](https://github.com/matter-labs/zksync-era/issues/2748)) ([6d18061](https://github.com/matter-labs/zksync-era/commit/6d18061df4a18803d3c6377305ef711ce60317e1)) +* **eth-watch:** do not query events from earliest block ([#2810](https://github.com/matter-labs/zksync-era/issues/2810)) ([1da3f7e](https://github.com/matter-labs/zksync-era/commit/1da3f7ea1df94312e7c6818c17bf4109f888e547)) +* **genesis:** Validate genesis config against L1 ([#2786](https://github.com/matter-labs/zksync-era/issues/2786)) ([b2dd9a5](https://github.com/matter-labs/zksync-era/commit/b2dd9a5c08fecf0a878632b33a32a78aac11c065)) +* Integrate tracers and implement circuits tracer in vm2 ([#2653](https://github.com/matter-labs/zksync-era/issues/2653)) ([87b02e3](https://github.com/matter-labs/zksync-era/commit/87b02e3ab5c1f61d59dd0f0eefa9ec33a7b55488)) +* Move prover data to /home/popzxc/workspace/current/zksync-era/prover/data ([#2778](https://github.com/matter-labs/zksync-era/issues/2778)) ([62e4d46](https://github.com/matter-labs/zksync-era/commit/62e4d4619dde9d6bd9102f1410eea75b0e2051c5)) +* Remove prover db from house keeper ([#2795](https://github.com/matter-labs/zksync-era/issues/2795)) ([85b7346](https://github.com/matter-labs/zksync-era/commit/85b734664b4306e988da07005860a7ea0fb7d22d)) +* **vm-runner:** Implement batch data prefetching ([#2724](https://github.com/matter-labs/zksync-era/issues/2724)) ([d01840d](https://github.com/matter-labs/zksync-era/commit/d01840d5de2cb0f4bead8f1c384b24ba713e6a66)) +* **vm:** Extract batch executor to separate crate ([#2702](https://github.com/matter-labs/zksync-era/issues/2702)) ([b82dfa4](https://github.com/matter-labs/zksync-era/commit/b82dfa4d29fce107223c3638fe490b5cb0f28d8c)) +* **vm:** Simplify VM interface ([#2760](https://github.com/matter-labs/zksync-era/issues/2760)) ([c3bde47](https://github.com/matter-labs/zksync-era/commit/c3bde47c1e7d16bc00f9b089516ed3691e4f3eb1)) +* **zk_toolbox:** add multi-chain CI integration test ([#2594](https://github.com/matter-labs/zksync-era/issues/2594)) 
([05c940e](https://github.com/matter-labs/zksync-era/commit/05c940efbd93023c315e5e13c98faee2153cc1cd)) + + +### Bug Fixes + +* **config:** Do not panic for observability config ([#2639](https://github.com/matter-labs/zksync-era/issues/2639)) ([1e768d4](https://github.com/matter-labs/zksync-era/commit/1e768d402012f6c7ce83fdd46c55f830ec31416a)) +* **core:** Batched event processing support for Reth ([#2623](https://github.com/matter-labs/zksync-era/issues/2623)) ([958dfdc](https://github.com/matter-labs/zksync-era/commit/958dfdcac358897bfd4d2a2ddc1633a23dbfcdc9)) +* return correct witness inputs ([#2770](https://github.com/matter-labs/zksync-era/issues/2770)) ([2516e2e](https://github.com/matter-labs/zksync-era/commit/2516e2e5c83673687d61d143daa70e98ccecce53)) +* **tee-prover:** increase retries to reduce spurious alerts ([#2776](https://github.com/matter-labs/zksync-era/issues/2776)) ([4fdc806](https://github.com/matter-labs/zksync-era/commit/4fdc80636437090f6ebcfa4e2f1eb50edf53631a)) +* **tee-prover:** mitigate panic on redeployments ([#2764](https://github.com/matter-labs/zksync-era/issues/2764)) ([178b386](https://github.com/matter-labs/zksync-era/commit/178b38644f507c5f6d12ba862d0c699e87985dd7)) +* **tee:** lowercase enum TEE types ([#2798](https://github.com/matter-labs/zksync-era/issues/2798)) ([0f2f9bd](https://github.com/matter-labs/zksync-era/commit/0f2f9bd9ef4c2c7ba98a1fdbfca15d1de2b29997)) +* **vm-runner:** Fix statement timeouts in VM playground ([#2772](https://github.com/matter-labs/zksync-era/issues/2772)) ([d3cd553](https://github.com/matter-labs/zksync-era/commit/d3cd553888a5c903c6eae13a88e92c11602e93de)) + + +### Performance Improvements + +* **vm:** Fix VM performance regression on CI loadtest ([#2782](https://github.com/matter-labs/zksync-era/issues/2782)) ([bc0d7d5](https://github.com/matter-labs/zksync-era/commit/bc0d7d5935c8f5409a8e53f1c04c5141409aef31)) + ## [24.23.0](https://github.com/matter-labs/zksync-era/compare/core-v24.22.0...core-v24.23.0) (2024-08-28) diff --git a/core/bin/external_node/Cargo.toml b/core/bin/external_node/Cargo.toml index ecfc60d7ec03..498b11b279b0 100644 --- a/core/bin/external_node/Cargo.toml +++ b/core/bin/external_node/Cargo.toml @@ -1,7 +1,7 @@ [package] name = "zksync_external_node" description = "Non-validator ZKsync node" -version = "24.23.0" # x-release-please-version +version = "24.24.0" # x-release-please-version edition.workspace = true authors.workspace = true homepage.workspace = true diff --git a/core/bin/external_node/src/node_builder.rs b/core/bin/external_node/src/node_builder.rs index c30cc1a432bb..7b94ca7a0c2a 100644 --- a/core/bin/external_node/src/node_builder.rs +++ b/core/bin/external_node/src/node_builder.rs @@ -242,7 +242,13 @@ impl ExternalNodeBuilder { let config = self.config.consensus.clone(); let secrets = config::read_consensus_secrets().context("config::read_consensus_secrets()")?; - let layer = ExternalNodeConsensusLayer { config, secrets }; + let layer = ExternalNodeConsensusLayer { + build_version: crate::metadata::SERVER_VERSION + .parse() + .context("CRATE_VERSION.parse()")?, + config, + secrets, + }; self.node.add_layer(layer); Ok(self) } diff --git a/core/bin/verified_sources_fetcher/README.md b/core/bin/verified_sources_fetcher/README.md new file mode 100644 index 000000000000..0abddb7a8843 --- /dev/null +++ b/core/bin/verified_sources_fetcher/README.md @@ -0,0 +1,4 @@ +# Verified sources fetcher + +This tool downloads verified contract sources from the SQL database's `contract_verification_requests`
table. Then it +saves sources and compilation settings to files. diff --git a/core/bin/zksync_tee_prover/src/config.rs b/core/bin/zksync_tee_prover/src/config.rs index 5b009e33f25e..1c2eb229d616 100644 --- a/core/bin/zksync_tee_prover/src/config.rs +++ b/core/bin/zksync_tee_prover/src/config.rs @@ -22,11 +22,21 @@ pub(crate) struct TeeProverConfig { pub max_retries: usize, /// Initial back-off interval when retrying recovery on a retriable error. Each subsequent retry interval /// will be multiplied by [`Self.retry_backoff_multiplier`]. - pub initial_retry_backoff: Duration, + pub initial_retry_backoff_sec: u64, /// Multiplier for the back-off interval when retrying recovery on a retriable error. pub retry_backoff_multiplier: f32, /// Maximum back-off interval when retrying recovery on a retriable error. - pub max_backoff: Duration, + pub max_backoff_sec: u64, +} + +impl TeeProverConfig { + pub fn initial_retry_backoff(&self) -> Duration { + Duration::from_secs(self.initial_retry_backoff_sec) + } + + pub fn max_backoff(&self) -> Duration { + Duration::from_secs(self.max_backoff_sec) + } } impl FromEnv for TeeProverConfig { @@ -39,9 +49,9 @@ impl FromEnv for TeeProverConfig { /// export TEE_PROVER_TEE_TYPE="sgx" /// export TEE_PROVER_API_URL="http://127.0.0.1:3320" /// export TEE_PROVER_MAX_RETRIES=10 - /// export TEE_PROVER_INITIAL_RETRY_BACKOFF=1 + /// export TEE_PROVER_INITIAL_RETRY_BACKOFF_SEC=1 /// export TEE_PROVER_RETRY_BACKOFF_MULTIPLIER=2.0 - /// export TEE_PROVER_MAX_BACKOFF=128 + /// export TEE_PROVER_MAX_BACKOFF_SEC=128 /// ``` fn from_env() -> anyhow::Result<Self> { let config: Self = envy::prefixed("TEE_PROVER_").from_env()?; diff --git a/core/bin/zksync_tee_prover/src/tee_prover.rs b/core/bin/zksync_tee_prover/src/tee_prover.rs index 3d227118e57f..1511f0c88e3d 100644 --- a/core/bin/zksync_tee_prover/src/tee_prover.rs +++ b/core/bin/zksync_tee_prover/src/tee_prover.rs @@ -129,7 +129,7 @@ impl Task for TeeProver { .await?; let mut retries = 1; - let mut backoff = config.initial_retry_backoff; + let mut backoff = config.initial_retry_backoff(); let mut observer = METRICS.job_waiting_time.start(); loop { @@ -141,7 +141,7 @@ impl Task for TeeProver { let need_to_sleep = match result { Ok(batch_number) => { retries = 1; - backoff = config.initial_retry_backoff; + backoff = config.initial_retry_backoff(); if let Some(batch_number) = batch_number { observer.observe(); observer = METRICS.job_waiting_time.start(); @@ -162,7 +162,7 @@ impl Task for TeeProver { retries += 1; backoff = std::cmp::min( backoff.mul_f32(config.retry_backoff_multiplier), - config.max_backoff, + config.max_backoff(), ); true } diff --git a/core/lib/basic_types/src/protocol_version.rs b/core/lib/basic_types/src/protocol_version.rs index 265c06987afd..640a92c00da0 100644 --- a/core/lib/basic_types/src/protocol_version.rs +++ b/core/lib/basic_types/src/protocol_version.rs @@ -238,7 +238,12 @@ impl Detokenize for VerifierParams { #[derive(Debug, Clone, Copy, Default, Eq, PartialEq, Serialize, Deserialize)] pub struct L1VerifierConfig { - pub recursion_scheduler_level_vk_hash: H256, + // Rename is required to not introduce breaking changes in the API for existing clients.
+ #[serde( + alias = "recursion_scheduler_level_vk_hash", + rename(serialize = "recursion_scheduler_level_vk_hash") + )] + pub snark_wrapper_vk_hash: H256, } impl From<ProtocolVersionId> for VmVersion { @@ -394,4 +399,22 @@ mod tests { assert_eq!(version, unpacked); } + + #[test] + fn test_verifier_config_serde() { + let de = [ + r#"{"recursion_scheduler_level_vk_hash": "0x1111111111111111111111111111111111111111111111111111111111111111"}"#, + r#"{"snark_wrapper_vk_hash": "0x1111111111111111111111111111111111111111111111111111111111111111"}"#, + ]; + for de in de.iter() { + let _: L1VerifierConfig = serde_json::from_str(de) + .unwrap_or_else(|err| panic!("Failed deserialization. String: {de}, error {err}")); + } + let ser = L1VerifierConfig { + snark_wrapper_vk_hash: H256::repeat_byte(0x11), + }; + let ser_str = serde_json::to_string(&ser).unwrap(); + let expected_str = r#"{"recursion_scheduler_level_vk_hash":"0x1111111111111111111111111111111111111111111111111111111111111111"}"#; + assert_eq!(ser_str, expected_str); + } } diff --git a/core/lib/basic_types/src/tee_types.rs b/core/lib/basic_types/src/tee_types.rs index c9be9b6e99d8..d49f2f183885 100644 --- a/core/lib/basic_types/src/tee_types.rs +++ b/core/lib/basic_types/src/tee_types.rs @@ -1,9 +1,49 @@ +use std::fmt; + use serde::{Deserialize, Serialize}; -use strum::{Display, EnumString}; -#[derive(Debug, Clone, Copy, PartialEq, EnumString, Display, Serialize, Deserialize)] +#[derive(Debug, Clone, Copy, PartialEq, Serialize, Deserialize)] +#[serde(rename_all = "lowercase")] #[non_exhaustive] pub enum TeeType { - #[strum(serialize = "sgx")] Sgx, } + +impl fmt::Display for TeeType { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + match self { + TeeType::Sgx => write!(f, "sgx"), + } + } +} + +#[cfg(test)] +mod tests { + use serde_json; + + use super::*; + + #[test] + fn test_serialize_teetype() { + let json_str = "\"sgx\""; + let tee_type: TeeType = serde_json::from_str(json_str).unwrap(); + assert_eq!(tee_type, TeeType::Sgx); + + for json_str in &["\"Sgx\"", "\"SGX\""] { + let result: Result<TeeType, _> = serde_json::from_str(json_str); + assert!(result.is_err()); + } + } + + #[test] + fn test_deserialize_teetype() { + let tee_type = TeeType::Sgx; + let json_str = serde_json::to_string(&tee_type).unwrap(); + assert_eq!(json_str, "\"sgx\""); + } + + #[test] + fn test_display_teetype() { + assert_eq!(TeeType::Sgx.to_string(), "sgx"); + } +} diff --git a/core/lib/config/Cargo.toml b/core/lib/config/Cargo.toml index b13948448cdd..d1ab5ce8438f 100644 --- a/core/lib/config/Cargo.toml +++ b/core/lib/config/Cargo.toml @@ -24,6 +24,9 @@ rand.workspace = true secrecy.workspace = true serde = { workspace = true, features = ["derive"] } +[dev-dependencies] +serde_json.workspace = true + [features] default = [] observability_ext = ["zksync_vlog", "tracing"] diff --git a/core/lib/config/src/configs/consensus.rs b/core/lib/config/src/configs/consensus.rs index 50885a6ec6fe..e5e01f880feb 100644 --- a/core/lib/config/src/configs/consensus.rs +++ b/core/lib/config/src/configs/consensus.rs @@ -1,7 +1,7 @@ use std::collections::{BTreeMap, BTreeSet}; use secrecy::{ExposeSecret as _, Secret}; -use zksync_basic_types::L2ChainId; +use zksync_basic_types::{ethabi, L2ChainId}; use zksync_concurrency::{limiter, time}; /// `zksync_consensus_crypto::TextFmt` representation of `zksync_consensus_roles::validator::PublicKey`. @@ -89,6 +89,8 @@ pub struct GenesisSpec { /// Leader of the committee. Represents /// `zksync_consensus_roles::validator::LeaderSelectionMode::Sticky`.
pub leader: ValidatorPublicKey, + /// Address of the registry contract. + pub registry_address: Option<ethabi::Address>, } #[derive(Clone, Debug, PartialEq, Default)] diff --git a/core/lib/config/src/configs/genesis.rs b/core/lib/config/src/configs/genesis.rs index 2c5c91128431..6c4bacc3a6e2 100644 --- a/core/lib/config/src/configs/genesis.rs +++ b/core/lib/config/src/configs/genesis.rs @@ -20,7 +20,14 @@ pub struct GenesisConfig { pub l1_chain_id: L1ChainId, pub sl_chain_id: Option<SLChainId>, pub l2_chain_id: L2ChainId, - pub recursion_scheduler_level_vk_hash: H256, + // Note: `serde` isn't used with protobuf config. The same alias is implemented in + // `zksync_protobuf_config` manually. + // Rename is required to not introduce breaking changes in the API for existing clients. + #[serde( + alias = "recursion_scheduler_level_vk_hash", + rename(serialize = "recursion_scheduler_level_vk_hash") + )] + pub snark_wrapper_vk_hash: H256, pub fee_account: Address, pub dummy_verifier: bool, pub l1_batch_commit_data_generator_mode: L1BatchCommitmentMode, @@ -37,7 +44,7 @@ impl GenesisConfig { GenesisConfig { genesis_root_hash: Some(H256::repeat_byte(0x01)), rollup_last_leaf_index: Some(26), - recursion_scheduler_level_vk_hash: H256::repeat_byte(0x02), + snark_wrapper_vk_hash: H256::repeat_byte(0x02), fee_account: Default::default(), genesis_commitment: Some(H256::repeat_byte(0x17)), bootloader_hash: Default::default(), @@ -54,3 +61,37 @@ impl GenesisConfig { } } } + +#[cfg(test)] +mod tests { + use super::GenesisConfig; + + // This test checks that serde overrides (`rename`, `alias`) work for `snark_wrapper_vk_hash` field. + #[test] + fn genesis_serde_snark_wrapper_vk_hash() { + let genesis = GenesisConfig::for_tests(); + let genesis_str = serde_json::to_string(&genesis).unwrap(); + + // Check that we use backward-compatible name in serialization. + // If you want to remove this check, make sure that all the potential clients are updated.
+ assert!( + genesis_str.contains("recursion_scheduler_level_vk_hash"), + "Serialization should use backward-compatible name" + ); + + let genesis2: GenesisConfig = serde_json::from_str(&genesis_str).unwrap(); + assert_eq!(genesis, genesis2); + + let genesis_json = r#"{ + "snark_wrapper_vk_hash": "0x1111111111111111111111111111111111111111111111111111111111111111", + "l1_chain_id": 1, + "l2_chain_id": 1, + "fee_account": "0x1111111111111111111111111111111111111111", + "dummy_verifier": false, + "l1_batch_commit_data_generator_mode": "Rollup" + }"#; + serde_json::from_str::<GenesisConfig>(genesis_json).unwrap_or_else(|err| { + panic!("Failed to parse genesis config with a new name: {}", err) + }); + } +} diff --git a/core/lib/config/src/testonly.rs b/core/lib/config/src/testonly.rs index ea27bf8ab3ab..bc3b6025b15a 100644 --- a/core/lib/config/src/testonly.rs +++ b/core/lib/config/src/testonly.rs @@ -243,17 +243,17 @@ impl Distribution<configs::ContractsConfig> for EncodeDist { default_upgrade_addr: rng.gen(), diamond_proxy_addr: rng.gen(), validator_timelock_addr: rng.gen(), - l1_erc20_bridge_proxy_addr: rng.gen(), - l2_erc20_bridge_addr: rng.gen(), - l1_shared_bridge_proxy_addr: rng.gen(), - l2_shared_bridge_addr: rng.gen(), - l1_weth_bridge_proxy_addr: rng.gen(), - l2_weth_bridge_addr: rng.gen(), - l2_testnet_paymaster_addr: rng.gen(), + l1_erc20_bridge_proxy_addr: self.sample_opt(|| rng.gen()), + l2_erc20_bridge_addr: self.sample_opt(|| rng.gen()), + l1_shared_bridge_proxy_addr: self.sample_opt(|| rng.gen()), + l2_shared_bridge_addr: self.sample_opt(|| rng.gen()), + l1_weth_bridge_proxy_addr: self.sample_opt(|| rng.gen()), + l2_weth_bridge_addr: self.sample_opt(|| rng.gen()), + l2_testnet_paymaster_addr: self.sample_opt(|| rng.gen()), l1_multicall3_addr: rng.gen(), - base_token_addr: rng.gen(), - chain_admin_addr: rng.gen(), ecosystem_contracts: self.sample(rng), + base_token_addr: self.sample_opt(|| rng.gen()), + chain_admin_addr: self.sample_opt(|| rng.gen()), } } } @@ -728,7 +728,7 @@ impl Distribution<configs::GenesisConfig> for EncodeDist { l1_chain_id: L1ChainId(self.sample(rng)), sl_chain_id: None, l2_chain_id: L2ChainId::default(), - recursion_scheduler_level_vk_hash: rng.gen(), + snark_wrapper_vk_hash: rng.gen(), dummy_verifier: rng.gen(), l1_batch_commit_data_generator_mode: match rng.gen_range(0..2) { 0 => L1BatchCommitmentMode::Rollup, @@ -777,6 +777,7 @@ impl Distribution<configs::consensus::GenesisSpec> for EncodeDist { validators: self.sample_collect(rng), attesters: self.sample_collect(rng), leader: ValidatorPublicKey(self.sample(rng)), + registry_address: self.sample_opt(|| rng.gen()), } } } diff --git a/core/lib/dal/.sqlx/query-0d421637db03b83aa33468b7d3424b83027a8e050598b0cd4cfeb75e7fe89fdd.json b/core/lib/dal/.sqlx/query-0d421637db03b83aa33468b7d3424b83027a8e050598b0cd4cfeb75e7fe89fdd.json deleted file mode 100644 index 5e10786c7e3f..000000000000 --- a/core/lib/dal/.sqlx/query-0d421637db03b83aa33468b7d3424b83027a8e050598b0cd4cfeb75e7fe89fdd.json +++ /dev/null @@ -1,16 +0,0 @@ -{ - "db_name": "PostgreSQL", - "query": "\n INSERT INTO\n protocol_patches (minor, patch, recursion_scheduler_level_vk_hash, created_at)\n VALUES\n ($1, $2, $3, NOW())\n ON CONFLICT DO NOTHING\n ", - "describe": { - "columns": [], - "parameters": { - "Left": [ - "Int4", - "Int4", - "Bytea" - ] - }, - "nullable": [] - }, - "hash": "0d421637db03b83aa33468b7d3424b83027a8e050598b0cd4cfeb75e7fe89fdd" -} diff --git a/core/lib/dal/.sqlx/query-14c0caee921199f799400dbea719ed36420c15081ff5f60da0a1c769c2dbc542.json
b/core/lib/dal/.sqlx/query-14c0caee921199f799400dbea719ed36420c15081ff5f60da0a1c769c2dbc542.json deleted file mode 100644 index 3baa610d7d78..000000000000 --- a/core/lib/dal/.sqlx/query-14c0caee921199f799400dbea719ed36420c15081ff5f60da0a1c769c2dbc542.json +++ /dev/null @@ -1,20 +0,0 @@ -{ - "db_name": "PostgreSQL", - "query": "\n SELECT\n genesis\n FROM\n consensus_replica_state\n WHERE\n fake_key\n ", - "describe": { - "columns": [ - { - "ordinal": 0, - "name": "genesis", - "type_info": "Jsonb" - } - ], - "parameters": { - "Left": [] - }, - "nullable": [ - true - ] - }, - "hash": "14c0caee921199f799400dbea719ed36420c15081ff5f60da0a1c769c2dbc542" -} diff --git a/core/lib/dal/.sqlx/query-17c760825deaa18fc8862c950dc38ff77f5a0d5dfcc7c3f1519f882d2fa60634.json b/core/lib/dal/.sqlx/query-17c760825deaa18fc8862c950dc38ff77f5a0d5dfcc7c3f1519f882d2fa60634.json new file mode 100644 index 000000000000..28a1e54230d8 --- /dev/null +++ b/core/lib/dal/.sqlx/query-17c760825deaa18fc8862c950dc38ff77f5a0d5dfcc7c3f1519f882d2fa60634.json @@ -0,0 +1,26 @@ +{ + "db_name": "PostgreSQL", + "query": "\n SELECT\n genesis,\n global_config\n FROM\n consensus_replica_state\n WHERE\n fake_key\n ", + "describe": { + "columns": [ + { + "ordinal": 0, + "name": "genesis", + "type_info": "Jsonb" + }, + { + "ordinal": 1, + "name": "global_config", + "type_info": "Jsonb" + } + ], + "parameters": { + "Left": [] + }, + "nullable": [ + true, + true + ] + }, + "hash": "17c760825deaa18fc8862c950dc38ff77f5a0d5dfcc7c3f1519f882d2fa60634" +} diff --git a/core/lib/dal/.sqlx/query-f87c50d37f78d6b3c5a752ea88799a1f6ee5a046ece2ef949aee7ab3d2549975.json b/core/lib/dal/.sqlx/query-1f43731fa0736a91ba8da41f52b6b22abb03e2a9b2d3b53dc858e5c5a08bfb85.json similarity index 51% rename from core/lib/dal/.sqlx/query-f87c50d37f78d6b3c5a752ea88799a1f6ee5a046ece2ef949aee7ab3d2549975.json rename to core/lib/dal/.sqlx/query-1f43731fa0736a91ba8da41f52b6b22abb03e2a9b2d3b53dc858e5c5a08bfb85.json index 38b88c316eef..3817369ecc16 100644 --- a/core/lib/dal/.sqlx/query-f87c50d37f78d6b3c5a752ea88799a1f6ee5a046ece2ef949aee7ab3d2549975.json +++ b/core/lib/dal/.sqlx/query-1f43731fa0736a91ba8da41f52b6b22abb03e2a9b2d3b53dc858e5c5a08bfb85.json @@ -1,15 +1,16 @@ { "db_name": "PostgreSQL", - "query": "\n INSERT INTO\n consensus_replica_state (fake_key, genesis, state)\n VALUES\n (TRUE, $1, $2)\n ", + "query": "\n INSERT INTO\n consensus_replica_state (fake_key, global_config, genesis, state)\n VALUES\n (TRUE, $1, $2, $3)\n ", "describe": { "columns": [], "parameters": { "Left": [ + "Jsonb", "Jsonb", "Jsonb" ] }, "nullable": [] }, - "hash": "f87c50d37f78d6b3c5a752ea88799a1f6ee5a046ece2ef949aee7ab3d2549975" + "hash": "1f43731fa0736a91ba8da41f52b6b22abb03e2a9b2d3b53dc858e5c5a08bfb85" } diff --git a/core/lib/dal/.sqlx/query-d9d71913a116abf390c71f5229426306b02e328d7b1b69c495443bd2ca7f7510.json b/core/lib/dal/.sqlx/query-311d0357c22163b893dc91f2b080f2ede5e22b0bbd8bc910cb36a91ed992bde1.json similarity index 58% rename from core/lib/dal/.sqlx/query-d9d71913a116abf390c71f5229426306b02e328d7b1b69c495443bd2ca7f7510.json rename to core/lib/dal/.sqlx/query-311d0357c22163b893dc91f2b080f2ede5e22b0bbd8bc910cb36a91ed992bde1.json index a42fbe98ff2f..cabe0a3dc557 100644 --- a/core/lib/dal/.sqlx/query-d9d71913a116abf390c71f5229426306b02e328d7b1b69c495443bd2ca7f7510.json +++ b/core/lib/dal/.sqlx/query-311d0357c22163b893dc91f2b080f2ede5e22b0bbd8bc910cb36a91ed992bde1.json @@ -1,6 +1,6 @@ { "db_name": "PostgreSQL", - "query": "\n INSERT INTO\n l1_batches_consensus (l1_batch_number, certificate, 
created_at, updated_at)\n VALUES\n ($1, $2, NOW(), NOW())\n ON CONFLICT (l1_batch_number) DO NOTHING\n ", + "query": "\n INSERT INTO\n l1_batches_consensus (l1_batch_number, certificate, updated_at, created_at)\n VALUES\n ($1, $2, NOW(), NOW())\n ", "describe": { "columns": [], "parameters": { @@ -11,5 +11,5 @@ }, "nullable": [] }, - "hash": "d9d71913a116abf390c71f5229426306b02e328d7b1b69c495443bd2ca7f7510" + "hash": "311d0357c22163b893dc91f2b080f2ede5e22b0bbd8bc910cb36a91ed992bde1" } diff --git a/core/lib/dal/.sqlx/query-3ddb13ffbafae2374527886e592b3c0210383d8698f6f84f694fece9fd59f3d5.json b/core/lib/dal/.sqlx/query-3ddb13ffbafae2374527886e592b3c0210383d8698f6f84f694fece9fd59f3d5.json new file mode 100644 index 000000000000..5652e186ceb9 --- /dev/null +++ b/core/lib/dal/.sqlx/query-3ddb13ffbafae2374527886e592b3c0210383d8698f6f84f694fece9fd59f3d5.json @@ -0,0 +1,16 @@ +{ + "db_name": "PostgreSQL", + "query": "\n INSERT INTO\n protocol_patches (minor, patch, snark_wrapper_vk_hash, created_at)\n VALUES\n ($1, $2, $3, NOW())\n ON CONFLICT DO NOTHING\n ", + "describe": { + "columns": [], + "parameters": { + "Left": [ + "Int4", + "Int4", + "Bytea" + ] + }, + "nullable": [] + }, + "hash": "3ddb13ffbafae2374527886e592b3c0210383d8698f6f84f694fece9fd59f3d5" +} diff --git a/core/lib/dal/.sqlx/query-65bbf852d677e1f00a04785374148aa4e4a804519bcf68e14c5bbb0f58939da1.json b/core/lib/dal/.sqlx/query-65bbf852d677e1f00a04785374148aa4e4a804519bcf68e14c5bbb0f58939da1.json new file mode 100644 index 000000000000..ec17f2e0b61b --- /dev/null +++ b/core/lib/dal/.sqlx/query-65bbf852d677e1f00a04785374148aa4e4a804519bcf68e14c5bbb0f58939da1.json @@ -0,0 +1,22 @@ +{ + "db_name": "PostgreSQL", + "query": "\n SELECT\n attesters\n FROM\n l1_batches_consensus_committees\n WHERE\n l1_batch_number = $1\n ", + "describe": { + "columns": [ + { + "ordinal": 0, + "name": "attesters", + "type_info": "Jsonb" + } + ], + "parameters": { + "Left": [ + "Int8" + ] + }, + "nullable": [ + false + ] + }, + "hash": "65bbf852d677e1f00a04785374148aa4e4a804519bcf68e14c5bbb0f58939da1" +} diff --git a/core/lib/dal/.sqlx/query-849d54b4cf9212010fb4e41ce8137978579ba22eec525912c4aeeb235c3b984c.json b/core/lib/dal/.sqlx/query-849d54b4cf9212010fb4e41ce8137978579ba22eec525912c4aeeb235c3b984c.json deleted file mode 100644 index 5130763af73c..000000000000 --- a/core/lib/dal/.sqlx/query-849d54b4cf9212010fb4e41ce8137978579ba22eec525912c4aeeb235c3b984c.json +++ /dev/null @@ -1,20 +0,0 @@ -{ - "db_name": "PostgreSQL", - "query": "\n SELECT\n MAX(l1_batch_number) AS \"number\"\n FROM\n l1_batches_consensus\n ", - "describe": { - "columns": [ - { - "ordinal": 0, - "name": "number", - "type_info": "Int8" - } - ], - "parameters": { - "Left": [] - }, - "nullable": [ - null - ] - }, - "hash": "849d54b4cf9212010fb4e41ce8137978579ba22eec525912c4aeeb235c3b984c" -} diff --git a/core/lib/dal/.sqlx/query-e89e8cc58a2078157d06f3064ccad9773d45ef6d548f03d643007d3bc1072526.json b/core/lib/dal/.sqlx/query-85576fdbb4bd6e3a6e43511c065a2e3eaf72dfe0fa96b335b76c9506cb1ebdcc.json similarity index 70% rename from core/lib/dal/.sqlx/query-e89e8cc58a2078157d06f3064ccad9773d45ef6d548f03d643007d3bc1072526.json rename to core/lib/dal/.sqlx/query-85576fdbb4bd6e3a6e43511c065a2e3eaf72dfe0fa96b335b76c9506cb1ebdcc.json index 68b595b50274..3297d411d8a7 100644 --- a/core/lib/dal/.sqlx/query-e89e8cc58a2078157d06f3064ccad9773d45ef6d548f03d643007d3bc1072526.json +++ b/core/lib/dal/.sqlx/query-85576fdbb4bd6e3a6e43511c065a2e3eaf72dfe0fa96b335b76c9506cb1ebdcc.json @@ -1,6 +1,6 @@ { "db_name": 
"PostgreSQL", - "query": "\n SELECT\n protocol_versions.id AS \"minor!\",\n protocol_versions.timestamp,\n protocol_versions.bootloader_code_hash,\n protocol_versions.default_account_code_hash,\n protocol_patches.patch,\n protocol_patches.recursion_scheduler_level_vk_hash\n FROM\n protocol_versions\n JOIN protocol_patches ON protocol_patches.minor = protocol_versions.id\n WHERE\n id = $1\n ORDER BY\n protocol_patches.patch DESC\n LIMIT\n 1\n ", + "query": "\n SELECT\n protocol_versions.id AS \"minor!\",\n protocol_versions.timestamp,\n protocol_versions.bootloader_code_hash,\n protocol_versions.default_account_code_hash,\n protocol_patches.patch,\n protocol_patches.snark_wrapper_vk_hash\n FROM\n protocol_versions\n JOIN protocol_patches ON protocol_patches.minor = protocol_versions.id\n WHERE\n id = $1\n ORDER BY\n protocol_patches.patch DESC\n LIMIT\n 1\n ", "describe": { "columns": [ { @@ -30,7 +30,7 @@ }, { "ordinal": 5, - "name": "recursion_scheduler_level_vk_hash", + "name": "snark_wrapper_vk_hash", "type_info": "Bytea" } ], @@ -48,5 +48,5 @@ false ] }, - "hash": "e89e8cc58a2078157d06f3064ccad9773d45ef6d548f03d643007d3bc1072526" + "hash": "85576fdbb4bd6e3a6e43511c065a2e3eaf72dfe0fa96b335b76c9506cb1ebdcc" } diff --git a/core/lib/dal/.sqlx/query-883be3789994eac050df85056e4987e056c2bf423054e40236aba60f4d3b8a97.json b/core/lib/dal/.sqlx/query-883be3789994eac050df85056e4987e056c2bf423054e40236aba60f4d3b8a97.json new file mode 100644 index 000000000000..a59468bd516c --- /dev/null +++ b/core/lib/dal/.sqlx/query-883be3789994eac050df85056e4987e056c2bf423054e40236aba60f4d3b8a97.json @@ -0,0 +1,20 @@ +{ + "db_name": "PostgreSQL", + "query": "\n SELECT\n l1_batch_number\n FROM\n l1_batches_consensus\n ORDER BY\n l1_batch_number DESC\n LIMIT\n 1\n ", + "describe": { + "columns": [ + { + "ordinal": 0, + "name": "l1_batch_number", + "type_info": "Int8" + } + ], + "parameters": { + "Left": [] + }, + "nullable": [ + false + ] + }, + "hash": "883be3789994eac050df85056e4987e056c2bf423054e40236aba60f4d3b8a97" +} diff --git a/core/lib/dal/.sqlx/query-6f05b8ad720f9c1fae9292c16b5960b7bd48b48b63d9db071ef94c5fec4660c9.json b/core/lib/dal/.sqlx/query-a23ae928d8351d3a6ed64d8db777e7ed268bb6c5f3465c7e64beaa226c066f2b.json similarity index 64% rename from core/lib/dal/.sqlx/query-6f05b8ad720f9c1fae9292c16b5960b7bd48b48b63d9db071ef94c5fec4660c9.json rename to core/lib/dal/.sqlx/query-a23ae928d8351d3a6ed64d8db777e7ed268bb6c5f3465c7e64beaa226c066f2b.json index 32a9955cc270..ac10e8b1a8f0 100644 --- a/core/lib/dal/.sqlx/query-6f05b8ad720f9c1fae9292c16b5960b7bd48b48b63d9db071ef94c5fec4660c9.json +++ b/core/lib/dal/.sqlx/query-a23ae928d8351d3a6ed64d8db777e7ed268bb6c5f3465c7e64beaa226c066f2b.json @@ -1,6 +1,6 @@ { "db_name": "PostgreSQL", - "query": "\n SELECT\n patch\n FROM\n protocol_patches\n WHERE\n minor = $1\n AND recursion_scheduler_level_vk_hash = $2\n ORDER BY\n patch DESC\n ", + "query": "\n SELECT\n patch\n FROM\n protocol_patches\n WHERE\n minor = $1\n AND snark_wrapper_vk_hash = $2\n ORDER BY\n patch DESC\n ", "describe": { "columns": [ { @@ -19,5 +19,5 @@ false ] }, - "hash": "6f05b8ad720f9c1fae9292c16b5960b7bd48b48b63d9db071ef94c5fec4660c9" + "hash": "a23ae928d8351d3a6ed64d8db777e7ed268bb6c5f3465c7e64beaa226c066f2b" } diff --git a/core/lib/dal/.sqlx/query-b313ab2b1e0a83136a202ea758c6d2b2e3f2497e6b5f26c72e220397cc0e62f7.json b/core/lib/dal/.sqlx/query-b313ab2b1e0a83136a202ea758c6d2b2e3f2497e6b5f26c72e220397cc0e62f7.json new file mode 100644 index 000000000000..356fd8e9d999 --- /dev/null +++ 
b/core/lib/dal/.sqlx/query-b313ab2b1e0a83136a202ea758c6d2b2e3f2497e6b5f26c72e220397cc0e62f7.json @@ -0,0 +1,15 @@ +{ + "db_name": "PostgreSQL", + "query": "\n INSERT INTO\n l1_batches_consensus_committees (l1_batch_number, attesters, updated_at)\n VALUES\n ($1, $2, NOW())\n ON CONFLICT (l1_batch_number) DO\n UPDATE\n SET\n l1_batch_number = $1,\n attesters = $2,\n updated_at = NOW()\n ", + "describe": { + "columns": [], + "parameters": { + "Left": [ + "Int8", + "Jsonb" + ] + }, + "nullable": [] + }, + "hash": "b313ab2b1e0a83136a202ea758c6d2b2e3f2497e6b5f26c72e220397cc0e62f7" +} diff --git a/core/lib/dal/.sqlx/query-d93ebd47a227a6086a5eb963c7ed36e6c9d9e70dc52677c6b335b3ed4025db85.json b/core/lib/dal/.sqlx/query-d93ebd47a227a6086a5eb963c7ed36e6c9d9e70dc52677c6b335b3ed4025db85.json deleted file mode 100644 index 0fd16adc474d..000000000000 --- a/core/lib/dal/.sqlx/query-d93ebd47a227a6086a5eb963c7ed36e6c9d9e70dc52677c6b335b3ed4025db85.json +++ /dev/null @@ -1,23 +0,0 @@ -{ - "db_name": "PostgreSQL", - "query": "\n SELECT\n recursion_scheduler_level_vk_hash\n FROM\n protocol_patches\n WHERE\n minor = $1\n AND patch = $2\n ", - "describe": { - "columns": [ - { - "ordinal": 0, - "name": "recursion_scheduler_level_vk_hash", - "type_info": "Bytea" - } - ], - "parameters": { - "Left": [ - "Int4", - "Int4" - ] - }, - "nullable": [ - false - ] - }, - "hash": "d93ebd47a227a6086a5eb963c7ed36e6c9d9e70dc52677c6b335b3ed4025db85" -} diff --git a/core/lib/dal/.sqlx/query-e9d91aa8e30152a5b6a321cb94a298ed3fc5e6eb1c78c285bd20f6401771df25.json b/core/lib/dal/.sqlx/query-e9d91aa8e30152a5b6a321cb94a298ed3fc5e6eb1c78c285bd20f6401771df25.json new file mode 100644 index 000000000000..fa47ccab50ab --- /dev/null +++ b/core/lib/dal/.sqlx/query-e9d91aa8e30152a5b6a321cb94a298ed3fc5e6eb1c78c285bd20f6401771df25.json @@ -0,0 +1,23 @@ +{ + "db_name": "PostgreSQL", + "query": "\n SELECT\n snark_wrapper_vk_hash\n FROM\n protocol_patches\n WHERE\n minor = $1\n AND patch = $2\n ", + "describe": { + "columns": [ + { + "ordinal": 0, + "name": "snark_wrapper_vk_hash", + "type_info": "Bytea" + } + ], + "parameters": { + "Left": [ + "Int4", + "Int4" + ] + }, + "nullable": [ + false + ] + }, + "hash": "e9d91aa8e30152a5b6a321cb94a298ed3fc5e6eb1c78c285bd20f6401771df25" +} diff --git a/core/lib/dal/migrations/20240829123456_add_l1_batches_consensus_committees.down.sql b/core/lib/dal/migrations/20240829123456_add_l1_batches_consensus_committees.down.sql new file mode 100644 index 000000000000..fee0b42079f3 --- /dev/null +++ b/core/lib/dal/migrations/20240829123456_add_l1_batches_consensus_committees.down.sql @@ -0,0 +1,3 @@ +ALTER TABLE consensus_replica_state DROP COLUMN global_config; + +DROP TABLE l1_batches_consensus_committees; diff --git a/core/lib/dal/migrations/20240829123456_add_l1_batches_consensus_committees.up.sql b/core/lib/dal/migrations/20240829123456_add_l1_batches_consensus_committees.up.sql new file mode 100644 index 000000000000..c31952b96465 --- /dev/null +++ b/core/lib/dal/migrations/20240829123456_add_l1_batches_consensus_committees.up.sql @@ -0,0 +1,8 @@ +ALTER TABLE consensus_replica_state + ADD COLUMN global_config JSONB NULL; + +CREATE TABLE l1_batches_consensus_committees ( + l1_batch_number BIGINT PRIMARY KEY REFERENCES l1_batches (number) ON DELETE CASCADE, + attesters JSONB NOT NULL, + updated_at TIMESTAMP NOT NULL +); diff --git a/core/lib/dal/migrations/20240905123059_rename-recurision-scheduler-level-vk-hash.down.sql b/core/lib/dal/migrations/20240905123059_rename-recurision-scheduler-level-vk-hash.down.sql 
new file mode 100644 index 000000000000..daa108d4ff39 --- /dev/null +++ b/core/lib/dal/migrations/20240905123059_rename-recurision-scheduler-level-vk-hash.down.sql @@ -0,0 +1,3 @@ +UPDATE protocol_patches SET recursion_scheduler_level_vk_hash = snark_wrapper_vk_hash WHERE recursion_scheduler_level_vk_hash = ''::bytea; +ALTER TABLE protocol_patches DROP COLUMN snark_wrapper_vk_hash; +ALTER TABLE protocol_patches ALTER COLUMN recursion_scheduler_level_vk_hash DROP DEFAULT; diff --git a/core/lib/dal/migrations/20240905123059_rename-recurision-scheduler-level-vk-hash.up.sql b/core/lib/dal/migrations/20240905123059_rename-recurision-scheduler-level-vk-hash.up.sql new file mode 100644 index 000000000000..730b3a50d8a0 --- /dev/null +++ b/core/lib/dal/migrations/20240905123059_rename-recurision-scheduler-level-vk-hash.up.sql @@ -0,0 +1,8 @@ +ALTER TABLE protocol_patches ADD COLUMN snark_wrapper_vk_hash BYTEA NOT NULL DEFAULT ''::bytea; +ALTER TABLE protocol_patches ALTER COLUMN recursion_scheduler_level_vk_hash SET DEFAULT ''::bytea; +UPDATE protocol_patches SET snark_wrapper_vk_hash = recursion_scheduler_level_vk_hash; +-- Default was only needed to migrate old rows, we don't want this field to be forgotten by accident after migration. +ALTER TABLE protocol_patches ALTER COLUMN snark_wrapper_vk_hash DROP DEFAULT; + +-- Old column should be removed once the migration is on the mainnet. +COMMENT ON COLUMN protocol_patches.recursion_scheduler_level_vk_hash IS 'This column is deprecated and will be removed in the future. Use snark_wrapper_vk_hash instead.'; diff --git a/core/lib/dal/src/consensus/mod.rs b/core/lib/dal/src/consensus/mod.rs index 658da6c76821..f0ef336bc543 100644 --- a/core/lib/dal/src/consensus/mod.rs +++ b/core/lib/dal/src/consensus/mod.rs @@ -22,6 +22,36 @@ use zksync_utils::{h256_to_u256, u256_to_h256}; use crate::models::{parse_h160, parse_h256}; +/// Global config of the consensus. +#[derive(Debug, PartialEq, Clone)] +pub struct GlobalConfig { + pub genesis: validator::Genesis, + pub registry_address: Option<ethabi::Address>, +} + +impl ProtoFmt for GlobalConfig { + type Proto = proto::GlobalConfig; + + fn read(r: &Self::Proto) -> anyhow::Result<Self> { + Ok(Self { + genesis: read_required(&r.genesis).context("genesis")?, + registry_address: r + .registry_address + .as_ref() + .map(|a| parse_h160(a)) + .transpose() + .context("registry_address")?, + }) + } + + fn build(&self) -> Self::Proto { + Self::Proto { + genesis: Some(self.genesis.build()), + registry_address: self.registry_address.map(|a| a.as_bytes().to_vec()), + } + } +} + /// Global attestation status served by /// `attestationStatus` RPC.
#[derive(Debug, PartialEq, Clone)] @@ -469,3 +499,24 @@ impl ProtoRepr for proto::Transaction { } } } + +impl ProtoRepr for proto::AttesterCommittee { + type Type = attester::Committee; + + fn read(&self) -> anyhow::Result<Self::Type> { + let members: Vec<_> = self + .members + .iter() + .enumerate() + .map(|(i, m)| attester::WeightedAttester::read(m).context(i)) + .collect::<Result<_, _>>() + .context("members")?; + Self::Type::new(members) + } + + fn build(this: &Self::Type) -> Self { + Self { + members: this.iter().map(|x| x.build()).collect(), + } + } +} diff --git a/core/lib/dal/src/consensus/proto/mod.proto b/core/lib/dal/src/consensus/proto/mod.proto index ea0c12f1b5f3..da9151f10f4d 100644 --- a/core/lib/dal/src/consensus/proto/mod.proto +++ b/core/lib/dal/src/consensus/proto/mod.proto @@ -3,6 +3,7 @@ syntax = "proto3"; package zksync.dal; import "zksync/roles/validator.proto"; +import "zksync/roles/attester.proto"; message Payload { // zksync-era ProtocolVersionId @@ -117,6 +118,15 @@ message PaymasterParams { optional bytes paymaster_input = 2; // required } +message AttesterCommittee { + repeated roles.attester.WeightedAttester members = 1; // required +} + +message GlobalConfig { + optional roles.validator.Genesis genesis = 1; // required + optional bytes registry_address = 2; // optional; H160 +} + message AttestationStatus { optional roles.validator.GenesisHash genesis = 1; // required optional uint64 next_batch_to_attest = 2; // required diff --git a/core/lib/dal/src/consensus_dal.rs b/core/lib/dal/src/consensus_dal.rs index 8f05cb381777..2dca58e2a6a6 100644 --- a/core/lib/dal/src/consensus_dal.rs +++ b/core/lib/dal/src/consensus_dal.rs @@ -1,5 +1,4 @@ use anyhow::Context as _; -use bigdecimal::Zero as _; use zksync_consensus_roles::{attester, validator}; use zksync_consensus_storage::{BlockStoreState, ReplicaState}; use zksync_db_connection::{ @@ -7,10 +6,10 @@ use zksync_db_connection::{ error::{DalError, DalResult, SqlxContext}, instrument::{InstrumentExt, Instrumented}, }; -use zksync_protobuf::ProtoFmt as _; +use zksync_protobuf::ProtoRepr as _; use zksync_types::L2BlockNumber; -pub use crate::consensus::{AttestationStatus, Payload}; +pub use crate::consensus::{proto, AttestationStatus, GlobalConfig, Payload}; use crate::{Core, CoreDal}; /// Storage access methods for `zksync_core::consensus` module. @@ -33,72 +32,77 @@ pub enum InsertCertificateError { } impl ConsensusDal<'_, '_> { - /// Fetches genesis. - pub async fn genesis(&mut self) -> DalResult<Option<validator::Genesis>> { - Ok(sqlx::query!( + /// Fetch consensus global config. + pub async fn global_config(&mut self) -> anyhow::Result<Option<GlobalConfig>> { + // global_config contains a superset of genesis information. + // genesis column is deprecated and will be removed once the main node + // is fully upgraded. + // For now we keep the information between both columns in sync. + let Some(row) = sqlx::query!( r#" SELECT - genesis + genesis, + global_config FROM consensus_replica_state WHERE fake_key "# ) - .try_map(|row| { - let Some(genesis) = row.genesis else { - return Ok(None); - }; - // Deserialize the json, but don't allow for unknown fields. - // We might encounter an unknown fields here in case if support for the previous - // consensus protocol version is removed before the migration to a new version - // is performed. The node should NOT operate in such a state. - Ok(Some( - validator::GenesisRaw::read( - &zksync_protobuf::serde::deserialize_proto_with_options( - &genesis, /*deny_unknown_fields=*/ true, - ) - .decode_column("genesis")?, - ) - .decode_column("genesis")?
- .with_hash(), - )) - }) - .instrument("genesis") + .instrument("global_config") .fetch_optional(self.storage) .await? - .flatten()) + else { + return Ok(None); + }; + if let Some(global_config) = row.global_config { + return Ok(Some( + zksync_protobuf::serde::deserialize(&global_config).context("global_config")?, + )); + } + if let Some(genesis) = row.genesis { + let genesis: validator::Genesis = + zksync_protobuf::serde::deserialize(&genesis).context("genesis")?; + return Ok(Some(GlobalConfig { + genesis, + registry_address: None, + })); + } + Ok(None) } - /// Attempts to update the genesis. + /// Attempts to update the global config. /// Fails if the new genesis is invalid. /// Fails if the new genesis has different `chain_id`. /// Fails if the storage contains a newer genesis (higher fork number). - /// Noop if the new genesis is the same as the current one. + /// Noop if the new global config is the same as the current one. /// Resets the stored consensus state otherwise and purges all certificates. - pub async fn try_update_genesis(&mut self, genesis: &validator::Genesis) -> anyhow::Result<()> { + pub async fn try_update_global_config(&mut self, want: &GlobalConfig) -> anyhow::Result<()> { let mut txn = self.storage.start_transaction().await?; - if let Some(got) = txn.consensus_dal().genesis().await? { + if let Some(got) = txn.consensus_dal().global_config().await? { // Exit if the genesis didn't change. - if &got == genesis { + if &got == want { return Ok(()); } anyhow::ensure!( - got.chain_id == genesis.chain_id, + got.genesis.chain_id == want.genesis.chain_id, "changing chain_id is not allowed: old = {:?}, new = {:?}", - got.chain_id, - genesis.chain_id, + got.genesis.chain_id, + want.genesis.chain_id, ); anyhow::ensure!( - got.fork_number < genesis.fork_number, + got.genesis.fork_number < want.genesis.fork_number, "transition to a past fork is not allowed: old = {:?}, new = {:?}", - got.fork_number, - genesis.fork_number, + got.genesis.fork_number, + want.genesis.fork_number, ); - genesis.verify().context("genesis.verify()")?; + want.genesis.verify().context("genesis.verify()")?; } let genesis = - zksync_protobuf::serde::serialize(genesis, serde_json::value::Serializer).unwrap(); + zksync_protobuf::serde::serialize(&want.genesis, serde_json::value::Serializer) + .unwrap(); + let global_config = + zksync_protobuf::serde::serialize(want, serde_json::value::Serializer).unwrap(); let state = zksync_protobuf::serde::serialize( &ReplicaState::default(), serde_json::value::Serializer, @@ -131,14 +135,15 @@ impl ConsensusDal<'_, '_> { sqlx::query!( r#" INSERT INTO - consensus_replica_state (fake_key, genesis, state) + consensus_replica_state (fake_key, global_config, genesis, state) VALUES - (TRUE, $1, $2) + (TRUE, $1, $2, $3) "#, + global_config, genesis, state, ) - .instrument("try_update_genesis#INSERT INTO consenuss_replica_state") + .instrument("try_update_global_config#INSERT INTO consensus_replica_state") .execute(&mut txn) .await?; txn.commit().await?; @@ -154,25 +159,33 @@ impl ConsensusDal<'_, '_> { .start_transaction() .await .context("start_transaction")?; - let Some(old) = txn.consensus_dal().genesis().await.context("genesis()")? else { + let Some(old) = txn + .consensus_dal() + .global_config() + .await + .context("global_config()")? 
+ else { return Ok(()); }; - let new = validator::GenesisRaw { - chain_id: old.chain_id, - fork_number: old.fork_number.next(), - first_block: txn - .consensus_dal() - .next_block() - .await - .context("next_block()")?, - - protocol_version: old.protocol_version, - validators: old.validators.clone(), - attesters: old.attesters.clone(), - leader_selection: old.leader_selection.clone(), - } - .with_hash(); - txn.consensus_dal().try_update_genesis(&new).await?; + let new = GlobalConfig { + genesis: validator::GenesisRaw { + chain_id: old.genesis.chain_id, + fork_number: old.genesis.fork_number.next(), + first_block: txn + .consensus_dal() + .next_block() + .await + .context("next_block()")?, + + protocol_version: old.genesis.protocol_version, + validators: old.genesis.validators.clone(), + attesters: old.genesis.attesters.clone(), + leader_selection: old.genesis.leader_selection.clone(), + } + .with_hash(), + registry_address: old.registry_address, + }; + txn.consensus_dal().try_update_global_config(&new).await?; txn.commit().await?; Ok(()) } @@ -259,7 +272,12 @@ impl ConsensusDal<'_, '_> { /// so it might NOT be the certificate for the last L2 block. pub async fn block_certificates_range(&mut self) -> anyhow::Result<BlockStoreState> { // It cannot be older than genesis first block. - let mut start = self.genesis().await?.context("genesis()")?.first_block; + let mut start = self + .global_config() + .await? + .context("genesis()")? + .genesis + .first_block; start = start.max(self.first_block().await.context("first_block()")?); let row = sqlx::query!( r#" @@ -422,21 +440,96 @@ impl ConsensusDal<'_, '_> { Ok(()) } + /// Persist the attester committee for the given batch. + pub async fn upsert_attester_committee( + &mut self, + number: attester::BatchNumber, + committee: &attester::Committee, + ) -> anyhow::Result<()> { + let committee = proto::AttesterCommittee::build(committee); + let committee = + zksync_protobuf::serde::serialize_proto(&committee, serde_json::value::Serializer) + .unwrap(); + sqlx::query!( + r#" + INSERT INTO + l1_batches_consensus_committees (l1_batch_number, attesters, updated_at) + VALUES + ($1, $2, NOW()) + ON CONFLICT (l1_batch_number) DO + UPDATE + SET + l1_batch_number = $1, + attesters = $2, + updated_at = NOW() + "#, + i64::try_from(number.0).context("overflow")?, + committee + ) + .instrument("upsert_attester_committee") + .report_latency() + .execute(self.storage) + .await?; + Ok(()) + } + + /// Fetches the attester committee for the L1 batch with the given number. + pub async fn attester_committee( + &mut self, + n: attester::BatchNumber, + ) -> anyhow::Result<Option<attester::Committee>> { + let Some(row) = sqlx::query!( + r#" + SELECT + attesters + FROM + l1_batches_consensus_committees + WHERE + l1_batch_number = $1 + "#, + i64::try_from(n.0)? + ) + .instrument("attester_committee") + .report_latency() + .fetch_optional(self.storage) + .await? + else { + return Ok(None); + }; + let raw = zksync_protobuf::serde::deserialize_proto(&row.attesters) + .context("deserialize_proto()")?; + Ok(Some( + proto::AttesterCommittee::read(&raw).context("read()")?, + )) + } + /// Inserts a certificate for the L1 batch. /// Noop if a certificate for the same L1 batch is already present. - /// No verification is performed - it cannot be performed due to circular dependency on + /// Verification against previously stored attester committee is performed. + /// Batch hash is not verified - it cannot be performed due to circular dependency on /// `zksync_l1_contract_interface`.
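Note the ordering that these two new methods introduce: `insert_batch_certificate` (below) verifies a QC against the committee stored by `upsert_attester_committee`, so the committee for a batch must be persisted before its certificate. A usage sketch under that assumption (the wrapper function and its wiring are ours, not part of the PR):

```rust
use anyhow::Context as _;
use zksync_consensus_roles::attester;
use zksync_dal::{Connection, Core, CoreDal};

// Sketch: persist the committee for batch `n`, then the certificate. The upsert is
// idempotent; the certificate insert verifies the QC and errors out on duplicates.
async fn store_batch_qc(
    conn: &mut Connection<'_, Core>,
    committee: &attester::Committee,
    cert: &attester::BatchQC,
) -> anyhow::Result<()> {
    let n = cert.message.number;
    conn.consensus_dal()
        .upsert_attester_committee(n, committee)
        .await
        .context("upsert_attester_committee()")?;
    conn.consensus_dal()
        .insert_batch_certificate(cert)
        .await
        .context("insert_batch_certificate()")?;
    Ok(())
}
```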
pub async fn insert_batch_certificate( &mut self, cert: &attester::BatchQC, ) -> anyhow::Result<()> { - let res = sqlx::query!( + let cfg = self + .global_config() + .await + .context("global_config()")? + .context("genesis is missing")?; + let committee = self + .attester_committee(cert.message.number) + .await + .context("attester_committee()")? + .context("attester committee is missing")?; + cert.verify(cfg.genesis.hash(), &committee) + .context("cert.verify()")?; + sqlx::query!( r#" INSERT INTO - l1_batches_consensus (l1_batch_number, certificate, created_at, updated_at) + l1_batches_consensus (l1_batch_number, certificate, updated_at, created_at) VALUES ($1, $2, NOW(), NOW()) - ON CONFLICT (l1_batch_number) DO NOTHING "#, i64::try_from(cert.message.number.0).context("overflow")?, // Unwrap is ok, because serialization should always succeed. @@ -446,9 +539,6 @@ impl ConsensusDal<'_, '_> { .report_latency() .execute(self.storage) .await?; - if res.rows_affected().is_zero() { - tracing::debug!(l1_batch_number = ?cert.message.number, "duplicate batch certificate"); - } Ok(()) } @@ -457,24 +547,28 @@ impl ConsensusDal<'_, '_> { pub async fn last_batch_certificate_number( &mut self, ) -> anyhow::Result<Option<attester::BatchNumber>> { - let row = sqlx::query!( + let Some(row) = sqlx::query!( r#" SELECT - MAX(l1_batch_number) AS "number" + l1_batch_number FROM l1_batches_consensus + ORDER BY + l1_batch_number DESC + LIMIT + 1 "# ) .instrument("last_batch_certificate_number") .report_latency() - .fetch_one(self.storage) - .await?; - - let Some(n) = row.number else { + .fetch_optional(self.storage) + .await? + else { return Ok(None); }; + Ok(Some(attester::BatchNumber( - n.try_into().context("overflow")?, + row.l1_batch_number.try_into().context("overflow")?, ))) } @@ -529,7 +623,7 @@ impl ConsensusDal<'_, '_> { /// This is a main node only query. /// ENs should call the attestation_status RPC of the main node. pub async fn attestation_status(&mut self) -> anyhow::Result<Option<AttestationStatus>> { - let Some(genesis) = self.genesis().await.context("genesis()")? else { + let Some(cfg) = self.global_config().await.context("genesis()")? else { return Ok(None); }; let Some(next_batch_to_attest) = async { @@ -542,18 +636,21 @@ impl ConsensusDal<'_, '_> { return Ok(Some(last + 1)); } // Otherwise start with the batch containing the first block of the fork. - self.batch_of_block(genesis.first_block) + self.batch_of_block(cfg.genesis.first_block) .await .context("batch_of_block()") } .await? else { - tracing::info!(%genesis.first_block, "genesis block not found"); + tracing::info!(%cfg.genesis.first_block, "genesis block not found"); return Ok(None); }; Ok(Some(AttestationStatus { - genesis: genesis.hash(), - next_batch_to_attest, + genesis: cfg.genesis.hash(), + // We never attest batch 0 for technical reasons: + // * it is not supported to read state before batch 0.
+ // * the registry contract needs to be deployed before we can start operating on it + next_batch_to_attest: next_batch_to_attest.max(attester::BatchNumber(1)), })) } } @@ -563,8 +660,9 @@ mod tests { use rand::Rng as _; use zksync_consensus_roles::{attester, validator}; use zksync_consensus_storage::ReplicaState; - use zksync_types::{L1BatchNumber, ProtocolVersion}; + use zksync_types::ProtocolVersion; + use super::GlobalConfig; use crate::{ tests::{create_l1_batch_header, create_l2_block_header}, ConnectionPool, Core, CoreDal, @@ -575,19 +673,22 @@ mod tests { let rng = &mut rand::thread_rng(); let pool = ConnectionPool::<Core>::test_pool().await; let mut conn = pool.connection().await.unwrap(); - assert_eq!(None, conn.consensus_dal().genesis().await.unwrap()); + assert_eq!(None, conn.consensus_dal().global_config().await.unwrap()); for n in 0..3 { let setup = validator::testonly::Setup::new(rng, 3); let mut genesis = (*setup.genesis).clone(); genesis.fork_number = validator::ForkNumber(n); - let genesis = genesis.with_hash(); + let cfg = GlobalConfig { + genesis: genesis.with_hash(), + registry_address: Some(rng.gen()), + }; conn.consensus_dal() - .try_update_genesis(&genesis) + .try_update_global_config(&cfg) .await .unwrap(); assert_eq!( - genesis, - conn.consensus_dal().genesis().await.unwrap().unwrap() + cfg, + conn.consensus_dal().global_config().await.unwrap().unwrap() ); assert_eq!( ReplicaState::default(), @@ -597,8 +698,8 @@ mod tests { let want: ReplicaState = rng.gen(); conn.consensus_dal().set_replica_state(&want).await.unwrap(); assert_eq!( - genesis, - conn.consensus_dal().genesis().await.unwrap().unwrap() + cfg, + conn.consensus_dal().global_config().await.unwrap().unwrap() ); assert_eq!(want, conn.consensus_dal().replica_state().await.unwrap()); } } #[tokio::test] async fn test_batch_certificate() { let rng = &mut rand::thread_rng(); + let setup = validator::testonly::Setup::new(rng, 3); let pool = ConnectionPool::<Core>::test_pool().await; let mut conn = pool.connection().await.unwrap(); + let cfg = GlobalConfig { + genesis: setup.genesis.clone(), + registry_address: Some(rng.gen()), + }; + conn.consensus_dal() + .try_update_global_config(&cfg) + .await + .unwrap(); - let mut mock_batch_qc = |number: L1BatchNumber| { - let mut cert: attester::BatchQC = rng.gen(); - cert.message.number.0 = u64::from(number.0); - cert.signatures.add(rng.gen(), rng.gen()); - cert + let mut make_cert = |number: attester::BatchNumber| { + let m = attester::Batch { + genesis: setup.genesis.hash(), + hash: rng.gen(), + number, + }; + let mut sigs = attester::MultiSig::default(); + for k in &setup.attester_keys { + sigs.add(k.public(), k.sign_msg(m.clone()).sig); + } + attester::BatchQC { + message: m, + signatures: sigs, + } }; // Required for inserting l2 blocks @@ -627,8 +746,7 @@ mod tests { // Insert some mock L2 blocks and L1 batches let mut block_number = 0; let mut batch_number = 0; - let num_batches = 3; - for _ in 0..num_batches { + for _ in 0..3 { for _ in 0..3 { block_number += 1; let l2_block = create_l2_block_header(block_number); } batch_number += 1; let l1_batch = create_l1_batch_header(batch_number); - conn.blocks_dal() .insert_mock_l1_batch(&l1_batch) .await .unwrap(); - conn.blocks_dal() .mark_l2_blocks_as_executed_in_l1_batch(l1_batch.number) .await .unwrap(); } - let l1_batch_number = L1BatchNumber(batch_number); + let n = attester::BatchNumber(batch_number.into()); // Insert a batch certificate for the last L1 batch.
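The clamp added to `attestation_status` above encodes a small decision rule that is easy to state on its own. Restated as a pure function (our formulation, for illustration only):

```rust
use zksync_consensus_roles::attester;

// Resume after the last certified batch if there is one, otherwise start at the
// batch containing the first block of the fork; batch 0 is never attested.
fn next_to_attest(
    last_certified: Option<attester::BatchNumber>,
    first_fork_batch: attester::BatchNumber,
) -> attester::BatchNumber {
    let next = match last_certified {
        Some(last) => last.next(),
        None => first_fork_batch,
    };
    next.max(attester::BatchNumber(1))
}
```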
- let cert1 = mock_batch_qc(l1_batch_number); - + let want = make_cert(n); conn.consensus_dal() - .insert_batch_certificate(&cert1) + .upsert_attester_committee(n, setup.genesis.attesters.as_ref().unwrap()) .await .unwrap(); - - // Try insert duplicate batch certificate for the same batch. - let cert2 = mock_batch_qc(l1_batch_number); - conn.consensus_dal() - .insert_batch_certificate(&cert2) + .insert_batch_certificate(&want) .await .unwrap(); + // Reinserting a cert should fail. + assert!(conn + .consensus_dal() + .insert_batch_certificate(&make_cert(n)) + .await + .is_err()); + // Retrieve the latest certificate. - let number = conn + let got_n = conn .consensus_dal() .last_batch_certificate_number() .await .unwrap() .unwrap(); - - let cert = conn + let got = conn .consensus_dal() - .batch_certificate(number) + .batch_certificate(got_n) .await .unwrap() .unwrap(); - - assert_eq!(cert, cert1, "duplicates are ignored"); + assert_eq!(got, want); // Try insert batch certificate for non-existing batch - let cert3 = mock_batch_qc(l1_batch_number.next()); - conn.consensus_dal() - .insert_batch_certificate(&cert3) - .await - .expect_err("missing payload"); - - // Insert one more L1 batch without a certificate. - conn.blocks_dal() - .insert_mock_l1_batch(&create_l1_batch_header(batch_number + 1)) + assert!(conn + .consensus_dal() + .insert_batch_certificate(&make_cert(n.next())) .await - .unwrap(); + .is_err()); } } diff --git a/core/lib/dal/src/models/storage_protocol_version.rs b/core/lib/dal/src/models/storage_protocol_version.rs index c19fa560b67c..e53bf7b9d0a4 100644 --- a/core/lib/dal/src/models/storage_protocol_version.rs +++ b/core/lib/dal/src/models/storage_protocol_version.rs @@ -13,7 +13,7 @@ pub struct StorageProtocolVersion { pub minor: i32, pub patch: i32, pub timestamp: i64, - pub recursion_scheduler_level_vk_hash: Vec<u8>, + pub snark_wrapper_vk_hash: Vec<u8>, pub bootloader_code_hash: Vec<u8>, pub default_account_code_hash: Vec<u8>, } @@ -29,9 +29,7 @@ pub(crate) fn protocol_version_from_storage( }, timestamp: storage_version.timestamp as u64, l1_verifier_config: L1VerifierConfig { - recursion_scheduler_level_vk_hash: H256::from_slice( - &storage_version.recursion_scheduler_level_vk_hash, - ), + snark_wrapper_vk_hash: H256::from_slice(&storage_version.snark_wrapper_vk_hash), }, base_system_contracts_hashes: BaseSystemContractsHashes { bootloader: H256::from_slice(&storage_version.bootloader_code_hash), diff --git a/core/lib/dal/src/protocol_versions_dal.rs b/core/lib/dal/src/protocol_versions_dal.rs index 0d17044e6c51..8cb5094fd49e 100644 --- a/core/lib/dal/src/protocol_versions_dal.rs +++ b/core/lib/dal/src/protocol_versions_dal.rs @@ -71,16 +71,14 @@ impl ProtocolVersionsDal<'_, '_> { sqlx::query!( r#" INSERT INTO - protocol_patches (minor, patch, recursion_scheduler_level_vk_hash, created_at) + protocol_patches (minor, patch, snark_wrapper_vk_hash, created_at) VALUES ($1, $2, $3, NOW()) ON CONFLICT DO NOTHING "#, version.minor as i32, version.patch.0 as i32, - l1_verifier_config - .recursion_scheduler_level_vk_hash - .as_bytes(), + l1_verifier_config.snark_wrapper_vk_hash.as_bytes(), ) .instrument("save_protocol_version#patch") .with_arg("version", &version) @@ -235,7 +233,7 @@ impl ProtocolVersionsDal<'_, '_> { protocol_versions.bootloader_code_hash, protocol_versions.default_account_code_hash, protocol_patches.patch, - protocol_patches.recursion_scheduler_level_vk_hash + protocol_patches.snark_wrapper_vk_hash FROM protocol_versions JOIN protocol_patches ON protocol_patches.minor =
protocol_versions.id @@ -268,7 +266,7 @@ impl ProtocolVersionsDal<'_, '_> { let row = sqlx::query!( r#" SELECT - recursion_scheduler_level_vk_hash + snark_wrapper_vk_hash FROM protocol_patches WHERE @@ -282,16 +280,14 @@ impl ProtocolVersionsDal<'_, '_> { .await .unwrap()?; Some(L1VerifierConfig { - recursion_scheduler_level_vk_hash: H256::from_slice( - &row.recursion_scheduler_level_vk_hash, - ), + snark_wrapper_vk_hash: H256::from_slice(&row.snark_wrapper_vk_hash), }) } pub async fn get_patch_versions_for_vk( &mut self, minor_version: ProtocolVersionId, - recursion_scheduler_level_vk_hash: H256, + snark_wrapper_vk_hash: H256, ) -> DalResult> { let rows = sqlx::query!( r#" @@ -301,12 +297,12 @@ impl ProtocolVersionsDal<'_, '_> { protocol_patches WHERE minor = $1 - AND recursion_scheduler_level_vk_hash = $2 + AND snark_wrapper_vk_hash = $2 ORDER BY patch DESC "#, minor_version as i32, - recursion_scheduler_level_vk_hash.as_bytes() + snark_wrapper_vk_hash.as_bytes() ) .instrument("get_patch_versions_for_vk") .fetch_all(self.storage) diff --git a/core/lib/env_config/src/contracts.rs b/core/lib/env_config/src/contracts.rs index 3365f56add77..298c43b80ccd 100644 --- a/core/lib/env_config/src/contracts.rs +++ b/core/lib/env_config/src/contracts.rs @@ -89,6 +89,7 @@ CONTRACTS_L2_ERC20_BRIDGE_ADDR="0x8656770FA78c830456B00B4fFCeE6b1De0e1b888" CONTRACTS_L1_WETH_BRIDGE_PROXY_ADDR="0x8656770FA78c830456B00B4fFCeE6b1De0e1b888" CONTRACTS_L2_WETH_BRIDGE_ADDR="0x8656770FA78c830456B00B4fFCeE6b1De0e1b888" CONTRACTS_L2_TESTNET_PAYMASTER_ADDR="FC073319977e314F251EAE6ae6bE76B0B3BAeeCF" +CONTRACTS_L2_CONSENSUS_REGISTRY_ADDR="D64e136566a9E04eb05B30184fF577F52682D182" CONTRACTS_L1_MULTICALL3_ADDR="0xcA11bde05977b3631167028862bE2a173976CA11" CONTRACTS_L1_SHARED_BRIDGE_PROXY_ADDR="0x8656770FA78c830456B00B4fFCeE6b1De0e1b888" CONTRACTS_L2_SHARED_BRIDGE_ADDR="0x8656770FA78c830456B00B4fFCeE6b1De0e1b888" diff --git a/core/lib/env_config/src/genesis.rs b/core/lib/env_config/src/genesis.rs index 1eb83ae2f39e..bf30fd4cc339 100644 --- a/core/lib/env_config/src/genesis.rs +++ b/core/lib/env_config/src/genesis.rs @@ -72,7 +72,7 @@ impl FromEnv for GenesisConfig { l1_chain_id: L1ChainId(network_config.network.chain_id().0), sl_chain_id: Some(network_config.network.chain_id()), l2_chain_id: network_config.zksync_network_id, - recursion_scheduler_level_vk_hash: contracts_config.snark_wrapper_vk_hash, + snark_wrapper_vk_hash: contracts_config.snark_wrapper_vk_hash, fee_account: state_keeper .fee_account_addr .context("Fee account required for genesis")?, diff --git a/core/lib/multivm/src/versions/shadow.rs b/core/lib/multivm/src/versions/shadow.rs index 32a4463c425d..871258f43b85 100644 --- a/core/lib/multivm/src/versions/shadow.rs +++ b/core/lib/multivm/src/versions/shadow.rs @@ -77,7 +77,7 @@ where tracer: Self::TracerDispatcher, tx: Transaction, with_compression: bool, - ) -> (BytecodeCompressionResult, VmExecutionResultAndLogs) { + ) -> (BytecodeCompressionResult<'_>, VmExecutionResultAndLogs) { let tx_hash = tx.hash(); let main_result = self.main.inspect_transaction_with_bytecode_compression( tracer, diff --git a/core/lib/multivm/src/versions/vm_1_3_2/vm.rs b/core/lib/multivm/src/versions/vm_1_3_2/vm.rs index eb1ae45542db..8068e4847b83 100644 --- a/core/lib/multivm/src/versions/vm_1_3_2/vm.rs +++ b/core/lib/multivm/src/versions/vm_1_3_2/vm.rs @@ -83,7 +83,7 @@ impl VmInterface for Vm { tracer: Self::TracerDispatcher, tx: Transaction, with_compression: bool, - ) -> (BytecodeCompressionResult, VmExecutionResultAndLogs) { + ) 
-> (BytecodeCompressionResult<'_>, VmExecutionResultAndLogs) { if let Some(storage_invocations) = tracer.storage_invocations { self.vm .execution_mode @@ -156,7 +156,7 @@ impl VmInterface for Vm { result, ) } else { - (Ok(compressed_bytecodes), result) + (Ok(compressed_bytecodes.into()), result) } } diff --git a/core/lib/multivm/src/versions/vm_1_4_1/bootloader_state/state.rs b/core/lib/multivm/src/versions/vm_1_4_1/bootloader_state/state.rs index 22d7b2814cf6..241054ae0345 100644 --- a/core/lib/multivm/src/versions/vm_1_4_1/bootloader_state/state.rs +++ b/core/lib/multivm/src/versions/vm_1_4_1/bootloader_state/state.rs @@ -191,11 +191,11 @@ impl BootloaderState { l2_block.first_tx_index + l2_block.txs.len() } - pub(crate) fn get_last_tx_compressed_bytecodes(&self) -> Vec { + pub(crate) fn get_last_tx_compressed_bytecodes(&self) -> &[CompressedBytecodeInfo] { if let Some(tx) = self.last_l2_block().txs.last() { - tx.compressed_bytecodes.clone() + &tx.compressed_bytecodes } else { - vec![] + &[] } } diff --git a/core/lib/multivm/src/versions/vm_1_4_1/vm.rs b/core/lib/multivm/src/versions/vm_1_4_1/vm.rs index 8e63afd8e1ca..2c1a4ba5e36b 100644 --- a/core/lib/multivm/src/versions/vm_1_4_1/vm.rs +++ b/core/lib/multivm/src/versions/vm_1_4_1/vm.rs @@ -105,7 +105,7 @@ impl VmInterface for Vm { tracer: Self::TracerDispatcher, tx: Transaction, with_compression: bool, - ) -> (BytecodeCompressionResult, VmExecutionResultAndLogs) { + ) -> (BytecodeCompressionResult<'_>, VmExecutionResultAndLogs) { self.push_transaction_with_compression(tx, with_compression); let result = self.inspect_inner(tracer, VmExecutionMode::OneTx, None); if self.has_unpublished_bytecodes() { @@ -115,7 +115,10 @@ impl VmInterface for Vm { ) } else { ( - Ok(self.bootloader_state.get_last_tx_compressed_bytecodes()), + Ok(self + .bootloader_state + .get_last_tx_compressed_bytecodes() + .into()), result, ) } diff --git a/core/lib/multivm/src/versions/vm_1_4_2/bootloader_state/state.rs b/core/lib/multivm/src/versions/vm_1_4_2/bootloader_state/state.rs index e692c8a2640d..c0d94bd685c4 100644 --- a/core/lib/multivm/src/versions/vm_1_4_2/bootloader_state/state.rs +++ b/core/lib/multivm/src/versions/vm_1_4_2/bootloader_state/state.rs @@ -191,11 +191,11 @@ impl BootloaderState { l2_block.first_tx_index + l2_block.txs.len() } - pub(crate) fn get_last_tx_compressed_bytecodes(&self) -> Vec { + pub(crate) fn get_last_tx_compressed_bytecodes(&self) -> &[CompressedBytecodeInfo] { if let Some(tx) = self.last_l2_block().txs.last() { - tx.compressed_bytecodes.clone() + &tx.compressed_bytecodes } else { - vec![] + &[] } } diff --git a/core/lib/multivm/src/versions/vm_1_4_2/vm.rs b/core/lib/multivm/src/versions/vm_1_4_2/vm.rs index e7a1f69fa424..71633dd3fca3 100644 --- a/core/lib/multivm/src/versions/vm_1_4_2/vm.rs +++ b/core/lib/multivm/src/versions/vm_1_4_2/vm.rs @@ -105,7 +105,7 @@ impl VmInterface for Vm { tracer: Self::TracerDispatcher, tx: Transaction, with_compression: bool, - ) -> (BytecodeCompressionResult, VmExecutionResultAndLogs) { + ) -> (BytecodeCompressionResult<'_>, VmExecutionResultAndLogs) { self.push_transaction_with_compression(tx, with_compression); let result = self.inspect_inner(tracer, VmExecutionMode::OneTx, None); if self.has_unpublished_bytecodes() { @@ -115,7 +115,10 @@ impl VmInterface for Vm { ) } else { ( - Ok(self.bootloader_state.get_last_tx_compressed_bytecodes()), + Ok(self + .bootloader_state + .get_last_tx_compressed_bytecodes() + .into()), result, ) } diff --git 
a/core/lib/multivm/src/versions/vm_boojum_integration/bootloader_state/state.rs b/core/lib/multivm/src/versions/vm_boojum_integration/bootloader_state/state.rs index 8a605978a1ed..830fe482320b 100644 --- a/core/lib/multivm/src/versions/vm_boojum_integration/bootloader_state/state.rs +++ b/core/lib/multivm/src/versions/vm_boojum_integration/bootloader_state/state.rs @@ -191,11 +191,11 @@ impl BootloaderState { l2_block.first_tx_index + l2_block.txs.len() } - pub(crate) fn get_last_tx_compressed_bytecodes(&self) -> Vec { + pub(crate) fn get_last_tx_compressed_bytecodes(&self) -> &[CompressedBytecodeInfo] { if let Some(tx) = self.last_l2_block().txs.last() { - tx.compressed_bytecodes.clone() + &tx.compressed_bytecodes } else { - vec![] + &[] } } diff --git a/core/lib/multivm/src/versions/vm_boojum_integration/vm.rs b/core/lib/multivm/src/versions/vm_boojum_integration/vm.rs index 4b6b6931dd22..c7b4a5537acb 100644 --- a/core/lib/multivm/src/versions/vm_boojum_integration/vm.rs +++ b/core/lib/multivm/src/versions/vm_boojum_integration/vm.rs @@ -106,7 +106,7 @@ impl VmInterface for Vm { tracer: Self::TracerDispatcher, tx: Transaction, with_compression: bool, - ) -> (BytecodeCompressionResult, VmExecutionResultAndLogs) { + ) -> (BytecodeCompressionResult<'_>, VmExecutionResultAndLogs) { self.push_transaction_with_compression(tx, with_compression); let result = self.inspect_inner(tracer, VmExecutionMode::OneTx); if self.has_unpublished_bytecodes() { @@ -116,7 +116,10 @@ impl VmInterface for Vm { ) } else { ( - Ok(self.bootloader_state.get_last_tx_compressed_bytecodes()), + Ok(self + .bootloader_state + .get_last_tx_compressed_bytecodes() + .into()), result, ) } diff --git a/core/lib/multivm/src/versions/vm_fast/bootloader_state/state.rs b/core/lib/multivm/src/versions/vm_fast/bootloader_state/state.rs index ce37636d2cda..15b4daf02a77 100644 --- a/core/lib/multivm/src/versions/vm_fast/bootloader_state/state.rs +++ b/core/lib/multivm/src/versions/vm_fast/bootloader_state/state.rs @@ -189,11 +189,11 @@ impl BootloaderState { l2_block.first_tx_index + l2_block.txs.len() } - pub(crate) fn get_last_tx_compressed_bytecodes(&self) -> Vec { + pub(crate) fn get_last_tx_compressed_bytecodes(&self) -> &[CompressedBytecodeInfo] { if let Some(tx) = self.last_l2_block().txs.last() { - tx.compressed_bytecodes.clone() + &tx.compressed_bytecodes } else { - vec![] + &[] } } diff --git a/core/lib/multivm/src/versions/vm_fast/tests/block_tip.rs b/core/lib/multivm/src/versions/vm_fast/tests/block_tip.rs index 239d40947a67..15af9d868adc 100644 --- a/core/lib/multivm/src/versions/vm_fast/tests/block_tip.rs +++ b/core/lib/multivm/src/versions/vm_fast/tests/block_tip.rs @@ -10,22 +10,18 @@ use zksync_types::{ commitment::SerializeCommitment, fee_model::BatchFeeInput, get_code_key, l2_to_l1_log::L2ToL1Log, writes::StateDiffRecord, Address, Execute, H256, U256, }; -use zksync_utils::{bytecode::hash_bytecode, bytes_to_be_words, h256_to_u256, u256_to_h256}; +use zksync_utils::{bytecode::hash_bytecode, u256_to_h256}; -use super::utils::{get_complex_upgrade_abi, read_complex_upgrade}; -use crate::versions::vm_fast::tests::tester::{ - default_l1_batch, get_empty_storage, InMemoryStorageView, VmTesterBuilder, +use super::{ + tester::{default_l1_batch, get_empty_storage, VmTesterBuilder}, + utils::{get_complex_upgrade_abi, read_complex_upgrade}, }; use crate::{ - interface::{TxExecutionMode, VmExecutionMode, VmInterface}, - vm_latest::{ - constants::{ - BOOTLOADER_BATCH_TIP_CIRCUIT_STATISTICS_OVERHEAD, - 
BOOTLOADER_BATCH_TIP_METRICS_SIZE_OVERHEAD, BOOTLOADER_BATCH_TIP_OVERHEAD, - MAX_VM_PUBDATA_PER_BATCH, - }, - tracers::PubdataTracer, - L1BatchEnv, TracerDispatcher, + interface::{L1BatchEnv, TxExecutionMode, VmExecutionMode, VmInterface, VmInterfaceExt}, + vm_latest::constants::{ + BOOTLOADER_BATCH_TIP_CIRCUIT_STATISTICS_OVERHEAD, + BOOTLOADER_BATCH_TIP_METRICS_SIZE_OVERHEAD, BOOTLOADER_BATCH_TIP_OVERHEAD, + MAX_VM_PUBDATA_PER_BATCH, }, }; @@ -130,7 +126,6 @@ fn execute_test(test_data: L1MessengerTestData) -> TestStatistics { // We are measuring computational cost, so prices for pubdata don't matter, while they artificially dilute // the gas limit - let batch_env = L1BatchEnv { fee_input: BatchFeeInput::pubdata_independent(100_000, 100_000, 100_000), ..default_l1_batch(zksync_types::L1BatchNumber(1)) @@ -143,15 +138,7 @@ fn execute_test(test_data: L1MessengerTestData) -> TestStatistics { .with_l1_batch_env(batch_env) .build(); - let bytecodes = test_data - .bytecodes - .iter() - .map(|bytecode| { - let hash = hash_bytecode(bytecode); - let words = bytes_to_be_words(bytecode.clone()); - (h256_to_u256(hash), words) - }) - .collect(); + let bytecodes = test_data.bytecodes.iter().map(Vec::as_slice); vm.vm.insert_bytecodes(bytecodes); let txs_data = populate_mimic_calls(test_data.clone()); @@ -163,7 +150,7 @@ fn execute_test(test_data: L1MessengerTestData) -> TestStatistics { contract_address: CONTRACT_FORCE_DEPLOYER_ADDRESS, calldata: data, value: U256::zero(), - factory_deps: None, + factory_deps: vec![], }, None, ); @@ -173,44 +160,25 @@ fn execute_test(test_data: L1MessengerTestData) -> TestStatistics { let result = vm.vm.execute(VmExecutionMode::OneTx); assert!( !result.result.is_failed(), - "Transaction {i} wasn't successful for input: {:#?}", - test_data + "Transaction {i} wasn't successful for input: {test_data:#?}" ); } - // Now we count how much ergs were spent at the end of the batch + // Now we count how much gas was spent at the end of the batch // It is assumed that the top level frame is the bootloader + vm.vm.enforce_state_diffs(test_data.state_diffs.clone()); + let gas_before = vm.vm.gas_remaining(); - let ergs_before = vm.vm.gas_remaining(); - - // We ensure that indeed the provided state diffs are used - let pubdata_tracer = PubdataTracer::::new_with_forced_state_diffs( - vm.vm.batch_env.clone(), - VmExecutionMode::Batch, - test_data.state_diffs.clone(), - ); - - let result = vm.vm.inspect_inner( - TracerDispatcher::default(), - VmExecutionMode::Batch, - Some(pubdata_tracer), - ); - + let result = vm.vm.execute(VmExecutionMode::Batch); assert!( !result.result.is_failed(), - "Batch wasn't successful for input: {:?}", - test_data - ); - - let ergs_after = vm.vm.gas_remaining(); - - assert_eq!( - (ergs_before - ergs_after) as u64, - result.statistics.gas_used + "Batch wasn't successful for input: {test_data:?}" ); + let gas_after = vm.vm.gas_remaining(); + assert_eq!((gas_before - gas_after) as u64, result.statistics.gas_used); TestStatistics { - max_used_gas: ergs_before - ergs_after, + max_used_gas: gas_before - gas_after, circuit_statistics: result.statistics.circuit_statistic.total() as u64, execution_metrics_size: result.get_execution_metrics(None).size() as u64, } diff --git a/core/lib/multivm/src/versions/vm_fast/tests/circuits.rs b/core/lib/multivm/src/versions/vm_fast/tests/circuits.rs index c582bd28c882..0270ac35475b 100644 --- a/core/lib/multivm/src/versions/vm_fast/tests/circuits.rs +++ b/core/lib/multivm/src/versions/vm_fast/tests/circuits.rs @@ -1,17 +1,16 @@ use 
zksync_types::{Address, Execute, U256}; +use super::tester::VmTesterBuilder; use crate::{ interface::{TxExecutionMode, VmExecutionMode, VmInterface}, - vm_latest::{ - constants::BATCH_COMPUTATIONAL_GAS_LIMIT, tests::tester::VmTesterBuilder, HistoryEnabled, - }, + vm_latest::constants::BATCH_COMPUTATIONAL_GAS_LIMIT, }; // Checks that estimated number of circuits for simple transfer doesn't differ much // from hardcoded expected value. #[test] fn test_circuits() { - let mut vm = VmTesterBuilder::new(HistoryEnabled) + let mut vm = VmTesterBuilder::new() .with_empty_in_memory_storage() .with_random_rich_accounts(1) .with_deployer() @@ -25,12 +24,12 @@ fn test_circuits() { contract_address: Address::random(), calldata: Vec::new(), value: U256::from(1u8), - factory_deps: None, + factory_deps: vec![], }, None, ); vm.vm.push_transaction(tx); - let res = vm.vm.inspect(Default::default(), VmExecutionMode::OneTx); + let res = vm.vm.inspect((), VmExecutionMode::OneTx); let s = res.statistics.circuit_statistic; // Check `circuit_statistic`. diff --git a/core/lib/multivm/src/versions/vm_fast/tests/code_oracle.rs b/core/lib/multivm/src/versions/vm_fast/tests/code_oracle.rs index 29df17d7293c..836603d77d87 100644 --- a/core/lib/multivm/src/versions/vm_fast/tests/code_oracle.rs +++ b/core/lib/multivm/src/versions/vm_fast/tests/code_oracle.rs @@ -6,9 +6,12 @@ use zksync_utils::{bytecode::hash_bytecode, h256_to_u256, u256_to_h256}; use crate::{ interface::{TxExecutionMode, VmExecutionMode, VmInterface, VmInterfaceExt}, - vm_fast::tests::{ - tester::{get_empty_storage, VmTesterBuilder}, - utils::{load_precompiles_contract, read_precompiles_contract, read_test_contract}, + vm_fast::{ + circuits_tracer::CircuitsTracer, + tests::{ + tester::{get_empty_storage, VmTesterBuilder}, + utils::{load_precompiles_contract, read_precompiles_contract, read_test_contract}, + }, }, }; @@ -209,7 +212,7 @@ fn refunds_in_code_oracle() { if decommit { let (_, is_fresh) = vm.vm.inner.world_diff.decommit_opcode( &mut vm.vm.world, - &mut vm.vm.tracer, + &mut CircuitsTracer::default(), h256_to_u256(normal_zkevm_bytecode_hash), ); assert!(is_fresh); diff --git a/core/lib/multivm/src/versions/vm_fast/tests/get_used_contracts.rs b/core/lib/multivm/src/versions/vm_fast/tests/get_used_contracts.rs index 85ff4bbf5e9b..3fcef71add07 100644 --- a/core/lib/multivm/src/versions/vm_fast/tests/get_used_contracts.rs +++ b/core/lib/multivm/src/versions/vm_fast/tests/get_used_contracts.rs @@ -6,7 +6,7 @@ use itertools::Itertools; use zk_evm_1_3_1::zkevm_opcode_defs::decoding::{EncodingModeProduction, VmEncodingMode}; use zksync_system_constants::CONTRACT_DEPLOYER_ADDRESS; use zksync_test_account::Account; -use zksync_types::{Address, Execute, U256}; +use zksync_types::{AccountTreeId, Address, Execute, StorageKey, H256, U256}; use zksync_utils::{bytecode::hash_bytecode, h256_to_u256}; use crate::{ @@ -110,7 +110,13 @@ fn inflated_counter_bytecode() -> Vec { counter_bytecode } -fn execute_proxy_counter(gas: u32) -> (VmTester, U256, VmExecutionResultAndLogs) { +#[derive(Debug)] +struct ProxyCounterData { + proxy_counter_address: Address, + counter_bytecode_hash: U256, +} + +fn execute_proxy_counter(gas: u32) -> (VmTester, ProxyCounterData, VmExecutionResultAndLogs) { let counter_bytecode = inflated_counter_bytecode(); let counter_bytecode_hash = h256_to_u256(hash_bytecode(&counter_bytecode)); let counter_address = Address::repeat_byte(0x23); @@ -157,27 +163,69 @@ fn execute_proxy_counter(gas: u32) -> (VmTester, U256, VmExecutionResultAndLogs) .vm 
.execute_transaction_with_bytecode_compression(increment_tx, true); compression_result.unwrap(); - (vm, counter_bytecode_hash, exec_result) + let data = ProxyCounterData { + proxy_counter_address: deploy_tx.address, + counter_bytecode_hash, + }; + (vm, data, exec_result) } #[test] fn get_used_contracts_with_far_call() { - let (vm, counter_bytecode_hash, exec_result) = execute_proxy_counter(100_000); + let (vm, data, exec_result) = execute_proxy_counter(100_000); assert!(!exec_result.result.is_failed(), "{exec_result:#?}"); let decommitted_hashes = vm.vm.decommitted_hashes().collect::>(); assert!( - decommitted_hashes.contains(&counter_bytecode_hash), + decommitted_hashes.contains(&data.counter_bytecode_hash), "{decommitted_hashes:?}" ); } #[test] fn get_used_contracts_with_out_of_gas_far_call() { - let (vm, counter_bytecode_hash, exec_result) = execute_proxy_counter(10_000); + let (mut vm, data, exec_result) = execute_proxy_counter(10_000); assert_matches!(exec_result.result, ExecutionResult::Revert { .. }); let decommitted_hashes = vm.vm.decommitted_hashes().collect::>(); assert!( - decommitted_hashes.contains(&counter_bytecode_hash), + decommitted_hashes.contains(&data.counter_bytecode_hash), "{decommitted_hashes:?}" ); + + // Execute another transaction with a successful far call and check that it's still charged for decommitment. + let account = &mut vm.rich_accounts[0]; + let (_, proxy_counter_abi) = read_proxy_counter_contract(); + let increment = proxy_counter_abi.function("increment").unwrap(); + let increment_tx = account.get_l2_tx_for_execute( + Execute { + contract_address: data.proxy_counter_address, + calldata: increment + .encode_input(&[Token::Uint(1.into()), Token::Uint(u64::MAX.into())]) + .unwrap(), + value: 0.into(), + factory_deps: vec![], + }, + None, + ); + let (compression_result, exec_result) = vm + .vm + .execute_transaction_with_bytecode_compression(increment_tx, true); + compression_result.unwrap(); + assert!(!exec_result.result.is_failed(), "{exec_result:#?}"); + + let proxy_counter_cost_key = StorageKey::new( + AccountTreeId::new(data.proxy_counter_address), + H256::from_low_u64_be(1), + ); + let far_call_cost_log = exec_result + .logs + .storage_logs + .iter() + .find(|log| log.log.key == proxy_counter_cost_key) + .expect("no cost log"); + assert!( + far_call_cost_log.previous_value.is_zero(), + "{far_call_cost_log:?}" + ); + let far_call_cost = h256_to_u256(far_call_cost_log.log.value); + assert!(far_call_cost > 10_000.into(), "{far_call_cost}"); } diff --git a/core/lib/multivm/src/versions/vm_fast/tests/mod.rs b/core/lib/multivm/src/versions/vm_fast/tests/mod.rs index 9d5b229f23a9..730c573cdcf4 100644 --- a/core/lib/multivm/src/versions/vm_fast/tests/mod.rs +++ b/core/lib/multivm/src/versions/vm_fast/tests/mod.rs @@ -1,9 +1,9 @@ +mod block_tip; mod bootloader; -mod default_aa; -//mod block_tip; FIXME: requires vm metrics mod bytecode_publishing; +mod default_aa; // mod call_tracer; FIXME: requires tracers -// mod circuits; FIXME: requires tracers / circuit stats +mod circuits; mod code_oracle; mod gas_limit; mod get_used_contracts; @@ -11,7 +11,7 @@ mod is_write_initial; mod l1_tx_execution; mod l2_blocks; mod nonce_holder; -// mod precompiles; FIXME: requires tracers / circuit stats +mod precompiles; // mod prestate_tracer; FIXME: is pre-state tracer still relevant? 
mod refunds; mod require_eip712; diff --git a/core/lib/multivm/src/versions/vm_fast/tests/precompiles.rs b/core/lib/multivm/src/versions/vm_fast/tests/precompiles.rs index 5bdf0930d558..f77eeb4f126e 100644 --- a/core/lib/multivm/src/versions/vm_fast/tests/precompiles.rs +++ b/core/lib/multivm/src/versions/vm_fast/tests/precompiles.rs @@ -1,9 +1,9 @@ -use zk_evm_1_5_0::zk_evm_abstractions::precompiles::PrecompileAddress; +use circuit_sequencer_api_1_5_0::geometry_config::get_geometry_config; use zksync_types::{Address, Execute}; +use super::{tester::VmTesterBuilder, utils::read_precompiles_contract}; use crate::{ interface::{TxExecutionMode, VmExecutionMode, VmInterface}, - vm_fast::tests::{tester::VmTesterBuilder, utils::read_precompiles_contract}, vm_latest::constants::BATCH_COMPUTATIONAL_GAS_LIMIT, }; @@ -30,25 +30,18 @@ fn test_keccak() { Execute { contract_address: address, calldata: hex::decode(keccak1000_calldata).unwrap(), - value: Default::default(), - factory_deps: None, + value: 0.into(), + factory_deps: vec![], }, None, ); vm.vm.push_transaction(tx); - let _ = vm.vm.inspect(Default::default(), VmExecutionMode::OneTx); + let exec_result = vm.vm.inspect((), VmExecutionMode::OneTx); + assert!(!exec_result.result.is_failed(), "{exec_result:#?}"); - let keccak_count = vm - .vm - .state - .precompiles_processor - .precompile_cycles_history - .inner() - .iter() - .filter(|(precompile, _)| precompile == &PrecompileAddress::Keccak256) - .count(); - - assert!(keccak_count >= 1000); + let keccak_count = exec_result.statistics.circuit_statistic.keccak256 + * get_geometry_config().cycles_per_keccak256_circuit as f32; + assert!(keccak_count >= 1000.0, "{keccak_count}"); } #[test] @@ -74,25 +67,18 @@ fn test_sha256() { Execute { contract_address: address, calldata: hex::decode(sha1000_calldata).unwrap(), - value: Default::default(), - factory_deps: None, + value: 0.into(), + factory_deps: vec![], }, None, ); vm.vm.push_transaction(tx); - let _ = vm.vm.inspect(Default::default(), VmExecutionMode::OneTx); - - let sha_count = vm - .vm - .state - .precompiles_processor - .precompile_cycles_history - .inner() - .iter() - .filter(|(precompile, _)| precompile == &PrecompileAddress::SHA256) - .count(); + let exec_result = vm.vm.inspect((), VmExecutionMode::OneTx); + assert!(!exec_result.result.is_failed(), "{exec_result:#?}"); - assert!(sha_count >= 1000); + let sha_count = exec_result.statistics.circuit_statistic.sha256 + * get_geometry_config().cycles_per_sha256_circuit as f32; + assert!(sha_count >= 1000.0, "{sha_count}"); } #[test] @@ -110,24 +96,17 @@ fn test_ecrecover() { let tx = account.get_l2_tx_for_execute( Execute { contract_address: account.address, - calldata: Vec::new(), - value: Default::default(), - factory_deps: None, + calldata: vec![], + value: 0.into(), + factory_deps: vec![], }, None, ); vm.vm.push_transaction(tx); - let _ = vm.vm.inspect(Default::default(), VmExecutionMode::OneTx); - - let ecrecover_count = vm - .vm - .state - .precompiles_processor - .precompile_cycles_history - .inner() - .iter() - .filter(|(precompile, _)| precompile == &PrecompileAddress::Ecrecover) - .count(); + let exec_result = vm.vm.inspect((), VmExecutionMode::OneTx); + assert!(!exec_result.result.is_failed(), "{exec_result:#?}"); - assert_eq!(ecrecover_count, 1); + let ecrecover_count = exec_result.statistics.circuit_statistic.ecrecover + * get_geometry_config().cycles_per_ecrecover_circuit as f32; + assert!((ecrecover_count - 1.0).abs() < 1e-4, "{ecrecover_count}"); } diff --git 
a/core/lib/multivm/src/versions/vm_fast/tests/tester/transaction_test_info.rs b/core/lib/multivm/src/versions/vm_fast/tests/tester/transaction_test_info.rs index 5b8f0cb0b10f..105bc5f2fd43 100644 --- a/core/lib/multivm/src/versions/vm_fast/tests/tester/transaction_test_info.rs +++ b/core/lib/multivm/src/versions/vm_fast/tests/tester/transaction_test_info.rs @@ -184,14 +184,22 @@ impl TransactionTestInfo { } // TODO this doesn't include all the state of ModifiedWorld -#[derive(Debug, PartialEq)] +#[derive(Debug)] struct VmStateDump { state: vm2::State>, storage_writes: Vec<((H160, U256), U256)>, events: Box<[vm2::Event]>, } -impl Vm { +impl PartialEq for VmStateDump { + fn eq(&self, other: &Self) -> bool { + self.state == other.state + && self.storage_writes == other.storage_writes + && self.events == other.events + } +} + +impl Vm { fn dump_state(&self) -> VmStateDump { VmStateDump { state: self.inner.state.clone(), diff --git a/core/lib/multivm/src/versions/vm_fast/vm.rs b/core/lib/multivm/src/versions/vm_fast/vm.rs index d40ea075f19c..d8816cfaf2a6 100644 --- a/core/lib/multivm/src/versions/vm_fast/vm.rs +++ b/core/lib/multivm/src/versions/vm_fast/vm.rs @@ -31,7 +31,7 @@ use super::{ use crate::{ glue::GlueInto, interface::{ - storage::ReadStorage, BytecodeCompressionError, CompressedBytecodeInfo, + storage::ReadStorage, BytecodeCompressionError, BytecodeCompressionResult, CurrentExecutionState, ExecutionResult, FinishedL1Batch, Halt, L1BatchEnv, L2BlockEnv, Refunds, SystemEnv, TxRevertReason, VmEvent, VmExecutionLogs, VmExecutionMode, VmExecutionResultAndLogs, VmExecutionStatistics, VmInterface, VmInterfaceHistoryEnabled, @@ -63,13 +63,15 @@ pub struct Vm { pub(crate) batch_env: L1BatchEnv, pub(crate) system_env: SystemEnv, snapshot: Option, - pub(crate) tracer: CircuitsTracer, + #[cfg(test)] + enforced_state_diffs: Option>, } impl Vm { fn run( &mut self, execution_mode: VmExecutionMode, + tracer: &mut CircuitsTracer, track_refunds: bool, ) -> (ExecutionResult, Refunds) { let mut refunds = Refunds { @@ -80,7 +82,7 @@ impl Vm { let mut pubdata_before = self.inner.world_diff.pubdata() as u32; let result = loop { - let hook = match self.inner.run(&mut self.world, &mut self.tracer) { + let hook = match self.inner.run(&mut self.world, tracer) { ExecutionEnd::SuspendedOnHook(hook) => hook, ExecutionEnd::ProgramFinished(output) => break ExecutionResult::Success { output }, ExecutionEnd::Reverted(output) => { @@ -91,7 +93,7 @@ impl Vm { } ExecutionEnd::Panicked => { break ExecutionResult::Halt { - reason: if self.gas_remaining() == 0 { + reason: if self.inner.state.current_frame.gas == 0 { Halt::BootloaderOutOfGas } else { Halt::VMPanic @@ -213,10 +215,7 @@ impl Vm { user_logs: extract_l2tol1logs_from_l1_messenger(&events), l2_to_l1_messages: VmEvent::extract_long_l2_to_l1_messages(&events), published_bytecodes, - state_diffs: self - .compute_state_diffs() - .filter(|diff| diff.address != L1_MESSENGER_ADDRESS) - .collect(), + state_diffs: self.compute_state_diffs(), }; // Save the pubdata for the future initial bootloader memory building @@ -231,7 +230,13 @@ impl Vm { } Hook::PaymasterValidationEntered | Hook::ValidationStepEnded => { /* unused */ } - Hook::DebugLog | Hook::DebugReturnData | Hook::NearCallCatch => { + Hook::DebugLog => { + let (log, log_arg) = self.get_debug_log(); + let last_tx = self.bootloader_state.last_l2_block().txs.last(); + let tx_hash = last_tx.map(|tx| tx.hash); + tracing::trace!(tx = ?tx_hash, "{log}: {log_arg}"); + } + Hook::DebugReturnData | Hook::NearCallCatch => { 
// These hooks are for debug purposes only } } @@ -249,6 +254,26 @@ impl Vm { .unwrap() } + fn get_debug_log(&self) -> (String, String) { + let hook_params = self.get_hook_params(); + let mut msg = u256_to_h256(hook_params[0]).as_bytes().to_vec(); + // Trim 0 byte padding at the end. + while msg.last() == Some(&0) { + msg.pop(); + } + + let data = hook_params[1]; + let msg = String::from_utf8(msg).expect("Invalid debug message"); + + // For long data, it is better to use hex-encoding for greater readability + let data_str = if data > U256::from(u64::MAX) { + format!("0x{data:x}") + } else { + data.to_string() + }; + (msg, data_str) + } + /// Should only be used when the bootloader is executing (e.g., when handling hooks). pub(crate) fn read_word_from_bootloader_heap(&self, word: usize) -> U256 { self.inner.state.heaps[vm2::FIRST_HEAP].read_u256(word as u32 * 32) @@ -314,10 +339,19 @@ impl Vm { self.write_to_bootloader_heap(memory); } - fn compute_state_diffs(&mut self) -> impl Iterator + '_ { - let storage = &mut self.world.storage; + #[cfg(test)] + pub(super) fn enforce_state_diffs(&mut self, diffs: Vec) { + self.enforced_state_diffs = Some(diffs); + } + + fn compute_state_diffs(&mut self) -> Vec { + #[cfg(test)] + if let Some(enforced_diffs) = self.enforced_state_diffs.take() { + return enforced_diffs; + } - self.inner.world_diff.get_storage_changes().map( + let storage = &mut self.world.storage; + let diffs = self.inner.world_diff.get_storage_changes().map( move |((address, key), (initial_value, final_value))| { let storage_key = StorageKey::new(AccountTreeId::new(address), u256_to_h256(key)); StateDiffRecord { @@ -334,14 +368,17 @@ impl Vm { final_value, } }, - ) + ); + diffs + .filter(|diff| diff.address != L1_MESSENGER_ADDRESS) + .collect() } pub(crate) fn decommitted_hashes(&self) -> impl Iterator + '_ { self.inner.world_diff.decommitted_hashes() } - fn gas_remaining(&self) -> u32 { + pub(super) fn gas_remaining(&self) -> u32 { self.inner.state.current_frame.gas } } @@ -356,13 +393,15 @@ impl Vm { .hash .into(); - let program_cache = HashMap::from([convert_system_contract_code( + let program_cache = HashMap::from([World::convert_system_contract_code( &system_env.base_system_smart_contracts.default_aa, false, )]); - let (_, bootloader) = - convert_system_contract_code(&system_env.base_system_smart_contracts.bootloader, true); + let (_, bootloader) = World::convert_system_contract_code( + &system_env.base_system_smart_contracts.bootloader, + true, + ); let bootloader_memory = bootloader_initial_memory(&batch_env); let mut inner = VirtualMachine::new( @@ -386,7 +425,7 @@ impl Vm { inner.state.current_frame.aux_heap_size = u32::MAX; inner.state.current_frame.exception_handler = INITIAL_FRAME_FORMAL_EH_LOCATION; - let mut me = Self { + let mut this = Self { world: World::new(storage, program_cache), inner, gas_for_account_validation: system_env.default_validation_computational_gas_limit, @@ -398,12 +437,11 @@ impl Vm { system_env, batch_env, snapshot: None, - tracer: CircuitsTracer::default(), + #[cfg(test)] + enforced_state_diffs: None, }; - - me.write_to_bootloader_heap(bootloader_memory); - - me + this.write_to_bootloader_heap(bootloader_memory); + this } // visible for testing @@ -465,12 +503,12 @@ impl VmInterface for Vm { track_refunds = true; } - self.tracer = Default::default(); - + let mut tracer = CircuitsTracer::default(); let start = self.inner.world_diff.snapshot(); let pubdata_before = self.inner.world_diff.pubdata(); + let gas_before = self.gas_remaining(); - let (result, 
refunds) = self.run(execution_mode, track_refunds); + let (result, refunds) = self.run(execution_mode, &mut tracer, track_refunds); let ignore_world_diff = matches!(execution_mode, VmExecutionMode::OneTx) && matches!(result, ExecutionResult::Halt { .. }); @@ -522,9 +560,8 @@ impl VmInterface for Vm { }; let pubdata_after = self.inner.world_diff.pubdata(); - - let circuit_statistic = self.tracer.circuit_statistic(); - + let circuit_statistic = tracer.circuit_statistic(); + let gas_remaining = self.gas_remaining(); VmExecutionResultAndLogs { result, logs, @@ -532,8 +569,8 @@ impl VmInterface for Vm { statistics: VmExecutionStatistics { contracts_used: 0, cycles_used: 0, - gas_used: 0, - gas_remaining: self.gas_remaining(), + gas_used: (gas_before - gas_remaining).into(), + gas_remaining, computational_gas_used: 0, total_log_queries: 0, pubdata_published: (pubdata_after - pubdata_before).max(0) as u32, @@ -548,17 +585,17 @@ impl VmInterface for Vm { (): Self::TracerDispatcher, tx: zksync_types::Transaction, with_compression: bool, - ) -> ( - Result, BytecodeCompressionError>, - VmExecutionResultAndLogs, - ) { + ) -> (BytecodeCompressionResult<'_>, VmExecutionResultAndLogs) { self.push_transaction_inner(tx, 0, with_compression); let result = self.inspect((), VmExecutionMode::OneTx); let compression_result = if self.has_unpublished_bytecodes() { Err(BytecodeCompressionError::BytecodeCompressionFailed) } else { - Ok(self.bootloader_state.get_last_tx_compressed_bytecodes()) + Ok(self + .bootloader_state + .get_last_tx_compressed_bytecodes() + .into()) }; (compression_result, result) } @@ -654,49 +691,56 @@ impl fmt::Debug for Vm { } } -#[derive(Debug, Clone)] +#[derive(Debug)] pub(crate) struct World { pub(crate) storage: S, - // TODO (PLA-1008): Store `Program`s in an LRU cache - program_cache: HashMap>>, + program_cache: HashMap>, pub(crate) bytecode_cache: HashMap>, } -impl World { - fn new(storage: S, program_cache: HashMap>>) -> Self { +impl World { + fn new(storage: S, program_cache: HashMap>) -> Self { Self { storage, program_cache, bytecode_cache: Default::default(), } } -} -impl vm2::World for World { - fn decommit_code(&mut self, hash: U256) -> Vec { - self.decommit(hash) - .code_page() - .as_ref() - .iter() - .flat_map(|u| { - let mut buffer = [0u8; 32]; - u.to_big_endian(&mut buffer); - buffer - }) - .collect() + fn bytecode_to_program(bytecode: &[u8]) -> Program { + Program::new( + decode_program( + &bytecode + .chunks_exact(8) + .map(|chunk| u64::from_be_bytes(chunk.try_into().unwrap())) + .collect::>(), + false, + ), + bytecode + .chunks_exact(32) + .map(U256::from_big_endian) + .collect::>(), + ) } - fn decommit(&mut self, hash: U256) -> Program> { - self.program_cache - .entry(hash) - .or_insert_with(|| { - bytecode_to_program(self.bytecode_cache.entry(hash).or_insert_with(|| { - self.storage - .load_factory_dep(u256_to_h256(hash)) - .expect("vm tried to decommit nonexistent bytecode") - })) - }) - .clone() + fn convert_system_contract_code( + code: &SystemContractCode, + is_bootloader: bool, + ) -> (U256, Program) { + ( + h256_to_u256(code.hash), + Program::new( + decode_program( + &code + .code + .iter() + .flat_map(|x| x.0.into_iter().rev()) + .collect::>(), + is_bootloader, + ), + code.code.clone(), + ), + ) } } @@ -745,38 +789,30 @@ impl vm2::StorageInterface for World { } } -fn bytecode_to_program>(bytecode: &[u8]) -> Program { - Program::new( - decode_program( - &bytecode - .chunks_exact(8) - .map(|chunk| u64::from_be_bytes(chunk.try_into().unwrap())) - .collect::>(), - 
false, - ), - bytecode - .chunks_exact(32) - .map(U256::from_big_endian) - .collect::>(), - ) -} +impl vm2::World for World { + fn decommit(&mut self, hash: U256) -> Program { + self.program_cache + .entry(hash) + .or_insert_with(|| { + Self::bytecode_to_program(self.bytecode_cache.entry(hash).or_insert_with(|| { + self.storage + .load_factory_dep(u256_to_h256(hash)) + .expect("vm tried to decommit nonexistent bytecode") + })) + }) + .clone() + } -fn convert_system_contract_code>( - code: &SystemContractCode, - is_bootloader: bool, -) -> (U256, Program) { - ( - h256_to_u256(code.hash), - Program::new( - decode_program( - &code - .code - .iter() - .flat_map(|x| x.0.into_iter().rev()) - .collect::>(), - is_bootloader, - ), - code.code.clone(), - ), - ) + fn decommit_code(&mut self, hash: U256) -> Vec { + self.decommit(hash) + .code_page() + .as_ref() + .iter() + .flat_map(|u| { + let mut buffer = [0u8; 32]; + u.to_big_endian(&mut buffer); + buffer + }) + .collect() + } } diff --git a/core/lib/multivm/src/versions/vm_latest/bootloader_state/state.rs b/core/lib/multivm/src/versions/vm_latest/bootloader_state/state.rs index f15199a74f84..4ba27b14bad6 100644 --- a/core/lib/multivm/src/versions/vm_latest/bootloader_state/state.rs +++ b/core/lib/multivm/src/versions/vm_latest/bootloader_state/state.rs @@ -191,11 +191,11 @@ impl BootloaderState { l2_block.first_tx_index + l2_block.txs.len() } - pub(crate) fn get_last_tx_compressed_bytecodes(&self) -> Vec { + pub(crate) fn get_last_tx_compressed_bytecodes(&self) -> &[CompressedBytecodeInfo] { if let Some(tx) = self.last_l2_block().txs.last() { - tx.compressed_bytecodes.clone() + &tx.compressed_bytecodes } else { - vec![] + &[] } } diff --git a/core/lib/multivm/src/versions/vm_latest/tracers/utils.rs b/core/lib/multivm/src/versions/vm_latest/tracers/utils.rs index bad09617b8f0..1ecb75c28071 100644 --- a/core/lib/multivm/src/versions/vm_latest/tracers/utils.rs +++ b/core/lib/multivm/src/versions/vm_latest/tracers/utils.rs @@ -95,7 +95,10 @@ pub(crate) fn get_debug_log( .into_iter() .map(u256_to_h256) .collect(); - let msg = vm_hook_params[0].as_bytes().to_vec(); + let mut msg = vm_hook_params[0].as_bytes().to_vec(); + while msg.last() == Some(&0) { + msg.pop(); + } let data = vm_hook_params[1].as_bytes().to_vec(); let msg = String::from_utf8(msg).expect("Invalid debug message"); @@ -109,10 +112,8 @@ pub(crate) fn get_debug_log( } else { data.to_string() }; - let tx_id = state.vm_local_state.tx_number_in_block; - - format!("Bootloader transaction {}: {} {}", tx_id, msg, data_str) + format!("Bootloader transaction {tx_id}: {msg}: {data_str}") } /// Reads the memory slice represented by the fat pointer. 
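Both the fast VM's new `Vm::get_debug_log` (earlier in this diff) and the vm_latest tracer above now decode debug-log hooks the same way: trim the zero padding from the 32-byte message word, and hex-encode wide data values. A self-contained sketch of that convention (the standalone helper is ours):

```rust
use zksync_types::U256;

// Decode a debug-log message word and its data value, mirroring the logic above.
fn decode_debug_log(msg_word: [u8; 32], data: U256) -> (String, String) {
    let mut msg = msg_word.to_vec();
    // Trim 0-byte padding at the end of the fixed-width message word.
    while msg.last() == Some(&0) {
        msg.pop();
    }
    let msg = String::from_utf8(msg).expect("Invalid debug message");
    // For long data, hex-encoding is more readable than decimal.
    let data_str = if data > U256::from(u64::MAX) {
        format!("0x{data:x}")
    } else {
        data.to_string()
    };
    (msg, data_str)
}
```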
@@ -167,8 +168,7 @@ pub(crate) fn print_debug_if_needed( VmHook::DebugReturnData => get_debug_returndata(memory, latest_returndata_ptr), _ => return, }; - - tracing::trace!("{}", log); + tracing::trace!("{log}"); } pub(crate) fn computational_gas_price( diff --git a/core/lib/multivm/src/versions/vm_latest/vm.rs b/core/lib/multivm/src/versions/vm_latest/vm.rs index c0c13669c2ef..a445a1d51402 100644 --- a/core/lib/multivm/src/versions/vm_latest/vm.rs +++ b/core/lib/multivm/src/versions/vm_latest/vm.rs @@ -141,7 +141,7 @@ impl VmInterface for Vm { tracer: Self::TracerDispatcher, tx: Transaction, with_compression: bool, - ) -> (BytecodeCompressionResult, VmExecutionResultAndLogs) { + ) -> (BytecodeCompressionResult<'_>, VmExecutionResultAndLogs) { self.push_transaction_with_compression(tx, with_compression); let result = self.inspect_inner(tracer, VmExecutionMode::OneTx, None); if self.has_unpublished_bytecodes() { @@ -151,7 +151,10 @@ impl VmInterface for Vm { ) } else { ( - Ok(self.bootloader_state.get_last_tx_compressed_bytecodes()), + Ok(self + .bootloader_state + .get_last_tx_compressed_bytecodes() + .into()), result, ) } diff --git a/core/lib/multivm/src/versions/vm_m5/vm.rs b/core/lib/multivm/src/versions/vm_m5/vm.rs index 4282f3f0cf4a..df4baccaf156 100644 --- a/core/lib/multivm/src/versions/vm_m5/vm.rs +++ b/core/lib/multivm/src/versions/vm_m5/vm.rs @@ -93,14 +93,14 @@ impl VmInterface for Vm { _tracer: Self::TracerDispatcher, tx: Transaction, _with_compression: bool, - ) -> (BytecodeCompressionResult, VmExecutionResultAndLogs) { + ) -> (BytecodeCompressionResult<'_>, VmExecutionResultAndLogs) { crate::vm_m5::vm_with_bootloader::push_transaction_to_bootloader_memory( &mut self.vm, &tx, self.system_env.execution_mode.glue_into(), ); // Bytecode compression isn't supported - (Ok(vec![]), self.inspect((), VmExecutionMode::OneTx)) + (Ok(vec![].into()), self.inspect((), VmExecutionMode::OneTx)) } fn record_vm_memory_metrics(&self) -> VmMemoryMetrics { diff --git a/core/lib/multivm/src/versions/vm_m6/vm.rs b/core/lib/multivm/src/versions/vm_m6/vm.rs index 520abd930555..7e19076a5202 100644 --- a/core/lib/multivm/src/versions/vm_m6/vm.rs +++ b/core/lib/multivm/src/versions/vm_m6/vm.rs @@ -109,7 +109,7 @@ impl VmInterface for Vm { tracer: Self::TracerDispatcher, tx: Transaction, with_compression: bool, - ) -> (BytecodeCompressionResult, VmExecutionResultAndLogs) { + ) -> (BytecodeCompressionResult<'_>, VmExecutionResultAndLogs) { if let Some(storage_invocations) = tracer.storage_invocations { self.vm .execution_mode @@ -182,7 +182,7 @@ impl VmInterface for Vm { result, ) } else { - (Ok(compressed_bytecodes), result) + (Ok(compressed_bytecodes.into()), result) } } diff --git a/core/lib/multivm/src/versions/vm_refunds_enhancement/bootloader_state/state.rs b/core/lib/multivm/src/versions/vm_refunds_enhancement/bootloader_state/state.rs index 12aab3c7364c..b428851c9383 100644 --- a/core/lib/multivm/src/versions/vm_refunds_enhancement/bootloader_state/state.rs +++ b/core/lib/multivm/src/versions/vm_refunds_enhancement/bootloader_state/state.rs @@ -167,11 +167,11 @@ impl BootloaderState { l2_block.first_tx_index + l2_block.txs.len() } - pub(crate) fn get_last_tx_compressed_bytecodes(&self) -> Vec { + pub(crate) fn get_last_tx_compressed_bytecodes(&self) -> &[CompressedBytecodeInfo] { if let Some(tx) = self.last_l2_block().txs.last() { - tx.compressed_bytecodes.clone() + &tx.compressed_bytecodes } else { - vec![] + &[] } } diff --git a/core/lib/multivm/src/versions/vm_refunds_enhancement/vm.rs 
b/core/lib/multivm/src/versions/vm_refunds_enhancement/vm.rs index 2aa3ba05e662..119abf052b9f 100644 --- a/core/lib/multivm/src/versions/vm_refunds_enhancement/vm.rs +++ b/core/lib/multivm/src/versions/vm_refunds_enhancement/vm.rs @@ -99,7 +99,7 @@ impl VmInterface for Vm { dispatcher: Self::TracerDispatcher, tx: Transaction, with_compression: bool, - ) -> (BytecodeCompressionResult, VmExecutionResultAndLogs) { + ) -> (BytecodeCompressionResult<'_>, VmExecutionResultAndLogs) { self.push_transaction_with_compression(tx, with_compression); let result = self.inspect(dispatcher, VmExecutionMode::OneTx); if self.has_unpublished_bytecodes() { @@ -109,7 +109,10 @@ impl VmInterface for Vm { ) } else { ( - Ok(self.bootloader_state.get_last_tx_compressed_bytecodes()), + Ok(self + .bootloader_state + .get_last_tx_compressed_bytecodes() + .into()), result, ) } diff --git a/core/lib/multivm/src/versions/vm_virtual_blocks/bootloader_state/state.rs b/core/lib/multivm/src/versions/vm_virtual_blocks/bootloader_state/state.rs index 562d74513710..7e9af0ed6b82 100644 --- a/core/lib/multivm/src/versions/vm_virtual_blocks/bootloader_state/state.rs +++ b/core/lib/multivm/src/versions/vm_virtual_blocks/bootloader_state/state.rs @@ -167,11 +167,11 @@ impl BootloaderState { l2_block.first_tx_index + l2_block.txs.len() } - pub(crate) fn get_last_tx_compressed_bytecodes(&self) -> Vec { + pub(crate) fn get_last_tx_compressed_bytecodes(&self) -> &[CompressedBytecodeInfo] { if let Some(tx) = self.last_l2_block().txs.last() { - tx.compressed_bytecodes.clone() + &tx.compressed_bytecodes } else { - vec![] + &[] } } diff --git a/core/lib/multivm/src/versions/vm_virtual_blocks/vm.rs b/core/lib/multivm/src/versions/vm_virtual_blocks/vm.rs index 6080df2bf2f1..0ecdd6797f4b 100644 --- a/core/lib/multivm/src/versions/vm_virtual_blocks/vm.rs +++ b/core/lib/multivm/src/versions/vm_virtual_blocks/vm.rs @@ -99,7 +99,7 @@ impl VmInterface for Vm { tracer: TracerDispatcher, tx: Transaction, with_compression: bool, - ) -> (BytecodeCompressionResult, VmExecutionResultAndLogs) { + ) -> (BytecodeCompressionResult<'_>, VmExecutionResultAndLogs) { self.push_transaction_with_compression(tx, with_compression); let result = self.inspect_inner(tracer, VmExecutionMode::OneTx); if self.has_unpublished_bytecodes() { @@ -109,7 +109,10 @@ impl VmInterface for Vm { ) } else { ( - Ok(self.bootloader_state.get_last_tx_compressed_bytecodes()), + Ok(self + .bootloader_state + .get_last_tx_compressed_bytecodes() + .into()), result, ) } diff --git a/core/lib/multivm/src/vm_instance.rs b/core/lib/multivm/src/vm_instance.rs index 0fc626d9ac48..cedb4bc8276d 100644 --- a/core/lib/multivm/src/vm_instance.rs +++ b/core/lib/multivm/src/vm_instance.rs @@ -74,7 +74,7 @@ impl VmInterface for VmInstance { dispatcher: Self::TracerDispatcher, tx: zksync_types::Transaction, with_compression: bool, - ) -> (BytecodeCompressionResult, VmExecutionResultAndLogs) { + ) -> (BytecodeCompressionResult<'_>, VmExecutionResultAndLogs) { dispatch_vm!(self.inspect_transaction_with_bytecode_compression( dispatcher.into(), tx, diff --git a/core/lib/protobuf_config/src/consensus.rs b/core/lib/protobuf_config/src/consensus.rs index b57f033d0d22..f5eb5c5b2f10 100644 --- a/core/lib/protobuf_config/src/consensus.rs +++ b/core/lib/protobuf_config/src/consensus.rs @@ -6,7 +6,7 @@ use zksync_config::configs::consensus::{ }; use zksync_protobuf::{kB, read_optional, repr::ProtoRepr, required, ProtoFmt}; -use crate::{proto::consensus as proto, read_optional_repr}; +use crate::{parse_h160, 
proto::consensus as proto, read_optional_repr}; impl ProtoRepr for proto::WeightedValidator { type Type = WeightedValidator; @@ -65,6 +65,12 @@ impl ProtoRepr for proto::GenesisSpec { .collect::>() .context("attesters")?, leader: ValidatorPublicKey(required(&self.leader).context("leader")?.clone()), + registry_address: self + .registry_address + .as_ref() + .map(|x| parse_h160(x)) + .transpose() + .context("registry_address")?, }) } fn build(this: &Self::Type) -> Self { @@ -74,6 +80,7 @@ impl ProtoRepr for proto::GenesisSpec { validators: this.validators.iter().map(ProtoRepr::build).collect(), attesters: this.attesters.iter().map(ProtoRepr::build).collect(), leader: Some(this.leader.0.clone()), + registry_address: this.registry_address.map(|a| format!("{:?}", a)), } } } diff --git a/core/lib/protobuf_config/src/genesis.rs b/core/lib/protobuf_config/src/genesis.rs index 92f639aa224e..59896aa244d8 100644 --- a/core/lib/protobuf_config/src/genesis.rs +++ b/core/lib/protobuf_config/src/genesis.rs @@ -43,6 +43,13 @@ impl ProtoRepr for proto::Genesis { 0.into(), ) }; + // Check either of fields, use old name as a fallback. + let snark_wrapper_vk_hash = match (&prover.snark_wrapper_vk_hash, &prover.recursion_scheduler_level_vk_hash) { + (Some(x), _) => parse_h256(x).context("snark_wrapper_vk_hash")?, + (_, Some(x)) => parse_h256(x).context("recursion_scheduler_level_vk_hash")?, + _ => anyhow::bail!("Either snark_wrapper_vk_hash or recursion_scheduler_level_vk_hash should be presented"), + }; + Ok(Self::Type { protocol_version: Some(protocol_version), genesis_root_hash: Some( @@ -75,9 +82,7 @@ impl ProtoRepr for proto::Genesis { l2_chain_id: required(&self.l2_chain_id) .and_then(|x| L2ChainId::try_from(*x).map_err(|a| anyhow::anyhow!(a))) .context("l2_chain_id")?, - recursion_scheduler_level_vk_hash: required(&prover.recursion_scheduler_level_vk_hash) - .and_then(|x| parse_h256(x)) - .context("recursion_scheduler_level_vk_hash")?, + snark_wrapper_vk_hash, fee_account: required(&self.fee_account) .and_then(|x| parse_h160(x)) .context("fee_account")?, @@ -104,11 +109,9 @@ impl ProtoRepr for proto::Genesis { l1_chain_id: Some(this.l1_chain_id.0), l2_chain_id: Some(this.l2_chain_id.as_u64()), prover: Some(proto::Prover { - recursion_scheduler_level_vk_hash: Some(format!( - "{:?}", - this.recursion_scheduler_level_vk_hash - )), + recursion_scheduler_level_vk_hash: None, // Deprecated field. 
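The verifying-key change above follows a rename-with-fallback pattern: read the new `snark_wrapper_vk_hash` field if present, fall back to the deprecated `recursion_scheduler_level_vk_hash` alias, and fail if neither is set, while `build()` only ever writes the new name. Distilled sketch; `parse` is an illustrative stand-in for `parse_h256`:

fn resolve_vk_hash(
    new: Option<&str>,
    deprecated: Option<&str>,
) -> anyhow::Result<[u8; 32]> {
    match (new, deprecated) {
        // The new name always wins, even if the old alias is also set.
        (Some(x), _) => parse(x),
        // Old configs keep working until the alias is removed.
        (None, Some(x)) => parse(x),
        (None, None) => anyhow::bail!("snark_wrapper_vk_hash is required"),
    }
}

// Illustrative stand-in for `parse_h256`.
fn parse(x: &str) -> anyhow::Result<[u8; 32]> {
    let bytes = hex::decode(x.trim_start_matches("0x"))?;
    bytes
        .as_slice()
        .try_into()
        .map_err(|_| anyhow::anyhow!("want 32 bytes"))
}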
dummy_verifier: Some(this.dummy_verifier), + snark_wrapper_vk_hash: Some(format!("{:?}", this.snark_wrapper_vk_hash)), }), l1_batch_commit_data_generator_mode: Some( proto::L1BatchCommitDataGeneratorMode::new( diff --git a/core/lib/protobuf_config/src/proto/config/genesis.proto b/core/lib/protobuf_config/src/proto/config/genesis.proto index 6e679d865d92..08cbb954fcbc 100644 --- a/core/lib/protobuf_config/src/proto/config/genesis.proto +++ b/core/lib/protobuf_config/src/proto/config/genesis.proto @@ -8,8 +8,9 @@ enum L1BatchCommitDataGeneratorMode { } message Prover { - optional string recursion_scheduler_level_vk_hash = 1; // required; H256 + optional string recursion_scheduler_level_vk_hash = 1; // optional and deprecated, used as alias for `snark_wrapper_vk_hash`; H256 optional bool dummy_verifier = 5; + optional string snark_wrapper_vk_hash = 6; // optional (required if `recursion_scheduler_level_vk_hash` is not set); H256 reserved 2, 3, 4; reserved "recursion_node_level_vk_hash", "recursion_leaf_level_vk_hash", "recursion_circuits_set_vks_hash"; } diff --git a/core/lib/protobuf_config/src/proto/core/consensus.proto b/core/lib/protobuf_config/src/proto/core/consensus.proto index c64c993be7c8..835ead1ab65c 100644 --- a/core/lib/protobuf_config/src/proto/core/consensus.proto +++ b/core/lib/protobuf_config/src/proto/core/consensus.proto @@ -56,6 +56,8 @@ message GenesisSpec { repeated WeightedValidator validators = 3; // must be non-empty; validator committee. optional string leader = 4; // required; ValidatorPublicKey repeated WeightedAttester attesters = 5; // can be empty; attester committee. + // Currently not in consensus genesis, but still a part of the global configuration. + optional string registry_address = 6; // optional; H160 } // Per peer connection RPC rate limits. diff --git a/core/lib/prover_interface/tests/job_serialization.rs b/core/lib/prover_interface/tests/job_serialization.rs index a2d55a140655..a2aee0c2733e 100644 --- a/core/lib/prover_interface/tests/job_serialization.rs +++ b/core/lib/prover_interface/tests/job_serialization.rs @@ -170,7 +170,7 @@ fn test_tee_proof_request_serialization() { "signature": [ 0, 1, 2, 3, 4 ], "pubkey": [ 5, 6, 7, 8, 9 ], "proof": [ 10, 11, 12, 13, 14 ], - "tee_type": "Sgx" + "tee_type": "sgx" }"#; let tee_proof_result = serde_json::from_str::(tee_proof_str).unwrap(); let tee_proof_expected = SubmitTeeProofRequest(Box::new(L1BatchTeeProofForL1 { diff --git a/core/lib/types/src/api/en.rs b/core/lib/types/src/api/en.rs index bf26caddd07b..9391c8627573 100644 --- a/core/lib/types/src/api/en.rs +++ b/core/lib/types/src/api/en.rs @@ -44,10 +44,23 @@ pub struct SyncBlock { pub protocol_version: ProtocolVersionId, } +/// Global configuration of the consensus served by the main node to the external nodes. +/// In particular, it contains consensus genesis. +/// +/// The wrapped JSON value corresponds to `zksync_dal::consensus::GlobalConfig`. +#[derive(Debug, Clone, Serialize, Deserialize)] +pub struct ConsensusGlobalConfig(pub serde_json::Value); + +/// [DEPRECATED] Genesis served by the main node to the external nodes. +/// This type is deprecated since ConsensusGlobalConfig also contains genesis and is extensible. +/// +/// The wrapped JSON value corresponds to `zksync_consensus_roles::validator::Genesis`. #[derive(Debug, Clone, Serialize, Deserialize)] pub struct ConsensusGenesis(pub serde_json::Value); /// AttestationStatus maintained by the main node. /// Used for testing L1 batch signing by consensus attesters. 
+/// +/// The wrapped JSON value corresponds to `zksync_dal::consensus::AttestationStatus`. #[derive(Debug, Clone, Serialize, Deserialize)] pub struct AttestationStatus(pub serde_json::Value); diff --git a/core/lib/types/src/protocol_upgrade.rs b/core/lib/types/src/protocol_upgrade.rs index bc9bd7667e82..1afb108a0536 100644 --- a/core/lib/types/src/protocol_upgrade.rs +++ b/core/lib/types/src/protocol_upgrade.rs @@ -282,14 +282,14 @@ impl ProtocolVersion { pub fn apply_upgrade( &self, upgrade: ProtocolUpgrade, - new_scheduler_vk_hash: Option, + new_snark_wrapper_vk_hash: Option, ) -> ProtocolVersion { ProtocolVersion { version: upgrade.version, timestamp: upgrade.timestamp, l1_verifier_config: L1VerifierConfig { - recursion_scheduler_level_vk_hash: new_scheduler_vk_hash - .unwrap_or(self.l1_verifier_config.recursion_scheduler_level_vk_hash), + snark_wrapper_vk_hash: new_snark_wrapper_vk_hash + .unwrap_or(self.l1_verifier_config.snark_wrapper_vk_hash), }, base_system_contracts_hashes: BaseSystemContractsHashes { bootloader: upgrade diff --git a/core/lib/vm_executor/src/batch/factory.rs b/core/lib/vm_executor/src/batch/factory.rs index 17b125b0c41a..d02014584467 100644 --- a/core/lib/vm_executor/src/batch/factory.rs +++ b/core/lib/vm_executor/src/batch/factory.rs @@ -250,7 +250,7 @@ impl CommandReceiver { .unwrap_or_default(); return Ok(BatchTransactionExecutionResult { tx_result: Box::new(tx_result), - compressed_bytecodes, + compressed_bytecodes: compressed_bytecodes.into_owned(), call_traces, }); } @@ -269,8 +269,9 @@ impl CommandReceiver { let (compression_result, tx_result) = vm.inspect_transaction_with_bytecode_compression(tracer.into(), tx.clone(), false); - let compressed_bytecodes = - compression_result.context("compression failed when it wasn't applied")?; + let compressed_bytecodes = compression_result + .context("compression failed when it wasn't applied")? + .into_owned(); // TODO implement tracer manager which will be responsible // for collecting result from all tracers and save it to the database @@ -308,7 +309,7 @@ impl CommandReceiver { .unwrap_or_default(); Ok(BatchTransactionExecutionResult { tx_result: Box::new(tx_result), - compressed_bytecodes, + compressed_bytecodes: compressed_bytecodes.into_owned(), call_traces, }) } else { diff --git a/core/lib/vm_interface/src/types/errors/bytecode_compression.rs b/core/lib/vm_interface/src/types/errors/bytecode_compression.rs index 1dd69dc7398d..c0c6e8737bbe 100644 --- a/core/lib/vm_interface/src/types/errors/bytecode_compression.rs +++ b/core/lib/vm_interface/src/types/errors/bytecode_compression.rs @@ -1,3 +1,5 @@ +use std::borrow::Cow; + use crate::CompressedBytecodeInfo; /// Errors related to bytecode compression. @@ -9,4 +11,5 @@ pub enum BytecodeCompressionError { } /// Result of compressing bytecodes used by a transaction. -pub type BytecodeCompressionResult = Result, BytecodeCompressionError>; +pub type BytecodeCompressionResult<'a> = + Result, BytecodeCompressionError>; diff --git a/core/lib/vm_interface/src/vm.rs b/core/lib/vm_interface/src/vm.rs index b6be2c7581f7..f70be52bd86a 100644 --- a/core/lib/vm_interface/src/vm.rs +++ b/core/lib/vm_interface/src/vm.rs @@ -41,7 +41,7 @@ pub trait VmInterface { tracer: Self::TracerDispatcher, tx: Transaction, with_compression: bool, - ) -> (BytecodeCompressionResult, VmExecutionResultAndLogs); + ) -> (BytecodeCompressionResult<'_>, VmExecutionResultAndLogs); /// Record VM memory metrics. 
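For context, the new alias in `bytecode_compression.rs` reads `pub type BytecodeCompressionResult<'a> = Result<Cow<'a, [CompressedBytecodeInfo]>, BytecodeCompressionError>;`. `Cow` lets legacy VMs (`vm_m5`, `vm_m6`) keep returning owned vectors while newer VMs lend a slice out of bootloader state without cloning; callers that need ownership, like the batch executor above, pay for a clone exactly once via `into_owned()`. A self-contained sketch of the three shapes, with `u32` standing in for `CompressedBytecodeInfo`:

use std::borrow::Cow;

fn borrowed(state: &[u32]) -> Cow<'_, [u32]> {
    state.into() // &[T] -> Cow::Borrowed, no allocation
}

fn owned() -> Cow<'static, [u32]> {
    vec![].into() // Vec<T> -> Cow::Owned, e.g. for VMs without compression support
}

fn consume(bytecodes: Cow<'_, [u32]>) -> Vec<u32> {
    bytecodes.into_owned() // clones only in the borrowed case
}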
fn record_vm_memory_metrics(&self) -> VmMemoryMetrics; @@ -63,7 +63,7 @@ pub trait VmInterfaceExt: VmInterface { &mut self, tx: Transaction, with_compression: bool, - ) -> (BytecodeCompressionResult, VmExecutionResultAndLogs) { + ) -> (BytecodeCompressionResult<'_>, VmExecutionResultAndLogs) { self.inspect_transaction_with_bytecode_compression( Self::TracerDispatcher::default(), tx, diff --git a/core/lib/web3_decl/src/client/mod.rs b/core/lib/web3_decl/src/client/mod.rs index a8246216eca3..7f0de4f3bca9 100644 --- a/core/lib/web3_decl/src/client/mod.rs +++ b/core/lib/web3_decl/src/client/mod.rs @@ -318,6 +318,7 @@ pub struct ClientBuilder { client: C, url: SensitiveUrl, rate_limit: (usize, Duration), + report_config: bool, network: Net, } @@ -328,6 +329,7 @@ impl fmt::Debug for ClientBuilder { .field("client", &any::type_name::()) .field("url", &self.url) .field("rate_limit", &self.rate_limit) + .field("report_config", &self.report_config) .field("network", &self.network) .finish_non_exhaustive() } @@ -340,6 +342,7 @@ impl ClientBuilder { client, url, rate_limit: (1, Duration::ZERO), + report_config: true, network: Net::default(), } } @@ -366,16 +369,25 @@ impl ClientBuilder { self } + /// Allows switching off config reporting for this client in logs and metrics. This is useful if a client is a short-living one + /// and is not injected as a dependency. + pub fn report_config(mut self, report: bool) -> Self { + self.report_config = report; + self + } + /// Builds the client. pub fn build(self) -> Client { - tracing::info!( - "Creating JSON-RPC client for network {:?} with inner client: {:?} and rate limit: {:?}", - self.network, - self.client, - self.rate_limit - ); let rate_limit = SharedRateLimit::new(self.rate_limit.0, self.rate_limit.1); - METRICS.observe_config(self.network.metric_label(), &rate_limit); + if self.report_config { + tracing::info!( + "Creating JSON-RPC client for network {:?} with inner client: {:?} and rate limit: {:?}", + self.network, + self.client, + self.rate_limit + ); + METRICS.observe_config(self.network.metric_label(), &rate_limit); + } Client { inner: self.client, diff --git a/core/lib/web3_decl/src/error.rs b/core/lib/web3_decl/src/error.rs index f42fe8de59d5..3aa16a9ab77c 100644 --- a/core/lib/web3_decl/src/error.rs +++ b/core/lib/web3_decl/src/error.rs @@ -60,6 +60,19 @@ pub struct EnrichedClientError { args: HashMap<&'static str, String>, } +/// Whether the error should be considered retriable. +pub fn is_retriable(err: &ClientError) -> bool { + match err { + ClientError::Transport(_) | ClientError::RequestTimeout => true, + ClientError::Call(err) => { + // At least some RPC providers use "internal error" in case of the server being overloaded + err.code() == ErrorCode::ServerIsBusy.code() + || err.code() == ErrorCode::InternalError.code() + } + _ => false, + } +} + /// Alias for a result with enriched client RPC error. pub type EnrichedClientResult = Result; @@ -87,15 +100,7 @@ impl EnrichedClientError { /// Whether the error should be considered retriable. 
pub fn is_retriable(&self) -> bool { - match self.as_ref() { - ClientError::Transport(_) | ClientError::RequestTimeout => true, - ClientError::Call(err) => { - // At least some RPC providers use "internal error" in case of the server being overloaded - err.code() == ErrorCode::ServerIsBusy.code() - || err.code() == ErrorCode::InternalError.code() - } - _ => false, - } + is_retriable(&self.inner_error) } } diff --git a/core/lib/web3_decl/src/namespaces/en.rs b/core/lib/web3_decl/src/namespaces/en.rs index dac774dd7bdf..8a4d2db8c6fe 100644 --- a/core/lib/web3_decl/src/namespaces/en.rs +++ b/core/lib/web3_decl/src/namespaces/en.rs @@ -25,6 +25,9 @@ pub trait EnNamespace { #[method(name = "consensusGenesis")] async fn consensus_genesis(&self) -> RpcResult>; + #[method(name = "consensusGlobalConfig")] + async fn consensus_global_config(&self) -> RpcResult>; + /// Lists all tokens created at or before the specified `block_number`. /// /// This method is used by EN after snapshot recovery in order to recover token records. diff --git a/core/node/api_server/src/execution_sandbox/execute.rs b/core/node/api_server/src/execution_sandbox/execute.rs index 086a75c81de9..f247313db2b1 100644 --- a/core/node/api_server/src/execution_sandbox/execute.rs +++ b/core/node/api_server/src/execution_sandbox/execute.rs @@ -25,7 +25,7 @@ use super::{ /// Please don't add fields that duplicate `SystemEnv` or `L1BatchEnv` information, since both of these /// are also provided to an executor. #[derive(Debug)] -pub(crate) struct TxExecutionArgs { +pub struct TxExecutionArgs { /// Transaction / call itself. pub transaction: Transaction, /// Nonce override for the initiator account. @@ -80,7 +80,7 @@ impl TxExecutionArgs { } #[derive(Debug, Clone)] -pub(crate) struct TransactionExecutionOutput { +pub struct TransactionExecutionOutput { /// Output of the VM. pub vm: VmExecutionResultAndLogs, /// Execution metrics. @@ -91,7 +91,7 @@ pub(crate) struct TransactionExecutionOutput { /// Executor of transactions. #[derive(Debug)] -pub(crate) enum TransactionExecutor { +pub enum TransactionExecutor { Real(MainOneshotExecutor), #[doc(hidden)] // Intended for tests only Mock(MockOneshotExecutor), diff --git a/core/node/api_server/src/execution_sandbox/mod.rs b/core/node/api_server/src/execution_sandbox/mod.rs index f2a3f0e5f8c3..faaccf03c96a 100644 --- a/core/node/api_server/src/execution_sandbox/mod.rs +++ b/core/node/api_server/src/execution_sandbox/mod.rs @@ -16,10 +16,10 @@ use zksync_types::{ api, fee_model::BatchFeeInput, AccountTreeId, Address, L1BatchNumber, L2BlockNumber, L2ChainId, }; +pub use self::execute::{TransactionExecutor, TxExecutionArgs}; use self::vm_metrics::SandboxStage; pub(super) use self::{ error::SandboxExecutionError, - execute::{TransactionExecutor, TxExecutionArgs}, tracers::ApiTracer, validate::ValidationError, vm_metrics::{SubmitTxStage, SANDBOX_METRICS}, @@ -158,7 +158,7 @@ async fn get_pending_state( /// Arguments for VM execution necessary to set up storage and environment. #[derive(Debug, Clone)] -pub(crate) struct TxSetupArgs { +pub struct TxSetupArgs { pub execution_mode: TxExecutionMode, pub operator_account: AccountTreeId, pub fee_input: BatchFeeInput, @@ -215,7 +215,7 @@ impl BlockStartInfoInner { /// Information about first L1 batch / L2 block in the node storage. 
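Extracting `is_retriable` into a free function means transient-error classification no longer requires wrapping into `EnrichedClientError` first; the external-node fetcher later in this diff calls it on bare `ClientError`s. A hedged usage sketch; the retry wrapper and its names are assumptions, not part of this change:

use std::{future::Future, time::Duration};

use jsonrpsee::core::ClientError; // the error type `is_retriable` inspects
use zksync_web3_decl::error::is_retriable;

// `fetch` is any retryable RPC operation returning Result<T, ClientError>.
async fn retrying<T, Fut: Future<Output = Result<T, ClientError>>>(
    mut fetch: impl FnMut() -> Fut,
) -> Result<T, ClientError> {
    const RETRY_INTERVAL: Duration = Duration::from_secs(5);
    loop {
        match fetch().await {
            Ok(value) => return Ok(value),
            // Transient transport/timeout/overload errors: wait and retry.
            Err(err) if is_retriable(&err) => tokio::time::sleep(RETRY_INTERVAL).await,
            Err(err) => return Err(err),
        }
    }
}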
#[derive(Debug, Clone)] -pub(crate) struct BlockStartInfo { +pub struct BlockStartInfo { cached_pruning_info: Arc>, max_cache_age: Duration, } @@ -331,7 +331,7 @@ impl BlockStartInfo { } #[derive(Debug, thiserror::Error)] -pub(crate) enum BlockArgsError { +pub enum BlockArgsError { #[error("Block is pruned; first retained block is {0}")] Pruned(L2BlockNumber), #[error("Block is missing, but can appear in the future")] @@ -342,7 +342,7 @@ pub(crate) enum BlockArgsError { /// Information about a block provided to VM. #[derive(Debug, Clone, Copy)] -pub(crate) struct BlockArgs { +pub struct BlockArgs { block_id: api::BlockId, resolved_block_number: L2BlockNumber, l1_batch_timestamp_s: Option, diff --git a/core/node/api_server/src/execution_sandbox/tracers.rs b/core/node/api_server/src/execution_sandbox/tracers.rs index 31384b7a0898..6fdc3dbc7b62 100644 --- a/core/node/api_server/src/execution_sandbox/tracers.rs +++ b/core/node/api_server/src/execution_sandbox/tracers.rs @@ -11,7 +11,7 @@ use zksync_types::ProtocolVersionId; /// Custom tracers supported by the API sandbox. #[derive(Debug)] -pub(crate) enum ApiTracer { +pub enum ApiTracer { CallTracer(Arc>>), Validation { params: ValidationTracerParams, diff --git a/core/node/api_server/src/tx_sender/mod.rs b/core/node/api_server/src/tx_sender/mod.rs index 5f913e305cd0..f0d96118638b 100644 --- a/core/node/api_server/src/tx_sender/mod.rs +++ b/core/node/api_server/src/tx_sender/mod.rs @@ -140,6 +140,38 @@ impl MultiVMBaseSystemContracts { } } } + + pub fn load_estimate_gas_blocking() -> Self { + Self { + pre_virtual_blocks: BaseSystemContracts::estimate_gas_pre_virtual_blocks(), + post_virtual_blocks: BaseSystemContracts::estimate_gas_post_virtual_blocks(), + post_virtual_blocks_finish_upgrade_fix: + BaseSystemContracts::estimate_gas_post_virtual_blocks_finish_upgrade_fix(), + post_boojum: BaseSystemContracts::estimate_gas_post_boojum(), + post_allowlist_removal: BaseSystemContracts::estimate_gas_post_allowlist_removal(), + post_1_4_1: BaseSystemContracts::estimate_gas_post_1_4_1(), + post_1_4_2: BaseSystemContracts::estimate_gas_post_1_4_2(), + vm_1_5_0_small_memory: BaseSystemContracts::estimate_gas_1_5_0_small_memory(), + vm_1_5_0_increased_memory: + BaseSystemContracts::estimate_gas_post_1_5_0_increased_memory(), + } + } + + pub fn load_eth_call_blocking() -> Self { + Self { + pre_virtual_blocks: BaseSystemContracts::playground_pre_virtual_blocks(), + post_virtual_blocks: BaseSystemContracts::playground_post_virtual_blocks(), + post_virtual_blocks_finish_upgrade_fix: + BaseSystemContracts::playground_post_virtual_blocks_finish_upgrade_fix(), + post_boojum: BaseSystemContracts::playground_post_boojum(), + post_allowlist_removal: BaseSystemContracts::playground_post_allowlist_removal(), + post_1_4_1: BaseSystemContracts::playground_post_1_4_1(), + post_1_4_2: BaseSystemContracts::playground_post_1_4_2(), + vm_1_5_0_small_memory: BaseSystemContracts::playground_1_5_0_small_memory(), + vm_1_5_0_increased_memory: BaseSystemContracts::playground_post_1_5_0_increased_memory( + ), + } + } } /// Smart contracts to be used in the API sandbox requests, e.g. for estimating gas and @@ -169,32 +201,8 @@ impl ApiContracts { /// Blocking version of [`Self::load_from_disk()`]. 
pub fn load_from_disk_blocking() -> Self { Self { - estimate_gas: MultiVMBaseSystemContracts { - pre_virtual_blocks: BaseSystemContracts::estimate_gas_pre_virtual_blocks(), - post_virtual_blocks: BaseSystemContracts::estimate_gas_post_virtual_blocks(), - post_virtual_blocks_finish_upgrade_fix: - BaseSystemContracts::estimate_gas_post_virtual_blocks_finish_upgrade_fix(), - post_boojum: BaseSystemContracts::estimate_gas_post_boojum(), - post_allowlist_removal: BaseSystemContracts::estimate_gas_post_allowlist_removal(), - post_1_4_1: BaseSystemContracts::estimate_gas_post_1_4_1(), - post_1_4_2: BaseSystemContracts::estimate_gas_post_1_4_2(), - vm_1_5_0_small_memory: BaseSystemContracts::estimate_gas_1_5_0_small_memory(), - vm_1_5_0_increased_memory: - BaseSystemContracts::estimate_gas_post_1_5_0_increased_memory(), - }, - eth_call: MultiVMBaseSystemContracts { - pre_virtual_blocks: BaseSystemContracts::playground_pre_virtual_blocks(), - post_virtual_blocks: BaseSystemContracts::playground_post_virtual_blocks(), - post_virtual_blocks_finish_upgrade_fix: - BaseSystemContracts::playground_post_virtual_blocks_finish_upgrade_fix(), - post_boojum: BaseSystemContracts::playground_post_boojum(), - post_allowlist_removal: BaseSystemContracts::playground_post_allowlist_removal(), - post_1_4_1: BaseSystemContracts::playground_post_1_4_1(), - post_1_4_2: BaseSystemContracts::playground_post_1_4_2(), - vm_1_5_0_small_memory: BaseSystemContracts::playground_1_5_0_small_memory(), - vm_1_5_0_increased_memory: - BaseSystemContracts::playground_post_1_5_0_increased_memory(), - }, + estimate_gas: MultiVMBaseSystemContracts::load_estimate_gas_blocking(), + eth_call: MultiVMBaseSystemContracts::load_eth_call_blocking(), } } } @@ -1003,7 +1011,7 @@ impl TxSender { .await } - pub(super) async fn eth_call( + pub async fn eth_call( &self, block_args: BlockArgs, call_overrides: CallOverrides, diff --git a/core/node/api_server/src/web3/backend_jsonrpsee/namespaces/en.rs b/core/node/api_server/src/web3/backend_jsonrpsee/namespaces/en.rs index c3e116d39928..de7635263735 100644 --- a/core/node/api_server/src/web3/backend_jsonrpsee/namespaces/en.rs +++ b/core/node/api_server/src/web3/backend_jsonrpsee/namespaces/en.rs @@ -19,6 +19,12 @@ impl EnNamespaceServer for EnNamespace { .map_err(|err| self.current_method().map_err(err)) } + async fn consensus_global_config(&self) -> RpcResult> { + self.consensus_global_config_impl() + .await + .map_err(|err| self.current_method().map_err(err)) + } + async fn consensus_genesis(&self) -> RpcResult> { self.consensus_genesis_impl() .await diff --git a/core/node/api_server/src/web3/namespaces/en.rs b/core/node/api_server/src/web3/namespaces/en.rs index 604d38ef94ab..26f4aa2b0b5f 100644 --- a/core/node/api_server/src/web3/namespaces/en.rs +++ b/core/node/api_server/src/web3/namespaces/en.rs @@ -21,18 +21,35 @@ impl EnNamespace { Self { state } } + pub async fn consensus_global_config_impl( + &self, + ) -> Result, Web3Error> { + let mut conn = self.state.acquire_connection().await?; + let Some(cfg) = conn + .consensus_dal() + .global_config() + .await + .context("global_config()")? 
+ else { + return Ok(None); + }; + Ok(Some(en::ConsensusGlobalConfig( + zksync_protobuf::serde::serialize(&cfg, serde_json::value::Serializer).unwrap(), + ))) + } + pub async fn consensus_genesis_impl(&self) -> Result, Web3Error> { let mut conn = self.state.acquire_connection().await?; - let Some(genesis) = conn + let Some(cfg) = conn .consensus_dal() - .genesis() + .global_config() .await - .map_err(DalError::generalize)? + .context("global_config()")? else { return Ok(None); }; Ok(Some(en::ConsensusGenesis( - zksync_protobuf::serde::serialize(&genesis, serde_json::value::Serializer).unwrap(), + zksync_protobuf::serde::serialize(&cfg.genesis, serde_json::value::Serializer).unwrap(), ))) } @@ -40,7 +57,7 @@ impl EnNamespace { pub async fn attestation_status_impl( &self, ) -> Result, Web3Error> { - let status = self + let Some(status) = self .state .acquire_connection() .await? @@ -54,13 +71,13 @@ impl EnNamespace { .context("TransactionBuilder::build()")? .consensus_dal() .attestation_status() - .await?; - - Ok(status.map(|s| { - en::AttestationStatus( - zksync_protobuf::serde::serialize(&s, serde_json::value::Serializer).unwrap(), - ) - })) + .await? + else { + return Ok(None); + }; + Ok(Some(en::AttestationStatus( + zksync_protobuf::serde::serialize(&status, serde_json::value::Serializer).unwrap(), + ))) } pub(crate) fn current_method(&self) -> &MethodTracer { @@ -157,7 +174,7 @@ impl EnNamespace { l1_chain_id: self.state.api_config.l1_chain_id, sl_chain_id: Some(self.state.api_config.l1_chain_id.into()), l2_chain_id: self.state.api_config.l2_chain_id, - recursion_scheduler_level_vk_hash: verifier_config.recursion_scheduler_level_vk_hash, + snark_wrapper_vk_hash: verifier_config.snark_wrapper_vk_hash, fee_account, dummy_verifier: self.state.api_config.dummy_verifier, l1_batch_commit_data_generator_mode: self diff --git a/core/node/consensus/Cargo.toml b/core/node/consensus/Cargo.toml index e82969dae6c6..ba52892584d2 100644 --- a/core/node/consensus/Cargo.toml +++ b/core/node/consensus/Cargo.toml @@ -11,6 +11,7 @@ keywords.workspace = true categories.workspace = true [dependencies] +zksync_basic_types.workspace = true zksync_config.workspace = true zksync_concurrency.workspace = true zksync_consensus_crypto.workspace = true @@ -20,6 +21,7 @@ zksync_consensus_storage.workspace = true zksync_consensus_executor.workspace = true zksync_consensus_bft.workspace = true zksync_consensus_utils.workspace = true +zksync_contracts.workspace = true zksync_protobuf.workspace = true zksync_dal.workspace = true zksync_l1_contract_interface.workspace = true @@ -31,22 +33,27 @@ zksync_system_constants.workspace = true zksync_types.workspace = true zksync_utils.workspace = true zksync_web3_decl.workspace = true - +zksync_node_api_server.workspace = true +zksync_state.workspace = true +zksync_storage.workspace = true +zksync_vm_interface.workspace = true +zksync_multivm.workspace = true anyhow.workspace = true async-trait.workspace = true secrecy.workspace = true tempfile.workspace = true thiserror.workspace = true tracing.workspace = true +hex.workspace = true tokio.workspace = true +jsonrpsee.workspace = true +semver.workspace = true [dev-dependencies] zksync_node_genesis.workspace = true zksync_node_test_utils.workspace = true zksync_node_api_server.workspace = true zksync_test_account.workspace = true -zksync_contracts.workspace = true -tokio.workspace = true test-casing.workspace = true rand.workspace = true diff --git a/core/node/consensus/src/abi.rs b/core/node/consensus/src/abi.rs new file mode 
100644
index 000000000000..0e2200e28038
--- /dev/null
+++ b/core/node/consensus/src/abi.rs
@@ -0,0 +1,133 @@
+//! Strongly-typed API for Consensus-related solidity contracts.
+//! Placeholder until we can depend on alloy_sol_types.
+use anyhow::Context as _;
+use zksync_types::{ethabi, ethabi::Token};
+
+/// Strongly typed representation of a contract function.
+/// It also represents the inputs of the function.
+pub trait Function {
+    /// Name of the solidity function.
+    const NAME: &'static str;
+    /// Type representing the contract this function belongs to.
+    type Contract: AsRef<ethabi::Contract>;
+    /// Type representing the outputs of this function.
+    type Outputs;
+    /// Encodes this struct to inputs of this function.
+    fn encode(&self) -> Vec<Token>;
+    /// Decodes outputs of this function.
+    fn decode_outputs(outputs: Vec<Token>) -> anyhow::Result<Self::Outputs>;
+}
+
+/// Address of contract C. It is just a wrapper of ethabi::Address,
+/// additionally indicating what contract is deployed under this address.
+#[derive(Debug)]
+pub struct Address<C>(ethabi::Address, std::marker::PhantomData<C>);
+
+impl<C> Clone for Address<C> {
+    fn clone(&self) -> Self {
+        *self
+    }
+}
+
+impl<C> Copy for Address<C> {}
+
+impl<C> PartialEq for Address<C> {
+    fn eq(&self, other: &Self) -> bool {
+        self.0.eq(&other.0)
+    }
+}
+
+impl<C> Eq for Address<C> {}
+
+impl<C> Address<C> {
+    pub fn new(address: ethabi::Address) -> Self {
+        Self(address, std::marker::PhantomData)
+    }
+}
+
+impl<C> std::ops::Deref for Address<C> {
+    type Target = ethabi::Address;
+    fn deref(&self) -> &Self::Target {
+        &self.0
+    }
+}
+
+/// Represents a call to the function F.
+#[derive(Debug)]
+pub struct Call<F: Function> {
+    /// Contract of the function.
+    pub contract: F::Contract,
+    /// Inputs to the function.
+    pub inputs: F,
+}
+
+impl<F: Function> Call<F> {
+    pub(super) fn function(&self) -> &ethabi::Function {
+        self.contract.as_ref().function(F::NAME).unwrap()
+    }
+    /// Converts the call to raw calldata.
+    pub fn calldata(&self) -> ethabi::Result<ethabi::Bytes> {
+        self.function().encode_input(&self.inputs.encode())
+    }
+    /// Parses the outputs of the call.
+    pub fn decode_outputs(&self, outputs: &[u8]) -> anyhow::Result<F::Outputs> {
+        F::decode_outputs(
+            self.function()
+                .decode_output(outputs)
+                .context("decode_output()")?,
+        )
+    }
+}
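With the trait and `Call` machinery above, decoding contract outputs reduces to pattern-matching `Token`s with the small conversion helpers defined just below. A sketch of how they compose for a `(uint32, (bytes1, bytes32))`-shaped value, similar to the attester decoding in `registry/abi.rs` later in this diff; the function itself is illustrative and assumes it lives in this module:

use anyhow::Context as _;
use zksync_types::ethabi::Token;

fn decode_weight_and_tag(t: Token) -> anyhow::Result<(u32, [u8; 1])> {
    // Fixed-arity tuple -> array of exactly two tokens.
    let [weight, pub_key] = into_tuple(t)?;
    // Nested (tag, x) tuple; the x coordinate is ignored here.
    let [tag, _x] = into_tuple(pub_key)?;
    Ok((
        into_uint(weight).context("weight")?, // Uint token -> u32, erroring on overflow
        into_fixed_bytes(tag).context("tag")?, // FixedBytes token -> [u8; 1]
    ))
}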
+
+pub(crate) fn into_fixed_bytes<const N: usize>(t: Token) -> anyhow::Result<[u8; N]> {
+    match t {
+        Token::FixedBytes(b) => b.try_into().ok().context("bad size"),
+        bad => anyhow::bail!("want fixed_bytes, got {bad:?}"),
+    }
+}
+
+pub(crate) fn into_tuple<const N: usize>(t: Token) -> anyhow::Result<[Token; N]> {
+    match t {
+        Token::Tuple(ts) => ts.try_into().ok().context("bad size"),
+        bad => anyhow::bail!("want tuple, got {bad:?}"),
+    }
+}
+
+pub(crate) fn into_uint<I: TryFrom<ethabi::Uint>>(t: Token) -> anyhow::Result<I> {
+    match t {
+        Token::Uint(i) => i.try_into().ok().context("overflow"),
+        bad => anyhow::bail!("want uint, got {bad:?}"),
+    }
+}
+
+#[cfg(test)]
+fn example(t: &ethabi::ParamType) -> Token {
+    use ethabi::ParamType as T;
+    match t {
+        T::Address => Token::Address(ethabi::Address::default()),
+        T::Bytes => Token::Bytes(ethabi::Bytes::default()),
+        T::Int(_) => Token::Int(ethabi::Int::default()),
+        T::Uint(_) => Token::Uint(ethabi::Uint::default()),
+        T::Bool => Token::Bool(bool::default()),
+        T::String => Token::String(String::default()),
+        T::Array(t) => Token::Array(vec![example(t)]),
+        T::FixedBytes(n) => Token::FixedBytes(vec![0; *n]),
+        T::FixedArray(t, n) => Token::FixedArray(vec![example(t); *n]),
+        T::Tuple(ts) => Token::Tuple(ts.iter().map(example).collect()),
+    }
+}
+
+#[cfg(test)]
+impl<F: Function> Call<F> {
+    pub(crate) fn test(&self) -> anyhow::Result<()> {
+        self.calldata().context("calldata()")?;
+        F::decode_outputs(
+            self.function()
+                .outputs
+                .iter()
+                .map(|p| example(&p.kind))
+                .collect(),
+        )?;
+        Ok(())
+    }
+}
diff --git a/core/node/consensus/src/config.rs b/core/node/consensus/src/config.rs
index c2fa13472066..22f8fc01192f 100644
--- a/core/node/consensus/src/config.rs
+++ b/core/node/consensus/src/config.rs
@@ -11,6 +11,8 @@ use zksync_config::{
 use zksync_consensus_crypto::{Text, TextFmt};
 use zksync_consensus_executor as executor;
 use zksync_consensus_roles::{attester, node, validator};
+use zksync_dal::consensus_dal;
+use zksync_types::ethabi;
 
 fn read_secret_text<T: TextFmt>(text: Option<&Secret<String>>) -> anyhow::Result<Option<T>> {
     text.map(|text| Text::new(text.expose_secret()).decode())
@@ -41,16 +43,18 @@ pub(super) struct GenesisSpec {
     pub(super) validators: validator::Committee,
     pub(super) attesters: Option<attester::Committee>,
     pub(super) leader_selection: validator::LeaderSelectionMode,
+    pub(super) registry_address: Option<ethabi::Address>,
 }
 
 impl GenesisSpec {
-    pub(super) fn from_genesis(g: &validator::Genesis) -> Self {
+    pub(super) fn from_global_config(cfg: &consensus_dal::GlobalConfig) -> Self {
         Self {
-            chain_id: g.chain_id,
-            protocol_version: g.protocol_version,
-            validators: g.validators.clone(),
-            attesters: g.attesters.clone(),
-            leader_selection: g.leader_selection.clone(),
+            chain_id: cfg.genesis.chain_id,
+            protocol_version: cfg.genesis.protocol_version,
+            validators: cfg.genesis.validators.clone(),
+            attesters: cfg.genesis.attesters.clone(),
+            leader_selection: cfg.genesis.leader_selection.clone(),
+            registry_address: cfg.registry_address,
         }
     }
 
@@ -93,6 +97,7 @@
         } else {
             Some(attester::Committee::new(attesters).context("attesters")?)
}, + registry_address: x.registry_address, }) } } @@ -104,6 +109,7 @@ pub(super) fn node_key(secrets: &ConsensusSecrets) -> anyhow::Result, ) -> anyhow::Result { let mut gossip_static_outbound = HashMap::new(); { @@ -128,6 +134,7 @@ pub(super) fn executor( }; Ok(executor::Config { + build_version, server_addr: cfg.server_addr, public_addr: net::Host(cfg.public_addr.0.clone()), max_payload_size: cfg.max_payload_size, diff --git a/core/node/consensus/src/en.rs b/core/node/consensus/src/en.rs index 259cac5d074a..e1f10b8e4e50 100644 --- a/core/node/consensus/src/en.rs +++ b/core/node/consensus/src/en.rs @@ -1,20 +1,25 @@ use std::sync::Arc; use anyhow::Context as _; +use jsonrpsee::{core::ClientError, types::error::ErrorCode}; use zksync_concurrency::{ctx, error::Wrap as _, scope, time}; use zksync_consensus_executor::{self as executor, attestation}; use zksync_consensus_roles::{attester, validator}; use zksync_consensus_storage::{BatchStore, BlockStore}; use zksync_dal::consensus_dal; -use zksync_node_sync::{ - fetcher::FetchedBlock, sync_action::ActionQueueSender, MainNodeClient, SyncState, -}; -use zksync_protobuf::ProtoFmt as _; +use zksync_node_sync::{fetcher::FetchedBlock, sync_action::ActionQueueSender, SyncState}; use zksync_types::L2BlockNumber; -use zksync_web3_decl::client::{DynClient, L2}; +use zksync_web3_decl::{ + client::{DynClient, L2}, + error::is_retriable, + namespaces::{EnNamespaceClient as _, EthNamespaceClient as _}, +}; use super::{config, storage::Store, ConsensusConfig, ConsensusSecrets}; -use crate::storage::{self, ConnectionPool}; +use crate::{ + registry, + storage::{self, ConnectionPool}, +}; /// External node. pub(super) struct EN { @@ -27,7 +32,7 @@ impl EN { /// Task running a consensus node for the external node. /// It may be a validator, but it cannot be a leader (cannot propose blocks). /// - /// NOTE: Before starting the consensus node if fetches all the blocks + /// NOTE: Before starting the consensus node it fetches all the blocks /// older than consensus genesis from the main node using json RPC. pub async fn run( self, @@ -35,6 +40,7 @@ impl EN { actions: ActionQueueSender, cfg: ConsensusConfig, secrets: ConsensusSecrets, + build_version: Option, ) -> anyhow::Result<()> { let attester = config::attester_key(&secrets).context("attester_key")?; @@ -47,13 +53,16 @@ impl EN { // Update sync state in the background. s.spawn_bg(self.fetch_state_loop(ctx)); - // Initialize genesis. - let genesis = self.fetch_genesis(ctx).await.wrap("fetch_genesis()")?; + // Initialize global config. + let global_config = self + .fetch_global_config(ctx) + .await + .wrap("fetch_genesis()")?; let mut conn = self.pool.connection(ctx).await.wrap("connection()")?; - conn.try_update_genesis(ctx, &genesis) + conn.try_update_global_config(ctx, &global_config) .await - .wrap("set_genesis()")?; + .wrap("try_update_global_config()")?; let mut payload_queue = conn .new_payload_queue(ctx, actions, self.sync_state.clone()) @@ -63,18 +72,22 @@ impl EN { drop(conn); // Fetch blocks before the genesis. - self.fetch_blocks(ctx, &mut payload_queue, Some(genesis.first_block)) - .await - .wrap("fetch_blocks()")?; + self.fetch_blocks( + ctx, + &mut payload_queue, + Some(global_config.genesis.first_block), + ) + .await + .wrap("fetch_blocks()")?; // Monitor the genesis of the main node. // If it changes, it means that a hard fork occurred and we need to reset the consensus state. 
s.spawn_bg::<()>({ - let old = genesis.clone(); + let old = global_config.clone(); async { let old = old; loop { - if let Ok(new) = self.fetch_genesis(ctx).await { + if let Ok(new) = self.fetch_global_config(ctx).await { if new != old { return Err(anyhow::format_err!( "genesis changed: old {old:?}, new {new:?}" @@ -105,10 +118,14 @@ impl EN { s.spawn_bg(async { Ok(runner.run(ctx).await?) }); let attestation = Arc::new(attestation::Controller::new(attester)); - s.spawn_bg(self.run_attestation_updater(ctx, genesis.clone(), attestation.clone())); + s.spawn_bg(self.run_attestation_controller( + ctx, + global_config.clone(), + attestation.clone(), + )); let executor = executor::Executor { - config: config::executor(&cfg, &secrets)?, + config: config::executor(&cfg, &secrets, build_version)?, block_store, batch_store, validator: config::validator_key(&secrets) @@ -164,24 +181,21 @@ impl EN { /// Monitors the `AttestationStatus` on the main node, /// and updates the attestation config accordingly. - async fn run_attestation_updater( + async fn run_attestation_controller( &self, ctx: &ctx::Ctx, - genesis: validator::Genesis, + cfg: consensus_dal::GlobalConfig, attestation: Arc, ) -> ctx::Result<()> { const POLL_INTERVAL: time::Duration = time::Duration::seconds(5); - let Some(committee) = &genesis.attesters else { - return Ok(()); - }; - let committee = Arc::new(committee.clone()); + let registry = registry::Registry::new(cfg.genesis.clone(), self.pool.clone()).await; let mut next = attester::BatchNumber(0); loop { let status = loop { match self.fetch_attestation_status(ctx).await { Err(err) => tracing::warn!("{err:#}"), Ok(status) => { - if status.genesis != genesis.hash() { + if status.genesis != cfg.genesis.hash() { return Err(anyhow::format_err!("genesis mismatch").into()); } if status.next_batch_to_attest >= next { @@ -191,6 +205,7 @@ impl EN { } ctx.sleep(POLL_INTERVAL).await?; }; + next = status.next_batch_to_attest.next(); tracing::info!( "waiting for hash of batch {:?}", status.next_batch_to_attest @@ -199,6 +214,27 @@ impl EN { .pool .wait_for_batch_hash(ctx, status.next_batch_to_attest) .await?; + let Some(committee) = registry + .attester_committee_for( + ctx, + cfg.registry_address.map(registry::Address::new), + status.next_batch_to_attest, + ) + .await + .wrap("attester_committee_for()")? + else { + tracing::info!("attestation not required"); + continue; + }; + let committee = Arc::new(committee); + // Persist the derived committee. + self.pool + .connection(ctx) + .await + .wrap("connection")? + .upsert_attester_committee(ctx, status.next_batch_to_attest, &committee) + .await + .wrap("upsert_attester_committee()")?; tracing::info!( "attesting batch {:?} with hash {hash:?}", status.next_batch_to_attest @@ -214,7 +250,6 @@ impl EN { })) .await .context("start_attestation()")?; - next = status.next_batch_to_attest.next(); } } @@ -224,37 +259,52 @@ impl EN { const DELAY_INTERVAL: time::Duration = time::Duration::milliseconds(500); const RETRY_INTERVAL: time::Duration = time::Duration::seconds(5); loop { - match ctx.wait(self.client.fetch_l2_block_number()).await? { + match ctx.wait(self.client.get_block_number()).await? 
{
                Ok(head) => {
+                    let head = L2BlockNumber(head.try_into().ok().context("overflow")?);
                     self.sync_state.set_main_node_block(head);
                     ctx.sleep(DELAY_INTERVAL).await?;
                 }
                 Err(err) => {
-                    tracing::warn!("main_node_client.fetch_l2_block_number(): {err}");
+                    tracing::warn!("get_block_number(): {err}");
                     ctx.sleep(RETRY_INTERVAL).await?;
                 }
             }
         }
     }
 
-    /// Fetches genesis from the main node.
+    /// Fetches consensus global configuration from the main node.
     #[tracing::instrument(skip_all)]
-    async fn fetch_genesis(&self, ctx: &ctx::Ctx) -> ctx::Result<validator::Genesis> {
-        let genesis = ctx
-            .wait(self.client.fetch_consensus_genesis())
-            .await?
-            .context("fetch_consensus_genesis()")?
-            .context("main node is not running consensus component")?;
-        // Deserialize the json, but don't allow for unknown fields.
-        // We need to compute the hash of the Genesis, so simply ignoring the unknown fields won't
-        // do.
-        Ok(validator::GenesisRaw::read(
-            &zksync_protobuf::serde::deserialize_proto_with_options(
-                &genesis.0, /*deny_unknown_fields=*/ true,
-            )
-            .context("deserialize")?,
-        )?
-        .with_hash())
+    async fn fetch_global_config(
+        &self,
+        ctx: &ctx::Ctx,
+    ) -> ctx::Result<consensus_dal::GlobalConfig> {
+        match ctx.wait(self.client.consensus_global_config()).await? {
+            Ok(cfg) => {
+                let cfg = cfg.context("main node is not running consensus component")?;
+                Ok(zksync_protobuf::serde::deserialize(&cfg.0).context("deserialize()")?)
+            }
+            Err(ClientError::Call(err)) if err.code() == ErrorCode::MethodNotFound.code() => {
+                tracing::info!(
+                    "consensus_global_config() not found, calling consensus_genesis() instead"
+                );
+                let genesis = ctx
+                    .wait(self.client.consensus_genesis())
+                    .await?
+                    .context("consensus_genesis()")?
+                    .context("main node is not running consensus component")?;
+                Ok(consensus_dal::GlobalConfig {
+                    genesis: zksync_protobuf::serde::deserialize(&genesis.0)
+                        .context("deserialize()")?,
+                    registry_address: None,
+                })
+            }
+            Err(err) => {
+                return Err(err)
+                    .context("consensus_global_config()")
+                    .map_err(|err| err.into())
+            }
+        }
     }
 
     #[tracing::instrument(skip_all)]
@@ -262,15 +312,12 @@ impl EN {
         &self,
         ctx: &ctx::Ctx,
     ) -> ctx::Result<consensus_dal::AttestationStatus> {
-        match ctx.wait(self.client.fetch_attestation_status()).await? {
-            Ok(Some(status)) => Ok(zksync_protobuf::serde::deserialize(&status.0)
-                .context("deserialize(AttestationStatus")?),
-            Ok(None) => Err(anyhow::format_err!("empty response").into()),
-            Err(err) => Err(anyhow::format_err!(
-                "AttestationStatus call to main node HTTP RPC failed: {err:#}"
-            )
-            .into()),
-        }
+        let status = ctx
+            .wait(self.client.attestation_status())
+            .await?
+            .context("attestation_status()")?
+            .context("main node is not running consensus component")?;
+        Ok(zksync_protobuf::serde::deserialize(&status.0).context("deserialize()")?)
     }
 
     /// Fetches (with retries) the given block from the main node.
@@ -278,14 +325,11 @@
         const RETRY_INTERVAL: time::Duration = time::Duration::seconds(5);
         loop {
-            let res = ctx.wait(self.client.fetch_l2_block(n, true)).await?;
-            match res {
+            match ctx.wait(self.client.sync_l2_block(n, true)).await?
{ Ok(Some(block)) => return Ok(block.try_into()?), Ok(None) => {} - Err(err) if err.is_retriable() => {} - Err(err) => { - return Err(anyhow::format_err!("client.fetch_l2_block({}): {err}", n).into()); - } + Err(err) if is_retriable(&err) => {} + Err(err) => Err(err).with_context(|| format!("client.sync_l2_block({n})"))?, } ctx.sleep(RETRY_INTERVAL).await?; } diff --git a/core/node/consensus/src/era.rs b/core/node/consensus/src/era.rs index 574e496f4d11..3150f839680e 100644 --- a/core/node/consensus/src/era.rs +++ b/core/node/consensus/src/era.rs @@ -45,6 +45,7 @@ pub async fn run_external_node( sync_state: SyncState, main_node_client: Box>, actions: ActionQueueSender, + build_version: semver::Version, ) -> anyhow::Result<()> { let en = en::EN { pool: ConnectionPool(pool), @@ -58,7 +59,8 @@ pub async fn run_external_node( is_validator = secrets.validator_key.is_some(), "running external node" ); - en.run(ctx, actions, cfg, secrets).await + en.run(ctx, actions, cfg, secrets, Some(build_version)) + .await } None => { tracing::info!("running fetcher"); diff --git a/core/node/consensus/src/lib.rs b/core/node/consensus/src/lib.rs index 13d918b5b6ee..ff9cdf865281 100644 --- a/core/node/consensus/src/lib.rs +++ b/core/node/consensus/src/lib.rs @@ -5,6 +5,7 @@ use zksync_config::configs::consensus::{ConsensusConfig, ConsensusSecrets}; +mod abi; // Currently `batch` module is only used in tests, // but will be used in production once batch syncing is implemented in consensus. #[allow(unused)] @@ -13,8 +14,10 @@ mod config; mod en; pub mod era; mod mn; +mod registry; mod storage; #[cfg(test)] pub(crate) mod testonly; #[cfg(test)] mod tests; +mod vm; diff --git a/core/node/consensus/src/mn.rs b/core/node/consensus/src/mn.rs index 7de86b4d8ba1..4d428346ebe4 100644 --- a/core/node/consensus/src/mn.rs +++ b/core/node/consensus/src/mn.rs @@ -6,9 +6,10 @@ use zksync_config::configs::consensus::{ConsensusConfig, ConsensusSecrets}; use zksync_consensus_executor::{self as executor, attestation}; use zksync_consensus_roles::{attester, validator}; use zksync_consensus_storage::{BatchStore, BlockStore}; +use zksync_dal::consensus_dal; use crate::{ - config, + config, registry, storage::{ConnectionPool, InsertCertificateError, Store}, }; @@ -36,9 +37,9 @@ pub async fn run_main_node( pool.connection(ctx) .await .wrap("connection()")? - .adjust_genesis(ctx, &spec) + .adjust_global_config(ctx, &spec) .await - .wrap("adjust_genesis()")?; + .wrap("adjust_global_config()")?; } // The main node doesn't have a payload queue as it produces all the L2 blocks itself. @@ -47,33 +48,40 @@ pub async fn run_main_node( .wrap("Store::new()")?; s.spawn_bg(runner.run(ctx)); - let (block_store, runner) = BlockStore::new(ctx, Box::new(store.clone())) + let global_config = pool + .connection(ctx) .await - .wrap("BlockStore::new()")?; - s.spawn_bg(runner.run(ctx)); - - let genesis = block_store.genesis().clone(); + .wrap("connection()")? + .global_config(ctx) + .await + .wrap("global_config()")? 
+ .context("global_config() disappeared")?; anyhow::ensure!( - genesis.leader_selection + global_config.genesis.leader_selection == validator::LeaderSelectionMode::Sticky(validator_key.public()), "unsupported leader selection mode - main node has to be the leader" ); + let (block_store, runner) = BlockStore::new(ctx, Box::new(store.clone())) + .await + .wrap("BlockStore::new()")?; + s.spawn_bg(runner.run(ctx)); + let (batch_store, runner) = BatchStore::new(ctx, Box::new(store.clone())) .await .wrap("BatchStore::new()")?; s.spawn_bg(runner.run(ctx)); let attestation = Arc::new(attestation::Controller::new(attester)); - s.spawn_bg(run_attestation_updater( + s.spawn_bg(run_attestation_controller( ctx, &pool, - genesis, + global_config, attestation.clone(), )); let executor = executor::Executor { - config: config::executor(&cfg, &secrets)?, + config: config::executor(&cfg, &secrets, None)?, block_store, batch_store, validator: Some(executor::Validator { @@ -93,18 +101,17 @@ pub async fn run_main_node( /// Manages attestation state by configuring the /// next batch to attest and storing the collected /// certificates. -async fn run_attestation_updater( +async fn run_attestation_controller( ctx: &ctx::Ctx, pool: &ConnectionPool, - genesis: validator::Genesis, + cfg: consensus_dal::GlobalConfig, attestation: Arc, ) -> anyhow::Result<()> { const POLL_INTERVAL: time::Duration = time::Duration::seconds(5); + let registry = registry::Registry::new(cfg.genesis, pool.clone()).await; + let registry_addr = cfg.registry_address.map(registry::Address::new); + let mut next = attester::BatchNumber(0); let res = async { - let Some(committee) = &genesis.attesters else { - return Ok(()); - }; - let committee = Arc::new(committee.clone()); loop { // After regenesis it might happen that the batch number for the first block // is not immediately known (the first block was not produced yet), @@ -118,10 +125,12 @@ async fn run_attestation_updater( .await .wrap("attestation_status()")? { - Some(status) => break status, - None => ctx.sleep(POLL_INTERVAL).await?, + Some(status) if status.next_batch_to_attest >= next => break status, + _ => {} } + ctx.sleep(POLL_INTERVAL).await?; }; + next = status.next_batch_to_attest.next(); tracing::info!( "waiting for hash of batch {:?}", status.next_batch_to_attest @@ -129,6 +138,22 @@ async fn run_attestation_updater( let hash = pool .wait_for_batch_hash(ctx, status.next_batch_to_attest) .await?; + let Some(committee) = registry + .attester_committee_for(ctx, registry_addr, status.next_batch_to_attest) + .await + .wrap("attester_committee_for()")? + else { + tracing::info!("attestation not required"); + continue; + }; + let committee = Arc::new(committee); + // Persist the derived committee. + pool.connection(ctx) + .await + .wrap("connection")? + .upsert_attester_committee(ctx, status.next_batch_to_attest, &committee) + .await + .wrap("upsert_attester_committee()")?; tracing::info!( "attesting batch {:?} with hash {hash:?}", status.next_batch_to_attest @@ -140,7 +165,7 @@ async fn run_attestation_updater( number: status.next_batch_to_attest, genesis: status.genesis, }, - committee: committee.clone(), + committee, })) .await .context("start_attestation()")?; diff --git a/core/node/consensus/src/registry/abi.rs b/core/node/consensus/src/registry/abi.rs new file mode 100644 index 000000000000..55cc7f9264fb --- /dev/null +++ b/core/node/consensus/src/registry/abi.rs @@ -0,0 +1,225 @@ +//! Strongly-typed API for ConsensusRegistry contract. 
+#![allow(dead_code)]
+
+use std::sync::Arc;
+
+use anyhow::Context as _;
+use zksync_types::{ethabi, ethabi::Token};
+
+use crate::abi;
+
+/// Represents the ConsensusRegistry contract.
+#[derive(Debug, Clone)]
+pub(crate) struct ConsensusRegistry(Arc<ethabi::Contract>);
+
+impl AsRef<ethabi::Contract> for ConsensusRegistry {
+    fn as_ref(&self) -> &ethabi::Contract {
+        &self.0
+    }
+}
+
+impl ConsensusRegistry {
+    const FILE: &'static str = "contracts/l2-contracts/artifacts-zk/contracts/ConsensusRegistry.sol/ConsensusRegistry.json";
+
+    /// Loads bytecode of the contract.
+    #[cfg(test)]
+    pub(crate) fn bytecode() -> Vec<u8> {
+        zksync_contracts::read_bytecode(Self::FILE)
+    }
+
+    /// Loads the `ethabi` representation of the contract.
+    pub(crate) fn load() -> Self {
+        Self(zksync_contracts::load_contract(ConsensusRegistry::FILE).into())
+    }
+
+    /// Constructs a call to function `F` of this contract.
+    pub(crate) fn call<F: abi::Function<Contract = Self>>(&self, inputs: F) -> abi::Call<F> {
+        abi::Call {
+            contract: self.clone(),
+            inputs,
+        }
+    }
+}
+
+/// ConsensusRegistry.getAttesterCommittee function.
+#[derive(Debug, Default)]
+pub(crate) struct GetAttesterCommittee;
+
+impl abi::Function for GetAttesterCommittee {
+    type Contract = ConsensusRegistry;
+    const NAME: &'static str = "getAttesterCommittee";
+
+    fn encode(&self) -> Vec<Token> {
+        vec![]
+    }
+
+    type Outputs = Vec<Attester>;
+    fn decode_outputs(tokens: Vec<Token>) -> anyhow::Result<Self::Outputs> {
+        let [attesters] = tokens.try_into().ok().context("bad size")?;
+        let mut res = vec![];
+        for token in attesters.into_array().context("not array")? {
+            res.push(Attester::from_token(token).context("attesters")?);
+        }
+        Ok(res)
+    }
+}
+
+/// ConsensusRegistry.add function.
+#[derive(Debug, Default)]
+pub(crate) struct Add {
+    pub(crate) node_owner: ethabi::Address,
+    pub(crate) validator_weight: u32,
+    pub(crate) validator_pub_key: BLS12_381PublicKey,
+    pub(crate) validator_pop: BLS12_381Signature,
+    pub(crate) attester_weight: u32,
+    pub(crate) attester_pub_key: Secp256k1PublicKey,
+}
+
+impl abi::Function for Add {
+    type Contract = ConsensusRegistry;
+    const NAME: &'static str = "add";
+    fn encode(&self) -> Vec<Token> {
+        vec![
+            Token::Address(self.node_owner),
+            Token::Uint(self.validator_weight.into()),
+            self.validator_pub_key.to_token(),
+            self.validator_pop.to_token(),
+            Token::Uint(self.attester_weight.into()),
+            self.attester_pub_key.to_token(),
+        ]
+    }
+    type Outputs = ();
+    fn decode_outputs(tokens: Vec<Token>) -> anyhow::Result<()> {
+        let [] = tokens.try_into().ok().context("bad size")?;
+        Ok(())
+    }
+}
+
+/// ConsensusRegistry.initialize function.
+#[derive(Debug, Default)]
+pub(crate) struct Initialize {
+    pub(crate) initial_owner: ethabi::Address,
+}
+
+impl abi::Function for Initialize {
+    type Contract = ConsensusRegistry;
+    const NAME: &'static str = "initialize";
+    fn encode(&self) -> Vec<Token> {
+        vec![Token::Address(self.initial_owner)]
+    }
+    type Outputs = ();
+    fn decode_outputs(tokens: Vec<Token>) -> anyhow::Result<()> {
+        let [] = tokens.try_into().ok().context("bad size")?;
+        Ok(())
+    }
+}
+
+/// ConsensusRegistry.commitAttesterCommittee function.
+#[derive(Debug, Default)]
+pub(crate) struct CommitAttesterCommittee;
+
+impl abi::Function for CommitAttesterCommittee {
+    type Contract = ConsensusRegistry;
+    const NAME: &'static str = "commitAttesterCommittee";
+    fn encode(&self) -> Vec<Token> {
+        vec![]
+    }
+    type Outputs = ();
+    fn decode_outputs(tokens: Vec<Token>) -> anyhow::Result<()> {
+        let [] = tokens.try_into().ok().context("bad size")?;
+        Ok(())
+    }
+}
+
+/// ConsensusRegistry.owner function.
+#[derive(Debug, Default)] +pub(crate) struct Owner; + +impl abi::Function for Owner { + type Contract = ConsensusRegistry; + const NAME: &'static str = "owner"; + fn encode(&self) -> Vec { + vec![] + } + type Outputs = ethabi::Address; + fn decode_outputs(tokens: Vec) -> anyhow::Result { + let [owner] = tokens.try_into().ok().context("bad size")?; + owner.into_address().context("not an address") + } +} + +// Auxiliary structs. + +/// Raw representation of a secp256k1 public key. +#[derive(Debug, Default)] +pub(crate) struct Secp256k1PublicKey { + pub(crate) tag: [u8; 1], + pub(crate) x: [u8; 32], +} + +impl Secp256k1PublicKey { + fn from_token(token: Token) -> anyhow::Result { + let [tag, x] = abi::into_tuple(token)?; + Ok(Self { + tag: abi::into_fixed_bytes(tag).context("tag")?, + x: abi::into_fixed_bytes(x).context("x")?, + }) + } + + fn to_token(&self) -> Token { + Token::Tuple(vec![ + Token::FixedBytes(self.tag.into()), + Token::FixedBytes(self.x.into()), + ]) + } +} + +/// Raw representation of an attester committee member. +#[derive(Debug)] +pub(crate) struct Attester { + pub(crate) weight: u32, + pub(crate) pub_key: Secp256k1PublicKey, +} + +impl Attester { + fn from_token(token: Token) -> anyhow::Result { + let [weight, pub_key] = abi::into_tuple(token)?; + Ok(Self { + weight: abi::into_uint(weight).context("weight")?, + pub_key: Secp256k1PublicKey::from_token(pub_key).context("pub_key")?, + }) + } +} + +/// Raw representation of a BLS12_381 public key. +#[derive(Debug, Default)] +pub(crate) struct BLS12_381PublicKey { + pub(crate) a: [u8; 32], + pub(crate) b: [u8; 32], + pub(crate) c: [u8; 32], +} + +impl BLS12_381PublicKey { + fn to_token(&self) -> Token { + Token::Tuple(vec![ + Token::FixedBytes(self.a.into()), + Token::FixedBytes(self.b.into()), + Token::FixedBytes(self.c.into()), + ]) + } +} + +#[derive(Debug, Default)] +pub(crate) struct BLS12_381Signature { + pub(crate) a: [u8; 32], + pub(crate) b: [u8; 16], +} + +impl BLS12_381Signature { + fn to_token(&self) -> Token { + Token::Tuple(vec![ + Token::FixedBytes(self.a.into()), + Token::FixedBytes(self.b.into()), + ]) + } +} diff --git a/core/node/consensus/src/registry/mod.rs b/core/node/consensus/src/registry/mod.rs new file mode 100644 index 000000000000..74da41309573 --- /dev/null +++ b/core/node/consensus/src/registry/mod.rs @@ -0,0 +1,80 @@ +use anyhow::Context as _; +use zksync_concurrency::{ctx, error::Wrap as _}; +use zksync_consensus_crypto::ByteFmt; +use zksync_consensus_roles::{attester, validator}; + +use crate::{storage::ConnectionPool, vm::VM}; + +mod abi; +#[cfg(test)] +pub(crate) mod testonly; +#[cfg(test)] +mod tests; + +fn decode_attester_key(k: &abi::Secp256k1PublicKey) -> anyhow::Result { + let mut x = vec![]; + x.extend(k.tag); + x.extend(k.x); + ByteFmt::decode(&x) +} + +fn decode_weighted_attester(a: &abi::Attester) -> anyhow::Result { + Ok(attester::WeightedAttester { + weight: a.weight.into(), + key: decode_attester_key(&a.pub_key).context("key")?, + }) +} + +pub type Address = crate::abi::Address; + +#[derive(Debug)] +pub(crate) struct Registry { + contract: abi::ConsensusRegistry, + genesis: validator::Genesis, + vm: VM, +} + +impl Registry { + pub async fn new(genesis: validator::Genesis, pool: ConnectionPool) -> Self { + Self { + contract: abi::ConsensusRegistry::load(), + genesis, + vm: VM::new(pool).await, + } + } + + /// Attester committee for the given batch. + /// It reads committee from the contract. + /// Falls back to committee specified in the genesis. 
+    pub async fn attester_committee_for(
+        &self,
+        ctx: &ctx::Ctx,
+        address: Option<Address>,
+        attested_batch: attester::BatchNumber,
+    ) -> ctx::Result<Option<attester::Committee>> {
+        let Some(batch_defining_committee) = attested_batch.prev() else {
+            // Batch 0 doesn't need attestation.
+            return Ok(None);
+        };
+        let Some(address) = address else {
+            return Ok(self.genesis.attesters.clone());
+        };
+        let raw = self
+            .vm
+            .call(
+                ctx,
+                batch_defining_committee,
+                address,
+                self.contract.call(abi::GetAttesterCommittee),
+            )
+            .await
+            .wrap("vm.call()")?;
+        let mut attesters = vec![];
+        for a in raw {
+            attesters.push(decode_weighted_attester(&a).context("decode_weighted_attester()")?);
+        }
+        Ok(Some(
+            attester::Committee::new(attesters.into_iter()).context("Committee::new()")?,
+        ))
+    }
+}
diff --git a/core/node/consensus/src/registry/testonly.rs b/core/node/consensus/src/registry/testonly.rs
new file mode 100644
index 000000000000..a0c55a557feb
--- /dev/null
+++ b/core/node/consensus/src/registry/testonly.rs
@@ -0,0 +1,118 @@
+use rand::Rng;
+use zksync_consensus_crypto::ByteFmt;
+use zksync_consensus_roles::{attester, validator};
+use zksync_test_account::Account;
+use zksync_types::{ethabi, Execute, Transaction, U256};
+
+use super::*;
+
+pub(crate) fn make_tx<F: crate::abi::Function>(
+    account: &mut Account,
+    address: crate::abi::Address<F::Contract>,
+    call: crate::abi::Call<F>,
+) -> Transaction {
+    account.get_l2_tx_for_execute(
+        Execute {
+            contract_address: *address,
+            calldata: call.calldata().unwrap(),
+            value: U256::zero(),
+            factory_deps: vec![],
+        },
+        None,
+    )
+}
+
+pub(crate) struct WeightedValidator {
+    weight: validator::Weight,
+    key: validator::PublicKey,
+    pop: validator::ProofOfPossession,
+}
+
+fn encode_attester_key(k: &attester::PublicKey) -> abi::Secp256k1PublicKey {
+    let b: [u8; 33] = ByteFmt::encode(k).try_into().unwrap();
+    abi::Secp256k1PublicKey {
+        tag: b[0..1].try_into().unwrap(),
+        x: b[1..33].try_into().unwrap(),
+    }
+}
+
+fn encode_validator_key(k: &validator::PublicKey) -> abi::BLS12_381PublicKey {
+    let b: [u8; 96] = ByteFmt::encode(k).try_into().unwrap();
+    abi::BLS12_381PublicKey {
+        a: b[0..32].try_into().unwrap(),
+        b: b[32..64].try_into().unwrap(),
+        c: b[64..96].try_into().unwrap(),
+    }
+}
+
+fn encode_validator_pop(pop: &validator::ProofOfPossession) -> abi::BLS12_381Signature {
+    let b: [u8; 48] = ByteFmt::encode(pop).try_into().unwrap();
+    abi::BLS12_381Signature {
+        a: b[0..32].try_into().unwrap(),
+        b: b[32..48].try_into().unwrap(),
+    }
+}
+
+pub(crate) fn gen_validator(rng: &mut impl Rng) -> WeightedValidator {
+    let k: validator::SecretKey = rng.gen();
+    WeightedValidator {
+        key: k.public(),
+        weight: rng.gen_range(1..100),
+        pop: k.sign_pop(),
+    }
+}
+
+pub(crate) fn gen_attester(rng: &mut impl Rng) -> attester::WeightedAttester {
+    attester::WeightedAttester {
+        key: rng.gen(),
+        weight: rng.gen_range(1..100),
+    }
+}
+
+impl Registry {
+    pub(crate) fn deploy(&self, account: &mut Account) -> (Address, Transaction) {
+        let tx = account.get_deploy_tx(
+            &abi::ConsensusRegistry::bytecode(),
+            None,
+            zksync_test_account::TxType::L2,
+        );
+        (Address::new(tx.address), tx.tx)
+    }
+
+    pub(crate) fn add(
+        &self,
+        node_owner: ethabi::Address,
+        validator: WeightedValidator,
+        attester: attester::WeightedAttester,
+    ) -> anyhow::Result<crate::abi::Call<abi::Add>> {
+        Ok(self.contract.call(abi::Add {
+            node_owner,
+            validator_pub_key: encode_validator_key(&validator.key),
+            validator_weight: validator
+                .weight
+                .try_into()
+                .context("overflow")
+                .context("validator_weight")?,
+            validator_pop: encode_validator_pop(&validator.pop),
+            attester_pub_key: encode_attester_key(&attester.key),
+            attester_weight: attester
+                .weight
+                .try_into()
+                .context("overflow")
+                .context("attester_weight")?,
+        }))
+    }
+
+    pub(crate) fn initialize(
+        &self,
+        initial_owner: ethabi::Address,
+    ) -> crate::abi::Call<abi::Initialize> {
+        self.contract.call(abi::Initialize { initial_owner })
+    }
+
+    pub(crate) fn commit_attester_committee(
+        &self,
+    ) -> crate::abi::Call<abi::CommitAttesterCommittee> {
+        self.contract.call(abi::CommitAttesterCommittee)
+    }
+}
diff --git a/core/node/consensus/src/registry/tests.rs b/core/node/consensus/src/registry/tests.rs
new file mode 100644
index 000000000000..935cd6738918
--- /dev/null
+++ b/core/node/consensus/src/registry/tests.rs
@@ -0,0 +1,91 @@
+use rand::Rng as _;
+use zksync_concurrency::{ctx, scope};
+use zksync_consensus_roles::{attester, validator::testonly::Setup};
+use zksync_test_account::Account;
+use zksync_types::ProtocolVersionId;
+
+use super::*;
+use crate::storage::ConnectionPool;
+
+/// Test checking that parsing logic matches the abi specified in the json file.
+#[test]
+fn test_consensus_registry_abi() {
+    zksync_concurrency::testonly::abort_on_panic();
+    let c = abi::ConsensusRegistry::load();
+    c.call(abi::GetAttesterCommittee).test().unwrap();
+    c.call(abi::Add::default()).test().unwrap();
+    c.call(abi::Initialize::default()).test().unwrap();
+    c.call(abi::CommitAttesterCommittee).test().unwrap();
+    c.call(abi::Owner).test().unwrap();
+}
+
+#[tokio::test(flavor = "multi_thread")]
+async fn test_attester_committee() {
+    zksync_concurrency::testonly::abort_on_panic();
+    let ctx = &ctx::test_root(&ctx::RealClock);
+    let rng = &mut ctx.rng();
+    let setup = Setup::new(rng, 10);
+    let account = &mut Account::random();
+    let to_fund = &[account.address];
+
+    scope::run!(ctx, |ctx, s| async {
+        let pool = ConnectionPool::test(false, ProtocolVersionId::latest()).await;
+        let registry = Registry::new(setup.genesis.clone(), pool.clone()).await;
+
+        // If the registry contract address is not specified,
+        // then the committee from genesis should be returned.
+        let got = registry
+            .attester_committee_for(ctx, None, attester::BatchNumber(10))
+            .await
+            .unwrap();
+        assert_eq!(setup.genesis.attesters, got);
+
+        let (mut node, runner) = crate::testonly::StateKeeper::new(ctx, pool.clone()).await?;
+        s.spawn_bg(runner.run_real(ctx, to_fund));
+
+        // Deploy registry contract and initialize it.
+        let committee =
+            attester::Committee::new((0..5).map(|_| testonly::gen_attester(rng))).unwrap();
+        let (registry_addr, tx) = registry.deploy(account);
+        let mut txs = vec![tx];
+        let account_addr = account.address();
+        txs.push(testonly::make_tx(
+            account,
+            registry_addr,
+            registry.initialize(account_addr),
+        ));
+        // Add attesters.
+        for a in committee.iter() {
+            txs.push(testonly::make_tx(
+                account,
+                registry_addr,
+                registry
+                    .add(rng.gen(), testonly::gen_validator(rng), a.clone())
+                    .unwrap(),
+            ));
+        }
+        // Commit the update.
+        txs.push(testonly::make_tx(
+            account,
+            registry_addr,
+            registry.commit_attester_committee(),
+        ));
+
+        node.push_block(&txs).await;
+        node.seal_batch().await;
+        pool.wait_for_batch(ctx, node.last_batch()).await?;
+
+        // Read the attester committee using the vm.
+        let batch = attester::BatchNumber(node.last_batch().0.into());
+        assert_eq!(
+            Some(committee),
+            registry
+                .attester_committee_for(ctx, Some(registry_addr), batch + 1)
+                .await
+                .unwrap()
+        );
+        Ok(())
+    })
+    .await
+    .unwrap();
+}
diff --git a/core/node/consensus/src/storage/connection.rs b/core/node/consensus/src/storage/connection.rs
index 6ff2fb1ce0a0..512b37e81a11 100644
--- a/core/node/consensus/src/storage/connection.rs
+++ b/core/node/consensus/src/storage/connection.rs
@@ -1,13 +1,14 @@
 use anyhow::Context as _;
 use zksync_concurrency::{ctx, error::Wrap as _, time};
 use zksync_consensus_crypto::keccak256::Keccak256;
-use zksync_consensus_roles::{attester, validator};
+use zksync_consensus_roles::{attester, attester::BatchNumber, validator};
 use zksync_consensus_storage::{self as storage, BatchStoreState};
 use zksync_dal::{consensus_dal, consensus_dal::Payload, Core, CoreDal, DalError};
 use zksync_l1_contract_interface::i_executor::structures::StoredBatchInfo;
+use zksync_node_api_server::execution_sandbox::{BlockArgs, BlockStartInfo};
 use zksync_node_sync::{fetcher::IoCursorExt as _, ActionQueueSender, SyncState};
 use zksync_state_keeper::io::common::IoCursor;
-use zksync_types::{commitment::L1BatchWithMetadata, L1BatchNumber};
+use zksync_types::{api, commitment::L1BatchWithMetadata, L1BatchNumber};
 
 use super::{InsertCertificateError, PayloadQueue};
 use crate::config;
@@ -18,7 +19,7 @@ pub(crate) struct ConnectionPool(pub(crate) zksync_dal::ConnectionPool<Core>);
 
 impl ConnectionPool {
     /// Wrapper for `connection_tagged()`.
-    pub(crate) async fn connection<'a>(&'a self, ctx: &ctx::Ctx) -> ctx::Result<Connection<'a>> {
+    pub(crate) async fn connection(&self, ctx: &ctx::Ctx) -> ctx::Result<Connection<'_>> {
         Ok(Connection(
             ctx.wait(self.0.connection_tagged("consensus"))
                 .await?
@@ -164,6 +165,22 @@ impl<'a> Connection<'a> {
             .map_err(E::Other)?)
     }
 
+    /// Wrapper for `consensus_dal().upsert_attester_committee()`.
+    pub async fn upsert_attester_committee(
+        &mut self,
+        ctx: &ctx::Ctx,
+        number: BatchNumber,
+        committee: &attester::Committee,
+    ) -> ctx::Result<()> {
+        ctx.wait(
+            self.0
+                .consensus_dal()
+                .upsert_attester_committee(number, committee),
+        )
+        .await??;
+        Ok(())
+    }
+
     /// Wrapper for `consensus_dal().replica_state()`.
     pub async fn replica_state(&mut self, ctx: &ctx::Ctx) -> ctx::Result<storage::ReplicaState> {
         Ok(ctx
@@ -229,22 +246,22 @@ impl<'a> Connection<'a> {
         })
     }
 
-    /// Wrapper for `consensus_dal().genesis()`.
-    pub async fn genesis(&mut self, ctx: &ctx::Ctx) -> ctx::Result<Option<validator::Genesis>> {
-        Ok(ctx
-            .wait(self.0.consensus_dal().genesis())
-            .await?
-            .map_err(DalError::generalize)?)
+    /// Wrapper for `consensus_dal().global_config()`.
+    pub async fn global_config(
+        &mut self,
+        ctx: &ctx::Ctx,
+    ) -> ctx::Result<Option<consensus_dal::GlobalConfig>> {
+        Ok(ctx.wait(self.0.consensus_dal().global_config()).await??)
     }
 
-    /// Wrapper for `consensus_dal().try_update_genesis()`.
-    pub async fn try_update_genesis(
+    /// Wrapper for `consensus_dal().try_update_global_config()`.
+    pub async fn try_update_global_config(
         &mut self,
         ctx: &ctx::Ctx,
-        genesis: &validator::Genesis,
+        cfg: &consensus_dal::GlobalConfig,
     ) -> ctx::Result<()> {
         Ok(ctx
-            .wait(self.0.consensus_dal().try_update_genesis(genesis))
+            .wait(self.0.consensus_dal().try_update_global_config(cfg))
             .await??)
     }
 
@@ -267,7 +284,7 @@ impl<'a> Connection<'a> {
 
     /// (Re)initializes consensus genesis to start at the last L2 block in storage.
     /// Noop if `spec` matches the current genesis.
-    pub(crate) async fn adjust_genesis(
+    pub(crate) async fn adjust_global_config(
         &mut self,
         ctx: &ctx::Ctx,
         spec: &config::GenesisSpec,
@@ -277,31 +294,34 @@ impl<'a> Connection<'a> {
             .await
             .wrap("start_transaction()")?;
 
-        let old = txn.genesis(ctx).await.wrap("genesis()")?;
+        let old = txn.global_config(ctx).await.wrap("genesis()")?;
         if let Some(old) = &old {
-            if &config::GenesisSpec::from_genesis(old) == spec {
+            if &config::GenesisSpec::from_global_config(old) == spec {
                 // Hard fork is not needed.
                 return Ok(());
             }
         }
 
         tracing::info!("Performing a hard fork of consensus.");
-        let genesis = validator::GenesisRaw {
-            chain_id: spec.chain_id,
-            fork_number: old
-                .as_ref()
-                .map_or(validator::ForkNumber(0), |old| old.fork_number.next()),
-            first_block: txn.next_block(ctx).await.context("next_block()")?,
-            protocol_version: spec.protocol_version,
-            validators: spec.validators.clone(),
-            attesters: spec.attesters.clone(),
-            leader_selection: spec.leader_selection.clone(),
-        }
-        .with_hash();
+        let new = consensus_dal::GlobalConfig {
+            genesis: validator::GenesisRaw {
+                chain_id: spec.chain_id,
+                fork_number: old.as_ref().map_or(validator::ForkNumber(0), |old| {
+                    old.genesis.fork_number.next()
+                }),
+                first_block: txn.next_block(ctx).await.context("next_block()")?,
+                protocol_version: spec.protocol_version,
+                validators: spec.validators.clone(),
+                attesters: spec.attesters.clone(),
+                leader_selection: spec.leader_selection.clone(),
+            }
+            .with_hash(),
+            registry_address: spec.registry_address,
+        };
 
-        txn.try_update_genesis(ctx, &genesis)
+        txn.try_update_global_config(ctx, &new)
             .await
-            .wrap("try_update_genesis()")?;
+            .wrap("try_update_global_config()")?;
         txn.commit(ctx).await.wrap("commit()")?;
         Ok(())
     }
@@ -447,4 +467,29 @@ impl<'a> Connection<'a> {
             .await?
             .context("attestation_status()")?)
     }
+
+    /// Constructs `BlockArgs` for the last block of the batch.
+    pub async fn vm_block_args(
+        &mut self,
+        ctx: &ctx::Ctx,
+        batch: attester::BatchNumber,
+    ) -> ctx::Result<BlockArgs> {
+        let (_, block) = self
+            .get_l2_block_range_of_l1_batch(ctx, batch)
+            .await
+            .wrap("get_l2_block_range_of_l1_batch()")?
+            .context("batch not sealed")?;
+        let block = api::BlockId::Number(api::BlockNumber::Number(block.0.into()));
+        let start_info = ctx
+            .wait(BlockStartInfo::new(
+                &mut self.0,
+                /*max_cache_age=*/ std::time::Duration::from_secs(10),
+            ))
+            .await?
+            .context("BlockStartInfo::new()")?;
+        Ok(ctx
+            .wait(BlockArgs::new(&mut self.0, block, &start_info))
+            .await?
+            .context("BlockArgs::new")?)
+    }
 }
diff --git a/core/node/consensus/src/storage/store.rs b/core/node/consensus/src/storage/store.rs
index 6a96812ae408..cb8e039d7d01 100644
--- a/core/node/consensus/src/storage/store.rs
+++ b/core/node/consensus/src/storage/store.rs
@@ -325,9 +325,10 @@ impl storage::PersistentBlockStore for Store {
         Ok(self
             .conn(ctx)
             .await?
-            .genesis(ctx)
+            .global_config(ctx)
             .await?
-            .context("not found")?)
+            .context("not found")?
+            .genesis)
     }
 
     fn persisted(&self) -> sync::watch::Receiver<storage::BlockStoreState> {
diff --git a/core/node/consensus/src/storage/testonly.rs b/core/node/consensus/src/storage/testonly.rs
index 5d1279afbbfd..65c464d98b93 100644
--- a/core/node/consensus/src/storage/testonly.rs
+++ b/core/node/consensus/src/storage/testonly.rs
@@ -1,5 +1,4 @@
 //! Storage test helpers.
-
 use anyhow::Context as _;
 use zksync_concurrency::{ctx, error::Wrap as _, time};
 use zksync_consensus_roles::{attester, validator};
@@ -13,6 +12,7 @@ use zksync_types::{
 };
 
 use super::{Connection, ConnectionPool};
+use crate::registry;
 
 impl Connection<'_> {
     /// Wrapper for `consensus_dal().batch_of_block()`.
@@ -181,16 +181,16 @@ impl ConnectionPool {
         want_last: validator::BlockNumber,
     ) -> ctx::Result<Vec<validator::FinalBlock>> {
         let blocks = self.wait_for_block_certificates(ctx, want_last).await?;
-        let genesis = self
+        let cfg = self
             .connection(ctx)
             .await
             .wrap("connection()")?
-            .genesis(ctx)
+            .global_config(ctx)
             .await
             .wrap("genesis()")?
             .context("genesis is missing")?;
         for block in &blocks {
-            block.verify(&genesis).context(block.number())?;
+            block.verify(&cfg.genesis).context(block.number())?;
         }
         Ok(blocks)
     }
@@ -199,6 +199,7 @@ impl ConnectionPool {
         &self,
         ctx: &ctx::Ctx,
         want_last: attester::BatchNumber,
+        registry_addr: Option<registry::Address>,
     ) -> ctx::Result<()> {
         // Wait for the last batch to be attested.
         const POLL_INTERVAL: time::Duration = time::Duration::milliseconds(100);
@@ -214,17 +215,17 @@ impl ConnectionPool {
             ctx.sleep(POLL_INTERVAL).await?;
         }
         let mut conn = self.connection(ctx).await.wrap("connection()")?;
-        let genesis = conn
-            .genesis(ctx)
+        let cfg = conn
+            .global_config(ctx)
             .await
-            .wrap("genesis()")?
-            .context("genesis is missing")?;
+            .wrap("global_config()")?
+            .context("global config is missing")?;
         let first = conn
-            .batch_of_block(ctx, genesis.first_block)
+            .batch_of_block(ctx, cfg.genesis.first_block)
             .await
             .wrap("batch_of_block()")?
            .context("batch of first_block is missing")?;
-        let committee = genesis.attesters.as_ref().unwrap();
+        let registry = registry::Registry::new(cfg.genesis.clone(), self.clone()).await;
         for i in first.0..want_last.0 {
             let i = attester::BatchNumber(i);
             let hash = conn
@@ -240,8 +241,13 @@ impl ConnectionPool {
             if cert.message.hash != hash {
                 return Err(anyhow::format_err!("cert[{i:?}]: hash mismatch").into());
             }
-            cert.verify(genesis.hash(), committee)
-                .context("cert[{i:?}].verify()")?;
+            let committee = registry
+                .attester_committee_for(ctx, registry_addr, i)
+                .await
+                .context("attester_committee_for()")?
+                .context("committee not specified")?;
+            cert.verify(cfg.genesis.hash(), &committee)
+                .with_context(|| format!("cert[{i:?}].verify()"))?;
         }
         Ok(())
     }
diff --git a/core/node/consensus/src/testonly.rs b/core/node/consensus/src/testonly.rs
index 90063772da92..241998f26928 100644
--- a/core/node/consensus/src/testonly.rs
+++ b/core/node/consensus/src/testonly.rs
@@ -42,8 +42,9 @@ use zksync_state_keeper::{
 };
 use zksync_test_account::Account;
 use zksync_types::{
+    ethabi,
     fee_model::{BatchFeeInput, L1PeggedBatchFeeModelInput},
-    Address, L1BatchNumber, L2BlockNumber, L2ChainId, PriorityOpId, ProtocolVersionId,
+    L1BatchNumber, L2BlockNumber, L2ChainId, PriorityOpId, ProtocolVersionId,
     Transaction,
 };
 use zksync_web3_decl::client::{Client, DynClient, L2};
@@ -54,6 +55,7 @@ use crate::{
 };
 
 /// Fake StateKeeper for tests.
+#[derive(Debug)]
 pub(super) struct StateKeeper {
     protocol_version: ProtocolVersionId,
     // Batch of the `last_block`.
@@ -62,8 +64,6 @@ pub(super) struct StateKeeper {
     // timestamp of the last block.
     last_timestamp: u64,
     batch_sealed: bool,
-    // test L2 account
-    account: Account,
 
     next_priority_op: PriorityOpId,
     actions_sender: ActionQueueSender,
@@ -116,6 +116,7 @@ pub(super) fn new_configs(
         })
         .collect(),
         leader: config::ValidatorPublicKey(setup.validator_keys[0].public().encode()),
+        registry_address: None,
     };
     network::testonly::new_configs(rng, setup, gossip_peers)
         .into_iter()
@@ -183,7 +184,6 @@ pub(super) struct StateKeeperRunner {
     addr: sync::watch::Sender<Option<std::net::SocketAddr>>,
     rocksdb_dir: tempfile::TempDir,
     metadata_calculator: MetadataCalculator,
-    account: Account,
 }
 
 impl StateKeeper {
@@ -242,7 +242,6 @@ impl StateKeeper {
             .await
             .context("MetadataCalculator::new()")?;
         let tree_reader = metadata_calculator.tree_reader();
-        let account = Account::random();
         Ok((
             Self {
                 protocol_version,
@@ -256,7 +255,6 @@ impl StateKeeper {
                 addr: addr.subscribe(),
                 pool: pool.clone(),
                 tree_reader,
-                account: account.clone(),
             },
             StateKeeperRunner {
                 actions_queue,
@@ -265,7 +263,6 @@ impl StateKeeper {
                 addr,
                 rocksdb_dir,
                 metadata_calculator,
-                account,
             },
         ))
     }
@@ -306,22 +303,29 @@ impl StateKeeper {
         }
     }
 
-    /// Pushes a new L2 block with `transactions` transactions to the `StateKeeper`.
-    pub async fn push_random_block(&mut self, rng: &mut impl Rng) {
+    pub async fn push_block(&mut self, txs: &[Transaction]) {
         let mut actions = vec![self.open_block()];
-        for _ in 0..rng.gen_range(3..8) {
-            let tx = match rng.gen() {
-                true => l2_transaction(&mut self.account, 1_000_000),
+        actions.extend(
+            txs.iter()
+                .map(|tx| FetchedTransaction::new(tx.clone()).into()),
+        );
+        actions.push(SyncAction::SealL2Block);
+        self.actions_sender.push_actions(actions).await.unwrap();
+    }
+
+    /// Pushes a new L2 block with `transactions` transactions to the `StateKeeper`.
+    pub async fn push_random_block(&mut self, rng: &mut impl Rng, account: &mut Account) {
+        let txs: Vec<_> = (0..rng.gen_range(3..8))
+            .map(|_| match rng.gen() {
+                true => l2_transaction(account, 1_000_000),
                 false => {
-                    let tx = l1_transaction(&mut self.account, self.next_priority_op);
+                    let tx = l1_transaction(account, self.next_priority_op);
                     self.next_priority_op += 1;
                     tx
                 }
-            };
-            actions.push(FetchedTransaction::new(tx).into());
-        }
-        actions.push(SyncAction::SealL2Block);
-        self.actions_sender.push_actions(actions).await.unwrap();
+            })
+            .collect();
+        self.push_block(&txs).await;
     }
 
     /// Pushes `SealBatch` command to the `StateKeeper`.
@@ -334,14 +338,19 @@ impl StateKeeper {
     }
 
     /// Pushes `count` random L2 blocks to the StateKeeper.
-    pub async fn push_random_blocks(&mut self, rng: &mut impl Rng, count: usize) {
+    pub async fn push_random_blocks(
+        &mut self,
+        rng: &mut impl Rng,
+        account: &mut Account,
+        count: usize,
+    ) {
         for _ in 0..count {
             // 20% chance to seal an L1 batch.
             // `seal_batch()` also produces a (fictive) block.
             if rng.gen_range(0..100) < 20 {
                 self.seal_batch().await;
             } else {
-                self.push_random_block(rng).await;
+                self.push_random_block(rng, account).await;
             }
         }
     }
@@ -451,7 +460,13 @@ impl StateKeeper {
             client,
             sync_state: self.sync_state.clone(),
         }
-        .run(ctx, self.actions_sender, cfgs.config, cfgs.secrets)
+        .run(
+            ctx,
+            self.actions_sender,
+            cfgs.config,
+            cfgs.secrets,
+            cfgs.net.build_version,
+        )
         .await
     }
 }
@@ -534,14 +549,21 @@ async fn mock_metadata_calculator_step(ctx: &ctx::Ctx, pool: &ConnectionPool) ->
 
 impl StateKeeperRunner {
     // Executes the state keeper task with real metadata calculator task
     // and fake commitment generator (because real one is too slow).
-    pub async fn run_real(self, ctx: &ctx::Ctx) -> anyhow::Result<()> {
+    pub async fn run_real(
+        self,
+        ctx: &ctx::Ctx,
+        addrs_to_fund: &[ethabi::Address],
+    ) -> anyhow::Result<()> {
         let res = scope::run!(ctx, |ctx, s| async {
-            // Fund the test account. Required for L2 transactions to succeed.
-            fund(&self.pool.0, &[self.account.address]).await;
+            // Fund the test accounts. Required for L2 transactions to succeed.
+            fund(&self.pool.0, addrs_to_fund).await;
 
             let (stop_send, stop_recv) = sync::watch::channel(false);
-            let (persistence, l2_block_sealer) =
-                StateKeeperPersistence::new(self.pool.0.clone(), Address::repeat_byte(11), 5);
+            let (persistence, l2_block_sealer) = StateKeeperPersistence::new(
+                self.pool.0.clone(),
+                ethabi::Address::repeat_byte(11),
+                5,
+            );
 
             let io = ExternalIO::new(
                 self.pool.0.clone(),
@@ -649,8 +671,11 @@ impl StateKeeperRunner {
     pub async fn run(self, ctx: &ctx::Ctx) -> anyhow::Result<()> {
         let res = scope::run!(ctx, |ctx, s| async {
             let (stop_send, stop_recv) = sync::watch::channel(false);
-            let (persistence, l2_block_sealer) =
-                StateKeeperPersistence::new(self.pool.0.clone(), Address::repeat_byte(11), 5);
+            let (persistence, l2_block_sealer) = StateKeeperPersistence::new(
+                self.pool.0.clone(),
+                ethabi::Address::repeat_byte(11),
+                5,
+            );
 
             let tree_writes_persistence = TreeWritesPersistence::new(self.pool.0.clone());
             let io = ExternalIO::new(
diff --git a/core/node/consensus/src/tests/attestation.rs b/core/node/consensus/src/tests/attestation.rs
index b245d0524aa9..abd35508c7f7 100644
--- a/core/node/consensus/src/tests/attestation.rs
+++ b/core/node/consensus/src/tests/attestation.rs
@@ -1,17 +1,24 @@
 use anyhow::Context as _;
-use test_casing::{test_casing, Product};
+use rand::Rng as _;
+use test_casing::test_casing;
 use tracing::Instrument as _;
 use zksync_concurrency::{ctx, error::Wrap, scope};
 use zksync_consensus_roles::{
     attester,
     validator::testonly::{Setup, SetupSpec},
 };
-use zksync_dal::consensus_dal::AttestationStatus;
-use zksync_node_sync::MainNodeClient;
+use zksync_dal::consensus_dal;
+use zksync_test_account::Account;
 use zksync_types::{L1BatchNumber, ProtocolVersionId};
+use zksync_web3_decl::namespaces::EnNamespaceClient as _;
 
-use super::{FROM_SNAPSHOT, VERSIONS};
-use crate::{mn::run_main_node, storage::ConnectionPool, testonly};
+use super::VERSIONS;
+use crate::{
+    mn::run_main_node,
+    registry::{testonly, Registry},
+    storage::ConnectionPool,
+    testonly::{new_configs, StateKeeper},
+};
 
 #[test_casing(2, VERSIONS)]
 #[tokio::test]
@@ -19,24 +26,31 @@ async fn test_attestation_status_api(version: ProtocolVersionId) {
     zksync_concurrency::testonly::abort_on_panic();
     let ctx = &ctx::test_root(&ctx::RealClock);
     let rng = &mut ctx.rng();
+    let account = &mut Account::random();
 
     scope::run!(ctx, |ctx, s| async {
         let pool = ConnectionPool::test(false, version).await;
-        let (mut sk, runner) = testonly::StateKeeper::new(ctx, pool.clone()).await?;
+        let (mut sk, runner) = StateKeeper::new(ctx, pool.clone()).await?;
         s.spawn_bg(runner.run(ctx).instrument(tracing::info_span!("validator")));
 
         // Setup nontrivial genesis.
         while sk.last_sealed_batch() < L1BatchNumber(3) {
-            sk.push_random_blocks(rng, 10).await;
+            sk.push_random_blocks(rng, account, 10).await;
         }
         let mut setup = SetupSpec::new(rng, 3);
         setup.first_block = sk.last_block();
         let first_batch = sk.last_batch();
         let setup = Setup::from(setup);
         let mut conn = pool.connection(ctx).await.wrap("connection()")?;
-        conn.try_update_genesis(ctx, &setup.genesis)
-            .await
-            .wrap("try_update_genesis()")?;
+        conn.try_update_global_config(
+            ctx,
+            &consensus_dal::GlobalConfig {
+                genesis: setup.genesis.clone(),
+                registry_address: None,
+            },
+        )
+        .await
+        .wrap("try_update_global_config()")?;
         // Make sure that the first_batch is actually sealed.
         sk.seal_batch().await;
         pool.wait_for_batch(ctx, first_batch).await?;
@@ -44,11 +58,11 @@ async fn test_attestation_status_api(version: ProtocolVersionId) {
         // Connect to API endpoint.
         let api = sk.connect(ctx).await?;
         let fetch_status = || async {
-            let s = api
-                .fetch_attestation_status()
-                .await?
+            let s = ctx
+                .wait(api.attestation_status())
+                .await??
                 .context("no attestation_status")?;
-            let s: AttestationStatus =
+            let s: consensus_dal::AttestationStatus =
                 zksync_protobuf::serde::deserialize(&s.0).context("deserialize()")?;
             anyhow::ensure!(s.genesis == setup.genesis.hash(), "genesis hash mismatch");
             Ok(s)
@@ -62,24 +76,37 @@ async fn test_attestation_status_api(version: ProtocolVersionId) {
             attester::BatchNumber(first_batch.0.into())
         );
 
-        // Insert a (fake) cert, then check again.
+        tracing::info!("Insert a cert");
         {
             let mut conn = pool.connection(ctx).await?;
             let number = status.next_batch_to_attest;
             let hash = conn.batch_hash(ctx, number).await?.unwrap();
-            let genesis = conn.genesis(ctx).await?.unwrap().hash();
+            let gcfg = conn.global_config(ctx).await?.unwrap();
+            let m = attester::Batch {
+                number,
+                hash,
+                genesis: gcfg.genesis.hash(),
+            };
+            let mut sigs = attester::MultiSig::default();
+            for k in &setup.attester_keys {
+                sigs.add(k.public(), k.sign_msg(m.clone()).sig);
+            }
             let cert = attester::BatchQC {
-                signatures: attester::MultiSig::default(),
-                message: attester::Batch {
-                    number,
-                    hash,
-                    genesis,
-                },
+                signatures: sigs,
+                message: m,
             };
+            conn.upsert_attester_committee(
+                ctx,
+                cert.message.number,
+                setup.genesis.attesters.as_ref().unwrap(),
+            )
+            .await
+            .context("upsert_attester_committee")?;
             conn.insert_batch_certificate(ctx, &cert)
                 .await
                 .context("insert_batch_certificate()")?;
         }
+        tracing::info!("Check again.");
         let want = status.next_batch_to_attest.next();
         let got = fetch_status().await?;
         assert_eq!(want, got.next_batch_to_attest);
@@ -93,34 +120,65 @@ async fn test_attestation_status_api(version: ProtocolVersionId) {
 // Test running a couple of attesters (which are also validators).
 // Main node is expected to collect all certificates.
 // External nodes are expected to just vote for the batch.
-//
-// TODO: it would be nice to use `StateKeeperRunner::run_real()` in this test,
-// however as of now it doesn't work with ENs and it doesn't work with
-// `ConnectionPool::from_snapshot`.
-#[test_casing(4, Product((FROM_SNAPSHOT,VERSIONS)))]
+#[test_casing(2, VERSIONS)]
 #[tokio::test]
-async fn test_multiple_attesters(from_snapshot: bool, version: ProtocolVersionId) {
+async fn test_multiple_attesters(version: ProtocolVersionId) {
     const NODES: usize = 4;
 
     zksync_concurrency::testonly::abort_on_panic();
     let ctx = &ctx::test_root(&ctx::AffineClock::new(10.));
     let rng = &mut ctx.rng();
+    let account = &mut Account::random();
+    let to_fund = &[account.address];
     let setup = Setup::new(rng, 4);
-    let cfgs = testonly::new_configs(rng, &setup, NODES);
-
+    let mut cfgs = new_configs(rng, &setup, NODES);
     scope::run!(ctx, |ctx, s| async {
-        let validator_pool = ConnectionPool::test(from_snapshot, version).await;
-        let (mut validator, runner) =
-            testonly::StateKeeper::new(ctx, validator_pool.clone()).await?;
+        let validator_pool = ConnectionPool::test(false, version).await;
+        let (mut validator, runner) = StateKeeper::new(ctx, validator_pool.clone()).await?;
         s.spawn_bg(async {
             runner
-                .run(ctx)
+                .run_real(ctx, to_fund)
                .instrument(tracing::info_span!("validator"))
                .await
                .context("validator")
         });
-        // API server needs at least 1 L1 batch to start.
+
+        tracing::info!("deploy registry with 1 attester");
+        let attesters: Vec<_> = setup.genesis.attesters.as_ref().unwrap().iter().collect();
+        let registry = Registry::new(setup.genesis.clone(), validator_pool.clone()).await;
+        let (registry_addr, tx) = registry.deploy(account);
+        cfgs[0]
+            .config
+            .genesis_spec
+            .as_mut()
+            .unwrap()
+            .registry_address = Some(*registry_addr);
+        let mut txs = vec![tx];
+        txs.push(testonly::make_tx(
+            account,
+            registry_addr,
+            registry.initialize(account.address),
+        ));
+        txs.push(testonly::make_tx(
+            account,
+            registry_addr,
+            registry
+                .add(
+                    rng.gen(),
+                    testonly::gen_validator(rng),
+                    attesters[0].clone(),
+                )
+                .unwrap(),
+        ));
+        txs.push(testonly::make_tx(
+            account,
+            registry_addr,
+            registry.commit_attester_committee(),
+        ));
+        validator.push_block(&txs).await;
         validator.seal_batch().await;
+
+        tracing::info!("wait for the batch to be processed before starting consensus");
         validator_pool
             .wait_for_payload(ctx, validator.last_block())
             .await?;
@@ -137,13 +195,13 @@ async fn test_multiple_attesters(from_snapshot: bool, version: ProtocolVersionId
         let mut node_pools = vec![];
         for (i, cfg) in cfgs[1..].iter().enumerate() {
             let i = ctx::NoCopy(i);
-            let pool = ConnectionPool::test(from_snapshot, version).await;
-            let (node, runner) = testonly::StateKeeper::new(ctx, pool.clone()).await?;
+            let pool = ConnectionPool::test(false, version).await;
+            let (node, runner) = StateKeeper::new(ctx, pool.clone()).await?;
             node_pools.push(pool.clone());
             s.spawn_bg(async {
                 let i = i;
                 runner
-                    .run(ctx)
+                    .run_real(ctx, to_fund)
                     .instrument(tracing::info_span!("node", i = *i))
                     .await
                     .with_context(|| format!("node{}", *i))
             });
             s.spawn_bg(node.run_consensus(ctx, validator.connect(ctx).await?, cfg.clone()));
         }
 
-        tracing::info!("Create some batches");
-        validator.push_random_blocks(rng, 20).await;
-        validator.seal_batch().await;
+        tracing::info!("add attesters one by one");
+        #[allow(clippy::needless_range_loop)]
+        for i in 1..attesters.len() {
+            let txs = vec![
+                testonly::make_tx(
+                    account,
+                    registry_addr,
+                    registry
+                        .add(
+                            rng.gen(),
+                            testonly::gen_validator(rng),
+                            attesters[i].clone(),
+                        )
+                        .unwrap(),
+                ),
+                testonly::make_tx(account, registry_addr, registry.commit_attester_committee()),
+            ];
+            validator.push_block(&txs).await;
+            validator.seal_batch().await;
+        }
+
         tracing::info!("Wait for the batches to be attested");
         let want_last = attester::BatchNumber(validator.last_sealed_batch().0.into());
         validator_pool
-            .wait_for_batch_certificates_and_verify(ctx, want_last)
+            .wait_for_batch_certificates_and_verify(ctx, want_last, Some(registry_addr))
             .await?;
         Ok(())
     })
diff --git a/core/node/consensus/src/tests/batch.rs b/core/node/consensus/src/tests/batch.rs
index 41d73fdb87c6..f0cae7f2c02e 100644
--- a/core/node/consensus/src/tests/batch.rs
+++ b/core/node/consensus/src/tests/batch.rs
@@ -1,6 +1,7 @@
 use test_casing::{test_casing, Product};
 use zksync_concurrency::{ctx, scope};
 use zksync_consensus_roles::validator;
+use zksync_test_account::Account;
 use zksync_types::{L1BatchNumber, ProtocolVersionId};
 
 use super::{FROM_SNAPSHOT, VERSIONS};
@@ -13,6 +14,7 @@ async fn test_connection_get_batch(from_snapshot: bool, version: ProtocolVersion
     let ctx = &ctx::test_root(&ctx::RealClock);
     let rng = &mut ctx.rng();
     let pool = ConnectionPool::test(from_snapshot, version).await;
+    let account = &mut Account::random();
 
     // Fill storage with unsigned L2 blocks and L1 batches in a way that the
     // last L1 batch is guaranteed to have some L2 blocks executed in it.
@@ -23,11 +25,11 @@ async fn test_connection_get_batch(from_snapshot: bool, version: ProtocolVersion
     for _ in 0..3 {
         for _ in 0..2 {
-            sk.push_random_block(rng).await;
+            sk.push_random_block(rng, account).await;
         }
         sk.seal_batch().await;
     }
-    sk.push_random_block(rng).await;
+    sk.push_random_block(rng, account).await;
 
     pool.wait_for_payload(ctx, sk.last_block()).await?;
 
@@ -84,11 +86,13 @@ async fn test_batch_witness(version: ProtocolVersionId) {
     zksync_concurrency::testonly::abort_on_panic();
     let ctx = &ctx::test_root(&ctx::RealClock);
     let rng = &mut ctx.rng();
+    let account = &mut Account::random();
+    let to_fund = &[account.address];
 
     scope::run!(ctx, |ctx, s| async {
         let pool = ConnectionPool::from_genesis(version).await;
         let (mut node, runner) = testonly::StateKeeper::new(ctx, pool.clone()).await?;
-        s.spawn_bg(runner.run_real(ctx));
+        s.spawn_bg(runner.run_real(ctx, to_fund));
 
         tracing::info!("analyzing storage");
         {
@@ -101,7 +105,7 @@ async fn test_batch_witness(version: ProtocolVersionId) {
         }
 
         // Seal a bunch of batches.
-        node.push_random_blocks(rng, 10).await;
+        node.push_random_blocks(rng, account, 10).await;
         node.seal_batch().await;
         pool.wait_for_batch(ctx, node.last_sealed_batch()).await?;
         // We can verify only 2nd batch onward, because
diff --git a/core/node/consensus/src/tests/mod.rs b/core/node/consensus/src/tests/mod.rs
index 0b611d55f06a..91f01f865a2b 100644
--- a/core/node/consensus/src/tests/mod.rs
+++ b/core/node/consensus/src/tests/mod.rs
@@ -7,6 +7,8 @@ use zksync_consensus_roles::{
     validator::testonly::{Setup, SetupSpec},
 };
 use zksync_consensus_storage::BlockStore;
+use zksync_dal::consensus_dal;
+use zksync_test_account::Account;
 use zksync_types::ProtocolVersionId;
 
 use crate::{
@@ -28,6 +30,7 @@ async fn test_validator_block_store(version: ProtocolVersionId) {
     let ctx = &ctx::test_root(&ctx::RealClock);
     let rng = &mut ctx.rng();
     let pool = ConnectionPool::test(false, version).await;
+    let account = &mut Account::random();
 
     // Fill storage with unsigned L2 blocks.
     // Fetch a suffix of blocks that we will generate (fake) certs for.
@@ -35,15 +38,21 @@ async fn test_validator_block_store(version: ProtocolVersionId) {
         // Start state keeper.
         let (mut sk, runner) = testonly::StateKeeper::new(ctx, pool.clone()).await?;
         s.spawn_bg(runner.run(ctx));
-        sk.push_random_blocks(rng, 10).await;
+        sk.push_random_blocks(rng, account, 10).await;
         pool.wait_for_payload(ctx, sk.last_block()).await?;
         let mut setup = SetupSpec::new(rng, 3);
         setup.first_block = validator::BlockNumber(4);
         let mut setup = Setup::from(setup);
         let mut conn = pool.connection(ctx).await.wrap("connection()")?;
-        conn.try_update_genesis(ctx, &setup.genesis)
-            .await
-            .wrap("try_update_genesis()")?;
+        conn.try_update_global_config(
+            ctx,
+            &consensus_dal::GlobalConfig {
+                genesis: setup.genesis.clone(),
+                registry_address: None,
+            },
+        )
+        .await
+        .wrap("try_update_global_config()")?;
         for i in setup.genesis.first_block.0..sk.last_block().next().0 {
             let i = validator::BlockNumber(i);
             let payload = conn
@@ -95,6 +104,7 @@ async fn test_validator(from_snapshot: bool, version: ProtocolVersionId) {
     let rng = &mut ctx.rng();
     let setup = Setup::new(rng, 1);
     let cfg = testonly::new_configs(rng, &setup, 0)[0].clone();
+    let account = &mut Account::random();
 
     scope::run!(ctx, |ctx, s| async {
         tracing::info!("Start state keeper.");
@@ -103,7 +113,7 @@ async fn test_validator(from_snapshot: bool, version: ProtocolVersionId) {
         s.spawn_bg(runner.run(ctx));
 
         tracing::info!("Populate storage with a bunch of blocks.");
-        sk.push_random_blocks(rng, 5).await;
+        sk.push_random_blocks(rng, account, 5).await;
         pool
             .wait_for_payload(ctx, sk.last_block())
             .await
@@ -118,7 +128,7 @@ async fn test_validator(from_snapshot: bool, version: ProtocolVersionId) {
         s.spawn_bg(run_main_node(ctx, cfg.config.clone(), cfg.secrets.clone(), pool.clone()));
 
         tracing::info!("Generate couple more blocks and wait for consensus to catch up.");
-        sk.push_random_blocks(rng, 3).await;
+        sk.push_random_blocks(rng, account, 3).await;
         pool
             .wait_for_block_certificate(ctx, sk.last_block())
             .await
@@ -126,7 +136,7 @@ async fn test_validator(from_snapshot: bool, version: ProtocolVersionId) {
         tracing::info!("Synchronously produce blocks one by one, and wait for consensus.");
         for _ in 0..2 {
-            sk.push_random_blocks(rng, 1).await;
+            sk.push_random_blocks(rng, account, 1).await;
             pool
                 .wait_for_block_certificate(ctx, sk.last_block())
                 .await
@@ -158,6 +168,7 @@ async fn test_nodes_from_various_snapshots(version: ProtocolVersionId) {
     let rng = &mut ctx.rng();
     let setup = Setup::new(rng, 1);
     let validator_cfg = testonly::new_configs(rng, &setup, 0)[0].clone();
+    let account = &mut Account::random();
 
     scope::run!(ctx, |ctx, s| async {
         tracing::info!("spawn validator");
@@ -173,7 +184,7 @@ async fn test_nodes_from_various_snapshots(version: ProtocolVersionId) {
         ));
 
         tracing::info!("produce some batches");
-        validator.push_random_blocks(rng, 5).await;
+        validator.push_random_blocks(rng, account, 5).await;
         validator.seal_batch().await;
         validator_pool
             .wait_for_block_certificate(ctx, validator.last_block())
@@ -191,7 +202,7 @@ async fn test_nodes_from_various_snapshots(version: ProtocolVersionId) {
         });
 
         tracing::info!("produce more batches");
-        validator.push_random_blocks(rng, 5).await;
+        validator.push_random_blocks(rng, account, 5).await;
         validator.seal_batch().await;
         node_pool
             .wait_for_block_certificate(ctx, validator.last_block())
@@ -209,7 +220,7 @@ async fn test_nodes_from_various_snapshots(version: ProtocolVersionId) {
         });
 
         tracing::info!("produce more blocks and compare storages");
-        validator.push_random_blocks(rng, 5).await;
+        validator.push_random_blocks(rng, account, 5).await;
         let want = validator_pool
             .wait_for_block_certificates_and_verify(ctx, validator.last_block())
             .await?;
@@ -243,6 +254,7 @@ async fn test_full_nodes(from_snapshot: bool, version: ProtocolVersionId) {
     let rng = &mut ctx.rng();
     let setup = Setup::new(rng, 1);
     let validator_cfg = testonly::new_configs(rng, &setup, 0)[0].clone();
+    let account = &mut Account::random();
 
     // topology:
     // validator <-> node <-> node <-> ...
@@ -264,7 +276,7 @@ async fn test_full_nodes(from_snapshot: bool, version: ProtocolVersionId) {
                 .context("validator")
         });
         tracing::info!("Generate a couple of blocks, before initializing consensus genesis.");
-        validator.push_random_blocks(rng, 5).await;
+        validator.push_random_blocks(rng, account, 5).await;
         // API server needs at least 1 L1 batch to start.
         validator.seal_batch().await;
         validator_pool
@@ -299,7 +311,7 @@ async fn test_full_nodes(from_snapshot: bool, version: ProtocolVersionId) {
         tracing::info!("Make validator produce blocks and wait for fetchers to get them.");
         // Note that block from before and after genesis have to be fetched.
-        validator.push_random_blocks(rng, 5).await;
+        validator.push_random_blocks(rng, account, 5).await;
         let want_last = validator.last_block();
         let want = validator_pool
             .wait_for_block_certificates_and_verify(ctx, want_last)
             .await?;
@@ -328,6 +340,7 @@ async fn test_en_validators(from_snapshot: bool, version: ProtocolVersionId) {
     let rng = &mut ctx.rng();
     let setup = Setup::new(rng, NODES);
     let cfgs = testonly::new_configs(rng, &setup, 1);
+    let account = &mut Account::random();
 
     // Run all nodes in parallel.
     scope::run!(ctx, |ctx, s| async {
@@ -342,7 +355,7 @@ async fn test_en_validators(from_snapshot: bool, version: ProtocolVersionId) {
                 .context("main_node")
         });
         tracing::info!("Generate a couple of blocks, before initializing consensus genesis.");
-        main_node.push_random_blocks(rng, 5).await;
+        main_node.push_random_blocks(rng, account, 5).await;
         // API server needs at least 1 L1 batch to start.
         main_node.seal_batch().await;
         main_node_pool
@@ -381,7 +394,7 @@ async fn test_en_validators(from_snapshot: bool, version: ProtocolVersionId) {
         }
 
         tracing::info!("Make the main node produce blocks and wait for consensus to finalize them");
-        main_node.push_random_blocks(rng, 5).await;
+        main_node.push_random_blocks(rng, account, 5).await;
         let want_last = main_node.last_block();
         let want = main_node_pool
             .wait_for_block_certificates_and_verify(ctx, want_last)
             .await?;
@@ -409,6 +422,7 @@ async fn test_p2p_fetcher_backfill_certs(from_snapshot: bool, version: ProtocolV
     let setup = Setup::new(rng, 1);
     let validator_cfg = testonly::new_configs(rng, &setup, 0)[0].clone();
     let node_cfg = validator_cfg.new_fullnode(rng);
+    let account = &mut Account::random();
 
     scope::run!(ctx, |ctx, s| async {
         tracing::info!("Spawn validator.");
@@ -433,7 +447,7 @@ async fn test_p2p_fetcher_backfill_certs(from_snapshot: bool, version: ProtocolV
             let (node, runner) = testonly::StateKeeper::new(ctx, node_pool.clone()).await?;
             s.spawn_bg(runner.run(ctx));
             s.spawn_bg(node.run_consensus(ctx, client.clone(), node_cfg.clone()));
-            validator.push_random_blocks(rng, 3).await;
+            validator.push_random_blocks(rng, account, 3).await;
             node_pool
                 .wait_for_block_certificate(ctx, validator.last_block())
                 .await?;
@@ -447,7 +461,7 @@ async fn test_p2p_fetcher_backfill_certs(from_snapshot: bool, version: ProtocolV
             let (node, runner) = testonly::StateKeeper::new(ctx, node_pool.clone()).await?;
             s.spawn_bg(runner.run(ctx));
             s.spawn_bg(node.run_fetcher(ctx, client.clone()));
-            validator.push_random_blocks(rng, 3).await;
+            validator.push_random_blocks(rng, account, 3).await;
             node_pool
                 .wait_for_payload(ctx, validator.last_block())
                 .await?;
@@ -461,7 +475,7 @@ async fn test_p2p_fetcher_backfill_certs(from_snapshot: bool, version: ProtocolV
             let (node, runner) = testonly::StateKeeper::new(ctx, node_pool.clone()).await?;
             s.spawn_bg(runner.run(ctx));
             s.spawn_bg(node.run_consensus(ctx, client.clone(), node_cfg));
-            validator.push_random_blocks(rng, 3).await;
+            validator.push_random_blocks(rng, account, 3).await;
             let want = validator_pool
                 .wait_for_block_certificates_and_verify(ctx, validator.last_block())
                 .await?;
@@ -488,6 +502,7 @@ async fn test_with_pruning(version: ProtocolVersionId) {
     let setup = Setup::new(rng, 1);
     let validator_cfg = testonly::new_configs(rng, &setup, 0)[0].clone();
     let node_cfg = validator_cfg.new_fullnode(rng);
+    let account = &mut Account::random();
 
     scope::run!(ctx, |ctx, s| async {
         let validator_pool = ConnectionPool::test(false, version).await;
@@ -535,7 +550,7 @@ async fn test_with_pruning(version: ProtocolVersionId) {
         });
 
         tracing::info!("Sync some blocks");
-        validator.push_random_blocks(rng, 5).await;
+        validator.push_random_blocks(rng, account, 5).await;
         validator.seal_batch().await;
         let to_prune = validator.last_sealed_batch();
         tracing::info!(
             "Seal another batch to make sure that there is at least 1 sealed batch after pruning."
         );
-        validator.push_random_blocks(rng, 5).await;
+        validator.push_random_blocks(rng, account, 5).await;
         validator.seal_batch().await;
         validator_pool
             .wait_for_batch(ctx, validator.last_sealed_batch())
             .await
@@ -565,7 +580,7 @@ async fn test_with_pruning(version: ProtocolVersionId) {
             .prune_batches(ctx, to_prune)
             .await
             .context("prune_batches")?;
-        validator.push_random_blocks(rng, 5).await;
+        validator.push_random_blocks(rng, account, 5).await;
         node_pool
             .wait_for_block_certificates(ctx, validator.last_block())
             .await
@@ -582,6 +597,7 @@ async fn test_centralized_fetcher(from_snapshot: bool, version: ProtocolVersionI
     zksync_concurrency::testonly::abort_on_panic();
     let ctx = &ctx::test_root(&ctx::RealClock);
     let rng = &mut ctx.rng();
+    let account = &mut Account::random();
 
     scope::run!(ctx, |ctx, s| async {
         tracing::info!("Spawn a validator.");
@@ -601,7 +617,7 @@ async fn test_centralized_fetcher(from_snapshot: bool, version: ProtocolVersionI
         s.spawn_bg(node.run_fetcher(ctx, validator.connect(ctx).await?));
 
         tracing::info!("Produce some blocks and wait for node to fetch them");
-        validator.push_random_blocks(rng, 10).await;
+        validator.push_random_blocks(rng, account, 10).await;
         let want = validator_pool
             .wait_for_payload(ctx, validator.last_block())
             .await?;
diff --git a/core/node/consensus/src/vm.rs b/core/node/consensus/src/vm.rs
new file mode 100644
index 000000000000..f7f14ad8fe0a
--- /dev/null
+++ b/core/node/consensus/src/vm.rs
@@ -0,0 +1,96 @@
+use anyhow::Context as _;
+use zksync_concurrency::{ctx, error::Wrap as _, scope};
+use zksync_consensus_roles::attester;
+use zksync_multivm::interface::TxExecutionMode;
+use zksync_node_api_server::{
+    execution_sandbox::{TransactionExecutor, TxExecutionArgs, TxSetupArgs, VmConcurrencyLimiter},
+    tx_sender::MultiVMBaseSystemContracts,
+};
+use zksync_state::PostgresStorageCaches;
+use zksync_system_constants::DEFAULT_L2_TX_GAS_PER_PUBDATA_BYTE;
+use zksync_types::{
+    ethabi, fee::Fee, fee_model::BatchFeeInput, l2::L2Tx, AccountTreeId, L2ChainId, Nonce, U256,
+};
+use zksync_vm_interface::ExecutionResult;
+
+use crate::{abi, storage::ConnectionPool};
+
+/// VM executes eth_calls on the db.
+#[derive(Debug)]
+pub(crate) struct VM {
+    pool: ConnectionPool,
+    setup_args: TxSetupArgs,
+    limiter: VmConcurrencyLimiter,
+}
+
+impl VM {
+    /// Constructs a new `VM` instance.
+    pub async fn new(pool: ConnectionPool) -> Self {
+        Self {
+            pool,
+            setup_args: TxSetupArgs {
+                execution_mode: TxExecutionMode::EthCall,
+                operator_account: AccountTreeId::default(),
+                fee_input: BatchFeeInput::sensible_l1_pegged_default(),
+                base_system_contracts: scope::wait_blocking(
+                    MultiVMBaseSystemContracts::load_eth_call_blocking,
+                )
+                .await,
+                caches: PostgresStorageCaches::new(1, 1),
+                validation_computational_gas_limit: u32::MAX,
+                chain_id: L2ChainId::default(),
+                whitelisted_tokens_for_aa: vec![],
+                enforced_base_fee: None,
+            },
+            limiter: VmConcurrencyLimiter::new(1).0,
+        }
+    }
+
+    pub async fn call<F: abi::Function>(
+        &self,
+        ctx: &ctx::Ctx,
+        batch: attester::BatchNumber,
+        address: abi::Address<F::Contract>,
+        call: abi::Call<F>,
+    ) -> ctx::Result<F::Outputs> {
+        let tx = L2Tx::new(
+            *address,
+            call.calldata().context("call.calldata()")?,
+            Nonce(0),
+            Fee {
+                gas_limit: U256::from(2000000000u32),
+                max_fee_per_gas: U256::zero(),
+                max_priority_fee_per_gas: U256::zero(),
+                gas_per_pubdata_limit: U256::from(DEFAULT_L2_TX_GAS_PER_PUBDATA_BYTE),
+            },
+            ethabi::Address::zero(),
+            U256::zero(),
+            vec![],
+            Default::default(),
+        );
+        let permit = ctx.wait(self.limiter.acquire()).await?.unwrap();
+        let mut conn = self.pool.connection(ctx).await.wrap("connection()")?;
+        let args = conn
+            .vm_block_args(ctx, batch)
+            .await
+            .wrap("vm_block_args()")?;
+        let output = ctx
+            .wait(TransactionExecutor::real(usize::MAX).execute_tx_in_sandbox(
+                permit,
+                self.setup_args.clone(),
+                TxExecutionArgs::for_eth_call(tx.clone()),
+                conn.0,
+                args,
+                None,
+                vec![],
+            ))
+            .await?
+            .context("execute_tx_in_sandbox()")?;
+        match output.vm.result {
+            ExecutionResult::Success { output } => {
+                Ok(call.decode_outputs(&output).context("decode_output()")?)
+            }
+            other => Err(anyhow::format_err!("unsuccessful execution: {other:?}").into()),
+        }
+    }
+}
diff --git a/core/node/eth_sender/src/aggregator.rs b/core/node/eth_sender/src/aggregator.rs
index de6a6982088b..1e0bd315b9d9 100644
--- a/core/node/eth_sender/src/aggregator.rs
+++ b/core/node/eth_sender/src/aggregator.rs
@@ -333,16 +333,13 @@ impl Aggregator {
         // keys that correspond to one on L1.
         let allowed_patch_versions = storage
             .protocol_versions_dal()
-            .get_patch_versions_for_vk(
-                minor_version,
-                l1_verifier_config.recursion_scheduler_level_vk_hash,
-            )
+            .get_patch_versions_for_vk(minor_version, l1_verifier_config.snark_wrapper_vk_hash)
             .await
             .unwrap();
         if allowed_patch_versions.is_empty() {
             tracing::warn!(
                 "No patch version corresponds to the verification key on L1: {:?}",
-                l1_verifier_config.recursion_scheduler_level_vk_hash
+                l1_verifier_config.snark_wrapper_vk_hash
             );
             return None;
         };
diff --git a/core/node/eth_sender/src/eth_tx_aggregator.rs b/core/node/eth_sender/src/eth_tx_aggregator.rs
index 7f304e2f72b7..6e9e71d74ea4 100644
--- a/core/node/eth_sender/src/eth_tx_aggregator.rs
+++ b/core/node/eth_sender/src/eth_tx_aggregator.rs
@@ -317,7 +317,7 @@ impl EthTxAggregator {
     }
 
     /// Loads current verifier config on L1
-    async fn get_recursion_scheduler_level_vk_hash(
+    async fn get_snark_wrapper_vk_hash(
         &mut self,
         verifier_address: Address,
     ) -> Result<H256, EthSenderError> {
@@ -344,15 +344,15 @@ impl EthTxAggregator {
             })?;
         let contracts_are_pre_shared_bridge = protocol_version_id.is_pre_shared_bridge();
 
-        let recursion_scheduler_level_vk_hash = self
-            .get_recursion_scheduler_level_vk_hash(verifier_address)
+        let snark_wrapper_vk_hash = self
+            .get_snark_wrapper_vk_hash(verifier_address)
             .await
             .map_err(|err| {
                 tracing::error!("Failed to get VK hash from the Verifier {err:?}");
                 err
             })?;
         let l1_verifier_config = L1VerifierConfig {
-            recursion_scheduler_level_vk_hash,
+            snark_wrapper_vk_hash,
         };
         if let Some(agg_op) = self
             .aggregator
diff --git a/core/node/eth_watch/src/client.rs b/core/node/eth_watch/src/client.rs
index 8be556b42889..67e603041e6c 100644
--- a/core/node/eth_watch/src/client.rs
+++ b/core/node/eth_watch/src/client.rs
@@ -40,6 +40,7 @@ pub trait EthClient: 'static + fmt::Debug + Send + Sync {
 pub const RETRY_LIMIT: usize = 5;
 const TOO_MANY_RESULTS_INFURA: &str = "query returned more than";
 const TOO_MANY_RESULTS_ALCHEMY: &str = "response size exceeded";
+const TOO_MANY_RESULTS_RETH: &str = "query exceeds max block range";
 
 /// Implementation of [`EthClient`] based on HTTP JSON-RPC (encapsulated via [`EthInterface`]).
 #[derive(Debug)]
@@ -87,75 +88,34 @@ impl EthHttpQueryClient {
         }
     }
 
-    async fn get_filter_logs(
+    fn get_default_address_list(&self) -> Vec<Address> {
+        [
+            Some(self.diamond_proxy_addr),
+            Some(self.governance_address),
+            self.state_transition_manager_address,
+            self.chain_admin_address,
+        ]
+        .into_iter()
+        .flatten()
+        .collect()
+    }
+
+    async fn get_events_inner(
         &self,
         from: BlockNumber,
         to: BlockNumber,
-        topics: Vec<H256>,
+        topics1: Vec<H256>,
+        topics2: Vec<H256>,
+        addresses: Vec<Address>,
+        retries_left: usize,
     ) -> EnrichedClientResult<Vec<Log>> {
         let filter = FilterBuilder::default()
-            .address(
-                [
-                    Some(self.diamond_proxy_addr),
-                    Some(self.governance_address),
-                    self.state_transition_manager_address,
-                    self.chain_admin_address,
-                ]
-                .into_iter()
-                .flatten()
-                .collect(),
-            )
             .from_block(from)
             .to_block(to)
-            .topics(Some(topics), None, None, None)
+            .topics(Some(topics1), Some(topics2), None, None)
+            .address(addresses)
             .build();
-        self.client.logs(&filter).await
-    }
-}
-
-#[async_trait::async_trait]
-impl EthClient for EthHttpQueryClient {
-    async fn scheduler_vk_hash(
-        &self,
-        verifier_address: Address,
-    ) -> Result<H256, ContractCallError> {
-        // New verifier returns the hash of the verification key.
-        CallFunctionArgs::new("verificationKeyHash", ())
-            .for_contract(verifier_address, &self.verifier_contract_abi)
-            .call(&self.client)
-            .await
-    }
-
-    async fn diamond_cut_by_version(
-        &self,
-        packed_version: H256,
-    ) -> EnrichedClientResult<Option<Vec<u8>>> {
-        let Some(state_transition_manager_address) = self.state_transition_manager_address else {
-            return Ok(None);
-        };
-
-        let filter = FilterBuilder::default()
-            .address(vec![state_transition_manager_address])
-            .from_block(BlockNumber::Earliest)
-            .to_block(BlockNumber::Latest)
-            .topics(
-                Some(vec![self.new_upgrade_cut_data_signature]),
-                Some(vec![packed_version]),
-                None,
-                None,
-            )
-            .build();
-        let logs = self.client.logs(&filter).await?;
-        Ok(logs.into_iter().next().map(|log| log.data.0))
-    }
-
-    async fn get_events(
-        &self,
-        from: BlockNumber,
-        to: BlockNumber,
-        retries_left: usize,
-    ) -> EnrichedClientResult<Vec<Log>> {
-        let mut result = self.get_filter_logs(from, to, self.topics.clone()).await;
+        let mut result = self.client.logs(&filter).await;
 
         // This code is compatible with both Infura and Alchemy API providers.
         // Note: we don't handle rate-limits here - assumption is that we're never going to hit them.
@@ -178,6 +138,7 @@ impl EthClient for EthHttpQueryClient {
                 // check whether the error is related to having too many results
                 if err_message.contains(TOO_MANY_RESULTS_INFURA)
                     || err_message.contains(TOO_MANY_RESULTS_ALCHEMY)
+                    || err_message.contains(TOO_MANY_RESULTS_RETH)
                 {
                     // get the numeric block ids
                     let from_number = match from {
@@ -223,6 +184,64 @@ impl EthClient for EthHttpQueryClient {
 
         result
     }
+}
+
+#[async_trait::async_trait]
+impl EthClient for EthHttpQueryClient {
+    async fn scheduler_vk_hash(
+        &self,
+        verifier_address: Address,
+    ) -> Result<H256, ContractCallError> {
+        // New verifier returns the hash of the verification key.
+        CallFunctionArgs::new("verificationKeyHash", ())
+            .for_contract(verifier_address, &self.verifier_contract_abi)
+            .call(&self.client)
+            .await
+    }
+
+    async fn diamond_cut_by_version(
+        &self,
+        packed_version: H256,
+    ) -> EnrichedClientResult<Option<Vec<u8>>> {
+        const LOOK_BACK_BLOCK_RANGE: u64 = 1_000_000;
+
+        let Some(state_transition_manager_address) = self.state_transition_manager_address else {
+            return Ok(None);
+        };
+
+        let to_block = self.client.block_number().await?;
+        let from_block = to_block.saturating_sub((LOOK_BACK_BLOCK_RANGE - 1).into());
+
+        let logs = self
+            .get_events_inner(
+                from_block.into(),
+                to_block.into(),
+                vec![self.new_upgrade_cut_data_signature],
+                vec![packed_version],
+                vec![state_transition_manager_address],
+                RETRY_LIMIT,
+            )
+            .await?;
+
+        Ok(logs.into_iter().next().map(|log| log.data.0))
+    }
+
+    async fn get_events(
+        &self,
+        from: BlockNumber,
+        to: BlockNumber,
+        retries_left: usize,
+    ) -> EnrichedClientResult<Vec<Log>> {
+        self.get_events_inner(
+            from,
+            to,
+            self.topics.clone(),
+            Vec::new(),
+            self.get_default_address_list(),
+            retries_left,
+        )
+        .await
+    }
 
     async fn finalized_block_number(&self) -> EnrichedClientResult<u64> {
         if let Some(confirmations) = self.confirmations_for_eth_event {
diff --git a/core/node/genesis/src/lib.rs b/core/node/genesis/src/lib.rs
index 6713e5a4bcc2..1f30d314bb06 100644
--- a/core/node/genesis/src/lib.rs
+++ b/core/node/genesis/src/lib.rs
@@ -175,8 +175,7 @@ pub fn mock_genesis_config() -> GenesisConfig {
         l1_chain_id: L1ChainId(9),
         sl_chain_id: None,
         l2_chain_id: L2ChainId::default(),
-        recursion_scheduler_level_vk_hash: first_l1_verifier_config
-            .recursion_scheduler_level_vk_hash,
+        snark_wrapper_vk_hash: first_l1_verifier_config.snark_wrapper_vk_hash,
         fee_account: Default::default(),
         dummy_verifier: false,
         l1_batch_commit_data_generator_mode: Default::default(),
@@ -190,7 +189,7 @@ pub async fn insert_genesis_batch(
 ) -> Result<GenesisBatchParams, GenesisError> {
     let mut transaction = storage.start_transaction().await?;
     let verifier_config = L1VerifierConfig {
-        recursion_scheduler_level_vk_hash: genesis_params.config.recursion_scheduler_level_vk_hash,
+        snark_wrapper_vk_hash: genesis_params.config.snark_wrapper_vk_hash,
     };
 
     create_genesis_l1_batch(
@@ -297,10 +296,10 @@ pub async fn validate_genesis_params(
         .call(query_client)
         .await?;
 
-    if verification_key_hash != genesis_params.config().recursion_scheduler_level_vk_hash {
+    if verification_key_hash != genesis_params.config().snark_wrapper_vk_hash {
         return Err(anyhow::anyhow!(
             "Verification key hash mismatch: {verification_key_hash:?} on contract, {:?} in config",
-            genesis_params.config().recursion_scheduler_level_vk_hash
+            genesis_params.config().snark_wrapper_vk_hash
         ));
     }
 
diff --git a/core/node/node_framework/Cargo.toml b/core/node/node_framework/Cargo.toml
index fe4889225675..d5b19a1d4b01 100644
--- a/core/node/node_framework/Cargo.toml
+++ b/core/node/node_framework/Cargo.toml
@@ -64,6 +64,7 @@ futures.workspace = true
 anyhow.workspace = true
 tokio = { workspace = true, features = ["rt"] }
 ctrlc.workspace = true
+semver.workspace = true
 
 [dev-dependencies]
 zksync_env_config.workspace = true
diff --git a/core/node/node_framework/src/implementations/layers/consensus/external_node.rs b/core/node/node_framework/src/implementations/layers/consensus/external_node.rs
index 14365384c1a4..5acdab568e74 100644
--- a/core/node/node_framework/src/implementations/layers/consensus/external_node.rs
+++ b/core/node/node_framework/src/implementations/layers/consensus/external_node.rs
@@ -23,6 +23,7 @@ use crate::{
 /// Wiring layer for external node consensus component.
 #[derive(Debug)]
 pub struct ExternalNodeConsensusLayer {
+    pub build_version: semver::Version,
     pub config: Option<ConsensusConfig>,
     pub secrets: Option<ConsensusSecrets>,
 }
@@ -78,6 +79,7 @@ impl WiringLayer for ExternalNodeConsensusLayer {
         };
 
         let consensus_task = ExternalNodeTask {
+            build_version: self.build_version,
             config,
             pool,
             main_node_client,
@@ -90,6 +92,7 @@ impl WiringLayer for ExternalNodeConsensusLayer {
 
 #[derive(Debug)]
 pub struct ExternalNodeTask {
+    build_version: semver::Version,
     config: Option<(ConsensusConfig, ConsensusSecrets)>,
     pool: ConnectionPool<Core>,
     main_node_client: Box<DynClient<L2>>,
@@ -118,6 +121,7 @@ impl Task for ExternalNodeTask {
             self.sync_state,
             self.main_node_client,
             self.action_queue_sender,
+            self.build_version,
         ));
         // `run_external_node` might return an error or panic,
         // in which case we need to return immediately,
diff --git a/core/node/node_sync/src/client.rs b/core/node/node_sync/src/client.rs
index d064803eab59..ee89db10ddd1 100644
--- a/core/node/node_sync/src/client.rs
+++ b/core/node/node_sync/src/client.rs
@@ -42,12 +42,7 @@ pub trait MainNodeClient: 'static + Send + Sync + fmt::Debug {
         with_transactions: bool,
     ) -> EnrichedClientResult<Option<en::SyncBlock>>;
 
-    async fn fetch_consensus_genesis(&self) -> EnrichedClientResult<Option<en::ConsensusGenesis>>;
-
     async fn fetch_genesis_config(&self) -> EnrichedClientResult<GenesisConfig>;
-
-    async fn fetch_attestation_status(&self)
-        -> EnrichedClientResult<Option<en::AttestationStatus>>;
 }
 
 #[async_trait]
@@ -133,20 +128,6 @@ impl MainNodeClient for Box<DynClient<L2>> {
             .with_arg("with_transactions", &with_transactions)
             .await
     }
-
-    async fn fetch_consensus_genesis(&self) -> EnrichedClientResult<Option<en::ConsensusGenesis>> {
-        self.consensus_genesis()
-            .rpc_context("consensus_genesis")
-            .await
-    }
-
-    async fn fetch_attestation_status(
-        &self,
-    ) -> EnrichedClientResult<Option<en::AttestationStatus>> {
-        self.attestation_status()
-            .rpc_context("attestation_status")
-            .await
-    }
 }
 
 /// Main node health check.
diff --git a/core/node/node_sync/src/testonly.rs b/core/node/node_sync/src/testonly.rs
index b9e1adc995af..16027a71a251 100644
--- a/core/node/node_sync/src/testonly.rs
+++ b/core/node/node_sync/src/testonly.rs
@@ -71,18 +71,6 @@ impl MainNodeClient for MockMainNodeClient {
         Ok(Some(block))
     }

-    async fn fetch_consensus_genesis(
-        &self,
-    ) -> EnrichedClientResult<Option<en::ConsensusGenesis>> {
-        unimplemented!()
-    }
-
-    async fn fetch_attestation_status(
-        &self,
-    ) -> EnrichedClientResult<Option<en::AttestationStatus>> {
-        unimplemented!()
-    }
-
     async fn fetch_genesis_config(&self) -> EnrichedClientResult<GenesisConfig> {
         Ok(mock_genesis_config())
     }
diff --git a/core/node/proof_data_handler/src/tests.rs b/core/node/proof_data_handler/src/tests.rs
index 5d7569d5720c..6ab7e4dec436 100644
--- a/core/node/proof_data_handler/src/tests.rs
+++ b/core/node/proof_data_handler/src/tests.rs
@@ -94,7 +94,7 @@ async fn request_tee_proof_inputs() {
         },
         L1BatchCommitmentMode::Rollup,
     );
-    let req_body = Body::from(serde_json::to_vec(&json!({ "tee_type": "Sgx" })).unwrap());
+    let req_body = Body::from(serde_json::to_vec(&json!({ "tee_type": "sgx" })).unwrap());
     let response = app
         .oneshot(
             Request::builder()
@@ -134,7 +134,7 @@ async fn submit_tee_proof() {
         "signature": [ 0, 1, 2, 3, 4 ],
         "pubkey": [ 5, 6, 7, 8, 9 ],
         "proof": [ 10, 11, 12, 13, 14 ],
-        "tee_type": "Sgx"
+        "tee_type": "sgx"
     }"#;
     let tee_proof_request =
         serde_json::from_str::<SubmitTeeProofRequest>(tee_proof_request_str).unwrap();
diff --git a/core/node/state_keeper/src/keeper.rs b/core/node/state_keeper/src/keeper.rs
index 02f7f92e070a..d36ceec7d70c 100644
--- a/core/node/state_keeper/src/keeper.rs
+++ b/core/node/state_keeper/src/keeper.rs
@@ -14,6 +14,7 @@ use zksync_multivm::{
     },
     utils::StorageWritesDeduplicator,
 };
+use zksync_shared_metrics::{TxStage, APP_METRICS};
 use zksync_state::{OwnedStorage, ReadStorageFactory};
 use zksync_types::{
     block::L2BlockExecutionData, l2::TransactionType, protocol_upgrade::ProtocolUpgradeTx,
@@ -463,6 +464,9 @@ impl ZkSyncStateKeeper {
                 .with_context(|| format!("failed re-executing transaction {:?}", tx.hash()))?;
             let result = TxExecutionResult::new(result, &tx);

+            APP_METRICS.processed_txs[&TxStage::StateKeeper].inc();
+            APP_METRICS.processed_l1_txs[&TxStage::StateKeeper].inc_by(tx.is_l1().into());
+
             let TxExecutionResult::Success {
                 tx_result,
                 tx_metrics,
@@ -742,6 +746,9 @@ impl ZkSyncStateKeeper {
         let exec_result = TxExecutionResult::new(exec_result, &tx);
         latency.observe();

+        APP_METRICS.processed_txs[&TxStage::StateKeeper].inc();
+        APP_METRICS.processed_l1_txs[&TxStage::StateKeeper].inc_by(tx.is_l1().into());
+
         let latency = KEEPER_METRICS.determine_seal_resolution.start();
         // All of `TxExecutionResult::BootloaderOutOfGasForTx`,
         // `Halt::NotEnoughGasProvided` correspond to out-of-gas errors but of different nature.
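The two `APP_METRICS` bumps above use a `vise` counter family keyed by transaction stage. Roughly how such a family is declared (an abridged sketch; the real `TxStage` in `zksync_shared_metrics` has more variants, and the exact prefix and names may differ):

```rust
use vise::{Counter, EncodeLabelSet, EncodeLabelValue, Family, Global, Metrics};

// Label describing the pipeline stage at which a transaction was observed.
#[derive(Debug, Clone, Copy, PartialEq, Eq, Hash, EncodeLabelValue, EncodeLabelSet)]
#[metrics(label = "stage", rename_all = "snake_case")]
pub enum TxStage {
    Mempool,
    StateKeeper,
}

#[derive(Debug, Metrics)]
#[metrics(prefix = "server")]
pub struct AppMetrics {
    /// All transactions processed, labeled by stage.
    pub processed_txs: Family<TxStage, Counter>,
    /// L1 (priority) transactions processed, labeled by stage.
    pub processed_l1_txs: Family<TxStage, Counter>,
}

#[vise::register]
pub static APP_METRICS: Global<AppMetrics> = Global::new();
```

`inc_by(tx.is_l1().into())` converts the `bool` into `0` or `1`, so the L1-specific counter advances only for L1 transactions without an explicit branch.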
diff --git a/core/node/state_keeper/src/testonly/mod.rs b/core/node/state_keeper/src/testonly/mod.rs
index 0ce8c06be0e7..23aec8af49fb 100644
--- a/core/node/state_keeper/src/testonly/mod.rs
+++ b/core/node/state_keeper/src/testonly/mod.rs
@@ -117,7 +117,7 @@ pub async fn fund(pool: &ConnectionPool<Core>, addresses: &[Address]) {

 pub(crate) const DEFAULT_GAS_PER_PUBDATA: u32 = 10000;

-pub(crate) fn fee(gas_limit: u32) -> Fee {
+pub fn fee(gas_limit: u32) -> Fee {
     Fee {
         gas_limit: U256::from(gas_limit),
         max_fee_per_gas: SYSTEM_CONTEXT_MINIMAL_BASE_FEE.into(),
diff --git a/core/tests/loadnext/src/account/mod.rs b/core/tests/loadnext/src/account/mod.rs
index 5dcd5167165e..0f418bf12676 100644
--- a/core/tests/loadnext/src/account/mod.rs
+++ b/core/tests/loadnext/src/account/mod.rs
@@ -5,6 +5,7 @@ use std::{
 };

 use futures::{channel::mpsc, SinkExt};
+use rand::Rng;
 use tokio::sync::RwLock;
 use zksync_contracts::test_contracts::LoadnextContractExecutionParams;
 use zksync_types::{api::TransactionReceipt, Address, Nonce, H256, U256, U64};
@@ -75,6 +76,8 @@ pub struct AccountLifespan {
     inflight_txs: VecDeque<InflightTx>,
     /// Current account nonce, it is None at the beginning and will be set after the first transaction
     current_nonce: Option<Nonce>,
+    /// Randomly assigned polling interval.
+    polling_interval: Duration,
 }

 impl AccountLifespan {
@@ -82,11 +85,12 @@ impl AccountLifespan {
         config: &LoadtestConfig,
         contract_execution_params: LoadnextContractExecutionParams,
         addresses: AddressPool,
-        test_account: TestWallet,
+        mut test_account: TestWallet,
         report_sink: mpsc::Sender<Report>,
         main_l2_token: Address,
         paymaster_address: Address,
     ) -> Self {
+        let polling_interval = test_account.rng.gen_range(POLLING_INTERVAL);
         Self {
             wallet: test_account,
             config: config.clone(),
@@ -99,6 +103,7 @@ impl AccountLifespan {
             report_sink,
             inflight_txs: Default::default(),
             current_nonce: None,
+            polling_interval,
         }
     }

@@ -132,7 +137,7 @@ impl AccountLifespan {
         self.execute_command(deploy_command.clone()).await?;
         self.wait_for_all_inflight_tx().await?;

-        let mut timer = tokio::time::interval(POLLING_INTERVAL);
+        let mut timer = tokio::time::interval(self.polling_interval);
         let mut l1_tx_count = 0;
         loop {
             let command = self.generate_command();
@@ -157,7 +162,7 @@ impl AccountLifespan {
     }

     async fn wait_for_all_inflight_tx(&mut self) -> Result<(), Aborted> {
-        let mut timer = tokio::time::interval(POLLING_INTERVAL);
+        let mut timer = tokio::time::interval(self.polling_interval);
         while !self.inflight_txs.is_empty() {
             timer.tick().await;
             self.check_inflight_txs().await?;
diff --git a/core/tests/loadnext/src/account_pool.rs b/core/tests/loadnext/src/account_pool.rs
index 7b5e277e139b..3fa3141553cd 100644
--- a/core/tests/loadnext/src/account_pool.rs
+++ b/core/tests/loadnext/src/account_pool.rs
@@ -101,7 +101,9 @@ impl AccountPool {
                 .context("invalid L2 RPC URL")?,
         )?
         .for_network(l2_chain_id.into())
+        .report_config(false)
         .build();
+
         // Perform a health check: check whether ZKsync server is alive.
         let mut server_alive = false;
         for _ in 0usize..3 {
diff --git a/core/tests/loadnext/src/constants.rs b/core/tests/loadnext/src/constants.rs
index 7ac66ab7e1e7..6b989b16feb1 100644
--- a/core/tests/loadnext/src/constants.rs
+++ b/core/tests/loadnext/src/constants.rs
@@ -1,4 +1,4 @@
-use std::time::Duration;
+use std::{ops, time::Duration};

 /// Normally, block is committed on Ethereum every 15 seconds; however there are no guarantees that our transaction
 /// will be included in the next block right after sending.
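The `AccountLifespan` changes above give every account its own polling interval, sampled once from the `POLLING_INTERVAL` range introduced in the next hunk, so thousands of accounts no longer poll in lockstep. A minimal sketch of the sampling, assuming `rand` 0.8 (whose `gen_range` accepts a `Range<Duration>`):

```rust
use std::{ops, time::Duration};

use rand::Rng;

pub const POLLING_INTERVAL: ops::Range<Duration> =
    Duration::from_secs(2)..Duration::from_secs(3);

// Sampled once per account at construction time; `Range<Duration>` is not
// `Copy`, hence the clone.
fn sample_polling_interval(rng: &mut impl Rng) -> Duration {
    rng.gen_range(POLLING_INTERVAL.clone())
}
```

Call sites that still need a single deterministic value (the SDK operation handles below) use `POLLING_INTERVAL.end`, the conservative upper bound of the range.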
@@ -14,7 +14,7 @@ pub const ETH_POLLING_INTERVAL: Duration = Duration::from_secs(10);
 pub const COMMIT_TIMEOUT: Duration = Duration::from_secs(600);
 /// We don't want to overload the server with too many requests; given the fact that blocks are expected to be created
 /// every couple of seconds, chosen value seems to be adequate to provide the result in one or two calls at average.
-pub const POLLING_INTERVAL: Duration = Duration::from_secs(3);
+pub const POLLING_INTERVAL: ops::Range<Duration> = Duration::from_secs(2)..Duration::from_secs(3);

 pub const MAX_OUTSTANDING_NONCE: usize = 20;
diff --git a/core/tests/loadnext/src/executor.rs b/core/tests/loadnext/src/executor.rs
index a573583ed318..43a1be164b64 100644
--- a/core/tests/loadnext/src/executor.rs
+++ b/core/tests/loadnext/src/executor.rs
@@ -244,7 +244,7 @@ impl Executor {
         });

         priority_op_handle
-            .polling_interval(POLLING_INTERVAL)
+            .polling_interval(POLLING_INTERVAL.end)
             .unwrap();
         priority_op_handle
             .commit_timeout(COMMIT_TIMEOUT)
@@ -313,7 +313,7 @@ impl Executor {
         });

         priority_op_handle
-            .polling_interval(POLLING_INTERVAL)
+            .polling_interval(POLLING_INTERVAL.end)
             .unwrap();
         priority_op_handle
             .commit_timeout(COMMIT_TIMEOUT)
@@ -463,7 +463,7 @@ impl Executor {
         // Wait for transactions to be committed, if at least one of them fails,
         // return error.
         for mut handle in handles {
-            handle.polling_interval(POLLING_INTERVAL).unwrap();
+            handle.polling_interval(POLLING_INTERVAL.end).unwrap();

             let result = handle
                 .commit_timeout(COMMIT_TIMEOUT)
diff --git a/core/tests/loadnext/src/sdk/ethereum/mod.rs b/core/tests/loadnext/src/sdk/ethereum/mod.rs
index 4b7bb00a3080..4557c2c43200 100644
--- a/core/tests/loadnext/src/sdk/ethereum/mod.rs
+++ b/core/tests/loadnext/src/sdk/ethereum/mod.rs
@@ -102,6 +102,7 @@ impl EthereumProvider {
         let query_client = Client::http(eth_web3_url)
             .map_err(|err| ClientError::NetworkError(err.to_string()))?
             .for_network(sl_chain_id.into())
+            .report_config(false)
             .build();
         let query_client: Box<DynClient<L1>> = Box::new(query_client);
         let eth_client = SigningClient::new(
diff --git a/core/tests/loadnext/src/sdk/wallet.rs b/core/tests/loadnext/src/sdk/wallet.rs
index 9d3bd73a9bf2..551d0d8e385f 100644
--- a/core/tests/loadnext/src/sdk/wallet.rs
+++ b/core/tests/loadnext/src/sdk/wallet.rs
@@ -45,6 +45,7 @@ where
         let client = Client::http(rpc_address)
             .map_err(|err| ClientError::NetworkError(err.to_string()))?
             .for_network(signer.chain_id.into())
+            .report_config(false)
             .build();

         Ok(Wallet {
diff --git a/deny.toml b/deny.toml
index aadb868aa394..b840ec5176e8 100644
--- a/deny.toml
+++ b/deny.toml
@@ -12,10 +12,10 @@ ignore = [
     "RUSTSEC-2022-0041", # crossbeam-utils vulnerability, dependency coming from bellman_ce
     "RUSTSEC-2024-0320", # yaml_rust dependency being unmaintained, dependency in core, we should consider moving to yaml_rust2 fork
     "RUSTSEC-2020-0168", # mach dependency being unmaintained, dependency in consensus, we should consider moving to mach2 fork
+    "RUSTSEC-2024-0370", # `cs_derive` needs to be updated to not rely on `proc-macro-error`
     # all below caused by StructOpt which we still use and we should move to clap v3 instead
     "RUSTSEC-2021-0145",
     "RUSTSEC-2021-0139",
-
 ]

 [licenses]
@@ -51,7 +51,7 @@ ignore = false
 registries = []

 [bans]
-multiple-versions = "warn"
+multiple-versions = "allow"
 wildcards = "allow"
 highlight = "all"
 workspace-default-features = "allow"
diff --git a/etc/contracts-test-data/contracts/counter/proxy_counter.sol b/etc/contracts-test-data/contracts/counter/proxy_counter.sol
index 1c1883cd4c9d..b3bbf9dda93c 100644
--- a/etc/contracts-test-data/contracts/counter/proxy_counter.sol
+++ b/etc/contracts-test-data/contracts/counter/proxy_counter.sol
@@ -13,10 +13,14 @@ contract ProxyCounter {
         counter = _counter;
     }

+    uint256 lastFarCallCost;
+
     function increment(uint256 x, uint gasToPass) public {
         while (gasleft() > gasToPass) {
             // Burn gas so that there's about `gasToPass` left before the external call.
         }
+        uint256 gasBefore = gasleft();
         counter.increment(x);
+        lastFarCallCost = gasBefore - gasleft();
     }
 }
diff --git a/etc/nix/container-tee_prover.nix b/etc/nix/container-tee_prover.nix
index 7c0d8d164e34..cb8ebfb51549 100644
--- a/etc/nix/container-tee_prover.nix
+++ b/etc/nix/container-tee_prover.nix
@@ -33,9 +33,9 @@ nixsgxLib.mkSGXContainer {
       env = {
         TEE_PROVER_API_URL.passthrough = true;
         TEE_PROVER_MAX_RETRIES.passthrough = true;
-        TEE_PROVER_INITIAL_RETRY_BACKOFF_SECONDS.passthrough = true;
+        TEE_PROVER_INITIAL_RETRY_BACKOFF_SEC.passthrough = true;
         TEE_PROVER_RETRY_BACKOFF_MULTIPLIER.passthrough = true;
-        TEE_PROVER_MAX_BACKOFF_SECONDS.passthrough = true;
+        TEE_PROVER_MAX_BACKOFF_SEC.passthrough = true;
         API_PROMETHEUS_LISTENER_PORT.passthrough = true;
         API_PROMETHEUS_PUSHGATEWAY_URL.passthrough = true;
         API_PROMETHEUS_PUSH_INTERVAL_MS.passthrough = true;
diff --git a/prover/Cargo.lock b/prover/Cargo.lock
index 3ac54b477380..c3cfada3a1a9 100644
--- a/prover/Cargo.lock
+++ b/prover/Cargo.lock
@@ -733,9 +733,9 @@ dependencies = [

 [[package]]
 name = "boojum-cuda"
-version = "0.150.4"
+version = "0.150.6"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "c861b4baec895cb8e53b10825407f0844b0eafda2ac79e7f02de95439f0f1e74"
+checksum = "252c28bc729eb32a053de0cbd1c8c55b2f51d00ca0c656f30bc70d255c2d8753"
 dependencies = [
  "boojum",
  "cmake",
@@ -1862,9 +1862,9 @@ checksum = "5443807d6dff69373d433ab9ef5378ad8df50ca6298caf15de6e52e24aaf54d5"

 [[package]]
 name = "era_cudart"
-version = "0.150.4"
+version = "0.150.6"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "4ac97d833b861e32bc0a71d0542bf5c92094f9818c52d65c695227bfa95ffbe3"
+checksum = "803be147b389086e33254a6c9fe26a0d1d21a11f9f73181cad06cf5b1beb7d16"
 dependencies = [
  "bitflags 2.6.0",
  "era_cudart_sys",
@@ -1873,9 +1873,9 @@ dependencies = [

 [[package]]
 name = "era_cudart_sys"
-version = "0.150.4"
+version = "0.150.6"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "ee6aed60cf09cb6d0b954d74351acb9beb13daab0bacad279691f6b97504b7e6"
+checksum = "49f9a3d87f3d45d11bc835e5fc78fe6e3fe243355d435f6b3e794b98df7d3323"
 dependencies = [
  "serde_json",
 ]
@@ -5580,9 +5580,9 @@ checksum = "24188a676b6ae68c3b2cb3a01be17fbf7240ce009799bb56d5b1409051e78fde"

 [[package]]
 name = "shivini"
-version = "0.150.4"
+version = "0.150.6"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "c5e5d862287bb883a4cb0bc4f8ea938ba3fdaa5e495f1a59bc3515231017a0e2"
+checksum = "331868b8d92ffec8887c17e786632cf0c9bd4750986fc1400a6d1fbf3739cba4"
 dependencies = [
  "bincode",
  "blake2 0.10.6",
@@ -7558,13 +7558,15 @@ dependencies = [

 [[package]]
 name = "zksync-gpu-ffi"
-version = "0.150.4"
+version = "0.150.6"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "82fe099f4f4a2cc8ca8ca591d7619ac00b8054f63b712fa6ceee2b84c6e04c62"
+checksum = "ae694dc0ad818e4d45af70b2cf579ff46f1ac938b42ee55543529beb45ba1464"
 dependencies = [
  "bindgen 0.59.2",
+ "cmake",
  "crossbeam 0.8.4",
  "derivative",
+ "era_cudart_sys",
  "futures 0.3.30",
  "futures-locks",
  "num_cpus",
@@ -7572,9 +7574,9 @@ dependencies = [

 [[package]]
 name = "zksync-gpu-prover"
-version = "0.150.4"
+version = "0.150.6"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "f73d27e0e4589c7445f5a22e511cb5186e2d205172ca4b26acd7a334b3af9492"
+checksum = "f8156dbaf36764409cc93424d43dc86c993601d73f5aa9a5938e6552a14dc2df"
 dependencies = [
  "bit-vec",
  "cfg-if 1.0.0",
@@ -7589,9 +7591,9 @@ dependencies = [

 [[package]]
 name = "zksync-wrapper-prover"
-version = "0.150.4"
+version = "0.150.6"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "1cf4c09adf0a84af0d7ded1fd85a2487fef4cbf1cfc1925412717d0eef03dd5a"
+checksum = "83975189451bfacfa97dbcce899fde9db15a0c072196a9b92ddfabbe756bab9d"
 dependencies = [
  "circuit_definitions",
  "zkevm_test_harness",
@@ -7618,9 +7620,9 @@ dependencies = [

 [[package]]
 name = "zksync_concurrency"
-version = "0.1.0-rc.11"
+version = "0.1.0-rc.12"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "b0e31a9fc9a390b440cd12bbe040330dc64f64697a8a8ecbc3beb98cd0747909"
+checksum = "a49ad68bfaf6fb8542c68894b68b28be31514786549855aaa8a46b36defbb100"
 dependencies = [
  "anyhow",
  "once_cell",
@@ -7654,9 +7656,9 @@ dependencies = [

 [[package]]
 name = "zksync_consensus_crypto"
-version = "0.1.0-rc.11"
+version = "0.1.0-rc.12"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "efb7ff3ec44b7b92fd4e28d9d92b83d61dc74125ccfc90bcfb27a5750d8a8580"
+checksum = "ace39bdf50b8421c4d546381fe1ecc5212f953ce61cf93d4fa69172078dbe4af"
 dependencies = [
  "anyhow",
  "blst",
@@ -7678,9 +7680,9 @@ dependencies = [

 [[package]]
 name = "zksync_consensus_roles"
-version = "0.1.0-rc.11"
+version = "0.1.0-rc.12"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "72223c0b20621775db51bcc4b043addafeaf784d444af2ad4bc8bcdee477367c"
+checksum = "06277266e31efdc1465f6a27ce96c7435392a270978a91956b8a848732df2cfa"
 dependencies = [
  "anyhow",
  "bit-vec",
@@ -7700,9 +7702,9 @@ dependencies = [

 [[package]]
 name = "zksync_consensus_storage"
-version = "0.1.0-rc.11"
+version = "0.1.0-rc.12"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "41d1750ad93f7e3a0c2f5880f9bcc1244a3b46d3e6c124c4f65f545032b87464"
+checksum = "9099b2295f550176d824b5287f2f31b7739c4d10247faec1132f1c6e9d18059c"
 dependencies = [
  "anyhow",
  "async-trait",
@@ -7720,9 +7722,9 @@ dependencies = [

 [[package]]
 name = "zksync_consensus_utils"
-version = "0.1.0-rc.11"
+version = "0.1.0-rc.12"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "2ff679f8b5f671d887a750b8107f3b5c01fd6085f68eef37ab01de8d2bd0736b"
+checksum = "4d624f55e2449f43b2c85588b5dd2a28b3c5ea629effc89df76e3254f8d9d2fb"
 dependencies = [
  "anyhow",
  "rand 0.8.5",
@@ -8032,9 +8034,9 @@ dependencies = [

 [[package]]
 name = "zksync_protobuf"
-version = "0.1.0-rc.11"
+version = "0.1.0-rc.12"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "f4f6ba3bf0aac20de18b4ae18a22d8c81b83f8f72e8fdec1c879525ecdacd2f5"
+checksum = "d26fb2beb3aeafb5e9babf1acf6494662cc7157b893fa248bd151494f931d07f"
 dependencies = [
  "anyhow",
  "bit-vec",
@@ -8053,9 +8055,9 @@ dependencies = [

 [[package]]
 name = "zksync_protobuf_build"
-version = "0.1.0-rc.11"
+version = "0.1.0-rc.12"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "7798c248b9a64505f0586bd5fadad6b26c999be4a8dec6b1a86b10b3888169c5"
+checksum = "58e86c198e056d921b4f3f1d2755c23d090e942b5a70b03bcb7e7c02445aa491"
 dependencies = [
  "anyhow",
  "heck 0.5.0",
@@ -8233,10 +8235,8 @@ dependencies = [
  "shivini",
  "tracing",
  "zkevm_test_harness",
- "zksync_config",
- "zksync_env_config",
+ "zksync_basic_types",
  "zksync_prover_fri_types",
- "zksync_types",
  "zksync_utils",
 ]
diff --git a/prover/Cargo.toml b/prover/Cargo.toml
index 8d87b727f906..403314cc13ca 100644
--- a/prover/Cargo.toml
+++ b/prover/Cargo.toml
@@ -61,8 +61,8 @@ circuit_sequencer_api = "=0.150.4"
 zkevm_test_harness = "=0.150.4"

 # GPU proving dependencies
-wrapper_prover = { package = "zksync-wrapper-prover", version = "=0.150.4" }
-shivini = "=0.150.4"
+wrapper_prover = { package = "zksync-wrapper-prover", version = "=0.150.6" }
+shivini = "=0.150.6"

 # Core workspace dependencies
 zksync_multivm = { path = "../core/lib/multivm", version = "0.1.0" }
diff --git a/prover/crates/bin/proof_fri_compressor/src/compressor.rs b/prover/crates/bin/proof_fri_compressor/src/compressor.rs
index c7747b2e45bd..077347bce9be 100644
--- a/prover/crates/bin/proof_fri_compressor/src/compressor.rs
+++ b/prover/crates/bin/proof_fri_compressor/src/compressor.rs
@@ -35,7 +35,7 @@ pub struct ProofCompressor {
     compression_mode: u8,
     max_attempts: u32,
     protocol_version: ProtocolSemanticVersion,
-    setup_data_path: String,
+    keystore: Keystore,
 }

 impl ProofCompressor {
@@ -45,7 +45,7 @@ impl ProofCompressor {
         compression_mode: u8,
         max_attempts: u32,
         protocol_version: ProtocolSemanticVersion,
-        setup_data_path: String,
+        keystore: Keystore,
     ) -> Self {
         Self {
             blob_store,
@@ -53,7 +53,7 @@ impl ProofCompressor {
             compression_mode,
             max_attempts,
             protocol_version,
-            setup_data_path,
+            keystore,
         }
     }

@@ -62,9 +62,8 @@ impl ProofCompressor {
         l1_batch: L1BatchNumber,
         proof: ZkSyncRecursionLayerProof,
         _compression_mode: u8,
-        setup_data_path: String,
+        keystore: Keystore,
     ) -> anyhow::Result<FinalProof> {
-        let keystore = Keystore::new_with_setup_data_path(setup_data_path);
         let scheduler_vk = keystore
             .load_recursive_layer_verification_key(
                 ZkSyncRecursionLayerStorageType::SchedulerCircuit as u8,
@@ -178,9 +177,9 @@ impl JobProcessor for ProofCompressor {
     ) -> JoinHandle<anyhow::Result<Self::JobArtifacts>> {
         let compression_mode = self.compression_mode;
         let block_number = *job_id;
-        let setup_data_path = self.setup_data_path.clone();
+        let keystore = self.keystore.clone();
         tokio::task::spawn_blocking(move || {
-            Self::compress_proof(block_number, job, compression_mode, setup_data_path)
+            Self::compress_proof(block_number, job, compression_mode, keystore)
         })
     }
diff --git a/prover/crates/bin/proof_fri_compressor/src/main.rs b/prover/crates/bin/proof_fri_compressor/src/main.rs
index e2086b228b69..f06b4b8f89e5 100644
--- a/prover/crates/bin/proof_fri_compressor/src/main.rs
+++ b/prover/crates/bin/proof_fri_compressor/src/main.rs
@@ -11,6 +11,7 @@ use zksync_env_config::object_store::ProverObjectStoreConfig;
 use zksync_object_store::ObjectStoreFactory;
 use zksync_prover_dal::{ConnectionPool, Prover};
 use zksync_prover_fri_types::PROVER_PROTOCOL_SEMANTIC_VERSION;
+use zksync_prover_keystore::keystore::Keystore;
 use zksync_queued_job_processor::JobProcessor;
 use zksync_utils::wait_for_tasks::ManagedTasks;
 use zksync_vlog::prometheus::PrometheusExporterConfig;
@@ -70,16 +71,18 @@ async fn main() -> anyhow::Result<()> {

     let protocol_version = PROVER_PROTOCOL_SEMANTIC_VERSION;

+    let prover_config = general_config
+        .prover_config
+        .expect("ProverConfig doesn't exist");
+    let keystore =
+        Keystore::locate().with_setup_path(Some(prover_config.setup_data_path.clone().into()));
     let proof_compressor = ProofCompressor::new(
         blob_store,
         pool,
         config.compression_mode,
         config.max_attempts,
         protocol_version,
-        general_config
-            .prover_config
-            .expect("ProverConfig doesn't exist")
-            .setup_data_path,
+        keystore,
     );

     let (stop_sender, stop_receiver) = watch::channel(false);
diff --git a/prover/crates/bin/prover_cli/src/commands/insert_version.rs b/prover/crates/bin/prover_cli/src/commands/insert_version.rs
index 7f30719a713b..e89d2024e26f 100644
--- a/prover/crates/bin/prover_cli/src/commands/insert_version.rs
+++ b/prover/crates/bin/prover_cli/src/commands/insert_version.rs
@@ -35,7 +35,7 @@ pub async fn run(args: Args, config: ProverCLIConfig) -> anyhow::Result<()> {

     let protocol_version_patch = VersionPatch(args.patch);

-    let snark_wrapper = H256::from_str(&args.snark_wrapper).unwrap_or_else(|_| {
+    let snark_wrapper_vk_hash = H256::from_str(&args.snark_wrapper).unwrap_or_else(|_| {
         panic!("Invalid snark wrapper hash");
     });

@@ -43,7 +43,7 @@ pub async fn run(args: Args, config: ProverCLIConfig) -> anyhow::Result<()> {
         .save_prover_protocol_version(
             ProtocolSemanticVersion::new(protocol_version, protocol_version_patch),
             L1VerifierConfig {
-                recursion_scheduler_level_vk_hash: snark_wrapper,
+                snark_wrapper_vk_hash,
             },
         )
         .await;
diff --git a/prover/crates/bin/prover_cli/src/commands/status/l1.rs b/prover/crates/bin/prover_cli/src/commands/status/l1.rs
index 16cecc103828..4b403215e9c2 100644
--- a/prover/crates/bin/prover_cli/src/commands/status/l1.rs
+++ b/prover/crates/bin/prover_cli/src/commands/status/l1.rs
@@ -78,7 +78,7 @@ pub(crate) async fn run() -> anyhow::Result<()> {
         .await?;

     let node_l1_verifier_config = L1VerifierConfig {
-        recursion_scheduler_level_vk_hash: node_verification_key_hash,
+        snark_wrapper_vk_hash: node_verification_key_hash,
     };

     let prover_connection_pool = ConnectionPool::<Prover>::builder(
@@ -149,7 +149,7 @@ fn pretty_print_l1_verifier_config(
 ) {
     print_hash_comparison(
         "Verifier key",
-        node_l1_verifier_config.recursion_scheduler_level_vk_hash,
-        db_l1_verifier_config.recursion_scheduler_level_vk_hash,
+        node_l1_verifier_config.snark_wrapper_vk_hash,
+        db_l1_verifier_config.snark_wrapper_vk_hash,
     );
 }
diff --git a/prover/crates/bin/prover_fri/src/gpu_prover_job_processor.rs b/prover/crates/bin/prover_fri/src/gpu_prover_job_processor.rs
index 63981fa6c7d6..240251df15bf 100644
--- a/prover/crates/bin/prover_fri/src/gpu_prover_job_processor.rs
+++ b/prover/crates/bin/prover_fri/src/gpu_prover_job_processor.rs
@@ -5,6 +5,7 @@ pub mod gpu_prover {
     use anyhow::Context as _;
     use shivini::{
         gpu_proof_config::GpuProofConfig, gpu_prove_from_external_witness_data, ProverContext,
+        ProverContextConfig,
     };
     use tokio::task::JoinHandle;
     use zksync_config::configs::{fri_prover_group::FriProverGroupConfig, FriProverConfig};
@@ -54,6 +55,7 @@ pub mod gpu_prover {

     #[allow(dead_code)]
     pub struct Prover {
+        keystore: Keystore,
         blob_store: Arc<dyn ObjectStore>,
         public_blob_store: Option<Arc<dyn ObjectStore>>,
         config: Arc<FriProverConfig>,
@@ -72,6 +74,7 @@ pub mod gpu_prover {
     impl Prover {
         #[allow(dead_code)]
         pub fn new(
+            keystore: Keystore,
             blob_store: Arc<dyn ObjectStore>,
             public_blob_store: Option<Arc<dyn ObjectStore>>,
             config: FriProverConfig,
@@ -82,8 +85,17 @@ pub mod gpu_prover {
             address: SocketAddress,
             zone: Zone,
             protocol_version: ProtocolSemanticVersion,
+            max_allocation: Option<usize>,
         ) -> Self {
+            let prover_context = match max_allocation {
+                Some(max_allocation) => ProverContext::create_with_config(
+                    ProverContextConfig::default().with_maximum_device_allocation(max_allocation),
+                )
+                .expect("failed initializing gpu prover context"),
+                None => ProverContext::create().expect("failed initializing gpu prover context"),
+            };
             Prover {
+                keystore,
                 blob_store,
                 public_blob_store,
                 config: Arc::new(config),
@@ -91,8 +103,7 @@ pub mod gpu_prover {
                 setup_load_mode,
                 circuit_ids_for_round_to_be_proven,
                 witness_vector_queue,
-                prover_context: ProverContext::create()
-                    .expect("failed initializing gpu prover context"),
+                prover_context,
                 address,
                 zone,
                 protocol_version,
@@ -112,9 +123,8 @@ pub mod gpu_prover {
                     .clone(),
                 SetupLoadMode::FromDisk => {
                     let started_at = Instant::now();
-                    let keystore =
-                        Keystore::new_with_setup_data_path(self.config.setup_data_path.clone());
-                    let artifact: GoldilocksGpuProverSetupData = keystore
+                    let artifact: GoldilocksGpuProverSetupData = self
+                        .keystore
                         .load_gpu_setup_data_for_circuit_type(key.clone())
                         .context("load_gpu_setup_data_for_circuit_type()")?;

@@ -173,8 +183,11 @@ pub mod gpu_prover {
                 (),
                 &worker,
             )
-            .unwrap_or_else(|_| {
-                panic!("failed generating GPU proof for id: {}", prover_job.job_id)
+            .unwrap_or_else(|err| {
+                panic!(
+                    "failed generating GPU proof for id: {}, error: {:?}",
+                    prover_job.job_id, err
+                )
             });
             tracing::info!(
                 "Successfully generated gpu proof for job {} took: {:?}",
@@ -328,7 +341,10 @@ pub mod gpu_prover {
         }
     }

-    pub fn load_setup_data_cache(config: &FriProverConfig) -> anyhow::Result<SetupLoadMode> {
+    pub fn load_setup_data_cache(
+        keystore: &Keystore,
+        config: &FriProverConfig,
+    ) -> anyhow::Result<SetupLoadMode> {
         Ok(match config.setup_load_mode {
             zksync_config::configs::fri_prover::SetupLoadMode::FromDisk => SetupLoadMode::FromDisk,
             zksync_config::configs::fri_prover::SetupLoadMode::FromMemory => {
@@ -348,7 +364,6 @@ pub mod gpu_prover {
                     &config.specialized_group_id,
                     prover_setup_metadata_list
                 );
-                let keystore = Keystore::new_with_setup_data_path(config.setup_data_path.clone());
                 for prover_setup_metadata in prover_setup_metadata_list {
                     let key = setup_metadata_to_setup_data_key(&prover_setup_metadata);
                     let setup_data = keystore
diff --git a/prover/crates/bin/prover_fri/src/main.rs b/prover/crates/bin/prover_fri/src/main.rs
index db813394c194..8191653efec6 100644
--- a/prover/crates/bin/prover_fri/src/main.rs
+++ b/prover/crates/bin/prover_fri/src/main.rs
@@ -139,6 +139,7 @@ async fn main() -> anyhow::Result<()> {
             public_blob_store,
             pool,
             circuit_ids_for_round_to_be_proven,
+            opt.max_allocation,
             notify,
         )
         .await
@@ -178,8 +179,11 @@ async fn get_prover_tasks(
     public_blob_store: Option<Arc<dyn ObjectStore>>,
     pool: ConnectionPool<Prover>,
     circuit_ids_for_round_to_be_proven: Vec<CircuitIdRoundTuple>,
+    _max_allocation: Option<usize>,
     _init_notifier: Arc<Notify>,
 ) -> anyhow::Result<Vec<JoinHandle<anyhow::Result<()>>>> {
+    use zksync_prover_keystore::keystore::Keystore;
+
     use crate::prover_job_processor::{load_setup_data_cache, Prover};

     let protocol_version = PROVER_PROTOCOL_SEMANTIC_VERSION;
@@ -189,12 +193,15 @@ async fn get_prover_tasks(
         protocol_version
     );

+    let keystore =
+        Keystore::locate().with_setup_path(Some(prover_config.setup_data_path.clone().into()));
     let setup_load_mode =
-        load_setup_data_cache(&prover_config).context("load_setup_data_cache()")?;
+        load_setup_data_cache(&keystore, &prover_config).context("load_setup_data_cache()")?;
     let prover = Prover::new(
         store_factory.create_store().await?,
         public_blob_store,
         prover_config,
+        keystore,
         pool,
         setup_load_mode,
         circuit_ids_for_round_to_be_proven,
@@ -213,15 +220,19 @@ async fn get_prover_tasks(
     public_blob_store: Option<Arc<dyn ObjectStore>>,
     pool: ConnectionPool<Prover>,
     circuit_ids_for_round_to_be_proven: Vec<CircuitIdRoundTuple>,
+    max_allocation: Option<usize>,
     init_notifier: Arc<Notify>,
 ) -> anyhow::Result<Vec<JoinHandle<anyhow::Result<()>>>> {
     use gpu_prover_job_processor::gpu_prover;
     use socket_listener::gpu_socket_listener;
     use tokio::sync::Mutex;
     use zksync_prover_fri_types::queue::FixedSizeQueue;
+    use zksync_prover_keystore::keystore::Keystore;

-    let setup_load_mode =
-        gpu_prover::load_setup_data_cache(&prover_config).context("load_setup_data_cache()")?;
+    let keystore =
+        Keystore::locate().with_setup_path(Some(prover_config.setup_data_path.clone().into()));
+    let setup_load_mode = gpu_prover::load_setup_data_cache(&keystore, &prover_config)
+        .context("load_setup_data_cache()")?;
     let witness_vector_queue = FixedSizeQueue::new(prover_config.queue_capacity);
     let shared_witness_vector_queue = Arc::new(Mutex::new(witness_vector_queue));
     let consumer = shared_witness_vector_queue.clone();
@@ -235,6 +246,7 @@ async fn get_prover_tasks(
     let protocol_version = PROVER_PROTOCOL_SEMANTIC_VERSION;

     let prover = gpu_prover::Prover::new(
+        keystore,
         store_factory.create_store().await?,
         public_blob_store,
         prover_config.clone(),
@@ -245,6 +257,7 @@ async fn get_prover_tasks(
         address.clone(),
         zone.clone(),
         protocol_version,
+        max_allocation,
     );

     let producer = shared_witness_vector_queue.clone();
@@ -295,4 +308,6 @@ pub(crate) struct Cli {
     pub(crate) config_path: Option<std::path::PathBuf>,
     #[arg(long)]
     pub(crate) secrets_path: Option<std::path::PathBuf>,
+    #[arg(long)]
+    pub(crate) max_allocation: Option<usize>,
 }
diff --git a/prover/crates/bin/prover_fri/src/prover_job_processor.rs b/prover/crates/bin/prover_fri/src/prover_job_processor.rs
index 4de11a68b534..bbfb1d5a8322 100644
--- a/prover/crates/bin/prover_fri/src/prover_job_processor.rs
+++ b/prover/crates/bin/prover_fri/src/prover_job_processor.rs
@@ -43,6 +43,7 @@ pub struct Prover {
     blob_store: Arc<dyn ObjectStore>,
     public_blob_store: Option<Arc<dyn ObjectStore>>,
     config: Arc<FriProverConfig>,
+    keystore: Keystore,
     prover_connection_pool: ConnectionPool<Prover>,
     setup_load_mode: SetupLoadMode,
     // Only pick jobs for the configured circuit id and aggregation rounds.
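As in the GPU prover above, the CPU `Prover` now stores an injected `Keystore` instead of rebuilding one from `config.setup_data_path` on every setup-data load. A condensed sketch of the wiring used by the binaries in this diff (`FriProverConfig` as in `zksync_config`):

```rust
use zksync_config::configs::FriProverConfig;
use zksync_prover_keystore::keystore::Keystore;

// Build the keystore once from config; clones are cheap (it only holds two
// `PathBuf`s), so it can be handed to every component and into blocking tasks.
fn keystore_from_config(prover_config: &FriProverConfig) -> Keystore {
    Keystore::locate().with_setup_path(Some(prover_config.setup_data_path.clone().into()))
}
```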
@@ -52,11 +53,12 @@
 }

 impl Prover {
-    #[allow(dead_code)]
+    #[allow(dead_code, clippy::too_many_arguments)]
     pub fn new(
         blob_store: Arc<dyn ObjectStore>,
         public_blob_store: Option<Arc<dyn ObjectStore>>,
         config: FriProverConfig,
+        keystore: Keystore,
         prover_connection_pool: ConnectionPool<Prover>,
         setup_load_mode: SetupLoadMode,
         circuit_ids_for_round_to_be_proven: Vec<CircuitIdRoundTuple>,
@@ -66,6 +68,7 @@ impl Prover {
             blob_store,
             public_blob_store,
             config: Arc::new(config),
+            keystore,
             prover_connection_pool,
             setup_load_mode,
             circuit_ids_for_round_to_be_proven,
@@ -85,9 +88,8 @@ impl Prover {
                 .clone(),
             SetupLoadMode::FromDisk => {
                 let started_at = Instant::now();
-                let keystore =
-                    Keystore::new_with_setup_data_path(self.config.setup_data_path.clone());
-                let artifact: GoldilocksProverSetupData = keystore
+                let artifact: GoldilocksProverSetupData = self
+                    .keystore
                     .load_cpu_setup_data_for_circuit_type(key.clone())
                     .context("get_cpu_setup_data_for_circuit_type()")?;
                 METRICS.gpu_setup_data_load_time[&key.circuit_id.to_string()]
@@ -279,7 +281,10 @@ impl JobProcessor for Prover {
 }

 #[allow(dead_code)]
-pub fn load_setup_data_cache(config: &FriProverConfig) -> anyhow::Result<SetupLoadMode> {
+pub fn load_setup_data_cache(
+    keystore: &Keystore,
+    config: &FriProverConfig,
+) -> anyhow::Result<SetupLoadMode> {
     Ok(match config.setup_load_mode {
         zksync_config::configs::fri_prover::SetupLoadMode::FromDisk => SetupLoadMode::FromDisk,
         zksync_config::configs::fri_prover::SetupLoadMode::FromMemory => {
@@ -299,7 +304,6 @@ pub fn load_setup_data_cache(config: &FriProverConfig) -> anyhow::Result
             base.clone(),
         _ => anyhow::bail!("Expected base layer circuit"),
     };
-    let keystore = Keystore::default();
+    let keystore = Keystore::locate();
     let circuit_setup_data = generate_setup_data_common(
         &keystore,
         ProverServiceDataKey::new_basic(circuit.numeric_circuit_type()),
diff --git a/prover/crates/bin/vk_setup_data_generator_server_fri/src/commitment_generator.rs b/prover/crates/bin/vk_setup_data_generator_server_fri/src/commitment_generator.rs
index 8c2a17590099..f92be40fd7cc 100644
--- a/prover/crates/bin/vk_setup_data_generator_server_fri/src/commitment_generator.rs
+++ b/prover/crates/bin/vk_setup_data_generator_server_fri/src/commitment_generator.rs
@@ -1,5 +1,5 @@
 use anyhow::Context;
-use zksync_prover_keystore::{commitment_utils::generate_commitments, keystore::Keystore};
+use zksync_prover_keystore::keystore::Keystore;

 use crate::vk_commitment_helper::{
     get_toml_formatted_value, read_contract_toml, write_contract_toml,
@@ -7,7 +7,9 @@ use crate::vk_commitment_helper::{

 pub fn read_and_update_contract_toml(keystore: &Keystore, dryrun: bool) -> anyhow::Result<()> {
     let mut contract_doc = read_contract_toml().context("read_contract_toml()")?;
-    let vk_commitments = generate_commitments(keystore).context("generate_commitments()")?;
+    let vk_commitments = keystore
+        .generate_commitments()
+        .context("generate_commitments()")?;

     contract_doc["contracts"]["FRI_RECURSION_LEAF_LEVEL_VK_HASH"] =
         get_toml_formatted_value(vk_commitments.leaf);
@@ -32,6 +34,6 @@ mod test {

     #[test]
     fn test_read_and_update_contract_toml() {
-        read_and_update_contract_toml(&Keystore::default(), true).unwrap();
+        read_and_update_contract_toml(&Keystore::locate(), true).unwrap();
     }
 }
diff --git a/prover/crates/bin/vk_setup_data_generator_server_fri/src/main.rs b/prover/crates/bin/vk_setup_data_generator_server_fri/src/main.rs
index 313678bc5da8..59d989037c4b 100644
--- a/prover/crates/bin/vk_setup_data_generator_server_fri/src/main.rs
+++ b/prover/crates/bin/vk_setup_data_generator_server_fri/src/main.rs
@@ -1,7 +1,7 @@
 //! Tool to generate different types of keys used by the proving system.
 //!
 //! It can generate verification keys, setup keys, and also commitments.
-use std::collections::HashMap;
+use std::{collections::HashMap, path::PathBuf};

 use anyhow::Context as _;
 use clap::{Parser, Subcommand};
@@ -24,7 +24,6 @@ use zksync_prover_fri_types::{
     ProverServiceDataKey,
 };
 use zksync_prover_keystore::{
-    commitment_utils::generate_commitments,
     keystore::Keystore,
     setup_data_generator::{CPUSetupDataGenerator, GPUSetupDataGenerator, SetupDataGenerator},
 };
@@ -98,7 +97,8 @@ fn generate_vks(keystore: &Keystore, jobs: usize, quiet: bool) -> anyhow::Result<()> {
     }

     // Let's also update the commitments file.
-    keystore.save_commitments(&generate_commitments(keystore)?)
+    let commitments = keystore.generate_commitments()?;
+    keystore.save_commitments(&commitments)
 }

 #[derive(Debug, Parser)]
@@ -196,14 +196,14 @@ fn print_stats(digests: HashMap<String, String>) -> anyhow::Result<()> {
     Ok(())
 }

-fn keystore_from_optional_path(path: Option<String>, setup_path: Option<String>) -> Keystore {
+fn keystore_from_optional_path(path: Option<String>, setup_data_path: Option<String>) -> Keystore {
     if let Some(path) = path {
-        return Keystore::new_with_optional_setup_path(path.into(), setup_path);
+        return Keystore::new(path.into()).with_setup_path(setup_data_path.map(PathBuf::from));
     }
-    if setup_path.is_some() {
+    if setup_data_path.is_some() {
         panic!("--setup_path must not be set when --path is not set");
     }
-    Keystore::default()
+    Keystore::locate()
 }

 fn generate_setup_keys(
diff --git a/prover/crates/bin/vk_setup_data_generator_server_fri/src/tests.rs b/prover/crates/bin/vk_setup_data_generator_server_fri/src/tests.rs
index d704f4e8fb60..0a9548197fd7 100644
--- a/prover/crates/bin/vk_setup_data_generator_server_fri/src/tests.rs
+++ b/prover/crates/bin/vk_setup_data_generator_server_fri/src/tests.rs
@@ -36,21 +36,21 @@ fn all_possible_prover_service_data_key() -> impl Strategy
     object_store: Arc<dyn ObjectStore>,
     prover_connection_pool: ConnectionPool<Prover>,
     protocol_version: ProtocolSemanticVersion,
-    setup_data_path: String,
+    keystore: Keystore,
 }

 impl LeafAggregationWitnessGenerator {
@@ -81,14 +81,14 @@ impl LeafAggregationWitnessGenerator {
         object_store: Arc<dyn ObjectStore>,
         prover_connection_pool: ConnectionPool<Prover>,
         protocol_version: ProtocolSemanticVersion,
-        setup_data_path: String,
+        keystore: Keystore,
     ) -> Self {
         Self {
             config,
             object_store,
             prover_connection_pool,
             protocol_version,
-            setup_data_path,
+            keystore,
         }
     }

@@ -134,13 +134,9 @@ impl JobProcessor for LeafAggregationWitnessGenerator {
         tracing::info!("Processing leaf aggregation job {:?}", metadata.id);
         Ok(Some((
             metadata.id,
-            prepare_leaf_aggregation_job(
-                metadata,
-                &*self.object_store,
-                self.setup_data_path.clone(),
-            )
-            .await
-            .context("prepare_leaf_aggregation_job()")?,
+            prepare_leaf_aggregation_job(metadata, &*self.object_store, self.keystore.clone())
+                .await
+                .context("prepare_leaf_aggregation_job()")?,
         )))
     }

@@ -226,7 +222,7 @@ impl JobProcessor for LeafAggregationWitnessGenerator {
 pub async fn prepare_leaf_aggregation_job(
     metadata: LeafAggregationJobMetadata,
     object_store: &dyn ObjectStore,
-    setup_data_path: String,
+    keystore: Keystore,
 ) -> anyhow::Result<LeafAggregationWitnessGeneratorJob> {
     let started_at = Instant::now();
     let closed_form_input = get_artifacts(&metadata, object_store).await;
@@ -235,7 +231,6 @@ pub async fn prepare_leaf_aggregation_job(
         .observe(started_at.elapsed());

     let started_at = Instant::now();
-    let keystore = Keystore::new_with_setup_data_path(setup_data_path);
     let base_vk = keystore
         .load_base_layer_verification_key(metadata.circuit_id)
         .context("get_base_layer_vk_for_circuit_type()")?;
diff --git a/prover/crates/bin/witness_generator/src/main.rs b/prover/crates/bin/witness_generator/src/main.rs
index 0e304b46cf74..9d75d8ddc6f1 100644
--- a/prover/crates/bin/witness_generator/src/main.rs
+++ b/prover/crates/bin/witness_generator/src/main.rs
@@ -14,9 +14,9 @@ use zksync_env_config::object_store::ProverObjectStoreConfig;
 use zksync_object_store::ObjectStoreFactory;
 use zksync_prover_dal::{ConnectionPool, Prover, ProverDal};
 use zksync_prover_fri_types::PROVER_PROTOCOL_SEMANTIC_VERSION;
-use zksync_prover_keystore::commitment_utils::get_cached_commitments;
+use zksync_prover_keystore::keystore::Keystore;
 use zksync_queued_job_processor::JobProcessor;
-use zksync_types::basic_fri_types::AggregationRound;
+use zksync_types::{basic_fri_types::AggregationRound, protocol_version::ProtocolSemanticVersion};
 use zksync_utils::wait_for_tasks::ManagedTasks;
 use zksync_vlog::prometheus::PrometheusExporterConfig;
 use zksync_witness_generator::{
@@ -54,6 +54,41 @@ struct Opt {
     secrets_path: Option<PathBuf>,
 }

+/// Checks if the configuration locally matches the one in the database.
+/// This function recalculates the commitment in order to check the exact code that
+/// will run, instead of loading `commitments.json` (which also may correct misaligned
+/// information).
+async fn ensure_protocol_alignment(
+    prover_pool: &ConnectionPool<Prover>,
+    protocol_version: ProtocolSemanticVersion,
+    keystore: &Keystore,
+) -> anyhow::Result<()> {
+    tracing::info!("Verifying protocol alignment for {:?}", protocol_version);
+    let vk_commitments_in_db = match prover_pool
+        .connection()
+        .await
+        .unwrap()
+        .fri_protocol_versions_dal()
+        .vk_commitments_for(protocol_version)
+        .await
+    {
+        Some(commitments) => commitments,
+        None => {
+            panic!(
+                "No vk commitments available in database for a protocol version {:?}.",
+                protocol_version
+            );
+        }
+    };
+    let scheduler_vk_hash = vk_commitments_in_db.snark_wrapper_vk_hash;
+    keystore
+        .verify_scheduler_vk_hash(scheduler_vk_hash)
+        .with_context(||
+            format!("VK commitments didn't match commitments from DB for protocol version {protocol_version:?}")
+        )?;
+    Ok(())
+}
+
 #[tokio::main]
 async fn main() -> anyhow::Result<()> {
     let opt = Opt::from_args();
@@ -82,6 +117,8 @@ async fn main() -> anyhow::Result<()> {
         .witness_generator_config
         .context("witness generator config")?
         .clone();
+    let keystore =
+        Keystore::locate().with_setup_path(Some(prover_config.setup_data_path.clone().into()));

     let prometheus_config = general_config.prometheus_config.clone();
@@ -103,22 +140,9 @@ async fn main() -> anyhow::Result<()> {
     let (stop_sender, stop_receiver) = watch::channel(false);

     let protocol_version = PROVER_PROTOCOL_SEMANTIC_VERSION;

-    let vk_commitments_in_db = match prover_connection_pool
-        .connection()
-        .await
-        .unwrap()
-        .fri_protocol_versions_dal()
-        .vk_commitments_for(protocol_version)
+    ensure_protocol_alignment(&prover_connection_pool, protocol_version, &keystore)
         .await
-    {
-        Some(commitments) => commitments,
-        None => {
-            panic!(
-                "No vk commitments available in database for a protocol version {:?}.",
-                protocol_version
-            );
-        }
-    };
+        .unwrap_or_else(|err| panic!("Protocol alignment check failed: {:?}", err));

     let rounds = match (opt.round, opt.all_rounds) {
         (Some(round), false) => vec![round],
@@ -159,8 +183,6 @@ async fn main() -> anyhow::Result<()> {
     let mut tasks = Vec::new();
     tasks.push(tokio::spawn(prometheus_task));

-    let setup_data_path = prover_config.setup_data_path.clone();
-
     for round in rounds {
         tracing::info!(
             "initializing the {:?} witness generator, batch size: {:?} with protocol_version: {:?}",
@@ -171,16 +193,6 @@ async fn main() -> anyhow::Result<()> {

         let witness_generator_task = match round {
             AggregationRound::BasicCircuits => {
-                let start = Instant::now();
-                let vk_commitments = get_cached_commitments(Some(setup_data_path.clone()));
-                let end = start.elapsed();
-                tracing::info!("Calculating commitment took: {:?}", end);
-                assert_eq!(
-                    vk_commitments,
-                    vk_commitments_in_db,
-                    "VK commitments didn't match commitments from DB for protocol version {protocol_version:?}. Cached commitments: {vk_commitments:?}, commitments in database: {vk_commitments_in_db:?}"
-                );
-
                 let public_blob_store = match config.shall_save_to_public_bucket {
                     false => None,
                     true => Some(
@@ -209,7 +221,7 @@ async fn main() -> anyhow::Result<()> {
                     store_factory.create_store().await?,
                     prover_connection_pool.clone(),
                     protocol_version,
-                    setup_data_path.clone(),
+                    keystore.clone(),
                 );
                 generator.run(stop_receiver.clone(), opt.batch_size)
             }
@@ -219,7 +231,7 @@ async fn main() -> anyhow::Result<()> {
                     store_factory.create_store().await?,
                     prover_connection_pool.clone(),
                     protocol_version,
-                    setup_data_path.clone(),
+                    keystore.clone(),
                 );
                 generator.run(stop_receiver.clone(), opt.batch_size)
             }
@@ -229,7 +241,7 @@ async fn main() -> anyhow::Result<()> {
                     store_factory.create_store().await?,
                     prover_connection_pool.clone(),
                     protocol_version,
-                    setup_data_path.clone(),
+                    keystore.clone(),
                 );
                 generator.run(stop_receiver.clone(), opt.batch_size)
             }
@@ -239,7 +251,7 @@ async fn main() -> anyhow::Result<()> {
                     store_factory.create_store().await?,
                     prover_connection_pool.clone(),
                     protocol_version,
-                    setup_data_path.clone(),
+                    keystore.clone(),
                 );
                 generator.run(stop_receiver.clone(), opt.batch_size)
             }
diff --git a/prover/crates/bin/witness_generator/src/node_aggregation.rs b/prover/crates/bin/witness_generator/src/node_aggregation.rs
index 87835d79e13f..72bdebde572a 100644
--- a/prover/crates/bin/witness_generator/src/node_aggregation.rs
+++ b/prover/crates/bin/witness_generator/src/node_aggregation.rs
@@ -70,7 +70,7 @@ pub struct NodeAggregationWitnessGenerator {
     object_store: Arc<dyn ObjectStore>,
     prover_connection_pool: ConnectionPool<Prover>,
     protocol_version: ProtocolSemanticVersion,
-    setup_data_path: String,
+    keystore: Keystore,
 }

 impl NodeAggregationWitnessGenerator {
@@ -79,14 +79,14 @@ impl NodeAggregationWitnessGenerator {
         object_store: Arc<dyn ObjectStore>,
         prover_connection_pool: ConnectionPool<Prover>,
         protocol_version: ProtocolSemanticVersion,
-        setup_data_path: String,
+        keystore: Keystore,
     ) -> Self {
         Self {
             config,
             object_store,
             prover_connection_pool,
             protocol_version,
-            setup_data_path,
+            keystore,
         }
     }

@@ -244,7 +244,7 @@ impl JobProcessor for NodeAggregationWitnessGenerator {
         tracing::info!("Processing node aggregation job {:?}", metadata.id);
         Ok(Some((
             metadata.id,
-            prepare_job(metadata, &*self.object_store, self.setup_data_path.clone())
+            prepare_job(metadata, &*self.object_store, self.keystore.clone())
                 .await
                 .context("prepare_job()")?,
         )))
@@ -329,7 +329,7 @@ impl JobProcessor for NodeAggregationWitnessGenerator {
 pub async fn prepare_job(
     metadata: NodeAggregationJobMetadata,
     object_store: &dyn ObjectStore,
-    setup_data_path: String,
+    keystore: Keystore,
 ) -> anyhow::Result<NodeAggregationWitnessGeneratorJob> {
     let started_at = Instant::now();
     let artifacts = get_artifacts(&metadata, object_store).await;
@@ -338,7 +338,6 @@ pub async fn prepare_job(
         .observe(started_at.elapsed());

     let started_at = Instant::now();
-    let keystore = Keystore::new_with_setup_data_path(setup_data_path);
     let leaf_vk = keystore
         .load_recursive_layer_verification_key(metadata.circuit_id)
         .context("get_recursive_layer_vk_for_circuit_type")?;
diff --git a/prover/crates/bin/witness_generator/src/recursion_tip.rs b/prover/crates/bin/witness_generator/src/recursion_tip.rs
index c04959b98952..5e97631babb9 100644
--- a/prover/crates/bin/witness_generator/src/recursion_tip.rs
+++ b/prover/crates/bin/witness_generator/src/recursion_tip.rs
@@ -75,7 +75,7 @@ pub struct RecursionTipWitnessGenerator {
     object_store: Arc<dyn ObjectStore>,
     prover_connection_pool: ConnectionPool<Prover>,
     protocol_version: ProtocolSemanticVersion,
-    setup_data_path: String,
+    keystore: Keystore,
 }

 impl RecursionTipWitnessGenerator {
@@ -84,14 +84,14 @@ impl RecursionTipWitnessGenerator {
         object_store: Arc<dyn ObjectStore>,
         prover_connection_pool: ConnectionPool<Prover>,
         protocol_version: ProtocolSemanticVersion,
-        setup_data_path: String,
+        keystore: Keystore,
     ) -> Self {
         Self {
             config,
             object_store,
             prover_connection_pool,
             protocol_version,
-            setup_data_path,
+            keystore,
         }
     }

@@ -175,7 +175,7 @@ impl JobProcessor for RecursionTipWitnessGenerator {
                 l1_batch_number,
                 final_node_proof_job_ids,
                 &*self.object_store,
-                self.setup_data_path.clone(),
+                self.keystore.clone(),
             )
             .await
             .context("prepare_job()")?,
@@ -288,7 +288,7 @@ pub async fn prepare_job(
     l1_batch_number: L1BatchNumber,
     final_node_proof_job_ids: Vec<(u8, u32)>,
     object_store: &dyn ObjectStore,
-    setup_data_path: String,
+    keystore: Keystore,
 ) -> anyhow::Result<RecursionTipWitnessGeneratorJob> {
     let started_at = Instant::now();
     let recursion_tip_proofs =
@@ -296,7 +296,6 @@ pub async fn prepare_job(
     WITNESS_GENERATOR_METRICS.blob_fetch_time[&AggregationRound::RecursionTip.into()]
         .observe(started_at.elapsed());

-    let keystore = Keystore::new_with_setup_data_path(setup_data_path);
     let node_vk = keystore
         .load_recursive_layer_verification_key(
             ZkSyncRecursionLayerStorageType::NodeLayerCircuit as u8,
diff --git a/prover/crates/bin/witness_generator/src/scheduler.rs b/prover/crates/bin/witness_generator/src/scheduler.rs
index 6e3461150fe2..c6e43582bbdb 100644
--- a/prover/crates/bin/witness_generator/src/scheduler.rs
+++ b/prover/crates/bin/witness_generator/src/scheduler.rs
@@ -57,7 +57,7 @@ pub struct SchedulerWitnessGenerator {
     object_store: Arc<dyn ObjectStore>,
     prover_connection_pool: ConnectionPool<Prover>,
     protocol_version: ProtocolSemanticVersion,
-    setup_data_path: String,
+    keystore: Keystore,
 }

 impl SchedulerWitnessGenerator {
@@ -66,14 +66,14 @@ impl SchedulerWitnessGenerator {
         object_store: Arc<dyn ObjectStore>,
         prover_connection_pool: ConnectionPool<Prover>,
         protocol_version: ProtocolSemanticVersion,
-        setup_data_path: String,
+        keystore: Keystore,
     ) -> Self {
         Self {
             config,
             object_store,
             prover_connection_pool,
             protocol_version,
-            setup_data_path,
+            keystore,
         }
     }

@@ -154,7 +154,7 @@ impl JobProcessor for SchedulerWitnessGenerator {
                 l1_batch_number,
                 recursion_tip_job_id,
                 &*self.object_store,
-                self.setup_data_path.clone(),
+                self.keystore.clone(),
             )
             .await
             .context("prepare_job()")?,
@@ -266,7 +266,7 @@ pub async fn prepare_job(
     l1_batch_number: L1BatchNumber,
     recursion_tip_job_id: u32,
     object_store: &dyn ObjectStore,
-    setup_data_path: String,
+    keystore: Keystore,
 ) -> anyhow::Result<SchedulerWitnessGeneratorJob> {
     let started_at = Instant::now();
     let wrapper = object_store.get(recursion_tip_job_id).await?;
@@ -280,7 +280,6 @@ pub async fn prepare_job(
         .observe(started_at.elapsed());

     let started_at = Instant::now();
-    let keystore = Keystore::new_with_setup_data_path(setup_data_path);
     let node_vk = keystore
         .load_recursive_layer_verification_key(
             ZkSyncRecursionLayerStorageType::NodeLayerCircuit as u8,
diff --git a/prover/crates/bin/witness_generator/tests/basic_test.rs b/prover/crates/bin/witness_generator/tests/basic_test.rs
index b034ab57d82c..3323e3c681e4 100644
--- a/prover/crates/bin/witness_generator/tests/basic_test.rs
+++ b/prover/crates/bin/witness_generator/tests/basic_test.rs
@@ -8,6 +8,7 @@ use zksync_prover_fri_types::{
     CircuitWrapper,
 };
 use zksync_prover_fri_utils::get_recursive_layer_circuit_id_for_base_layer;
+use zksync_prover_keystore::keystore::Keystore;
 use zksync_types::{
     basic_fri_types::AggregationRound,
     prover_dal::{LeafAggregationJobMetadata, NodeAggregationJobMetadata},
@@ -50,13 +51,10 @@ async fn test_leaf_witness_gen() {
         .await
         .unwrap();

-    let job = prepare_leaf_aggregation_job(
-        leaf_aggregation_job_metadata,
-        &*object_store,
-        "crates/bin/vk_setup_data_generator/data".to_string(),
-    )
-    .await
-    .unwrap();
+    let keystore = Keystore::locate();
+    let job = prepare_leaf_aggregation_job(leaf_aggregation_job_metadata, &*object_store, keystore)
+        .await
+        .unwrap();

     let artifacts = LeafAggregationWitnessGenerator::process_job_impl(
         job,
@@ -143,13 +141,11 @@ async fn test_node_witness_gen() {
         prover_job_ids_for_proofs: vec![5211320],
     };

-    let job = node_aggregation::prepare_job(
-        node_aggregation_job_metadata,
-        &*object_store,
-        "crates/bin/vk_setup_data_generator/data".to_string(),
-    )
-    .await
-    .unwrap();
+    let keystore = Keystore::locate();
+    let job =
+        node_aggregation::prepare_job(node_aggregation_job_metadata, &*object_store, keystore)
+            .await
+            .unwrap();

     let artifacts = NodeAggregationWitnessGenerator::process_job_impl(
         job,
diff --git a/prover/crates/bin/witness_vector_generator/src/generator.rs b/prover/crates/bin/witness_vector_generator/src/generator.rs
index f482637c1778..6695905c07e3 100644
--- a/prover/crates/bin/witness_vector_generator/src/generator.rs
+++ b/prover/crates/bin/witness_vector_generator/src/generator.rs
@@ -34,7 +34,7 @@ pub struct WitnessVectorGenerator {
     config: FriWitnessVectorGeneratorConfig,
     protocol_version: ProtocolSemanticVersion,
     max_attempts: u32,
-    setup_data_path: Option<String>,
+    keystore: Keystore,
 }

 impl WitnessVectorGenerator {
@@ -47,7 +47,7 @@ impl WitnessVectorGenerator {
         config: FriWitnessVectorGeneratorConfig,
         protocol_version: ProtocolSemanticVersion,
         max_attempts: u32,
-        setup_data_path: Option<String>,
+        keystore: Keystore,
     ) -> Self {
         Self {
             object_store,
@@ -57,7 +57,7 @@ impl WitnessVectorGenerator {
             config,
             protocol_version,
             max_attempts,
-            setup_data_path,
+            keystore,
         }
     }

@@ -127,16 +127,10 @@ impl JobProcessor for WitnessVectorGenerator {
         job: ProverJob,
         _started_at: Instant,
     ) -> JoinHandle<anyhow::Result<Self::JobArtifacts>> {
-        let setup_data_path = self.setup_data_path.clone();
-
+        let keystore = self.keystore.clone();
         tokio::task::spawn_blocking(move || {
             let block_number = job.block_number;
             let _span = tracing::info_span!("witness_vector_generator", %block_number).entered();
-            let keystore = if let Some(setup_data_path) = setup_data_path {
-                Keystore::new_with_setup_data_path(setup_data_path)
-            } else {
-                Keystore::default()
-            };
             Self::generate_witness_vector(job, &keystore)
         })
     }
diff --git a/prover/crates/bin/witness_vector_generator/src/main.rs b/prover/crates/bin/witness_vector_generator/src/main.rs
index 1d3113ebf1aa..17ac3bd6fc9f 100644
--- a/prover/crates/bin/witness_vector_generator/src/main.rs
+++ b/prover/crates/bin/witness_vector_generator/src/main.rs
@@ -12,6 +12,7 @@ use zksync_object_store::ObjectStoreFactory;
 use zksync_prover_dal::ConnectionPool;
 use zksync_prover_fri_types::PROVER_PROTOCOL_SEMANTIC_VERSION;
 use zksync_prover_fri_utils::{get_all_circuit_id_round_tuples_for, region_fetcher::RegionFetcher};
+use zksync_prover_keystore::keystore::Keystore;
 use zksync_queued_job_processor::JobProcessor;
 use zksync_utils::wait_for_tasks::ManagedTasks;
 use zksync_vlog::prometheus::PrometheusExporterConfig;
@@ -87,6 +88,9 @@ async fn main() -> anyhow::Result<()> {
         .await
         .context("get_zone()")?;

+    let keystore =
+        Keystore::locate().with_setup_path(Some(prover_config.setup_data_path.clone().into()));
+
     let protocol_version = PROVER_PROTOCOL_SEMANTIC_VERSION;

     let (stop_sender, stop_receiver) = watch::channel(false);
@@ -120,7 +124,7 @@ async fn main() -> anyhow::Result<()> {
             config.clone(),
             protocol_version,
             prover_config.max_attempts,
-            Some(prover_config.setup_data_path.clone()),
+            keystore.clone(),
         );
         tasks.push(tokio::spawn(
             witness_vector_generator.run(stop_receiver.clone(), opt.n_iterations),
diff --git a/prover/crates/bin/witness_vector_generator/tests/basic_test.rs b/prover/crates/bin/witness_vector_generator/tests/basic_test.rs
index dd1ef8404198..bcf01ddc4061 100644
--- a/prover/crates/bin/witness_vector_generator/tests/basic_test.rs
+++ b/prover/crates/bin/witness_vector_generator/tests/basic_test.rs
@@ -22,8 +22,7 @@ fn test_generate_witness_vector() {
         circuit_wrapper,
         setup_data_key: key,
     };
-    let vector =
-        WitnessVectorGenerator::generate_witness_vector(job, &Keystore::default()).unwrap();
+    let vector = WitnessVectorGenerator::generate_witness_vector(job, &Keystore::locate()).unwrap();
     assert!(!vector.witness_vector.all_values.is_empty());
     assert!(!vector.witness_vector.multiplicities.is_empty());
     assert!(!vector.witness_vector.public_inputs_locations.is_empty());
diff --git a/prover/crates/lib/keystore/Cargo.toml b/prover/crates/lib/keystore/Cargo.toml
index 41e9f0244f69..617030754f8b 100644
--- a/prover/crates/lib/keystore/Cargo.toml
+++ b/prover/crates/lib/keystore/Cargo.toml
@@ -11,14 +11,12 @@ categories.workspace = true

 [dependencies]
-zksync_types.workspace = true
+zksync_basic_types.workspace = true
 zksync_utils.workspace = true
 zksync_prover_fri_types.workspace = true
 zkevm_test_harness.workspace = true
 circuit_definitions = { workspace = true, features = ["log_tracing"] }
 shivini = { workspace = true, optional = true }
-zksync_config.workspace = true
-zksync_env_config.workspace = true

 anyhow.workspace = true
 tracing.workspace = true
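The keystore changes that follow replace the `Default` impl and the `new_with_*` constructors with a small builder API. Usage per the new constructors (the explicit paths here are illustrative only):

```rust
use std::path::PathBuf;

use zksync_prover_keystore::keystore::Keystore;

fn example_keystores() -> (Keystore, Keystore) {
    // Auto-detect the base dir (e.g. `prover/data/keys` in the workspace);
    // setup keys are assumed to live alongside the small keys.
    let auto = Keystore::locate();
    // Explicit base dir, with the large setup keys redirected elsewhere;
    // `/mnt/prover-setup-keys` is a hypothetical path, not one used by the repo.
    let custom = Keystore::new(PathBuf::from("prover/data/keys"))
        .with_setup_path(Some(PathBuf::from("/mnt/prover-setup-keys")));
    (auto, custom)
}
```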
diff --git a/prover/crates/lib/keystore/src/commitment_utils.rs b/prover/crates/lib/keystore/src/commitment_utils.rs
index 792efba35adc..6973f86bf41e 100644
--- a/prover/crates/lib/keystore/src/commitment_utils.rs
+++ b/prover/crates/lib/keystore/src/commitment_utils.rs
@@ -1,16 +1,15 @@
-use std::{str::FromStr, sync::Mutex};
+use std::str::FromStr;

 use anyhow::Context as _;
 use hex::ToHex;
-use once_cell::sync::Lazy;
 use zkevm_test_harness::witness::recursive_aggregation::{
     compute_leaf_vks_and_params_commitment, compute_node_vk_commitment,
 };
+use zksync_basic_types::H256;
 use zksync_prover_fri_types::circuit_definitions::{
     boojum::field::goldilocks::GoldilocksField,
     circuit_definitions::recursion_layer::ZkSyncRecursionLayerStorageType,
 };
-use zksync_types::{protocol_version::L1VerifierConfig, H256};

 use crate::{
     keystore::Keystore,
@@ -18,80 +17,62 @@ use crate::{
     VkCommitments,
 };

-static KEYSTORE: Lazy<Mutex<Option<Keystore>>> = Lazy::new(|| Mutex::new(None));
+impl Keystore {
+    pub fn generate_commitments(&self) -> anyhow::Result<VkCommitments> {
+        let leaf_vk_params = get_leaf_vk_params(self).context("get_leaf_vk_params()")?;
+        let leaf_layer_params = leaf_vk_params
+            .iter()
+            .map(|el| el.1.clone())
+            .collect::<Vec<_>>()
+            .try_into()
+            .unwrap();
+        let leaf_vk_commitment = compute_leaf_vks_and_params_commitment(leaf_layer_params);

-fn circuit_commitments(keystore: &Keystore) -> anyhow::Result<L1VerifierConfig> {
-    let commitments = generate_commitments(keystore).context("generate_commitments()")?;
-    Ok(L1VerifierConfig {
-        // Instead of loading the FRI scheduler verification key here,
-        // we load the SNARK-wrapper verification key.
-        // This is due to the fact that these keys are used only for picking the
-        // prover jobs / witgen jobs from the DB. The keys are matched with the ones in
-        // `prover_fri_protocol_versions` table, which has the SNARK-wrapper verification key.
-        // This is OK because if the FRI VK changes, the SNARK-wrapper VK will change as well.
-        recursion_scheduler_level_vk_hash: H256::from_str(&commitments.snark_wrapper)
-            .context("invalid SNARK wrapper VK")?,
-    })
-}
-
-pub fn generate_commitments(keystore: &Keystore) -> anyhow::Result<VkCommitments> {
-    let leaf_vk_params = get_leaf_vk_params(keystore).context("get_leaf_vk_params()")?;
-    let leaf_layer_params = leaf_vk_params
-        .iter()
-        .map(|el| el.1.clone())
-        .collect::<Vec<_>>()
-        .try_into()
-        .unwrap();
-    let leaf_vk_commitment = compute_leaf_vks_and_params_commitment(leaf_layer_params);
+        let node_vk = self
+            .load_recursive_layer_verification_key(
+                ZkSyncRecursionLayerStorageType::NodeLayerCircuit as u8,
+            )
+            .context("get_recursive_layer_vk_for_circuit_type(NodeLayerCircuit)")?;
+        let node_vk_commitment = compute_node_vk_commitment(node_vk.clone());

-    let node_vk = keystore
-        .load_recursive_layer_verification_key(
-            ZkSyncRecursionLayerStorageType::NodeLayerCircuit as u8,
-        )
-        .context("get_recursive_layer_vk_for_circuit_type(NodeLayerCircuit)")?;
-    let node_vk_commitment = compute_node_vk_commitment(node_vk.clone());
+        let scheduler_vk = self
+            .load_recursive_layer_verification_key(
+                ZkSyncRecursionLayerStorageType::SchedulerCircuit as u8,
+            )
+            .context("get_recursive_layer_vk_for_circuit_type(SchedulerCircuit)")?;
+        let scheduler_vk_commitment = compute_node_vk_commitment(scheduler_vk.clone());

-    let scheduler_vk = keystore
-        .load_recursive_layer_verification_key(
-            ZkSyncRecursionLayerStorageType::SchedulerCircuit as u8,
-        )
-        .context("get_recursive_layer_vk_for_circuit_type(SchedulerCircuit)")?;
-    let scheduler_vk_commitment = compute_node_vk_commitment(scheduler_vk.clone());
+        let hex_concatenator = |hex_array: [GoldilocksField; 4]| {
+            "0x".to_owned()
+                + &hex_array
+                    .iter()
+                    .map(|x| format!("{:016x}", x.0))
+                    .collect::<Vec<_>>()
+                    .join("")
+        };

-    let hex_concatenator = |hex_array: [GoldilocksField; 4]| {
-        "0x".to_owned()
-            + &hex_array
-                .iter()
-                .map(|x| format!("{:016x}", x.0))
-                .collect::<Vec<_>>()
-                .join("")
-    };
+        let leaf_aggregation_commitment_hex = hex_concatenator(leaf_vk_commitment);
+        let node_aggregation_commitment_hex = hex_concatenator(node_vk_commitment);
+        let scheduler_commitment_hex = hex_concatenator(scheduler_vk_commitment);
+        let snark_vk_hash: String = calculate_snark_vk_hash(self)?.encode_hex();

-    let leaf_aggregation_commitment_hex = hex_concatenator(leaf_vk_commitment);
-    let node_aggregation_commitment_hex = hex_concatenator(node_vk_commitment);
-    let scheduler_commitment_hex = hex_concatenator(scheduler_vk_commitment);
-    let snark_vk_hash: String = calculate_snark_vk_hash(keystore)?.encode_hex();
-
-    let result = VkCommitments {
-        leaf: leaf_aggregation_commitment_hex,
-        node: node_aggregation_commitment_hex,
-        scheduler: scheduler_commitment_hex,
-        snark_wrapper: format!("0x{}", snark_vk_hash),
-    };
-    tracing::info!("Commitments: {:?}", result);
-    Ok(result)
-}
-
-pub fn get_cached_commitments(setup_data_path: Option<String>) -> L1VerifierConfig {
-    if let Some(setup_data_path) = setup_data_path {
-        let keystore = Keystore::new_with_setup_data_path(setup_data_path);
-        let mut keystore_lock = KEYSTORE.lock().unwrap();
-        *keystore_lock = Some(keystore);
+        let result = VkCommitments {
+            leaf: leaf_aggregation_commitment_hex,
+            node: node_aggregation_commitment_hex,
+            scheduler: scheduler_commitment_hex,
+            snark_wrapper: format!("0x{}", snark_vk_hash),
+        };
+        tracing::info!("Commitments: {:?}", result);
+        Ok(result)
     }
-    let keystore = KEYSTORE.lock().unwrap().clone().unwrap_or_default();
-    let commitments = circuit_commitments(&keystore).unwrap();
-
-    tracing::info!("Using cached commitments {:?}", commitments);
-    commitments
+    pub fn verify_scheduler_vk_hash(&self, expected_hash: H256) -> anyhow::Result<()> {
+        let commitments = self
+            .generate_commitments()
+            .context("generate_commitments()")?;
+        let calculated_hash =
+            H256::from_str(&commitments.snark_wrapper).context("invalid SNARK wrapper VK")?;
+        anyhow::ensure!(expected_hash == calculated_hash, "Invalid SNARK wrapper VK hash. Calculated locally: {calculated_hash:?}, provided: {expected_hash:?}");
+        Ok(())
+    }
 }
diff --git a/prover/crates/lib/keystore/src/keystore.rs b/prover/crates/lib/keystore/src/keystore.rs
index 7ba5a3aaa701..ff14387bfda7 100644
--- a/prover/crates/lib/keystore/src/keystore.rs
+++ b/prover/crates/lib/keystore/src/keystore.rs
@@ -16,10 +16,8 @@ use circuit_definitions::{
 };
 use serde::{Deserialize, Serialize};
 use zkevm_test_harness::data_source::{in_memory_data_source::InMemoryDataSource, SetupDataSource};
-use zksync_config::configs::FriProverConfig;
-use zksync_env_config::FromEnv;
+use zksync_basic_types::basic_fri_types::AggregationRound;
 use zksync_prover_fri_types::ProverServiceDataKey;
-use zksync_types::basic_fri_types::AggregationRound;

 #[cfg(feature = "gpu")]
 use crate::GoldilocksGpuProverSetupData;
@@ -36,12 +34,12 @@ pub enum ProverServiceDataType {
 /// There are 2 types:
 /// - small verification, finalization keys (used only during verification)
 /// - large setup keys, used during proving.
-#[derive(Clone)]
+#[derive(Clone, Debug)]
 pub struct Keystore {
     /// Directory to store all the small keys.
     basedir: PathBuf,
     /// Directory to store large setup keys.
-    setup_data_path: Option<String>,
+    setup_data_path: PathBuf,
 }

 fn get_base_path() -> PathBuf {
@@ -69,41 +67,32 @@ fn get_base_path() -> PathBuf {
     components.as_path().join("prover/data/keys")
 }

-impl Default for Keystore {
-    fn default() -> Self {
-        Self {
-            basedir: get_base_path(),
-            setup_data_path: Some(
-                FriProverConfig::from_env()
-                    .expect("FriProverConfig::from_env()")
-                    .setup_data_path,
-            ),
-        }
-    }
-}
-
 impl Keystore {
     /// Base-dir is the location of smaller keys (like verification keys and finalization hints).
     /// Setup data path is used for the large setup keys.
-    pub fn new(basedir: PathBuf, setup_data_path: String) -> Self {
+    pub fn new(basedir: PathBuf) -> Self {
         Keystore {
-            basedir,
-            setup_data_path: Some(setup_data_path),
+            basedir: basedir.clone(),
+            setup_data_path: basedir,
         }
     }

-    pub fn new_with_optional_setup_path(basedir: PathBuf, setup_data_path: Option<String>) -> Self {
-        Keystore {
-            basedir,
-            setup_data_path,
+    /// Uses automatic detection of the base path, and assumes that setup keys
+    /// are stored in the same directory.
+    pub fn locate() -> Self {
+        let base_path = get_base_path();
+        Self {
+            basedir: base_path.clone(),
+            setup_data_path: base_path,
         }
     }

-    pub fn new_with_setup_data_path(setup_data_path: String) -> Self {
-        Keystore {
-            basedir: get_base_path(),
-            setup_data_path: Some(setup_data_path),
+    /// Will override the setup path, if present.
+ pub fn with_setup_path(mut self, setup_data_path: Option) -> Self { + if let Some(setup_data_path) = setup_data_path { + self.setup_data_path = setup_data_path; } + self } pub fn get_base_path(&self) -> &PathBuf { @@ -120,13 +109,9 @@ impl Keystore { ProverServiceDataType::VerificationKey => { self.basedir.join(format!("verification_{}_key.json", name)) } - ProverServiceDataType::SetupData => PathBuf::from(format!( - "{}/setup_{}_data.bin", - self.setup_data_path - .as_ref() - .expect("Setup data path not set"), - name - )), + ProverServiceDataType::SetupData => self + .setup_data_path + .join(format!("setup_{}_data.bin", name)), ProverServiceDataType::FinalizationHints => self .basedir .join(format!("finalization_hints_{}.bin", name)), diff --git a/prover/crates/lib/keystore/src/utils.rs b/prover/crates/lib/keystore/src/utils.rs index 5387b73e76cd..5cebf7aef77a 100644 --- a/prover/crates/lib/keystore/src/utils.rs +++ b/prover/crates/lib/keystore/src/utils.rs @@ -13,6 +13,7 @@ use zkevm_test_harness::{ franklin_crypto::bellman::{CurveAffine, PrimeField, PrimeFieldRepr}, witness::recursive_aggregation::compute_leaf_params, }; +use zksync_basic_types::H256; use zksync_prover_fri_types::circuit_definitions::{ boojum::field::goldilocks::GoldilocksField, circuit_definitions::recursion_layer::base_circuit_type_into_recursive_leaf_circuit_type, @@ -21,7 +22,6 @@ use zksync_prover_fri_types::circuit_definitions::{ scheduler::aux::BaseLayerCircuitType, }, }; -use zksync_types::H256; use zksync_utils::locate_workspace; use crate::keystore::Keystore; @@ -137,7 +137,7 @@ mod tests { for entry in std::fs::read_dir(path_to_input.clone()).unwrap().flatten() { if entry.metadata().unwrap().is_dir() { let basepath = path_to_input.join(entry.file_name()); - let keystore = Keystore::new_with_optional_setup_path(basepath.clone(), None); + let keystore = Keystore::new(basepath.clone()); let expected = H256::from_str(&keystore.load_commitments().unwrap().snark_wrapper).unwrap(); diff --git a/prover/crates/lib/prover_dal/.sqlx/query-419206075cd24f96f00d65f3d138c05583e8415d2e1d8c503f640e77d282b0d5.json b/prover/crates/lib/prover_dal/.sqlx/query-419206075cd24f96f00d65f3d138c05583e8415d2e1d8c503f640e77d282b0d5.json new file mode 100644 index 000000000000..ff5b1727e26a --- /dev/null +++ b/prover/crates/lib/prover_dal/.sqlx/query-419206075cd24f96f00d65f3d138c05583e8415d2e1d8c503f640e77d282b0d5.json @@ -0,0 +1,23 @@ +{ + "db_name": "PostgreSQL", + "query": "\n SELECT\n snark_wrapper_vk_hash\n FROM\n prover_fri_protocol_versions\n WHERE\n id = $1\n AND protocol_version_patch = $2\n ", + "describe": { + "columns": [ + { + "ordinal": 0, + "name": "snark_wrapper_vk_hash", + "type_info": "Bytea" + } + ], + "parameters": { + "Left": [ + "Int4", + "Int4" + ] + }, + "nullable": [ + false + ] + }, + "hash": "419206075cd24f96f00d65f3d138c05583e8415d2e1d8c503f640e77d282b0d5" +} diff --git a/prover/crates/lib/prover_dal/.sqlx/query-4f26bae35dd959448d9728ef7321fc79400930dddc84e042c5a4dc8a2e8508a5.json b/prover/crates/lib/prover_dal/.sqlx/query-4f26bae35dd959448d9728ef7321fc79400930dddc84e042c5a4dc8a2e8508a5.json deleted file mode 100644 index 73cd88457cd1..000000000000 --- a/prover/crates/lib/prover_dal/.sqlx/query-4f26bae35dd959448d9728ef7321fc79400930dddc84e042c5a4dc8a2e8508a5.json +++ /dev/null @@ -1,23 +0,0 @@ -{ - "db_name": "PostgreSQL", - "query": "\n SELECT\n recursion_scheduler_level_vk_hash\n FROM\n prover_fri_protocol_versions\n WHERE\n id = $1\n AND protocol_version_patch = $2\n ", - "describe": { - "columns": [ - { - 
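Taken together, the keystore changes above replace the global `Lazy`-cached keystore and the env-driven `Default` with explicit construction. A minimal usage sketch of the refactored API, assuming the crate is imported as `zksync_prover_keystore` (the import root and the `setup_dir` argument are illustrative, not from the patch):

```rust
use std::{path::PathBuf, str::FromStr};

use zksync_basic_types::H256;
use zksync_prover_keystore::keystore::Keystore; // import path assumed

fn check_local_keys(setup_dir: Option<PathBuf>) -> anyhow::Result<()> {
    // Auto-detect `prover/data/keys`; override the setup-key directory only if one is given.
    let keystore = Keystore::locate().with_setup_path(setup_dir);

    // Recompute all VK commitments from the locally stored verification keys.
    let commitments = keystore.generate_commitments()?;

    // Round-trip check: the SNARK-wrapper hash parsed back must verify against itself,
    // mirroring how `verify_scheduler_vk_hash` parses `commitments.snark_wrapper`.
    let expected = H256::from_str(&commitments.snark_wrapper)?;
    keystore.verify_scheduler_vk_hash(expected)?;
    Ok(())
}
```

In the new shape, callers that previously relied on `get_cached_commitments` construct a `Keystore` once and pass it down, which makes the setup-path override explicit at the call site.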
"ordinal": 0, - "name": "recursion_scheduler_level_vk_hash", - "type_info": "Bytea" - } - ], - "parameters": { - "Left": [ - "Int4", - "Int4" - ] - }, - "nullable": [ - false - ] - }, - "hash": "4f26bae35dd959448d9728ef7321fc79400930dddc84e042c5a4dc8a2e8508a5" -} diff --git a/prover/crates/lib/prover_dal/.sqlx/query-5da848354a84b20ae3f0240f6a352e85f85202637916cfcf4b34c6780536f105.json b/prover/crates/lib/prover_dal/.sqlx/query-5da848354a84b20ae3f0240f6a352e85f85202637916cfcf4b34c6780536f105.json deleted file mode 100644 index c985254f247e..000000000000 --- a/prover/crates/lib/prover_dal/.sqlx/query-5da848354a84b20ae3f0240f6a352e85f85202637916cfcf4b34c6780536f105.json +++ /dev/null @@ -1,16 +0,0 @@ -{ - "db_name": "PostgreSQL", - "query": "\n INSERT INTO\n prover_fri_protocol_versions (id, recursion_scheduler_level_vk_hash, created_at, protocol_version_patch)\n VALUES\n ($1, $2, NOW(), $3)\n ON CONFLICT (id, protocol_version_patch) DO NOTHING\n ", - "describe": { - "columns": [], - "parameters": { - "Left": [ - "Int4", - "Bytea", - "Int4" - ] - }, - "nullable": [] - }, - "hash": "5da848354a84b20ae3f0240f6a352e85f85202637916cfcf4b34c6780536f105" -} diff --git a/prover/crates/lib/prover_dal/.sqlx/query-61b2b858d4636809c21838635aa52aeb5f06c26f68d131dd242f6ed68816c513.json b/prover/crates/lib/prover_dal/.sqlx/query-61b2b858d4636809c21838635aa52aeb5f06c26f68d131dd242f6ed68816c513.json deleted file mode 100644 index c713af9a210d..000000000000 --- a/prover/crates/lib/prover_dal/.sqlx/query-61b2b858d4636809c21838635aa52aeb5f06c26f68d131dd242f6ed68816c513.json +++ /dev/null @@ -1,22 +0,0 @@ -{ - "db_name": "PostgreSQL", - "query": "\n SELECT\n l1_batch_number\n FROM\n prover_jobs_fri\n WHERE\n status <> 'skipped'\n AND status <> 'successful'\n AND aggregation_round = $1\n ORDER BY\n l1_batch_number ASC\n LIMIT\n 1\n ", - "describe": { - "columns": [ - { - "ordinal": 0, - "name": "l1_batch_number", - "type_info": "Int8" - } - ], - "parameters": { - "Left": [ - "Int2" - ] - }, - "nullable": [ - false - ] - }, - "hash": "61b2b858d4636809c21838635aa52aeb5f06c26f68d131dd242f6ed68816c513" -} diff --git a/prover/crates/lib/prover_dal/.sqlx/query-6e58af820b4dd867cd794a04c97c23ff743fc01c92e28ed447a8e124062fa62c.json b/prover/crates/lib/prover_dal/.sqlx/query-6e58af820b4dd867cd794a04c97c23ff743fc01c92e28ed447a8e124062fa62c.json new file mode 100644 index 000000000000..b5025c6ed18d --- /dev/null +++ b/prover/crates/lib/prover_dal/.sqlx/query-6e58af820b4dd867cd794a04c97c23ff743fc01c92e28ed447a8e124062fa62c.json @@ -0,0 +1,20 @@ +{ + "db_name": "PostgreSQL", + "query": "\n SELECT\n snark_wrapper_vk_hash\n FROM\n prover_fri_protocol_versions\n ORDER BY\n id DESC\n LIMIT\n 1\n ", + "describe": { + "columns": [ + { + "ordinal": 0, + "name": "snark_wrapper_vk_hash", + "type_info": "Bytea" + } + ], + "parameters": { + "Left": [] + }, + "nullable": [ + false + ] + }, + "hash": "6e58af820b4dd867cd794a04c97c23ff743fc01c92e28ed447a8e124062fa62c" +} diff --git a/prover/crates/lib/prover_dal/.sqlx/query-89a25708d0b0a15e1e56ee8fd69f5a15d2fc5ad3e5ce738a2f6ee3eecfc96736.json b/prover/crates/lib/prover_dal/.sqlx/query-89a25708d0b0a15e1e56ee8fd69f5a15d2fc5ad3e5ce738a2f6ee3eecfc96736.json new file mode 100644 index 000000000000..d8bd3223905c --- /dev/null +++ b/prover/crates/lib/prover_dal/.sqlx/query-89a25708d0b0a15e1e56ee8fd69f5a15d2fc5ad3e5ce738a2f6ee3eecfc96736.json @@ -0,0 +1,16 @@ +{ + "db_name": "PostgreSQL", + "query": "\n INSERT INTO\n prover_fri_protocol_versions (id, snark_wrapper_vk_hash, created_at, 
protocol_version_patch)\n VALUES\n ($1, $2, NOW(), $3)\n ON CONFLICT (id, protocol_version_patch) DO NOTHING\n ", + "describe": { + "columns": [], + "parameters": { + "Left": [ + "Int4", + "Bytea", + "Int4" + ] + }, + "nullable": [] + }, + "hash": "89a25708d0b0a15e1e56ee8fd69f5a15d2fc5ad3e5ce738a2f6ee3eecfc96736" +} diff --git a/prover/crates/lib/prover_dal/.sqlx/query-ed0f22d3d2c8b0d9bd1b4360a0f7999388451886a3eb9b4481b55404b16b89ac.json b/prover/crates/lib/prover_dal/.sqlx/query-ed0f22d3d2c8b0d9bd1b4360a0f7999388451886a3eb9b4481b55404b16b89ac.json deleted file mode 100644 index d699aae174c7..000000000000 --- a/prover/crates/lib/prover_dal/.sqlx/query-ed0f22d3d2c8b0d9bd1b4360a0f7999388451886a3eb9b4481b55404b16b89ac.json +++ /dev/null @@ -1,20 +0,0 @@ -{ - "db_name": "PostgreSQL", - "query": "\n SELECT\n recursion_scheduler_level_vk_hash\n FROM\n prover_fri_protocol_versions\n ORDER BY\n id DESC\n LIMIT\n 1\n ", - "describe": { - "columns": [ - { - "ordinal": 0, - "name": "recursion_scheduler_level_vk_hash", - "type_info": "Bytea" - } - ], - "parameters": { - "Left": [] - }, - "nullable": [ - false - ] - }, - "hash": "ed0f22d3d2c8b0d9bd1b4360a0f7999388451886a3eb9b4481b55404b16b89ac" -} diff --git a/prover/crates/lib/prover_dal/migrations/20240905124208_rename-recurision-scheduler-level-vk-hash.down.sql b/prover/crates/lib/prover_dal/migrations/20240905124208_rename-recurision-scheduler-level-vk-hash.down.sql new file mode 100644 index 000000000000..8d1681440769 --- /dev/null +++ b/prover/crates/lib/prover_dal/migrations/20240905124208_rename-recurision-scheduler-level-vk-hash.down.sql @@ -0,0 +1,3 @@ +UPDATE prover_fri_protocol_versions SET recursion_scheduler_level_vk_hash = snark_wrapper_vk_hash WHERE recursion_scheduler_level_vk_hash = ''::bytea; +ALTER TABLE prover_fri_protocol_versions DROP COLUMN snark_wrapper_vk_hash; +ALTER TABLE prover_fri_protocol_versions ALTER COLUMN recursion_scheduler_level_vk_hash DROP DEFAULT; diff --git a/prover/crates/lib/prover_dal/migrations/20240905124208_rename-recurision-scheduler-level-vk-hash.up.sql b/prover/crates/lib/prover_dal/migrations/20240905124208_rename-recurision-scheduler-level-vk-hash.up.sql new file mode 100644 index 000000000000..98eb1ee791c2 --- /dev/null +++ b/prover/crates/lib/prover_dal/migrations/20240905124208_rename-recurision-scheduler-level-vk-hash.up.sql @@ -0,0 +1,8 @@ +ALTER TABLE prover_fri_protocol_versions ADD COLUMN snark_wrapper_vk_hash BYTEA NOT NULL DEFAULT ''::bytea; +ALTER TABLE prover_fri_protocol_versions ALTER COLUMN recursion_scheduler_level_vk_hash SET DEFAULT ''::bytea; +UPDATE prover_fri_protocol_versions SET snark_wrapper_vk_hash = recursion_scheduler_level_vk_hash; +-- The default was only needed to migrate old rows; dropping it ensures the field cannot be silently omitted after the migration. +ALTER TABLE prover_fri_protocol_versions ALTER COLUMN snark_wrapper_vk_hash DROP DEFAULT; + +-- Old column should be removed once the migration is on the mainnet. +COMMENT ON COLUMN prover_fri_protocol_versions.recursion_scheduler_level_vk_hash IS 'This column is deprecated and will be removed in the future.
Use snark_wrapper_vk_hash instead.'; diff --git a/prover/crates/lib/prover_dal/src/fri_protocol_versions_dal.rs b/prover/crates/lib/prover_dal/src/fri_protocol_versions_dal.rs index caf620882bc2..50df1046e67d 100644 --- a/prover/crates/lib/prover_dal/src/fri_protocol_versions_dal.rs +++ b/prover/crates/lib/prover_dal/src/fri_protocol_versions_dal.rs @@ -20,14 +20,14 @@ impl FriProtocolVersionsDal<'_, '_> { sqlx::query!( r#" INSERT INTO - prover_fri_protocol_versions (id, recursion_scheduler_level_vk_hash, created_at, protocol_version_patch) + prover_fri_protocol_versions (id, snark_wrapper_vk_hash, created_at, protocol_version_patch) VALUES ($1, $2, NOW(), $3) ON CONFLICT (id, protocol_version_patch) DO NOTHING "#, id.minor as i32, l1_verifier_config - .recursion_scheduler_level_vk_hash + .snark_wrapper_vk_hash .as_bytes(), id.patch.0 as i32 ) @@ -43,7 +43,7 @@ impl FriProtocolVersionsDal<'_, '_> { sqlx::query!( r#" SELECT - recursion_scheduler_level_vk_hash + snark_wrapper_vk_hash FROM prover_fri_protocol_versions WHERE @@ -57,9 +57,7 @@ impl FriProtocolVersionsDal<'_, '_> { .await .unwrap() .map(|row| L1VerifierConfig { - recursion_scheduler_level_vk_hash: H256::from_slice( - &row.recursion_scheduler_level_vk_hash, - ), + snark_wrapper_vk_hash: H256::from_slice(&row.snark_wrapper_vk_hash), }) } @@ -67,7 +65,7 @@ impl FriProtocolVersionsDal<'_, '_> { let result = sqlx::query!( r#" SELECT - recursion_scheduler_level_vk_hash + snark_wrapper_vk_hash FROM prover_fri_protocol_versions ORDER BY @@ -80,9 +78,7 @@ impl FriProtocolVersionsDal<'_, '_> { .await?; Ok(L1VerifierConfig { - recursion_scheduler_level_vk_hash: H256::from_slice( - &result.recursion_scheduler_level_vk_hash, - ), + snark_wrapper_vk_hash: H256::from_slice(&result.snark_wrapper_vk_hash), }) } diff --git a/renovate.json b/renovate.json index 055bc3425806..eeccfee848dc 100644 --- a/renovate.json +++ b/renovate.json @@ -1,11 +1,5 @@ { - "enabled": false, - "extends": [ - "config:base", - "helpers:pinGitHubActionDigests" - ], - "enabledManagers": [ - "github-actions" - ], + "extends": ["config:base", "schedule:earlyMondays","helpers:pinGitHubActionDigests"], + "enabledManagers": ["github-actions"], "prCreation": "immediate" } diff --git a/zk_toolbox/Cargo.lock b/zk_toolbox/Cargo.lock index cd5d6a0b280e..75859021979f 100644 --- a/zk_toolbox/Cargo.lock +++ b/zk_toolbox/Cargo.lock @@ -6349,9 +6349,9 @@ dependencies = [ [[package]] name = "zksync_concurrency" -version = "0.1.0-rc.11" +version = "0.1.0-rc.12" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b0e31a9fc9a390b440cd12bbe040330dc64f64697a8a8ecbc3beb98cd0747909" +checksum = "a49ad68bfaf6fb8542c68894b68b28be31514786549855aaa8a46b36defbb100" dependencies = [ "anyhow", "once_cell", @@ -6383,9 +6383,9 @@ dependencies = [ [[package]] name = "zksync_consensus_utils" -version = "0.1.0-rc.11" +version = "0.1.0-rc.12" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2ff679f8b5f671d887a750b8107f3b5c01fd6085f68eef37ab01de8d2bd0736b" +checksum = "4d624f55e2449f43b2c85588b5dd2a28b3c5ea629effc89df76e3254f8d9d2fb" dependencies = [ "anyhow", "rand", @@ -6434,9 +6434,9 @@ dependencies = [ [[package]] name = "zksync_protobuf" -version = "0.1.0-rc.11" +version = "0.1.0-rc.12" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f4f6ba3bf0aac20de18b4ae18a22d8c81b83f8f72e8fdec1c879525ecdacd2f5" +checksum = "d26fb2beb3aeafb5e9babf1acf6494662cc7157b893fa248bd151494f931d07f" dependencies = [ "anyhow", "bit-vec", @@ 
-6455,9 +6455,9 @@ dependencies = [ [[package]] name = "zksync_protobuf_build" -version = "0.1.0-rc.11" +version = "0.1.0-rc.12" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7798c248b9a64505f0586bd5fadad6b26c999be4a8dec6b1a86b10b3888169c5" +checksum = "58e86c198e056d921b4f3f1d2755c23d090e942b5a70b03bcb7e7c02445aa491" dependencies = [ "anyhow", "heck", diff --git a/zk_toolbox/Cargo.toml b/zk_toolbox/Cargo.toml index 4a08776558ed..e1ad63136af1 100644 --- a/zk_toolbox/Cargo.toml +++ b/zk_toolbox/Cargo.toml @@ -30,7 +30,7 @@ types = { path = "crates/types" } zksync_config = { path = "../core/lib/config" } zksync_protobuf_config = { path = "../core/lib/protobuf_config" } zksync_basic_types = { path = "../core/lib/basic_types" } -zksync_protobuf = "=0.1.0-rc.11" +zksync_protobuf = "=0.1.0-rc.12" # External dependencies anyhow = "1.0.82" diff --git a/zk_toolbox/README.md b/zk_toolbox/README.md index 469e36a65f64..a3b44fa98b32 100644 --- a/zk_toolbox/README.md +++ b/zk_toolbox/README.md @@ -247,6 +247,53 @@ Run the external node: zk_inception en run ``` +### Portal + +Once you have at least one chain initialized, you can run the [portal](https://github.com/matter-labs/dapp-portal) - a +web-app to bridge tokens between L1 and L2 and more: + +```bash +zk_inception portal +``` + +This command will start the dockerized portal app using the configuration from the `apps/portal.config.json` file inside your +ecosystem directory. You can edit this file to configure the portal app if needed. By default, the portal starts on +`http://localhost:3030`; you can configure the port in the `apps.yaml` file. + +### Explorer + +For a better understanding of the blockchain data, you can use the +[explorer](https://github.com/matter-labs/block-explorer) - a web-app to view and inspect transactions, blocks, +contracts and more. + +First, each chain should be initialized: + +```bash +zk_inception explorer init +``` + +This command creates a database to store explorer data and generates a docker compose file with the explorer services +(`explorer-docker-compose.yml`). + +Next, for each chain that should have an explorer, you need to start its backend services: + +```bash +zk_inception explorer backend --chain <chain_name> +``` + +This command uses the previously created docker compose file to start the services (api, data fetcher, worker) required for +the explorer. + +Finally, you can run the explorer app: + +```bash +zk_inception explorer run +``` + +This command will start the dockerized explorer app using the configuration from the `apps/explorer.config.json` file inside +your ecosystem directory. You can edit this file to configure the app if needed. By default, the explorer starts on +`http://localhost:3010`; you can configure the port in the `apps.yaml` file. + ### Update To update your node: diff --git a/zk_toolbox/crates/common/src/docker.rs b/zk_toolbox/crates/common/src/docker.rs index 0ca31383f9cc..a5731808814f 100644 --- a/zk_toolbox/crates/common/src/docker.rs +++ b/zk_toolbox/crates/common/src/docker.rs @@ -1,26 +1,33 @@ -use std::collections::HashMap; - +use url::Url; use xshell::{cmd, Shell}; use crate::cmd::Cmd; -pub fn up(shell: &Shell, docker_compose_file: &str) -> anyhow::Result<()> { - Ok(Cmd::new(cmd!(shell, "docker compose -f {docker_compose_file} up -d")).run()?)
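The removed `up` above always passed `-d`; its replacement below takes an explicit `detach` flag. A hedged sketch of the two intended call shapes, using the real `common::docker` module path (the wrapper function names are hypothetical):

```rust
use xshell::Shell;

// Hypothetical wrappers around the refactored helper in common/src/docker.rs.
fn start_backend_services(shell: &Shell, compose_file: &str) -> anyhow::Result<()> {
    // detach = true: runs `docker compose -f <file> up -d`, suited to long-running services.
    common::docker::up(shell, compose_file, true)
}

fn run_app_in_foreground(shell: &Shell, compose_file: &str) -> anyhow::Result<()> {
    // detach = false: stays attached and streams logs, which is why the
    // implementation adds `with_force_run()` in this branch.
    common::docker::up(shell, compose_file, false)
}
```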
+pub fn up(shell: &Shell, docker_compose_file: &str, detach: bool) -> anyhow::Result<()> { + let args = if detach { vec!["-d"] } else { vec![] }; + let mut cmd = Cmd::new(cmd!( + shell, + "docker compose -f {docker_compose_file} up {args...}" + )); + cmd = if !detach { cmd.with_force_run() } else { cmd }; + Ok(cmd.run()?) } pub fn down(shell: &Shell, docker_compose_file: &str) -> anyhow::Result<()> { Ok(Cmd::new(cmd!(shell, "docker compose -f {docker_compose_file} down")).run()?) } -pub fn run( - shell: &Shell, - docker_image: &str, - docker_args: HashMap, -) -> anyhow::Result<()> { - let mut args = vec![]; - for (key, value) in docker_args.iter() { - args.push(key); - args.push(value); +pub fn run(shell: &Shell, docker_image: &str, docker_args: Vec) -> anyhow::Result<()> { + Ok(Cmd::new(cmd!(shell, "docker run {docker_args...} {docker_image}")).run()?) +} + +pub fn adjust_localhost_for_docker(mut url: Url) -> anyhow::Result { + if let Some(host) = url.host_str() { + if host == "localhost" || host == "127.0.0.1" { + url.set_host(Some("host.docker.internal"))?; + } + } else { + anyhow::bail!("Failed to parse: no host"); } - Ok(Cmd::new(cmd!(shell, "docker run {args...} {docker_image}")).run()?) + Ok(url) } diff --git a/zk_toolbox/crates/common/src/lib.rs b/zk_toolbox/crates/common/src/lib.rs index fbd6e93eb5d0..5a6f63e3a51f 100644 --- a/zk_toolbox/crates/common/src/lib.rs +++ b/zk_toolbox/crates/common/src/lib.rs @@ -14,6 +14,9 @@ pub mod git; pub mod server; pub mod wallets; -pub use prerequisites::{check_general_prerequisites, check_prover_prequisites}; +pub use prerequisites::{ + check_general_prerequisites, check_prerequisites, GCLOUD_PREREQUISITES, GPU_PREREQUISITES, + WGET_PREREQUISITES, +}; pub use prompt::{init_prompt_theme, Prompt, PromptConfirm, PromptSelect}; pub use term::{error, logger, spinner}; diff --git a/zk_toolbox/crates/common/src/prerequisites.rs b/zk_toolbox/crates/common/src/prerequisites.rs index 6c437302470d..87ec396d0e63 100644 --- a/zk_toolbox/crates/common/src/prerequisites.rs +++ b/zk_toolbox/crates/common/src/prerequisites.rs @@ -30,15 +30,7 @@ const DOCKER_COMPOSE_PREREQUISITE: Prerequisite = Prerequisite { download_link: "https://docs.docker.com/compose/install/", }; -const PROVER_PREREQUISITES: [Prerequisite; 5] = [ - Prerequisite { - name: "gcloud", - download_link: "https://cloud.google.com/sdk/docs/install", - }, - Prerequisite { - name: "wget", - download_link: "https://www.gnu.org/software/wget/", - }, +pub const GPU_PREREQUISITES: [Prerequisite; 3] = [ Prerequisite { name: "cmake", download_link: "https://cmake.org/download/", @@ -53,7 +45,17 @@ const PROVER_PREREQUISITES: [Prerequisite; 5] = [ }, // CUDA GPU driver ]; -struct Prerequisite { +pub const WGET_PREREQUISITES: [Prerequisite; 1] = [Prerequisite { + name: "wget", + download_link: "https://www.gnu.org/software/wget/", +}]; + +pub const GCLOUD_PREREQUISITES: [Prerequisite; 1] = [Prerequisite { + name: "gcloud", + download_link: "https://cloud.google.com/sdk/docs/install", +}]; + +pub struct Prerequisite { name: &'static str, download_link: &'static str, } @@ -62,11 +64,7 @@ pub fn check_general_prerequisites(shell: &Shell) { check_prerequisites(shell, &PREREQUISITES, true); } -pub fn check_prover_prequisites(shell: &Shell) { - check_prerequisites(shell, &PROVER_PREREQUISITES, false); -} - -fn check_prerequisites(shell: &Shell, prerequisites: &[Prerequisite], check_compose: bool) { +pub fn check_prerequisites(shell: &Shell, prerequisites: &[Prerequisite], check_compose: bool) { let mut 
missing_prerequisites = vec![]; for prerequisite in prerequisites { diff --git a/zk_toolbox/crates/config/src/apps.rs b/zk_toolbox/crates/config/src/apps.rs new file mode 100644 index 000000000000..697b35b0851b --- /dev/null +++ b/zk_toolbox/crates/config/src/apps.rs @@ -0,0 +1,59 @@ +use std::path::{Path, PathBuf}; + +use serde::{Deserialize, Serialize}; +use xshell::Shell; + +use crate::{ + consts::{APPS_CONFIG_FILE, DEFAULT_EXPLORER_PORT, DEFAULT_PORTAL_PORT, LOCAL_CONFIGS_PATH}, + traits::{FileConfigWithDefaultName, ReadConfig, SaveConfig, ZkToolboxConfig}, +}; + +/// Ecosystem level configuration for the apps (portal and explorer). +#[derive(Debug, Serialize, Deserialize, Clone)] +pub struct AppsEcosystemConfig { + pub portal: AppEcosystemConfig, + pub explorer: AppEcosystemConfig, +} + +#[derive(Debug, Serialize, Deserialize, Clone)] +pub struct AppEcosystemConfig { + pub http_port: u16, +} + +impl ZkToolboxConfig for AppsEcosystemConfig {} +impl FileConfigWithDefaultName for AppsEcosystemConfig { + const FILE_NAME: &'static str = APPS_CONFIG_FILE; +} + +impl AppsEcosystemConfig { + pub fn get_config_path(ecosystem_base_path: &Path) -> PathBuf { + ecosystem_base_path + .join(LOCAL_CONFIGS_PATH) + .join(APPS_CONFIG_FILE) + } + + pub fn read_or_create_default(shell: &Shell) -> anyhow::Result { + let config_path = Self::get_config_path(&shell.current_dir()); + match Self::read(shell, &config_path) { + Ok(config) => Ok(config), + Err(_) => { + let config = Self::default(); + config.save(shell, &config_path)?; + Ok(config) + } + } + } +} + +impl Default for AppsEcosystemConfig { + fn default() -> Self { + AppsEcosystemConfig { + portal: AppEcosystemConfig { + http_port: DEFAULT_PORTAL_PORT, + }, + explorer: AppEcosystemConfig { + http_port: DEFAULT_EXPLORER_PORT, + }, + } + } +} diff --git a/zk_toolbox/crates/config/src/consts.rs b/zk_toolbox/crates/config/src/consts.rs index b4bbbdffbe24..1e1c0998f00e 100644 --- a/zk_toolbox/crates/config/src/consts.rs +++ b/zk_toolbox/crates/config/src/consts.rs @@ -30,12 +30,43 @@ pub const ERA_OBSERVABILITY_COMPOSE_FILE: &str = "era-observability/docker-compo pub const ERA_OBSERBAVILITY_DIR: &str = "era-observability"; /// Era observability repo link pub const ERA_OBSERBAVILITY_GIT_REPO: &str = "https://github.com/matter-labs/era-observability"; +pub(crate) const LOCAL_APPS_PATH: &str = "apps/"; +pub(crate) const LOCAL_CHAINS_PATH: &str = "chains/"; pub(crate) const LOCAL_CONFIGS_PATH: &str = "configs/"; +pub(crate) const LOCAL_GENERATED_PATH: &str = ".generated/"; pub(crate) const LOCAL_DB_PATH: &str = "db/"; pub(crate) const LOCAL_ARTIFACTS_PATH: &str = "artifacts/"; -/// Name of portal config file -pub const PORTAL_CONFIG_FILE: &str = "portal.config.js"; +/// Name of apps config file +pub const APPS_CONFIG_FILE: &str = "apps.yaml"; +/// Name of portal runtime config file (auto-generated) +pub const PORTAL_JS_CONFIG_FILE: &str = "portal.config.js"; +/// Name of portal config JSON file +pub const PORTAL_CONFIG_FILE: &str = "portal.config.json"; +/// Name of explorer runtime config file (auto-generated) +pub const EXPLORER_JS_CONFIG_FILE: &str = "explorer.config.js"; +/// Name of explorer config JSON file +pub const EXPLORER_CONFIG_FILE: &str = "explorer.config.json"; +/// Name of explorer docker compose file +pub const EXPLORER_DOCKER_COMPOSE_FILE: &str = "explorer-docker-compose.yml"; + +/// Default port for the explorer app +pub const DEFAULT_EXPLORER_PORT: u16 = 3010; +/// Default port for the portal app +pub const DEFAULT_PORTAL_PORT: u16 = 3030; 
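The portal and explorer port constants above are the defaults baked into the new `AppsEcosystemConfig` from `apps.rs`. A short sketch of the read-or-create flow from a command's point of view (the function name is hypothetical; `AppsEcosystemConfig` is re-exported from the `config` crate via `pub use apps::*`):

```rust
use config::AppsEcosystemConfig;
use xshell::Shell;

fn resolve_app_ports(shell: &Shell) -> anyhow::Result<(u16, u16)> {
    // Reads `configs/apps.yaml` under the current ecosystem directory,
    // creating it with the defaults (portal 3030, explorer 3010) on first use.
    let apps = AppsEcosystemConfig::read_or_create_default(shell)?;
    Ok((apps.portal.http_port, apps.explorer.http_port))
}
```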
+/// Default port for the explorer worker service +pub const DEFAULT_EXPLORER_WORKER_PORT: u16 = 3001; +/// Default port for the explorer API service +pub const DEFAULT_EXPLORER_API_PORT: u16 = 3002; +/// Default port for the explorer data fetcher service +pub const DEFAULT_EXPLORER_DATA_FETCHER_PORT: u16 = 3040; + +pub const EXPLORER_API_DOCKER_IMAGE: &str = "matterlabs/block-explorer-api"; +pub const EXPLORER_DATA_FETCHER_DOCKER_IMAGE: &str = "matterlabs/block-explorer-data-fetcher"; +pub const EXPLORER_WORKER_DOCKER_IMAGE: &str = "matterlabs/block-explorer-worker"; + +/// Interval (in milliseconds) for polling new batches to process in explorer app +pub const EXPLORER_BATCHES_PROCESSING_POLLING_INTERVAL: u64 = 1000; /// Path to ecosystem contacts pub(crate) const ECOSYSTEM_PATH: &str = "etc/env/ecosystems"; diff --git a/zk_toolbox/crates/config/src/contracts.rs b/zk_toolbox/crates/config/src/contracts.rs index 6042c4bea088..19d432909487 100644 --- a/zk_toolbox/crates/config/src/contracts.rs +++ b/zk_toolbox/crates/config/src/contracts.rs @@ -5,7 +5,9 @@ use crate::{ consts::CONTRACTS_FILE, forge_interface::{ deploy_ecosystem::output::DeployL1Output, - deploy_l2_contracts::output::{DefaultL2UpgradeOutput, InitializeBridgeOutput}, + deploy_l2_contracts::output::{ + ConsensusRegistryOutput, DefaultL2UpgradeOutput, InitializeBridgeOutput, + }, register_chain::output::RegisterChainOutput, }, traits::{FileConfigWithDefaultName, ZkToolboxConfig}, @@ -84,6 +86,14 @@ impl ContractsConfig { Ok(()) } + pub fn set_consensus_registry( + &mut self, + consensus_registry_output: &ConsensusRegistryOutput, + ) -> anyhow::Result<()> { + self.l2.consensus_registry = Some(consensus_registry_output.consensus_registry_proxy); + Ok(()) + } + pub fn set_default_l2_upgrade( &mut self, default_upgrade_output: &DefaultL2UpgradeOutput, @@ -140,4 +150,5 @@ pub struct L1Contracts { pub struct L2Contracts { pub testnet_paymaster_addr: Address, pub default_l2_upgrader: Address, + pub consensus_registry: Option
, } diff --git a/zk_toolbox/crates/config/src/docker_compose.rs b/zk_toolbox/crates/config/src/docker_compose.rs new file mode 100644 index 000000000000..05c6e73eaea5 --- /dev/null +++ b/zk_toolbox/crates/config/src/docker_compose.rs @@ -0,0 +1,43 @@ +use std::collections::HashMap; + +use serde::{Deserialize, Serialize}; + +use crate::traits::ZkToolboxConfig; + +#[derive(Debug, Default, Serialize, Deserialize, Clone)] +pub struct DockerComposeConfig { + pub services: HashMap, + #[serde(skip_serializing_if = "Option::is_none")] + pub name: Option, + #[serde(flatten)] + pub other: serde_json::Value, +} + +#[derive(Debug, Serialize, Deserialize, Clone)] +pub struct DockerComposeService { + pub image: String, + #[serde(skip_serializing_if = "Option::is_none")] + pub platform: Option, + #[serde(skip_serializing_if = "Option::is_none")] + pub ports: Option>, + #[serde(skip_serializing_if = "Option::is_none")] + pub environment: Option>, + #[serde(skip_serializing_if = "Option::is_none")] + pub volumes: Option>, + #[serde(skip_serializing_if = "Option::is_none")] + pub depends_on: Option>, + #[serde(skip_serializing_if = "Option::is_none")] + pub restart: Option, + #[serde(skip_serializing_if = "Option::is_none")] + pub extra_hosts: Option>, + #[serde(flatten)] + pub other: serde_json::Value, +} + +impl ZkToolboxConfig for DockerComposeConfig {} + +impl DockerComposeConfig { + pub fn add_service(&mut self, name: &str, service: DockerComposeService) { + self.services.insert(name.to_string(), service); + } +} diff --git a/zk_toolbox/crates/config/src/explorer.rs b/zk_toolbox/crates/config/src/explorer.rs new file mode 100644 index 000000000000..ee7a59e5105c --- /dev/null +++ b/zk_toolbox/crates/config/src/explorer.rs @@ -0,0 +1,147 @@ +use std::path::{Path, PathBuf}; + +use serde::{Deserialize, Serialize}; +use xshell::Shell; + +use crate::{ + consts::{ + EXPLORER_CONFIG_FILE, EXPLORER_JS_CONFIG_FILE, LOCAL_APPS_PATH, LOCAL_CONFIGS_PATH, + LOCAL_GENERATED_PATH, + }, + traits::{ReadConfig, SaveConfig, ZkToolboxConfig}, +}; + +/// Explorer JSON configuration file. This file contains configuration for the explorer app. 
+#[derive(Serialize, Deserialize, Debug, Clone)] +#[serde(rename_all = "camelCase")] +pub struct ExplorerConfig { + pub app_environment: String, + pub environment_config: EnvironmentConfig, + #[serde(flatten)] + pub other: serde_json::Value, +} + +#[derive(Serialize, Deserialize, Debug, Clone)] +pub struct EnvironmentConfig { + pub networks: Vec, +} + +#[derive(Serialize, Deserialize, Debug, Clone)] +#[serde(rename_all = "camelCase")] +pub struct ExplorerChainConfig { + pub name: String, // L2 network chain name (the one used during the chain initialization) + pub l2_network_name: String, // How the network is displayed in the app dropdown + pub l2_chain_id: u64, + pub rpc_url: String, // L2 RPC URL + pub api_url: String, // L2 API URL + pub base_token_address: String, // L2 base token address (currently always 0x800A) + pub hostnames: Vec, // Custom domain to use when switched to this chain in the app + pub icon: String, // Icon to show in the explorer dropdown + pub maintenance: bool, // Maintenance warning + pub published: bool, // If false, the chain will not be shown in the explorer dropdown + #[serde(skip_serializing_if = "Option::is_none")] + pub bridge_url: Option, // Link to the portal bridge + #[serde(skip_serializing_if = "Option::is_none")] + pub l1_explorer_url: Option, + #[serde(skip_serializing_if = "Option::is_none")] + pub verification_api_url: Option, // L2 verification API URL + #[serde(flatten)] + pub other: serde_json::Value, +} + +impl ExplorerConfig { + /// Returns the path to the explorer configuration file. + pub fn get_config_path(ecosystem_base_path: &Path) -> PathBuf { + ecosystem_base_path + .join(LOCAL_CONFIGS_PATH) + .join(LOCAL_APPS_PATH) + .join(EXPLORER_CONFIG_FILE) + } + + /// Reads the existing config or creates a default one if it doesn't exist. + pub fn read_or_create_default(shell: &Shell) -> anyhow::Result { + let config_path = Self::get_config_path(&shell.current_dir()); + match Self::read(shell, &config_path) { + Ok(config) => Ok(config), + Err(_) => { + let config = Self::default(); + config.save(shell, &config_path)?; + Ok(config) + } + } + } + + /// Adds or updates a given chain configuration. + pub fn add_chain_config(&mut self, config: &ExplorerChainConfig) { + // Replace if config with the same network name already exists + if let Some(index) = self + .environment_config + .networks + .iter() + .position(|c| c.name == config.name) + { + self.environment_config.networks[index] = config.clone(); + return; + } + self.environment_config.networks.push(config.clone()); + } + + /// Retains only the chains whose names are present in the given vector. + pub fn filter(&mut self, chain_names: &[String]) { + self.environment_config + .networks + .retain(|config| chain_names.contains(&config.name)); + } + + /// Hides all chains except those specified in the given vector. + pub fn hide_except(&mut self, chain_names: &[String]) { + for network in &mut self.environment_config.networks { + network.published = chain_names.contains(&network.name); + } + } + + /// Checks if a chain with the given name exists in the configuration. + pub fn contains(&self, chain_name: &String) -> bool { + self.environment_config + .networks + .iter() + .any(|config| &config.name == chain_name) + } + + pub fn is_empty(&self) -> bool { + self.environment_config.networks.is_empty() + } + + pub fn save_as_js(&self, shell: &Shell) -> anyhow::Result { + // The block-explorer-app is served as a pre-built static app in a Docker image. 
+ // It uses a JavaScript file (config.js) that injects the configuration at runtime + // by overwriting the '##runtimeConfig' property of the window object. + // This file will be mounted to the Docker image when it runs. + let path = Self::get_generated_js_config_path(&shell.current_dir()); + let json = serde_json::to_string_pretty(&self)?; + let config_js_content = format!("window['##runtimeConfig'] = {};", json); + shell.write_file(path.clone(), config_js_content.as_bytes())?; + Ok(path) + } + + fn get_generated_js_config_path(ecosystem_base_path: &Path) -> PathBuf { + ecosystem_base_path + .join(LOCAL_CONFIGS_PATH) + .join(LOCAL_GENERATED_PATH) + .join(EXPLORER_JS_CONFIG_FILE) + } +} + +impl Default for ExplorerConfig { + fn default() -> Self { + ExplorerConfig { + app_environment: "default".to_string(), + environment_config: EnvironmentConfig { + networks: Vec::new(), + }, + other: serde_json::Value::Null, + } + } +} + +impl ZkToolboxConfig for ExplorerConfig {} diff --git a/zk_toolbox/crates/config/src/explorer_compose.rs b/zk_toolbox/crates/config/src/explorer_compose.rs new file mode 100644 index 000000000000..ca9abc1e3e23 --- /dev/null +++ b/zk_toolbox/crates/config/src/explorer_compose.rs @@ -0,0 +1,214 @@ +use std::{ + collections::HashMap, + path::{Path, PathBuf}, +}; + +use anyhow::Context; +use common::{db, docker::adjust_localhost_for_docker}; +use serde::{Deserialize, Serialize}; +use url::Url; + +use crate::{ + consts::{ + DEFAULT_EXPLORER_API_PORT, DEFAULT_EXPLORER_DATA_FETCHER_PORT, + DEFAULT_EXPLORER_WORKER_PORT, EXPLORER_API_DOCKER_IMAGE, + EXPLORER_DATA_FETCHER_DOCKER_IMAGE, EXPLORER_DOCKER_COMPOSE_FILE, + EXPLORER_WORKER_DOCKER_IMAGE, LOCAL_CHAINS_PATH, LOCAL_CONFIGS_PATH, + }, + docker_compose::{DockerComposeConfig, DockerComposeService}, + traits::ZkToolboxConfig, + EXPLORER_BATCHES_PROCESSING_POLLING_INTERVAL, +}; + +#[derive(Debug, Serialize, Deserialize, Clone)] +pub struct ExplorerBackendPorts { + pub api_http_port: u16, + pub data_fetcher_http_port: u16, + pub worker_http_port: u16, +} + +impl ExplorerBackendPorts { + pub fn with_offset(&self, offset: u16) -> Self { + ExplorerBackendPorts { + api_http_port: self.api_http_port + offset, + data_fetcher_http_port: self.data_fetcher_http_port + offset, + worker_http_port: self.worker_http_port + offset, + } + } +} + +impl Default for ExplorerBackendPorts { + fn default() -> Self { + ExplorerBackendPorts { + api_http_port: DEFAULT_EXPLORER_API_PORT, + data_fetcher_http_port: DEFAULT_EXPLORER_DATA_FETCHER_PORT, + worker_http_port: DEFAULT_EXPLORER_WORKER_PORT, + } + } +} + +#[derive(Debug, Serialize, Deserialize, Clone)] +pub struct ExplorerBackendConfig { + pub database_url: Url, + pub ports: ExplorerBackendPorts, + pub batches_processing_polling_interval: u64, +} + +impl ExplorerBackendConfig { + pub fn new(database_url: Url, ports: &ExplorerBackendPorts) -> Self { + ExplorerBackendConfig { + database_url, + ports: ports.clone(), + batches_processing_polling_interval: EXPLORER_BATCHES_PROCESSING_POLLING_INTERVAL, + } + } +} + +/// Chain-level explorer backend docker compose file. +/// It contains configuration for api, data fetcher, and worker services. 
+#[derive(Debug, Serialize, Deserialize, Clone)] +pub struct ExplorerBackendComposeConfig { + #[serde(flatten)] + pub docker_compose: DockerComposeConfig, +} + +impl ZkToolboxConfig for ExplorerBackendComposeConfig {} + +impl ExplorerBackendComposeConfig { + const API_NAME: &'static str = "api"; + const DATA_FETCHER_NAME: &'static str = "data-fetcher"; + const WORKER_NAME: &'static str = "worker"; + + pub fn new( + chain_name: &str, + l2_rpc_url: Url, + config: &ExplorerBackendConfig, + ) -> anyhow::Result { + let db_url = adjust_localhost_for_docker(config.database_url.clone())?; + let l2_rpc_url = adjust_localhost_for_docker(l2_rpc_url)?; + + let mut services: HashMap = HashMap::new(); + services.insert( + Self::API_NAME.to_string(), + Self::create_api_service(config.ports.api_http_port, db_url.as_ref()), + ); + services.insert( + Self::DATA_FETCHER_NAME.to_string(), + Self::create_data_fetcher_service( + config.ports.data_fetcher_http_port, + l2_rpc_url.as_ref(), + ), + ); + + let worker = Self::create_worker_service( + config.ports.worker_http_port, + config.ports.data_fetcher_http_port, + l2_rpc_url.as_ref(), + &db_url, + config.batches_processing_polling_interval, + ) + .context("Failed to create worker service")?; + services.insert(Self::WORKER_NAME.to_string(), worker); + + Ok(Self { + docker_compose: DockerComposeConfig { + name: Some(format!("{chain_name}-explorer")), + services, + other: serde_json::Value::Null, + }, + }) + } + + fn create_api_service(port: u16, db_url: &str) -> DockerComposeService { + DockerComposeService { + image: EXPLORER_API_DOCKER_IMAGE.to_string(), + platform: Some("linux/amd64".to_string()), + ports: Some(vec![format!("{}:{}", port, port)]), + volumes: None, + depends_on: Some(vec![Self::WORKER_NAME.to_string()]), + restart: None, + environment: Some(HashMap::from([ + ("PORT".to_string(), port.to_string()), + ("LOG_LEVEL".to_string(), "verbose".to_string()), + ("NODE_ENV".to_string(), "development".to_string()), + ("DATABASE_URL".to_string(), db_url.to_string()), + ])), + extra_hosts: Some(vec!["host.docker.internal:host-gateway".to_string()]), + other: serde_json::Value::Null, + } + } + + fn create_data_fetcher_service(port: u16, l2_rpc_url: &str) -> DockerComposeService { + DockerComposeService { + image: EXPLORER_DATA_FETCHER_DOCKER_IMAGE.to_string(), + platform: Some("linux/amd64".to_string()), + ports: Some(vec![format!("{}:{}", port, port)]), + volumes: None, + depends_on: None, + restart: None, + environment: Some(HashMap::from([ + ("PORT".to_string(), port.to_string()), + ("LOG_LEVEL".to_string(), "verbose".to_string()), + ("NODE_ENV".to_string(), "development".to_string()), + ("BLOCKCHAIN_RPC_URL".to_string(), l2_rpc_url.to_string()), + ])), + extra_hosts: Some(vec!["host.docker.internal:host-gateway".to_string()]), + other: serde_json::Value::Null, + } + } + + fn create_worker_service( + port: u16, + data_fetcher_port: u16, + l2_rpc_url: &str, + db_url: &Url, + batches_processing_polling_interval: u64, + ) -> anyhow::Result { + let data_fetcher_url = format!("http://{}:{}", Self::DATA_FETCHER_NAME, data_fetcher_port); + + // Parse database URL + let db_config = db::DatabaseConfig::from_url(db_url)?; + let db_user = db_url.username().to_string(); + let db_password = db_url.password().unwrap_or(""); + let db_port = db_url.port().unwrap_or(5432); + let db_host = db_url + .host_str() + .context("Failed to parse database host")? 
+ .to_string(); + + Ok(DockerComposeService { + image: EXPLORER_WORKER_DOCKER_IMAGE.to_string(), + platform: Some("linux/amd64".to_string()), + ports: None, + volumes: None, + depends_on: None, + restart: None, + environment: Some(HashMap::from([ + ("PORT".to_string(), port.to_string()), + ("LOG_LEVEL".to_string(), "verbose".to_string()), + ("NODE_ENV".to_string(), "development".to_string()), + ("DATABASE_HOST".to_string(), db_host.to_string()), + ("DATABASE_PORT".to_string(), db_port.to_string()), + ("DATABASE_USER".to_string(), db_user.to_string()), + ("DATABASE_PASSWORD".to_string(), db_password.to_string()), + ("DATABASE_NAME".to_string(), db_config.name.to_string()), + ("BLOCKCHAIN_RPC_URL".to_string(), l2_rpc_url.to_string()), + ("DATA_FETCHER_URL".to_string(), data_fetcher_url), + ( + "BATCHES_PROCESSING_POLLING_INTERVAL".to_string(), + batches_processing_polling_interval.to_string(), + ), + ])), + extra_hosts: Some(vec!["host.docker.internal:host-gateway".to_string()]), + other: serde_json::Value::Null, + }) + } + + pub fn get_config_path(ecosystem_base_path: &Path, chain_name: &str) -> PathBuf { + ecosystem_base_path + .join(LOCAL_CHAINS_PATH) + .join(chain_name) + .join(LOCAL_CONFIGS_PATH) + .join(EXPLORER_DOCKER_COMPOSE_FILE) + } +} diff --git a/zk_toolbox/crates/config/src/forge_interface/deploy_l2_contracts/input.rs b/zk_toolbox/crates/config/src/forge_interface/deploy_l2_contracts/input.rs index f48fd0ba2b5e..b20b58f99c58 100644 --- a/zk_toolbox/crates/config/src/forge_interface/deploy_l2_contracts/input.rs +++ b/zk_toolbox/crates/config/src/forge_interface/deploy_l2_contracts/input.rs @@ -6,6 +6,8 @@ use crate::{traits::ZkToolboxConfig, ChainConfig}; impl ZkToolboxConfig for DeployL2ContractsInput {} +/// Fields corresponding to `contracts/l1-contracts/deploy-script-config-template/config-deploy-l2-config.toml` +/// which are read by `contracts/l1-contracts/deploy-scripts/DeployL2Contracts.sol`. 
#[derive(Debug, Clone, Serialize, Deserialize)] pub struct DeployL2ContractsInput { pub era_chain_id: L2ChainId, @@ -14,6 +16,7 @@ pub struct DeployL2ContractsInput { pub bridgehub: Address, pub governance: Address, pub erc20_bridge: Address, + pub consensus_registry_owner: Address, } impl DeployL2ContractsInput { @@ -27,6 +30,7 @@ impl DeployL2ContractsInput { bridgehub: contracts.ecosystem_contracts.bridgehub_proxy_addr, governance: wallets.governor.address, erc20_bridge: contracts.bridges.erc20.l1_address, + consensus_registry_owner: wallets.governor.address, }) } } diff --git a/zk_toolbox/crates/config/src/forge_interface/deploy_l2_contracts/output.rs b/zk_toolbox/crates/config/src/forge_interface/deploy_l2_contracts/output.rs index 22f3dc9381b3..860e7e293f99 100644 --- a/zk_toolbox/crates/config/src/forge_interface/deploy_l2_contracts/output.rs +++ b/zk_toolbox/crates/config/src/forge_interface/deploy_l2_contracts/output.rs @@ -4,8 +4,8 @@ use serde::{Deserialize, Serialize}; use crate::traits::ZkToolboxConfig; impl ZkToolboxConfig for InitializeBridgeOutput {} - impl ZkToolboxConfig for DefaultL2UpgradeOutput {} +impl ZkToolboxConfig for ConsensusRegistryOutput {} #[derive(Debug, Clone, Serialize, Deserialize)] pub struct InitializeBridgeOutput { @@ -17,3 +17,9 @@ pub struct InitializeBridgeOutput { pub struct DefaultL2UpgradeOutput { pub l2_default_upgrader: Address, } + +#[derive(Debug, Clone, Serialize, Deserialize)] +pub struct ConsensusRegistryOutput { + pub consensus_registry_implementation: Address, + pub consensus_registry_proxy: Address, +} diff --git a/zk_toolbox/crates/config/src/general.rs b/zk_toolbox/crates/config/src/general.rs index 3426b21c6f6e..41c2e4c33cfd 100644 --- a/zk_toolbox/crates/config/src/general.rs +++ b/zk_toolbox/crates/config/src/general.rs @@ -9,7 +9,7 @@ use zksync_protobuf_config::{decode_yaml_repr, encode_yaml_repr}; use crate::{ consts::GENERAL_FILE, - traits::{FileConfigWithDefaultName, ReadConfig, SaveConfig}, + traits::{ConfigWithL2RpcUrl, FileConfigWithDefaultName, ReadConfig, SaveConfig}, }; pub struct RocksDbs { @@ -127,7 +127,7 @@ pub fn update_ports(config: &mut GeneralConfig, ports_config: &PortsConfig) -> a let prometheus = config .prometheus_config .as_mut() - .context("Contract Verifier config is not presented")?; + .context("Prometheus config is not presented")?; api.web3_json_rpc.http_port = ports_config.web3_json_rpc_http_port; update_port_in_url( @@ -211,3 +211,14 @@ impl ReadConfig for GeneralConfig { decode_yaml_repr::(&path, false) } } + +impl ConfigWithL2RpcUrl for GeneralConfig { + fn get_l2_rpc_url(&self) -> anyhow::Result { + self.api_config + .as_ref() + .map(|api_config| &api_config.web3_json_rpc.http_url) + .context("API config is missing")? 
+ .parse() + .context("Failed to parse L2 RPC URL") + } +} diff --git a/zk_toolbox/crates/config/src/lib.rs b/zk_toolbox/crates/config/src/lib.rs index 4e00962229bc..3c7443f24490 100644 --- a/zk_toolbox/crates/config/src/lib.rs +++ b/zk_toolbox/crates/config/src/lib.rs @@ -1,3 +1,4 @@ +pub use apps::*; pub use chain::*; pub use consts::*; pub use contracts::*; @@ -11,6 +12,7 @@ pub use wallet_creation::*; pub use wallets::*; pub use zksync_protobuf_config::{decode_yaml_repr, encode_yaml_repr}; +mod apps; mod chain; mod consts; mod contracts; @@ -23,6 +25,9 @@ mod secrets; mod wallet_creation; mod wallets; +pub mod docker_compose; +pub mod explorer; +pub mod explorer_compose; pub mod external_node; pub mod forge_interface; pub mod portal; diff --git a/zk_toolbox/crates/config/src/portal.rs b/zk_toolbox/crates/config/src/portal.rs index 4b68d5744cd9..c787c6cc7026 100644 --- a/zk_toolbox/crates/config/src/portal.rs +++ b/zk_toolbox/crates/config/src/portal.rs @@ -5,28 +5,25 @@ use types::TokenInfo; use xshell::Shell; use crate::{ - consts::{LOCAL_CONFIGS_PATH, PORTAL_CONFIG_FILE}, - traits::{FileConfigWithDefaultName, ReadConfig, SaveConfig}, + consts::{ + LOCAL_APPS_PATH, LOCAL_CONFIGS_PATH, LOCAL_GENERATED_PATH, PORTAL_CONFIG_FILE, + PORTAL_JS_CONFIG_FILE, + }, + traits::{ReadConfig, SaveConfig, ZkToolboxConfig}, }; +/// Portal JSON configuration file. This file contains configuration for the portal app. #[derive(Serialize, Deserialize, Debug, Clone)] #[serde(rename_all = "camelCase")] -pub struct PortalRuntimeConfig { +pub struct PortalConfig { pub node_type: String, - pub hyperchains_config: HyperchainsConfig, + pub hyperchains_config: Vec, + #[serde(flatten)] + pub other: serde_json::Value, } #[derive(Serialize, Deserialize, Debug, Clone)] -pub struct HyperchainsConfig(pub Vec); - -impl HyperchainsConfig { - pub fn is_empty(&self) -> bool { - self.0.is_empty() - } -} - -#[derive(Serialize, Deserialize, Debug, Clone)] -pub struct HyperchainConfig { +pub struct PortalChainConfig { pub network: NetworkConfig, pub tokens: Vec, } @@ -35,10 +32,12 @@ pub struct HyperchainConfig { #[serde(rename_all = "camelCase")] pub struct NetworkConfig { pub id: u64, // L2 Network ID - pub key: String, // L2 Network key - pub name: String, // L2 Network name + pub key: String, // L2 Network key (chain name used during the initialization) + pub name: String, // L2 Network name (displayed in the app dropdown) pub rpc_url: String, // L2 RPC URL #[serde(skip_serializing_if = "Option::is_none")] + pub hidden: Option, // If true, the chain will not be shown in the app dropdown + #[serde(skip_serializing_if = "Option::is_none")] pub block_explorer_url: Option, // L2 Block Explorer URL #[serde(skip_serializing_if = "Option::is_none")] pub block_explorer_api: Option, // L2 Block Explorer API @@ -46,6 +45,8 @@ pub struct NetworkConfig { pub public_l1_network_id: Option, // Ethereum Mainnet or Ethereum Sepolia Testnet ID #[serde(skip_serializing_if = "Option::is_none")] pub l1_network: Option, + #[serde(flatten)] + pub other: serde_json::Value, } #[derive(Serialize, Deserialize, Debug, Clone)] @@ -81,44 +82,94 @@ pub struct TokenConfig { pub name: Option, } -impl PortalRuntimeConfig { +impl PortalConfig { + /// Returns the path to the portal configuration file. 
pub fn get_config_path(ecosystem_base_path: &Path) -> PathBuf { ecosystem_base_path .join(LOCAL_CONFIGS_PATH) + .join(LOCAL_APPS_PATH) .join(PORTAL_CONFIG_FILE) } -} -impl FileConfigWithDefaultName for PortalRuntimeConfig { - const FILE_NAME: &'static str = PORTAL_CONFIG_FILE; -} + /// Reads the existing config or creates a default one if it doesn't exist. + pub fn read_or_create_default(shell: &Shell) -> anyhow::Result { + let config_path = Self::get_config_path(&shell.current_dir()); + match Self::read(shell, &config_path) { + Ok(config) => Ok(config), + Err(_) => { + let config = Self::default(); + config.save(shell, &config_path)?; + Ok(config) + } + } + } + + /// Adds or updates a given chain configuration. + pub fn add_chain_config(&mut self, config: &PortalChainConfig) { + // Replace if config with the same network key already exists + if let Some(index) = self + .hyperchains_config + .iter() + .position(|c| c.network.key == config.network.key) + { + self.hyperchains_config[index] = config.clone(); + return; + } + self.hyperchains_config.push(config.clone()); + } + + /// Retains only the chains whose names are present in the given vector. + pub fn filter(&mut self, chain_names: &[String]) { + self.hyperchains_config + .retain(|config| chain_names.contains(&config.network.key)); + } + + /// Hides all chains except those specified in the given vector. + pub fn hide_except(&mut self, chain_names: &[String]) { + for config in &mut self.hyperchains_config { + config.network.hidden = Some(!chain_names.contains(&config.network.key)); + } + } -impl SaveConfig for PortalRuntimeConfig { - fn save(&self, shell: &Shell, path: impl AsRef) -> anyhow::Result<()> { + /// Checks if a chain with the given name exists in the configuration. + pub fn contains(&self, chain_name: &String) -> bool { + self.hyperchains_config + .iter() + .any(|config| &config.network.key == chain_name) + } + + pub fn is_empty(&self) -> bool { + self.hyperchains_config.is_empty() + } + + pub fn save_as_js(&self, shell: &Shell) -> anyhow::Result { // The dapp-portal is served as a pre-built static app in a Docker image. // It uses a JavaScript file (config.js) that injects the configuration at runtime // by overwriting the '##runtimeConfig' property of the window object. - // Therefore, we generate a JavaScript file instead of a JSON file. // This file will be mounted to the Docker image when it runs. + let path = Self::get_generated_js_config_path(&shell.current_dir()); let json = serde_json::to_string_pretty(&self)?; let config_js_content = format!("window['##runtimeConfig'] = {};", json); - Ok(shell.write_file(path, config_js_content.as_bytes())?) 
+ shell.write_file(path.clone(), config_js_content.as_bytes())?; + Ok(path) + } + + fn get_generated_js_config_path(ecosystem_base_path: &Path) -> PathBuf { + ecosystem_base_path + .join(LOCAL_CONFIGS_PATH) + .join(LOCAL_GENERATED_PATH) + .join(PORTAL_JS_CONFIG_FILE) + } } -impl ReadConfig for PortalRuntimeConfig { - fn read(shell: &Shell, path: impl AsRef<Path>) -> anyhow::Result<Self> { - let config_js_content = shell.read_file(path)?; - // Extract the JSON part from the JavaScript file - let json_start = config_js_content - .find('{') - .ok_or_else(|| anyhow::anyhow!("Invalid config file format"))?; - let json_end = config_js_content - .rfind('}') - .ok_or_else(|| anyhow::anyhow!("Invalid config file format"))?; - let json_str = &config_js_content[json_start..=json_end]; - // Parse the JSON into PortalRuntimeConfig - let config: PortalRuntimeConfig = serde_json::from_str(json_str)?; - Ok(config) - } -} +impl Default for PortalConfig { + fn default() -> Self { + PortalConfig { + node_type: "hyperchain".to_string(), + hyperchains_config: Vec::new(), + other: serde_json::Value::Null, + } } } + +impl ZkToolboxConfig for PortalConfig {} diff --git a/zk_toolbox/crates/config/src/traits.rs b/zk_toolbox/crates/config/src/traits.rs index 1f00b39b040a..bb0722762e31 100644 --- a/zk_toolbox/crates/config/src/traits.rs +++ b/zk_toolbox/crates/config/src/traits.rs @@ -5,6 +5,7 @@ use common::files::{ read_json_file, read_toml_file, read_yaml_file, save_json_file, save_toml_file, save_yaml_file, }; use serde::{de::DeserializeOwned, Serialize}; +use url::Url; use xshell::Shell; // Configs that we use only inside zk toolbox, we don't have protobuf implementation for them. @@ -156,3 +157,7 @@ fn save_with_comment( } Ok(()) } + +pub trait ConfigWithL2RpcUrl { + fn get_l2_rpc_url(&self) -> anyhow::Result<Url>; +} diff --git a/zk_toolbox/crates/zk_inception/README.md b/zk_toolbox/crates/zk_inception/README.md index 037a7e3fc925..904b1421e3a0 100644 --- a/zk_toolbox/crates/zk_inception/README.md +++ b/zk_toolbox/crates/zk_inception/README.md @@ -21,7 +21,7 @@ This document contains the help content for the `zk_inception` command-line prog - [`zk_inception chain update-token-multiplier-setter`↴](#zk_inception-chain-update-token-multiplier-setter) - [`zk_inception prover`↴](#zk_inception-prover) - [`zk_inception prover init`↴](#zk_inception-prover-init) -- [`zk_inception prover generate-sk`↴](#zk_inception-prover-generate-sk) +- [`zk_inception prover setup-keys`↴](#zk_inception-prover-setup-keys) - [`zk_inception prover run`↴](#zk_inception-prover-run) - [`zk_inception prover init-bellman-cuda`↴](#zk_inception-prover-init-bellman-cuda) - [`zk_inception server`↴](#zk_inception-server) @@ -475,11 +475,21 @@ Initialize prover Possible values: `gcp`, `local` -## `zk_inception prover generate-sk` +## `zk_inception prover setup-keys` -Generate setup keys +Setup keys -**Usage:** `zk_inception prover generate-sk` +**Usage:** `zk_inception prover setup-keys` + +###### **Options:** + +- `--mode` + + Possible values: `download`, `generate` + +- `--region` + + Possible values: `asia`, `europe`, `us` ## `zk_inception prover run` @@ -494,12 +504,19 @@ Run prover Possible values: `gateway`, `witness-generator`, `witness-vector-generator`, `prover`, `compressor`, `prover-job-monitor` +- `--docker` - Whether to run the image of the component instead of the binary.
+ + Possible values: `true`, `false` + - `--round <ROUND>` Possible values: `all-rounds`, `basic-circuits`, `leaf-aggregation`, `node-aggregation`, `recursion-tip`, `scheduler` - `--threads <THREADS>` +- `--max-allocation <MAX_ALLOCATION>` - When running the prover component, this value limits its maximum + memory allocation in bytes. + ## `zk_inception prover init-bellman-cuda` Initialize bellman-cuda diff --git a/zk_toolbox/crates/zk_inception/src/commands/args/mod.rs b/zk_toolbox/crates/zk_inception/src/commands/args/mod.rs index a27b653edf52..d18b05c910e5 100644 --- a/zk_toolbox/crates/zk_inception/src/commands/args/mod.rs +++ b/zk_toolbox/crates/zk_inception/src/commands/args/mod.rs @@ -1,9 +1,7 @@ pub use containers::*; -pub use portal::*; pub use run_server::*; pub use update::*; mod containers; -mod portal; mod run_server; mod update; diff --git a/zk_toolbox/crates/zk_inception/src/commands/args/portal.rs b/zk_toolbox/crates/zk_inception/src/commands/args/portal.rs deleted file mode 100644 index e31058aad5d0..000000000000 --- a/zk_toolbox/crates/zk_inception/src/commands/args/portal.rs +++ /dev/null @@ -1,12 +0,0 @@ -use clap::Parser; -use serde::{Deserialize, Serialize}; - -#[derive(Debug, Serialize, Deserialize, Parser)] -pub struct PortalArgs { - #[clap( - long, - default_value = "3030", - help = "The port number for the portal app" - )] - pub port: u16, -} diff --git a/zk_toolbox/crates/zk_inception/src/commands/chain/args/init.rs b/zk_toolbox/crates/zk_inception/src/commands/chain/args/init.rs index 2253eeb314ef..9dd6c490bd78 100644 --- a/zk_toolbox/crates/zk_inception/src/commands/chain/args/init.rs +++ b/zk_toolbox/crates/zk_inception/src/commands/chain/args/init.rs @@ -22,7 +22,7 @@ pub struct PortOffset(u16); impl PortOffset { pub fn from_chain_id(chain_id: u16) -> Self { - Self(chain_id * 100) + Self((chain_id - 1) * 100) } } @@ -88,7 +88,7 @@ impl InitArgs { l1_rpc_url, port_offset: self .port_offset - .unwrap_or(PortOffset::from_chain_id(config.chain_id.as_u64() as u16)) + .unwrap_or(PortOffset::from_chain_id(config.id as u16)) .into(), } } diff --git a/zk_toolbox/crates/zk_inception/src/commands/chain/deploy_l2_contracts.rs b/zk_toolbox/crates/zk_inception/src/commands/chain/deploy_l2_contracts.rs index 30f361e44af2..3625abfb15a9 100644 --- a/zk_toolbox/crates/zk_inception/src/commands/chain/deploy_l2_contracts.rs +++ b/zk_toolbox/crates/zk_inception/src/commands/chain/deploy_l2_contracts.rs @@ -11,7 +11,7 @@ use config::{ forge_interface::{ deploy_l2_contracts::{ input::DeployL2ContractsInput, - output::{DefaultL2UpgradeOutput, InitializeBridgeOutput}, + output::{ConsensusRegistryOutput, DefaultL2UpgradeOutput, InitializeBridgeOutput}, }, script_params::DEPLOY_L2_CONTRACTS_SCRIPT_PARAMS, }, @@ -31,7 +31,8 @@ use crate::{ pub enum Deploy2ContractsOption { All, Upgrader, - IntiailizeBridges, + InitiailizeBridges, + ConsensusRegistry, } pub async fn run( @@ -70,7 +71,17 @@ pub async fn run( ) .await?; } - Deploy2ContractsOption::IntiailizeBridges => { + Deploy2ContractsOption::ConsensusRegistry => { + deploy_consensus_registry( + shell, + &chain_config, + &ecosystem_config, + &mut contracts, + args, + ) + .await?; + } + Deploy2ContractsOption::InitiailizeBridges => { initialize_bridges( shell, &chain_config, @@ -88,6 +99,25 @@ pub async fn run( Ok(()) } +/// Build the L2 contracts, deploy one or all of them with `forge`, then update the config +/// by reading one or all outputs written by the deploy scripts.
+async fn build_and_deploy(
+    shell: &Shell,
+    chain_config: &ChainConfig,
+    ecosystem_config: &EcosystemConfig,
+    forge_args: ForgeScriptArgs,
+    signature: Option<&str>,
+    mut update_config: impl FnMut(&Shell, &Path) -> anyhow::Result<()>,
+) -> anyhow::Result<()> {
+    build_l2_contracts(shell, &ecosystem_config.link_to_code)?;
+    call_forge(shell, chain_config, ecosystem_config, forge_args, signature).await?;
+    update_config(
+        shell,
+        &DEPLOY_L2_CONTRACTS_SCRIPT_PARAMS.output(&chain_config.link_to_code),
+    )?;
+    Ok(())
+}
+
 pub async fn initialize_bridges(
     shell: &Shell,
     chain_config: &ChainConfig,
@@ -95,22 +125,17 @@ pub async fn initialize_bridges(
     contracts_config: &mut ContractsConfig,
     forge_args: ForgeScriptArgs,
 ) -> anyhow::Result<()> {
-    build_l2_contracts(shell, &ecosystem_config.link_to_code)?;
-    call_forge(
+    build_and_deploy(
         shell,
         chain_config,
         ecosystem_config,
         forge_args,
         Some("runDeploySharedBridge"),
+        |shell, out| {
+            contracts_config.set_l2_shared_bridge(&InitializeBridgeOutput::read(shell, out)?)
+        },
     )
-    .await?;
-    let output = InitializeBridgeOutput::read(
-        shell,
-        DEPLOY_L2_CONTRACTS_SCRIPT_PARAMS.output(&chain_config.link_to_code),
-    )?;
-
-    contracts_config.set_l2_shared_bridge(&output)?;
-    Ok(())
+    .await
 }
 
 pub async fn deploy_upgrader(
@@ -120,48 +145,60 @@ pub async fn deploy_upgrader(
     contracts_config: &mut ContractsConfig,
     forge_args: ForgeScriptArgs,
 ) -> anyhow::Result<()> {
-    build_l2_contracts(shell, &ecosystem_config.link_to_code)?;
-    call_forge(
+    build_and_deploy(
         shell,
         chain_config,
         ecosystem_config,
         forge_args,
         Some("runDefaultUpgrader"),
+        |shell, out| {
+            contracts_config.set_default_l2_upgrade(&DefaultL2UpgradeOutput::read(shell, out)?)
+        },
     )
-    .await?;
-    let output = DefaultL2UpgradeOutput::read(
-        shell,
-        DEPLOY_L2_CONTRACTS_SCRIPT_PARAMS.output(&chain_config.link_to_code),
-    )?;
-
-    contracts_config.set_default_l2_upgrade(&output)?;
-    Ok(())
+    .await
 }
 
-pub async fn deploy_l2_contracts(
+pub async fn deploy_consensus_registry(
     shell: &Shell,
     chain_config: &ChainConfig,
     ecosystem_config: &EcosystemConfig,
     contracts_config: &mut ContractsConfig,
     forge_args: ForgeScriptArgs,
 ) -> anyhow::Result<()> {
-    build_l2_contracts(shell, &ecosystem_config.link_to_code)?;
-    call_forge(shell, chain_config, ecosystem_config, forge_args, None).await?;
-    let output = InitializeBridgeOutput::read(
+    build_and_deploy(
         shell,
-        DEPLOY_L2_CONTRACTS_SCRIPT_PARAMS.output(&chain_config.link_to_code),
-    )?;
-
-    contracts_config.set_l2_shared_bridge(&output)?;
+        chain_config,
+        ecosystem_config,
+        forge_args,
+        Some("runDeployConsensusRegistry"),
+        |shell, out| {
+            contracts_config.set_consensus_registry(&ConsensusRegistryOutput::read(shell, out)?)
+        },
+    )
+    .await
+}
 
-    let output = DefaultL2UpgradeOutput::read(
+pub async fn deploy_l2_contracts(
+    shell: &Shell,
+    chain_config: &ChainConfig,
+    ecosystem_config: &EcosystemConfig,
+    contracts_config: &mut ContractsConfig,
+    forge_args: ForgeScriptArgs,
+) -> anyhow::Result<()> {
+    build_and_deploy(
         shell,
-        DEPLOY_L2_CONTRACTS_SCRIPT_PARAMS.output(&chain_config.link_to_code),
-    )?;
-
-    contracts_config.set_default_l2_upgrade(&output)?;
-
-    Ok(())
+        chain_config,
+        ecosystem_config,
+        forge_args,
+        None,
+        |shell, out| {
+            contracts_config.set_l2_shared_bridge(&InitializeBridgeOutput::read(shell, out)?)?;
+            contracts_config.set_default_l2_upgrade(&DefaultL2UpgradeOutput::read(shell, out)?)?;
+            contracts_config.set_consensus_registry(&ConsensusRegistryOutput::read(shell, out)?)?;
+            Ok(())
+        },
+    )
+    .await
 }
 
 async fn call_forge(
diff --git a/zk_toolbox/crates/zk_inception/src/commands/chain/init.rs b/zk_toolbox/crates/zk_inception/src/commands/chain/init.rs
index 921eeaa98af8..793fbbf31aee 100644
--- a/zk_toolbox/crates/zk_inception/src/commands/chain/init.rs
+++ b/zk_toolbox/crates/zk_inception/src/commands/chain/init.rs
@@ -28,7 +28,7 @@ use crate::{
         genesis::genesis,
         set_token_multiplier_setter::set_token_multiplier_setter,
     },
-    portal::create_and_save_portal_config,
+    portal::update_portal_config,
 },
     consts::AMOUNT_FOR_DISTRIBUTION_TO_WALLETS,
     messages::{
@@ -154,7 +154,7 @@ pub async fn init(
         .await
         .context(MSG_GENESIS_DATABASE_ERR)?;
 
-    create_and_save_portal_config(ecosystem_config, shell)
+    update_portal_config(shell, chain_config)
         .await
         .context(MSG_PORTAL_FAILED_TO_CREATE_CONFIG_ERR)?;
diff --git a/zk_toolbox/crates/zk_inception/src/commands/chain/mod.rs b/zk_toolbox/crates/zk_inception/src/commands/chain/mod.rs
index dbddc923336a..afc92d2288bf 100644
--- a/zk_toolbox/crates/zk_inception/src/commands/chain/mod.rs
+++ b/zk_toolbox/crates/zk_inception/src/commands/chain/mod.rs
@@ -31,6 +31,9 @@ pub enum ChainCommands {
     /// Deploy all l2 contracts
     #[command(alias = "l2")]
     DeployL2Contracts(ForgeScriptArgs),
+    /// Deploy L2 consensus registry
+    #[command(alias = "consensus")]
+    DeployConsensusRegistry(ForgeScriptArgs),
     /// Deploy Default Upgrader
     Upgrader(ForgeScriptArgs),
     /// Deploy paymaster smart contract
@@ -48,11 +51,14 @@ pub(crate) async fn run(shell: &Shell, args: ChainCommands) -> anyhow::Result<()> {
         ChainCommands::DeployL2Contracts(args) => {
             deploy_l2_contracts::run(args, shell, Deploy2ContractsOption::All).await
         }
+        ChainCommands::DeployConsensusRegistry(args) => {
+            deploy_l2_contracts::run(args, shell, Deploy2ContractsOption::ConsensusRegistry).await
+        }
         ChainCommands::Upgrader(args) => {
             deploy_l2_contracts::run(args, shell, Deploy2ContractsOption::Upgrader).await
         }
         ChainCommands::InitializeBridges(args) => {
-            deploy_l2_contracts::run(args, shell, Deploy2ContractsOption::IntiailizeBridges).await
+            deploy_l2_contracts::run(args, shell, Deploy2ContractsOption::InitiailizeBridges).await
         }
         ChainCommands::DeployPaymaster(args) => deploy_paymaster::run(args, shell).await,
         ChainCommands::UpdateTokenMultiplierSetter(args) => {
diff --git a/zk_toolbox/crates/zk_inception/src/commands/containers.rs b/zk_toolbox/crates/zk_inception/src/commands/containers.rs
index 17c32c04bc2f..81d7970df839 100644
--- a/zk_toolbox/crates/zk_inception/src/commands/containers.rs
+++ b/zk_toolbox/crates/zk_inception/src/commands/containers.rs
@@ -40,7 +40,7 @@ pub fn initialize_docker(shell: &Shell, ecosystem: &EcosystemConfig) -> anyhow::
 }
 
 fn start_container(shell: &Shell, compose_file: &str, retry_msg: &str) -> anyhow::Result<()> {
-    while let Err(err) = docker::up(shell, compose_file) {
+    while let Err(err) = docker::up(shell, compose_file, true) {
         logger::error(err.to_string());
         if !common::PromptConfirm::new(retry_msg).default(true).ask() {
             return Err(err);
diff --git a/zk_toolbox/crates/zk_inception/src/commands/ecosystem/create.rs b/zk_toolbox/crates/zk_inception/src/commands/ecosystem/create.rs
index f9940c8a9798..356b5322980f 100644
--- a/zk_toolbox/crates/zk_inception/src/commands/ecosystem/create.rs
+++ b/zk_toolbox/crates/zk_inception/src/commands/ecosystem/create.rs
@@ -15,7 +15,10 @@ use crate::{
         containers::{initialize_docker, start_containers},
         ecosystem::{
             args::create::EcosystemCreateArgs,
-            create_configs::{create_erc20_deployment_config, create_initial_deployments_config},
+            create_configs::{
+                create_apps_config, create_erc20_deployment_config,
+                create_initial_deployments_config,
+            },
         },
     },
     messages::{
@@ -75,6 +78,7 @@ fn create(args: EcosystemCreateArgs, shell: &Shell) -> anyhow::Result<()> {
 
     create_initial_deployments_config(shell, &configs_path)?;
     create_erc20_deployment_config(shell, &configs_path)?;
+    create_apps_config(shell, &configs_path)?;
 
     let ecosystem_config = EcosystemConfig {
         name: ecosystem_name.clone(),
diff --git a/zk_toolbox/crates/zk_inception/src/commands/ecosystem/create_configs.rs b/zk_toolbox/crates/zk_inception/src/commands/ecosystem/create_configs.rs
index b4f42313e3d0..38358355ff97 100644
--- a/zk_toolbox/crates/zk_inception/src/commands/ecosystem/create_configs.rs
+++ b/zk_toolbox/crates/zk_inception/src/commands/ecosystem/create_configs.rs
@@ -2,7 +2,8 @@ use std::path::Path;
 
 use config::{
     forge_interface::deploy_ecosystem::input::{Erc20DeploymentConfig, InitialDeploymentConfig},
-    traits::SaveConfigWithCommentAndBasePath,
+    traits::{SaveConfigWithBasePath, SaveConfigWithCommentAndBasePath},
+    AppsEcosystemConfig,
 };
 use xshell::Shell;
 
@@ -33,3 +34,12 @@ pub fn create_erc20_deployment_config(
     )?;
     Ok(config)
 }
+
+pub fn create_apps_config(
+    shell: &Shell,
+    ecosystem_configs_path: &Path,
+) -> anyhow::Result<AppsEcosystemConfig> {
+    let config = AppsEcosystemConfig::default();
+    config.save_with_base_path(shell, ecosystem_configs_path)?;
+    Ok(config)
+}
diff --git a/zk_toolbox/crates/zk_inception/src/commands/explorer/backend.rs b/zk_toolbox/crates/zk_inception/src/commands/explorer/backend.rs
new file mode 100644
index 000000000000..6fdd3faa9807
--- /dev/null
+++ b/zk_toolbox/crates/zk_inception/src/commands/explorer/backend.rs
@@ -0,0 +1,39 @@
+use std::path::Path;
+
+use anyhow::Context;
+use common::{config::global_config, docker};
+use config::{explorer_compose::ExplorerBackendComposeConfig, EcosystemConfig};
+use xshell::Shell;
+
+use crate::messages::{
+    msg_explorer_chain_not_initialized, MSG_CHAIN_NOT_FOUND_ERR,
+    MSG_EXPLORER_FAILED_TO_RUN_DOCKER_SERVICES_ERR,
+};
+
+pub(crate) fn run(shell: &Shell) -> anyhow::Result<()> {
+    let ecosystem_config = EcosystemConfig::from_file(shell)?;
+    let chain_config = ecosystem_config
+        .load_chain(global_config().chain_name.clone())
+        .context(MSG_CHAIN_NOT_FOUND_ERR)?;
+    let chain_name = chain_config.name.clone();
+    // Read chain-level explorer backend docker compose file
+    let ecosystem_path = shell.current_dir();
+    let backend_config_path =
+        ExplorerBackendComposeConfig::get_config_path(&ecosystem_path, &chain_config.name);
+    if !backend_config_path.exists() {
+        anyhow::bail!(msg_explorer_chain_not_initialized(&chain_name));
+    }
+    // Run docker compose
+    run_backend(shell, &backend_config_path)?;
+    Ok(())
+}
+
+fn run_backend(shell: &Shell, explorer_compose_config_path: &Path) -> anyhow::Result<()> {
+    if let Some(docker_compose_file) = explorer_compose_config_path.to_str() {
+        docker::up(shell, docker_compose_file, false)
+            .context(MSG_EXPLORER_FAILED_TO_RUN_DOCKER_SERVICES_ERR)?;
+    } else {
+        anyhow::bail!("Invalid docker compose file");
+    }
+    Ok(())
+}
diff --git a/zk_toolbox/crates/zk_inception/src/commands/explorer/init.rs b/zk_toolbox/crates/zk_inception/src/commands/explorer/init.rs
new file mode 100644
index 000000000000..43700d91a0df
--- /dev/null
+++ b/zk_toolbox/crates/zk_inception/src/commands/explorer/init.rs
@@ -0,0 +1,135 @@
+use anyhow::Context;
+use common::{config::global_config, db, logger, Prompt};
+use config::{
+    explorer::{ExplorerChainConfig, ExplorerConfig},
+    explorer_compose::{ExplorerBackendComposeConfig, ExplorerBackendConfig, ExplorerBackendPorts},
+    traits::{ConfigWithL2RpcUrl, SaveConfig},
+    ChainConfig, EcosystemConfig,
+};
+use slugify_rs::slugify;
+use url::Url;
+use xshell::Shell;
+
+use crate::{
+    commands::chain::args::init::PortOffset,
+    consts::L2_BASE_TOKEN_ADDRESS,
+    defaults::{generate_explorer_db_name, DATABASE_EXPLORER_URL},
+    messages::{
+        msg_chain_load_err, msg_explorer_db_name_prompt, msg_explorer_db_url_prompt,
+        msg_explorer_initializing_database_for, MSG_EXPLORER_FAILED_TO_DROP_DATABASE_ERR,
+        MSG_EXPLORER_INITIALIZED,
+    },
+};
+
+pub(crate) async fn run(shell: &Shell) -> anyhow::Result<()> {
+    let ecosystem_config = EcosystemConfig::from_file(shell)?;
+    // If specific chain is provided, initialize only that chain; otherwise, initialize all chains
+    let chains_enabled = match global_config().chain_name {
+        Some(ref chain_name) => vec![chain_name.clone()],
+        None => ecosystem_config.list_of_chains(),
+    };
+    // Initialize chains one by one
+    let mut explorer_config = ExplorerConfig::read_or_create_default(shell)?;
+    for chain_name in chains_enabled.iter() {
+        // Load chain config
+        let chain_config = ecosystem_config
+            .load_chain(Some(chain_name.clone()))
+            .context(msg_chain_load_err(chain_name))?;
+        // Build backend config - parameters required to create explorer backend services
+        let backend_config = build_backend_config(&chain_config);
+        // Initialize explorer database
+        initialize_explorer_database(&backend_config.database_url).await?;
+        // Create explorer backend docker compose file
+        let l2_rpc_url = chain_config.get_general_config()?.get_l2_rpc_url()?;
+        let backend_compose_config =
+            ExplorerBackendComposeConfig::new(chain_name, l2_rpc_url, &backend_config)?;
+        let backend_compose_config_path =
+            ExplorerBackendComposeConfig::get_config_path(&shell.current_dir(), chain_name);
+        backend_compose_config.save(shell, &backend_compose_config_path)?;
+        // Add chain to explorer.json
+        let explorer_chain_config = build_explorer_chain_config(&chain_config, &backend_config)?;
+        explorer_config.add_chain_config(&explorer_chain_config);
+    }
+    // Save explorer config
+    let config_path = ExplorerConfig::get_config_path(&shell.current_dir());
+    explorer_config.save(shell, config_path)?;
+
+    logger::outro(MSG_EXPLORER_INITIALIZED);
+    Ok(())
+}
+
+fn build_backend_config(chain_config: &ChainConfig) -> ExplorerBackendConfig {
+    // Prompt explorer database name
+    logger::info(msg_explorer_initializing_database_for(&chain_config.name));
+    let db_config = fill_database_values_with_prompt(chain_config);
+
+    // Allocate ports for backend services
+    let backend_ports = allocate_explorer_services_ports(chain_config);
+
+    // Build explorer backend config
+    ExplorerBackendConfig::new(db_config.full_url(), &backend_ports)
+}
+
+async fn initialize_explorer_database(db_url: &Url) -> anyhow::Result<()> {
+    let db_config = db::DatabaseConfig::from_url(db_url)?;
+    db::drop_db_if_exists(&db_config)
+        .await
+        .context(MSG_EXPLORER_FAILED_TO_DROP_DATABASE_ERR)?;
+    db::init_db(&db_config).await?;
+    Ok(())
+}
+
+fn fill_database_values_with_prompt(config: &ChainConfig) -> db::DatabaseConfig {
+    let default_db_name: String = generate_explorer_db_name(config);
+    let chain_name = config.name.clone();
+    let explorer_db_url = Prompt::new(&msg_explorer_db_url_prompt(&chain_name))
+        .default(DATABASE_EXPLORER_URL.as_str())
+        .ask();
+    let explorer_db_name: String = Prompt::new(&msg_explorer_db_name_prompt(&chain_name))
+        .default(&default_db_name)
+        .ask();
+    let explorer_db_name = slugify!(&explorer_db_name, separator = "_");
+    db::DatabaseConfig::new(explorer_db_url, explorer_db_name)
+}
+
+fn allocate_explorer_services_ports(chain_config: &ChainConfig) -> ExplorerBackendPorts {
+    // Try to allocate intuitive ports with an offset from the defaults
+    let offset: u16 = PortOffset::from_chain_id(chain_config.id as u16).into();
+    ExplorerBackendPorts::default().with_offset(offset)
+}
+
+fn build_explorer_chain_config(
+    chain_config: &ChainConfig,
+    backend_config: &ExplorerBackendConfig,
+) -> anyhow::Result<ExplorerChainConfig> {
+    let general_config = chain_config.get_general_config()?;
+    // Get L2 RPC URL from general config
+    let l2_rpc_url = general_config.get_l2_rpc_url()?;
+    // Get Verification API URL from general config
+    let verification_api_url = general_config
+        .contract_verifier
+        .as_ref()
+        .map(|verifier| &verifier.url)
+        .context("verification_url")?;
+    // Build API URL
+    let api_port = backend_config.ports.api_http_port;
+    let api_url = format!("http://127.0.0.1:{}", api_port);
+
+    // Build explorer chain config
+    Ok(ExplorerChainConfig {
+        name: chain_config.name.clone(),
+        l2_network_name: chain_config.name.clone(),
+        l2_chain_id: chain_config.chain_id.as_u64(),
+        rpc_url: l2_rpc_url.to_string(),
+        api_url: api_url.to_string(),
+        base_token_address: L2_BASE_TOKEN_ADDRESS.to_string(),
+        hostnames: Vec::new(),
+        icon: "/images/icons/zksync-arrows.svg".to_string(),
+        maintenance: false,
+        published: true,
+        bridge_url: None,
+        l1_explorer_url: None,
+        verification_api_url: Some(verification_api_url.to_string()),
+        other: serde_json::Value::Null,
+    })
+}
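A quick worked example of the port arithmetic used by `allocate_explorer_services_ports` above (a sketch; it relies only on the `PortOffset::from_chain_id` change shown earlier in this diff):

```rust
// With `(chain_id - 1) * 100`, the first chain keeps the default ports and
// every further chain shifts the whole explorer port block by 100.
let first: u16 = PortOffset::from_chain_id(1).into();
let second: u16 = PortOffset::from_chain_id(2).into();
assert_eq!((first, second), (0, 100));
```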
diff --git a/zk_toolbox/crates/zk_inception/src/commands/explorer/mod.rs b/zk_toolbox/crates/zk_inception/src/commands/explorer/mod.rs
new file mode 100644
index 000000000000..4b66d49598c4
--- /dev/null
+++ b/zk_toolbox/crates/zk_inception/src/commands/explorer/mod.rs
@@ -0,0 +1,27 @@
+use clap::Subcommand;
+use xshell::Shell;
+
+mod backend;
+mod init;
+mod run;
+
+#[derive(Subcommand, Debug)]
+pub enum ExplorerCommands {
+    /// Initialize explorer (create database to store explorer data and generate docker
+    /// compose file with explorer services). Runs for all chains, unless --chain is passed
+    Init,
+    /// Start explorer backend services (api, data_fetcher, worker) for a given chain.
+    /// Uses default chain, unless --chain is passed
+    #[command(alias = "backend")]
+    RunBackend,
+    /// Run explorer app
+    Run,
+}
+
+pub(crate) async fn run(shell: &Shell, args: ExplorerCommands) -> anyhow::Result<()> {
+    match args {
+        ExplorerCommands::Init => init::run(shell).await,
+        ExplorerCommands::Run => run::run(shell),
+        ExplorerCommands::RunBackend => backend::run(shell),
+    }
+}
diff --git a/zk_toolbox/crates/zk_inception/src/commands/explorer/run.rs b/zk_toolbox/crates/zk_inception/src/commands/explorer/run.rs
new file mode 100644
index 000000000000..a6519f62edba
--- /dev/null
+++ b/zk_toolbox/crates/zk_inception/src/commands/explorer/run.rs
@@ -0,0 +1,98 @@
+use std::path::Path;
+
+use anyhow::Context;
+use common::{config::global_config, docker, logger};
+use config::{explorer::*, traits::SaveConfig, AppsEcosystemConfig, EcosystemConfig};
+use xshell::Shell;
+
+use crate::{
+    consts::{EXPLORER_APP_DOCKER_CONFIG_PATH, EXPLORER_APP_DOCKER_IMAGE},
+    messages::{
+        msg_explorer_running_with_config, msg_explorer_starting_on,
+        MSG_EXPLORER_FAILED_TO_CREATE_CONFIG_ERR, MSG_EXPLORER_FAILED_TO_FIND_ANY_CHAIN_ERR,
+        MSG_EXPLORER_FAILED_TO_RUN_DOCKER_ERR,
+    },
+};
+
+pub(crate) fn run(shell: &Shell) -> anyhow::Result<()> {
+    let ecosystem_config = EcosystemConfig::from_file(shell)?;
+    let ecosystem_path = shell.current_dir();
+    // Get ecosystem level apps.yaml config
+    let apps_config = AppsEcosystemConfig::read_or_create_default(shell)?;
+    // If specific_chain is provided, run only with that chain; otherwise, run with all chains
+    let chains_enabled = match global_config().chain_name {
+        Some(ref chain_name) => vec![chain_name.clone()],
+        None => ecosystem_config.list_of_chains(),
+    };
+
+    // Read explorer config
+    let config_path = ExplorerConfig::get_config_path(&ecosystem_path);
+    let mut explorer_config = ExplorerConfig::read_or_create_default(shell)
+        .context(MSG_EXPLORER_FAILED_TO_CREATE_CONFIG_ERR)?;
+
+    // Validate and update explorer config
+    explorer_config.filter(&ecosystem_config.list_of_chains());
+    explorer_config.hide_except(&chains_enabled);
+    if explorer_config.is_empty() {
+        anyhow::bail!(MSG_EXPLORER_FAILED_TO_FIND_ANY_CHAIN_ERR);
+    }
+
+    // Save explorer config
+    explorer_config.save(shell, &config_path)?;
+
+    let config_js_path = explorer_config
+        .save_as_js(shell)
+        .context(MSG_EXPLORER_FAILED_TO_CREATE_CONFIG_ERR)?;
+
+    logger::info(msg_explorer_running_with_config(&config_path));
+    logger::info(msg_explorer_starting_on(
+        "127.0.0.1",
+        apps_config.explorer.http_port,
+    ));
+    let name = explorer_app_name(&ecosystem_config.name);
+    run_explorer(
+        shell,
+        &config_js_path,
+        &name,
+        apps_config.explorer.http_port,
+    )?;
+    Ok(())
+}
+
+fn run_explorer(
+    shell: &Shell,
+    config_file_path: &Path,
+    name: &str,
+    port: u16,
+) -> anyhow::Result<()> {
+    let port_mapping = format!("{}:{}", port, port);
+    let volume_mapping = format!(
+        "{}:{}",
+        config_file_path.display(),
+        EXPLORER_APP_DOCKER_CONFIG_PATH
+    );
+
+    let docker_args: Vec<String> = vec![
+        "--platform".to_string(),
+        "linux/amd64".to_string(),
+        "--name".to_string(),
+        name.to_string(),
+        "-p".to_string(),
+        port_mapping,
+        "-v".to_string(),
+        volume_mapping,
+        "-e".to_string(),
+        format!("PORT={}", port),
+        "--rm".to_string(),
+    ];
+
+    docker::run(shell, EXPLORER_APP_DOCKER_IMAGE, docker_args)
+        .with_context(|| MSG_EXPLORER_FAILED_TO_RUN_DOCKER_ERR)?;
+    Ok(())
+}
+
+/// Generates a name for the explorer app Docker container.
+/// Will be passed as `--name` argument to `docker run`.
+fn explorer_app_name(ecosystem_name: &str) -> String {
+    format!("{}-explorer-app", ecosystem_name)
+}
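The container setup in `run_explorer` boils down to a single `docker run` invocation; the sketch below spells it out for a hypothetical ecosystem named `era` on port 3010 (the name and port are illustrative):

```rust
// Roughly the shell command assembled from `docker_args` above:
//   docker run --platform linux/amd64 --name era-explorer-app -p 3010:3010 \
//     -v <config.js>:<EXPLORER_APP_DOCKER_CONFIG_PATH> -e PORT=3010 --rm \
//     <EXPLORER_APP_DOCKER_IMAGE>
assert_eq!(explorer_app_name("era"), "era-explorer-app");
```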
diff --git a/zk_toolbox/crates/zk_inception/src/commands/mod.rs b/zk_toolbox/crates/zk_inception/src/commands/mod.rs
index 0ac363beb2da..523faea04786 100644
--- a/zk_toolbox/crates/zk_inception/src/commands/mod.rs
+++ b/zk_toolbox/crates/zk_inception/src/commands/mod.rs
@@ -3,6 +3,7 @@ pub mod chain;
 pub mod containers;
 pub mod contract_verifier;
 pub mod ecosystem;
+pub mod explorer;
 pub mod external_node;
 pub mod portal;
 pub mod prover;
diff --git a/zk_toolbox/crates/zk_inception/src/commands/portal.rs b/zk_toolbox/crates/zk_inception/src/commands/portal.rs
index cc939f3fb3ea..5bf211211779 100644
--- a/zk_toolbox/crates/zk_inception/src/commands/portal.rs
+++ b/zk_toolbox/crates/zk_inception/src/commands/portal.rs
@@ -1,33 +1,30 @@
-use std::{collections::HashMap, path::Path};
+use std::path::Path;
 
-use anyhow::{anyhow, Context};
-use common::{docker, ethereum, logger};
+use anyhow::Context;
+use common::{config::global_config, docker, ethereum, logger};
 use config::{
     portal::*,
-    traits::{ReadConfig, SaveConfig},
-    ChainConfig, EcosystemConfig,
+    traits::{ConfigWithL2RpcUrl, SaveConfig},
+    AppsEcosystemConfig, ChainConfig, EcosystemConfig,
 };
 use ethers::types::Address;
 use types::{BaseToken, TokenInfo};
 use xshell::Shell;
 
 use crate::{
-    commands::args::PortalArgs,
-    consts::{L2_BASE_TOKEN_ADDRESS, PORTAL_DOCKER_CONTAINER_PORT, PORTAL_DOCKER_IMAGE},
+    consts::{L2_BASE_TOKEN_ADDRESS, PORTAL_DOCKER_CONFIG_PATH, PORTAL_DOCKER_IMAGE},
     messages::{
-        msg_portal_starting_on, MSG_PORTAL_CONFIG_IS_EMPTY_ERR,
-        MSG_PORTAL_FAILED_TO_CREATE_CONFIG_ERR, MSG_PORTAL_FAILED_TO_RUN_DOCKER_ERR,
+        msg_portal_running_with_config, msg_portal_starting_on,
+        MSG_PORTAL_FAILED_TO_CREATE_CONFIG_ERR, MSG_PORTAL_FAILED_TO_FIND_ANY_CHAIN_ERR,
+        MSG_PORTAL_FAILED_TO_RUN_DOCKER_ERR,
     },
 };
 
-async fn create_hyperchain_config(chain_config: &ChainConfig) -> anyhow::Result<HyperchainConfig> {
+async fn build_portal_chain_config(
+    chain_config: &ChainConfig,
+) -> anyhow::Result<PortalChainConfig> {
     // Get L2 RPC URL from general config
-    let general_config = chain_config.get_general_config()?;
-    let rpc_url = general_config
-        .api_config
-        .as_ref()
-        .map(|api_config| &api_config.web3_json_rpc.http_url)
-        .context("api_config")?;
+    let l2_rpc_url = chain_config.get_general_config()?.get_l2_rpc_url()?;
     // Get L1 RPC URL from secrets config
     let secrets_config = chain_config.get_secrets_config()?;
     let l1_rpc_url = secrets_config
@@ -68,97 +65,126 @@ async fn create_hyperchain_config(chain_config: &ChainConfig) -> anyhow::Result<HyperchainConfig> {
         name: Some(base_token_info.name.to_string()),
     }];
     // Build hyperchain config
-    Ok(HyperchainConfig {
+    Ok(PortalChainConfig {
         network: NetworkConfig {
             id: chain_config.chain_id.as_u64(),
             key: chain_config.name.clone(),
             name: chain_config.name.clone(),
-            rpc_url: rpc_url.to_string(),
+            rpc_url: l2_rpc_url.to_string(),
             l1_network,
             public_l1_network_id: None,
             block_explorer_url: None,
             block_explorer_api: None,
+            hidden: None,
+            other: serde_json::Value::Null,
         },
         tokens,
     })
 }
 
-async fn create_hyperchains_config(
-    chain_configs: &[ChainConfig],
-) -> anyhow::Result<HyperchainsConfig> {
-    let mut hyperchain_configs = Vec::new();
-    for chain_config in chain_configs {
-        if let Ok(config) = create_hyperchain_config(chain_config).await {
-            hyperchain_configs.push(config)
-        }
-    }
-    Ok(HyperchainsConfig(hyperchain_configs))
+pub async fn update_portal_config(
+    shell: &Shell,
+    chain_config: &ChainConfig,
+) -> anyhow::Result<PortalConfig> {
+    // Build and append portal chain config to the portal config
+    let portal_chain_config = build_portal_chain_config(chain_config).await?;
+    let mut portal_config = PortalConfig::read_or_create_default(shell)?;
+    portal_config.add_chain_config(&portal_chain_config);
+    // Save portal config
+    let config_path = PortalConfig::get_config_path(&shell.current_dir());
+    portal_config.save(shell, config_path)?;
+    Ok(portal_config)
 }
 
-pub async fn create_portal_config(
+/// Validates portal config - appends missing chains and removes unknown chains
+async fn validate_portal_config(
+    portal_config: &mut PortalConfig,
     ecosystem_config: &EcosystemConfig,
-) -> anyhow::Result<PortalRuntimeConfig> {
-    let chains: Vec<String> = ecosystem_config.list_of_chains();
-    let mut chain_configs = Vec::new();
-    for chain in chains {
-        if let Some(chain_config) = ecosystem_config.load_chain(Some(chain.clone())) {
-            chain_configs.push(chain_config)
+) -> anyhow::Result<()> {
+    let chain_names = ecosystem_config.list_of_chains();
+    for chain_name in &chain_names {
+        if portal_config.contains(chain_name) {
+            continue;
+        }
+        // Append missing chain, chain might not be initialized, so ignoring errors
+        if let Some(chain_config) = ecosystem_config.load_chain(Some(chain_name.clone())) {
+            if let Ok(portal_chain_config) = build_portal_chain_config(&chain_config).await {
+                portal_config.add_chain_config(&portal_chain_config);
+            }
         }
     }
-    let hyperchains_config = create_hyperchains_config(&chain_configs).await?;
-    if hyperchains_config.is_empty() {
-        anyhow::bail!("Failed to create any valid hyperchain config")
-    }
-    let runtime_config = PortalRuntimeConfig {
-        node_type: "hyperchain".to_string(),
-        hyperchains_config,
-    };
-    Ok(runtime_config)
-}
-
-pub async fn create_and_save_portal_config(
-    ecosystem_config: &EcosystemConfig,
-    shell: &Shell,
-) -> anyhow::Result<PortalRuntimeConfig> {
-    let portal_config = create_portal_config(ecosystem_config).await?;
-    let config_path = PortalRuntimeConfig::get_config_path(&shell.current_dir());
-    portal_config.save(shell, config_path)?;
-    Ok(portal_config)
+    portal_config.filter(&chain_names);
+    Ok(())
 }
 
-pub async fn run(shell: &Shell, args: PortalArgs) -> anyhow::Result<()> {
+pub async fn run(shell: &Shell) -> anyhow::Result<()> {
     let ecosystem_config: EcosystemConfig = EcosystemConfig::from_file(shell)?;
-    let config_path = PortalRuntimeConfig::get_config_path(&shell.current_dir());
-    logger::info(format!(
-        "Using portal config file at {}",
-        config_path.display()
-    ));
-
-    let portal_config = match PortalRuntimeConfig::read(shell, &config_path) {
-        Ok(config) => config,
-        Err(_) => create_and_save_portal_config(&ecosystem_config, shell)
-            .await
-            .context(MSG_PORTAL_FAILED_TO_CREATE_CONFIG_ERR)?,
+    // Get ecosystem level apps.yaml config
+    let apps_config = AppsEcosystemConfig::read_or_create_default(shell)?;
+    // Display all chains, unless --chain is passed
+    let chains_enabled = match global_config().chain_name {
+        Some(ref chain_name) => vec![chain_name.clone()],
+        None => ecosystem_config.list_of_chains(),
     };
-    if portal_config.hyperchains_config.is_empty() {
-        return Err(anyhow!(MSG_PORTAL_CONFIG_IS_EMPTY_ERR));
+
+    // Read portal config
+    let config_path = PortalConfig::get_config_path(&shell.current_dir());
+    let mut portal_config = PortalConfig::read_or_create_default(shell)
+        .context(MSG_PORTAL_FAILED_TO_CREATE_CONFIG_ERR)?;
+
+    // Validate and update portal config
+    validate_portal_config(&mut portal_config, &ecosystem_config).await?;
+    portal_config.hide_except(&chains_enabled);
+    if portal_config.is_empty() {
+        anyhow::bail!(MSG_PORTAL_FAILED_TO_FIND_ANY_CHAIN_ERR);
+    }
 
-    logger::info(msg_portal_starting_on("127.0.0.1", args.port));
-    run_portal(shell, &config_path, args.port)?;
+    // Save portal config
+    portal_config.save(shell, &config_path)?;
+
+    let config_js_path = portal_config
+        .save_as_js(shell)
+        .context(MSG_PORTAL_FAILED_TO_CREATE_CONFIG_ERR)?;
+
+    logger::info(msg_portal_running_with_config(&config_path));
+    logger::info(msg_portal_starting_on(
+        "127.0.0.1",
+        apps_config.portal.http_port,
+    ));
+    let name = portal_app_name(&ecosystem_config.name);
+    run_portal(shell, &config_js_path, &name, apps_config.portal.http_port)?;
     Ok(())
 }
 
-fn run_portal(shell: &Shell, config_file_path: &Path, port: u16) -> anyhow::Result<()> {
-    let port_mapping = format!("{}:{}", port, PORTAL_DOCKER_CONTAINER_PORT);
-    let volume_mapping = format!("{}:/usr/src/app/dist/config.js", config_file_path.display());
+fn run_portal(shell: &Shell, config_file_path: &Path, name: &str, port: u16) -> anyhow::Result<()> {
+    let port_mapping = format!("{}:{}", port, port);
+    let volume_mapping = format!(
+        "{}:{}",
+        config_file_path.display(),
+        PORTAL_DOCKER_CONFIG_PATH
+    );
 
-    let mut docker_args: HashMap<String, String> = HashMap::new();
-    docker_args.insert("--platform".to_string(), "linux/amd64".to_string());
-    docker_args.insert("-p".to_string(), port_mapping);
-    docker_args.insert("-v".to_string(), volume_mapping);
+    let docker_args: Vec<String> = vec![
+        "--platform".to_string(),
+        "linux/amd64".to_string(),
+        "--name".to_string(),
+        name.to_string(),
+        "-p".to_string(),
+        port_mapping,
+        "-v".to_string(),
+        volume_mapping,
+        "-e".to_string(),
+        format!("PORT={}", port),
+        "--rm".to_string(),
+    ];
 
     docker::run(shell, PORTAL_DOCKER_IMAGE, docker_args)
         .with_context(|| MSG_PORTAL_FAILED_TO_RUN_DOCKER_ERR)?;
     Ok(())
 }
+
+/// Generates a name for the portal app Docker container.
+/// Will be passed as `--name` argument to `docker run`.
+fn portal_app_name(ecosystem_name: &str) -> String {
+    format!("{}-portal-app", ecosystem_name)
+}
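Worth noting in the hunk above: `docker_args` changed from a `HashMap<String, String>` to a `Vec<String>`, which preserves argument order and lets the same flag appear more than once, something a map keyed by flag name cannot express. A small sketch of the resulting container name (the ecosystem name is hypothetical):

```rust
// The container is named after the ecosystem, mirroring the explorer app.
assert_eq!(portal_app_name("era"), "era-portal-app");
```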
diff --git a/zk_toolbox/crates/zk_inception/src/commands/prover/args/compressor_keys.rs b/zk_toolbox/crates/zk_inception/src/commands/prover/args/compressor_keys.rs
new file mode 100644
index 000000000000..095dccf00b38
--- /dev/null
+++ b/zk_toolbox/crates/zk_inception/src/commands/prover/args/compressor_keys.rs
@@ -0,0 +1,22 @@
+use clap::Parser;
+use common::Prompt;
+
+use crate::messages::MSG_SETUP_COMPRESSOR_KEY_PATH_PROMPT;
+
+#[derive(Debug, Clone, Parser, Default)]
+pub struct CompressorKeysArgs {
+    #[clap(long)]
+    pub path: Option<String>,
+}
+
+impl CompressorKeysArgs {
+    pub fn fill_values_with_prompt(self, default: &str) -> CompressorKeysArgs {
+        let path = self.path.unwrap_or_else(|| {
+            Prompt::new(MSG_SETUP_COMPRESSOR_KEY_PATH_PROMPT)
+                .default(default)
+                .ask()
+        });
+
+        CompressorKeysArgs { path: Some(path) }
+    }
+}
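A sketch of how the new argument struct behaves (the paths are illustrative): an explicit `--path` short-circuits the interactive prompt, so scripted runs stay non-interactive.

```rust
let args = CompressorKeysArgs { path: Some("/tmp/setup_2^24.key".into()) };
let filled = args.fill_values_with_prompt("/prover/keys/setup/setup_2^24.key");
assert_eq!(filled.path.as_deref(), Some("/tmp/setup_2^24.key"));
```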
"true", num_args = 0..=1)] + pub setup_compressor_keys: Option, + #[clap(flatten)] + pub compressor_keys_args: CompressorKeysArgs, #[clap(flatten)] - #[serde(flatten)] - pub setup_key_config: SetupKeyConfigTmp, + pub setup_keys_args: SetupKeysArgs, + #[clap(long, default_missing_value = "true", num_args = 0..=1)] + pub setup_keys: Option, #[clap(long)] pub setup_database: Option, @@ -137,7 +144,7 @@ pub struct PublicStorageGCSCreateBucketTmp { } #[derive(Clone, Debug, Serialize, Deserialize, Parser, Default)] -pub struct SetupKeyConfigTmp { +pub struct SetupCompressorKeyConfigTmp { #[clap(long)] pub download_key: Option, #[clap(long)] @@ -171,12 +178,6 @@ pub enum ProofStorageConfig { GCSCreateBucket(ProofStorageGCSCreateBucket), } -#[derive(Debug, Clone)] -pub struct SetupKeyConfig { - pub download_key: bool, - pub setup_key_path: String, -} - #[derive(Debug, Clone)] pub struct ProverDatabaseConfig { pub database_config: DatabaseConfig, @@ -187,8 +188,9 @@ pub struct ProverDatabaseConfig { pub struct ProverInitArgsFinal { pub proof_store: ProofStorageConfig, pub public_store: Option, - pub setup_key_config: SetupKeyConfig, - pub bellman_cuda_config: InitBellmanCudaArgs, + pub compressor_key_args: Option, + pub setup_keys: Option, + pub bellman_cuda_config: Option, pub cloud_type: CloudConnectionMode, pub database_config: Option, } @@ -197,20 +199,23 @@ impl ProverInitArgs { pub(crate) fn fill_values_with_prompt( &self, shell: &Shell, - setup_key_path: &str, + default_compressor_key_path: &str, chain_config: &ChainConfig, ) -> anyhow::Result { let proof_store = self.fill_proof_storage_values_with_prompt(shell)?; let public_store = self.fill_public_storage_values_with_prompt(shell)?; - let setup_key_config = self.fill_setup_key_values_with_prompt(setup_key_path); - let bellman_cuda_config = self.fill_bellman_cuda_values_with_prompt()?; + let compressor_key_args = + self.fill_setup_compressor_key_values_with_prompt(default_compressor_key_path); + let bellman_cuda_config = self.fill_bellman_cuda_values_with_prompt(); let cloud_type = self.get_cloud_type_with_prompt(); let database_config = self.fill_database_values_with_prompt(chain_config); + let setup_keys = self.fill_setup_keys_values_with_prompt(); Ok(ProverInitArgsFinal { proof_store, public_store, - setup_key_config, + compressor_key_args, + setup_keys, bellman_cuda_config, cloud_type, database_config, @@ -336,29 +341,38 @@ impl ProverInitArgs { } } - fn fill_setup_key_values_with_prompt(&self, setup_key_path: &str) -> SetupKeyConfig { - let download_key = self - .clone() - .setup_key_config - .download_key - .unwrap_or_else(|| { - PromptConfirm::new(MSG_DOWNLOAD_SETUP_KEY_PROMPT) - .default(true) - .ask() - }); - let setup_key_path = self - .clone() - .setup_key_config - .setup_key_path - .unwrap_or_else(|| { - Prompt::new(MSG_SETUP_KEY_PATH_PROMPT) - .default(setup_key_path) - .ask() - }); + fn fill_setup_compressor_key_values_with_prompt( + &self, + default_path: &str, + ) -> Option { + let download_key = self.clone().setup_compressor_keys.unwrap_or_else(|| { + PromptConfirm::new(MSG_DOWNLOAD_SETUP_COMPRESSOR_KEY_PROMPT) + .default(false) + .ask() + }); - SetupKeyConfig { - download_key, - setup_key_path, + if download_key { + Some( + self.compressor_keys_args + .clone() + .fill_values_with_prompt(default_path), + ) + } else { + None + } + } + + fn fill_setup_keys_values_with_prompt(&self) -> Option { + let args = self.setup_keys_args.clone(); + + if self.setup_keys.unwrap_or_else(|| { + PromptConfirm::new(MSG_SETUP_KEYS_PROMPT) + 
+
+    fn fill_setup_keys_values_with_prompt(&self) -> Option<SetupKeysArgs> {
+        let args = self.setup_keys_args.clone();
+
+        if self.setup_keys.unwrap_or_else(|| {
+            PromptConfirm::new(MSG_SETUP_KEYS_PROMPT)
+                .default(false)
+                .ask()
+        }) {
+            Some(args)
+        } else {
+            None
         }
     }
 
@@ -460,8 +474,17 @@ impl ProverInitArgs {
         })
     }
 
-    fn fill_bellman_cuda_values_with_prompt(&self) -> anyhow::Result<InitBellmanCudaArgs> {
-        self.bellman_cuda_config.clone().fill_values_with_prompt()
+    fn fill_bellman_cuda_values_with_prompt(&self) -> Option<InitBellmanCudaArgs> {
+        let args = self.bellman_cuda_config.clone();
+        if self.bellman_cuda.unwrap_or_else(|| {
+            PromptConfirm::new(MSG_INITIALIZE_BELLMAN_CUDA_PROMPT)
+                .default(false)
+                .ask()
+        }) {
+            Some(args)
+        } else {
+            None
+        }
     }
 
     fn get_cloud_type_with_prompt(&self) -> CloudConnectionMode {
diff --git a/zk_toolbox/crates/zk_inception/src/commands/prover/args/init_bellman_cuda.rs b/zk_toolbox/crates/zk_inception/src/commands/prover/args/init_bellman_cuda.rs
index 848457c53271..ba204b0be9e9 100644
--- a/zk_toolbox/crates/zk_inception/src/commands/prover/args/init_bellman_cuda.rs
+++ b/zk_toolbox/crates/zk_inception/src/commands/prover/args/init_bellman_cuda.rs
@@ -30,7 +30,7 @@ impl std::fmt::Display for BellmanCudaPathSelection {
 }
 
 impl InitBellmanCudaArgs {
-    pub fn fill_values_with_prompt(self) -> anyhow::Result<InitBellmanCudaArgs> {
+    pub fn fill_values_with_prompt(self) -> InitBellmanCudaArgs {
         let bellman_cuda_dir = self.bellman_cuda_dir.unwrap_or_else(|| {
             match PromptSelect::new(
                 MSG_BELLMAN_CUDA_ORIGIN_SELECT,
@@ -43,8 +43,8 @@ impl InitBellmanCudaArgs {
             }
         });
 
-        Ok(InitBellmanCudaArgs {
+        InitBellmanCudaArgs {
             bellman_cuda_dir: Some(bellman_cuda_dir),
-        })
+        }
     }
 }
diff --git a/zk_toolbox/crates/zk_inception/src/commands/prover/args/mod.rs b/zk_toolbox/crates/zk_inception/src/commands/prover/args/mod.rs
index 66d97d75094c..39391977b843 100644
--- a/zk_toolbox/crates/zk_inception/src/commands/prover/args/mod.rs
+++ b/zk_toolbox/crates/zk_inception/src/commands/prover/args/mod.rs
@@ -1,3 +1,5 @@
+pub mod compressor_keys;
 pub mod init;
 pub mod init_bellman_cuda;
 pub mod run;
+pub mod setup_keys;
diff --git a/zk_toolbox/crates/zk_inception/src/commands/prover/args/run.rs b/zk_toolbox/crates/zk_inception/src/commands/prover/args/run.rs
index 6bdd62c1d488..751cc48074fe 100644
--- a/zk_toolbox/crates/zk_inception/src/commands/prover/args/run.rs
+++ b/zk_toolbox/crates/zk_inception/src/commands/prover/args/run.rs
@@ -1,8 +1,22 @@
+use anyhow::anyhow;
 use clap::{Parser, ValueEnum};
 use common::{Prompt, PromptSelect};
+use config::ChainConfig;
 use strum::{EnumIter, IntoEnumIterator};
 
-use crate::messages::{MSG_ROUND_SELECT_PROMPT, MSG_RUN_COMPONENT_PROMPT, MSG_THREADS_PROMPT};
+use crate::{
+    consts::{
+        COMPRESSOR_BINARY_NAME, COMPRESSOR_DOCKER_IMAGE, PROVER_BINARY_NAME, PROVER_DOCKER_IMAGE,
+        PROVER_GATEWAY_BINARY_NAME, PROVER_GATEWAY_DOCKER_IMAGE, PROVER_JOB_MONITOR_BINARY_NAME,
+        PROVER_JOB_MONITOR_DOCKER_IMAGE, WITNESS_GENERATOR_BINARY_NAME,
+        WITNESS_GENERATOR_DOCKER_IMAGE, WITNESS_VECTOR_GENERATOR_BINARY_NAME,
+        WITNESS_VECTOR_GENERATOR_DOCKER_IMAGE,
+    },
+    messages::{
+        MSG_ROUND_SELECT_PROMPT, MSG_RUN_COMPONENT_PROMPT, MSG_THREADS_PROMPT,
+        MSG_WITNESS_GENERATOR_ROUND_ERR,
+    },
+};
 
 #[derive(Debug, Clone, Parser, Default)]
 pub struct ProverRunArgs {
@@ -12,6 +26,10 @@ pub struct ProverRunArgs {
     pub witness_generator_args: WitnessGeneratorArgs,
     #[clap(flatten)]
     pub witness_vector_generator_args: WitnessVectorGeneratorArgs,
+    #[clap(flatten)]
+    pub fri_prover_args: FriProverRunArgs,
+    #[clap(long)]
+    pub docker: Option<bool>,
 }
 
 #[derive(
@@ -32,6 +50,108 @@ pub enum ProverComponent {
     ProverJobMonitor,
 }
 
+impl ProverComponent {
+    pub fn image_name(&self) -> &'static str {
+        match self {
+            Self::Gateway => PROVER_GATEWAY_DOCKER_IMAGE,
+            Self::WitnessGenerator => WITNESS_GENERATOR_DOCKER_IMAGE,
+            Self::WitnessVectorGenerator => WITNESS_VECTOR_GENERATOR_DOCKER_IMAGE,
+            Self::Prover => PROVER_DOCKER_IMAGE,
+            Self::Compressor => COMPRESSOR_DOCKER_IMAGE,
+            Self::ProverJobMonitor => PROVER_JOB_MONITOR_DOCKER_IMAGE,
+        }
+    }
+
+    pub fn binary_name(&self) -> &'static str {
+        match self {
+            Self::Gateway => PROVER_GATEWAY_BINARY_NAME,
+            Self::WitnessGenerator => WITNESS_GENERATOR_BINARY_NAME,
+            Self::WitnessVectorGenerator => WITNESS_VECTOR_GENERATOR_BINARY_NAME,
+            Self::Prover => PROVER_BINARY_NAME,
+            Self::Compressor => COMPRESSOR_BINARY_NAME,
+            Self::ProverJobMonitor => PROVER_JOB_MONITOR_BINARY_NAME,
+        }
+    }
+
+    pub fn get_application_args(&self, in_docker: bool) -> anyhow::Result<Vec<String>> {
+        let mut application_args = vec![];
+
+        if self == &Self::Prover || self == &Self::Compressor {
+            if in_docker {
+                application_args.push("--gpus=all".to_string());
+            } else {
+                application_args.push("--features=gpu".to_string());
+            }
+        }
+
+        Ok(application_args)
+    }
+
+    pub fn get_additional_args(
+        &self,
+        in_docker: bool,
+        args: ProverRunArgs,
+        chain: &ChainConfig,
+    ) -> anyhow::Result<Vec<String>> {
+        let mut additional_args = vec![];
+        if in_docker {
+            additional_args.push("--config-path=/configs/general.yaml".to_string());
+            additional_args.push("--secrets-path=/configs/secrets.yaml".to_string());
+        } else {
+            let general_config = chain
+                .path_to_general_config()
+                .into_os_string()
+                .into_string()
+                .map_err(|_| anyhow!("Failed to convert path to string"))?;
+            let secrets_config = chain
+                .path_to_secrets_config()
+                .into_os_string()
+                .into_string()
+                .map_err(|_| anyhow!("Failed to convert path to string"))?;
+
+            additional_args.push(format!("--config-path={}", general_config));
+            additional_args.push(format!("--secrets-path={}", secrets_config));
+        }
+
+        match self {
+            Self::WitnessGenerator => {
+                additional_args.push(
+                    match args
+                        .witness_generator_args
+                        .round
+                        .expect(MSG_WITNESS_GENERATOR_ROUND_ERR)
+                    {
+                        WitnessGeneratorRound::AllRounds => "--all_rounds",
+                        WitnessGeneratorRound::BasicCircuits => "--round=basic_circuits",
+                        WitnessGeneratorRound::LeafAggregation => "--round=leaf_aggregation",
+                        WitnessGeneratorRound::NodeAggregation => "--round=node_aggregation",
+                        WitnessGeneratorRound::RecursionTip => "--round=recursion_tip",
+                        WitnessGeneratorRound::Scheduler => "--round=scheduler",
+                    }
+                    .to_string(),
+                );
+            }
+            Self::WitnessVectorGenerator => {
+                additional_args.push(format!(
+                    "--threads={}",
+                    args.witness_vector_generator_args.threads.unwrap_or(1)
+                ));
+            }
+            Self::Prover => {
+                if args.fri_prover_args.max_allocation.is_some() {
+                    additional_args.push(format!(
+                        "--max-allocation={}",
+                        args.fri_prover_args.max_allocation.unwrap()
+                    ));
+                };
+            }
+            _ => {}
+        };
+
+        Ok(additional_args)
+    }
+}
+
 #[derive(Debug, Clone, Parser, Default)]
 pub struct WitnessGeneratorArgs {
     #[clap(long)]
@@ -76,8 +196,15 @@ impl WitnessVectorGeneratorArgs {
     }
 }
 
+#[derive(Debug, Clone, Parser, Default)]
+pub struct FriProverRunArgs {
+    /// Memory allocation limit in bytes (for prover component)
+    #[clap(long)]
+    pub max_allocation: Option<u64>,
+}
+
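The `--max-allocation` value is forwarded verbatim by `get_additional_args` above; a sketch with an assumed 16 GiB cap:

```rust
// 16 GiB in bytes; for the Prover component this becomes
// `--max-allocation=17179869184` on the binary's command line.
let args = FriProverRunArgs { max_allocation: Some(16 * 1024 * 1024 * 1024) };
```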
 impl ProverRunArgs {
-    pub fn fill_values_with_prompt(&self) -> anyhow::Result<ProverRunArgs> {
+    pub fn fill_values_with_prompt(self) -> anyhow::Result<ProverRunArgs> {
         let component = self.component.unwrap_or_else(|| {
             PromptSelect::new(MSG_RUN_COMPONENT_PROMPT, ProverComponent::iter()).ask()
         });
@@ -90,10 +217,18 @@ impl ProverRunArgs {
             .witness_vector_generator_args
             .fill_values_with_prompt(component)?;
 
+        let docker = self.docker.unwrap_or_else(|| {
+            Prompt::new("Do you want to run the Docker image for the component?")
+                .default("false")
+                .ask()
+        });
+
         Ok(ProverRunArgs {
             component: Some(component),
             witness_generator_args,
             witness_vector_generator_args,
+            fri_prover_args: self.fri_prover_args,
+            docker: Some(docker),
         })
     }
 }
diff --git a/zk_toolbox/crates/zk_inception/src/commands/prover/args/setup_keys.rs b/zk_toolbox/crates/zk_inception/src/commands/prover/args/setup_keys.rs
new file mode 100644
index 000000000000..155977b8812a
--- /dev/null
+++ b/zk_toolbox/crates/zk_inception/src/commands/prover/args/setup_keys.rs
@@ -0,0 +1,53 @@
+use clap::{Parser, ValueEnum};
+use common::PromptSelect;
+use strum::{EnumIter, IntoEnumIterator};
+
+use crate::messages::{MSG_SETUP_KEYS_DOWNLOAD_SELECTION_PROMPT, MSG_SETUP_KEYS_REGION_PROMPT};
+
+#[derive(Debug, Clone, Parser, Default)]
+pub struct SetupKeysArgs {
+    #[clap(long)]
+    pub region: Option<Region>,
+    #[clap(long)]
+    pub mode: Option<Mode>,
+}
+
+#[derive(Debug, Clone)]
+pub struct SetupKeysArgsFinal {
+    pub region: Option<Region>,
+    pub mode: Mode,
+}
+
+#[derive(Debug, Clone, ValueEnum, strum::EnumString, EnumIter, PartialEq, Eq, strum::Display)]
+pub enum Mode {
+    Download,
+    Generate,
+}
+
+#[derive(Debug, Clone, ValueEnum, strum::EnumString, EnumIter, PartialEq, Eq, strum::Display)]
+pub enum Region {
+    Us,
+    Europe,
+    Asia,
+}
+
+impl SetupKeysArgs {
+    pub fn fill_values_with_prompt(self) -> SetupKeysArgsFinal {
+        let mode = self.mode.unwrap_or_else(|| {
+            PromptSelect::new(MSG_SETUP_KEYS_DOWNLOAD_SELECTION_PROMPT, Mode::iter()).ask()
+        });
+
+        if mode == Mode::Download {
+            let region = self.region.unwrap_or_else(|| {
+                PromptSelect::new(MSG_SETUP_KEYS_REGION_PROMPT, Region::iter()).ask()
+            });
+
+            SetupKeysArgsFinal {
+                region: Some(region),
+                mode,
+            }
+        } else {
+            SetupKeysArgsFinal { region: None, mode }
+        }
+    }
+}
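A sketch of the region logic above: `generate` mode never needs a region, while `download` prompts for one only if `--region` was omitted.

```rust
let filled = SetupKeysArgs { region: None, mode: Some(Mode::Generate) }
    .fill_values_with_prompt();
assert!(filled.region.is_none()); // a region is only relevant for downloads
```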
diff --git a/zk_toolbox/crates/zk_inception/src/commands/prover/compressor_keys.rs b/zk_toolbox/crates/zk_inception/src/commands/prover/compressor_keys.rs
new file mode 100644
index 000000000000..1f39c91a2e2e
--- /dev/null
+++ b/zk_toolbox/crates/zk_inception/src/commands/prover/compressor_keys.rs
@@ -0,0 +1,73 @@
+use anyhow::Context;
+use common::{
+    check_prerequisites, cmd::Cmd, config::global_config, spinner::Spinner, WGET_PREREQUISITES,
+};
+use config::{EcosystemConfig, GeneralConfig};
+use xshell::{cmd, Shell};
+
+use super::{args::compressor_keys::CompressorKeysArgs, utils::get_link_to_prover};
+use crate::messages::{
+    MSG_CHAIN_NOT_FOUND_ERR, MSG_DOWNLOADING_SETUP_COMPRESSOR_KEY_SPINNER,
+    MSG_PROOF_COMPRESSOR_CONFIG_NOT_FOUND_ERR, MSG_SETUP_KEY_PATH_ERROR,
+};
+
+pub(crate) async fn run(shell: &Shell, args: CompressorKeysArgs) -> anyhow::Result<()> {
+    let ecosystem_config = EcosystemConfig::from_file(shell)?;
+    let chain_config = ecosystem_config
+        .load_chain(global_config().chain_name.clone())
+        .context(MSG_CHAIN_NOT_FOUND_ERR)?;
+    let mut general_config = chain_config.get_general_config()?;
+
+    let default_path = get_default_compressor_keys_path(&ecosystem_config)?;
+    let args = args.fill_values_with_prompt(&default_path);
+
+    download_compressor_key(
+        shell,
+        &mut general_config,
+        &args.path.context(MSG_SETUP_KEY_PATH_ERROR)?,
+    )?;
+
+    chain_config.save_general_config(&general_config)?;
+
+    Ok(())
+}
+
+pub(crate) fn download_compressor_key(
+    shell: &Shell,
+    general_config: &mut GeneralConfig,
+    path: &str,
+) -> anyhow::Result<()> {
+    check_prerequisites(shell, &WGET_PREREQUISITES, false);
+    let spinner = Spinner::new(MSG_DOWNLOADING_SETUP_COMPRESSOR_KEY_SPINNER);
+    let mut compressor_config: zksync_config::configs::FriProofCompressorConfig = general_config
+        .proof_compressor_config
+        .as_ref()
+        .expect(MSG_PROOF_COMPRESSOR_CONFIG_NOT_FOUND_ERR)
+        .clone();
+    compressor_config.universal_setup_path = path.to_string();
+    general_config.proof_compressor_config = Some(compressor_config.clone());
+
+    let url = compressor_config.universal_setup_download_url;
+    let path = std::path::Path::new(path);
+    let parent = path.parent().expect(MSG_SETUP_KEY_PATH_ERROR);
+    let file_name = path.file_name().expect(MSG_SETUP_KEY_PATH_ERROR);
+
+    Cmd::new(cmd!(shell, "wget {url} -P {parent}")).run()?;
+
+    if file_name != "setup_2^24.key" {
+        Cmd::new(cmd!(shell, "mv {parent}/setup_2^24.key {path}")).run()?;
+    }
+
+    spinner.finish();
+    Ok(())
+}
+
+pub fn get_default_compressor_keys_path(
+    ecosystem_config: &EcosystemConfig,
+) -> anyhow::Result<String> {
+    let link_to_prover = get_link_to_prover(ecosystem_config);
+    let path = link_to_prover.join("keys/setup/setup_2^24.key");
+    let string = path.to_str().unwrap();
+
+    Ok(String::from(string))
+}
diff --git a/zk_toolbox/crates/zk_inception/src/commands/prover/gcs.rs b/zk_toolbox/crates/zk_inception/src/commands/prover/gcs.rs
index 0c76cb10f542..700209f5ffc8 100644
--- a/zk_toolbox/crates/zk_inception/src/commands/prover/gcs.rs
+++ b/zk_toolbox/crates/zk_inception/src/commands/prover/gcs.rs
@@ -1,4 +1,4 @@
-use common::{cmd::Cmd, logger, spinner::Spinner};
+use common::{check_prerequisites, cmd::Cmd, logger, spinner::Spinner, GCLOUD_PREREQUISITES};
 use xshell::{cmd, Shell};
 use zksync_config::{configs::object_store::ObjectStoreMode, ObjectStoreConfig};
 
@@ -14,6 +14,8 @@ pub(crate) fn create_gcs_bucket(
     shell: &Shell,
     config: ProofStorageGCSCreateBucket,
 ) -> anyhow::Result<ObjectStoreConfig> {
+    check_prerequisites(shell, &GCLOUD_PREREQUISITES, false);
+
     let bucket_name = config.bucket_name;
     let location = config.location;
     let project_id = config.project_id;
diff --git a/zk_toolbox/crates/zk_inception/src/commands/prover/generate_sk.rs b/zk_toolbox/crates/zk_inception/src/commands/prover/generate_sk.rs
deleted file mode 100644
index c13d1c3b5e03..000000000000
--- a/zk_toolbox/crates/zk_inception/src/commands/prover/generate_sk.rs
+++ /dev/null
@@ -1,29 +0,0 @@
-use anyhow::Ok;
-use common::{check_prover_prequisites, cmd::Cmd, logger, spinner::Spinner};
-use config::EcosystemConfig;
-use xshell::{cmd, Shell};
-
-use super::utils::get_link_to_prover;
-use crate::messages::{MSG_GENERATING_SK_SPINNER, MSG_SK_GENERATED};
-
-pub(crate) async fn run(shell: &Shell) -> anyhow::Result<()> {
-    check_prover_prequisites(shell);
-
-    let ecosystem_config = EcosystemConfig::from_file(shell)?;
-    let link_to_prover = get_link_to_prover(&ecosystem_config);
-    shell.change_dir(&link_to_prover);
-
-    let spinner = Spinner::new(MSG_GENERATING_SK_SPINNER);
-    let cmd = Cmd::new(cmd!(
-        shell,
-        "cargo run --features gpu --release --bin key_generator --
-        generate-sk-gpu all --recompute-if-missing
-        --setup-path=data/keys
-        --path={link_to_prover}/data/keys"
-    ));
-    cmd.run()?;
-    spinner.finish();
-    logger::outro(MSG_SK_GENERATED);
-
-    Ok(())
-}
diff --git a/zk_toolbox/crates/zk_inception/src/commands/prover/init.rs b/zk_toolbox/crates/zk_inception/src/commands/prover/init.rs
index 051fd26801c9..c8636381f203 100644
--- a/zk_toolbox/crates/zk_inception/src/commands/prover/init.rs
+++ b/zk_toolbox/crates/zk_inception/src/commands/prover/init.rs
@@ -2,48 +2,41 @@ use std::path::PathBuf;
 
 use anyhow::Context;
 use common::{
-    check_prover_prequisites,
-    cmd::Cmd,
     config::global_config,
     db::{drop_db_if_exists, init_db, migrate_db, DatabaseConfig},
     logger,
     spinner::Spinner,
 };
 use config::{copy_configs, set_prover_database, traits::SaveConfigWithBasePath, EcosystemConfig};
-use xshell::{cmd, Shell};
-use zksync_config::{
-    configs::{object_store::ObjectStoreMode, GeneralConfig},
-    ObjectStoreConfig,
-};
+use xshell::Shell;
+use zksync_config::{configs::object_store::ObjectStoreMode, ObjectStoreConfig};
 
 use super::{
     args::init::{ProofStorageConfig, ProverInitArgs},
+    compressor_keys::{download_compressor_key, get_default_compressor_keys_path},
     gcs::create_gcs_bucket,
     init_bellman_cuda::run as init_bellman_cuda,
-    utils::get_link_to_prover,
+    setup_keys,
 };
 use crate::{
     consts::{PROVER_MIGRATIONS, PROVER_STORE_MAX_RETRIES},
     messages::{
-        MSG_CHAIN_NOT_FOUND_ERR, MSG_DOWNLOADING_SETUP_KEY_SPINNER,
-        MSG_FAILED_TO_DROP_PROVER_DATABASE_ERR, MSG_GENERAL_CONFIG_NOT_FOUND_ERR,
-        MSG_INITIALIZING_DATABASES_SPINNER, MSG_INITIALIZING_PROVER_DATABASE,
-        MSG_PROOF_COMPRESSOR_CONFIG_NOT_FOUND_ERR, MSG_PROVER_CONFIG_NOT_FOUND_ERR,
-        MSG_PROVER_INITIALIZED, MSG_SETUP_KEY_PATH_ERROR,
+        MSG_CHAIN_NOT_FOUND_ERR, MSG_FAILED_TO_DROP_PROVER_DATABASE_ERR,
+        MSG_GENERAL_CONFIG_NOT_FOUND_ERR, MSG_INITIALIZING_DATABASES_SPINNER,
+        MSG_INITIALIZING_PROVER_DATABASE, MSG_PROVER_CONFIG_NOT_FOUND_ERR, MSG_PROVER_INITIALIZED,
+        MSG_SETUP_KEY_PATH_ERROR,
     },
 };
 
 pub(crate) async fn run(args: ProverInitArgs, shell: &Shell) -> anyhow::Result<()> {
-    check_prover_prequisites(shell);
-
     let ecosystem_config = EcosystemConfig::from_file(shell)?;
 
-    let setup_key_path = get_default_setup_key_path(&ecosystem_config)?;
+    let default_compressor_key_path = get_default_compressor_keys_path(&ecosystem_config)?;
 
     let chain_config = ecosystem_config
         .load_chain(global_config().chain_name.clone())
         .context(MSG_CHAIN_NOT_FOUND_ERR)?;
-    let args = args.fill_values_with_prompt(shell, &setup_key_path, &chain_config)?;
+    let args = args.fill_values_with_prompt(shell, &default_compressor_key_path, &chain_config)?;
 
     if chain_config.get_general_config().is_err() || chain_config.get_secrets_config().is_err() {
         copy_configs(shell, &ecosystem_config.link_to_code, &chain_config.configs)?;
@@ -56,12 +49,13 @@ pub(crate) async fn run(args: ProverInitArgs, shell: &Shell) -> anyhow::Result<()> {
     let proof_object_store_config = get_object_store_config(shell, Some(args.proof_store))?;
     let public_object_store_config = get_object_store_config(shell, args.public_store)?;
 
-    if args.setup_key_config.download_key {
-        download_setup_key(
-            shell,
-            &general_config,
-            &args.setup_key_config.setup_key_path,
-        )?;
+    if let Some(args) = args.compressor_key_args {
+        let path = args.path.context(MSG_SETUP_KEY_PATH_ERROR)?;
+        download_compressor_key(shell, &mut general_config, &path)?;
+    }
+
+    if let Some(args) = args.setup_keys {
+        setup_keys::run(args, shell).await?;
     }
 
     let mut prover_config = general_config
@@ -79,15 +73,11 @@ pub(crate) async fn run(args: ProverInitArgs, shell: &Shell) -> anyhow::Result<()> {
     prover_config.cloud_type = args.cloud_type;
     general_config.prover_config = Some(prover_config);
 
-    let mut proof_compressor_config = general_config
-        .proof_compressor_config
-        .expect(MSG_PROOF_COMPRESSOR_CONFIG_NOT_FOUND_ERR);
-    proof_compressor_config.universal_setup_path = args.setup_key_config.setup_key_path;
-    general_config.proof_compressor_config = Some(proof_compressor_config);
-
     chain_config.save_general_config(&general_config)?;
 
-    init_bellman_cuda(shell, args.bellman_cuda_config).await?;
+    if let Some(args) = args.bellman_cuda_config {
+        init_bellman_cuda(shell, args).await?;
+    }
 
     if let Some(prover_db) = &args.database_config {
         let spinner = Spinner::new(MSG_INITIALIZING_DATABASES_SPINNER);
@@ -110,40 +100,6 @@ pub(crate) async fn run(args: ProverInitArgs, shell: &Shell) -> anyhow::Result<()> {
     Ok(())
 }
 
-fn download_setup_key(
-    shell: &Shell,
-    general_config: &GeneralConfig,
-    path: &str,
-) -> anyhow::Result<()> {
-    let spinner = Spinner::new(MSG_DOWNLOADING_SETUP_KEY_SPINNER);
-    let compressor_config: zksync_config::configs::FriProofCompressorConfig = general_config
-        .proof_compressor_config
-        .as_ref()
-        .expect(MSG_PROOF_COMPRESSOR_CONFIG_NOT_FOUND_ERR)
-        .clone();
-    let url = compressor_config.universal_setup_download_url;
-    let path = std::path::Path::new(path);
-    let parent = path.parent().expect(MSG_SETUP_KEY_PATH_ERROR);
-    let file_name = path.file_name().expect(MSG_SETUP_KEY_PATH_ERROR);
-
-    Cmd::new(cmd!(shell, "wget {url} -P {parent}")).run()?;
-
-    if file_name != "setup_2^24.key" {
-        Cmd::new(cmd!(shell, "mv {parent}/setup_2^24.key {path}")).run()?;
-    }
-
-    spinner.finish();
-    Ok(())
-}
-
-fn get_default_setup_key_path(ecosystem_config: &EcosystemConfig) -> anyhow::Result<String> {
-    let link_to_prover = get_link_to_prover(ecosystem_config);
-    let path = link_to_prover.join("keys/setup/setup_2^24.key");
-    let string = path.to_str().unwrap();
-
-    Ok(String::from(string))
-}
-
 fn get_object_store_config(
     shell: &Shell,
     config: Option<ProofStorageConfig>,
diff --git a/zk_toolbox/crates/zk_inception/src/commands/prover/init_bellman_cuda.rs b/zk_toolbox/crates/zk_inception/src/commands/prover/init_bellman_cuda.rs
index 75535587c42c..615ef841488b 100644
--- a/zk_toolbox/crates/zk_inception/src/commands/prover/init_bellman_cuda.rs
+++ b/zk_toolbox/crates/zk_inception/src/commands/prover/init_bellman_cuda.rs
@@ -1,5 +1,5 @@
 use anyhow::Context;
-use common::{check_prover_prequisites, cmd::Cmd, git, logger, spinner::Spinner};
+use common::{check_prerequisites, cmd::Cmd, git, logger, spinner::Spinner, GPU_PREREQUISITES};
 use config::{traits::SaveConfigWithBasePath, EcosystemConfig};
 use xshell::{cmd, Shell};
 
@@ -13,11 +13,11 @@ use crate::{
 };
 
 pub(crate) async fn run(shell: &Shell, args: InitBellmanCudaArgs) -> anyhow::Result<()> {
-    check_prover_prequisites(shell);
+    check_prerequisites(shell, &GPU_PREREQUISITES, false);
 
     let mut ecosystem_config = EcosystemConfig::from_file(shell)?;
 
-    let args = args.fill_values_with_prompt()?;
+    let args = args.fill_values_with_prompt();
 
     let bellman_cuda_dir = args.bellman_cuda_dir.unwrap_or("".to_string());
     let bellman_cuda_dir = if bellman_cuda_dir.is_empty() {
diff --git a/zk_toolbox/crates/zk_inception/src/commands/prover/mod.rs b/zk_toolbox/crates/zk_inception/src/commands/prover/mod.rs
index 31c3a02e3806..2b771c8ad201 100644
--- a/zk_toolbox/crates/zk_inception/src/commands/prover/mod.rs
+++ b/zk_toolbox/crates/zk_inception/src/commands/prover/mod.rs
@@ -1,13 +1,19 @@
-use args::{init::ProverInitArgs, init_bellman_cuda::InitBellmanCudaArgs, run::ProverRunArgs};
+use args::{
+    compressor_keys::CompressorKeysArgs, init::ProverInitArgs,
+    init_bellman_cuda::InitBellmanCudaArgs, run::ProverRunArgs,
+};
 use clap::Subcommand;
 use xshell::Shell;
 
+use crate::commands::prover::args::setup_keys::SetupKeysArgs;
+
 mod args;
+mod compressor_keys;
 mod gcs;
-mod generate_sk;
 mod init;
 mod init_bellman_cuda;
 mod run;
+mod setup_keys;
 mod utils;
 
 #[derive(Subcommand, Debug)]
@@ -16,19 +22,23 @@ pub enum ProverCommands {
     Init(Box<ProverInitArgs>),
     /// Generate setup keys
     #[command(alias = "sk")]
-    GenerateSK,
+    SetupKeys(SetupKeysArgs),
     /// Run prover
     Run(ProverRunArgs),
     /// Initialize bellman-cuda
     #[command(alias = "cuda")]
     InitBellmanCuda(Box<InitBellmanCudaArgs>),
+    /// Download compressor keys
+    #[command(alias = "ck")]
+    CompressorKeys(CompressorKeysArgs),
 }
 
 pub(crate) async fn run(shell: &Shell, args: ProverCommands) -> anyhow::Result<()> {
     match args {
         ProverCommands::Init(args) => init::run(*args, shell).await,
-        ProverCommands::GenerateSK => generate_sk::run(shell).await,
+        ProverCommands::SetupKeys(args) => setup_keys::run(args, shell).await,
         ProverCommands::Run(args) => run::run(args, shell).await,
         ProverCommands::InitBellmanCuda(args) => init_bellman_cuda::run(shell, *args).await,
+        ProverCommands::CompressorKeys(args) => compressor_keys::run(shell, args).await,
     }
 }
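With the rename in place, the prover subcommand surface reads as follows (aliases taken from the `#[command(alias = ...)]` attributes above): `zk_inception prover setup-keys` (alias `sk`) replaces `generate-sk`, `zk_inception prover compressor-keys` (alias `ck`) downloads the compressor key on its own, and `zk_inception prover init-bellman-cuda` keeps its `cuda` alias.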
Generate setup keys #[command(alias = "sk")] - GenerateSK, + SetupKeys(SetupKeysArgs), /// Run prover Run(ProverRunArgs), /// Initialize bellman-cuda #[command(alias = "cuda")] InitBellmanCuda(Box<InitBellmanCudaArgs>), + /// Download compressor keys + #[command(alias = "ck")] + CompressorKeys(CompressorKeysArgs), } pub(crate) async fn run(shell: &Shell, args: ProverCommands) -> anyhow::Result<()> { match args { ProverCommands::Init(args) => init::run(*args, shell).await, - ProverCommands::GenerateSK => generate_sk::run(shell).await, + ProverCommands::SetupKeys(args) => setup_keys::run(args, shell).await, ProverCommands::Run(args) => run::run(args, shell).await, ProverCommands::InitBellmanCuda(args) => init_bellman_cuda::run(shell, *args).await, + ProverCommands::CompressorKeys(args) => compressor_keys::run(shell, args).await, } } diff --git a/zk_toolbox/crates/zk_inception/src/commands/prover/run.rs b/zk_toolbox/crates/zk_inception/src/commands/prover/run.rs index 20ddfea6ac55..78116e40d6c7 100644 --- a/zk_toolbox/crates/zk_inception/src/commands/prover/run.rs +++ b/zk_toolbox/crates/zk_inception/src/commands/prover/run.rs @@ -1,26 +1,24 @@ -use anyhow::Context; -use common::{check_prover_prequisites, cmd::Cmd, config::global_config, logger}; -use config::{ChainConfig, EcosystemConfig}; +use std::path::PathBuf; + +use anyhow::{anyhow, Context}; +use common::{check_prerequisites, cmd::Cmd, config::global_config, logger, GPU_PREREQUISITES}; +use config::EcosystemConfig; use xshell::{cmd, Shell}; use super::{ - args::run::{ - ProverComponent, ProverRunArgs, WitnessGeneratorArgs, WitnessGeneratorRound, - WitnessVectorGeneratorArgs, - }, + args::run::{ProverComponent, ProverRunArgs}, utils::get_link_to_prover, }; use crate::messages::{ MSG_BELLMAN_CUDA_DIR_ERR, MSG_CHAIN_NOT_FOUND_ERR, MSG_MISSING_COMPONENT_ERR, MSG_RUNNING_COMPRESSOR, MSG_RUNNING_COMPRESSOR_ERR, MSG_RUNNING_PROVER, MSG_RUNNING_PROVER_ERR, MSG_RUNNING_PROVER_GATEWAY, MSG_RUNNING_PROVER_GATEWAY_ERR, MSG_RUNNING_PROVER_JOB_MONITOR, - MSG_RUNNING_WITNESS_GENERATOR, MSG_RUNNING_WITNESS_GENERATOR_ERR, - MSG_RUNNING_WITNESS_VECTOR_GENERATOR, MSG_RUNNING_WITNESS_VECTOR_GENERATOR_ERR, - MSG_WITNESS_GENERATOR_ROUND_ERR, + MSG_RUNNING_PROVER_JOB_MONITOR_ERR, MSG_RUNNING_WITNESS_GENERATOR, + MSG_RUNNING_WITNESS_GENERATOR_ERR, MSG_RUNNING_WITNESS_VECTOR_GENERATOR, + MSG_RUNNING_WITNESS_VECTOR_GENERATOR_ERR, }; pub(crate) async fn run(args: ProverRunArgs, shell: &Shell) -> anyhow::Result<()> { - check_prover_prequisites(shell); let args = args.fill_values_with_prompt()?; let ecosystem_config = EcosystemConfig::from_file(shell)?; let chain = ecosystem_config @@ -30,112 +28,110 @@ pub(crate) async fn run(args: ProverRunArgs, shell: &Shell) -> anyhow::Result<() let link_to_prover = get_link_to_prover(&ecosystem_config); shell.change_dir(link_to_prover.clone()); - match args.component { - Some(ProverComponent::Gateway) => run_gateway(shell, &chain)?, - Some(ProverComponent::WitnessGenerator) => { - run_witness_generator(shell, &chain, args.witness_generator_args)?
+ let component = args.component.context(anyhow!(MSG_MISSING_COMPONENT_ERR))?; + let in_docker = args.docker.unwrap_or(false); + + let application_args = component.get_application_args(in_docker)?; + let additional_args = component.get_additional_args(in_docker, args, &chain)?; + + let (message, error) = match component { + ProverComponent::WitnessGenerator => ( + MSG_RUNNING_WITNESS_GENERATOR, + MSG_RUNNING_WITNESS_GENERATOR_ERR, + ), + ProverComponent::WitnessVectorGenerator => ( + MSG_RUNNING_WITNESS_VECTOR_GENERATOR, + MSG_RUNNING_WITNESS_VECTOR_GENERATOR_ERR, + ), + ProverComponent::Prover => { + if !in_docker { + check_prerequisites(shell, &GPU_PREREQUISITES, false); + } + (MSG_RUNNING_PROVER, MSG_RUNNING_PROVER_ERR) } - Some(ProverComponent::WitnessVectorGenerator) => { - run_witness_vector_generator(shell, &chain, args.witness_vector_generator_args)? + ProverComponent::Compressor => { + if !in_docker { + check_prerequisites(shell, &GPU_PREREQUISITES, false); + shell.set_var( + "BELLMAN_CUDA_DIR", + ecosystem_config + .bellman_cuda_dir + .clone() + .expect(MSG_BELLMAN_CUDA_DIR_ERR), + ); + } + (MSG_RUNNING_COMPRESSOR, MSG_RUNNING_COMPRESSOR_ERR) } - Some(ProverComponent::Prover) => run_prover(shell, &chain)?, - Some(ProverComponent::Compressor) => run_compressor(shell, &chain, &ecosystem_config)?, - Some(ProverComponent::ProverJobMonitor) => run_prover_job_monitor(shell, &chain)?, - None => anyhow::bail!(MSG_MISSING_COMPONENT_ERR), + ProverComponent::ProverJobMonitor => ( + MSG_RUNNING_PROVER_JOB_MONITOR, + MSG_RUNNING_PROVER_JOB_MONITOR_ERR, + ), + ProverComponent::Gateway => (MSG_RUNNING_PROVER_GATEWAY, MSG_RUNNING_PROVER_GATEWAY_ERR), + }; + + if in_docker { + let path_to_configs = chain.configs.clone(); + let path_to_prover = get_link_to_prover(&ecosystem_config); + run_dockerized_component( + shell, + component.image_name(), + &application_args, + &additional_args, + message, + error, + &path_to_configs, + &path_to_prover, + )? + } else { + run_binary_component( + shell, + component.binary_name(), + &application_args, + &additional_args, + message, + error, + )? 
} Ok(()) } -fn run_gateway(shell: &Shell, chain: &ChainConfig) -> anyhow::Result<()> { - logger::info(MSG_RUNNING_PROVER_GATEWAY); - let config_path = chain.path_to_general_config(); - let secrets_path = chain.path_to_secrets_config(); - - let mut cmd = Cmd::new(cmd!(shell, "cargo run --release --bin zksync_prover_fri_gateway -- --config-path={config_path} --secrets-path={secrets_path}")); - cmd = cmd.with_force_run(); - cmd.run().context(MSG_RUNNING_PROVER_GATEWAY_ERR) -} - -fn run_witness_generator( +#[allow(clippy::too_many_arguments)] +fn run_dockerized_component( shell: &Shell, - chain: &ChainConfig, - args: WitnessGeneratorArgs, + image_name: &str, + application_args: &[String], + args: &[String], + message: &'static str, + error: &'static str, + path_to_configs: &PathBuf, + path_to_prover: &PathBuf, ) -> anyhow::Result<()> { - logger::info(MSG_RUNNING_WITNESS_GENERATOR); - let config_path = chain.path_to_general_config(); - let secrets_path = chain.path_to_secrets_config(); - let round = args.round.expect(MSG_WITNESS_GENERATOR_ROUND_ERR); + logger::info(message); - let round_str = match round { - WitnessGeneratorRound::AllRounds => "--all_rounds", - WitnessGeneratorRound::BasicCircuits => "--round=basic_circuits", - WitnessGeneratorRound::LeafAggregation => "--round=leaf_aggregation", - WitnessGeneratorRound::NodeAggregation => "--round=node_aggregation", - WitnessGeneratorRound::RecursionTip => "--round=recursion_tip", - WitnessGeneratorRound::Scheduler => "--round=scheduler", - }; + let mut cmd = Cmd::new(cmd!( + shell, + "docker run --net=host -v {path_to_prover}/data/keys:/prover/data/keys -v {path_to_prover}/artifacts:/artifacts -v {path_to_configs}:/configs {application_args...} {image_name} {args...}" + )); - let mut cmd = Cmd::new(cmd!(shell, "cargo run --release --bin zksync_witness_generator -- {round_str} --config-path={config_path} --secrets-path={secrets_path}")); cmd = cmd.with_force_run(); - cmd.run().context(MSG_RUNNING_WITNESS_GENERATOR_ERR) + cmd.run().context(error) } -fn run_witness_vector_generator( +fn run_binary_component( shell: &Shell, - chain: &ChainConfig, - args: WitnessVectorGeneratorArgs, + binary_name: &str, + application_args: &[String], + args: &[String], + message: &'static str, + error: &'static str, ) -> anyhow::Result<()> { - logger::info(MSG_RUNNING_WITNESS_VECTOR_GENERATOR); - let config_path = chain.path_to_general_config(); - let secrets_path = chain.path_to_secrets_config(); - - let threads = args.threads.unwrap_or(1).to_string(); - let mut cmd = Cmd::new(cmd!(shell, "cargo run --release --bin zksync_witness_vector_generator -- --config-path={config_path} --secrets-path={secrets_path} --threads={threads}")); - cmd = cmd.with_force_run(); - cmd.run().context(MSG_RUNNING_WITNESS_VECTOR_GENERATOR_ERR) -} - -fn run_prover(shell: &Shell, chain: &ChainConfig) -> anyhow::Result<()> { - logger::info(MSG_RUNNING_PROVER); - let config_path = chain.path_to_general_config(); - let secrets_path = chain.path_to_secrets_config(); - - let mut cmd = Cmd::new( - cmd!(shell, "cargo run --features gpu --release --bin zksync_prover_fri -- --config-path={config_path} --secrets-path={secrets_path}"), - ); - cmd = cmd.with_force_run(); - cmd.run().context(MSG_RUNNING_PROVER_ERR) -} - -fn run_compressor( - shell: &Shell, - chain: &ChainConfig, - ecosystem: &EcosystemConfig, -) -> anyhow::Result<()> { - logger::info(MSG_RUNNING_COMPRESSOR); - let config_path = chain.path_to_general_config(); - let secrets_path = chain.path_to_secrets_config(); - - shell.set_var( - 
"BELLMAN_CUDA_DIR", - ecosystem - .bellman_cuda_dir - .clone() - .expect(MSG_BELLMAN_CUDA_DIR_ERR), - ); - - let mut cmd = Cmd::new(cmd!(shell, "cargo run --features gpu --release --bin zksync_proof_fri_compressor -- --config-path={config_path} --secrets-path={secrets_path}")); - cmd = cmd.with_force_run(); - cmd.run().context(MSG_RUNNING_COMPRESSOR_ERR) -} - -fn run_prover_job_monitor(shell: &Shell, chain: &ChainConfig) -> anyhow::Result<()> { - logger::info(MSG_RUNNING_PROVER_JOB_MONITOR); - let config_path = chain.path_to_general_config(); - let secrets_path = chain.path_to_secrets_config(); + logger::info(message); - let mut cmd = Cmd::new(cmd!(shell, "cargo run --release --bin zksync_prover_job_monitor -- --config-path={config_path} --secrets-path={secrets_path}")); + let mut cmd = Cmd::new(cmd!( + shell, + "cargo run {application_args...} --release --bin {binary_name} -- {args...}" + )); cmd = cmd.with_force_run(); - cmd.run().context(MSG_RUNNING_PROVER_JOB_MONITOR) + cmd.run().context(error) } diff --git a/zk_toolbox/crates/zk_inception/src/commands/prover/setup_keys.rs b/zk_toolbox/crates/zk_inception/src/commands/prover/setup_keys.rs new file mode 100644 index 000000000000..09d9f76a47cf --- /dev/null +++ b/zk_toolbox/crates/zk_inception/src/commands/prover/setup_keys.rs @@ -0,0 +1,83 @@ +use anyhow::Ok; +use common::{ + check_prerequisites, cmd::Cmd, logger, spinner::Spinner, GCLOUD_PREREQUISITES, + GPU_PREREQUISITES, +}; +use config::EcosystemConfig; +use xshell::{cmd, Shell}; + +use super::utils::get_link_to_prover; +use crate::{ + commands::prover::args::setup_keys::{Mode, Region, SetupKeysArgs}, + messages::{MSG_GENERATING_SK_SPINNER, MSG_SK_GENERATED}, +}; + +pub(crate) async fn run(args: SetupKeysArgs, shell: &Shell) -> anyhow::Result<()> { + let args = args.fill_values_with_prompt(); + let ecosystem_config = EcosystemConfig::from_file(shell)?; + + if args.mode == Mode::Generate { + check_prerequisites(shell, &GPU_PREREQUISITES, false); + let link_to_prover = get_link_to_prover(&ecosystem_config); + shell.change_dir(&link_to_prover); + + let spinner = Spinner::new(MSG_GENERATING_SK_SPINNER); + let cmd = Cmd::new(cmd!( + shell, + "cargo run --features gpu --release --bin key_generator -- + generate-sk-gpu all --recompute-if-missing + --setup-path=data/keys + --path={link_to_prover}/data/keys" + )); + cmd.run()?; + spinner.finish(); + logger::outro(MSG_SK_GENERATED); + } else { + check_prerequisites(shell, &GCLOUD_PREREQUISITES, false); + + let link_to_setup_keys = get_link_to_prover(&ecosystem_config).join("data/keys"); + let path_to_keys_buckets = + get_link_to_prover(&ecosystem_config).join("setup-data-gpu-keys.json"); + + let region = args.region.expect("Region is not provided"); + + let file = shell + .read_file(path_to_keys_buckets) + .expect("Could not find commitments file in zksync-era"); + let json: serde_json::Value = + serde_json::from_str(&file).expect("Could not parse commitments.json"); + + let bucket = &match region { + Region::Us => json + .get("us") + .expect("Could not find link to US bucket") + .to_string(), + Region::Europe => json + .get("europe") + .expect("Could not find link to Europe bucket") + .to_string(), + Region::Asia => json + .get("asia") + .expect("Could not find link to Asia bucket") + .to_string(), + }; + + let len = bucket.len() - 2usize; + let bucket = &bucket[1..len]; + + let spinner = Spinner::new(&format!( + "Downloading keys from bucket: {} to {:?}", + bucket, link_to_setup_keys + )); + + let cmd = Cmd::new(cmd!( + shell, + "gsutil -m 
rsync -r {bucket} {link_to_setup_keys}" + )); + cmd.run()?; + spinner.finish(); + logger::outro("Keys are downloaded"); + } + + Ok(()) +} diff --git a/zk_toolbox/crates/zk_inception/src/consts.rs b/zk_toolbox/crates/zk_inception/src/consts.rs index 7463dc28570e..72c8948a65d1 100644 --- a/zk_toolbox/crates/zk_inception/src/consts.rs +++ b/zk_toolbox/crates/zk_inception/src/consts.rs @@ -8,5 +8,25 @@ pub const DEFAULT_CREDENTIALS_FILE: &str = "~/.config/gcloud/application_default pub const DEFAULT_PROOF_STORE_DIR: &str = "artifacts"; pub const BELLMAN_CUDA_DIR: &str = "era-bellman-cuda"; pub const L2_BASE_TOKEN_ADDRESS: &str = "0x000000000000000000000000000000000000800A"; + +/// Path to the JS runtime config for the block-explorer-app docker container to be mounted to +pub const EXPLORER_APP_DOCKER_CONFIG_PATH: &str = "/usr/src/app/packages/app/dist/config.js"; +pub const EXPLORER_APP_DOCKER_IMAGE: &str = "matterlabs/block-explorer-app"; +/// Path to the JS runtime config for the dapp-portal docker container to be mounted to +pub const PORTAL_DOCKER_CONFIG_PATH: &str = "/usr/src/app/dist/config.js"; pub const PORTAL_DOCKER_IMAGE: &str = "matterlabs/dapp-portal"; -pub const PORTAL_DOCKER_CONTAINER_PORT: u16 = 3000; + +pub const PROVER_GATEWAY_DOCKER_IMAGE: &str = "matterlabs/prover-fri-gateway:latest2.0"; +pub const WITNESS_GENERATOR_DOCKER_IMAGE: &str = "matterlabs/witness-generator:latest2.0"; +pub const WITNESS_VECTOR_GENERATOR_DOCKER_IMAGE: &str = + "matterlabs/witness-vector-generator:latest2.0"; +pub const PROVER_DOCKER_IMAGE: &str = "matterlabs/prover-gpu-fri:latest2.0"; +pub const COMPRESSOR_DOCKER_IMAGE: &str = "matterlabs/proof-fri-gpu-compressor:latest2.0"; +pub const PROVER_JOB_MONITOR_DOCKER_IMAGE: &str = "matterlabs/prover-job-monitor:latest2.0"; + +pub const PROVER_GATEWAY_BINARY_NAME: &str = "zksync_prover_fri_gateway"; +pub const WITNESS_GENERATOR_BINARY_NAME: &str = "zksync_witness_generator"; +pub const WITNESS_VECTOR_GENERATOR_BINARY_NAME: &str = "zksync_witness_vector_generator"; +pub const PROVER_BINARY_NAME: &str = "zksync_prover_fri"; +pub const COMPRESSOR_BINARY_NAME: &str = "zksync_proof_fri_compressor"; +pub const PROVER_JOB_MONITOR_BINARY_NAME: &str = "zksync_prover_job_monitor"; diff --git a/zk_toolbox/crates/zk_inception/src/defaults.rs b/zk_toolbox/crates/zk_inception/src/defaults.rs index 34b0eeae4195..544e28377403 100644 --- a/zk_toolbox/crates/zk_inception/src/defaults.rs +++ b/zk_toolbox/crates/zk_inception/src/defaults.rs @@ -7,6 +7,8 @@ lazy_static! 
{ Url::parse("postgres://postgres:notsecurepassword@localhost:5432").unwrap(); pub static ref DATABASE_PROVER_URL: Url = Url::parse("postgres://postgres:notsecurepassword@localhost:5432").unwrap(); + pub static ref DATABASE_EXPLORER_URL: Url = + Url::parse("postgres://postgres:notsecurepassword@localhost:5432").unwrap(); } pub const ROCKS_DB_STATE_KEEPER: &str = "state_keeper"; @@ -40,6 +42,14 @@ pub fn generate_db_names(config: &ChainConfig) -> DBNames { } } +pub fn generate_explorer_db_name(config: &ChainConfig) -> String { + format!( + "zksync_explorer_{}_{}", + config.l1_network.to_string().to_ascii_lowercase(), + config.name + ) +} + pub fn generate_external_node_db_name(config: &ChainConfig) -> String { format!( "external_node_{}_{}", diff --git a/zk_toolbox/crates/zk_inception/src/main.rs b/zk_toolbox/crates/zk_inception/src/main.rs index cb1b5388196a..f6f7d83dede6 100644 --- a/zk_toolbox/crates/zk_inception/src/main.rs +++ b/zk_toolbox/crates/zk_inception/src/main.rs @@ -13,11 +13,8 @@ use config::EcosystemConfig; use xshell::Shell; use crate::commands::{ - args::{PortalArgs, RunServerArgs}, - chain::ChainCommands, - ecosystem::EcosystemCommands, - external_node::ExternalNodeCommands, - prover::ProverCommands, + args::RunServerArgs, chain::ChainCommands, ecosystem::EcosystemCommands, + explorer::ExplorerCommands, external_node::ExternalNodeCommands, prover::ProverCommands, }; pub mod accept_ownership; @@ -60,7 +57,10 @@ pub enum InceptionSubcommands { #[command(subcommand)] ContractVerifier(ContractVerifierCommands), /// Run dapp-portal - Portal(PortalArgs), + Portal, + /// Run block-explorer + #[command(subcommand)] + Explorer(ExplorerCommands), /// Update ZKsync #[command(alias = "u")] Update(UpdateArgs), @@ -123,7 +123,8 @@ async fn run_subcommand(inception_args: Inception, shell: &Shell) -> anyhow::Res InceptionSubcommands::ContractVerifier(args) => { commands::contract_verifier::run(shell, args).await? 
} - InceptionSubcommands::Portal(args) => commands::portal::run(shell, args).await?, + InceptionSubcommands::Explorer(args) => commands::explorer::run(shell, args).await?, + InceptionSubcommands::Portal => commands::portal::run(shell).await?, InceptionSubcommands::Update(args) => commands::update::run(shell, args)?, InceptionSubcommands::Markdown => { clap_markdown::print_help_markdown::<Inception>(); diff --git a/zk_toolbox/crates/zk_inception/src/messages.rs b/zk_toolbox/crates/zk_inception/src/messages.rs index 30cb422dfca6..6f94a7b102a4 100644 --- a/zk_toolbox/crates/zk_inception/src/messages.rs +++ b/zk_toolbox/crates/zk_inception/src/messages.rs @@ -5,6 +5,10 @@ use ethers::{ utils::format_ether, }; +pub(super) const MSG_SETUP_KEYS_DOWNLOAD_SELECTION_PROMPT: &str = + "Do you want to download the setup keys or generate them?"; +pub(super) const MSG_SETUP_KEYS_REGION_PROMPT: &str = + "From which region do you want setup keys to be downloaded?"; /// Common messages pub(super) const MSG_SELECTED_CONFIG: &str = "Selected config"; pub(super) const MSG_CHAIN_NOT_INITIALIZED: &str = @@ -113,6 +117,9 @@ pub(super) fn msg_chain_doesnt_exist_err(chain_name: &str, chains: &Vec<String>) chain_name, chains ) } +pub(super) fn msg_chain_load_err(chain_name: &str) -> String { + format!("Failed to load chain config for {chain_name}") +} /// Chain create related messages pub(super) const MSG_PROVER_MODE_HELP: &str = "Prover options"; @@ -195,6 +202,14 @@ pub(super) fn msg_server_db_name_prompt(chain_name: &str) -> String { format!("Please provide server database name for chain {chain_name}") } +pub(super) fn msg_explorer_db_url_prompt(chain_name: &str) -> String { + format!("Please provide explorer database url for chain {chain_name}") +} + +pub(super) fn msg_explorer_db_name_prompt(chain_name: &str) -> String { + format!("Please provide explorer database name for chain {chain_name}") +} + /// Chain initialize bridges related messages pub(super) const MSG_DEPLOYING_L2_CONTRACT_SPINNER: &str = "Deploying l2 contracts"; @@ -227,14 +242,46 @@ pub(super) const MSG_FAILED_TO_RUN_SERVER_ERR: &str = "Failed to start server"; pub(super) const MSG_PREPARING_EN_CONFIGS: &str = "Preparing External Node config"; /// Portal related messages -pub(super) const MSG_PORTAL_CONFIG_IS_EMPTY_ERR: &str = "Hyperchains config is empty"; +pub(super) const MSG_PORTAL_FAILED_TO_FIND_ANY_CHAIN_ERR: &str = + "Failed to find any valid chain to run portal for"; pub(super) const MSG_PORTAL_FAILED_TO_CREATE_CONFIG_ERR: &str = "Failed to create portal config"; pub(super) const MSG_PORTAL_FAILED_TO_RUN_DOCKER_ERR: &str = "Failed to run portal docker container"; +pub(super) fn msg_portal_running_with_config(path: &Path) -> String { + format!("Running portal with configuration from: {}", path.display()) +} pub(super) fn msg_portal_starting_on(host: &str, port: u16) -> String { format!("Starting portal on http://{host}:{port}") } +/// Explorer related messages +pub(super) const MSG_EXPLORER_FAILED_TO_DROP_DATABASE_ERR: &str = + "Failed to drop explorer database"; +pub(super) const MSG_EXPLORER_FAILED_TO_RUN_DOCKER_SERVICES_ERR: &str = + "Failed to run docker compose with explorer services"; +pub(super) const MSG_EXPLORER_FAILED_TO_RUN_DOCKER_ERR: &str = + "Failed to run explorer docker container"; +pub(super) const MSG_EXPLORER_FAILED_TO_CREATE_CONFIG_ERR: &str = + "Failed to create explorer config"; +pub(super) const MSG_EXPLORER_FAILED_TO_FIND_ANY_CHAIN_ERR: &str = + "Failed to find any valid chain to run explorer for.
Did you run `zk_inception explorer init`?"; +pub(super) const MSG_EXPLORER_INITIALIZED: &str = "Explorer has been initialized successfully"; +pub(super) fn msg_explorer_initializing_database_for(chain: &str) -> String { + format!("Initializing explorer database for {chain} chain") +} +pub(super) fn msg_explorer_running_with_config(path: &Path) -> String { + format!( + "Running explorer with configuration from: {}", + path.display() + ) +} +pub(super) fn msg_explorer_starting_on(host: &str, port: u16) -> String { + format!("Starting explorer on http://{host}:{port}") +} +pub(super) fn msg_explorer_chain_not_initialized(chain: &str) -> String { + format!("Chain {chain} is not initialized for explorer: run `zk_inception explorer init --chain {chain}` first") +} + /// Forge utils related messages pub(super) const MSG_DEPLOYER_PK_NOT_SET_ERR: &str = "Deployer private key is not set"; @@ -264,6 +311,7 @@ pub(super) const MSG_GENERATING_SK_SPINNER: &str = "Generating setup keys..."; pub(super) const MSG_SK_GENERATED: &str = "Setup keys generated successfully"; pub(super) const MSG_MISSING_COMPONENT_ERR: &str = "Missing component"; pub(super) const MSG_RUNNING_PROVER_GATEWAY: &str = "Running gateway"; +pub(super) const MSG_RUNNING_PROVER_JOB_MONITOR_ERR: &str = "Failed to run prover job monitor"; pub(super) const MSG_RUNNING_PROVER_JOB_MONITOR: &str = "Running prover job monitor"; pub(super) const MSG_RUNNING_WITNESS_GENERATOR: &str = "Running witness generator"; pub(super) const MSG_RUNNING_WITNESS_VECTOR_GENERATOR: &str = "Running witness vector generator"; @@ -297,9 +345,13 @@ pub(super) const MSG_CREATE_GCS_BUCKET_NAME_PROMTP: &str = "What do you want to pub(super) const MSG_CREATE_GCS_BUCKET_LOCATION_PROMPT: &str = "What location do you want to use? Find available locations at https://cloud.google.com/storage/docs/locations"; pub(super) const MSG_PROOF_COMPRESSOR_CONFIG_NOT_FOUND_ERR: &str = "Proof compressor config not found"; -pub(super) const MSG_DOWNLOADING_SETUP_KEY_SPINNER: &str = "Downloading setup key..."; -pub(super) const MSG_DOWNLOAD_SETUP_KEY_PROMPT: &str = "Do you want to download the setup key?"; -pub(super) const MSG_SETUP_KEY_PATH_PROMPT: &str = "Provide the path to the setup key:"; +pub(super) const MSG_DOWNLOADING_SETUP_COMPRESSOR_KEY_SPINNER: &str = + "Downloading compressor setup key..."; +pub(super) const MSG_DOWNLOAD_SETUP_COMPRESSOR_KEY_PROMPT: &str = + "Do you want to download the setup key for the compressor?"; +pub(super) const MSG_INITIALIZE_BELLMAN_CUDA_PROMPT: &str = + "Do you want to initialize bellman-cuda?"; +pub(super) const MSG_SETUP_COMPRESSOR_KEY_PATH_PROMPT: &str = "Provide the path to the setup key:"; pub(super) const MSG_GETTING_GCP_PROJECTS_SPINNER: &str = "Getting GCP projects..."; pub(super) const MSG_GETTING_PROOF_STORE_CONFIG: &str = "Getting proof store configuration..."; pub(super) const MSG_GETTING_PUBLIC_STORE_CONFIG: &str = "Getting public store configuration..."; @@ -321,6 +373,7 @@ pub(super) const MSG_BELLMAN_CUDA_SELECTION_CLONE: &str = "Clone for me (recomme pub(super) const MSG_BELLMAN_CUDA_SELECTION_PATH: &str = "I have the code already"; pub(super) const MSG_CLOUD_TYPE_PROMPT: &str = "Select the cloud connection mode:"; pub(super) const MSG_THREADS_PROMPT: &str = "Provide the number of threads:"; +pub(super) const MSG_SETUP_KEYS_PROMPT: &str = "Do you want to set up keys?"; pub(super) fn msg_bucket_created(bucket_name: &str) -> String { format!("Bucket created successfully with url: gs://{bucket_name}") diff --git
a/zk_toolbox/crates/zk_supervisor/src/commands/contracts.rs b/zk_toolbox/crates/zk_supervisor/src/commands/contracts.rs index 0c635b2b0d34..bab4205cd66f 100644 --- a/zk_toolbox/crates/zk_supervisor/src/commands/contracts.rs +++ b/zk_toolbox/crates/zk_supervisor/src/commands/contracts.rs @@ -1,16 +1,16 @@ use std::path::PathBuf; -use clap::{Parser, ValueEnum}; +use clap::Parser; use common::{cmd::Cmd, logger, spinner::Spinner}; use config::EcosystemConfig; -use strum::EnumIter; use xshell::{cmd, Shell}; use crate::messages::{ MSG_BUILDING_CONTRACTS, MSG_BUILDING_CONTRACTS_SUCCESS, MSG_BUILDING_L1_CONTRACTS_SPINNER, MSG_BUILDING_L2_CONTRACTS_SPINNER, MSG_BUILDING_SYSTEM_CONTRACTS_SPINNER, - MSG_BUILD_L1_CONTRACTS_HELP, MSG_BUILD_L2_CONTRACTS_HELP, MSG_BUILD_SYSTEM_CONTRACTS_HELP, - MSG_CONTRACTS_DEPS_SPINNER, MSG_NOTHING_TO_BUILD_MSG, + MSG_BUILDING_TEST_CONTRACTS_SPINNER, MSG_BUILD_L1_CONTRACTS_HELP, MSG_BUILD_L2_CONTRACTS_HELP, + MSG_BUILD_SYSTEM_CONTRACTS_HELP, MSG_BUILD_TEST_CONTRACTS_HELP, MSG_CONTRACTS_DEPS_SPINNER, + MSG_NOTHING_TO_BUILD_MSG, }; #[derive(Debug, Parser)] @@ -21,6 +21,8 @@ pub struct ContractsArgs { pub l2_contracts: Option<bool>, #[clap(long, alias = "sc", help = MSG_BUILD_SYSTEM_CONTRACTS_HELP, default_missing_value = "true", num_args = 0..=1)] pub system_contracts: Option<bool>, + #[clap(long, alias = "test", help = MSG_BUILD_TEST_CONTRACTS_HELP, default_missing_value = "true", num_args = 0..=1)] + pub test_contracts: Option<bool>, } impl ContractsArgs { @@ -28,11 +30,13 @@ impl ContractsArgs { if self.l1_contracts.is_none() && self.l2_contracts.is_none() && self.system_contracts.is_none() + && self.test_contracts.is_none() { return vec![ ContractType::L1, ContractType::L2, ContractType::SystemContracts, + ContractType::TestContracts, ]; } @@ -47,17 +51,20 @@ impl ContractsArgs { if self.system_contracts.unwrap_or(false) { contracts.push(ContractType::SystemContracts); } + if self.test_contracts.unwrap_or(false) { + contracts.push(ContractType::TestContracts); + } contracts } } -#[derive(Debug, ValueEnum, EnumIter, strum::Display, PartialEq, Eq, Clone, Copy)] -#[strum(serialize_all = "lowercase")] +#[derive(Debug, PartialEq, Eq, Clone, Copy)] pub enum ContractType { L1, L2, SystemContracts, + TestContracts, } #[derive(Debug)] @@ -85,6 +92,11 @@ impl ContractBuilder { cmd: "yarn sc build".to_string(), msg: MSG_BUILDING_SYSTEM_CONTRACTS_SPINNER.to_string(), }, + ContractType::TestContracts => Self { + dir: ecosystem.link_to_code.join("etc/contracts-test-data"), + cmd: "yarn build".to_string(), + msg: MSG_BUILDING_TEST_CONTRACTS_SPINNER.to_string(), + }, } } diff --git a/zk_toolbox/crates/zk_supervisor/src/commands/test/rust.rs b/zk_toolbox/crates/zk_supervisor/src/commands/test/rust.rs index 59c86743291d..ad1318cfa768 100644 --- a/zk_toolbox/crates/zk_supervisor/src/commands/test/rust.rs +++ b/zk_toolbox/crates/zk_supervisor/src/commands/test/rust.rs @@ -1,12 +1,16 @@ +use std::{path::Path, str::FromStr}; + use anyhow::Context; -use common::{cmd::Cmd, config::global_config, db::wait_for_db, logger}; +use common::{cmd::Cmd, db::wait_for_db, logger}; use config::EcosystemConfig; +use url::Url; use xshell::{cmd, Shell}; use super::args::rust::RustArgs; use crate::{ commands::database, - dals::get_test_dals, + dals::{Dal, CORE_DAL_PATH, PROVER_DAL_PATH}, + defaults::{TEST_DATABASE_PROVER_URL, TEST_DATABASE_SERVER_URL}, messages::{ MSG_CARGO_NEXTEST_MISSING_ERR, MSG_CHAIN_NOT_FOUND_ERR, MSG_POSTGRES_CONFIG_NOT_FOUND_ERR, MSG_RESETTING_TEST_DATABASES, MSG_UNIT_TESTS_RUN_SUCCESS,
MSG_USING_CARGO_NEXTEST, @@ -17,16 +21,45 @@ pub async fn run(shell: &Shell, args: RustArgs) -> anyhow::Result<()> { let ecosystem = EcosystemConfig::from_file(shell)?; let chain = ecosystem .clone() - .load_chain(global_config().chain_name.clone()) + .load_chain(Some(ecosystem.default_chain)) .context(MSG_CHAIN_NOT_FOUND_ERR)?; - let general_config = chain.get_general_config()?; - let postgres = general_config - .postgres_config - .context(MSG_POSTGRES_CONFIG_NOT_FOUND_ERR)?; + let general_config = chain.get_general_config(); + let link_to_code = ecosystem.link_to_code; + + let (test_server_url, test_prover_url) = if let Ok(general_config) = general_config { + let postgres = general_config + .postgres_config + .context(MSG_POSTGRES_CONFIG_NOT_FOUND_ERR)?; + + ( + postgres + .test_server_url + .context(MSG_POSTGRES_CONFIG_NOT_FOUND_ERR)?, + postgres + .test_prover_url + .context(MSG_POSTGRES_CONFIG_NOT_FOUND_ERR)?, + ) + } else { + ( + TEST_DATABASE_SERVER_URL.to_string(), + TEST_DATABASE_PROVER_URL.to_string(), + ) + }; + + let dals = vec![ + Dal { + url: Url::from_str(&test_server_url.clone())?, + path: CORE_DAL_PATH.to_string(), + }, + Dal { + url: Url::from_str(&test_prover_url.clone())?, + path: PROVER_DAL_PATH.to_string(), + }, + ]; - reset_test_databases(shell).await?; + reset_test_databases(shell, &link_to_code, dals).await?; - let _dir_guard = shell.push_dir(&ecosystem.link_to_code); + let _dir_guard = shell.push_dir(&link_to_code); let cmd = if nextest_is_installed(shell)? { logger::info(MSG_USING_CARGO_NEXTEST); @@ -43,18 +76,8 @@ pub async fn run(shell: &Shell, args: RustArgs) -> anyhow::Result<()> { }; let cmd = cmd - .env( - "TEST_DATABASE_URL", - postgres - .test_server_url - .context(MSG_POSTGRES_CONFIG_NOT_FOUND_ERR)?, - ) - .env( - "TEST_PROVER_DATABASE_URL", - postgres - .test_prover_url - .context(MSG_POSTGRES_CONFIG_NOT_FOUND_ERR)?, - ); + .env("TEST_DATABASE_URL", test_server_url) + .env("TEST_PROVER_DATABASE_URL", test_prover_url); cmd.run()?; logger::outro(MSG_UNIT_TESTS_RUN_SUCCESS); @@ -70,9 +93,12 @@ fn nextest_is_installed(shell: &Shell) -> anyhow::Result<bool> { Ok(out.contains("cargo-nextest")) } -async fn reset_test_databases(shell: &Shell) -> anyhow::Result<()> { +async fn reset_test_databases( + shell: &Shell, + link_to_code: &Path, + dals: Vec<Dal>, +) -> anyhow::Result<()> { logger::info(MSG_RESETTING_TEST_DATABASES); - let ecosystem = EcosystemConfig::from_file(shell)?; Cmd::new(cmd!( shell, @@ -85,11 +111,11 @@ async fn reset_test_databases(shell: &Shell) -> anyhow::Result<()> { )) .run()?; - for dal in get_test_dals(shell)?
{ + for dal in dals { let mut url = dal.url.clone(); url.set_path(""); wait_for_db(&url, 3).await?; - database::reset::reset_database(shell, ecosystem.link_to_code.clone(), dal.clone()).await?; + database::reset::reset_database(shell, link_to_code, dal.clone()).await?; } Ok(()) diff --git a/zk_toolbox/crates/zk_supervisor/src/dals.rs b/zk_toolbox/crates/zk_supervisor/src/dals.rs index a8600a2665e6..f9c07585f6dd 100644 --- a/zk_toolbox/crates/zk_supervisor/src/dals.rs +++ b/zk_toolbox/crates/zk_supervisor/src/dals.rs @@ -1,5 +1,3 @@ -use std::str::FromStr; - use anyhow::{anyhow, Context}; use common::config::global_config; use config::{EcosystemConfig, SecretsConfig}; @@ -11,8 +9,8 @@ use crate::{ messages::{MSG_CHAIN_NOT_FOUND_ERR, MSG_DATABASE_MUST_BE_PRESENTED}, }; -const CORE_DAL_PATH: &str = "core/lib/dal"; -const PROVER_DAL_PATH: &str = "prover/crates/lib/prover_dal"; +pub const CORE_DAL_PATH: &str = "core/lib/dal"; +pub const PROVER_DAL_PATH: &str = "prover/crates/lib/prover_dal"; #[derive(Debug, Clone)] pub struct SelectedDals { @@ -50,10 +48,6 @@ pub fn get_dals( Ok(dals) } -pub fn get_test_dals(shell: &Shell) -> anyhow::Result<Vec<Dal>> { - Ok(vec![get_test_prover_dal(shell)?, get_test_core_dal(shell)?]) -} - pub fn get_prover_dal(shell: &Shell, url: Option<String>) -> anyhow::Result<Dal> { let url = if let Some(url) = url { Url::parse(&url)? @@ -94,51 +88,6 @@ pub fn get_core_dal(shell: &Shell, url: Option<String>) -> anyhow::Result<Dal> { }) } -pub fn get_test_core_dal(shell: &Shell) -> anyhow::Result<Dal> { - let general_config = get_general_config(shell)?; - let postgres = general_config - .postgres_config - .context(MSG_DATABASE_MUST_BE_PRESENTED)?; - - let url = Url::from_str( - &postgres - .test_server_url - .clone() - .context(MSG_DATABASE_MUST_BE_PRESENTED)?, - )?; - Ok(Dal { - path: CORE_DAL_PATH.to_string(), - url, - }) -} - -pub fn get_test_prover_dal(shell: &Shell) -> anyhow::Result<Dal> { - let general_config = get_general_config(shell)?; - let postgres = general_config - .postgres_config - .context(MSG_DATABASE_MUST_BE_PRESENTED)?; - - let url = Url::from_str( - &postgres - .test_prover_url - .clone() - .context(MSG_DATABASE_MUST_BE_PRESENTED)?, - )?; - - Ok(Dal { - path: PROVER_DAL_PATH.to_string(), - url, - }) -} - -fn get_general_config(shell: &Shell) -> anyhow::Result<GeneralConfig> { - let ecosystem_config = EcosystemConfig::from_file(shell)?; - let chain_config = ecosystem_config - .load_chain(global_config().chain_name.clone()) - .context(MSG_CHAIN_NOT_FOUND_ERR)?; - chain_config.get_general_config() -} - fn get_secrets(shell: &Shell) -> anyhow::Result<SecretsConfig> { let ecosystem_config = EcosystemConfig::from_file(shell)?; let chain_config = ecosystem_config diff --git a/zk_toolbox/crates/zk_supervisor/src/defaults.rs b/zk_toolbox/crates/zk_supervisor/src/defaults.rs new file mode 100644 index 000000000000..f4bae739c2d1 --- /dev/null +++ b/zk_toolbox/crates/zk_supervisor/src/defaults.rs @@ -0,0 +1,4 @@ +pub const TEST_DATABASE_SERVER_URL: &str = + "postgres://postgres:notsecurepassword@localhost:5433/zksync_local_test"; +pub const TEST_DATABASE_PROVER_URL: &str = + "postgres://postgres:notsecurepassword@localhost:5433/prover_local_test"; diff --git a/zk_toolbox/crates/zk_supervisor/src/main.rs b/zk_toolbox/crates/zk_supervisor/src/main.rs index 6b5bfa46943e..a8722787b5ff 100644 --- a/zk_toolbox/crates/zk_supervisor/src/main.rs +++ b/zk_toolbox/crates/zk_supervisor/src/main.rs @@ -21,6 +21,7 @@ use crate::commands::{clean::CleanCommands, fmt::FmtArgs}; mod commands; mod dals; +mod defaults; mod messages; #[derive(Parser, Debug)]
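The test/rust.rs hunk above changes how unit tests locate their databases: the two Dal handles are now built inline, with the hardcoded local URLs from the new defaults.rs as a fallback when the chain has no general config. A minimal sketch of that fallback, where `Dal` is a simplified stand-in for the struct in zk_toolbox/crates/zk_supervisor/src/dals.rs and `test_core_dal` is a hypothetical helper name introduced only for illustration:

use std::str::FromStr;
use url::Url;

// Simplified stand-in for zk_supervisor's `Dal`: a database URL plus the path to the DAL crate.
struct Dal {
    url: Url,
    path: String,
}

const TEST_DATABASE_SERVER_URL: &str =
    "postgres://postgres:notsecurepassword@localhost:5433/zksync_local_test";
const CORE_DAL_PATH: &str = "core/lib/dal";

// Hypothetical helper mirroring the fallback logic: prefer the test URL from the
// chain's general config when one is available, otherwise use the fixed default.
fn test_core_dal(config_url: Option<String>) -> anyhow::Result<Dal> {
    let url = config_url.unwrap_or_else(|| TEST_DATABASE_SERVER_URL.to_string());
    Ok(Dal {
        url: Url::from_str(&url)?,
        path: CORE_DAL_PATH.to_string(),
    })
}

The practical effect is that `zk_supervisor test rust` no longer requires a fully initialized chain config just to resolve the test database URLs.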
diff --git a/zk_toolbox/crates/zk_supervisor/src/messages.rs b/zk_toolbox/crates/zk_supervisor/src/messages.rs index 17f01e664678..ff9cc104a505 100644 --- a/zk_toolbox/crates/zk_supervisor/src/messages.rs +++ b/zk_toolbox/crates/zk_supervisor/src/messages.rs @@ -112,10 +112,12 @@ pub(super) const MSG_CONTRACTS_DEPS_SPINNER: &str = "Installing dependencies.."; pub(super) const MSG_BUILDING_L2_CONTRACTS_SPINNER: &str = "Building L2 contracts.."; pub(super) const MSG_BUILDING_L1_CONTRACTS_SPINNER: &str = "Building L1 contracts.."; pub(super) const MSG_BUILDING_SYSTEM_CONTRACTS_SPINNER: &str = "Building system contracts.."; +pub(super) const MSG_BUILDING_TEST_CONTRACTS_SPINNER: &str = "Building test contracts.."; pub(super) const MSG_BUILDING_CONTRACTS_SUCCESS: &str = "Contracts built successfully"; pub(super) const MSG_BUILD_L1_CONTRACTS_HELP: &str = "Build L1 contracts"; pub(super) const MSG_BUILD_L2_CONTRACTS_HELP: &str = "Build L2 contracts"; pub(super) const MSG_BUILD_SYSTEM_CONTRACTS_HELP: &str = "Build system contracts"; +pub(super) const MSG_BUILD_TEST_CONTRACTS_HELP: &str = "Build test contracts"; // Integration tests related messages pub(super) fn msg_integration_tests_run(external_node: bool) -> String {
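A side note on the bucket handling in the new setup_keys.rs above: serde_json's `Value::to_string()` re-serializes the value with its JSON quotes, which is what the manual `bucket[1..len]` slicing compensates for. A hedged alternative sketch (not what this diff ships) that borrows the string contents via `Value::as_str()` and avoids the index arithmetic; `bucket_for_region` is an illustrative name, and the shipped code inlines this logic in `run`:

use serde_json::Value;

// Sketch: look up the per-region bucket URL from the parsed
// setup-data-gpu-keys.json map ("us" | "europe" | "asia")
// without serializing the value back to JSON.
fn bucket_for_region(json: &Value, region_key: &str) -> anyhow::Result<String> {
    let bucket = json
        .get(region_key)
        .and_then(Value::as_str) // borrows the bare string; no surrounding quotes
        .ok_or_else(|| anyhow::anyhow!("no bucket configured for region {region_key}"))?;
    // Match the existing behavior of dropping the URL's trailing `/`.
    Ok(bucket.trim_end_matches('/').to_string())
}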