diff --git a/.env b/.env
deleted file mode 100644
index e69de29bb2d1..000000000000
diff --git a/.github/release-please/manifest.json b/.github/release-please/manifest.json
index 811c773b6f54..9bae11c7b26a 100644
--- a/.github/release-please/manifest.json
+++ b/.github/release-please/manifest.json
@@ -1,5 +1,5 @@
 {
-  "core": "24.24.0",
+  "core": "24.25.0",
   "prover": "16.5.0",
   "zk_toolbox": "0.1.2"
 }
diff --git a/.github/scripts/rate_limit_check.sh b/.github/scripts/rate_limit_check.sh
new file mode 100755
index 000000000000..6594c685d847
--- /dev/null
+++ b/.github/scripts/rate_limit_check.sh
@@ -0,0 +1,43 @@
+#!/bin/bash
+
+set -o errexit
+set -o pipefail
+
+
+api_endpoint="https://api.github.com/users/zksync-era-bot"
+wait_time=60
+max_retries=60
+retry_count=0
+
+while [[ $retry_count -lt $max_retries ]]; do
+  response=$(run_retried curl -s -w "%{http_code}" -o temp.json "$api_endpoint")
+  http_code=$(echo "$response" | tail -n1)
+
+  if [[ "$http_code" == "200" ]]; then
+    echo "Request successful. Not rate-limited."
+    cat temp.json
+    rm temp.json
+    exit 0
+  elif [[ "$http_code" == "403" ]]; then
+    # grep exits non-zero when nothing matches; `|| true` keeps errexit from
+    # aborting the script before the else branch below can report the failure.
+    rate_limit_exceeded=$(jq -r '.message' temp.json | grep -i "API rate limit exceeded" || true)
+    if [[ -n "$rate_limit_exceeded" ]]; then
+      retry_count=$((retry_count+1))
+      echo "API rate limit exceeded. Retry $retry_count of $max_retries. Retrying in $wait_time seconds..."
+      sleep $wait_time
+    else
+      echo "Request failed with HTTP status $http_code."
+      cat temp.json
+      rm temp.json
+      exit 1
+    fi
+  else
+    echo "Request failed with HTTP status $http_code."
+    cat temp.json
+    rm temp.json
+    exit 1
+  fi
+done
+
+echo "Reached the maximum number of retries ($max_retries). Exiting."
+rm temp.json
+exit 1
diff --git a/.github/workflows/build-base.yml b/.github/workflows/build-base.yml
new file mode 100644
index 000000000000..83be44c126f9
--- /dev/null
+++ b/.github/workflows/build-base.yml
@@ -0,0 +1,159 @@
+name: Build zksync-build-base Docker image
+on:
+  workflow_dispatch:
+    inputs:
+      repo_ref:
+        description: "git reference of the zksync-era to build"
+        required: true
+        default: main
+jobs:
+  build-images:
+    name: Build and Push Docker Images
+    runs-on: ${{ fromJSON('["matterlabs-ci-runner-high-performance", "matterlabs-ci-runner-arm"]')[contains(matrix.arch, 'arm')] }}
+    outputs:
+      image_tag_sha: ${{ steps.get-sha.outputs.image_tag_sha }}
+    # Needed to push to GitHub Package Registry
+    permissions:
+      packages: write
+      contents: read
+    env:
+      DOCKERHUB_USER: ${{ secrets.DOCKERHUB_USER }}
+      DOCKERHUB_TOKEN: ${{ secrets.DOCKERHUB_TOKEN }}
+      REPO_REF: ${{ github.event.inputs.repo_ref }}
+    strategy:
+      matrix:
+        name: [ build-base ]
+        repository: [ zksync-build-base ]
+        arch: [ amd64, arm64 ]
+
+    steps:
+      - uses: actions/checkout@6d193bf28034eafb982f37bd894289fe649468fc # v4.1.7
+        with:
+          submodules: "recursive"
+
+      - name: Login to google container registry
+        run: |
+          gcloud auth configure-docker us-docker.pkg.dev -q
+
+      - name: Login to DockerHub
+        uses: docker/login-action@9780b0c442fbb1117ed29e0efdff1e18412f7567 # v3.3.0
+        with:
+          username: ${{ secrets.DOCKERHUB_USER }}
+          password: ${{ secrets.DOCKERHUB_TOKEN }}
+
+      - name: Login to GitHub Container Registry
+        uses: docker/login-action@9780b0c442fbb1117ed29e0efdff1e18412f7567 # v3.3.0
+        with:
+          registry: ghcr.io
+          username: ${{ github.actor }}
+          password: ${{ secrets.GITHUB_TOKEN }}
+
+      - name: Get tag
+        id: get-sha
+        run: |
+          echo IMAGE_TAG_SHA=$(git rev-parse --short HEAD) >> $GITHUB_ENV
+          echo image_tag_sha=$(git rev-parse --short HEAD) >> $GITHUB_OUTPUT
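+          # The short SHA is written twice on purpose: GITHUB_ENV exposes it to later
+          # steps of this job as $IMAGE_TAG_SHA, while GITHUB_OUTPUT publishes it as
+          # the step output behind needs.build-images.outputs.image_tag_sha used below.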
+      - name: Set up Docker Buildx
+        uses: docker/setup-buildx-action@988b5a0280414f521da01fcc63a27aeeb4b104db # v3.6.1
+
+      - name: Build and push
+        uses: docker/build-push-action@5cd11c3a4ced054e52742c5fd54dca954e0edd85 # v6.7.0
+        with:
+          push: true
+          context: .
+          file: docker/build-base/Dockerfile
+          labels: |
+            org.opencontainers.image.source=https://github.com/matter-labs/zksync-era
+            org.opencontainers.image.licenses="MIT OR Apache-2.0"
+          tags: |
+            matterlabs/zksync-build-base:${{ steps.get-sha.outputs.image_tag_sha }}-${{ matrix.arch }}
+            us-docker.pkg.dev/matterlabs-infra/matterlabs-docker/zksync-build-base:${{ steps.get-sha.outputs.image_tag_sha }}-${{ matrix.arch }}
+            ghcr.io/${{ github.repository_owner }}/zksync-build-base:${{ steps.get-sha.outputs.image_tag_sha }}-${{ matrix.arch }}
+
+  multiarch_manifest:
+    # Needed to push to GitHub Package Registry
+    permissions:
+      packages: write
+      contents: read
+    needs: [ build-images ]
+    env:
+      IMAGE_TAG_SUFFIX: ${{ needs.build-images.outputs.image_tag_sha }}
+    runs-on: [ matterlabs-ci-runner-high-performance ]
+    steps:
+      - name: Set up Docker Buildx
+        uses: docker/setup-buildx-action@988b5a0280414f521da01fcc63a27aeeb4b104db # v3.6.1
+
+      - name: Login to google container registry
+        run: |
+          gcloud auth configure-docker us-docker.pkg.dev -q
+
+      - name: Login to DockerHub
+        uses: docker/login-action@9780b0c442fbb1117ed29e0efdff1e18412f7567 # v3.3.0
+        with:
+          username: ${{ secrets.DOCKERHUB_USER }}
+          password: ${{ secrets.DOCKERHUB_TOKEN }}
+
+      - name: Login to GitHub Container Registry
+        uses: docker/login-action@9780b0c442fbb1117ed29e0efdff1e18412f7567 # v3.3.0
+        with:
+          registry: ghcr.io
+          username: ${{ github.actor }}
+          password: ${{ secrets.GITHUB_TOKEN }}
+
+      - name: Create and push multi-arch manifests for Dockerhub
+        shell: bash
+        run: |
+          images=("zksync-build-base")
+          archs=("amd64" "arm64")
+
+          for img in "${images[@]}"; do
+            multiarch_tag="matterlabs/zksync-build-base:latest"
+            individual_images=()
+
+            for arch in "${archs[@]}"; do
+              TAG="$IMAGE_TAG_SUFFIX"
+              docker pull matterlabs/zksync-build-base:${TAG}-${arch} --platform linux/${arch}
+              individual_images+=("matterlabs/zksync-build-base:${TAG}-${arch}")
+            done
+
+            docker buildx imagetools create --tag "${multiarch_tag}" "${individual_images[@]}"
+          done
+
+      - name: Create and push multi-arch manifests for GitHub Container Registry
+        shell: bash
+        run: |
+          images=("zksync-build-base")
+          archs=("amd64" "arm64")
+
+          for img in "${images[@]}"; do
+            multiarch_tag="ghcr.io/${{ github.repository_owner }}/zksync-build-base:latest"
+            individual_images=()
+
+            for arch in "${archs[@]}"; do
+              TAG="$IMAGE_TAG_SUFFIX"
+              docker pull ghcr.io/${{ github.repository_owner }}/zksync-build-base:${TAG}-${arch} --platform linux/${arch}
+              individual_images+=("ghcr.io/${{ github.repository_owner }}/zksync-build-base:${TAG}-${arch}")
+            done
+
+            docker buildx imagetools create --tag "${multiarch_tag}" "${individual_images[@]}"
+          done
+
+      - name: Create and push multi-arch manifests for Google Artifact Registry
+        shell: bash
+        run: |
+          images=("zksync-build-base")
+          archs=("amd64" "arm64")
+
+          for img in "${images[@]}"; do
+            multiarch_tag="us-docker.pkg.dev/matterlabs-infra/matterlabs-docker/zksync-build-base:latest"
+            individual_images=()
+
+            for arch in "${archs[@]}"; do
+              TAG="$IMAGE_TAG_SUFFIX"
+              docker pull us-docker.pkg.dev/matterlabs-infra/matterlabs-docker/zksync-build-base:${TAG}-${arch} --platform linux/${arch}
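+              # Collect the per-arch refs just pulled; after this inner loop they are
+              # stitched into a single multi-arch tag via `docker buildx imagetools create`.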
+              individual_images+=("us-docker.pkg.dev/matterlabs-infra/matterlabs-docker/zksync-build-base:${TAG}-${arch}")
+            done
+
+            docker buildx imagetools create --tag "${multiarch_tag}" "${individual_images[@]}"
+          done
diff --git a/.github/workflows/build-runtime-base.yml b/.github/workflows/build-runtime-base.yml
new file mode 100644
index 000000000000..eaec05bc6bcf
--- /dev/null
+++ b/.github/workflows/build-runtime-base.yml
@@ -0,0 +1,66 @@
+name: Build zksync-runtime-base Docker image
+on:
+  workflow_dispatch:
+    inputs:
+      repo_ref:
+        description: "git reference of the zksync-era to build"
+        required: true
+        default: main
+jobs:
+  build-images:
+    name: Build and Push Docker Images
+    runs-on: matterlabs-ci-runner-high-performance
+    outputs:
+      image_tag_sha: ${{ steps.get-sha.outputs.image_tag_sha }}
+    # Needed to push to GitHub Package Registry
+    permissions:
+      packages: write
+      contents: read
+    env:
+      REPO_REF: ${{ github.event.inputs.repo_ref }}
+    strategy:
+      matrix:
+        name: [ runtime-base ]
+        image_name: [ zksync-runtime-base ]
+
+    steps:
+      - uses: actions/checkout@6d193bf28034eafb982f37bd894289fe649468fc # v4.1.7
+        with:
+          submodules: "recursive"
+
+      - name: Login to google container registry
+        run: |
+          gcloud auth configure-docker us-docker.pkg.dev -q
+
+      - name: Login to GitHub Container Registry
+        uses: docker/login-action@9780b0c442fbb1117ed29e0efdff1e18412f7567 # v3.3.0
+        with:
+          registry: ghcr.io
+          username: ${{ github.actor }}
+          password: ${{ secrets.GITHUB_TOKEN }}
+
+      - name: Get tag
+        id: get-sha
+        run: |
+          echo IMAGE_TAG_SHA=$(git rev-parse --short HEAD) >> $GITHUB_ENV
+          echo image_tag_sha=$(git rev-parse --short HEAD) >> $GITHUB_OUTPUT
+
+      - name: Set up Docker Buildx
+        uses: docker/setup-buildx-action@988b5a0280414f521da01fcc63a27aeeb4b104db # v3.6.1
+
+      - name: Set up QEMU
+        uses: docker/setup-qemu-action@49b3bc8e6bdd4a60e6116a5414239cba5943d3cf # v3.2.0
+
+      - name: Build and push
+        uses: docker/build-push-action@5cd11c3a4ced054e52742c5fd54dca954e0edd85 # v6.7.0
+        with:
+          push: true
+          context: .
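+          # QEMU and Buildx (set up above) let this single runner cross-build both
+          # platforms; build-push-action pushes the result as one multi-arch manifest.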
+ platforms: arm64, amd64 + file: docker/${{ matrix.name }}/Dockerfile + labels: | + org.opencontainers.image.source=https://github.com/matter-labs/zksync-era + org.opencontainers.image.licenses="MIT OR Apache-2.0" + tags: | + us-docker.pkg.dev/matterlabs-infra/matterlabs-docker/${{ matrix.image_name }}:latest + ghcr.io/${{ github.repository_owner }}/${{ matrix.image_name }}:latest diff --git a/.github/workflows/ci-common-reusable.yml b/.github/workflows/ci-common-reusable.yml index d2f9e11348f0..2e5d36feebff 100644 --- a/.github/workflows/ci-common-reusable.yml +++ b/.github/workflows/ci-common-reusable.yml @@ -18,13 +18,16 @@ jobs: echo ZKSYNC_HOME=$(pwd) >> $GITHUB_ENV echo $(pwd)/bin >> $GITHUB_PATH echo IN_DOCKER=1 >> .env + echo "SCCACHE_GCS_BUCKET=matterlabs-infra-sccache-storage" >> .env + echo "SCCACHE_GCS_SERVICE_ACCOUNT=gha-ci-runners@matterlabs-infra.iam.gserviceaccount.com" >> .env + echo "SCCACHE_GCS_RW_MODE=READ_WRITE" >> .env + echo "RUSTC_WRAPPER=sccache" >> .env - name: Start services run: | run_retried docker-compose -f ${RUNNER_COMPOSE_FILE} pull mkdir -p ./volumes/postgres docker-compose -f ${RUNNER_COMPOSE_FILE} up --build -d zk postgres - ci_run sccache --start-server - name: Init run: | diff --git a/.github/workflows/ci-core-lint-reusable.yml b/.github/workflows/ci-core-lint-reusable.yml index e46a67dd8af4..404f0966b405 100644 --- a/.github/workflows/ci-core-lint-reusable.yml +++ b/.github/workflows/ci-core-lint-reusable.yml @@ -15,13 +15,16 @@ jobs: echo ZKSYNC_HOME=$(pwd) >> $GITHUB_ENV echo $(pwd)/bin >> $GITHUB_PATH echo IN_DOCKER=1 >> .env + echo "SCCACHE_GCS_BUCKET=matterlabs-infra-sccache-storage" >> .env + echo "SCCACHE_GCS_SERVICE_ACCOUNT=gha-ci-runners@matterlabs-infra.iam.gserviceaccount.com" >> .env + echo "SCCACHE_GCS_RW_MODE=READ_WRITE" >> .env + echo "RUSTC_WRAPPER=sccache" >> .env echo "prover_url=postgres://postgres:notsecurepassword@localhost:5432/zksync_local_prover" >> $GITHUB_ENV echo "core_url=postgres://postgres:notsecurepassword@localhost:5432/zksync_local" >> $GITHUB_ENV - name: Start services run: | ci_localnet_up - ci_run sccache --start-server - name: Build run: | diff --git a/.github/workflows/ci-core-reusable.yml b/.github/workflows/ci-core-reusable.yml index cac585a00a87..53ff64398291 100644 --- a/.github/workflows/ci-core-reusable.yml +++ b/.github/workflows/ci-core-reusable.yml @@ -26,6 +26,10 @@ jobs: echo ZKSYNC_HOME=$(pwd) >> $GITHUB_ENV echo $(pwd)/bin >> $GITHUB_PATH echo IN_DOCKER=1 >> .env + echo "SCCACHE_GCS_BUCKET=matterlabs-infra-sccache-storage" >> .env + echo "SCCACHE_GCS_SERVICE_ACCOUNT=gha-ci-runners@matterlabs-infra.iam.gserviceaccount.com" >> .env + echo "SCCACHE_GCS_RW_MODE=READ_WRITE" >> .env + echo "RUSTC_WRAPPER=sccache" >> .env # TODO: Remove when we after upgrade of hardhat-plugins - name: pre-download compilers @@ -48,7 +52,6 @@ jobs: - name: Start services run: | ci_localnet_up - ci_run sccache --start-server - name: Init run: | @@ -84,6 +87,10 @@ jobs: echo ZKSYNC_HOME=$(pwd) >> $GITHUB_ENV echo $(pwd)/bin >> $GITHUB_PATH echo IN_DOCKER=1 >> .env + echo "SCCACHE_GCS_BUCKET=matterlabs-infra-sccache-storage" >> .env + echo "SCCACHE_GCS_SERVICE_ACCOUNT=gha-ci-runners@matterlabs-infra.iam.gserviceaccount.com" >> .env + echo "SCCACHE_GCS_RW_MODE=READ_WRITE" >> .env + echo "RUSTC_WRAPPER=sccache" >> .env - name: Loadtest configuration run: | @@ -119,7 +126,7 @@ jobs: --set-as-default false \ --ignore-prerequisites \ --legacy-bridge - + ci_run zk_inception ecosystem init --dev --verbose ci_run zk_supervisor 
contracts --test-contracts @@ -157,12 +164,15 @@ jobs: echo ZKSYNC_HOME=$(pwd) >> $GITHUB_ENV echo $(pwd)/bin >> $GITHUB_PATH echo IN_DOCKER=1 >> .env + echo "SCCACHE_GCS_BUCKET=matterlabs-infra-sccache-storage" >> .env + echo "SCCACHE_GCS_SERVICE_ACCOUNT=gha-ci-runners@matterlabs-infra.iam.gserviceaccount.com" >> .env + echo "SCCACHE_GCS_RW_MODE=READ_WRITE" >> .env + echo "RUSTC_WRAPPER=sccache" >> .env echo RUN_CONTRACT_VERIFICATION_TEST=true >> $GITHUB_ENV - name: Start services run: | ci_localnet_up - ci_run sccache --start-server - name: Build zk_toolbox run: ci_run bash -c "./bin/zkt" @@ -176,7 +186,7 @@ jobs: GENESIS_RECOVERY_LOGS_DIR=logs/genesis_recovery/ EXTERNAL_NODE_LOGS_DIR=logs/external_node REVERT_LOGS_DIR=logs/revert - + mkdir -p $SERVER_LOGS_DIR mkdir -p $INTEGRATION_TESTS_LOGS_DIR mkdir -p $INTEGRATION_TESTS_EN_LOGS_DIR @@ -184,7 +194,7 @@ jobs: mkdir -p $GENESIS_RECOVERY_LOGS_DIR mkdir -p $EXTERNAL_NODE_LOGS_DIR mkdir -p $REVERT_LOGS_DIR - + echo "SERVER_LOGS_DIR=$SERVER_LOGS_DIR" >> $GITHUB_ENV echo "INTEGRATION_TESTS_LOGS_DIR=$INTEGRATION_TESTS_LOGS_DIR" >> $GITHUB_ENV echo "INTEGRATION_TESTS_EN_LOGS_DIR=$INTEGRATION_TESTS_EN_LOGS_DIR" >> $GITHUB_ENV @@ -327,7 +337,7 @@ jobs: - name: Initialize Contract verifier run: | - ci_run zk_inception contract-verifier init --zksolc-version=v1.5.3 --zkvyper-version=v1.5.4 --solc-version=0.8.26 --vyper-version=v0.3.10 --era-vm-solc-version=0.8.18-1.0.1 --only --chain era + ci_run zk_inception contract-verifier init --zksolc-version=v1.5.3 --zkvyper-version=v1.5.4 --solc-version=0.8.26 --vyper-version=v0.3.10 --era-vm-solc-version=0.8.26-1.0.1 --only --chain era ci_run zk_inception contract-verifier run --chain era &> ${{ env.SERVER_LOGS_DIR }}/contract-verifier-rollup.log & - name: Run servers @@ -352,7 +362,7 @@ jobs: ci_run zk_supervisor test integration --no-deps --ignore-prerequisites --chain custom_token &> ${{ env.INTEGRATION_TESTS_LOGS_DIR }}/custom_token.log & PID3=$! - + ci_run zk_supervisor test integration --no-deps --ignore-prerequisites --chain consensus &> ${{ env.INTEGRATION_TESTS_LOGS_DIR }}/consensus.log & PID4=$! 
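+          # Each chain's suite runs as a background job; the later `wait $PID` calls
+          # surface each suite's exit code, so a failure in any chain fails the step.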
@@ -365,8 +375,8 @@ jobs: run: | ci_run zk_inception external-node configs --db-url=postgres://postgres:notsecurepassword@localhost:5432 \ --db-name=zksync_en_localhost_era_rollup --l1-rpc-url=http://localhost:8545 --chain era - ci_run zk_inception external-node init --ignore-prerequisites --chain era - + ci_run zk_inception external-node init --ignore-prerequisites --chain era + ci_run zk_inception external-node configs --db-url=postgres://postgres:notsecurepassword@localhost:5432 \ --db-name=zksync_en_localhost_era_validium1 --l1-rpc-url=http://localhost:8545 --chain validium ci_run zk_inception external-node init --ignore-prerequisites --chain validium @@ -374,7 +384,7 @@ jobs: ci_run zk_inception external-node configs --db-url=postgres://postgres:notsecurepassword@localhost:5432 \ --db-name=zksync_en_localhost_era_custom_token --l1-rpc-url=http://localhost:8545 --chain custom_token ci_run zk_inception external-node init --ignore-prerequisites --chain custom_token - + ci_run zk_inception external-node configs --db-url=postgres://postgres:notsecurepassword@localhost:5432 \ --db-name=zksync_en_localhost_era_consensus --l1-rpc-url=http://localhost:8545 --chain consensus ci_run zk_inception external-node init --ignore-prerequisites --chain consensus @@ -384,13 +394,13 @@ jobs: ci_run zk_supervisor test recovery --snapshot --no-deps --ignore-prerequisites --verbose --chain era &> ${{ env.SNAPSHOT_RECOVERY_LOGS_DIR }}/rollup.log & PID1=$! - + ci_run zk_supervisor test recovery --snapshot --no-deps --ignore-prerequisites --verbose --chain validium &> ${{ env.SNAPSHOT_RECOVERY_LOGS_DIR }}/validium.log & PID2=$! - + ci_run zk_supervisor test recovery --snapshot --no-deps --ignore-prerequisites --verbose --chain custom_token &> ${{ env.SNAPSHOT_RECOVERY_LOGS_DIR }}/custom_token.log & PID3=$! - + ci_run zk_supervisor test recovery --snapshot --no-deps --ignore-prerequisites --verbose --chain consensus &> ${{ env.SNAPSHOT_RECOVERY_LOGS_DIR }}/consensus.log & PID4=$! @@ -409,7 +419,7 @@ jobs: ci_run zk_supervisor test recovery --no-deps --no-kill --ignore-prerequisites --verbose --chain custom_token &> ${{ env.GENESIS_RECOVERY_LOGS_DIR }}/custom_token.log & PID3=$! - + ci_run zk_supervisor test recovery --no-deps --no-kill --ignore-prerequisites --verbose --chain consensus &> ${{ env.GENESIS_RECOVERY_LOGS_DIR }}/consensus.log & PID4=$! @@ -448,7 +458,7 @@ jobs: run: | ci_run killall -INT zksync_server || true ci_run killall -INT zksync_external_node || true - + ci_run zk_supervisor test revert --no-deps --external-node --no-kill --ignore-prerequisites --chain era &> ${{ env.REVERT_LOGS_DIR }}/rollup.log & PID1=$! @@ -460,7 +470,7 @@ jobs: ci_run zk_supervisor test revert --no-deps --external-node --no-kill --ignore-prerequisites --chain consensus &> ${{ env.REVERT_LOGS_DIR }}/consensus.log & PID4=$! 
- + wait $PID1 wait $PID2 wait $PID3 diff --git a/.github/workflows/ci-docs-reusable.yml b/.github/workflows/ci-docs-reusable.yml index 2b8eea15a827..5b1d5a9bcdfa 100644 --- a/.github/workflows/ci-docs-reusable.yml +++ b/.github/workflows/ci-docs-reusable.yml @@ -17,12 +17,16 @@ jobs: echo ZKSYNC_HOME=$(pwd) >> $GITHUB_ENV echo $(pwd)/bin >> $GITHUB_PATH echo IN_DOCKER=1 >> .env + echo "SCCACHE_GCS_BUCKET=matterlabs-infra-sccache-storage" >> .env + echo "SCCACHE_GCS_SERVICE_ACCOUNT=gha-ci-runners@matterlabs-infra.iam.gserviceaccount.com" >> .env + echo "SCCACHE_GCS_RW_MODE=READ_WRITE" >> .env + echo "RUSTC_WRAPPER=sccache" >> .env - name: Start services run: | run_retried docker compose pull zk docker compose up -d zk - + - name: Build run: | ci_run ./bin/zkt diff --git a/.github/workflows/ci-prover-reusable.yml b/.github/workflows/ci-prover-reusable.yml index ac971dafac99..367a86c5f40f 100644 --- a/.github/workflows/ci-prover-reusable.yml +++ b/.github/workflows/ci-prover-reusable.yml @@ -17,6 +17,10 @@ jobs: echo ZKSYNC_HOME=$(pwd) >> $GITHUB_ENV echo $(pwd)/bin >> $GITHUB_PATH echo IN_DOCKER=1 >> .env + echo "SCCACHE_GCS_BUCKET=matterlabs-infra-sccache-storage" >> .env + echo "SCCACHE_GCS_SERVICE_ACCOUNT=gha-ci-runners@matterlabs-infra.iam.gserviceaccount.com" >> .env + echo "SCCACHE_GCS_RW_MODE=READ_WRITE" >> .env + echo "RUSTC_WRAPPER=sccache" >> .env echo "prover_url=postgres://postgres:notsecurepassword@localhost:5432/zksync_local_prover" >> $GITHUB_ENV echo "core_url=postgres://postgres:notsecurepassword@localhost:5432/zksync_local" >> $GITHUB_ENV @@ -25,7 +29,6 @@ jobs: run_retried docker-compose -f ${RUNNER_COMPOSE_FILE} pull mkdir -p ./volumes/postgres docker-compose -f ${RUNNER_COMPOSE_FILE} up --build -d zk postgres - ci_run sccache --start-server - name: Init run: | @@ -50,13 +53,16 @@ jobs: echo ZKSYNC_HOME=$(pwd) >> $GITHUB_ENV echo $(pwd)/bin >> $GITHUB_PATH echo IN_DOCKER=1 >> .env + echo "SCCACHE_GCS_BUCKET=matterlabs-infra-sccache-storage" >> .env + echo "SCCACHE_GCS_SERVICE_ACCOUNT=gha-ci-runners@matterlabs-infra.iam.gserviceaccount.com" >> .env + echo "SCCACHE_GCS_RW_MODE=READ_WRITE" >> .env + echo "RUSTC_WRAPPER=sccache" >> .env - name: Start services run: | run_retried docker-compose -f ${RUNNER_COMPOSE_FILE} pull mkdir -p ./volumes/postgres docker-compose -f ${RUNNER_COMPOSE_FILE} up --build -d zk postgres - ci_run sccache --start-server - name: Init run: | diff --git a/.github/workflows/ci.yml b/.github/workflows/ci.yml index 291f9237ac52..0a27a719aeb6 100644 --- a/.github/workflows/ci.yml +++ b/.github/workflows/ci.yml @@ -111,7 +111,7 @@ jobs: name: Build core images needs: changed_files if: ${{ (needs.changed_files.outputs.core == 'true' || needs.changed_files.outputs.all == 'true') && !contains(github.ref_name, 'release-please--branches') }} - uses: ./.github/workflows/build-core-template.yml + uses: ./.github/workflows/new-build-core-template.yml with: image_tag_suffix: ${{ needs.setup.outputs.image_tag_suffix }} action: "build" @@ -136,7 +136,7 @@ jobs: name: Build contract verifier needs: changed_files if: ${{ (needs.changed_files.outputs.core == 'true' || needs.changed_files.outputs.all == 'true') && !contains(github.ref_name, 'release-please--branches') }} - uses: ./.github/workflows/build-contract-verifier-template.yml + uses: ./.github/workflows/new-build-contract-verifier-template.yml with: image_tag_suffix: ${{ needs.setup.outputs.image_tag_suffix }} action: "build" @@ -148,7 +148,7 @@ jobs: name: Build prover images needs: changed_files if: ${{ 
(needs.changed_files.outputs.prover == 'true' || needs.changed_files.outputs.all == 'true') && !contains(github.ref_name, 'release-please--branches') }} - uses: ./.github/workflows/build-prover-template.yml + uses: ./.github/workflows/new-build-prover-template.yml with: image_tag_suffix: ${{ needs.setup.outputs.image_tag_suffix }} action: "build" @@ -162,12 +162,10 @@ jobs: name: Build prover images with avx512 instructions needs: changed_files if: ${{ (needs.changed_files.outputs.prover == 'true' || needs.changed_files.outputs.all == 'true') && !contains(github.ref_name, 'release-please--branches') }} - uses: ./.github/workflows/build-witness-generator-template.yml + uses: ./.github/workflows/new-build-witness-generator-template.yml with: image_tag_suffix: ${{ needs.setup.outputs.image_tag_suffix }}-avx512 action: "build" - ERA_BELLMAN_CUDA_RELEASE: ${{ vars.ERA_BELLMAN_CUDA_RELEASE }} - is_pr_from_fork: ${{ github.event.pull_request.head.repo.fork == true }} WITNESS_GENERATOR_RUST_FLAGS: "-Ctarget_feature=+avx512bw,+avx512cd,+avx512dq,+avx512f,+avx512vl" secrets: DOCKERHUB_USER: ${{ secrets.DOCKERHUB_USER }} diff --git a/.github/workflows/new-build-contract-verifier-template.yml b/.github/workflows/new-build-contract-verifier-template.yml new file mode 100644 index 000000000000..42791eab6669 --- /dev/null +++ b/.github/workflows/new-build-contract-verifier-template.yml @@ -0,0 +1,271 @@ +name: Build contract verifier +on: + workflow_call: + secrets: + DOCKERHUB_USER: + description: "DOCKERHUB_USER" + required: true + DOCKERHUB_TOKEN: + description: "DOCKERHUB_TOKEN" + required: true + inputs: + image_tag_suffix: + description: "Optional suffix to override tag name generation" + type: string + required: false + compilers: + description: 'JSON of required compilers and their versions' + type: string + required: false + default: '[{ "zksolc": ["1.3.14", "1.3.16", "1.3.17", "1.3.1", "1.3.7", "1.3.18", "1.3.19", "1.3.21"] } , { "zkvyper": ["1.3.13"] }]' + action: + type: string + default: non-push + required: false + +jobs: + prepare-contracts: + name: Prepare contracts + runs-on: matterlabs-ci-runner-high-performance + steps: + - uses: actions/checkout@692973e3d937129bcbf40652eb9f2f61becf3332 # v4.1.7 + with: + submodules: "recursive" + + - name: Prepare ENV + shell: bash + run: | + echo ZKSYNC_HOME=$(pwd) >> $GITHUB_ENV + echo CI=1 >> $GITHUB_ENV + echo $(pwd)/bin >> $GITHUB_PATH + echo CI=1 >> .env + echo IN_DOCKER=1 >> .env + + - name: Download contracts + shell: bash + run: | + commit_sha=$(git submodule status contracts | awk '{print $1}' | tr -d '-') + page=1 + filtered_tag="" + while [ true ]; do + echo "Page: $page" + tags=$(run_retried curl -s -H "Authorization: Bearer ${{ secrets.GITHUB_TOKEN }}" -H "Accept: application/vnd.github+json" \ + "https://api.github.com/repos/matter-labs/era-contracts/tags?per_page=100&page=${page}" | jq .) + if [ $(jq length <<<"$tags") -eq 0 ]; then + echo "No tag found on all pages." + echo "BUILD_CONTRACTS=true" >> "$GITHUB_ENV" + exit 0 + fi + filtered_tag=$(jq -r --arg commit_sha "$commit_sha" 'map(select(.commit.sha == $commit_sha)) | .[].name' <<<"$tags") + if [[ ! 
-z "$filtered_tag" ]]; then
+              echo "BUILD_CONTRACTS=false" >> "$GITHUB_ENV"
+              break
+            fi
+            ((page++))
+          done
+          echo "Contracts tag is: ${filtered_tag}"
+          mkdir -p ./contracts
+          run_retried curl -s -LO https://github.com/matter-labs/era-contracts/releases/download/${filtered_tag}/l1-contracts.tar.gz
+          run_retried curl -s -LO https://github.com/matter-labs/era-contracts/releases/download/${filtered_tag}/l2-contracts.tar.gz
+          run_retried curl -s -LO https://github.com/matter-labs/era-contracts/releases/download/${filtered_tag}/system-contracts.tar.gz
+          tar -C ./contracts -zxf l1-contracts.tar.gz
+          tar -C ./contracts -zxf l2-contracts.tar.gz
+          tar -C ./contracts -zxf system-contracts.tar.gz
+
+      - name: Install Apt dependencies
+        shell: bash
+        run: |
+          sudo apt-get update && sudo apt-get install -y libssl-dev pkg-config
+
+      - name: Install Node
+        uses: actions/setup-node@1e60f620b9541d16bece96c5465dc8ee9832be0b # v4.0.3
+        with:
+          node-version: 20
+          cache: 'npm'
+
+      - name: Install Yarn
+        run: npm install -g yarn
+
+      - name: Setup rust
+        uses: actions-rust-lang/setup-rust-toolchain@1fbea72663f6d4c03efaab13560c8a24cfd2a7cc # v1.9.0
+        with:
+          toolchain: nightly-2024-08-01
+
+      - name: Install cargo-nextest from crates.io
+        uses: baptiste0928/cargo-install@904927dbe77864e0f2281519fe9d5bd097a220b3 # v3.1.1
+        with:
+          crate: cargo-nextest
+
+      - name: Install sqlx-cli from crates.io
+        uses: baptiste0928/cargo-install@904927dbe77864e0f2281519fe9d5bd097a220b3 # v3.1.1
+        with:
+          crate: sqlx-cli
+          tag: 0.8.1
+
+      - name: Install Foundry
+        uses: foundry-rs/foundry-toolchain@8f1998e9878d786675189ef566a2e4bf24869773 # v1.2.0
+
+      - name: Pre-download compilers
+        shell: bash
+        run: |
+          # Download needed versions of vyper compiler
+          # Not sanitized due to unconventional path and tags
+          mkdir -p ./hardhat-nodejs/compilers-v2/vyper/linux
+          wget -nv -O ./hardhat-nodejs/compilers-v2/vyper/linux/0.3.10 https://github.com/vyperlang/vyper/releases/download/v0.3.10/vyper.0.3.10+commit.91361694.linux
+          wget -nv -O ./hardhat-nodejs/compilers-v2/vyper/linux/0.3.3 https://github.com/vyperlang/vyper/releases/download/v0.3.3/vyper.0.3.3+commit.48e326f0.linux
+          chmod +x ./hardhat-nodejs/compilers-v2/vyper/linux/0.3.10
+          chmod +x ./hardhat-nodejs/compilers-v2/vyper/linux/0.3.3
+
+          COMPILERS_JSON='${{ inputs.compilers }}'
+          echo "$COMPILERS_JSON" | jq -r '.[] | to_entries[] | .key as $compiler | .value[] | "\(.),\($compiler)"' | while IFS=, read -r version compiler; do
+            mkdir -p "./hardhat-nodejs/compilers-v2/$compiler"
+            wget -nv -O "./hardhat-nodejs/compilers-v2/$compiler/${compiler}-v${version}" "https://github.com/matter-labs/${compiler}-bin/releases/download/v${version}/${compiler}-linux-amd64-musl-v${version}"
+            chmod +x "./hardhat-nodejs/compilers-v2/$compiler/${compiler}-v${version}"
+          done
+
+      - name: init
+        shell: bash
+        run: |
+          mkdir -p ./volumes/postgres
+          docker compose up -d postgres
+          zkt || true
+
+      - name: build contracts
+        shell: bash
+        run: |
+          cp etc/tokens/{test,localhost}.json
+          zk_supervisor contracts
+
+      - name: Upload contracts
+        uses: actions/upload-artifact@50769540e7f4bd5e21e526ee35c689e35e0d6874 # v4.4.0
+        with:
+          name: contracts-verifier
+          path: |
+            ./contracts
+
+  build-images:
+    name: Build and Push Docker Images
+    needs: prepare-contracts
+    runs-on: ${{ fromJSON('["matterlabs-ci-runner-high-performance", "matterlabs-ci-runner-arm"]')[contains(matrix.platforms, 'arm')] }}
+    strategy:
+      matrix:
+        components:
+          - contract-verifier
+          - verified-sources-fetcher
+        platforms:
+          - linux/amd64
+
+    steps:
+      - uses: actions/checkout@692973e3d937129bcbf40652eb9f2f61becf3332 # v4.1.7
+        with:
+          submodules: "recursive"
+
+      - name: Set up Docker Buildx
+        uses: docker/setup-buildx-action@988b5a0280414f521da01fcc63a27aeeb4b104db # v3.6.1
+
+      - name: Setup env
+        shell: bash
+        run: |
+          echo ZKSYNC_HOME=$(pwd) >> $GITHUB_ENV
+          echo CI=1 >> $GITHUB_ENV
+          echo $(pwd)/bin >> $GITHUB_PATH
+          echo CI=1 >> .env
+          echo IN_DOCKER=1 >> .env
+
+      - name: Download setup key
+        shell: bash
+        run: |
+          run_retried curl -LO https://storage.googleapis.com/matterlabs-setup-keys-us/setup-keys/setup_2\^26.key
+
+      - name: Set env vars
+        shell: bash
+        run: |
+          echo PLATFORM=$(echo ${{ matrix.platforms }} | tr '/' '-') >> $GITHUB_ENV
+          echo IMAGE_TAG_SHA=$(git rev-parse --short HEAD) >> $GITHUB_ENV
+          # Support for custom tag suffix
+          if [ -n "${{ inputs.image_tag_suffix }}" ]; then
+            echo IMAGE_TAG_SHA_TS="${{ inputs.image_tag_suffix }}" >> $GITHUB_ENV
+          else
+            echo IMAGE_TAG_SHA_TS=$(git rev-parse --short HEAD)-$(date +%s) >> $GITHUB_ENV
+          fi
+
+      - name: Download contracts
+        uses: actions/download-artifact@fa0a91b85d4f404e444e00e005971372dc801d16 # v4.1.8
+        with:
+          name: contracts-verifier
+          path: |
+            ./contracts
+
+      - name: login to Docker registries
+        if: ${{ inputs.action == 'push' }}
+        shell: bash
+        run: |
+          docker login -u ${{ secrets.DOCKERHUB_USER }} -p ${{ secrets.DOCKERHUB_TOKEN }}
+          gcloud auth configure-docker us-docker.pkg.dev -q
+
+      - name: Build and push
+        uses: docker/build-push-action@5cd11c3a4ced054e52742c5fd54dca954e0edd85 # v6.7.0
+        with:
+          context: .
+          push: ${{ inputs.action == 'push' }}
+          file: docker/${{ matrix.components }}/Dockerfile
+          build-args: |
+            SCCACHE_GCS_BUCKET=matterlabs-infra-sccache-storage
+            SCCACHE_GCS_SERVICE_ACCOUNT=gha-ci-runners@matterlabs-infra.iam.gserviceaccount.com
+            SCCACHE_GCS_RW_MODE=READ_WRITE
+            RUSTC_WRAPPER=sccache
+          tags: |
+            us-docker.pkg.dev/matterlabs-infra/matterlabs-docker/${{ matrix.components }}:latest
+            matterlabs/${{ matrix.components }}:latest
+            us-docker.pkg.dev/matterlabs-infra/matterlabs-docker/${{ matrix.components }}:latest2.0
+            matterlabs/${{ matrix.components }}:latest2.0
+            us-docker.pkg.dev/matterlabs-infra/matterlabs-docker/${{ matrix.components }}:2.0-${{ env.IMAGE_TAG_SHA }}
+            matterlabs/${{ matrix.components }}:2.0-${{ env.IMAGE_TAG_SHA }}
+            us-docker.pkg.dev/matterlabs-infra/matterlabs-docker/${{ matrix.components }}:${{ env.IMAGE_TAG_SHA }}
+            matterlabs/${{ matrix.components }}:${{ env.IMAGE_TAG_SHA }}
+            us-docker.pkg.dev/matterlabs-infra/matterlabs-docker/${{ matrix.components }}:2.0-${{ env.IMAGE_TAG_SHA_TS }}
+            matterlabs/${{ matrix.components }}:2.0-${{ env.IMAGE_TAG_SHA_TS }}
+            us-docker.pkg.dev/matterlabs-infra/matterlabs-docker/${{ matrix.components }}:${{ env.IMAGE_TAG_SHA_TS }}
+            matterlabs/${{ matrix.components }}:${{ env.IMAGE_TAG_SHA_TS }}
+            us-docker.pkg.dev/matterlabs-infra/matterlabs-docker/${{ matrix.components }}:2.0-${{ env.IMAGE_TAG_SHA_TS }}-${{ env.PLATFORM }}
+            matterlabs/${{ matrix.components }}:2.0-${{ env.IMAGE_TAG_SHA_TS }}-${{ env.PLATFORM }}
+            us-docker.pkg.dev/matterlabs-infra/matterlabs-docker/${{ matrix.components }}:${{ env.IMAGE_TAG_SHA_TS }}-${{ env.PLATFORM }}
+            matterlabs/${{ matrix.components }}:${{ env.IMAGE_TAG_SHA_TS }}-${{ env.PLATFORM }}
+
+  create_manifest:
+    name: Create release manifest
+    runs-on: matterlabs-ci-runner
+    needs: build-images
+    if: ${{ inputs.action == 'push' }}
+    strategy:
+      matrix:
+        component:
+          - name: contract-verifier
+            platform: linux/amd64
+          - name: verified-sources-fetcher
+            platform: linux/amd64
+    env:
+      IMAGE_TAG_SUFFIX: ${{ inputs.image_tag_suffix }}
+    steps:
+      - uses: actions/checkout@a5ac7e51b41094c92402da3b24376905380afc29 # v4
+
+      - name: login to Docker registries
+        run: |
+          docker login -u ${{ secrets.DOCKERHUB_USER }} -p ${{ secrets.DOCKERHUB_TOKEN }}
+          gcloud auth configure-docker us-docker.pkg.dev -q
+
+      - name: Create Docker manifest
+        run: |
+          docker_repositories=("matterlabs/${{ matrix.component.name }}" "us-docker.pkg.dev/matterlabs-infra/matterlabs-docker/${{ matrix.component.name }}")
+          platforms=${{ matrix.component.platform }}
+          for repo in "${docker_repositories[@]}"; do
+            platform_tags=""
+            for platform in ${platforms//,/ }; do
+              platform=$(echo $platform | tr '/' '-')
+              platform_tags+=" --amend ${repo}:${IMAGE_TAG_SUFFIX}-${platform}"
+            done
+            for manifest in "${repo}:${IMAGE_TAG_SUFFIX}" "${repo}:2.0-${IMAGE_TAG_SUFFIX}" "${repo}:latest" "${repo}:latest2.0"; do
+              docker manifest create ${manifest} ${platform_tags}
+              docker manifest push ${manifest}
+            done
+          done
diff --git a/.github/workflows/new-build-core-template.yml b/.github/workflows/new-build-core-template.yml
new file mode 100644
index 000000000000..fba6a68b8eec
--- /dev/null
+++ b/.github/workflows/new-build-core-template.yml
@@ -0,0 +1,287 @@
+name: Build Core images
+on:
+  workflow_call:
+    secrets:
+      DOCKERHUB_USER:
+        description: "DOCKERHUB_USER"
+        required: true
+      DOCKERHUB_TOKEN:
+        description: "DOCKERHUB_TOKEN"
+        required: true
+    inputs:
+      image_tag_suffix:
+        description: "Optional suffix to override tag name generation"
+        type: string
+        required: false
+      compilers:
+        description: 'JSON of required compilers and their versions'
+        type: string
+        required: false
+        default: '[{ "zksolc": ["1.3.14", "1.3.16", "1.3.17", "1.3.1", "1.3.7", "1.3.18", "1.3.19", "1.3.21"] } , { "zkvyper": ["1.3.13"] }]'
+      en_alpha_release:
+        description: 'Flag that determines if EN release should be marked as alpha'
+        type: boolean
+        required: false
+        default: false
+      action:
+        type: string
+        required: false
+        default: "do nothing"
+
+jobs:
+  prepare-contracts:
+    name: Prepare contracts
+    runs-on: matterlabs-ci-runner-high-performance
+    steps:
+      - uses: actions/checkout@692973e3d937129bcbf40652eb9f2f61becf3332 # v4.1.7
+        with:
+          submodules: "recursive"
+
+      - name: Prepare ENV
+        shell: bash
+        run: |
+          echo ZKSYNC_HOME=$(pwd) >> $GITHUB_ENV
+          echo CI=1 >> $GITHUB_ENV
+          echo $(pwd)/bin >> $GITHUB_PATH
+          echo CI=1 >> .env
+          echo IN_DOCKER=1 >> .env
+
+      - name: Download contracts
+        shell: bash
+        run: |
+          commit_sha=$(git submodule status contracts | awk '{print $1}' | tr -d '-')
+          page=1
+          filtered_tag=""
+          while [ true ]; do
+            echo "Page: $page"
+            tags=$(run_retried curl -s -H "Authorization: Bearer ${{ secrets.GITHUB_TOKEN }}" -H "Accept: application/vnd.github+json" \
+              "https://api.github.com/repos/matter-labs/era-contracts/tags?per_page=100&page=${page}" | jq .)
+            if [ $(jq length <<<"$tags") -eq 0 ]; then
+              echo "No tag found on all pages."
+              echo "BUILD_CONTRACTS=true" >> "$GITHUB_ENV"
+              exit 0
+            fi
+            filtered_tag=$(jq -r --arg commit_sha "$commit_sha" 'map(select(.commit.sha == $commit_sha)) | .[].name' <<<"$tags")
+            if [[ ! -z "$filtered_tag" ]]; then
+              echo "BUILD_CONTRACTS=false" >> "$GITHUB_ENV"
+              break
+            fi
+            ((page++))
+          done
+          echo "Contracts tag is: ${filtered_tag}"
+          mkdir -p ./contracts
+          run_retried curl -s -LO https://github.com/matter-labs/era-contracts/releases/download/${filtered_tag}/l1-contracts.tar.gz
+          run_retried curl -s -LO https://github.com/matter-labs/era-contracts/releases/download/${filtered_tag}/l2-contracts.tar.gz
+          run_retried curl -s -LO https://github.com/matter-labs/era-contracts/releases/download/${filtered_tag}/system-contracts.tar.gz
+          tar -C ./contracts -zxf l1-contracts.tar.gz
+          tar -C ./contracts -zxf l2-contracts.tar.gz
+          tar -C ./contracts -zxf system-contracts.tar.gz
+
+      - name: Install Apt dependencies
+        shell: bash
+        run: |
+          sudo apt-get update && sudo apt-get install -y libssl-dev pkg-config
+
+      - name: Install Node
+        uses: actions/setup-node@1e60f620b9541d16bece96c5465dc8ee9832be0b # v4.0.3
+        with:
+          node-version: 20
+          cache: 'npm'
+
+      - name: Install Yarn
+        run: npm install -g yarn
+
+      - name: Setup rust
+        uses: actions-rust-lang/setup-rust-toolchain@1fbea72663f6d4c03efaab13560c8a24cfd2a7cc # v1.9.0
+        with:
+          toolchain: nightly-2024-08-01
+
+      - name: Install cargo-nextest from crates.io
+        uses: baptiste0928/cargo-install@904927dbe77864e0f2281519fe9d5bd097a220b3 # v3.1.1
+        with:
+          crate: cargo-nextest
+
+      - name: Install sqlx-cli from crates.io
+        uses: baptiste0928/cargo-install@904927dbe77864e0f2281519fe9d5bd097a220b3 # v3.1.1
+        with:
+          crate: sqlx-cli
+          tag: 0.8.1
+
+      - name: Install Foundry
+        uses: foundry-rs/foundry-toolchain@8f1998e9878d786675189ef566a2e4bf24869773 # v1.2.0
+
+      - name: Pre-download compilers
+        shell: bash
+        run: |
+          # Download needed versions of vyper compiler
+          # Not sanitized due to unconventional path and tags
+          mkdir -p ./hardhat-nodejs/compilers-v2/vyper/linux
+          wget -nv -O ./hardhat-nodejs/compilers-v2/vyper/linux/0.3.10 https://github.com/vyperlang/vyper/releases/download/v0.3.10/vyper.0.3.10+commit.91361694.linux
+          wget -nv -O ./hardhat-nodejs/compilers-v2/vyper/linux/0.3.3 https://github.com/vyperlang/vyper/releases/download/v0.3.3/vyper.0.3.3+commit.48e326f0.linux
+          chmod +x ./hardhat-nodejs/compilers-v2/vyper/linux/0.3.10
+          chmod +x ./hardhat-nodejs/compilers-v2/vyper/linux/0.3.3
+
+          COMPILERS_JSON='${{ inputs.compilers }}'
+          echo "$COMPILERS_JSON" | jq -r '.[] | to_entries[] | .key as $compiler | .value[] | "\(.),\($compiler)"' | while IFS=, read -r version compiler; do
+            mkdir -p "./hardhat-nodejs/compilers-v2/$compiler"
+            wget -nv -O "./hardhat-nodejs/compilers-v2/$compiler/${compiler}-v${version}" "https://github.com/matter-labs/${compiler}-bin/releases/download/v${version}/${compiler}-linux-amd64-musl-v${version}"
+            chmod +x "./hardhat-nodejs/compilers-v2/$compiler/${compiler}-v${version}"
+          done
+
+      - name: init
+        shell: bash
+        run: |
+          mkdir -p ./volumes/postgres
+          docker compose up -d postgres
+          zkt || true
+
+      - name: build contracts
+        shell: bash
+        run: |
+          cp etc/tokens/{test,localhost}.json
+          zk_supervisor contracts
+
+      - name: Upload contracts
+        uses: actions/upload-artifact@50769540e7f4bd5e21e526ee35c689e35e0d6874 # v4.4.0
+        with:
+          name: contracts
+          path: |
+            ./contracts
+
+  build-images:
+    name: Build and Push Docker Images
+    needs: prepare-contracts
+    env:
+      IMAGE_TAG_SUFFIX: ${{ inputs.image_tag_suffix }}${{ (inputs.en_alpha_release && matrix.components == 'external-node') && '-alpha' || '' }}
+    runs-on: ${{ fromJSON('["matterlabs-ci-runner-high-performance", "matterlabs-ci-runner-arm"]')[contains(matrix.platforms, 'arm')] }}
+    strategy:
+      matrix:
+        components:
+          - server-v2
+          - external-node
+          - snapshots-creator
+        platforms:
+          - linux/amd64
+        include:
+          - components: external-node
+            platforms: linux/arm64
+
+    steps:
+      - uses: actions/checkout@692973e3d937129bcbf40652eb9f2f61becf3332 # v4.1.7
+        with:
+          submodules: "recursive"
+
+      - name: Set up Docker Buildx
+        uses: docker/setup-buildx-action@988b5a0280414f521da01fcc63a27aeeb4b104db # v3.6.1
+
+      - name: Setup env
+        shell: bash
+        run: |
+          echo ZKSYNC_HOME=$(pwd) >> $GITHUB_ENV
+          echo CI=1 >> $GITHUB_ENV
+          echo $(pwd)/bin >> $GITHUB_PATH
+          echo CI=1 >> .env
+          echo IN_DOCKER=1 >> .env
+
+      - name: Download setup key
+        shell: bash
+        run: |
+          run_retried curl -LO https://storage.googleapis.com/matterlabs-setup-keys-us/setup-keys/setup_2\^26.key
+
+      - name: Set env vars
+        shell: bash
+        run: |
+          echo PLATFORM=$(echo ${{ matrix.platforms }} | tr '/' '-') >> $GITHUB_ENV
+          echo IMAGE_TAG_SHA=$(git rev-parse --short HEAD) >> $GITHUB_ENV
+          # Support for custom tag suffix
+          if [ -n "${{ inputs.image_tag_suffix }}" ]; then
+            echo IMAGE_TAG_SHA_TS="${{ inputs.image_tag_suffix }}" >> $GITHUB_ENV
+          else
+            echo IMAGE_TAG_SHA_TS=$(git rev-parse --short HEAD)-$(date +%s) >> $GITHUB_ENV
+          fi
+
+      - name: Download contracts
+        uses: actions/download-artifact@fa0a91b85d4f404e444e00e005971372dc801d16 # v4.1.8
+        with:
+          name: contracts
+          path: |
+            ./contracts
+
+      - name: login to Docker registries
+        if: ${{ inputs.action == 'push' }}
+        shell: bash
+        run: |
+          docker login -u ${{ secrets.DOCKERHUB_USER }} -p ${{ secrets.DOCKERHUB_TOKEN }}
+          gcloud auth configure-docker us-docker.pkg.dev -q
+
+      - name: Build and push
+        uses: docker/build-push-action@5cd11c3a4ced054e52742c5fd54dca954e0edd85 # v6.7.0
+        with:
+          context: .
+          push: ${{ inputs.action == 'push' }}
+          file: docker/${{ matrix.components }}/Dockerfile
+          build-args: |
+            SCCACHE_GCS_BUCKET=matterlabs-infra-sccache-storage
+            SCCACHE_GCS_SERVICE_ACCOUNT=gha-ci-runners@matterlabs-infra.iam.gserviceaccount.com
+            SCCACHE_GCS_RW_MODE=READ_WRITE
+            RUSTC_WRAPPER=sccache
+          tags: |
+            us-docker.pkg.dev/matterlabs-infra/matterlabs-docker/${{ matrix.components }}:latest
+            matterlabs/${{ matrix.components }}:latest
+            us-docker.pkg.dev/matterlabs-infra/matterlabs-docker/${{ matrix.components }}:latest2.0
+            matterlabs/${{ matrix.components }}:latest2.0
+            us-docker.pkg.dev/matterlabs-infra/matterlabs-docker/${{ matrix.components }}:2.0-${{ env.IMAGE_TAG_SHA }}
+            matterlabs/${{ matrix.components }}:2.0-${{ env.IMAGE_TAG_SHA }}
+            us-docker.pkg.dev/matterlabs-infra/matterlabs-docker/${{ matrix.components }}:${{ env.IMAGE_TAG_SHA }}
+            matterlabs/${{ matrix.components }}:${{ env.IMAGE_TAG_SHA }}
+            us-docker.pkg.dev/matterlabs-infra/matterlabs-docker/${{ matrix.components }}:2.0-${{ env.IMAGE_TAG_SHA_TS }}
+            matterlabs/${{ matrix.components }}:2.0-${{ env.IMAGE_TAG_SHA_TS }}
+            us-docker.pkg.dev/matterlabs-infra/matterlabs-docker/${{ matrix.components }}:${{ env.IMAGE_TAG_SHA_TS }}
+            matterlabs/${{ matrix.components }}:${{ env.IMAGE_TAG_SHA_TS }}
+            us-docker.pkg.dev/matterlabs-infra/matterlabs-docker/${{ matrix.components }}:2.0-${{ env.IMAGE_TAG_SHA_TS }}-${{ env.PLATFORM }}
+            matterlabs/${{ matrix.components }}:2.0-${{ env.IMAGE_TAG_SHA_TS }}-${{ env.PLATFORM }}
+            us-docker.pkg.dev/matterlabs-infra/matterlabs-docker/${{ matrix.components }}:${{ env.IMAGE_TAG_SHA_TS }}-${{ env.PLATFORM }}
+            matterlabs/${{ matrix.components }}:${{ env.IMAGE_TAG_SHA_TS }}-${{ env.PLATFORM }}
+
+  create_manifest:
+    name: Create release manifest
+    runs-on: matterlabs-ci-runner
+    needs: build-images
+    if: ${{
inputs.action == 'push' }} + strategy: + matrix: + component: + - name: server-v2 + platform: linux/amd64 + - name: external-node + platform: linux/amd64,linux/arm64 + - name: snapshots-creator + platform: linux/amd64 + + env: + IMAGE_TAG_SUFFIX: ${{ inputs.image_tag_suffix }}${{ (inputs.en_alpha_release && matrix.component.name == 'external-node') && '-alpha' || '' }} + steps: + - uses: actions/checkout@a5ac7e51b41094c92402da3b24376905380afc29 # v4 + + - name: login to Docker registries + shell: bash + run: | + docker login -u ${{ secrets.DOCKERHUB_USER }} -p ${{ secrets.DOCKERHUB_TOKEN }} + gcloud auth configure-docker us-docker.pkg.dev -q + + - name: Create Docker manifest + shell: bash + run: | + docker_repositories=("matterlabs/${{ matrix.component.name }}" "us-docker.pkg.dev/matterlabs-infra/matterlabs-docker/${{ matrix.component.name }}") + platforms=${{ matrix.component.platform }} + for repo in "${docker_repositories[@]}"; do + platform_tags="" + for platform in ${platforms//,/ }; do + platform=$(echo $platform | tr '/' '-') + platform_tags+=" --amend ${repo}:${IMAGE_TAG_SUFFIX}-${platform}" + done + for manifest in "${repo}:${IMAGE_TAG_SUFFIX}" "${repo}:2.0-${IMAGE_TAG_SUFFIX}" "${repo}:latest" "${repo}:latest2.0"; do + docker manifest create ${manifest} ${platform_tags} + docker manifest push ${manifest} + done + done diff --git a/.github/workflows/new-build-prover-template.yml b/.github/workflows/new-build-prover-template.yml new file mode 100644 index 000000000000..60c152213e60 --- /dev/null +++ b/.github/workflows/new-build-prover-template.yml @@ -0,0 +1,198 @@ +name: Build Prover images +on: + workflow_call: + secrets: + DOCKERHUB_USER: + description: "DOCKERHUB_USER" + required: true + DOCKERHUB_TOKEN: + description: "DOCKERHUB_TOKEN" + required: true + inputs: + ERA_BELLMAN_CUDA_RELEASE: + description: "ERA_BELLMAN_CUDA_RELEASE" + type: string + required: true + image_tag_suffix: + description: "Optional suffix to override tag name generation" + type: string + required: false + action: + description: "Action with docker image" + type: string + default: "push" + required: false + is_pr_from_fork: + description: "Indicates whether the workflow is invoked from a PR created from fork" + type: boolean + default: false + required: false + CUDA_ARCH: + description: "CUDA Arch to build" + type: string + default: "89" + required: false + outputs: + protocol_version: + description: "Protocol version of the binary" + value: ${{ jobs.get-protocol-version.outputs.protocol_version }} + +jobs: + get-protocol-version: + name: Get protocol version + runs-on: [ matterlabs-ci-runner-high-performance ] + outputs: + protocol_version: ${{ steps.protocolversion.outputs.protocol_version }} + steps: + - uses: actions/checkout@692973e3d937129bcbf40652eb9f2f61becf3332 # v4.1.7 + with: + submodules: "recursive" + + - name: setup-env + run: | + echo ZKSYNC_HOME=$(pwd) >> $GITHUB_ENV + echo CI=1 >> $GITHUB_ENV + echo $(pwd)/bin >> $GITHUB_PATH + echo CI=1 >> .env + echo IN_DOCKER=1 >> .env + + - name: setup rust + uses: actions-rust-lang/setup-rust-toolchain@1fbea72663f6d4c03efaab13560c8a24cfd2a7cc # v1.9.0 + with: + toolchain: nightly-2024-08-01 + + - name: Prepare sccache-cache env vars + shell: bash + run: | + echo SCCACHE_GCS_BUCKET=matterlabs-infra-sccache-storage >> $GITHUB_ENV + echo SCCACHE_GCS_SERVICE_ACCOUNT=gha-ci-runners@matterlabs-infra.iam.gserviceaccount.com >> $GITHUB_ENV + echo SCCACHE_ERROR_LOG=/tmp/sccache_log.txt >> $GITHUB_ENV + echo SCCACHE_GCS_RW_MODE=READ_WRITE >> $GITHUB_ENV + 
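+          # RUSTC_WRAPPER routes every rustc invocation through sccache, which reads
+          # and writes compiled artifacts to the GCS bucket configured above.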
echo RUSTC_WRAPPER=sccache >> $GITHUB_ENV + + - name: protocol-version + id: protocolversion + # TODO: use -C flag, when it will become stable. + shell: bash + run: | + cd prover + cargo build --release --bin prover_version + PPV=$(target/release/prover_version) + echo Protocol version is ${PPV} + echo "protocol_version=${PPV}" >> $GITHUB_OUTPUT + + build-images: + name: Build and Push Docker Images + needs: get-protocol-version + env: + PROTOCOL_VERSION: ${{ needs.get-protocol-version.outputs.protocol_version }} + runs-on: [ matterlabs-ci-runner-high-performance ] + strategy: + matrix: + components: + - witness-generator + - prover-gpu-fri + - witness-vector-generator + - prover-fri-gateway + - prover-job-monitor + - proof-fri-gpu-compressor + steps: + - uses: actions/checkout@692973e3d937129bcbf40652eb9f2f61becf3332 # v4.1.7 + with: + submodules: "recursive" + + - name: Set up Docker Buildx + uses: docker/setup-buildx-action@988b5a0280414f521da01fcc63a27aeeb4b104db # v3.6.1 + + - name: setup-env + run: | + echo ZKSYNC_HOME=$(pwd) >> $GITHUB_ENV + echo CI=1 >> $GITHUB_ENV + echo $(pwd)/bin >> $GITHUB_PATH + echo CI=1 >> .env + echo IN_DOCKER=1 >> .env + + - name: Set env vars + shell: bash + run: | + # Support for custom tag suffix + if [ -n "${{ inputs.image_tag_suffix }}" ]; then + echo IMAGE_TAG_SHA_TS="${{ inputs.image_tag_suffix }}" >> $GITHUB_ENV + else + echo IMAGE_TAG_SHA_TS=$(git rev-parse --short HEAD)-$(date +%s) >> $GITHUB_ENV + fi + + - name: download CRS for GPU compressor + if: matrix.components == 'proof-fri-gpu-compressor' + run: | + run_retried curl -LO https://storage.googleapis.com/matterlabs-setup-keys-us/setup-keys/setup_2\^24.key + + # We need to run this only when ERA_BELLMAN_CUDA_RELEASE is not available + # In our case it happens only when PR is created from fork + - name: Wait for runner IP to be not rate-limited against GH API + if: ( inputs.is_pr_from_fork == true && matrix.components == 'proof-fri-gpu-compressor' ) + run: ./.github/scripts/rate_limit_check.sh + + - name: Hack to set env vars inside docker container + shell: bash + run: | + sed -i '/^FROM matterlabs\/zksync-build-base:latest as builder/a ENV SCCACHE_GCS_BUCKET=matterlabs-infra-sccache-storage\nENV SCCACHE_GCS_SERVICE_ACCOUNT=gha-ci-runners@matterlabs-infra.iam.gserviceaccount.com\nENV SCCACHE_GCS_RW_MODE=READ_WRITE\nENV RUSTC_WRAPPER=sccache' ./docker/${{ matrix.components }}/Dockerfile + #TODO: remove AS version =) + sed -i '/^FROM matterlabs\/zksync-build-base:latest AS builder/a ENV SCCACHE_GCS_BUCKET=matterlabs-infra-sccache-storage\nENV SCCACHE_GCS_SERVICE_ACCOUNT=gha-ci-runners@matterlabs-infra.iam.gserviceaccount.com\nENV SCCACHE_GCS_RW_MODE=READ_WRITE\nENV RUSTC_WRAPPER=sccache' ./docker/${{ matrix.components }}/Dockerfile + cat ./docker/${{ matrix.components }}/Dockerfile + + - name: login to Docker registries + if: github.event_name != 'pull_request' && (github.ref == 'refs/heads/main' || startsWith(github.ref, 'refs/tags/')) + shell: bash + run: | + docker login -u ${{ secrets.DOCKERHUB_USER }} -p ${{ secrets.DOCKERHUB_TOKEN }} + gcloud auth configure-docker us-docker.pkg.dev -q + + - name: Build and push + uses: docker/build-push-action@5cd11c3a4ced054e52742c5fd54dca954e0edd85 # v6.7.0 + with: + context: . 
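+          # The build-args below forward the sccache/GCS settings into the image build,
+          # so the in-container cargo build shares the same remote compilation cache.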
+          push: ${{ inputs.action == 'push' }}
+          build-args: |
+            CUDA_ARCH=${{ inputs.CUDA_ARCH }}
+            SCCACHE_GCS_BUCKET=matterlabs-infra-sccache-storage
+            SCCACHE_GCS_SERVICE_ACCOUNT=gha-ci-runners@matterlabs-infra.iam.gserviceaccount.com
+            SCCACHE_GCS_RW_MODE=READ_WRITE
+            RUSTC_WRAPPER=sccache
+          file: docker/${{ matrix.components }}/Dockerfile
+          tags: |
+            us-docker.pkg.dev/matterlabs-infra/matterlabs-docker/${{ matrix.components }}:2.0-${{ env.PROTOCOL_VERSION }}-${{ env.IMAGE_TAG_SHA_TS }}
+            matterlabs/${{ matrix.components }}:2.0-${{ env.PROTOCOL_VERSION }}-${{ env.IMAGE_TAG_SHA_TS }}
+
+  copy-images:
+    name: Copy images between docker registries
+    needs: [ build-images, get-protocol-version ]
+    env:
+      PROTOCOL_VERSION: ${{ needs.get-protocol-version.outputs.protocol_version }}
+    runs-on: matterlabs-ci-runner
+    if: ${{ inputs.action == 'push' }}
+    strategy:
+      matrix:
+        component:
+          - witness-vector-generator
+    steps:
+      - name: Set up Docker Buildx
+        uses: docker/setup-buildx-action@988b5a0280414f521da01fcc63a27aeeb4b104db # v3.6.1
+
+      - name: Login to us-central1 GAR
+        run: |
+          gcloud auth print-access-token --lifetime=7200 --impersonate-service-account=gha-ci-runners@matterlabs-infra.iam.gserviceaccount.com | docker login -u oauth2accesstoken --password-stdin https://us-docker.pkg.dev
+
+      - name: Login and push to Asia GAR
+        run: |
+          gcloud auth print-access-token --lifetime=7200 --impersonate-service-account=gha-ci-runners@matterlabs-infra.iam.gserviceaccount.com | docker login -u oauth2accesstoken --password-stdin https://asia-docker.pkg.dev
+          docker buildx imagetools create \
+            --tag asia-docker.pkg.dev/matterlabs-infra/matterlabs-docker/${{ matrix.component }}:2.0-${{ env.PROTOCOL_VERSION }}-${{ inputs.image_tag_suffix }} \
+            us-docker.pkg.dev/matterlabs-infra/matterlabs-docker/${{ matrix.component }}:2.0-${{ env.PROTOCOL_VERSION }}-${{ inputs.image_tag_suffix }}
+
+      - name: Login and push to Europe GAR
+        run: |
+          gcloud auth print-access-token --lifetime=7200 --impersonate-service-account=gha-ci-runners@matterlabs-infra.iam.gserviceaccount.com | docker login -u oauth2accesstoken --password-stdin https://europe-docker.pkg.dev
+          docker buildx imagetools create \
+            --tag europe-docker.pkg.dev/matterlabs-infra/matterlabs-docker/${{ matrix.component }}:2.0-${{ env.PROTOCOL_VERSION }}-${{ inputs.image_tag_suffix }} \
+            us-docker.pkg.dev/matterlabs-infra/matterlabs-docker/${{ matrix.component }}:2.0-${{ env.PROTOCOL_VERSION }}-${{ inputs.image_tag_suffix }}
diff --git a/.github/workflows/new-build-witness-generator-template.yml b/.github/workflows/new-build-witness-generator-template.yml
new file mode 100644
index 000000000000..2f1fc0b2dd86
--- /dev/null
+++ b/.github/workflows/new-build-witness-generator-template.yml
@@ -0,0 +1,133 @@
+name: Build witness generator image with custom compiler flags
+on:
+  workflow_call:
+    secrets:
+      DOCKERHUB_USER:
+        description: "DOCKERHUB_USER"
+        required: true
+      DOCKERHUB_TOKEN:
+        description: "DOCKERHUB_TOKEN"
+        required: true
+    inputs:
+      image_tag_suffix:
+        description: "Optional suffix to override tag name generation"
+        type: string
+        required: false
+      action:
+        type: string
+        default: non-push
+        required: false
+      CUDA_ARCH:
+        description: "CUDA Arch to build"
+        type: string
+        default: "89"
+        required: false
+      WITNESS_GENERATOR_RUST_FLAGS:
+        description: "Rust flags for witness_generator compilation"
+        type: string
+        default: ""
+        required: false
+    outputs:
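+      # Re-exported from the get-protocol-version job so callers can tag the
+      # resulting images with the protocol version the binary reports.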
protocol_version: + description: "Protocol version of the binary" + value: ${{ jobs.get-protocol-version.outputs.protocol_version }} + +jobs: + get-protocol-version: + name: Get protocol version + runs-on: [ matterlabs-ci-runner-high-performance ] + outputs: + protocol_version: ${{ steps.protocolversion.outputs.protocol_version }} + steps: + - uses: actions/checkout@692973e3d937129bcbf40652eb9f2f61becf3332 # v4.1.7 + with: + submodules: "recursive" + + - name: setup-env + run: | + echo ZKSYNC_HOME=$(pwd) >> $GITHUB_ENV + echo CI=1 >> $GITHUB_ENV + echo $(pwd)/bin >> $GITHUB_PATH + echo CI=1 >> .env + echo IN_DOCKER=1 >> .env + + - name: setup rust + uses: actions-rust-lang/setup-rust-toolchain@1fbea72663f6d4c03efaab13560c8a24cfd2a7cc # v1.9.0 + with: + toolchain: nightly-2024-08-01 + + - name: Prepare sccache-cache env vars + shell: bash + run: | + echo SCCACHE_GCS_BUCKET=matterlabs-infra-sccache-storage >> $GITHUB_ENV + echo SCCACHE_GCS_SERVICE_ACCOUNT=gha-ci-runners@matterlabs-infra.iam.gserviceaccount.com >> $GITHUB_ENV + echo SCCACHE_ERROR_LOG=/tmp/sccache_log.txt >> $GITHUB_ENV + echo SCCACHE_GCS_RW_MODE=READ_WRITE >> $GITHUB_ENV + echo RUSTC_WRAPPER=sccache >> $GITHUB_ENV + + - name: protocol-version + id: protocolversion + # TODO: use -C flag, when it will become stable. + shell: bash + run: | + cd prover + cargo build --release --bin prover_version + PPV=$(target/release/prover_version) + echo Protocol version is ${PPV} + echo "protocol_version=${PPV}" >> $GITHUB_OUTPUT + + build-images: + name: Build and Push Docker Images + needs: get-protocol-version + env: + PROTOCOL_VERSION: ${{ needs.get-protocol-version.outputs.protocol_version }} + runs-on: [ matterlabs-ci-runner-c3d ] + strategy: + matrix: + components: + - witness-generator + steps: + - uses: actions/checkout@692973e3d937129bcbf40652eb9f2f61becf3332 # v4.1.7 + with: + submodules: "recursive" + + - name: setup-env + run: | + echo ZKSYNC_HOME=$(pwd) >> $GITHUB_ENV + echo CI=1 >> $GITHUB_ENV + echo $(pwd)/bin >> $GITHUB_PATH + echo CI=1 >> .env + echo IN_DOCKER=1 >> .env + + - name: Set env vars + shell: bash + run: | + # Support for custom tag suffix + if [ -n "${{ inputs.image_tag_suffix }}" ]; then + echo IMAGE_TAG_SHA_TS="${{ inputs.image_tag_suffix }}" >> $GITHUB_ENV + else + echo IMAGE_TAG_SHA_TS=$(git rev-parse --short HEAD)-$(date +%s) >> $GITHUB_ENV + fi + + - name: login to Docker registries + if: github.event_name != 'pull_request' && (github.ref == 'refs/heads/main' || startsWith(github.ref, 'refs/tags/')) + shell: bash + run: | + docker login -u ${{ secrets.DOCKERHUB_USER }} -p ${{ secrets.DOCKERHUB_TOKEN }} + gcloud auth configure-docker us-docker.pkg.dev -q + + - name: Build and push + uses: docker/build-push-action@5cd11c3a4ced054e52742c5fd54dca954e0edd85 # v6.7.0 + with: + context: . 
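+          # CUDA_ARCH (default "89", i.e. compute capability 8.9 / Ada GPUs) is passed
+          # through to the Dockerfile so device code is built for the requested arch.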
+ push: ${{ inputs.action == 'push' }} + build-args: | + CUDA_ARCH=${{ inputs.CUDA_ARCH }} + SCCACHE_GCS_BUCKET=matterlabs-infra-sccache-storage + SCCACHE_GCS_SERVICE_ACCOUNT=gha-ci-runners@matterlabs-infra.iam.gserviceaccount.com + SCCACHE_GCS_RW_MODE=READ_WRITE + RUSTC_WRAPPER=sccache + file: docker/${{ matrix.components }}/Dockerfile + tags: | + us-docker.pkg.dev/matterlabs-infra/matterlabs-docker/${{ matrix.components }}:2.0-${{ env.PROTOCOL_VERSION }}-${{ env.IMAGE_TAG_SHA_TS }} + matterlabs/${{ matrix.components }}:2.0-${{ env.PROTOCOL_VERSION }}-${{ env.IMAGE_TAG_SHA_TS }} diff --git a/.github/workflows/vm-perf-comparison.yml b/.github/workflows/vm-perf-comparison.yml index cd6f490b3368..6e044287ad3d 100644 --- a/.github/workflows/vm-perf-comparison.yml +++ b/.github/workflows/vm-perf-comparison.yml @@ -35,6 +35,10 @@ jobs: touch .env echo ZKSYNC_HOME=$(pwd) >> $GITHUB_ENV echo $(pwd)/bin >> $GITHUB_PATH + echo "SCCACHE_GCS_BUCKET=matterlabs-infra-sccache-storage" >> .env + echo "SCCACHE_GCS_SERVICE_ACCOUNT=gha-ci-runners@matterlabs-infra.iam.gserviceaccount.com" >> .env + echo "SCCACHE_GCS_RW_MODE=READ_WRITE" >> .env + echo "RUSTC_WRAPPER=sccache" >> .env - name: init run: | diff --git a/.github/workflows/vm-perf-to-prometheus.yml b/.github/workflows/vm-perf-to-prometheus.yml index d201949b3ebe..4c8c90a0d8f2 100644 --- a/.github/workflows/vm-perf-to-prometheus.yml +++ b/.github/workflows/vm-perf-to-prometheus.yml @@ -22,7 +22,10 @@ jobs: - name: setup-env run: | echo BENCHMARK_PROMETHEUS_PUSHGATEWAY_URL=${{ secrets.BENCHMARK_PROMETHEUS_PUSHGATEWAY_URL }} >> .env - + echo "SCCACHE_GCS_BUCKET=matterlabs-infra-sccache-storage" >> .env + echo "SCCACHE_GCS_SERVICE_ACCOUNT=gha-ci-runners@matterlabs-infra.iam.gserviceaccount.com" >> .env + echo "SCCACHE_GCS_RW_MODE=READ_WRITE" >> .env + echo "RUSTC_WRAPPER=sccache" >> .env echo ZKSYNC_HOME=$(pwd) >> $GITHUB_ENV echo $(pwd)/bin >> $GITHUB_PATH @@ -31,7 +34,7 @@ jobs: run_retried docker compose pull zk docker compose up -d zk ci_run zkt - ci_run zks contracts all + ci_run zk_supervisor contracts - name: run benchmarks run: | diff --git a/Cargo.lock b/Cargo.lock index c2d63e15afe6..8164d412af55 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -9855,7 +9855,7 @@ dependencies = [ [[package]] name = "zksync_external_node" -version = "24.24.0" +version = "24.25.0" dependencies = [ "anyhow", "assert_matches", @@ -10898,6 +10898,7 @@ dependencies = [ "secp256k1", "serde", "serde_json", + "serde_with", "strum", "thiserror", "tokio", @@ -10965,7 +10966,7 @@ dependencies = [ [[package]] name = "zksync_vm2" version = "0.1.0" -source = "git+https://github.com/matter-labs/vm2.git?rev=cd6136c42ec56856e0abcf2a98d1a9e120161482#cd6136c42ec56856e0abcf2a98d1a9e120161482" +source = "git+https://github.com/matter-labs/vm2.git?rev=74577d9be13b1bff9d1a712389731f669b179e47#74577d9be13b1bff9d1a712389731f669b179e47" dependencies = [ "enum_dispatch", "primitive-types", @@ -10977,7 +10978,7 @@ dependencies = [ [[package]] name = "zksync_vm2_interface" version = "0.1.0" -source = "git+https://github.com/matter-labs/vm2.git?rev=cd6136c42ec56856e0abcf2a98d1a9e120161482#cd6136c42ec56856e0abcf2a98d1a9e120161482" +source = "git+https://github.com/matter-labs/vm2.git?rev=74577d9be13b1bff9d1a712389731f669b179e47#74577d9be13b1bff9d1a712389731f669b179e47" dependencies = [ "primitive-types", ] diff --git a/Cargo.toml b/Cargo.toml index 1e2fb9e0c7aa..5a8a507b0340 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -227,7 +227,7 @@ zk_evm_1_4_1 = { package = "zk_evm", version = "0.141" } 
zk_evm_1_5_0 = { package = "zk_evm", version = "=0.150.5" } # New VM; pinned to a specific commit because of instability -zksync_vm2 = { git = "https://github.com/matter-labs/vm2.git", rev = "cd6136c42ec56856e0abcf2a98d1a9e120161482" } +zksync_vm2 = { git = "https://github.com/matter-labs/vm2.git", rev = "74577d9be13b1bff9d1a712389731f669b179e47" } # Consensus dependencies. zksync_concurrency = "=0.1.1" diff --git a/core/CHANGELOG.md b/core/CHANGELOG.md index 7d4381b09bef..f07928071035 100644 --- a/core/CHANGELOG.md +++ b/core/CHANGELOG.md @@ -1,5 +1,32 @@ # Changelog +## [24.25.0](https://github.com/matter-labs/zksync-era/compare/core-v24.24.0...core-v24.25.0) (2024-09-19) + + +### Features + +* (DB migration) Rename recursion_scheduler_level_vk_hash to snark_wrapper_vk_hash ([#2809](https://github.com/matter-labs/zksync-era/issues/2809)) ([64f9551](https://github.com/matter-labs/zksync-era/commit/64f95514c99f95da2a19a97ff064c29a97efc22f)) +* add da clients ([#2743](https://github.com/matter-labs/zksync-era/issues/2743)) ([9218612](https://github.com/matter-labs/zksync-era/commit/9218612fdb2b63c20841e2e2e5a45bbd23c01fbc)) +* attester committees data extractor (BFT-434) ([#2684](https://github.com/matter-labs/zksync-era/issues/2684)) ([92dde03](https://github.com/matter-labs/zksync-era/commit/92dde039ee8a0bc08e2019b7fa6f243a34d9816f)) +* emit errors in prover API metrics ([#2890](https://github.com/matter-labs/zksync-era/issues/2890)) ([2ac7cc5](https://github.com/matter-labs/zksync-era/commit/2ac7cc5836e69fc82c98df2005fedee01c1084e1)) +* **en:** Resume incomplete snapshot in snapshot creator in more cases ([#2886](https://github.com/matter-labs/zksync-era/issues/2886)) ([f095b4a](https://github.com/matter-labs/zksync-era/commit/f095b4a3223222ac712de53592fe1e68f766600f)) +* make `to` address optional for transaction data ([#2852](https://github.com/matter-labs/zksync-era/issues/2852)) ([8363c1d](https://github.com/matter-labs/zksync-era/commit/8363c1d8697ad9bd2fe5d326218476bc3dad38af)) +* **prover:** Optimize setup keys loading ([#2847](https://github.com/matter-labs/zksync-era/issues/2847)) ([19887ef](https://github.com/matter-labs/zksync-era/commit/19887ef21a8bbd26977353f8ee277b711850dfd2)) +* Selector generator tool ([#2844](https://github.com/matter-labs/zksync-era/issues/2844)) ([b359b08](https://github.com/matter-labs/zksync-era/commit/b359b085895da6582f1d28722107bc5b25f1232c)) +* **tee:** use hex serialization for RPC responses ([#2887](https://github.com/matter-labs/zksync-era/issues/2887)) ([abe0440](https://github.com/matter-labs/zksync-era/commit/abe0440811ae4daf4a0f307922a282e9664308e0)) +* **utils:** Rework locate_workspace, introduce Workspace type ([#2830](https://github.com/matter-labs/zksync-era/issues/2830)) ([d256092](https://github.com/matter-labs/zksync-era/commit/d2560928cc67b40a97a5497ac8542915bf6f91a9)) +* **zk_toolbox:** Add external_node consensus support ([#2821](https://github.com/matter-labs/zksync-era/issues/2821)) ([4a10d7d](https://github.com/matter-labs/zksync-era/commit/4a10d7d9554d6c1aa2f4fc46557d40baaad8ff2f)) + + +### Bug Fixes + +* count SECP256 precompile to account validation gas limit as well ([#2859](https://github.com/matter-labs/zksync-era/issues/2859)) ([fee0c2a](https://github.com/matter-labs/zksync-era/commit/fee0c2ad08a5ab4a04252765b367eb9fbb1f3db7)) +* **en:** Fix connection starvation during snapshot recovery ([#2836](https://github.com/matter-labs/zksync-era/issues/2836)) 
([52f4f76](https://github.com/matter-labs/zksync-era/commit/52f4f763674d25f8a5e7f3a111354a559f798d52))
+* **eth_watch:** fix `get_events_inner` ([#2882](https://github.com/matter-labs/zksync-era/issues/2882)) ([c957dd8](https://github.com/matter-labs/zksync-era/commit/c957dd8011213e0e95fa5962e2310321b29a0d16))
+* handling of HTTP 403 thrown by proxyd ([#2835](https://github.com/matter-labs/zksync-era/issues/2835)) ([2d71c74](https://github.com/matter-labs/zksync-era/commit/2d71c7408a0eed3662fc51f70fa9f525d66e4c6f))
+* **state-keeper:** Restore processed tx metrics in state keeper ([#2815](https://github.com/matter-labs/zksync-era/issues/2815)) ([4d8862b](https://github.com/matter-labs/zksync-era/commit/4d8862b76a55ac78edd481694fefd2107736ffd9))
+* **tee-prover:** fix deserialization of `std::time::Duration` in `envy` config ([#2817](https://github.com/matter-labs/zksync-era/issues/2817)) ([df8641a](https://github.com/matter-labs/zksync-era/commit/df8641a912a8d480ceecff58b0bfaef05e04f0c8))
+
 ## [24.24.0](https://github.com/matter-labs/zksync-era/compare/core-v24.23.0...core-v24.24.0) (2024-09-05)
diff --git a/core/bin/external_node/Cargo.toml b/core/bin/external_node/Cargo.toml
index a1d3951ff3d8..c7a4476173f8 100644
--- a/core/bin/external_node/Cargo.toml
+++ b/core/bin/external_node/Cargo.toml
@@ -1,7 +1,7 @@
 [package]
 name = "zksync_external_node"
 description = "Non-validator ZKsync node"
-version = "24.24.0" # x-release-please-version
+version = "24.25.0" # x-release-please-version
 edition.workspace = true
 authors.workspace = true
 homepage.workspace = true
diff --git a/core/bin/external_node/src/config/observability.rs b/core/bin/external_node/src/config/observability.rs
index 0dd83f3bd35b..91b721bf77c9 100644
--- a/core/bin/external_node/src/config/observability.rs
+++ b/core/bin/external_node/src/config/observability.rs
@@ -95,11 +95,10 @@ impl ObservabilityENConfig {
             )
         })
         .transpose()?;
-        let guard = zksync_vlog::ObservabilityBuilder::new()
+        zksync_vlog::ObservabilityBuilder::new()
             .with_logs(Some(logs))
             .with_sentry(sentry)
-            .build();
-        Ok(guard)
+            .try_build()
     }

     pub(crate) fn from_configs(general_config: &GeneralConfig) -> anyhow::Result<Self> {
diff --git a/core/bin/external_node/src/config/tests.rs b/core/bin/external_node/src/config/tests.rs
index 43210a765723..a32be3eff725 100644
--- a/core/bin/external_node/src/config/tests.rs
+++ b/core/bin/external_node/src/config/tests.rs
@@ -63,7 +63,10 @@ fn parsing_observability_config() {
 fn using_unset_sentry_url() {
     let env_vars = MockEnvironment::new(&[("MISC_SENTRY_URL", "unset")]);
     let config = ObservabilityENConfig::new(&env_vars).unwrap();
-    config.build_observability().unwrap();
+    if let Err(err) = config.build_observability() {
+        // Global tracer may be installed by another test, but the logic shouldn't fail before that.
+ assert!(format!("{err:?}").contains("global tracer"), "{err:?}"); + } } #[test] diff --git a/core/bin/external_node/src/tests/mod.rs b/core/bin/external_node/src/tests/mod.rs index 5e9e7b3eeb38..b21dbd0db9a3 100644 --- a/core/bin/external_node/src/tests/mod.rs +++ b/core/bin/external_node/src/tests/mod.rs @@ -21,7 +21,7 @@ const POLL_INTERVAL: Duration = Duration::from_millis(100); #[tokio::test] #[tracing::instrument] // Add args to the test logs async fn external_node_basics(components_str: &'static str) { - let _guard = zksync_vlog::ObservabilityBuilder::new().build(); // Enable logging to simplify debugging + let _guard = zksync_vlog::ObservabilityBuilder::new().try_build().ok(); // Enable logging to simplify debugging let (env, env_handles) = utils::TestEnvironment::with_genesis_block(components_str).await; @@ -92,7 +92,7 @@ async fn external_node_basics(components_str: &'static str) { #[tokio::test] async fn node_reacts_to_stop_signal_during_initial_reorg_detection() { - let _guard = zksync_vlog::ObservabilityBuilder::new().build(); // Enable logging to simplify debugging + let _guard = zksync_vlog::ObservabilityBuilder::new().try_build().ok(); // Enable logging to simplify debugging let (env, env_handles) = utils::TestEnvironment::with_genesis_block("core").await; let l2_client = utils::mock_l2_client_hanging(); @@ -128,7 +128,7 @@ async fn node_reacts_to_stop_signal_during_initial_reorg_detection() { #[tokio::test] async fn running_tree_without_core_is_not_allowed() { - let _guard = zksync_vlog::ObservabilityBuilder::new().build(); // Enable logging to simplify debugging + let _guard = zksync_vlog::ObservabilityBuilder::new().try_build().ok(); // Enable logging to simplify debugging let (env, _env_handles) = utils::TestEnvironment::with_genesis_block("tree").await; let l2_client = utils::mock_l2_client(&env); @@ -165,7 +165,7 @@ async fn running_tree_without_core_is_not_allowed() { #[tokio::test] async fn running_tree_api_without_tree_is_not_allowed() { - let _guard = zksync_vlog::ObservabilityBuilder::new().build(); // Enable logging to simplify debugging + let _guard = zksync_vlog::ObservabilityBuilder::new().try_build().ok(); // Enable logging to simplify debugging let (env, _env_handles) = utils::TestEnvironment::with_genesis_block("core,tree_api").await; let l2_client = utils::mock_l2_client(&env); diff --git a/core/bin/system-constants-generator/src/utils.rs b/core/bin/system-constants-generator/src/utils.rs index e596208a7949..3269fc962042 100644 --- a/core/bin/system-constants-generator/src/utils.rs +++ b/core/bin/system-constants-generator/src/utils.rs @@ -89,7 +89,7 @@ pub(super) fn get_l2_tx( pubdata_price: u32, ) -> L2Tx { L2Tx::new_signed( - contract_address, + Some(contract_address), vec![], Nonce(0), Fee { @@ -134,7 +134,7 @@ pub(super) fn get_l1_tx( ) -> L1Tx { L1Tx { execute: Execute { - contract_address, + contract_address: Some(contract_address), calldata: custom_calldata.unwrap_or_default(), value: U256::from(0), factory_deps, diff --git a/core/lib/basic_types/src/prover_dal.rs b/core/lib/basic_types/src/prover_dal.rs index 7eb671448608..36f6c89135a0 100644 --- a/core/lib/basic_types/src/prover_dal.rs +++ b/core/lib/basic_types/src/prover_dal.rs @@ -2,6 +2,7 @@ use std::{net::IpAddr, ops::Add, str::FromStr}; use chrono::{DateTime, Duration, NaiveDateTime, NaiveTime, Utc}; +use serde::{Deserialize, Serialize}; use strum::{Display, EnumString}; use crate::{ @@ -27,7 +28,7 @@ pub struct ExtendedJobCountStatistics { pub successful: usize, } -#[derive(Debug, Clone, 
Copy, Default)]
+#[derive(Debug, Clone, Copy, Default, Serialize, Deserialize)]
 pub struct JobCountStatistics {
     pub queued: usize,
     pub in_progress: usize,
diff --git a/core/lib/config/src/configs/consensus.rs b/core/lib/config/src/configs/consensus.rs
index 759e13128338..2e277341b07d 100644
--- a/core/lib/config/src/configs/consensus.rs
+++ b/core/lib/config/src/configs/consensus.rs
@@ -92,6 +92,8 @@ pub struct GenesisSpec {
     pub leader: ValidatorPublicKey,
     /// Address of the registry contract.
     pub registry_address: Option<ethabi::Address>,
+    /// Recommended list of peers to connect to.
+    pub seed_peers: BTreeMap<NodePublicKey, Host>,
 }

 #[derive(Clone, Debug, PartialEq, Default)]
diff --git a/core/lib/config/src/configs/prover_job_monitor.rs b/core/lib/config/src/configs/prover_job_monitor.rs
index c16b1db81b7a..d60a0e90c204 100644
--- a/core/lib/config/src/configs/prover_job_monitor.rs
+++ b/core/lib/config/src/configs/prover_job_monitor.rs
@@ -61,6 +61,8 @@ pub struct ProverJobMonitorConfig {
     /// The interval between runs for Witness Job Queuer.
     #[serde(default = "ProverJobMonitorConfig::default_witness_job_queuer_run_interval_ms")]
     pub witness_job_queuer_run_interval_ms: u64,
+    /// HTTP port of the ProverJobMonitor to send requests to.
+    pub http_port: u16,
 }

 impl ProverJobMonitorConfig {
diff --git a/core/lib/config/src/testonly.rs b/core/lib/config/src/testonly.rs
index 4a2858b9cbfc..5a5a54304425 100644
--- a/core/lib/config/src/testonly.rs
+++ b/core/lib/config/src/testonly.rs
@@ -774,7 +774,9 @@ impl Distribution<configs::consensus::GenesisSpec> for EncodeDist {
     fn sample<R: Rng + ?Sized>(&self, rng: &mut R) -> configs::consensus::GenesisSpec {
-        use configs::consensus::{GenesisSpec, ProtocolVersion, ValidatorPublicKey};
+        use configs::consensus::{
+            GenesisSpec, Host, NodePublicKey, ProtocolVersion, ValidatorPublicKey,
+        };
         GenesisSpec {
             chain_id: L2ChainId::default(),
             protocol_version: ProtocolVersion(self.sample(rng)),
@@ -782,6 +784,10 @@ impl Distribution<configs::consensus::GenesisSpec> for EncodeDist {
             attesters: self.sample_collect(rng),
             leader: ValidatorPublicKey(self.sample(rng)),
             registry_address: self.sample_opt(|| rng.gen()),
+            seed_peers: self
+                .sample_range(rng)
+                .map(|_| (NodePublicKey(self.sample(rng)), Host(self.sample(rng))))
+                .collect(),
         }
     }
 }
@@ -1113,6 +1119,7 @@ impl Distribution<configs::prover_job_monitor::ProverJobMonitorConfig> for Encod
             prover_queue_reporter_run_interval_ms: self.sample(rng),
             witness_generator_queue_reporter_run_interval_ms: self.sample(rng),
             witness_job_queuer_run_interval_ms: self.sample(rng),
+            http_port: self.sample(rng),
         }
     }
 }
diff --git a/core/lib/dal/Cargo.toml b/core/lib/dal/Cargo.toml
index 9c13eeb30147..ccca49525e40 100644
--- a/core/lib/dal/Cargo.toml
+++ b/core/lib/dal/Cargo.toml
@@ -19,6 +19,7 @@ zksync_utils.workspace = true
 zksync_system_constants.workspace = true
 zksync_contracts.workspace = true
 zksync_types.workspace = true
+zksync_concurrency.workspace = true
 zksync_consensus_roles.workspace = true
 zksync_consensus_storage.workspace = true
 zksync_protobuf.workspace = true
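A note on the new `seed_peers` field added above: it is a `BTreeMap<NodePublicKey, Host>` on the config side and (in the DAL changes below) a `BTreeMap<node::PublicKey, net::Host>`. The following is a minimal, self-contained sketch of that data shape, using stand-in newtypes rather than the real zksync_config types:

```rust
use std::collections::BTreeMap;

// Stand-in newtypes; the real `NodePublicKey` and `Host` live in zksync_config.
#[derive(Debug, Clone, PartialEq, Eq, PartialOrd, Ord)]
struct NodePublicKey(String);

#[derive(Debug, Clone, PartialEq)]
struct Host(String); // "host:port" of a recommended peer

#[derive(Debug, Default)]
struct GenesisSpec {
    seed_peers: BTreeMap<NodePublicKey, Host>,
}

fn main() {
    let mut spec = GenesisSpec::default();
    spec.seed_peers.insert(
        NodePublicKey("node:public:ed25519:abc123".into()),
        Host("peer-1.example.com:3054".into()),
    );
    // A BTreeMap keeps the recommended peers ordered by key, so any encoding
    // of the config is deterministic regardless of insertion order.
    for (key, host) in &spec.seed_peers {
        println!("{key:?} -> {host:?}");
    }
}
```

Using an ordered map rather than a `Vec` of pairs also rules out duplicate keys by construction, which matters for a field that round-trips through protobuf.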
diff --git a/core/lib/dal/src/consensus/mod.rs b/core/lib/dal/src/consensus/mod.rs
index f0ef336bc543..f01655d56a95 100644
--- a/core/lib/dal/src/consensus/mod.rs
+++ b/core/lib/dal/src/consensus/mod.rs
@@ -5,8 +5,11 @@ mod testonly;
 #[cfg(test)]
 mod tests;

+use std::collections::BTreeMap;
+
 use anyhow::{anyhow, Context as _};
-use zksync_consensus_roles::{attester, validator};
+use zksync_concurrency::net;
+use zksync_consensus_roles::{attester, node, validator};
 use zksync_protobuf::{read_required, required, ProtoFmt, ProtoRepr};
 use zksync_types::{
     abi, ethabi,
@@ -27,6 +30,23 @@ use crate::models::{parse_h160, parse_h256};
 pub struct GlobalConfig {
     pub genesis: validator::Genesis,
     pub registry_address: Option<ethabi::Address>,
+    pub seed_peers: BTreeMap<node::PublicKey, net::Host>,
+}
+
+impl ProtoRepr for proto::NodeAddr {
+    type Type = (node::PublicKey, net::Host);
+    fn read(&self) -> anyhow::Result<Self::Type> {
+        Ok((
+            read_required(&self.key).context("key")?,
+            net::Host(required(&self.addr).context("addr")?.clone()),
+        ))
+    }
+    fn build(this: &Self::Type) -> Self {
+        Self {
+            key: Some(this.0.build()),
+            addr: Some(this.1 .0.clone()),
+        }
+    }
 }

 impl ProtoFmt for GlobalConfig {
@@ -41,6 +61,13 @@ impl ProtoFmt for GlobalConfig {
             .map(|a| parse_h160(a))
             .transpose()
             .context("registry_address")?,
+            seed_peers: r
+                .seed_peers
+                .iter()
+                .enumerate()
+                .map(|(i, e)| e.read().context(i))
+                .collect::<Result<_, _>>()
+                .context("seed_peers")?,
         })
     }

@@ -48,6 +75,11 @@ impl ProtoFmt for GlobalConfig {
         Self::Proto {
             genesis: Some(self.genesis.build()),
             registry_address: self.registry_address.map(|a| a.as_bytes().to_vec()),
+            seed_peers: self
+                .seed_peers
+                .iter()
+                .map(|(k, v)| ProtoRepr::build(&(k.clone(), v.clone())))
+                .collect(),
         }
     }
 }
@@ -401,9 +433,10 @@ impl ProtoRepr for proto::Transaction {
                 }
             },
             execute: Execute {
-                contract_address: required(&execute.contract_address)
-                    .and_then(|x| parse_h160(x))
-                    .context("execute.contract_address")?,
+                contract_address: execute
+                    .contract_address
+                    .as_ref()
+                    .and_then(|x| parse_h160(x).ok()),
                 calldata: required(&execute.calldata).context("calldata")?.clone(),
                 value: required(&execute.value)
                     .and_then(|x| parse_h256(x))
@@ -487,7 +520,7 @@ impl ProtoRepr for proto::Transaction {
             }
         };
         let execute = proto::Execute {
-            contract_address: Some(this.execute.contract_address.as_bytes().into()),
+            contract_address: this.execute.contract_address.map(|x| x.as_bytes().into()),
             calldata: Some(this.execute.calldata.clone()),
             value: Some(u256_to_h256(this.execute.value).as_bytes().into()),
             factory_deps: this.execute.factory_deps.clone(),
diff --git a/core/lib/dal/src/consensus/proto/mod.proto b/core/lib/dal/src/consensus/proto/mod.proto
index da9151f10f4d..ab1245f3ef6a 100644
--- a/core/lib/dal/src/consensus/proto/mod.proto
+++ b/core/lib/dal/src/consensus/proto/mod.proto
@@ -4,6 +4,7 @@ package zksync.dal;

 import "zksync/roles/validator.proto";
 import "zksync/roles/attester.proto";
+import "zksync/roles/node.proto";

 message Payload {
   // zksync-era ProtocolVersionId
@@ -102,7 +103,7 @@ message ProtocolUpgradeTxCommonData {
 }

 message Execute {
-  optional bytes contract_address = 1; // required; H160
+  optional bytes contract_address = 1; // optional; H160
   optional bytes calldata = 2; // required
   optional bytes value = 3; // required; U256
   repeated bytes factory_deps = 4;
@@ -122,9 +123,15 @@ message AttesterCommittee {
   repeated roles.attester.WeightedAttester members = 1; // required
 }

+message NodeAddr {
+  optional roles.node.PublicKey key = 1; // required
+  optional string addr = 2; // required; Host
+}
+
 message GlobalConfig {
   optional roles.validator.Genesis genesis = 1; // required
   optional bytes registry_address = 2; // optional; H160
+  repeated NodeAddr seed_peers = 3;
 }

 message AttestationStatus {
diff --git a/core/lib/dal/src/consensus/tests.rs b/core/lib/dal/src/consensus/tests.rs
index f21d09290a2f..7059f1a74ea0 100644
--- a/core/lib/dal/src/consensus/tests.rs
+++ b/core/lib/dal/src/consensus/tests.rs
@@ -17,7 +17,7 @@ use crate::tests::mock_protocol_upgrade_transaction;

 fn execute(rng: &mut impl Rng) -> Execute {
     Execute {
-        contract_address: rng.gen(),
+        contract_address:
Some(rng.gen()), value: rng.gen::().into(), calldata: (0..10 * 32).map(|_| rng.gen()).collect(), // TODO: find a way to generate valid random bytecode. diff --git a/core/lib/dal/src/consensus_dal.rs b/core/lib/dal/src/consensus_dal.rs index 2dca58e2a6a6..711ce3ddf392 100644 --- a/core/lib/dal/src/consensus_dal.rs +++ b/core/lib/dal/src/consensus_dal.rs @@ -66,6 +66,7 @@ impl ConsensusDal<'_, '_> { return Ok(Some(GlobalConfig { genesis, registry_address: None, + seed_peers: [].into(), })); } Ok(None) @@ -184,6 +185,7 @@ impl ConsensusDal<'_, '_> { } .with_hash(), registry_address: old.registry_address, + seed_peers: old.seed_peers, }; txn.consensus_dal().try_update_global_config(&new).await?; txn.commit().await?; @@ -681,6 +683,7 @@ mod tests { let cfg = GlobalConfig { genesis: genesis.with_hash(), registry_address: Some(rng.gen()), + seed_peers: [].into(), // TODO: rng.gen() for Host }; conn.consensus_dal() .try_update_global_config(&cfg) @@ -715,6 +718,7 @@ mod tests { let cfg = GlobalConfig { genesis: setup.genesis.clone(), registry_address: Some(rng.gen()), + seed_peers: [].into(), }; conn.consensus_dal() .try_update_global_config(&cfg) diff --git a/core/lib/dal/src/models/tests.rs b/core/lib/dal/src/models/tests.rs index 34cfde108f19..b4949dc101d6 100644 --- a/core/lib/dal/src/models/tests.rs +++ b/core/lib/dal/src/models/tests.rs @@ -13,7 +13,7 @@ use crate::{models::storage_transaction::StorageTransaction, BigDecimal}; fn default_execute() -> Execute { Execute { - contract_address: H160::random(), + contract_address: Some(H160::random()), value: U256::from(10i32), calldata: hex::decode( "a9059cbb00000000000000000000000058d595f318167d5af45d9e44ade4348dd4e\ diff --git a/core/lib/dal/src/tests/mod.rs b/core/lib/dal/src/tests/mod.rs index c17e8c5d1fe3..dc672fa1f807 100644 --- a/core/lib/dal/src/tests/mod.rs +++ b/core/lib/dal/src/tests/mod.rs @@ -74,7 +74,7 @@ pub(crate) fn mock_l2_transaction() -> L2Tx { gas_per_pubdata_limit: U256::from(DEFAULT_GAS_PER_PUBDATA), }; let mut l2_tx = L2Tx::new_signed( - Address::random(), + Some(Address::random()), vec![], zksync_types::Nonce(0), fee, @@ -110,7 +110,7 @@ pub(crate) fn mock_l1_execute() -> L1Tx { }; let execute = Execute { - contract_address: H160::random(), + contract_address: Some(H160::random()), value: Default::default(), calldata: vec![], factory_deps: vec![], @@ -138,7 +138,7 @@ pub(crate) fn mock_protocol_upgrade_transaction() -> ProtocolUpgradeTx { }; let execute = Execute { - contract_address: H160::random(), + contract_address: Some(H160::random()), value: Default::default(), calldata: vec![], factory_deps: vec![], diff --git a/core/lib/dal/src/transactions_dal.rs b/core/lib/dal/src/transactions_dal.rs index 49791f776e08..0a72289b48a4 100644 --- a/core/lib/dal/src/transactions_dal.rs +++ b/core/lib/dal/src/transactions_dal.rs @@ -58,7 +58,8 @@ impl TransactionsDal<'_, '_> { tx: &L1Tx, l1_block_number: L1BlockNumber, ) -> DalResult<()> { - let contract_address = tx.execute.contract_address.as_bytes(); + let contract_address = tx.execute.contract_address; + let contract_address_as_bytes = contract_address.map(|addr| addr.as_bytes().to_vec()); let tx_hash = tx.hash(); let tx_hash_bytes = tx_hash.as_bytes(); let json_data = serde_json::to_value(&tx.execute) @@ -143,7 +144,7 @@ impl TransactionsDal<'_, '_> { serial_id, full_fee, layer_2_tip_fee, - contract_address, + contract_address_as_bytes, l1_block_number.0 as i32, value, empty_address.as_bytes(), @@ -161,7 +162,8 @@ impl TransactionsDal<'_, '_> { } pub async fn 
insert_system_transaction(&mut self, tx: &ProtocolUpgradeTx) -> DalResult<()> {
-        let contract_address = tx.execute.contract_address.as_bytes().to_vec();
+        let contract_address = tx.execute.contract_address;
+        let contract_address_as_bytes = contract_address.map(|addr| addr.as_bytes().to_vec());
         let tx_hash = tx.common_data.hash().0.to_vec();
         let json_data = serde_json::to_value(&tx.execute)
             .unwrap_or_else(|_| panic!("cannot serialize tx {:?} to json", tx.common_data.hash()));
@@ -238,7 +240,7 @@
             gas_per_pubdata_limit,
             json_data,
             upgrade_id,
-            contract_address,
+            contract_address_as_bytes,
             l1_block_number,
             value,
             &Address::default().0.to_vec(),
@@ -284,7 +286,8 @@
         }

         let initiator_address = tx.initiator_account();
-        let contract_address = tx.execute.contract_address.as_bytes();
+        let contract_address = tx.execute.contract_address;
+        let contract_address_as_bytes = contract_address.map(|addr| addr.as_bytes().to_vec());
         let json_data = serde_json::to_value(&tx.execute)
             .unwrap_or_else(|_| panic!("cannot serialize tx {:?} to json", tx.hash()));
         let gas_limit = u256_to_big_decimal(tx.common_data.fee.gas_limit);
@@ -413,7 +416,7 @@
             input_data,
             &json_data,
             tx_format,
-            contract_address,
+            contract_address_as_bytes,
             value,
             &paymaster,
             &paymaster_input,
@@ -697,8 +700,10 @@
                 .arg_error(&format!("transactions[{index_in_block}].refunded_gas"), err)
             })?;

+            let contract_address = transaction.execute.contract_address;
+            let contract_address_as_bytes = contract_address.map(|addr| addr.as_bytes().to_vec());
             l2_values.push(u256_to_big_decimal(transaction.execute.value));
-            l2_contract_addresses.push(transaction.execute.contract_address.as_bytes());
+            l2_contract_addresses.push(contract_address_as_bytes);
             l2_paymaster_input.push(&common_data.paymaster_params.paymaster_input[..]);
             l2_paymaster.push(common_data.paymaster_params.paymaster.as_bytes());
             l2_hashes.push(tx_res.hash.as_bytes());
@@ -818,7 +823,7 @@
             &l2_inputs as &[&[u8]],
             &l2_datas,
             &l2_tx_formats,
-            &l2_contract_addresses as &[&[u8]],
+            &l2_contract_addresses as &[Option<Vec<u8>>],
             &l2_values,
             &l2_paymaster as &[&[u8]],
             &l2_paymaster_input as &[&[u8]],
@@ -901,8 +906,10 @@
                 .arg_error(&format!("transactions[{index_in_block}].refunded_gas"), err)
             })?;

+            let contract_address = transaction.execute.contract_address;
+            let contract_address_as_bytes = contract_address.map(|addr| addr.as_bytes().to_vec());
             l2_values.push(u256_to_big_decimal(transaction.execute.value));
-            l2_contract_addresses.push(transaction.execute.contract_address.as_bytes());
+            l2_contract_addresses.push(contract_address_as_bytes);
             l2_paymaster_input.push(&common_data.paymaster_params.paymaster_input[..]);
             l2_paymaster.push(common_data.paymaster_params.paymaster.as_bytes());
             l2_hashes.push(tx_res.hash.as_bytes());
@@ -1013,7 +1020,7 @@
             &l2_datas,
             &l2_refunded_gas,
             &l2_values,
-            &l2_contract_addresses as &[&[u8]],
+            &l2_contract_addresses as &[Option<Vec<u8>>],
             &l2_paymaster as &[&[u8]],
             &l2_paymaster_input as &[&[u8]],
             l2_block_number.0 as i32,
@@ -1083,6 +1090,8 @@
                 .arg_error(&format!("transactions[{index_in_block}].refunded_gas"), err)
             })?;

+            let contract_address = transaction.execute.contract_address;
+            let contract_address_as_bytes = contract_address.map(|addr| addr.as_bytes().to_vec());
             let tx = &tx_res.transaction;
             l1_hashes.push(tx_res.hash.as_bytes());
             l1_initiator_address.push(common_data.sender.as_bytes());
@@ -1096,7 +1105,7 @@
             l1_priority_op_id.push(common_data.serial_id.0 as i64);
             l1_full_fee.push(u256_to_big_decimal(common_data.full_fee));
             l1_layer_2_tip_fee.push(u256_to_big_decimal(common_data.layer_2_tip_fee));
-            l1_contract_address.push(tx.execute.contract_address.as_bytes());
+            l1_contract_address.push(contract_address_as_bytes);
             l1_l1_block_number.push(common_data.eth_block as i32);
             l1_value.push(u256_to_big_decimal(tx.execute.value));
             l1_tx_format.push(common_data.tx_format() as i32);
@@ -1203,7 +1212,7 @@
             &l1_priority_op_id,
             &l1_full_fee,
             &l1_layer_2_tip_fee,
-            &l1_contract_address as &[&[u8]],
+            &l1_contract_address as &[Option<Vec<u8>>],
             &l1_l1_block_number,
             &l1_value,
             &l1_tx_format,
@@ -1373,6 +1382,8 @@
                 .arg_error(&format!("transactions[{index_in_block}].refunded_gas"), err)
             })?;

+            let contract_address = transaction.execute.contract_address;
+            let contract_address_as_bytes = contract_address.map(|addr| addr.as_bytes().to_vec());
             let tx = &tx_res.transaction;
             upgrade_hashes.push(tx_res.hash.as_bytes());
             upgrade_initiator_address.push(common_data.sender.as_bytes());
@@ -1385,7 +1396,7 @@
                     .unwrap_or_else(|_| panic!("cannot serialize tx {:?} to json", tx.hash())),
             );
             upgrade_upgrade_id.push(common_data.upgrade_id as i32);
-            upgrade_contract_address.push(tx.execute.contract_address.as_bytes());
+            upgrade_contract_address.push(contract_address_as_bytes);
             upgrade_l1_block_number.push(common_data.eth_block as i32);
             upgrade_value.push(u256_to_big_decimal(tx.execute.value));
             upgrade_tx_format.push(common_data.tx_format() as i32);
@@ -1484,7 +1495,7 @@
             &upgrade_gas_per_pubdata_limit,
             &upgrade_data,
             &upgrade_upgrade_id,
-            &upgrade_contract_address as &[&[u8]],
+            &upgrade_contract_address as &[Option<Vec<u8>>],
             &upgrade_l1_block_number,
             &upgrade_value,
             &upgrade_tx_format,
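As context for the repeated `&[Option<Vec<u8>>]` casts above: with `contract_address` now optional, every bulk-insert bind array for the `contract_address` column switches from borrowed byte slices to owned, nullable byte vectors, where `None` becomes SQL `NULL`. A minimal sketch of just that conversion step, with `Address` replaced by a stand-in type (the real code uses `zksync_types::Address` and hands the slice to sqlx):

```rust
type Address = [u8; 20]; // stand-in for zksync_types::Address

// Mirrors `contract_address.map(|addr| addr.as_bytes().to_vec())` in the DAL.
fn contract_address_as_bytes(addr: Option<Address>) -> Option<Vec<u8>> {
    // None maps to SQL NULL (a deployment transaction has no `to` address).
    addr.map(|a| a.to_vec())
}

fn main() {
    let txs = vec![Some([0x11; 20]), None, Some([0x22; 20])];
    let l2_contract_addresses: Vec<Option<Vec<u8>>> =
        txs.into_iter().map(contract_address_as_bytes).collect();
    // The slice bound into the query is then typed as &[Option<Vec<u8>>].
    assert_eq!(l2_contract_addresses[1], None);
    println!("{} rows prepared", l2_contract_addresses.len());
}
```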
diff --git a/core/lib/env_config/src/prover_job_monitor.rs b/core/lib/env_config/src/prover_job_monitor.rs
index 3a8f80473eb1..884ebecacbb8 100644
--- a/core/lib/env_config/src/prover_job_monitor.rs
+++ b/core/lib/env_config/src/prover_job_monitor.rs
@@ -31,6 +31,7 @@ mod tests {
             prover_queue_reporter_run_interval_ms: 10000,
             witness_generator_queue_reporter_run_interval_ms: 10000,
             witness_job_queuer_run_interval_ms: 10000,
+            http_port: 3074,
         }
     }

@@ -55,6 +56,7 @@ mod tests {
     fn from_env_with_default() {
         let config = r#"
             PROVER_JOB_MONITOR_PROMETHEUS_PORT=3317
+            PROVER_JOB_MONITOR_HTTP_PORT=3074
             PROVER_JOB_MONITOR_MAX_DB_CONNECTIONS=9
         "#;
         let mut lock = MUTEX.lock();
@@ -80,6 +82,7 @@ mod tests {
             PROVER_JOB_MONITOR_PROVER_QUEUE_REPORTER_RUN_INTERVAL_MS=10001
             PROVER_JOB_MONITOR_WITNESS_GENERATOR_QUEUE_REPORTER_RUN_INTERVAL_MS=10001
             PROVER_JOB_MONITOR_WITNESS_JOB_QUEUER_RUN_INTERVAL_MS=10001
+            PROVER_JOB_MONITOR_HTTP_PORT=3074
         "#;
         let mut lock = MUTEX.lock();
         lock.set_env(config);
diff --git a/core/lib/mempool/src/tests.rs b/core/lib/mempool/src/tests.rs
index 6ea1be3b514b..96ef600984f9 100644
--- a/core/lib/mempool/src/tests.rs
+++ b/core/lib/mempool/src/tests.rs
@@ -371,7 +371,7 @@ fn gen_l2_tx(address: Address, nonce: Nonce) -> Transaction {

 fn gen_l2_tx_with_timestamp(address: Address, nonce: Nonce, received_at_ms: u64) -> Transaction {
     let mut txn = L2Tx::new(
-        Address::default(),
+        Some(Address::default()),
         Vec::new(),
         nonce,
         Fee::default(),
@@ -386,7 +386,7 @@ fn gen_l2_tx_with_timestamp(address: Address, nonce: Nonce, received_at_ms: u64)
 fn gen_l1_tx(priority_id: PriorityOpId) -> Transaction {
     let execute = Execute {
-        contract_address: Address::repeat_byte(0x11),
+        contract_address: Some(Address::repeat_byte(0x11)),
         calldata: vec![1, 2, 3],
         factory_deps: vec![],
         value: U256::zero(),
diff --git a/core/lib/multivm/src/versions/tests.rs b/core/lib/multivm/src/versions/tests.rs
index 78584f32e528..96a85c86816e 100644
--- a/core/lib/multivm/src/versions/tests.rs
+++ b/core/lib/multivm/src/versions/tests.rs
@@ -125,7 +125,7 @@ impl Harness {
     fn execute_on_vm(&mut self, vm: &mut impl VmInterface) {
         let transfer_exec = Execute {
-            contract_address: self.bob.address(),
+            contract_address: Some(self.bob.address()),
             calldata: vec![],
             value: 1_000_000_000.into(),
             factory_deps: vec![],
@@ -152,7 +152,7 @@ impl Harness {
         let write_fn = self.storage_contract_abi.function("simpleWrite").unwrap();
         let simple_write_tx = self.alice.get_l2_tx_for_execute(
             Execute {
-                contract_address: Self::STORAGE_CONTRACT_ADDRESS,
+                contract_address: Some(Self::STORAGE_CONTRACT_ADDRESS),
                 calldata: write_fn.encode_input(&[]).unwrap(),
                 value: 0.into(),
                 factory_deps: vec![],
diff --git a/core/lib/multivm/src/versions/vm_1_3_2/test_utils.rs b/core/lib/multivm/src/versions/vm_1_3_2/test_utils.rs
index a29e1101d520..34c70e0f9c45 100644
--- a/core/lib/multivm/src/versions/vm_1_3_2/test_utils.rs
+++ b/core/lib/multivm/src/versions/vm_1_3_2/test_utils.rs
@@ -153,7 +153,7 @@ pub fn get_create_execute(code: &[u8], calldata: &[u8]) -> Execute {
         .expect("failed to encode parameters");

     Execute {
-        contract_address: CONTRACT_DEPLOYER_ADDRESS,
+        contract_address: Some(CONTRACT_DEPLOYER_ADDRESS),
         calldata,
         factory_deps: vec![code.to_vec()],
         value: U256::zero(),
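The `TransactionData` changes that follow all use the same convention: a missing `to` address (i.e. a contract-deployment transaction) is ABI-encoded as the zero address via `unwrap_or_default()`. A small sketch of that convention under stand-in types; the real code encodes the value into an `ethabi::Token::Address` and `zksync_types::Address` implements `Default` as the zero address:

```rust
type Address = [u8; 20]; // stand-in for zksync_types::Address (H160)

// The bootloader ABI slot is a fixed 20-byte field, so an absent `to`
// degrades to the zero address rather than being omitted entirely.
fn encode_to_field(to: Option<Address>) -> Address {
    to.unwrap_or([0u8; 20]) // the real code uses `to.unwrap_or_default()`
}

fn main() {
    assert_eq!(encode_to_field(None), [0u8; 20]);
    assert_eq!(encode_to_field(Some([0xAB; 20])), [0xAB; 20]);
    println!("deployment txs encode `to` as the zero address");
}
```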
diff --git a/core/lib/multivm/src/versions/vm_1_3_2/transaction_data.rs b/core/lib/multivm/src/versions/vm_1_3_2/transaction_data.rs
index 788a52206e80..0285320daa30 100644
--- a/core/lib/multivm/src/versions/vm_1_3_2/transaction_data.rs
+++ b/core/lib/multivm/src/versions/vm_1_3_2/transaction_data.rs
@@ -22,7 +22,7 @@ use crate::vm_1_3_2::vm_with_bootloader::{
 pub struct TransactionData {
     pub tx_type: u8,
     pub from: Address,
-    pub to: Address,
+    pub to: Option<Address>,
     pub gas_limit: U256,
     pub pubdata_price_limit: U256,
     pub max_fee_per_gas: U256,
@@ -170,7 +170,7 @@ impl TransactionData {
         encode(&[Token::Tuple(vec![
             Token::Uint(U256::from_big_endian(&self.tx_type.to_be_bytes())),
             Token::Address(self.from),
-            Token::Address(self.to),
+            Token::Address(self.to.unwrap_or_default()),
             Token::Uint(self.gas_limit),
             Token::Uint(self.pubdata_price_limit),
             Token::Uint(self.max_fee_per_gas),
@@ -593,7 +593,7 @@ mod tests {
         let transaction = TransactionData {
             tx_type: 113,
             from: Address::random(),
-            to: Address::random(),
+            to: Some(Address::random()),
             gas_limit: U256::from(1u32),
             pubdata_price_limit: U256::from(1u32),
             max_fee_per_gas: U256::from(1u32),
diff --git a/core/lib/multivm/src/versions/vm_1_4_1/tests/gas_limit.rs b/core/lib/multivm/src/versions/vm_1_4_1/tests/gas_limit.rs
index 0ec921450daf..9dfda9e1a68c 100644
--- a/core/lib/multivm/src/versions/vm_1_4_1/tests/gas_limit.rs
+++ b/core/lib/multivm/src/versions/vm_1_4_1/tests/gas_limit.rs
@@ -21,10 +21,8 @@ fn test_tx_gas_limit_offset() {
     let gas_limit = 9999.into();
     let tx = vm.rich_accounts[0].get_l2_tx_for_execute(
         Execute {
-            contract_address: Default::default(),
-            calldata: vec![],
-            value: Default::default(),
-            factory_deps: None,
+            contract_address: Some(Default::default()),
+            ..Default::default()
         },
         Some(Fee {
             gas_limit,
diff --git a/core/lib/multivm/src/versions/vm_1_4_1/types/internals/transaction_data.rs b/core/lib/multivm/src/versions/vm_1_4_1/types/internals/transaction_data.rs
index 1379b853a542..f7384da76d0d 100644
--- a/core/lib/multivm/src/versions/vm_1_4_1/types/internals/transaction_data.rs
+++ b/core/lib/multivm/src/versions/vm_1_4_1/types/internals/transaction_data.rs
@@ -22,7 +22,7 @@ use crate::vm_1_4_1::{
 pub(crate) struct TransactionData {
     pub(crate) tx_type: u8,
     pub(crate) from: Address,
-    pub(crate) to: Address,
+    pub(crate) to: Option<Address>,
 pub(crate) gas_limit: U256,
     pub(crate) pubdata_price_limit: U256,
     pub(crate) max_fee_per_gas: U256,
@@ -169,7 +169,7 @@ impl TransactionData {
         encode(&[Token::Tuple(vec![
             Token::Uint(U256::from_big_endian(&self.tx_type.to_be_bytes())),
             Token::Address(self.from),
-            Token::Address(self.to),
+            Token::Address(self.to.unwrap_or_default()),
             Token::Uint(self.gas_limit),
             Token::Uint(self.pubdata_price_limit),
             Token::Uint(self.max_fee_per_gas),
@@ -311,7 +311,7 @@ mod tests {
         let transaction = TransactionData {
             tx_type: 113,
             from: Address::random(),
-            to: Address::random(),
+            to: Some(Address::random()),
             gas_limit: U256::from(1u32),
             pubdata_price_limit: U256::from(1u32),
             max_fee_per_gas: U256::from(1u32),
diff --git a/core/lib/multivm/src/versions/vm_1_4_2/tests/gas_limit.rs b/core/lib/multivm/src/versions/vm_1_4_2/tests/gas_limit.rs
index 6a57fd07ae71..b84e9d32126c 100644
--- a/core/lib/multivm/src/versions/vm_1_4_2/tests/gas_limit.rs
+++ b/core/lib/multivm/src/versions/vm_1_4_2/tests/gas_limit.rs
@@ -20,10 +20,8 @@ fn test_tx_gas_limit_offset() {
     let gas_limit = 9999.into();
     let tx = vm.rich_accounts[0].get_l2_tx_for_execute(
         Execute {
-            contract_address: Default::default(),
-            calldata: vec![],
-            value: Default::default(),
-            factory_deps: None,
+            contract_address: Some(Default::default()),
+            ..Default::default()
         },
         Some(Fee {
             gas_limit,
diff --git a/core/lib/multivm/src/versions/vm_1_4_2/types/internals/transaction_data.rs b/core/lib/multivm/src/versions/vm_1_4_2/types/internals/transaction_data.rs
index 3498e51ec308..38280aa80513 100644
--- a/core/lib/multivm/src/versions/vm_1_4_2/types/internals/transaction_data.rs
+++ b/core/lib/multivm/src/versions/vm_1_4_2/types/internals/transaction_data.rs
@@ -22,7 +22,7 @@ use crate::vm_1_4_2::{
 pub(crate) struct TransactionData {
     pub(crate) tx_type: u8,
     pub(crate) from: Address,
-    pub(crate) to: Address,
+    pub(crate) to: Option<Address>,
 pub(crate) gas_limit: U256,
     pub(crate) pubdata_price_limit: U256,
     pub(crate) max_fee_per_gas: U256,
@@ -169,7 +169,7 @@ impl TransactionData {
         encode(&[Token::Tuple(vec![
             Token::Uint(U256::from_big_endian(&self.tx_type.to_be_bytes())),
             Token::Address(self.from),
-            Token::Address(self.to),
+            Token::Address(self.to.unwrap_or_default()),
             Token::Uint(self.gas_limit),
             Token::Uint(self.pubdata_price_limit),
             Token::Uint(self.max_fee_per_gas),
@@ -311,7 +311,7 @@ mod tests {
         let transaction = TransactionData {
             tx_type: 113,
             from: Address::random(),
-            to: Address::random(),
+            to: Some(Address::random()),
             gas_limit: U256::from(1u32),
             pubdata_price_limit: U256::from(1u32),
             max_fee_per_gas: U256::from(1u32),
diff --git a/core/lib/multivm/src/versions/vm_boojum_integration/tests/gas_limit.rs b/core/lib/multivm/src/versions/vm_boojum_integration/tests/gas_limit.rs
index 30a65097111d..637fd94c1c89 100644
--- a/core/lib/multivm/src/versions/vm_boojum_integration/tests/gas_limit.rs
+++ b/core/lib/multivm/src/versions/vm_boojum_integration/tests/gas_limit.rs
@@ -21,10 +21,8 @@ fn test_tx_gas_limit_offset() {
     let gas_limit = 9999.into();
     let tx = vm.rich_accounts[0].get_l2_tx_for_execute(
         Execute {
-            contract_address: Default::default(),
-            calldata: vec![],
-            value: Default::default(),
-            factory_deps: None,
+            contract_address: Some(Default::default()),
+            ..Default::default()
         },
         Some(Fee {
             gas_limit,
diff --git a/core/lib/multivm/src/versions/vm_boojum_integration/types/internals/transaction_data.rs b/core/lib/multivm/src/versions/vm_boojum_integration/types/internals/transaction_data.rs
index ad740a279dcd..8bf575effe06 100644
--- a/core/lib/multivm/src/versions/vm_boojum_integration/types/internals/transaction_data.rs
+++ b/core/lib/multivm/src/versions/vm_boojum_integration/types/internals/transaction_data.rs
@@ -22,7 +22,7 @@ use crate::vm_boojum_integration::{
 pub(crate) struct TransactionData {
     pub(crate) tx_type: u8,
     pub(crate) from: Address,
-    pub(crate) to: Address,
+    pub(crate) to: Option<Address>,
 pub(crate) gas_limit: U256,
     pub(crate) pubdata_price_limit: U256,
     pub(crate) max_fee_per_gas: U256,
@@ -169,7 +169,7 @@ impl TransactionData {
         encode(&[Token::Tuple(vec![
             Token::Uint(U256::from_big_endian(&self.tx_type.to_be_bytes())),
             Token::Address(self.from),
-            Token::Address(self.to),
+            Token::Address(self.to.unwrap_or_default()),
             Token::Uint(self.gas_limit),
             Token::Uint(self.pubdata_price_limit),
             Token::Uint(self.max_fee_per_gas),
@@ -325,7 +325,7 @@ mod tests {
         let transaction = TransactionData {
             tx_type: 113,
             from: Address::random(),
-            to: Address::random(),
+            to: Some(Address::random()),
             gas_limit: U256::from(1u32),
             pubdata_price_limit: U256::from(1u32),
             max_fee_per_gas: U256::from(1u32),
diff --git a/core/lib/multivm/src/versions/vm_fast/circuits_tracer.rs b/core/lib/multivm/src/versions/vm_fast/circuits_tracer.rs
index de6ead71e655..e6b1a53e9d05 100644
--- a/core/lib/multivm/src/versions/vm_fast/circuits_tracer.rs
+++ b/core/lib/multivm/src/versions/vm_fast/circuits_tracer.rs
@@ -1,5 +1,5 @@
 use circuit_sequencer_api_1_5_0::{geometry_config::get_geometry_config, toolset::GeometryConfig};
-use zksync_vm2::{CycleStats, Opcode, OpcodeType, StateInterface, Tracer};
+use zksync_vm2::interface::{CycleStats, Opcode, OpcodeType, StateInterface, Tracer};
 use zksync_vm_interface::CircuitStatistic;

 use crate::vm_latest::tracers::circuits_capacity::*;
@@ -17,7 +17,7 @@ pub(crate) struct CircuitsTracer {
     keccak256_cycles: u32,
     ecrecover_cycles: u32,
     sha256_cycles: u32,
-    secp256k1_verify_cycles: u32,
+    secp256r1_verify_cycles: u32,
     transient_storage_checker_cycles: u32,
 }

@@ -115,7 +115,7 @@ impl Tracer for CircuitsTracer {
             CycleStats::Keccak256(cycles) => self.keccak256_cycles += cycles,
             CycleStats::Sha256(cycles) => self.sha256_cycles += cycles,
             CycleStats::EcRecover(cycles) => self.ecrecover_cycles += cycles,
-            CycleStats::Secp256k1Verify(cycles) => self.secp256k1_verify_cycles += cycles,
+            CycleStats::Secp256r1Verify(cycles) => self.secp256r1_verify_cycles += cycles,
             CycleStats::Decommit(cycles) => self.code_decommitter_cycles += cycles,
             CycleStats::StorageRead => self.storage_application_cycles += 1,
             CycleStats::StorageWrite => self.storage_application_cycles += 2,
@@ -146,7 +146,7 @@ impl CircuitsTracer {
             ecrecover: self.ecrecover_cycles as f32
                 / GEOMETRY_CONFIG.cycles_per_ecrecover_circuit as f32,
             sha256: self.sha256_cycles as f32 / GEOMETRY_CONFIG.cycles_per_sha256_circuit as f32,
-            secp256k1_verify: self.secp256k1_verify_cycles as f32
+            secp256k1_verify: self.secp256r1_verify_cycles as f32
                 / GEOMETRY_CONFIG.cycles_per_secp256r1_verify_circuit as f32,
             transient_storage_checker: self.transient_storage_checker_cycles as f32
                 / GEOMETRY_CONFIG.cycles_per_transient_storage_sorter as f32,
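For readers following the `CircuitsTracer` change above: the tracer accumulates raw cycle counts per precompile (note the `secp256k1_verify_cycles` field renamed to `secp256r1_verify_cycles`) and later divides each counter by that circuit's cycle capacity to estimate circuit usage. A toy sketch of that accumulation, with made-up capacities rather than the real geometry config:

```rust
#[derive(Default)]
struct Counters {
    sha256_cycles: u32,
    secp256r1_verify_cycles: u32, // renamed from secp256k1_* in this PR
}

// Per-circuit cycle capacities; illustrative values, not the real config.
struct Geometry {
    cycles_per_sha256_circuit: u32,
    cycles_per_secp256r1_verify_circuit: u32,
}

impl Counters {
    // Fraction of a full circuit consumed by the recorded cycles.
    fn circuit_fractions(&self, g: &Geometry) -> (f32, f32) {
        (
            self.sha256_cycles as f32 / g.cycles_per_sha256_circuit as f32,
            self.secp256r1_verify_cycles as f32 / g.cycles_per_secp256r1_verify_circuit as f32,
        )
    }
}

fn main() {
    let mut c = Counters::default();
    c.sha256_cycles += 14; // e.g. accumulated from a CycleStats::Sha256(cycles) event
    c.secp256r1_verify_cycles += 7;
    let g = Geometry { cycles_per_sha256_circuit: 28, cycles_per_secp256r1_verify_circuit: 7 };
    let (sha, p256) = c.circuit_fractions(&g);
    assert!((sha - 0.5).abs() < f32::EPSILON && (p256 - 1.0).abs() < f32::EPSILON);
    println!("sha256: {sha}, secp256r1: {p256}");
}
```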
diff --git a/core/lib/multivm/src/versions/vm_fast/events.rs b/core/lib/multivm/src/versions/vm_fast/events.rs
index 2312c3d97b40..294e8adce32b 100644
--- a/core/lib/multivm/src/versions/vm_fast/events.rs
+++ b/core/lib/multivm/src/versions/vm_fast/events.rs
@@ -1,6 +1,6 @@
 use zksync_types::{L1BatchNumber, H256};
 use zksync_utils::h256_to_account_address;
-use zksync_vm2::Event;
+use zksync_vm2::interface::Event;

 use crate::interface::VmEvent;

@@ -23,18 +23,21 @@ impl EventAccumulator {
     }
 }

-pub(crate) fn merge_events(events: &[Event], block_number: L1BatchNumber) -> Vec<VmEvent> {
+pub(crate) fn merge_events(
+    events: impl Iterator<Item = Event>,
+    block_number: L1BatchNumber,
+) -> Vec<VmEvent> {
     let mut result = vec![];
     let mut current: Option<(usize, u32, EventAccumulator)> = None;

-    for message in events.iter() {
+    for event in events {
         let Event {
             shard_id,
             is_first,
             tx_number,
             key,
             value,
-        } = message.clone();
+        } = event;

         if !is_first {
             if let Some((mut remaining_data_length, mut remaining_topics, mut event)) =
diff --git a/core/lib/multivm/src/versions/vm_fast/glue.rs b/core/lib/multivm/src/versions/vm_fast/glue.rs
index f24c82af11e9..c2d38f351c04 100644
--- a/core/lib/multivm/src/versions/vm_fast/glue.rs
+++ b/core/lib/multivm/src/versions/vm_fast/glue.rs
@@ -1,18 +1,19 @@
 use zksync_types::l2_to_l1_log::{L2ToL1Log, SystemL2ToL1Log};
 use zksync_utils::u256_to_h256;
+use zksync_vm2::interface;

 use crate::glue::GlueFrom;

-impl GlueFrom<&zksync_vm2::L2ToL1Log> for SystemL2ToL1Log {
-    fn glue_from(value: &zksync_vm2::L2ToL1Log) -> Self {
-        let zksync_vm2::L2ToL1Log {
+impl GlueFrom<interface::L2ToL1Log> for SystemL2ToL1Log {
+    fn glue_from(value: interface::L2ToL1Log) -> Self {
+        let interface::L2ToL1Log {
             key,
             value,
             is_service,
             address,
             shard_id,
             tx_number,
-        } = *value;
+        } = value;

         Self(L2ToL1Log {
             shard_id,
diff --git a/core/lib/multivm/src/versions/vm_fast/tests/block_tip.rs b/core/lib/multivm/src/versions/vm_fast/tests/block_tip.rs
index 92706483c6f6..dd407c616682 100644
--- a/core/lib/multivm/src/versions/vm_fast/tests/block_tip.rs
+++ b/core/lib/multivm/src/versions/vm_fast/tests/block_tip.rs
@@ -148,7 +148,7 @@ fn execute_test(test_data: L1MessengerTestData) -> TestStatistics {
     for (i, data) in txs_data.into_iter().enumerate() {
         let tx = account.get_l2_tx_for_execute(
             Execute {
-                contract_address: CONTRACT_FORCE_DEPLOYER_ADDRESS,
+                contract_address: Some(CONTRACT_FORCE_DEPLOYER_ADDRESS),
                 calldata: data,
                 value: U256::zero(),
                 factory_deps: vec![],
diff --git a/core/lib/multivm/src/versions/vm_fast/tests/bootloader.rs b/core/lib/multivm/src/versions/vm_fast/tests/bootloader.rs
index 5c1158a5909d..48e1b10de442 100644
--- a/core/lib/multivm/src/versions/vm_fast/tests/bootloader.rs
+++ b/core/lib/multivm/src/versions/vm_fast/tests/bootloader.rs
@@ -1,6 +1,6 @@
 use assert_matches::assert_matches;
 use zksync_types::U256;
-use zksync_vm2::HeapId;
+use zksync_vm2::interface::HeapId;

 use crate::{
     interface::{ExecutionResult, Halt, TxExecutionMode, VmExecutionMode, VmInterfaceExt},
diff --git a/core/lib/multivm/src/versions/vm_fast/tests/circuits.rs b/core/lib/multivm/src/versions/vm_fast/tests/circuits.rs
index 0270ac35475b..a119a31618e9 100644
--- a/core/lib/multivm/src/versions/vm_fast/tests/circuits.rs
+++ b/core/lib/multivm/src/versions/vm_fast/tests/circuits.rs
@@ -21,7 +21,7 @@ fn test_circuits() {
     let account = &mut vm.rich_accounts[0];
     let tx = account.get_l2_tx_for_execute(
         Execute {
-            contract_address: Address::random(),
+            contract_address: Some(Address::random()),
             calldata: Vec::new(),
             value: U256::from(1u8),
             factory_deps: vec![],
diff --git a/core/lib/multivm/src/versions/vm_fast/tests/code_oracle.rs b/core/lib/multivm/src/versions/vm_fast/tests/code_oracle.rs
index abd213301a51..156af43dcf24 100644
--- a/core/lib/multivm/src/versions/vm_fast/tests/code_oracle.rs
+++ b/core/lib/multivm/src/versions/vm_fast/tests/code_oracle.rs
@@ -58,7 +58,7 @@ fn test_code_oracle() {
     // Firstly, let's ensure that the contract works.
     let tx1 = account.get_l2_tx_for_execute(
         Execute {
-            contract_address: precompiles_contract_address,
+            contract_address: Some(precompiles_contract_address),
             calldata: call_code_oracle_function
                 .encode_input(&[
                     Token::FixedBytes(normal_zkevm_bytecode_hash.0.to_vec()),
@@ -82,7 +82,7 @@ fn test_code_oracle() {
     // the decommitted bytecode gets erased (it shouldn't).
let tx2 = account.get_l2_tx_for_execute( Execute { - contract_address: precompiles_contract_address, + contract_address: Some(precompiles_contract_address), calldata: call_code_oracle_function .encode_input(&[ Token::FixedBytes(normal_zkevm_bytecode_hash.0.to_vec()), @@ -151,7 +151,7 @@ fn test_code_oracle_big_bytecode() { // Firstly, let's ensure that the contract works. let tx1 = account.get_l2_tx_for_execute( Execute { - contract_address: precompiles_contract_address, + contract_address: Some(precompiles_contract_address), calldata: call_code_oracle_function .encode_input(&[ Token::FixedBytes(big_zkevm_bytecode_hash.0.to_vec()), @@ -218,7 +218,7 @@ fn refunds_in_code_oracle() { let tx = account.get_l2_tx_for_execute( Execute { - contract_address: precompiles_contract_address, + contract_address: Some(precompiles_contract_address), calldata: call_code_oracle_function .encode_input(&[ Token::FixedBytes(normal_zkevm_bytecode_hash.0.to_vec()), diff --git a/core/lib/multivm/src/versions/vm_fast/tests/gas_limit.rs b/core/lib/multivm/src/versions/vm_fast/tests/gas_limit.rs index b7a2154bdc71..3f0a47b980e2 100644 --- a/core/lib/multivm/src/versions/vm_fast/tests/gas_limit.rs +++ b/core/lib/multivm/src/versions/vm_fast/tests/gas_limit.rs @@ -18,7 +18,10 @@ fn test_tx_gas_limit_offset() { let gas_limit = 9999.into(); let tx = vm.rich_accounts[0].get_l2_tx_for_execute( - Execute::default(), + Execute { + contract_address: Some(Default::default()), + ..Default::default() + }, Some(Fee { gas_limit, ..Account::default_fee() diff --git a/core/lib/multivm/src/versions/vm_fast/tests/get_used_contracts.rs b/core/lib/multivm/src/versions/vm_fast/tests/get_used_contracts.rs index 9f01684d73fb..b8942dcbb6a8 100644 --- a/core/lib/multivm/src/versions/vm_fast/tests/get_used_contracts.rs +++ b/core/lib/multivm/src/versions/vm_fast/tests/get_used_contracts.rs @@ -66,7 +66,7 @@ fn test_get_used_contracts() { let account2 = Account::random(); let tx2 = account2.get_l1_tx( Execute { - contract_address: CONTRACT_DEPLOYER_ADDRESS, + contract_address: Some(CONTRACT_DEPLOYER_ADDRESS), calldata: big_calldata, value: Default::default(), factory_deps: vec![vec![1; 32]], @@ -154,7 +154,7 @@ fn execute_proxy_counter(gas: u32) -> (VmTester, ProxyCounterData, VmExecutionRe let increment = proxy_counter_abi.function("increment").unwrap(); let increment_tx = account.get_l2_tx_for_execute( Execute { - contract_address: deploy_tx.address, + contract_address: Some(deploy_tx.address), calldata: increment .encode_input(&[Token::Uint(1.into()), Token::Uint(gas.into())]) .unwrap(), @@ -201,7 +201,7 @@ fn get_used_contracts_with_out_of_gas_far_call() { let increment = proxy_counter_abi.function("increment").unwrap(); let increment_tx = account.get_l2_tx_for_execute( Execute { - contract_address: data.proxy_counter_address, + contract_address: Some(data.proxy_counter_address), calldata: increment .encode_input(&[Token::Uint(1.into()), Token::Uint(u64::MAX.into())]) .unwrap(), diff --git a/core/lib/multivm/src/versions/vm_fast/tests/l1_tx_execution.rs b/core/lib/multivm/src/versions/vm_fast/tests/l1_tx_execution.rs index 3b58565098d5..1abb1e39e19b 100644 --- a/core/lib/multivm/src/versions/vm_fast/tests/l1_tx_execution.rs +++ b/core/lib/multivm/src/versions/vm_fast/tests/l1_tx_execution.rs @@ -176,7 +176,7 @@ fn test_l1_tx_execution_high_gas_limit() { let mut tx = account.get_l1_tx( Execute { - contract_address: L1_MESSENGER_ADDRESS, + contract_address: Some(L1_MESSENGER_ADDRESS), value: 0.into(), factory_deps: vec![], calldata, diff 
--git a/core/lib/multivm/src/versions/vm_fast/tests/l2_blocks.rs b/core/lib/multivm/src/versions/vm_fast/tests/l2_blocks.rs index 149d74c3d043..fde94d9da6cd 100644 --- a/core/lib/multivm/src/versions/vm_fast/tests/l2_blocks.rs +++ b/core/lib/multivm/src/versions/vm_fast/tests/l2_blocks.rs @@ -35,7 +35,7 @@ fn get_l1_noop() -> Transaction { ..Default::default() }), execute: Execute { - contract_address: H160::zero(), + contract_address: Some(H160::zero()), calldata: vec![], value: U256::zero(), factory_deps: vec![], diff --git a/core/lib/multivm/src/versions/vm_fast/tests/nonce_holder.rs b/core/lib/multivm/src/versions/vm_fast/tests/nonce_holder.rs index cb3243b022da..f72e95da9f87 100644 --- a/core/lib/multivm/src/versions/vm_fast/tests/nonce_holder.rs +++ b/core/lib/multivm/src/versions/vm_fast/tests/nonce_holder.rs @@ -59,7 +59,7 @@ fn test_nonce_holder() { vm.reset_state(true); let mut transaction = account.get_l2_tx_for_execute_with_nonce( Execute { - contract_address: account.address, + contract_address: Some(account.address), calldata: vec![12], value: Default::default(), factory_deps: vec![], diff --git a/core/lib/multivm/src/versions/vm_fast/tests/precompiles.rs b/core/lib/multivm/src/versions/vm_fast/tests/precompiles.rs index 468dcf827132..5bc3f614d61b 100644 --- a/core/lib/multivm/src/versions/vm_fast/tests/precompiles.rs +++ b/core/lib/multivm/src/versions/vm_fast/tests/precompiles.rs @@ -29,7 +29,7 @@ fn test_keccak() { let account = &mut vm.rich_accounts[0]; let tx = account.get_l2_tx_for_execute( Execute { - contract_address: address, + contract_address: Some(address), calldata: hex::decode(keccak1000_calldata).unwrap(), value: 0.into(), factory_deps: vec![], @@ -66,7 +66,7 @@ fn test_sha256() { let account = &mut vm.rich_accounts[0]; let tx = account.get_l2_tx_for_execute( Execute { - contract_address: address, + contract_address: Some(address), calldata: hex::decode(sha1000_calldata).unwrap(), value: 0.into(), factory_deps: vec![], @@ -96,7 +96,7 @@ fn test_ecrecover() { let account = &mut vm.rich_accounts[0]; let tx = account.get_l2_tx_for_execute( Execute { - contract_address: account.address, + contract_address: Some(account.address), calldata: vec![], value: 0.into(), factory_deps: vec![], diff --git a/core/lib/multivm/src/versions/vm_fast/tests/refunds.rs b/core/lib/multivm/src/versions/vm_fast/tests/refunds.rs index 26eb5ddd4c38..1856995149aa 100644 --- a/core/lib/multivm/src/versions/vm_fast/tests/refunds.rs +++ b/core/lib/multivm/src/versions/vm_fast/tests/refunds.rs @@ -181,7 +181,7 @@ fn negative_pubdata_for_transaction() { let expensive_tx = vm.rich_accounts[0].get_l2_tx_for_execute( Execute { - contract_address: expensive_contract_address, + contract_address: Some(expensive_contract_address), calldata: expensive_function .encode_input(&[Token::Uint(10.into())]) .unwrap(), @@ -200,7 +200,7 @@ fn negative_pubdata_for_transaction() { // This transaction cleans all initial writes in the contract, thus having negative `pubdata` impact. 
let clean_up_tx = vm.rich_accounts[0].get_l2_tx_for_execute( Execute { - contract_address: expensive_contract_address, + contract_address: Some(expensive_contract_address), calldata: cleanup_function.encode_input(&[]).unwrap(), value: U256::zero(), factory_deps: vec![], diff --git a/core/lib/multivm/src/versions/vm_fast/tests/require_eip712.rs b/core/lib/multivm/src/versions/vm_fast/tests/require_eip712.rs index be807bc80275..1fd2ebd523b0 100644 --- a/core/lib/multivm/src/versions/vm_fast/tests/require_eip712.rs +++ b/core/lib/multivm/src/versions/vm_fast/tests/require_eip712.rs @@ -72,7 +72,7 @@ async fn test_require_eip712() { let tx = private_account.get_l2_tx_for_execute( Execute { - contract_address: account_abstraction.address, + contract_address: Some(account_abstraction.address), calldata: encoded_input, value: Default::default(), factory_deps: vec![], @@ -129,7 +129,7 @@ async fn test_require_eip712() { // // Now send the 'classic' EIP712 transaction let tx_712 = L2Tx::new( - beneficiary.address, + Some(beneficiary.address), vec![], Nonce(1), Fee { diff --git a/core/lib/multivm/src/versions/vm_fast/tests/rollbacks.rs b/core/lib/multivm/src/versions/vm_fast/tests/rollbacks.rs index a677a61c6029..e7b3f2043385 100644 --- a/core/lib/multivm/src/versions/vm_fast/tests/rollbacks.rs +++ b/core/lib/multivm/src/versions/vm_fast/tests/rollbacks.rs @@ -83,7 +83,7 @@ fn test_vm_loadnext_rollbacks() { let loadnext_tx_1 = account.get_l2_tx_for_execute( Execute { - contract_address: address, + contract_address: Some(address), calldata: LoadnextContractExecutionParams { reads: 100, writes: 100, @@ -101,7 +101,7 @@ fn test_vm_loadnext_rollbacks() { let loadnext_tx_2 = account.get_l2_tx_for_execute( Execute { - contract_address: address, + contract_address: Some(address), calldata: LoadnextContractExecutionParams { reads: 100, writes: 100, diff --git a/core/lib/multivm/src/versions/vm_fast/tests/sekp256r1.rs b/core/lib/multivm/src/versions/vm_fast/tests/sekp256r1.rs index a61a0a2bd91c..55ca372c4a9f 100644 --- a/core/lib/multivm/src/versions/vm_fast/tests/sekp256r1.rs +++ b/core/lib/multivm/src/versions/vm_fast/tests/sekp256r1.rs @@ -48,7 +48,7 @@ fn test_sekp256r1() { let tx = account.get_l2_tx_for_execute( Execute { - contract_address: P256VERIFY_PRECOMPILE_ADDRESS, + contract_address: Some(P256VERIFY_PRECOMPILE_ADDRESS), calldata: [digest, encoded_r, encoded_s, x, y].concat(), value: U256::zero(), factory_deps: vec![], diff --git a/core/lib/multivm/src/versions/vm_fast/tests/storage.rs b/core/lib/multivm/src/versions/vm_fast/tests/storage.rs index d272668c697a..2cfadb640e72 100644 --- a/core/lib/multivm/src/versions/vm_fast/tests/storage.rs +++ b/core/lib/multivm/src/versions/vm_fast/tests/storage.rs @@ -31,7 +31,7 @@ fn test_storage(first_tx_calldata: Vec, second_tx_calldata: Vec) -> u32 let tx1 = account.get_l2_tx_for_execute( Execute { - contract_address: test_contract_address, + contract_address: Some(test_contract_address), calldata: first_tx_calldata, value: 0.into(), factory_deps: vec![], @@ -41,7 +41,7 @@ fn test_storage(first_tx_calldata: Vec, second_tx_calldata: Vec) -> u32 let tx2 = account.get_l2_tx_for_execute( Execute { - contract_address: test_contract_address, + contract_address: Some(test_contract_address), calldata: second_tx_calldata, value: 0.into(), factory_deps: vec![], diff --git a/core/lib/multivm/src/versions/vm_fast/tests/tester/transaction_test_info.rs b/core/lib/multivm/src/versions/vm_fast/tests/tester/transaction_test_info.rs index ce45390260c5..b84af0481d70 100644 
--- a/core/lib/multivm/src/versions/vm_fast/tests/tester/transaction_test_info.rs +++ b/core/lib/multivm/src/versions/vm_fast/tests/tester/transaction_test_info.rs @@ -1,6 +1,7 @@ use std::fmt; use zksync_types::{ExecuteTransactionCommon, Transaction, H160, U256}; +use zksync_vm2::interface::{Event, StateInterface}; use super::VmTester; use crate::{ @@ -190,7 +191,7 @@ impl TransactionTestInfo { struct VmStateDump { state: S, storage_writes: Vec<((H160, U256), U256)>, - events: Box<[zksync_vm2::Event]>, + events: Box<[Event]>, } impl PartialEq for VmStateDump { @@ -205,14 +206,8 @@ impl Vm { fn dump_state(&self) -> VmStateDump { VmStateDump { state: self.inner.dump_state(), - storage_writes: self - .inner - .world_diff() - .get_storage_state() - .iter() - .map(|(k, v)| (*k, *v)) - .collect(), - events: self.inner.world_diff().events().into(), + storage_writes: self.inner.get_storage_state().collect(), + events: self.inner.events().collect(), } } } diff --git a/core/lib/multivm/src/versions/vm_fast/tests/tracing_execution_error.rs b/core/lib/multivm/src/versions/vm_fast/tests/tracing_execution_error.rs index bccb1e6ec16f..89f0fa236206 100644 --- a/core/lib/multivm/src/versions/vm_fast/tests/tracing_execution_error.rs +++ b/core/lib/multivm/src/versions/vm_fast/tests/tracing_execution_error.rs @@ -28,7 +28,7 @@ fn test_tracing_of_execution_errors() { let tx = account.get_l2_tx_for_execute( Execute { - contract_address, + contract_address: Some(contract_address), calldata: get_execute_error_calldata(), value: Default::default(), factory_deps: vec![], diff --git a/core/lib/multivm/src/versions/vm_fast/tests/transfer.rs b/core/lib/multivm/src/versions/vm_fast/tests/transfer.rs index 4ba513ce0d35..ef510546f11c 100644 --- a/core/lib/multivm/src/versions/vm_fast/tests/transfer.rs +++ b/core/lib/multivm/src/versions/vm_fast/tests/transfer.rs @@ -71,7 +71,7 @@ fn test_send_or_transfer(test_option: TestOptions) { let account = &mut vm.rich_accounts[0]; let tx = account.get_l2_tx_for_execute( Execute { - contract_address: test_contract_address, + contract_address: Some(test_contract_address), calldata, value: U256::zero(), factory_deps: vec![], @@ -164,7 +164,7 @@ fn test_reentrancy_protection_send_or_transfer(test_option: TestOptions) { let account = &mut vm.rich_accounts[0]; let tx1 = account.get_l2_tx_for_execute( Execute { - contract_address: reentrant_recipient_address, + contract_address: Some(reentrant_recipient_address), calldata: reentrant_recipient_abi .function("setX") .unwrap() @@ -185,7 +185,7 @@ fn test_reentrancy_protection_send_or_transfer(test_option: TestOptions) { let tx2 = account.get_l2_tx_for_execute( Execute { - contract_address: test_contract_address, + contract_address: Some(test_contract_address), calldata, value, factory_deps: vec![], diff --git a/core/lib/multivm/src/versions/vm_fast/tests/upgrade.rs b/core/lib/multivm/src/versions/vm_fast/tests/upgrade.rs index dd25c2097405..ba4863f7c457 100644 --- a/core/lib/multivm/src/versions/vm_fast/tests/upgrade.rs +++ b/core/lib/multivm/src/versions/vm_fast/tests/upgrade.rs @@ -265,7 +265,7 @@ fn get_forced_deploy_tx(deployment: &[ForceDeployment]) -> Transaction { .expect("failed to encode parameters"); let execute = Execute { - contract_address: CONTRACT_DEPLOYER_ADDRESS, + contract_address: Some(CONTRACT_DEPLOYER_ADDRESS), calldata, factory_deps: vec![], value: U256::zero(), @@ -315,7 +315,7 @@ fn get_complex_upgrade_tx( .unwrap(); let execute = Execute { - contract_address: COMPLEX_UPGRADER_ADDRESS, + contract_address: 
Some(COMPLEX_UPGRADER_ADDRESS),
         calldata: complex_upgrader_calldata,
         factory_deps: vec![],
         value: U256::zero(),
diff --git a/core/lib/multivm/src/versions/vm_fast/tests/utils.rs b/core/lib/multivm/src/versions/vm_fast/tests/utils.rs
index d91e13076514..5ab5aa0dec92 100644
--- a/core/lib/multivm/src/versions/vm_fast/tests/utils.rs
+++ b/core/lib/multivm/src/versions/vm_fast/tests/utils.rs
@@ -10,7 +10,7 @@ use zksync_types::{
     U256,
 };
 use zksync_utils::{bytecode::hash_bytecode, bytes_to_be_words, h256_to_u256, u256_to_h256};
-use zksync_vm2::{HeapId, StateInterface};
+use zksync_vm2::interface::{HeapId, StateInterface};

 use crate::interface::storage::ReadStorage;
diff --git a/core/lib/multivm/src/versions/vm_fast/transaction_data.rs b/core/lib/multivm/src/versions/vm_fast/transaction_data.rs
index 502be0dc22cc..2ec86eb3ceaf 100644
--- a/core/lib/multivm/src/versions/vm_fast/transaction_data.rs
+++ b/core/lib/multivm/src/versions/vm_fast/transaction_data.rs
@@ -22,7 +22,7 @@ use crate::vm_latest::{
 pub(crate) struct TransactionData {
     pub(crate) tx_type: u8,
     pub(crate) from: Address,
-    pub(crate) to: Address,
+    pub(crate) to: Option<Address>
, pub(crate) gas_limit: U256, pub(crate) pubdata_price_limit: U256, pub(crate) max_fee_per_gas: U256, @@ -169,7 +169,7 @@ impl TransactionData { encode(&[Token::Tuple(vec![ Token::Uint(U256::from_big_endian(&self.tx_type.to_be_bytes())), Token::Address(self.from), - Token::Address(self.to), + Token::Address(self.to.unwrap_or_default()), Token::Uint(self.gas_limit), Token::Uint(self.pubdata_price_limit), Token::Uint(self.max_fee_per_gas), @@ -305,7 +305,7 @@ mod tests { let transaction = TransactionData { tx_type: 113, from: Address::random(), - to: Address::random(), + to: Some(Address::random()), gas_limit: U256::from(1u32), pubdata_price_limit: U256::from(1u32), max_fee_per_gas: U256::from(1u32), diff --git a/core/lib/multivm/src/versions/vm_fast/vm.rs b/core/lib/multivm/src/versions/vm_fast/vm.rs index ecec35af26b7..0b6172a4d8a7 100644 --- a/core/lib/multivm/src/versions/vm_fast/vm.rs +++ b/core/lib/multivm/src/versions/vm_fast/vm.rs @@ -16,8 +16,8 @@ use zksync_types::{ }; use zksync_utils::{bytecode::hash_bytecode, h256_to_u256, u256_to_h256}; use zksync_vm2::{ - decode::decode_program, CallframeInterface, ExecutionEnd, FatPointer, HeapId, Program, - Settings, StateInterface, Tracer, VirtualMachine, + interface::{CallframeInterface, HeapId, StateInterface, Tracer}, + ExecutionEnd, FatPointer, Program, Settings, VirtualMachine, }; use super::{ @@ -91,7 +91,7 @@ impl Vm { BOOTLOADER_ADDRESS, bootloader, H160::zero(), - vec![], + &[], system_env.bootloader_gas_limit, Settings { default_aa_code_hash, @@ -139,7 +139,7 @@ impl Vm { operator_suggested_refund: 0, }; let mut last_tx_result = None; - let mut pubdata_before = self.inner.world_diff().pubdata() as u32; + let mut pubdata_before = self.inner.pubdata() as u32; let result = loop { let hook = match self.inner.run(&mut self.world, tracer) { @@ -185,7 +185,7 @@ impl Vm { ) .as_u64(); - let pubdata_published = self.inner.world_diff().pubdata() as u32; + let pubdata_published = self.inner.pubdata() as u32; refunds.operator_suggested_refund = compute_refund( &self.batch_env, @@ -246,8 +246,7 @@ impl Vm { unreachable!("We do not provide the pubdata when executing the block tip or a single transaction"); } - let events = - merge_events(self.inner.world_diff().events(), self.batch_env.number); + let events = merge_events(self.inner.events(), self.batch_env.number); let published_bytecodes = events .iter() @@ -458,7 +457,8 @@ impl Vm { // visible for testing pub(super) fn get_current_execution_state(&self) -> CurrentExecutionState { let world_diff = self.inner.world_diff(); - let events = merge_events(world_diff.events(), self.batch_env.number); + let vm = &self.inner; + let events = merge_events(vm.events(), self.batch_env.number); let user_l2_to_l1_logs = extract_l2tol1logs_from_l1_messenger(&events) .into_iter() @@ -477,22 +477,12 @@ impl Vm { }) .collect(), used_contract_hashes: self.decommitted_hashes().collect(), - system_logs: world_diff - .l2_to_l1_logs() - .iter() - .map(|x| x.glue_into()) - .collect(), + system_logs: vm.l2_to_l1_logs().map(GlueInto::glue_into).collect(), user_l2_to_l1_logs, storage_refunds: world_diff.storage_refunds().to_vec(), pubdata_costs: world_diff.pubdata_costs().to_vec(), } } - - fn delete_history_if_appropriate(&mut self) { - if self.snapshot.is_none() && !self.has_previous_far_calls() { - self.inner.delete_history(); - } - } } impl VmFactory> for Vm> { @@ -527,7 +517,7 @@ impl VmInterface for Vm { let mut tracer = CircuitsTracer::default(); let start = self.inner.world_diff().snapshot(); - let pubdata_before 
= self.inner.world_diff().pubdata(); + let pubdata_before = self.inner.pubdata(); let gas_before = self.gas_remaining(); let (result, refunds) = self.run(execution_mode, &mut tracer, track_refunds); @@ -557,7 +547,7 @@ impl VmInterface for Vm { }) .collect(); let events = merge_events( - self.inner.world_diff().events_after(&start), + self.inner.world_diff().events_after(&start).iter().copied(), self.batch_env.number, ); let user_l2_to_l1_logs = extract_l2tol1logs_from_l1_messenger(&events) @@ -570,7 +560,7 @@ impl VmInterface for Vm { .world_diff() .l2_to_l1_logs_after(&start) .iter() - .map(|x| x.glue_into()) + .map(|&log| log.glue_into()) .collect(); VmExecutionLogs { storage_logs, @@ -581,7 +571,7 @@ impl VmInterface for Vm { } }; - let pubdata_after = self.inner.world_diff().pubdata(); + let pubdata_after = self.inner.pubdata(); let circuit_statistic = tracer.circuit_statistic(); let gas_remaining = self.gas_remaining(); VmExecutionResultAndLogs { @@ -656,7 +646,6 @@ impl VmInterface for Vm { #[derive(Debug)] struct VmSnapshot { - vm_snapshot: zksync_vm2::Snapshot, bootloader_snapshot: BootloaderStateSnapshot, gas_for_account_validation: u32, } @@ -668,9 +657,8 @@ impl VmInterfaceHistoryEnabled for Vm { "cannot create a VM snapshot until a previous snapshot is rolled back to or popped" ); - self.delete_history_if_appropriate(); + self.inner.make_snapshot(); self.snapshot = Some(VmSnapshot { - vm_snapshot: self.inner.snapshot(), bootloader_snapshot: self.bootloader_state.get_snapshot(), gas_for_account_validation: self.gas_for_account_validation, }); @@ -678,21 +666,18 @@ impl VmInterfaceHistoryEnabled for Vm { fn rollback_to_the_latest_snapshot(&mut self) { let VmSnapshot { - vm_snapshot, bootloader_snapshot, gas_for_account_validation, } = self.snapshot.take().expect("no snapshots to rollback to"); - self.inner.rollback(vm_snapshot); + self.inner.rollback(); self.bootloader_state.apply_snapshot(bootloader_snapshot); self.gas_for_account_validation = gas_for_account_validation; - - self.delete_history_if_appropriate(); } fn pop_snapshot_no_rollback(&mut self) { + self.inner.pop_snapshot(); self.snapshot = None; - self.delete_history_if_appropriate(); } } @@ -735,39 +720,13 @@ impl World { } } - fn bytecode_to_program(bytecode: &[u8]) -> Program { - Program::new( - decode_program( - &bytecode - .chunks_exact(8) - .map(|chunk| u64::from_be_bytes(chunk.try_into().unwrap())) - .collect::>(), - false, - ), - bytecode - .chunks_exact(32) - .map(U256::from_big_endian) - .collect::>(), - ) - } - fn convert_system_contract_code( code: &SystemContractCode, is_bootloader: bool, ) -> (U256, Program) { ( h256_to_u256(code.hash), - Program::new( - decode_program( - &code - .code - .iter() - .flat_map(|x| x.0.into_iter().rev()) - .collect::>(), - is_bootloader, - ), - code.code.clone(), - ), + Program::from_words(code.code.clone(), is_bootloader), ) } } @@ -822,11 +781,12 @@ impl zksync_vm2::World for World { self.program_cache .entry(hash) .or_insert_with(|| { - Self::bytecode_to_program(self.bytecode_cache.entry(hash).or_insert_with(|| { + let bytecode = self.bytecode_cache.entry(hash).or_insert_with(|| { self.storage .load_factory_dep(u256_to_h256(hash)) .expect("vm tried to decommit nonexistent bytecode") - })) + }); + Program::new(bytecode, false) }) .clone() } diff --git a/core/lib/multivm/src/versions/vm_latest/tests/block_tip.rs b/core/lib/multivm/src/versions/vm_latest/tests/block_tip.rs index bed348afd2d9..02c73344a543 100644 --- a/core/lib/multivm/src/versions/vm_latest/tests/block_tip.rs 
+++ b/core/lib/multivm/src/versions/vm_latest/tests/block_tip.rs @@ -164,7 +164,7 @@ fn execute_test(test_data: L1MessengerTestData) -> TestStatistics { for (i, data) in txs_data.into_iter().enumerate() { let tx = account.get_l2_tx_for_execute( Execute { - contract_address: CONTRACT_FORCE_DEPLOYER_ADDRESS, + contract_address: Some(CONTRACT_FORCE_DEPLOYER_ADDRESS), calldata: data, value: U256::zero(), factory_deps: vec![], diff --git a/core/lib/multivm/src/versions/vm_latest/tests/call_tracer.rs b/core/lib/multivm/src/versions/vm_latest/tests/call_tracer.rs index a4d0eb2d17e2..df7a78855426 100644 --- a/core/lib/multivm/src/versions/vm_latest/tests/call_tracer.rs +++ b/core/lib/multivm/src/versions/vm_latest/tests/call_tracer.rs @@ -34,7 +34,7 @@ fn test_max_depth() { let account = &mut vm.rich_accounts[0]; let tx = account.get_l2_tx_for_execute( Execute { - contract_address: address, + contract_address: Some(address), calldata: vec![], value: Default::default(), factory_deps: vec![], @@ -69,7 +69,7 @@ fn test_basic_behavior() { let account = &mut vm.rich_accounts[0]; let tx = account.get_l2_tx_for_execute( Execute { - contract_address: address, + contract_address: Some(address), calldata: hex::decode(increment_by_6_calldata).unwrap(), value: Default::default(), factory_deps: vec![], diff --git a/core/lib/multivm/src/versions/vm_latest/tests/circuits.rs b/core/lib/multivm/src/versions/vm_latest/tests/circuits.rs index 02ec2dc58aaa..35412ee4d1bd 100644 --- a/core/lib/multivm/src/versions/vm_latest/tests/circuits.rs +++ b/core/lib/multivm/src/versions/vm_latest/tests/circuits.rs @@ -22,7 +22,7 @@ fn test_circuits() { let account = &mut vm.rich_accounts[0]; let tx = account.get_l2_tx_for_execute( Execute { - contract_address: Address::random(), + contract_address: Some(Address::random()), calldata: Vec::new(), value: U256::from(1u8), factory_deps: vec![], diff --git a/core/lib/multivm/src/versions/vm_latest/tests/code_oracle.rs b/core/lib/multivm/src/versions/vm_latest/tests/code_oracle.rs index 0708d67e27a3..b15ef7fde2bf 100644 --- a/core/lib/multivm/src/versions/vm_latest/tests/code_oracle.rs +++ b/core/lib/multivm/src/versions/vm_latest/tests/code_oracle.rs @@ -69,7 +69,7 @@ fn test_code_oracle() { // Firstly, let's ensure that the contract works. let tx1 = account.get_l2_tx_for_execute( Execute { - contract_address: precompiles_contract_address, + contract_address: Some(precompiles_contract_address), calldata: call_code_oracle_function .encode_input(&[ Token::FixedBytes(normal_zkevm_bytecode_hash.0.to_vec()), @@ -93,7 +93,7 @@ fn test_code_oracle() { // the decommitted bytecode gets erased (it shouldn't). let tx2 = account.get_l2_tx_for_execute( Execute { - contract_address: precompiles_contract_address, + contract_address: Some(precompiles_contract_address), calldata: call_code_oracle_function .encode_input(&[ Token::FixedBytes(normal_zkevm_bytecode_hash.0.to_vec()), @@ -169,7 +169,7 @@ fn test_code_oracle_big_bytecode() { // Firstly, let's ensure that the contract works. 
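The test-suite hunks above and below all make the same mechanical change: `Execute::contract_address` became `Option<Address>`, so every call target is wrapped in `Some(..)`. A minimal sketch of the pattern with stand-in types (not the real zksync_types definitions):

// Stand-in types; the real Address and Execute live in zksync_types.
type Address = [u8; 20];

struct Execute {
    contract_address: Option<Address>, // was plain `Address` before this change
    calldata: Vec<u8>,
}

fn call(target: Address) -> Execute {
    // Call sites now wrap the target; `None` is reserved for deployment-style txs with no `to`.
    Execute { contract_address: Some(target), calldata: vec![] }
}

fn main() {
    assert_eq!(call([0x11; 20]).contract_address, Some([0x11; 20]));
}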
let tx1 = account.get_l2_tx_for_execute( Execute { - contract_address: precompiles_contract_address, + contract_address: Some(precompiles_contract_address), calldata: call_code_oracle_function .encode_input(&[ Token::FixedBytes(big_zkevm_bytecode_hash.0.to_vec()), @@ -251,7 +251,7 @@ fn refunds_in_code_oracle() { let tx = account.get_l2_tx_for_execute( Execute { - contract_address: precompiles_contract_address, + contract_address: Some(precompiles_contract_address), calldata: call_code_oracle_function .encode_input(&[ Token::FixedBytes(normal_zkevm_bytecode_hash.0.to_vec()), diff --git a/core/lib/multivm/src/versions/vm_latest/tests/gas_limit.rs b/core/lib/multivm/src/versions/vm_latest/tests/gas_limit.rs index 34e1e2d25f31..cc9aac5bb91b 100644 --- a/core/lib/multivm/src/versions/vm_latest/tests/gas_limit.rs +++ b/core/lib/multivm/src/versions/vm_latest/tests/gas_limit.rs @@ -21,7 +21,10 @@ fn test_tx_gas_limit_offset() { let gas_limit = 9999.into(); let tx = vm.rich_accounts[0].get_l2_tx_for_execute( - Execute::default(), + Execute { + contract_address: Some(Default::default()), + ..Default::default() + }, Some(Fee { gas_limit, ..Account::default_fee() diff --git a/core/lib/multivm/src/versions/vm_latest/tests/get_used_contracts.rs b/core/lib/multivm/src/versions/vm_latest/tests/get_used_contracts.rs index a42037a7f5be..ef19717a627c 100644 --- a/core/lib/multivm/src/versions/vm_latest/tests/get_used_contracts.rs +++ b/core/lib/multivm/src/versions/vm_latest/tests/get_used_contracts.rs @@ -82,7 +82,7 @@ fn test_get_used_contracts() { let account2 = Account::random(); let tx2 = account2.get_l1_tx( Execute { - contract_address: CONTRACT_DEPLOYER_ADDRESS, + contract_address: Some(CONTRACT_DEPLOYER_ADDRESS), calldata: big_calldata, value: Default::default(), factory_deps: vec![vec![1; 32]], @@ -208,7 +208,7 @@ fn execute_proxy_counter(gas: u32) -> (VmTester, U256, VmExecut let increment = proxy_counter_abi.function("increment").unwrap(); let increment_tx = account.get_l2_tx_for_execute( Execute { - contract_address: deploy_tx.address, + contract_address: Some(deploy_tx.address), calldata: increment .encode_input(&[Token::Uint(1.into()), Token::Uint(gas.into())]) .unwrap(), diff --git a/core/lib/multivm/src/versions/vm_latest/tests/l1_tx_execution.rs b/core/lib/multivm/src/versions/vm_latest/tests/l1_tx_execution.rs index dcb1bff06d09..0fc12848227e 100644 --- a/core/lib/multivm/src/versions/vm_latest/tests/l1_tx_execution.rs +++ b/core/lib/multivm/src/versions/vm_latest/tests/l1_tx_execution.rs @@ -173,7 +173,7 @@ fn test_l1_tx_execution_high_gas_limit() { let mut tx = account.get_l1_tx( Execute { - contract_address: L1_MESSENGER_ADDRESS, + contract_address: Some(L1_MESSENGER_ADDRESS), value: 0.into(), factory_deps: vec![], calldata, diff --git a/core/lib/multivm/src/versions/vm_latest/tests/nonce_holder.rs b/core/lib/multivm/src/versions/vm_latest/tests/nonce_holder.rs index 661286ca9697..91d78c69a931 100644 --- a/core/lib/multivm/src/versions/vm_latest/tests/nonce_holder.rs +++ b/core/lib/multivm/src/versions/vm_latest/tests/nonce_holder.rs @@ -64,7 +64,7 @@ fn test_nonce_holder() { let mut transaction_data: TransactionData = account .get_l2_tx_for_execute_with_nonce( Execute { - contract_address: account.address, + contract_address: Some(account.address), calldata: vec![12], value: Default::default(), factory_deps: vec![], diff --git a/core/lib/multivm/src/versions/vm_latest/tests/precompiles.rs b/core/lib/multivm/src/versions/vm_latest/tests/precompiles.rs index 2ab40faf22ca..9388d0161846 
100644 --- a/core/lib/multivm/src/versions/vm_latest/tests/precompiles.rs +++ b/core/lib/multivm/src/versions/vm_latest/tests/precompiles.rs @@ -31,7 +31,7 @@ fn test_keccak() { let account = &mut vm.rich_accounts[0]; let tx = account.get_l2_tx_for_execute( Execute { - contract_address: address, + contract_address: Some(address), calldata: hex::decode(keccak1000_calldata).unwrap(), value: Default::default(), factory_deps: vec![], @@ -75,7 +75,7 @@ fn test_sha256() { let account = &mut vm.rich_accounts[0]; let tx = account.get_l2_tx_for_execute( Execute { - contract_address: address, + contract_address: Some(address), calldata: hex::decode(sha1000_calldata).unwrap(), value: Default::default(), factory_deps: vec![], @@ -112,7 +112,7 @@ fn test_ecrecover() { let account = &mut vm.rich_accounts[0]; let tx = account.get_l2_tx_for_execute( Execute { - contract_address: account.address, + contract_address: Some(account.address), calldata: Vec::new(), value: Default::default(), factory_deps: vec![], diff --git a/core/lib/multivm/src/versions/vm_latest/tests/prestate_tracer.rs b/core/lib/multivm/src/versions/vm_latest/tests/prestate_tracer.rs index eb3104fd637a..8bf5e9919889 100644 --- a/core/lib/multivm/src/versions/vm_latest/tests/prestate_tracer.rs +++ b/core/lib/multivm/src/versions/vm_latest/tests/prestate_tracer.rs @@ -88,7 +88,7 @@ fn test_prestate_tracer_diff_mode() { //enter ether to contract to see difference in the balance post execution let tx0 = Execute { - contract_address: vm.test_contract.unwrap(), + contract_address: Some(vm.test_contract.unwrap()), calldata: Default::default(), value: U256::from(100000), factory_deps: vec![], @@ -98,7 +98,7 @@ fn test_prestate_tracer_diff_mode() { .push_transaction(account.get_l2_tx_for_execute(tx0.clone(), None)); let tx1 = Execute { - contract_address: deployed_address2, + contract_address: Some(deployed_address2), calldata: Default::default(), value: U256::from(200000), factory_deps: vec![], diff --git a/core/lib/multivm/src/versions/vm_latest/tests/refunds.rs b/core/lib/multivm/src/versions/vm_latest/tests/refunds.rs index ca058d672d2e..cc0085f20252 100644 --- a/core/lib/multivm/src/versions/vm_latest/tests/refunds.rs +++ b/core/lib/multivm/src/versions/vm_latest/tests/refunds.rs @@ -188,7 +188,7 @@ fn negative_pubdata_for_transaction() { let expensive_tx = vm.rich_accounts[0].get_l2_tx_for_execute( Execute { - contract_address: expensive_contract_address, + contract_address: Some(expensive_contract_address), calldata: expensive_function .encode_input(&[Token::Uint(10.into())]) .unwrap(), @@ -207,7 +207,7 @@ fn negative_pubdata_for_transaction() { // This transaction cleans all initial writes in the contract, thus having negative `pubdata` impact. 
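The comment above is the reason pubdata deltas are tracked as signed values: a transaction that removes previously published initial writes yields negative net pubdata. A toy illustration of that bookkeeping (not the actual VM accounting code):

fn main() {
    // Counter values before and after a clean-up transaction.
    let pubdata_before: i64 = 1_000;
    let pubdata_after: i64 = 700; // initial writes were deleted, shrinking published state

    // Net pubdata for the transaction comes out negative.
    let pubdata_published = pubdata_after - pubdata_before;
    assert_eq!(pubdata_published, -300);
}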
let clean_up_tx = vm.rich_accounts[0].get_l2_tx_for_execute( Execute { - contract_address: expensive_contract_address, + contract_address: Some(expensive_contract_address), calldata: cleanup_function.encode_input(&[]).unwrap(), value: U256::zero(), factory_deps: vec![], diff --git a/core/lib/multivm/src/versions/vm_latest/tests/require_eip712.rs b/core/lib/multivm/src/versions/vm_latest/tests/require_eip712.rs index 779e9b5c629d..cdd71354c8de 100644 --- a/core/lib/multivm/src/versions/vm_latest/tests/require_eip712.rs +++ b/core/lib/multivm/src/versions/vm_latest/tests/require_eip712.rs @@ -63,7 +63,7 @@ async fn test_require_eip712() { let tx = private_account.get_l2_tx_for_execute( Execute { - contract_address: account_abstraction.address, + contract_address: Some(account_abstraction.address), calldata: encoded_input, value: Default::default(), factory_deps: vec![], @@ -120,7 +120,7 @@ async fn test_require_eip712() { // // Now send the 'classic' EIP712 transaction let tx_712 = L2Tx::new( - beneficiary.address, + Some(beneficiary.address), vec![], Nonce(1), Fee { diff --git a/core/lib/multivm/src/versions/vm_latest/tests/rollbacks.rs b/core/lib/multivm/src/versions/vm_latest/tests/rollbacks.rs index 43e7baae3b2d..52e4d24bc0b4 100644 --- a/core/lib/multivm/src/versions/vm_latest/tests/rollbacks.rs +++ b/core/lib/multivm/src/versions/vm_latest/tests/rollbacks.rs @@ -92,7 +92,7 @@ fn test_vm_loadnext_rollbacks() { let loadnext_tx_1 = account.get_l2_tx_for_execute( Execute { - contract_address: address, + contract_address: Some(address), calldata: LoadnextContractExecutionParams { reads: 100, writes: 100, @@ -110,7 +110,7 @@ fn test_vm_loadnext_rollbacks() { let loadnext_tx_2 = account.get_l2_tx_for_execute( Execute { - contract_address: address, + contract_address: Some(address), calldata: LoadnextContractExecutionParams { reads: 100, writes: 100, diff --git a/core/lib/multivm/src/versions/vm_latest/tests/sekp256r1.rs b/core/lib/multivm/src/versions/vm_latest/tests/sekp256r1.rs index 6cc731a1387c..93be9506a3b0 100644 --- a/core/lib/multivm/src/versions/vm_latest/tests/sekp256r1.rs +++ b/core/lib/multivm/src/versions/vm_latest/tests/sekp256r1.rs @@ -48,7 +48,7 @@ fn test_sekp256r1() { let tx = account.get_l2_tx_for_execute( Execute { - contract_address: P256VERIFY_PRECOMPILE_ADDRESS, + contract_address: Some(P256VERIFY_PRECOMPILE_ADDRESS), calldata: [digest, encoded_r, encoded_s, x, y].concat(), value: U256::zero(), factory_deps: vec![], diff --git a/core/lib/multivm/src/versions/vm_latest/tests/storage.rs b/core/lib/multivm/src/versions/vm_latest/tests/storage.rs index 0fe0b0220fae..126d174a6468 100644 --- a/core/lib/multivm/src/versions/vm_latest/tests/storage.rs +++ b/core/lib/multivm/src/versions/vm_latest/tests/storage.rs @@ -50,7 +50,7 @@ fn test_storage(txs: Vec) -> u32 { let tx = account.get_l2_tx_for_execute( Execute { - contract_address: test_contract_address, + contract_address: Some(test_contract_address), calldata, value: 0.into(), factory_deps: vec![], diff --git a/core/lib/multivm/src/versions/vm_latest/tests/tracing_execution_error.rs b/core/lib/multivm/src/versions/vm_latest/tests/tracing_execution_error.rs index 58c5ef77dc42..2db37881352f 100644 --- a/core/lib/multivm/src/versions/vm_latest/tests/tracing_execution_error.rs +++ b/core/lib/multivm/src/versions/vm_latest/tests/tracing_execution_error.rs @@ -27,7 +27,7 @@ fn test_tracing_of_execution_errors() { let tx = account.get_l2_tx_for_execute( Execute { - contract_address, + contract_address: Some(contract_address), 
calldata: get_execute_error_calldata(), value: Default::default(), factory_deps: vec![], diff --git a/core/lib/multivm/src/versions/vm_latest/tests/transfer.rs b/core/lib/multivm/src/versions/vm_latest/tests/transfer.rs index 31f6c3291ef6..2c380623636a 100644 --- a/core/lib/multivm/src/versions/vm_latest/tests/transfer.rs +++ b/core/lib/multivm/src/versions/vm_latest/tests/transfer.rs @@ -73,7 +73,7 @@ fn test_send_or_transfer(test_option: TestOptions) { let account = &mut vm.rich_accounts[0]; let tx = account.get_l2_tx_for_execute( Execute { - contract_address: test_contract_address, + contract_address: Some(test_contract_address), calldata, value: U256::zero(), factory_deps: vec![], @@ -169,7 +169,7 @@ fn test_reentrancy_protection_send_or_transfer(test_option: TestOptions) { let account = &mut vm.rich_accounts[0]; let tx1 = account.get_l2_tx_for_execute( Execute { - contract_address: reentrant_recipeint_address, + contract_address: Some(reentrant_recipeint_address), calldata: reentrant_recipient_abi .function("setX") .unwrap() @@ -190,7 +190,7 @@ fn test_reentrancy_protection_send_or_transfer(test_option: TestOptions) { let tx2 = account.get_l2_tx_for_execute( Execute { - contract_address: test_contract_address, + contract_address: Some(test_contract_address), calldata, value, factory_deps: vec![], diff --git a/core/lib/multivm/src/versions/vm_latest/tests/upgrade.rs b/core/lib/multivm/src/versions/vm_latest/tests/upgrade.rs index 7c3ebff4a77d..d85a504de40f 100644 --- a/core/lib/multivm/src/versions/vm_latest/tests/upgrade.rs +++ b/core/lib/multivm/src/versions/vm_latest/tests/upgrade.rs @@ -276,7 +276,7 @@ fn get_forced_deploy_tx(deployment: &[ForceDeployment]) -> Transaction { .expect("failed to encode parameters"); let execute = Execute { - contract_address: CONTRACT_DEPLOYER_ADDRESS, + contract_address: Some(CONTRACT_DEPLOYER_ADDRESS), calldata, factory_deps: vec![], value: U256::zero(), @@ -326,7 +326,7 @@ fn get_complex_upgrade_tx( .unwrap(); let execute = Execute { - contract_address: COMPLEX_UPGRADER_ADDRESS, + contract_address: Some(COMPLEX_UPGRADER_ADDRESS), calldata: complex_upgrader_calldata, factory_deps: vec![], value: U256::zero(), diff --git a/core/lib/multivm/src/versions/vm_latest/types/internals/transaction_data.rs b/core/lib/multivm/src/versions/vm_latest/types/internals/transaction_data.rs index 502be0dc22cc..2ec86eb3ceaf 100644 --- a/core/lib/multivm/src/versions/vm_latest/types/internals/transaction_data.rs +++ b/core/lib/multivm/src/versions/vm_latest/types/internals/transaction_data.rs @@ -22,7 +22,7 @@ use crate::vm_latest::{ pub(crate) struct TransactionData { pub(crate) tx_type: u8, pub(crate) from: Address, - pub(crate) to: Address, + pub(crate) to: Option<Address>
, pub(crate) gas_limit: U256, pub(crate) pubdata_price_limit: U256, pub(crate) max_fee_per_gas: U256, @@ -169,7 +169,7 @@ impl TransactionData { encode(&[Token::Tuple(vec![ Token::Uint(U256::from_big_endian(&self.tx_type.to_be_bytes())), Token::Address(self.from), - Token::Address(self.to), + Token::Address(self.to.unwrap_or_default()), Token::Uint(self.gas_limit), Token::Uint(self.pubdata_price_limit), Token::Uint(self.max_fee_per_gas), @@ -305,7 +305,7 @@ mod tests { let transaction = TransactionData { tx_type: 113, from: Address::random(), - to: Address::random(), + to: Some(Address::random()), gas_limit: U256::from(1u32), pubdata_price_limit: U256::from(1u32), max_fee_per_gas: U256::from(1u32), diff --git a/core/lib/multivm/src/versions/vm_m5/test_utils.rs b/core/lib/multivm/src/versions/vm_m5/test_utils.rs index 785eb49835f1..d7c0dfb9f6d0 100644 --- a/core/lib/multivm/src/versions/vm_m5/test_utils.rs +++ b/core/lib/multivm/src/versions/vm_m5/test_utils.rs @@ -151,7 +151,7 @@ pub fn get_create_execute(code: &[u8], calldata: &[u8]) -> Execute { .expect("failed to encode parameters"); Execute { - contract_address: CONTRACT_DEPLOYER_ADDRESS, + contract_address: Some(CONTRACT_DEPLOYER_ADDRESS), calldata, factory_deps: vec![code.to_vec()], value: U256::zero(), diff --git a/core/lib/multivm/src/versions/vm_m5/transaction_data.rs b/core/lib/multivm/src/versions/vm_m5/transaction_data.rs index 7ef739fd5bf5..b64e3f770185 100644 --- a/core/lib/multivm/src/versions/vm_m5/transaction_data.rs +++ b/core/lib/multivm/src/versions/vm_m5/transaction_data.rs @@ -22,7 +22,7 @@ const L1_TX_TYPE: u8 = 255; pub struct TransactionData { pub tx_type: u8, pub from: Address, - pub to: Address, + pub to: Option<Address>
, pub gas_limit: U256, pub pubdata_price_limit: U256, pub max_fee_per_gas: U256, @@ -144,7 +144,7 @@ impl TransactionData { encode(&[Token::Tuple(vec![ Token::Uint(U256::from_big_endian(&self.tx_type.to_be_bytes())), Token::Address(self.from), - Token::Address(self.to), + Token::Address(self.to.unwrap_or_default()), Token::Uint(self.gas_limit), Token::Uint(self.pubdata_price_limit), Token::Uint(self.max_fee_per_gas), @@ -479,7 +479,7 @@ mod tests { let transaction = TransactionData { tx_type: 113, from: Address::random(), - to: Address::random(), + to: Some(Address::random()), gas_limit: U256::from(1u32), pubdata_price_limit: U256::from(1u32), max_fee_per_gas: U256::from(1u32), diff --git a/core/lib/multivm/src/versions/vm_m6/test_utils.rs b/core/lib/multivm/src/versions/vm_m6/test_utils.rs index ecad7d911b40..4bd39bc56dd4 100644 --- a/core/lib/multivm/src/versions/vm_m6/test_utils.rs +++ b/core/lib/multivm/src/versions/vm_m6/test_utils.rs @@ -151,7 +151,7 @@ pub fn get_create_execute(code: &[u8], calldata: &[u8]) -> Execute { .expect("failed to encode parameters"); Execute { - contract_address: CONTRACT_DEPLOYER_ADDRESS, + contract_address: Some(CONTRACT_DEPLOYER_ADDRESS), calldata, factory_deps: vec![code.to_vec()], value: U256::zero(), diff --git a/core/lib/multivm/src/versions/vm_m6/transaction_data.rs b/core/lib/multivm/src/versions/vm_m6/transaction_data.rs index 99ce4671c29b..a8f80ea3255e 100644 --- a/core/lib/multivm/src/versions/vm_m6/transaction_data.rs +++ b/core/lib/multivm/src/versions/vm_m6/transaction_data.rs @@ -23,7 +23,7 @@ pub(crate) const L1_TX_TYPE: u8 = 255; pub struct TransactionData { pub tx_type: u8, pub from: Address, - pub to: Address, + pub to: Option<Address>
, pub gas_limit: U256, pub pubdata_price_limit: U256, pub max_fee_per_gas: U256, @@ -171,7 +171,7 @@ impl TransactionData { encode(&[Token::Tuple(vec![ Token::Uint(U256::from_big_endian(&self.tx_type.to_be_bytes())), Token::Address(self.from), - Token::Address(self.to), + Token::Address(self.to.unwrap_or_default()), Token::Uint(self.gas_limit), Token::Uint(self.pubdata_price_limit), Token::Uint(self.max_fee_per_gas), @@ -592,7 +592,7 @@ mod tests { let transaction = TransactionData { tx_type: 113, from: Address::random(), - to: Address::random(), + to: Some(Address::random()), gas_limit: U256::from(1u32), pubdata_price_limit: U256::from(1u32), max_fee_per_gas: U256::from(1u32), diff --git a/core/lib/multivm/src/versions/vm_refunds_enhancement/tests/gas_limit.rs b/core/lib/multivm/src/versions/vm_refunds_enhancement/tests/gas_limit.rs index 0ea1669cf217..1ff6ce12557f 100644 --- a/core/lib/multivm/src/versions/vm_refunds_enhancement/tests/gas_limit.rs +++ b/core/lib/multivm/src/versions/vm_refunds_enhancement/tests/gas_limit.rs @@ -21,10 +21,8 @@ fn test_tx_gas_limit_offset() { let gas_limit = 9999.into(); let tx = vm.rich_accounts[0].get_l2_tx_for_execute( Execute { - contract_address: Default::default(), - calldata: vec![], - value: Default::default(), - factory_deps: None, + contract_address: Some(Default::default()), + ..Default::default() }, Some(Fee { gas_limit, diff --git a/core/lib/multivm/src/versions/vm_refunds_enhancement/types/internals/transaction_data.rs b/core/lib/multivm/src/versions/vm_refunds_enhancement/types/internals/transaction_data.rs index 205090ba633e..22ab09296c91 100644 --- a/core/lib/multivm/src/versions/vm_refunds_enhancement/types/internals/transaction_data.rs +++ b/core/lib/multivm/src/versions/vm_refunds_enhancement/types/internals/transaction_data.rs @@ -22,7 +22,7 @@ use crate::vm_refunds_enhancement::{ pub(crate) struct TransactionData { pub(crate) tx_type: u8, pub(crate) from: Address, - pub(crate) to: Address, + pub(crate) to: Option<Address>
, pub(crate) gas_limit: U256, pub(crate) pubdata_price_limit: U256, pub(crate) max_fee_per_gas: U256, @@ -169,7 +169,7 @@ impl TransactionData { encode(&[Token::Tuple(vec![ Token::Uint(U256::from_big_endian(&self.tx_type.to_be_bytes())), Token::Address(self.from), - Token::Address(self.to), + Token::Address(self.to.unwrap_or_default()), Token::Uint(self.gas_limit), Token::Uint(self.pubdata_price_limit), Token::Uint(self.max_fee_per_gas), @@ -325,7 +325,7 @@ mod tests { let transaction = TransactionData { tx_type: 113, from: Address::random(), - to: Address::random(), + to: Some(Address::random()), gas_limit: U256::from(1u32), pubdata_price_limit: U256::from(1u32), max_fee_per_gas: U256::from(1u32), diff --git a/core/lib/multivm/src/versions/vm_virtual_blocks/tests/gas_limit.rs b/core/lib/multivm/src/versions/vm_virtual_blocks/tests/gas_limit.rs index 01ebe4c0d225..e51b8cab570e 100644 --- a/core/lib/multivm/src/versions/vm_virtual_blocks/tests/gas_limit.rs +++ b/core/lib/multivm/src/versions/vm_virtual_blocks/tests/gas_limit.rs @@ -21,10 +21,8 @@ fn test_tx_gas_limit_offset() { let gas_limit = 9999.into(); let tx = vm.rich_accounts[0].get_l2_tx_for_execute( Execute { - contract_address: Default::default(), - calldata: vec![], - value: Default::default(), - factory_deps: None, + contract_address: Some(Default::default()), + ..Default::default() }, Some(Fee { gas_limit, diff --git a/core/lib/multivm/src/versions/vm_virtual_blocks/types/internals/transaction_data.rs b/core/lib/multivm/src/versions/vm_virtual_blocks/types/internals/transaction_data.rs index b42950399f61..c96004163a65 100644 --- a/core/lib/multivm/src/versions/vm_virtual_blocks/types/internals/transaction_data.rs +++ b/core/lib/multivm/src/versions/vm_virtual_blocks/types/internals/transaction_data.rs @@ -22,7 +22,7 @@ use crate::vm_virtual_blocks::{ pub(crate) struct TransactionData { pub(crate) tx_type: u8, pub(crate) from: Address, - pub(crate) to: Address, + pub(crate) to: Option<Address>
, pub(crate) gas_limit: U256, pub(crate) pubdata_price_limit: U256, pub(crate) max_fee_per_gas: U256, @@ -169,7 +169,7 @@ impl TransactionData { encode(&[Token::Tuple(vec![ Token::Uint(U256::from_big_endian(&self.tx_type.to_be_bytes())), Token::Address(self.from), - Token::Address(self.to), + Token::Address(self.to.unwrap_or_default()), Token::Uint(self.gas_limit), Token::Uint(self.pubdata_price_limit), Token::Uint(self.max_fee_per_gas), @@ -325,7 +325,7 @@ mod tests { let transaction = TransactionData { tx_type: 113, from: Address::random(), - to: Address::random(), + to: Some(Address::random()), gas_limit: U256::from(1u32), pubdata_price_limit: U256::from(1u32), max_fee_per_gas: U256::from(1u32), diff --git a/core/lib/protobuf_config/src/consensus.rs b/core/lib/protobuf_config/src/consensus.rs index f5eb5c5b2f10..a5b552dffc4a 100644 --- a/core/lib/protobuf_config/src/consensus.rs +++ b/core/lib/protobuf_config/src/consensus.rs @@ -71,6 +71,13 @@ impl ProtoRepr for proto::GenesisSpec { .map(|x| parse_h160(x)) .transpose() .context("registry_address")?, + seed_peers: self + .seed_peers + .iter() + .enumerate() + .map(|(i, e)| e.read().context(i)) + .collect::<Result<_, _>>() + .context("seed_peers")?, }) } fn build(this: &Self::Type) -> Self { @@ -81,6 +88,11 @@ impl ProtoRepr for proto::GenesisSpec { attesters: this.attesters.iter().map(ProtoRepr::build).collect(), leader: Some(this.leader.0.clone()), registry_address: this.registry_address.map(|a| format!("{:?}", a)), + seed_peers: this + .seed_peers + .iter() + .map(|(k, v)| proto::NodeAddr::build(&(k.clone(), v.clone()))) + .collect(), } } } @@ -99,15 +111,25 @@ impl ProtoRepr for proto::RpcConfig { } } +impl ProtoRepr for proto::NodeAddr { + type Type = (NodePublicKey, Host); + fn read(&self) -> anyhow::Result<Self::Type> { + Ok(( + NodePublicKey(required(&self.key).context("key")?.clone()), + Host(required(&self.addr).context("addr")?.clone()), + )) + } + fn build(this: &Self::Type) -> Self { + Self { + key: Some(this.0 .0.clone()), + addr: Some(this.1 .0.clone()), + } + } +} + impl ProtoRepr for proto::Config { type Type = ConsensusConfig; fn read(&self) -> anyhow::Result<Self::Type> { - let read_addr = |e: &proto::NodeAddr| { - let key = NodePublicKey(required(&e.key).context("key")?.clone()); - let addr = Host(required(&e.addr).context("addr")?.clone()); - anyhow::Ok((key, addr)) - }; - let max_payload_size = required(&self.max_payload_size) .and_then(|x| Ok((*x).try_into()?)) .context("max_payload_size")?; @@ -144,8 +166,9 @@ impl ProtoRepr for proto::Config { .gossip_static_outbound .iter() .enumerate() - .map(|(i, e)| read_addr(e).context(i)) - .collect::<Result<_, _>>()?, + .map(|(i, e)| e.read().context(i)) + .collect::<Result<_, _>>() + .context("gossip_static_outbound")?, genesis_spec: read_optional_repr(&self.genesis_spec), rpc: read_optional_repr(&self.rpc_config), }) @@ -168,10 +191,7 @@ impl ProtoRepr for proto::Config { gossip_static_outbound: this .gossip_static_outbound .iter() - .map(|x| proto::NodeAddr { - key: Some(x.0 .0.clone()), - addr: Some(x.1 .0.clone()), - }) + .map(|(k, v)| proto::NodeAddr::build(&(k.clone(), v.clone()))) .collect(), genesis_spec: this.genesis_spec.as_ref().map(ProtoRepr::build), rpc_config: this.rpc.as_ref().map(ProtoRepr::build), diff --git a/core/lib/protobuf_config/src/proto/config/prover_job_monitor.proto b/core/lib/protobuf_config/src/proto/config/prover_job_monitor.proto index 7b505aa3bcfb..9aabf6e34832 100644 --- a/core/lib/protobuf_config/src/proto/config/prover_job_monitor.proto +++ 
b/core/lib/protobuf_config/src/proto/config/prover_job_monitor.proto @@ -17,4 +17,5 @@ message ProverJobMonitor { optional uint64 prover_queue_reporter_run_interval_ms = 12; // optional; ms optional uint64 witness_generator_queue_reporter_run_interval_ms = 13; // optional; ms optional uint64 witness_job_queuer_run_interval_ms = 14; // optional; ms + optional uint32 http_port = 15; // required; u32 } diff --git a/core/lib/protobuf_config/src/proto/core/consensus.proto b/core/lib/protobuf_config/src/proto/core/consensus.proto index 835ead1ab65c..6cabc45fc12a 100644 --- a/core/lib/protobuf_config/src/proto/core/consensus.proto +++ b/core/lib/protobuf_config/src/proto/core/consensus.proto @@ -31,10 +31,10 @@ package zksync.core.consensus; import "zksync/std.proto"; -// (public key, ip address) of a gossip network node. +// (public key, host address) of a gossip network node. message NodeAddr { optional string key = 1; // required; NodePublicKey - optional string addr = 2; // required; IpAddr + optional string addr = 2; // required; Host } // Weighted member of a validator committee. @@ -58,6 +58,7 @@ message GenesisSpec { repeated WeightedAttester attesters = 5; // can be empty; attester committee. // Currently not in consensus genesis, but still a part of the global configuration. optional string registry_address = 6; // optional; H160 + repeated NodeAddr seed_peers = 7; } // Per peer connection RPC rate limits. diff --git a/core/lib/protobuf_config/src/prover_job_monitor.rs b/core/lib/protobuf_config/src/prover_job_monitor.rs index a1c5a7c05995..a174d0882406 100644 --- a/core/lib/protobuf_config/src/prover_job_monitor.rs +++ b/core/lib/protobuf_config/src/prover_job_monitor.rs @@ -95,6 +95,9 @@ impl ProtoRepr for proto::ProverJobMonitor { .or_else(|| Some(Self::Type::default_witness_job_queuer_run_interval_ms())), ) .context("witness_job_queuer_run_interval_ms")?, + http_port: required(&self.http_port) + .and_then(|x| Ok((*x).try_into()?)) + .context("http_port")?, }) } @@ -126,6 +129,7 @@ impl ProtoRepr for proto::ProverJobMonitor { this.witness_generator_queue_reporter_run_interval_ms, ), witness_job_queuer_run_interval_ms: Some(this.witness_job_queuer_run_interval_ms), + http_port: Some(this.http_port.into()), } } } diff --git a/core/lib/prover_interface/Cargo.toml b/core/lib/prover_interface/Cargo.toml index 8c73c2c6ac38..889b80b4fbee 100644 --- a/core/lib/prover_interface/Cargo.toml +++ b/core/lib/prover_interface/Cargo.toml @@ -20,7 +20,7 @@ circuit_sequencer_api_1_5_0.workspace = true serde.workspace = true strum = { workspace = true, features = ["derive"] } -serde_with = { workspace = true, features = ["base64"] } +serde_with = { workspace = true, features = ["base64", "hex"] } chrono = { workspace = true, features = ["serde"] } [dev-dependencies] diff --git a/core/lib/prover_interface/src/api.rs b/core/lib/prover_interface/src/api.rs index bc95345bbbaa..776cd3141cbe 100644 --- a/core/lib/prover_interface/src/api.rs +++ b/core/lib/prover_interface/src/api.rs @@ -2,6 +2,7 @@ //! This module defines the types used in the API. 
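For context on the `serde_with` changes in this file and in `outputs.rs` below: the `Hex` adapter turns a `Vec<u8>` into a hex string in JSON instead of an array of numbers. A self-contained sketch of the mechanism (struct and field names are illustrative only, not part of this diff):

use serde::{Deserialize, Serialize};
use serde_with::{hex::Hex, serde_as};

#[serde_as]
#[derive(Debug, PartialEq, Serialize, Deserialize)]
struct Blob {
    // Encoded as "deadbeef" rather than [222,173,190,239]; decoding accepts either case.
    #[serde_as(as = "Hex")]
    data: Vec<u8>,
}

fn main() {
    let blob = Blob { data: vec![0xDE, 0xAD, 0xBE, 0xEF] };
    let json = serde_json::to_string(&blob).unwrap();
    assert_eq!(json, r#"{"data":"deadbeef"}"#);
    assert_eq!(serde_json::from_str::<Blob>(&json).unwrap(), blob);
}

This is why the `job_serialization.rs` test fixture further down switches from JSON byte arrays to hex strings such as "0001020304".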
use serde::{Deserialize, Serialize}; +use serde_with::{hex::Hex, serde_as}; use zksync_types::{ protocol_version::{L1VerifierConfig, ProtocolSemanticVersion}, tee_types::TeeType, @@ -71,8 +72,11 @@ pub struct VerifyProofRequest(pub Box<L1BatchProofForL1>); #[derive(Debug, PartialEq, Serialize, Deserialize)] pub struct SubmitTeeProofRequest(pub Box<L1BatchTeeProofForL1>); +#[serde_as] #[derive(Debug, PartialEq, Serialize, Deserialize)] pub struct RegisterTeeAttestationRequest { + #[serde_as(as = "Hex")] pub attestation: Vec<u8>, + #[serde_as(as = "Hex")] pub pubkey: Vec<u8>, } diff --git a/core/lib/prover_interface/src/outputs.rs b/core/lib/prover_interface/src/outputs.rs index 9672bfb2142b..60a9eaba760b 100644 --- a/core/lib/prover_interface/src/outputs.rs +++ b/core/lib/prover_interface/src/outputs.rs @@ -2,6 +2,7 @@ use core::fmt; use circuit_sequencer_api_1_5_0::proof::FinalProof; use serde::{Deserialize, Serialize}; +use serde_with::{hex::Hex, serde_as}; use zksync_object_store::{serialize_using_bincode, Bucket, StoredObject}; use zksync_types::{protocol_version::ProtocolSemanticVersion, tee_types::TeeType, L1BatchNumber}; @@ -14,14 +15,18 @@ pub struct L1BatchProofForL1 { } /// A "final" TEE proof that can be sent to the L1 contract. +#[serde_as] #[derive(Clone, PartialEq, Serialize, Deserialize)] pub struct L1BatchTeeProofForL1 { // signature generated within the TEE enclave, using the privkey corresponding to the pubkey + #[serde_as(as = "Hex")] pub signature: Vec<u8>, // pubkey used for signature verification; each key pair is attested by the TEE attestation // stored in the db + #[serde_as(as = "Hex")] pub pubkey: Vec<u8>, // data that was signed + #[serde_as(as = "Hex")] pub proof: Vec<u8>, // type of TEE used for attestation pub tee_type: TeeType, diff --git a/core/lib/prover_interface/tests/job_serialization.rs b/core/lib/prover_interface/tests/job_serialization.rs index a2aee0c2733e..ead59749abe3 100644 --- a/core/lib/prover_interface/tests/job_serialization.rs +++ b/core/lib/prover_interface/tests/job_serialization.rs @@ -167,9 +167,9 @@ fn test_proof_request_serialization() { #[test] fn test_tee_proof_request_serialization() { let tee_proof_str = r#"{ - "signature": [ 0, 1, 2, 3, 4 ], - "pubkey": [ 5, 6, 7, 8, 9 ], - "proof": [ 10, 11, 12, 13, 14 ], + "signature": "0001020304", + "pubkey": "0506070809", + "proof": "0A0B0C0D0E", "tee_type": "sgx" }"#; let tee_proof_result = serde_json::from_str::<SubmitTeeProofRequest>(tee_proof_str).unwrap(); diff --git a/core/lib/types/Cargo.toml b/core/lib/types/Cargo.toml index 55cbef761ad5..54c38384a7ad 100644 --- a/core/lib/types/Cargo.toml +++ b/core/lib/types/Cargo.toml @@ -28,6 +28,7 @@ once_cell.workspace = true rlp.workspace = true serde.workspace = true serde_json.workspace = true +serde_with = { workspace = true, features = ["hex"] } bigdecimal.workspace = true strum = { workspace = true, features = ["derive"] } thiserror.workspace = true diff --git a/core/lib/types/src/api/mod.rs b/core/lib/types/src/api/mod.rs index 916fae6a35bc..f648204ca557 100644 --- a/core/lib/types/src/api/mod.rs +++ b/core/lib/types/src/api/mod.rs @@ -1,6 +1,7 @@ use chrono::{DateTime, Utc}; use serde::{de, Deserialize, Deserializer, Serialize, Serializer}; use serde_json::Value; +use serde_with::{hex::Hex, serde_as}; use strum::Display; use zksync_basic_types::{ tee_types::TeeType, @@ -784,15 +785,20 @@ pub struct Proof { pub storage_proof: Vec<StorageProof>, } +#[serde_as] #[derive(Debug, Clone, Serialize, Deserialize)] #[serde(rename_all = "camelCase")] pub struct TeeProof { pub l1_batch_number: L1BatchNumber, pub tee_type: Option<TeeType>, + #[serde_as(as 
= "Option")] pub pubkey: Option>, + #[serde_as(as = "Option")] pub signature: Option>, + #[serde_as(as = "Option")] pub proof: Option>, pub proved_at: DateTime, + #[serde_as(as = "Option")] pub attestation: Option>, } diff --git a/core/lib/types/src/l1/mod.rs b/core/lib/types/src/l1/mod.rs index 05f08987a2d3..e8144c75db2e 100644 --- a/core/lib/types/src/l1/mod.rs +++ b/core/lib/types/src/l1/mod.rs @@ -274,7 +274,9 @@ impl From for abi::NewPriorityRequest { transaction: abi::L2CanonicalTransaction { tx_type: PRIORITY_OPERATION_L2_TX_TYPE.into(), from: address_to_u256(&t.common_data.sender), - to: address_to_u256(&t.execute.contract_address), + // Unwrap used here because the contract address should always be present for L1 transactions. + // TODO: Consider restricting the contract address to not be optional in L1Tx. + to: address_to_u256(&t.execute.contract_address.unwrap()), gas_limit: t.common_data.gas_limit, gas_per_pubdata_byte_limit: t.common_data.gas_per_pubdata_limit, max_fee_per_gas: t.common_data.max_fee_per_gas, @@ -345,7 +347,7 @@ impl TryFrom for L1Tx { }; let execute = Execute { - contract_address: u256_to_account_address(&req.transaction.to), + contract_address: Some(u256_to_account_address(&req.transaction.to)), calldata: req.transaction.data, factory_deps: req.factory_deps, value: req.transaction.value, diff --git a/core/lib/types/src/l2/mod.rs b/core/lib/types/src/l2/mod.rs index 5a5276407529..036d2a7a036d 100644 --- a/core/lib/types/src/l2/mod.rs +++ b/core/lib/types/src/l2/mod.rs @@ -153,7 +153,7 @@ pub struct L2Tx { impl L2Tx { #[allow(clippy::too_many_arguments)] pub fn new( - contract_address: Address, + contract_address: Option
, calldata: Vec<u8>, nonce: Nonce, fee: Fee, @@ -185,7 +185,7 @@ impl L2Tx { #[allow(clippy::too_many_arguments)] pub fn new_signed( - contract_address: Address, + contract_address: Option<Address>
, calldata: Vec<u8>, nonce: Nonce, fee: Fee, @@ -232,7 +232,7 @@ impl L2Tx { } /// Returns recipient account of the transaction. - pub fn recipient_account(&self) -> Address { + pub fn recipient_account(&self) -> Option<Address>
{ self.execute.contract_address } @@ -324,7 +324,7 @@ impl From<L2Tx> for TransactionRequest { let mut base_tx_req = TransactionRequest { nonce: U256::from(tx.common_data.nonce.0), from: Some(tx.common_data.initiator_address), - to: Some(tx.recipient_account()), + to: tx.recipient_account(), value: tx.execute.value, gas_price: tx.common_data.fee.max_fee_per_gas, max_priority_fee_per_gas: None, @@ -400,7 +400,7 @@ impl From<L2Tx> for api::Transaction { chain_id: U256::from(tx.common_data.extract_chain_id().unwrap_or_default()), nonce: U256::from(tx.common_data.nonce.0), from: Some(tx.common_data.initiator_address), - to: Some(tx.recipient_account()), + to: tx.recipient_account(), value: tx.execute.value, gas_price: Some(tx.common_data.fee.max_fee_per_gas), max_priority_fee_per_gas: Some(tx.common_data.fee.max_priority_fee_per_gas), diff --git a/core/lib/types/src/lib.rs b/core/lib/types/src/lib.rs index 402e16afd435..86b2e3f03d51 100644 --- a/core/lib/types/src/lib.rs +++ b/core/lib/types/src/lib.rs @@ -104,7 +104,7 @@ impl Eq for Transaction {} impl Transaction { /// Returns recipient account of the transaction. - pub fn recipient_account(&self) -> Address { + pub fn recipient_account(&self) -> Option<Address>
{ self.execute.contract_address } @@ -253,7 +253,7 @@ impl TryFrom<Transaction> for abi::Transaction { tx: abi::L2CanonicalTransaction { tx_type: PRIORITY_OPERATION_L2_TX_TYPE.into(), from: address_to_u256(&data.sender), - to: address_to_u256(&tx.execute.contract_address), + to: address_to_u256(&tx.execute.contract_address.unwrap_or_default()), gas_limit: data.gas_limit, gas_per_pubdata_byte_limit: data.gas_per_pubdata_limit, max_fee_per_gas: data.max_fee_per_gas, @@ -284,7 +284,7 @@ impl TryFrom<Transaction> for abi::Transaction { tx: abi::L2CanonicalTransaction { tx_type: PROTOCOL_UPGRADE_TX_TYPE.into(), from: address_to_u256(&data.sender), - to: address_to_u256(&tx.execute.contract_address), + to: address_to_u256(&tx.execute.contract_address.unwrap_or_default()), gas_limit: data.gas_limit, gas_per_pubdata_byte_limit: data.gas_per_pubdata_limit, max_fee_per_gas: data.max_fee_per_gas, @@ -377,7 +377,7 @@ impl TryFrom<abi::Transaction> for Transaction { unknown_type => anyhow::bail!("unknown tx type {unknown_type}"), }, execute: Execute { - contract_address: u256_to_account_address(&tx.to), + contract_address: Some(u256_to_account_address(&tx.to)), calldata: tx.data, factory_deps, value: tx.value, diff --git a/core/lib/types/src/transaction_request.rs b/core/lib/types/src/transaction_request.rs index c71e6e4206c5..5f26b1d6a6a5 100644 --- a/core/lib/types/src/transaction_request.rs +++ b/core/lib/types/src/transaction_request.rs @@ -108,8 +108,8 @@ impl CallRequestBuilder { } /// Set to address (None allowed for eth_estimateGas) - pub fn to(mut self, to: Address) -> Self { - self.call_request.to = Some(to); + pub fn to(mut self, to: Option<Address>
) -> Self { + self.call_request.to = to; self } @@ -817,10 +817,13 @@ impl L2Tx { let meta = value.eip712_meta.take().unwrap_or_default(); validate_factory_deps(&meta.factory_deps)?; + // TODO: Remove this check when evm equivalence gets enabled + if value.to.is_none() { + return Err(SerializationTransactionError::ToAddressIsNull); + } + let mut tx = L2Tx::new( - value - .to - .ok_or(SerializationTransactionError::ToAddressIsNull)?, + value.to, value.input.0.clone(), nonce, fee, diff --git a/core/lib/types/src/tx/execute.rs b/core/lib/types/src/tx/execute.rs index 03762040a6b8..c133261bc232 100644 --- a/core/lib/types/src/tx/execute.rs +++ b/core/lib/types/src/tx/execute.rs @@ -15,7 +15,7 @@ use crate::{ethabi, Address, EIP712TypedStructure, StructBuilder, H256, U256}; #[derive(Serialize, Deserialize)] #[serde(rename_all = "camelCase")] struct ExecuteSerde { - contract_address: Address, + contract_address: Option<Address>
, #[serde(with = "ZeroPrefixHexSerde")] calldata: Vec<u8>, value: U256, @@ -25,7 +25,7 @@ /// `Execute` transaction executes a previously deployed smart contract in the L2 rollup. #[derive(Clone, Default, PartialEq)] pub struct Execute { - pub contract_address: Address, + pub contract_address: Option<Address>
, pub calldata: Vec<u8>, pub value: U256, /// Factory dependencies: list of contract bytecodes associated with the deploy transaction. @@ -72,7 +72,9 @@ impl EIP712TypedStructure for Execute { const TYPE_NAME: &'static str = "Transaction"; fn build_structure<BUILDER: StructBuilder>(&self, builder: &mut BUILDER) { - builder.add_member("to", &U256::from(self.contract_address.as_bytes())); + if let Some(contract_address) = self.contract_address { + builder.add_member("to", &contract_address); + } builder.add_member("value", &self.value); builder.add_member("data", &self.calldata.as_slice()); // Factory deps are not included into the transaction signature, since they are parsed from the diff --git a/core/lib/vlog/src/lib.rs b/core/lib/vlog/src/lib.rs index 268fbd0b39eb..598d17879b84 100644 --- a/core/lib/vlog/src/lib.rs +++ b/core/lib/vlog/src/lib.rs @@ -4,6 +4,7 @@ use std::time::Duration; use ::sentry::ClientInitGuard; +use anyhow::Context as _; use tracing_subscriber::{layer::SubscriberExt, util::SubscriberInitExt}; pub use crate::{logs::Logs, opentelemetry::OpenTelemetry, sentry::Sentry}; @@ -126,8 +127,9 @@ impl ObservabilityBuilder { self } - /// Initializes the observability subsystem. - pub fn build(self) -> ObservabilityGuard { + /// Tries to initialize the observability subsystem. Returns an error if it's already initialized. + /// This is mostly useful in tests. + pub fn try_build(self) -> anyhow::Result<ObservabilityGuard> { let logs = self.logs.unwrap_or_default(); logs.install_panic_hook(); @@ -151,14 +153,20 @@ impl ObservabilityBuilder { .with(logs_layer) .with(otlp_tracing_layer) .with(otlp_logging_layer) - .init(); + .try_init() + .context("failed installing global tracer / logger")?; let sentry_guard = self.sentry.map(|sentry| sentry.install()); - ObservabilityGuard { + Ok(ObservabilityGuard { otlp_tracing_provider, otlp_logging_provider, sentry_guard, - } + }) + } + + /// Initializes the observability subsystem. 
+ pub fn build(self) -> ObservabilityGuard { + self.try_build().unwrap() } } diff --git a/core/node/api_server/src/execution_sandbox/tests.rs b/core/node/api_server/src/execution_sandbox/tests.rs index 35103779a49e..79c5a7330384 100644 --- a/core/node/api_server/src/execution_sandbox/tests.rs +++ b/core/node/api_server/src/execution_sandbox/tests.rs @@ -236,7 +236,7 @@ fn create_transfer(fee_per_gas: u64, gas_per_pubdata: u64) -> L2Tx { gas_per_pubdata_limit: gas_per_pubdata.into(), }; L2Tx::new_signed( - Address::random(), + Some(Address::random()), vec![], Nonce(0), fee, diff --git a/core/node/api_server/src/web3/testonly.rs b/core/node/api_server/src/web3/testonly.rs index a77498d4341d..18ee3a641d0a 100644 --- a/core/node/api_server/src/web3/testonly.rs +++ b/core/node/api_server/src/web3/testonly.rs @@ -182,7 +182,7 @@ async fn spawn_server( let (pub_sub_events_sender, pub_sub_events_receiver) = mpsc::unbounded_channel(); let mut namespaces = Namespace::DEFAULT.to_vec(); - namespaces.extend([Namespace::Debug, Namespace::Snapshots]); + namespaces.extend([Namespace::Debug, Namespace::Snapshots, Namespace::Unstable]); let server_builder = match transport { ApiTransportLabel::Http => ApiBuilder::jsonrpsee_backend(api_config, pool).http(0), diff --git a/core/node/api_server/src/web3/tests/mod.rs b/core/node/api_server/src/web3/tests/mod.rs index 635620e9c525..fe90f1483a5a 100644 --- a/core/node/api_server/src/web3/tests/mod.rs +++ b/core/node/api_server/src/web3/tests/mod.rs @@ -63,6 +63,7 @@ use crate::web3::testonly::{spawn_http_server, spawn_ws_server}; mod debug; mod filters; mod snapshots; +mod unstable; mod vm; mod ws; diff --git a/core/node/api_server/src/web3/tests/unstable.rs b/core/node/api_server/src/web3/tests/unstable.rs new file mode 100644 index 000000000000..1d425f8b9515 --- /dev/null +++ b/core/node/api_server/src/web3/tests/unstable.rs @@ -0,0 +1,69 @@ +//! Tests for the `unstable` Web3 namespace. 
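A sketch of how the endpoint exercised by the new test file below might be called from client code, assuming a handle implementing the `UnstableNamespaceClient` trait it imports (the helper name and error handling are illustrative, not part of this diff):

use zksync_types::{tee_types::TeeType, L1BatchNumber};
use zksync_web3_decl::namespaces::UnstableNamespaceClient;

// Hypothetical helper: fetch SGX proofs for one batch and report how many exist.
async fn count_sgx_proofs<C: UnstableNamespaceClient + Sync>(
    client: &C,
    batch: L1BatchNumber,
) -> anyhow::Result<usize> {
    // `pubkey`, `signature`, `proof` and `attestation` travel as hex strings on the wire.
    let proofs = client.tee_proofs(batch, Some(TeeType::Sgx)).await?;
    Ok(proofs.len())
}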
+ +use zksync_types::tee_types::TeeType; +use zksync_web3_decl::namespaces::UnstableNamespaceClient; + +use super::*; + +#[derive(Debug)] +struct GetTeeProofsTest {} + +impl GetTeeProofsTest { + fn new() -> Self { + Self {} + } +} + +#[async_trait] +impl HttpTest for GetTeeProofsTest { + async fn test( + &self, + client: &DynClient<L2>, + pool: &ConnectionPool<Core>, + ) -> anyhow::Result<()> { + let batch_no = L1BatchNumber(1337); + let tee_type = TeeType::Sgx; + let proof = client.tee_proofs(batch_no, Some(tee_type)).await?; + + assert!(proof.is_empty()); + + let mut storage = pool.connection().await.unwrap(); + storage + .tee_verifier_input_producer_dal() + .create_tee_verifier_input_producer_job(batch_no) + .await?; + + let pubkey = vec![0xDE, 0xAD, 0xBE, 0xEF]; + let attestation = vec![0xC0, 0xFF, 0xEE]; + let mut tee_proof_generation_dal = storage.tee_proof_generation_dal(); + tee_proof_generation_dal + .save_attestation(&pubkey, &attestation) + .await?; + tee_proof_generation_dal + .insert_tee_proof_generation_job(batch_no, tee_type) + .await?; + + let signature = vec![0, 1, 2, 3, 4]; + let proof_vec = vec![5, 6, 7, 8, 9]; + tee_proof_generation_dal + .save_proof_artifacts_metadata(batch_no, tee_type, &pubkey, &signature, &proof_vec) + .await?; + + let proofs = client.tee_proofs(batch_no, Some(tee_type)).await?; + assert!(proofs.len() == 1); + let proof = &proofs[0]; + assert!(proof.l1_batch_number == batch_no); + assert!(proof.tee_type == Some(tee_type)); + assert!(proof.pubkey.as_ref() == Some(&pubkey)); + assert!(proof.signature.as_ref() == Some(&signature)); + assert!(proof.proof.as_ref() == Some(&proof_vec)); + assert!(proof.attestation.as_ref() == Some(&attestation)); + + Ok(()) + } +} + +#[tokio::test] +async fn get_tee_proofs() { + test_http_server(GetTeeProofsTest::new()).await; +} diff --git a/core/node/consensus/src/config.rs b/core/node/consensus/src/config.rs index 22f8fc01192f..cada58b0756f 100644 --- a/core/node/consensus/src/config.rs +++ b/core/node/consensus/src/config.rs @@ -1,5 +1,5 @@ //! Configuration utilities for the consensus component. -use std::collections::HashMap; +use std::collections::{BTreeMap, HashMap}; use anyhow::Context as _; use secrecy::{ExposeSecret as _, Secret}; @@ -44,6 +44,7 @@ pub(super) struct GenesisSpec { pub(super) attesters: Option<attester::Committee>, pub(super) leader_selection: validator::LeaderSelectionMode, pub(super) registry_address: Option<ethabi::Address>, + pub(super) seed_peers: BTreeMap<node::PublicKey, net::Host>, } impl GenesisSpec { @@ -55,6 +56,7 @@ impl GenesisSpec { attesters: cfg.genesis.attesters.clone(), leader_selection: cfg.genesis.leader_selection.clone(), registry_address: cfg.registry_address, + seed_peers: cfg.seed_peers.clone(), } } @@ -98,6 +100,19 @@ impl GenesisSpec { Some(attester::Committee::new(attesters).context("attesters")?) }, registry_address: x.registry_address, + seed_peers: x + .seed_peers + .iter() + .map(|(key, addr)| { + anyhow::Ok(( + Text::new(&key.0) + .decode::<node::PublicKey>() + .context("key")?, + net::Host(addr.0.clone()), + )) + }) + .collect::<Result<_, _>>() + .context("seed_peers")?, }) } } @@ -109,9 +124,18 @@ pub(super) fn node_key(secrets: &ConsensusSecrets) -> anyhow::Result<Option<node::SecretKey>> pub(super) fn executor( cfg: &ConsensusConfig, secrets: &ConsensusSecrets, + global_config: &consensus_dal::GlobalConfig, build_version: Option<semver::Version>, ) -> anyhow::Result<executor::Config> { - let mut gossip_static_outbound = HashMap::new(); + // Always connect to seed peers. + // Once we implement dynamic peer discovery, + // we won't establish a persistent connection to seed peers + // but rather just ask them for more peers. 
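The comment closing the hunk above states the policy; mechanically, the executor now seeds its static outbound gossip map from the global config before layering on the node's explicitly configured peers, as in this simplified sketch (stand-in string types instead of node::PublicKey and net::Host):

use std::collections::{BTreeMap, HashMap};

fn main() {
    // Seed peers from the global config (BTreeMap keeps them deterministically ordered).
    let seed_peers: BTreeMap<&str, &str> = [("pk-seed", "seed.example.com:3054")].into();

    // Start from the seed peers...
    let mut gossip_static_outbound: HashMap<&str, &str> =
        seed_peers.iter().map(|(k, v)| (*k, *v)).collect();
    // ...then add peers listed directly in the node's own config.
    gossip_static_outbound.insert("pk-local", "peer.example.com:3054");

    assert_eq!(gossip_static_outbound.len(), 2);
}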
+ let mut gossip_static_outbound: HashMap<_, _> = global_config + .seed_peers + .iter() + .map(|(k, v)| (k.clone(), v.clone())) + .collect(); { let mut append = |key: &NodePublicKey, addr: &Host| { gossip_static_outbound.insert( diff --git a/core/node/consensus/src/en.rs b/core/node/consensus/src/en.rs index a52393c0f488..0b78662f8c25 100644 --- a/core/node/consensus/src/en.rs +++ b/core/node/consensus/src/en.rs @@ -125,7 +125,7 @@ impl EN { )); let executor = executor::Executor { - config: config::executor(&cfg, &secrets, build_version)?, + config: config::executor(&cfg, &secrets, &global_config, build_version)?, block_store, batch_store, validator: config::validator_key(&secrets) @@ -304,6 +304,7 @@ impl EN { Ok(consensus_dal::GlobalConfig { genesis: zksync_protobuf::serde::deserialize(&genesis.0).context("deserialize()")?, registry_address: None, + seed_peers: [].into(), }) } diff --git a/core/node/consensus/src/mn.rs b/core/node/consensus/src/mn.rs index 4d428346ebe4..f80bfe58954c 100644 --- a/core/node/consensus/src/mn.rs +++ b/core/node/consensus/src/mn.rs @@ -76,12 +76,12 @@ pub async fn run_main_node( s.spawn_bg(run_attestation_controller( ctx, &pool, - global_config, + global_config.clone(), attestation.clone(), )); let executor = executor::Executor { - config: config::executor(&cfg, &secrets, None)?, + config: config::executor(&cfg, &secrets, &global_config, None)?, block_store, batch_store, validator: Some(executor::Validator { diff --git a/core/node/consensus/src/registry/testonly.rs b/core/node/consensus/src/registry/testonly.rs index a0c55a557feb..07a87e3b676e 100644 --- a/core/node/consensus/src/registry/testonly.rs +++ b/core/node/consensus/src/registry/testonly.rs @@ -13,7 +13,7 @@ pub(crate) fn make_tx( ) -> Transaction { account.get_l2_tx_for_execute( Execute { - contract_address: *address, + contract_address: Some(*address), calldata: call.calldata().unwrap(), value: U256::zero(), factory_deps: vec![], diff --git a/core/node/consensus/src/storage/connection.rs b/core/node/consensus/src/storage/connection.rs index 512b37e81a11..2c297eed7275 100644 --- a/core/node/consensus/src/storage/connection.rs +++ b/core/node/consensus/src/storage/connection.rs @@ -317,6 +317,7 @@ impl<'a> Connection<'a> { } .with_hash(), registry_address: spec.registry_address, + seed_peers: spec.seed_peers.clone(), }; txn.try_update_global_config(ctx, &new) diff --git a/core/node/consensus/src/testonly.rs b/core/node/consensus/src/testonly.rs index 241998f26928..bcd22186a4bc 100644 --- a/core/node/consensus/src/testonly.rs +++ b/core/node/consensus/src/testonly.rs @@ -91,11 +91,8 @@ impl ConfigSet { } } -pub(super) fn new_configs( - rng: &mut impl Rng, - setup: &Setup, - gossip_peers: usize, -) -> Vec { +pub(super) fn new_configs(rng: &mut impl Rng, setup: &Setup, seed_peers: usize) -> Vec { + let net_cfgs = network::testonly::new_configs(rng, setup, 0); let genesis_spec = config::GenesisSpec { chain_id: setup.genesis.chain_id.0.try_into().unwrap(), protocol_version: config::ProtocolVersion(setup.genesis.protocol_version.0), @@ -117,8 +114,17 @@ pub(super) fn new_configs( .collect(), leader: config::ValidatorPublicKey(setup.validator_keys[0].public().encode()), registry_address: None, + seed_peers: net_cfgs[..seed_peers] + .iter() + .map(|c| { + ( + config::NodePublicKey(c.gossip.key.public().encode()), + config::Host(c.public_addr.0.clone()), + ) + }) + .collect(), }; - network::testonly::new_configs(rng, setup, gossip_peers) + net_cfgs .into_iter() .enumerate() .map(|(i, net)| ConfigSet { diff 
--git a/core/node/consensus/src/tests/attestation.rs b/core/node/consensus/src/tests/attestation.rs index abd35508c7f7..e783dbecdc35 100644 --- a/core/node/consensus/src/tests/attestation.rs +++ b/core/node/consensus/src/tests/attestation.rs @@ -47,6 +47,7 @@ async fn test_attestation_status_api(version: ProtocolVersionId) { &consensus_dal::GlobalConfig { genesis: setup.genesis.clone(), registry_address: None, + seed_peers: [].into(), }, ) .await diff --git a/core/node/consensus/src/tests/mod.rs b/core/node/consensus/src/tests/mod.rs index 91f01f865a2b..aabfff462a81 100644 --- a/core/node/consensus/src/tests/mod.rs +++ b/core/node/consensus/src/tests/mod.rs @@ -49,6 +49,7 @@ async fn test_validator_block_store(version: ProtocolVersionId) { &consensus_dal::GlobalConfig { genesis: setup.genesis.clone(), registry_address: None, + seed_peers: [].into(), }, ) .await diff --git a/core/node/consensus/src/vm.rs b/core/node/consensus/src/vm.rs index 11b6b5c67e3b..c93cafc09f9c 100644 --- a/core/node/consensus/src/vm.rs +++ b/core/node/consensus/src/vm.rs @@ -56,7 +56,7 @@ impl VM { call: abi::Call, ) -> ctx::Result { let tx = L2Tx::new( - *address, + Some(*address), call.calldata().context("call.calldata()")?, Nonce(0), Fee { diff --git a/core/node/eth_sender/src/eth_fees_oracle.rs b/core/node/eth_sender/src/eth_fees_oracle.rs index 271a33d49c32..0c708fee3698 100644 --- a/core/node/eth_sender/src/eth_fees_oracle.rs +++ b/core/node/eth_sender/src/eth_fees_oracle.rs @@ -35,13 +35,24 @@ pub(crate) struct GasAdjusterFeesOracle { } impl GasAdjusterFeesOracle { + fn assert_fee_is_not_zero(&self, value: u64, fee_type: &'static str) { + if value == 0 { + panic!( + "L1 RPC incorrectly reported {fee_type} prices, either it doesn't return them at \ + all or returns 0's, eth-sender cannot continue without proper {fee_type} prices!" 
+            );
+        }
+    }
     fn calculate_fees_with_blob_sidecar(
         &self,
         previous_sent_tx: &Option<TxHistory>,
     ) -> Result<EthFees, EthSenderError> {
         let base_fee_per_gas = self.gas_adjuster.get_blob_tx_base_fee();
+        self.assert_fee_is_not_zero(base_fee_per_gas, "base");
         let priority_fee_per_gas = self.gas_adjuster.get_blob_tx_priority_fee();
-        let blob_base_fee_per_gas = Some(self.gas_adjuster.get_blob_tx_blob_base_fee());
+        let blob_base_fee_per_gas = self.gas_adjuster.get_blob_tx_blob_base_fee();
+        self.assert_fee_is_not_zero(blob_base_fee_per_gas, "blob");
+        let blob_base_fee_per_gas = Some(blob_base_fee_per_gas);
 
         if let Some(previous_sent_tx) = previous_sent_tx {
             // for blob transactions on re-sending need to double all gas prices
@@ -72,6 +83,7 @@ impl GasAdjusterFeesOracle {
         time_in_mempool: u32,
     ) -> Result<EthFees, EthSenderError> {
         let mut base_fee_per_gas = self.gas_adjuster.get_base_fee(time_in_mempool);
+        self.assert_fee_is_not_zero(base_fee_per_gas, "base");
         if let Some(previous_sent_tx) = previous_sent_tx {
             self.verify_base_fee_not_too_low_on_resend(
                 previous_sent_tx.id,
diff --git a/core/node/eth_sender/src/tester.rs b/core/node/eth_sender/src/tester.rs
index c6d993a9c97f..9be1384daae2 100644
--- a/core/node/eth_sender/src/tester.rs
+++ b/core/node/eth_sender/src/tester.rs
@@ -153,7 +153,7 @@ impl EthSenderTester {
             .into_iter()
             .map(|base_fee_per_gas| BaseFees {
                 base_fee_per_gas,
-                base_fee_per_blob_gas: 0.into(),
+                base_fee_per_blob_gas: 1.into(),
                 l2_pubdata_price: 0.into(),
             })
             .collect();
@@ -161,8 +161,8 @@ impl EthSenderTester {
         let gateway = MockSettlementLayer::builder()
             .with_fee_history(
                 std::iter::repeat_with(|| BaseFees {
-                    base_fee_per_gas: 0,
-                    base_fee_per_blob_gas: 0.into(),
+                    base_fee_per_gas: 1,
+                    base_fee_per_blob_gas: 1.into(),
                     l2_pubdata_price: 0.into(),
                 })
                 .take(Self::WAIT_CONFIRMATIONS as usize)
@@ -181,8 +181,8 @@ impl EthSenderTester {
         let l2_gateway: MockSettlementLayer<L2> = MockSettlementLayer::builder()
             .with_fee_history(
                 std::iter::repeat_with(|| BaseFees {
-                    base_fee_per_gas: 0,
-                    base_fee_per_blob_gas: 0.into(),
+                    base_fee_per_gas: 1,
+                    base_fee_per_blob_gas: 1.into(),
                     l2_pubdata_price: 0.into(),
                 })
                 .take(Self::WAIT_CONFIRMATIONS as usize)
@@ -201,8 +201,8 @@ impl EthSenderTester {
         let gateway_blobs = MockSettlementLayer::builder()
             .with_fee_history(
                 std::iter::repeat_with(|| BaseFees {
-                    base_fee_per_gas: 0,
-                    base_fee_per_blob_gas: 0.into(),
+                    base_fee_per_gas: 1,
+                    base_fee_per_blob_gas: 1.into(),
                     l2_pubdata_price: 0.into(),
                 })
                 .take(Self::WAIT_CONFIRMATIONS as usize)
diff --git a/core/node/eth_watch/src/tests.rs b/core/node/eth_watch/src/tests.rs
index 7ae3b5494e98..e6e343f50bca 100644
--- a/core/node/eth_watch/src/tests.rs
+++ b/core/node/eth_watch/src/tests.rs
@@ -147,7 +147,7 @@ impl EthClient for MockEthClient {
 fn build_l1_tx(serial_id: u64, eth_block: u64) -> L1Tx {
     let tx = L1Tx {
         execute: Execute {
-            contract_address: Address::repeat_byte(0x11),
+            contract_address: Some(Address::repeat_byte(0x11)),
             calldata: vec![1, 2, 3],
             factory_deps: vec![],
             value: U256::zero(),
@@ -178,7 +178,7 @@
 fn build_upgrade_tx(id: ProtocolVersionId, eth_block: u64) -> ProtocolUpgradeTx {
     let tx = ProtocolUpgradeTx {
         execute: Execute {
-            contract_address: Address::repeat_byte(0x11),
+            contract_address: Some(Address::repeat_byte(0x11)),
             calldata: vec![1, 2, 3],
             factory_deps: vec![],
             value: U256::zero(),
diff --git a/core/node/fee_model/src/l1_gas_price/gas_adjuster/tests.rs b/core/node/fee_model/src/l1_gas_price/gas_adjuster/tests.rs
index 2643e4b3c424..47023203de0e 100644
---
a/core/node/fee_model/src/l1_gas_price/gas_adjuster/tests.rs +++ b/core/node/fee_model/src/l1_gas_price/gas_adjuster/tests.rs @@ -152,7 +152,7 @@ async fn kept_updated_l2(commitment_mode: L1BatchCommitmentMode) { .zip(TEST_PUBDATA_PRICES) .map(|(block, pubdata)| BaseFees { base_fee_per_gas: block, - base_fee_per_blob_gas: 0.into(), + base_fee_per_blob_gas: 1.into(), l2_pubdata_price: pubdata.into(), }) .collect(); diff --git a/core/node/proof_data_handler/src/tests.rs b/core/node/proof_data_handler/src/tests.rs index 6ab7e4dec436..86cc53234486 100644 --- a/core/node/proof_data_handler/src/tests.rs +++ b/core/node/proof_data_handler/src/tests.rs @@ -131,9 +131,9 @@ async fn submit_tee_proof() { // send a request to the /tee/submit_proofs endpoint, using a mocked TEE proof let tee_proof_request_str = r#"{ - "signature": [ 0, 1, 2, 3, 4 ], - "pubkey": [ 5, 6, 7, 8, 9 ], - "proof": [ 10, 11, 12, 13, 14 ], + "signature": "0001020304", + "pubkey": "0506070809", + "proof": "0A0B0C0D0E", "tee_type": "sgx" }"#; let tee_proof_request = diff --git a/core/node/state_keeper/src/executor/tests/tester.rs b/core/node/state_keeper/src/executor/tests/tester.rs index 8256435f2f5b..d524d1a20dd7 100644 --- a/core/node/state_keeper/src/executor/tests/tester.rs +++ b/core/node/state_keeper/src/executor/tests/tester.rs @@ -396,7 +396,7 @@ impl AccountLoadNextExecutable for Account { self.get_l2_tx_for_execute( Execute { - contract_address: address, + contract_address: Some(address), calldata: LoadnextContractExecutionParams { reads: 100, writes: writes as usize, @@ -432,7 +432,7 @@ impl AccountLoadNextExecutable for Account { self.get_l2_tx_for_execute( Execute { - contract_address: address, + contract_address: Some(address), calldata, value: Default::default(), factory_deps: vec![], diff --git a/core/node/state_keeper/src/testonly/mod.rs b/core/node/state_keeper/src/testonly/mod.rs index 23aec8af49fb..edcf3ccc4f5c 100644 --- a/core/node/state_keeper/src/testonly/mod.rs +++ b/core/node/state_keeper/src/testonly/mod.rs @@ -131,7 +131,7 @@ pub fn fee(gas_limit: u32) -> Fee { pub fn l2_transaction(account: &mut Account, gas_limit: u32) -> Transaction { account.get_l2_tx_for_execute( Execute { - contract_address: Address::random(), + contract_address: Some(Address::random()), calldata: vec![], value: Default::default(), factory_deps: vec![], @@ -143,7 +143,7 @@ pub fn l2_transaction(account: &mut Account, gas_limit: u32) -> Transaction { pub fn l1_transaction(account: &mut Account, serial_id: PriorityOpId) -> Transaction { account.get_l1_tx( Execute { - contract_address: Address::random(), + contract_address: Some(Address::random()), value: Default::default(), calldata: vec![], factory_deps: vec![], diff --git a/core/node/test_utils/src/lib.rs b/core/node/test_utils/src/lib.rs index acb65bf1634d..b9984b782111 100644 --- a/core/node/test_utils/src/lib.rs +++ b/core/node/test_utils/src/lib.rs @@ -138,7 +138,7 @@ pub fn create_l2_transaction(fee_per_gas: u64, gas_per_pubdata: u64) -> L2Tx { gas_per_pubdata_limit: gas_per_pubdata.into(), }; let mut tx = L2Tx::new_signed( - Address::random(), + Some(Address::random()), vec![], Nonce(0), fee, diff --git a/core/node/vm_runner/src/tests/mod.rs b/core/node/vm_runner/src/tests/mod.rs index 9fe9e99e92c8..53bef106a8f4 100644 --- a/core/node/vm_runner/src/tests/mod.rs +++ b/core/node/vm_runner/src/tests/mod.rs @@ -202,7 +202,7 @@ pub fn create_l2_transaction( }; let tx = account.get_l2_tx_for_execute( Execute { - contract_address: Address::random(), + contract_address: 
Some(Address::random()), calldata: vec![], value: Default::default(), factory_deps: vec![], diff --git a/core/tests/loadnext/src/sdk/operations/deploy_contract.rs b/core/tests/loadnext/src/sdk/operations/deploy_contract.rs index 161d156a53e9..67e877ae8efb 100644 --- a/core/tests/loadnext/src/sdk/operations/deploy_contract.rs +++ b/core/tests/loadnext/src/sdk/operations/deploy_contract.rs @@ -145,7 +145,7 @@ where let mut factory_deps = self.factory_deps.clone().unwrap_or_default(); factory_deps.push(bytecode); let l2_tx = L2Tx::new( - CONTRACT_DEPLOYER_ADDRESS, + Some(CONTRACT_DEPLOYER_ADDRESS), Execute::encode_deploy_params_create(Default::default(), main_contract_hash, calldata), Nonce(0), Default::default(), diff --git a/core/tests/loadnext/src/sdk/operations/execute_contract.rs b/core/tests/loadnext/src/sdk/operations/execute_contract.rs index d5fe57c7b79f..627e889ed012 100644 --- a/core/tests/loadnext/src/sdk/operations/execute_contract.rs +++ b/core/tests/loadnext/src/sdk/operations/execute_contract.rs @@ -144,7 +144,7 @@ where .unwrap_or_default(); let execute = L2Tx::new( - contract_address, + Some(contract_address), calldata, Nonce(0), Default::default(), diff --git a/core/tests/loadnext/src/sdk/operations/transfer.rs b/core/tests/loadnext/src/sdk/operations/transfer.rs index 94ee3aeb6082..651fabeb788b 100644 --- a/core/tests/loadnext/src/sdk/operations/transfer.rs +++ b/core/tests/loadnext/src/sdk/operations/transfer.rs @@ -153,7 +153,7 @@ where let tx = if token.is_zero() || token == L2_BASE_TOKEN_ADDRESS { // ETH estimate Execute { - contract_address: to, + contract_address: Some(to), calldata: Default::default(), factory_deps: vec![], value: amount, @@ -161,7 +161,7 @@ where } else { // ERC-20 estimate Execute { - contract_address: token, + contract_address: Some(token), calldata: create_transfer_calldata(to, amount), factory_deps: vec![], value: Default::default(), diff --git a/core/tests/loadnext/src/sdk/signer.rs b/core/tests/loadnext/src/sdk/signer.rs index 0f4b1cf29717..6f98f674ed95 100644 --- a/core/tests/loadnext/src/sdk/signer.rs +++ b/core/tests/loadnext/src/sdk/signer.rs @@ -51,7 +51,7 @@ impl Signer { // Sign Ether transfer if token.is_zero() || token == L2_BASE_TOKEN_ADDRESS { let mut transfer = L2Tx::new( - to, + Some(to), Default::default(), nonce, fee, @@ -73,7 +73,7 @@ impl Signer { // Sign ERC-20 transfer let data = create_transfer_calldata(to, amount); let mut transfer = L2Tx::new( - token, + Some(token), data, nonce, fee, @@ -122,7 +122,7 @@ impl Signer { paymaster_params: PaymasterParams, ) -> Result { let mut execute_contract = L2Tx::new( - contract, + Some(contract), calldata, nonce, fee, diff --git a/core/tests/recovery-test/src/index.ts b/core/tests/recovery-test/src/index.ts index 6599e7c5d298..462404af6065 100644 --- a/core/tests/recovery-test/src/index.ts +++ b/core/tests/recovery-test/src/index.ts @@ -271,7 +271,7 @@ export class FundedWallet { await depositTx.waitFinalize(); } - /** Generates at least one L1 batch by transfering funds to itself. */ + /** Generates at least one L1 batch by transferring funds to itself. 
*/ async generateL1Batch(): Promise { const transactionResponse = await this.wallet.transfer({ to: this.wallet.address, @@ -279,15 +279,15 @@ export class FundedWallet { token: zksync.utils.ETH_ADDRESS }); console.log('Generated a transaction from funded wallet', transactionResponse); - const receipt = await transactionResponse.wait(); - console.log('Got finalized transaction receipt', receipt); - // Wait until an L1 batch with the transaction is sealed. - const pastL1BatchNumber = await this.wallet.provider.getL1BatchNumber(); - let newL1BatchNumber: number; - while ((newL1BatchNumber = await this.wallet.provider.getL1BatchNumber()) <= pastL1BatchNumber) { + let receipt: zksync.types.TransactionReceipt; + while (!(receipt = await transactionResponse.wait()).l1BatchNumber) { + console.log('Transaction is not included in L1 batch; sleeping'); await sleep(1000); } + + console.log('Got finalized transaction receipt', receipt); + const newL1BatchNumber = receipt.l1BatchNumber; console.log(`Sealed L1 batch #${newL1BatchNumber}`); return newL1BatchNumber; } diff --git a/core/tests/test_account/src/lib.rs b/core/tests/test_account/src/lib.rs index 28e3d609e63d..d0c97abab729 100644 --- a/core/tests/test_account/src/lib.rs +++ b/core/tests/test_account/src/lib.rs @@ -129,7 +129,7 @@ impl Account { .expect("failed to encode parameters"); let execute = Execute { - contract_address: CONTRACT_DEPLOYER_ADDRESS, + contract_address: Some(CONTRACT_DEPLOYER_ADDRESS), calldata, factory_deps, value: U256::zero(), @@ -158,7 +158,7 @@ impl Account { tx: abi::L2CanonicalTransaction { tx_type: PRIORITY_OPERATION_L2_TX_TYPE.into(), from: address_to_u256(&self.address), - to: address_to_u256(&execute.contract_address), + to: address_to_u256(&execute.contract_address.unwrap_or_default()), gas_limit, gas_per_pubdata_byte_limit: REQUIRED_L1_TO_L2_GAS_PER_PUBDATA_BYTE.into(), max_fee_per_gas, @@ -216,7 +216,7 @@ impl Account { .expect("failed to encode parameters"); let execute = Execute { - contract_address: address, + contract_address: Some(address), calldata, value: value.unwrap_or_default(), factory_deps: vec![], @@ -235,7 +235,7 @@ impl Account { ) -> Transaction { let calldata = params.to_bytes(); let execute = Execute { - contract_address: address, + contract_address: Some(address), calldata, value: U256::zero(), factory_deps: vec![], diff --git a/core/tests/ts-integration/hardhat.config.ts b/core/tests/ts-integration/hardhat.config.ts index 42ee6bacf7ad..a96a83ca3ee3 100644 --- a/core/tests/ts-integration/hardhat.config.ts +++ b/core/tests/ts-integration/hardhat.config.ts @@ -20,8 +20,11 @@ export default { } }, solidity: { - version: '0.8.18', - eraVersion: '1.0.1' + version: '0.8.26', + eraVersion: '1.0.1', + settings: { + evmVersion: 'cancun' + } }, vyper: { version: '0.3.10' diff --git a/core/tests/ts-integration/jest.config.json b/core/tests/ts-integration/jest.config.json index 8fa5ea1eb721..1756de1bb02d 100644 --- a/core/tests/ts-integration/jest.config.json +++ b/core/tests/ts-integration/jest.config.json @@ -14,6 +14,7 @@ "testTimeout": 605000, "globalSetup": "/src/jest-setup/global-setup.ts", "globalTeardown": "/src/jest-setup/global-teardown.ts", + "testEnvironment": "/src/jest-setup/env.ts", "setupFilesAfterEnv": [ "/src/jest-setup/add-matchers.ts" ], diff --git a/core/tests/ts-integration/package.json b/core/tests/ts-integration/package.json index 9d53420edaad..8e5c0cf7470e 100644 --- a/core/tests/ts-integration/package.json +++ b/core/tests/ts-integration/package.json @@ -25,6 +25,7 @@ "ethers": 
"^6.7.1", "hardhat": "=2.22.2", "jest": "^29.0.3", + "jest-environment-node": "^29.0.3", "jest-matcher-utils": "^29.0.3", "node-fetch": "^2.6.1", "ts-jest": "^29.0.1", diff --git a/core/tests/ts-integration/src/jest-setup/env.ts b/core/tests/ts-integration/src/jest-setup/env.ts new file mode 100644 index 000000000000..77bbfc929111 --- /dev/null +++ b/core/tests/ts-integration/src/jest-setup/env.ts @@ -0,0 +1,14 @@ +import NodeEnvironment from 'jest-environment-node'; +import type { EnvironmentContext, JestEnvironmentConfig } from '@jest/environment'; + +export default class IntegrationTestEnvironment extends NodeEnvironment { + constructor(config: JestEnvironmentConfig, context: EnvironmentContext) { + super(config, context); + } + + override async setup() { + await super.setup(); + // Provide access to raw console in order to produce less cluttered debug messages + this.global.rawWriteToConsole = console.log; + } +} diff --git a/core/tests/ts-integration/src/jest-setup/global-setup.ts b/core/tests/ts-integration/src/jest-setup/global-setup.ts index d84d70fe69da..ffb1a8c35030 100644 --- a/core/tests/ts-integration/src/jest-setup/global-setup.ts +++ b/core/tests/ts-integration/src/jest-setup/global-setup.ts @@ -11,11 +11,12 @@ declare global { */ async function performSetup(_globalConfig: any, _projectConfig: any) { // Perform the test initialization. - // This is an expensive operation that preceeds running any tests, as we need + // This is an expensive operation that precedes running any tests, as we need // to deposit & distribute funds, deploy some contracts, and perform basic server checks. // Jest writes an initial message without a newline, so we have to do it manually. console.log(''); + globalThis.rawWriteToConsole = console.log; // Before starting any actual logic, we need to ensure that the server is running (it may not // be the case, for example, right after deployment on stage). diff --git a/core/tests/ts-integration/src/l1-provider.ts b/core/tests/ts-integration/src/l1-provider.ts new file mode 100644 index 000000000000..39b0397cd069 --- /dev/null +++ b/core/tests/ts-integration/src/l1-provider.ts @@ -0,0 +1,82 @@ +import { + ethers, + JsonRpcProvider, + Network, + TransactionRequest, + TransactionResponse, + TransactionResponseParams +} from 'ethers'; +import { Reporter } from './reporter'; +import { AugmentedTransactionResponse } from './transaction-response'; + +export class L1Provider extends JsonRpcProvider { + readonly reporter: Reporter; + + constructor(url: string, reporter?: Reporter) { + super(url, undefined, { batchMaxCount: 1 }); + this.reporter = reporter ?? 
new Reporter();
+    }
+
+    override _wrapTransactionResponse(tx: TransactionResponseParams, network: Network): L1TransactionResponse {
+        const base = super._wrapTransactionResponse(tx, network);
+        return new L1TransactionResponse(base, this.reporter);
+    }
+}
+
+class L1TransactionResponse extends ethers.TransactionResponse implements AugmentedTransactionResponse {
+    public readonly kind = 'L1';
+    private isWaitingReported: boolean = false;
+    private isReceiptReported: boolean = false;
+
+    constructor(base: ethers.TransactionResponse, public readonly reporter: Reporter) {
+        super(base, base.provider);
+    }
+
+    override async wait(confirmations?: number, timeout?: number) {
+        if (!this.isWaitingReported) {
+            this.reporter.debug(
+                `Started waiting for L1 transaction ${this.hash} (from=${this.from}, nonce=${this.nonce})`
+            );
+            this.isWaitingReported = true;
+        }
+
+        const receipt = await super.wait(confirmations, timeout);
+        if (receipt !== null && !this.isReceiptReported) {
+            this.reporter.debug(
+                `Obtained receipt for L1 transaction ${this.hash}: blockNumber=${receipt.blockNumber}, status=${receipt.status}`
+            );
+            this.isReceiptReported = true;
+        }
+        return receipt;
+    }
+
+    override replaceableTransaction(startBlock: number): L1TransactionResponse {
+        const base = super.replaceableTransaction(startBlock);
+        return new L1TransactionResponse(base, this.reporter);
+    }
+}
+
+/** Wallet that retries `sendTransaction` requests on "nonce expired" errors, provided that it's possible (i.e., no nonce is set in the request). */
+export class RetryableL1Wallet extends ethers.Wallet {
+    constructor(key: string, provider: L1Provider) {
+        super(key, provider);
+    }
+
+    override async sendTransaction(tx: TransactionRequest): Promise<TransactionResponse> {
+        const reporter = (<L1Provider>this.provider!).reporter;
+        while (true) {
+            try {
+                return await super.sendTransaction(tx);
+            } catch (err: any) {
+                // For unknown reason, `reth` sometimes returns outdated transaction count under load, leading to transactions getting rejected.
+                // This is a workaround for this issue.
+                reporter.debug('L1 transaction request failed', tx, err);
+                if (err.code === 'NONCE_EXPIRED' && (tx.nonce === null || tx.nonce === undefined)) {
+                    reporter.debug('Retrying L1 transaction request', tx);
+                } else {
+                    throw err;
+                }
+            }
+        }
+    }
+}
diff --git a/core/tests/ts-integration/src/matchers/transaction.ts b/core/tests/ts-integration/src/matchers/transaction.ts
index 89e90b6d5f16..ac5bf8e77eaf 100644
--- a/core/tests/ts-integration/src/matchers/transaction.ts
+++ b/core/tests/ts-integration/src/matchers/transaction.ts
@@ -1,7 +1,8 @@
 import { TestMessage } from './matcher-helpers';
 import { MatcherModifier } from '../modifiers';
 import * as zksync from 'zksync-ethers';
-import { AugmentedTransactionResponse } from '../retry-provider';
+import { AugmentedTransactionResponse } from '../transaction-response';
+import { ethers } from 'ethers';
 
 // This file contains implementation of matchers for ZKsync/ethereum transaction.
 // For actual doc-comments, see `typings/jest.d.ts` file.
@@ -207,7 +208,7 @@ function fail(message: string) {
  *
  * @returns If check has failed, returns a Jest error object. Otherwise, returns `undefined`.
*/ -function checkReceiptFields(request: zksync.types.TransactionResponse, receipt: zksync.types.TransactionReceipt) { +function checkReceiptFields(request: ethers.TransactionResponseParams, receipt: zksync.types.TransactionReceipt) { const errorMessageBuilder = new TestMessage() .matcherHint('.checkReceiptFields') .line('Transaction receipt is not properly formatted. Transaction request:') diff --git a/core/tests/ts-integration/src/reporter.ts b/core/tests/ts-integration/src/reporter.ts index 903ff3101ef9..e6a11f0725bf 100644 --- a/core/tests/ts-integration/src/reporter.ts +++ b/core/tests/ts-integration/src/reporter.ts @@ -102,7 +102,7 @@ export class Reporter { // Timestamps only make sense to include in tests. const timestampString = testName === undefined ? '' : timestamp(`${new Date().toISOString()} `); const testString = testName ? info(` [${testName}]`) : ''; - console.log(this.indent(`${timestampString}DEBUG${testString}: ${message}`), ...args); + rawWriteToConsole(this.indent(`${timestampString}DEBUG${testString}: ${message}`), ...args); } } diff --git a/core/tests/ts-integration/src/retry-provider.ts b/core/tests/ts-integration/src/retry-provider.ts index 1763c0e4edf5..51d88357c6c3 100644 --- a/core/tests/ts-integration/src/retry-provider.ts +++ b/core/tests/ts-integration/src/retry-provider.ts @@ -1,12 +1,15 @@ import * as zksync from 'zksync-ethers'; import * as ethers from 'ethers'; import { Reporter } from './reporter'; +import { AugmentedTransactionResponse } from './transaction-response'; +import { L1Provider, RetryableL1Wallet } from './l1-provider'; /** * RetryProvider retries every RPC request if it detects a timeout-related issue on the server side. */ export class RetryProvider extends zksync.Provider { private readonly reporter: Reporter; + private readonly knownTransactionHashes: Set = new Set(); constructor(_url?: string | { url: string; timeout: number }, network?: ethers.Networkish, reporter?: Reporter) { let url; @@ -55,15 +58,63 @@ export class RetryProvider extends zksync.Provider { } } + override _wrapTransactionResponse(txResponse: any): L2TransactionResponse { + const base = super._wrapTransactionResponse(txResponse); + this.knownTransactionHashes.add(base.hash); + return new L2TransactionResponse(base, this.reporter); + } + override _wrapTransactionReceipt(receipt: any): zksync.types.TransactionReceipt { const wrapped = super._wrapTransactionReceipt(receipt); - this.reporter.debug( - `Obtained receipt for transaction ${receipt.transactionHash}: blockNumber=${receipt.blockNumber}, status=${receipt.status}` - ); + if (!this.knownTransactionHashes.has(receipt.transactionHash)) { + this.knownTransactionHashes.add(receipt.transactionHash); + this.reporter.debug( + `Obtained receipt for L2 transaction ${receipt.transactionHash}: blockNumber=${receipt.blockNumber}, status=${receipt.status}` + ); + } return wrapped; } } -export interface AugmentedTransactionResponse extends zksync.types.TransactionResponse { - readonly reporter?: Reporter; +class L2TransactionResponse extends zksync.types.TransactionResponse implements AugmentedTransactionResponse { + public readonly kind = 'L2'; + private isWaitingReported: boolean = false; + private isReceiptReported: boolean = false; + + constructor(base: zksync.types.TransactionResponse, public readonly reporter: Reporter) { + super(base, base.provider); + } + + override async wait(confirmations?: number) { + if (!this.isWaitingReported) { + this.reporter.debug( + `Started waiting for L2 transaction ${this.hash} 
(from=${this.from}, nonce=${this.nonce})` + ); + this.isWaitingReported = true; + } + const receipt = await super.wait(confirmations); + if (receipt !== null && !this.isReceiptReported) { + this.reporter.debug( + `Obtained receipt for L2 transaction ${this.hash}: blockNumber=${receipt.blockNumber}, status=${receipt.status}` + ); + this.isReceiptReported = true; + } + return receipt; + } + + override replaceableTransaction(startBlock: number): L2TransactionResponse { + const base = super.replaceableTransaction(startBlock); + return new L2TransactionResponse(base, this.reporter); + } +} + +/** Wallet that retries expired nonce errors for L1 transactions. */ +export class RetryableWallet extends zksync.Wallet { + constructor(privateKey: string, l2Provider: RetryProvider, l1Provider: L1Provider) { + super(privateKey, l2Provider, l1Provider); + } + + override ethWallet(): RetryableL1Wallet { + return new RetryableL1Wallet(this.privateKey, this._providerL1()); + } } diff --git a/core/tests/ts-integration/src/test-master.ts b/core/tests/ts-integration/src/test-master.ts index 09fddd1589ca..297116b0b512 100644 --- a/core/tests/ts-integration/src/test-master.ts +++ b/core/tests/ts-integration/src/test-master.ts @@ -2,9 +2,10 @@ import * as zksync from 'zksync-ethers'; import * as ethers from 'ethers'; import { TestEnvironment, TestContext } from './types'; import { claimEtherBack } from './context-owner'; -import { RetryProvider } from './retry-provider'; +import { RetryableWallet, RetryProvider } from './retry-provider'; import { Reporter } from './reporter'; import { bigIntReviver } from './helpers'; +import { L1Provider } from './l1-provider'; /** * Test master is a singleton class (per suite) that is capable of providing wallets to the suite. @@ -19,8 +20,8 @@ export class TestMaster { private readonly env: TestEnvironment; readonly reporter: Reporter; - private readonly l1Provider: ethers.JsonRpcProvider; - private readonly l2Provider: zksync.Provider; + private readonly l1Provider: L1Provider; + private readonly l2Provider: RetryProvider; private readonly mainWallet: zksync.Wallet; private readonly subAccounts: zksync.Wallet[] = []; @@ -52,7 +53,7 @@ export class TestMaster { if (!suiteWalletPK) { throw new Error(`Wallet for ${suiteName} suite was not provided`); } - this.l1Provider = new ethers.JsonRpcProvider(this.env.l1NodeUrl); + this.l1Provider = new L1Provider(this.env.l1NodeUrl, this.reporter); this.l2Provider = new RetryProvider( { url: this.env.l2NodeUrl, @@ -71,7 +72,7 @@ export class TestMaster { this.l2Provider.pollingInterval = 5000; } - this.mainWallet = new zksync.Wallet(suiteWalletPK, this.l2Provider, this.l1Provider); + this.mainWallet = new RetryableWallet(suiteWalletPK, this.l2Provider, this.l1Provider); } /** @@ -112,7 +113,7 @@ export class TestMaster { */ newEmptyAccount(): zksync.Wallet { const randomPK = ethers.Wallet.createRandom().privateKey; - const newWallet = new zksync.Wallet(randomPK, this.l2Provider, this.l1Provider); + const newWallet = new RetryableWallet(randomPK, this.l2Provider, this.l1Provider); this.subAccounts.push(newWallet); return newWallet; } diff --git a/core/tests/ts-integration/src/transaction-response.ts b/core/tests/ts-integration/src/transaction-response.ts new file mode 100644 index 000000000000..a104b0107edd --- /dev/null +++ b/core/tests/ts-integration/src/transaction-response.ts @@ -0,0 +1,9 @@ +import { ethers } from 'ethers'; +import { Reporter } from './reporter'; + +export interface AugmentedTransactionResponse extends 
ethers.TransactionResponseParams { + readonly kind: 'L1' | 'L2'; + readonly reporter?: Reporter; + + wait(confirmations?: number, timeout?: number): Promise; +} diff --git a/core/tests/ts-integration/tests/api/contract-verification.test.ts b/core/tests/ts-integration/tests/api/contract-verification.test.ts index 77ab2e6ddfcb..8f8830ce7516 100644 --- a/core/tests/ts-integration/tests/api/contract-verification.test.ts +++ b/core/tests/ts-integration/tests/api/contract-verification.test.ts @@ -12,7 +12,7 @@ const DATE_REGEX = /\d{4}-\d{2}-\d{2}T\d{2}:\d{2}:\d{2}(\.\d{6})?/; const ZKSOLC_VERSION = 'v1.5.3'; const SOLC_VERSION = '0.8.26'; -const ZK_VM_SOLC_VERSION = 'zkVM-0.8.18-1.0.1'; +const ZK_VM_SOLC_VERSION = 'zkVM-0.8.26-1.0.1'; const ZKVYPER_VERSION = 'v1.5.4'; const VYPER_VERSION = '0.3.10'; diff --git a/core/tests/ts-integration/typings/jest.d.ts b/core/tests/ts-integration/typings/jest.d.ts index 4d8f1c3530c5..3bb62732cf70 100644 --- a/core/tests/ts-integration/typings/jest.d.ts +++ b/core/tests/ts-integration/typings/jest.d.ts @@ -1,6 +1,8 @@ import { MatcherModifier } from '../src/matchers/transaction-modifiers'; export declare global { + function rawWriteToConsole(message: string, ...args: any[]); + namespace jest { interface Matchers { // Generic matchers diff --git a/core/tests/vm-benchmark/src/transaction.rs b/core/tests/vm-benchmark/src/transaction.rs index 90e1c6360b81..d5fedfa4df94 100644 --- a/core/tests/vm-benchmark/src/transaction.rs +++ b/core/tests/vm-benchmark/src/transaction.rs @@ -47,7 +47,7 @@ pub fn get_deploy_tx_with_gas_limit(code: &[u8], gas_limit: u32, nonce: u32) -> .collect(); let mut signed = L2Tx::new_signed( - CONTRACT_DEPLOYER_ADDRESS, + Some(CONTRACT_DEPLOYER_ADDRESS), calldata, Nonce(nonce), tx_fee(gas_limit), @@ -76,7 +76,7 @@ fn tx_fee(gas_limit: u32) -> Fee { pub fn get_transfer_tx(nonce: u32) -> Transaction { let mut signed = L2Tx::new_signed( - PRIVATE_KEY.address(), + Some(PRIVATE_KEY.address()), vec![], // calldata Nonce(nonce), tx_fee(1_000_000), @@ -109,7 +109,7 @@ pub fn get_load_test_deploy_tx() -> Transaction { factory_deps.push(LOAD_TEST_CONTRACT.bytecode.clone()); let mut signed = L2Tx::new_signed( - CONTRACT_DEPLOYER_ADDRESS, + Some(CONTRACT_DEPLOYER_ADDRESS), create_calldata, Nonce(0), tx_fee(100_000_000), @@ -147,7 +147,7 @@ pub fn get_load_test_tx(nonce: u32, gas_limit: u32, params: LoadTestParams) -> T .expect("cannot encode `execute` inputs"); let mut signed = L2Tx::new_signed( - *LOAD_TEST_CONTRACT_ADDRESS, + Some(*LOAD_TEST_CONTRACT_ADDRESS), calldata, Nonce(nonce), tx_fee(gas_limit), diff --git a/docker-compose-cpu-runner.yml b/docker-compose-cpu-runner.yml index e0f751130eb0..beb54f3ade98 100644 --- a/docker-compose-cpu-runner.yml +++ b/docker-compose-cpu-runner.yml @@ -11,7 +11,7 @@ services: source: ./etc/reth/chaindata target: /chaindata - command: node --dev --datadir /rethdata --http --http.addr 0.0.0.0 --http.port 8545 --http.corsdomain "*" --dev.block-time 600ms --chain /chaindata/reth_config + command: node --dev --datadir /rethdata --http --http.addr 0.0.0.0 --http.port 8545 --http.corsdomain "*" --dev.block-time 300ms --chain /chaindata/reth_config ports: - 127.0.0.1:8545:8545 diff --git a/docker-compose-gpu-runner-cuda-12-0.yml b/docker-compose-gpu-runner-cuda-12-0.yml index f2089446a41d..35a0faeb9620 100644 --- a/docker-compose-gpu-runner-cuda-12-0.yml +++ b/docker-compose-gpu-runner-cuda-12-0.yml @@ -11,7 +11,7 @@ services: source: ./etc/reth/chaindata target: /chaindata - command: node --dev --datadir /rethdata 
--http --http.addr 0.0.0.0 --http.port 8545 --http.corsdomain "*" --dev.block-time 600ms --chain /chaindata/reth_config + command: node --dev --datadir /rethdata --http --http.addr 0.0.0.0 --http.port 8545 --http.corsdomain "*" --dev.block-time 300ms --chain /chaindata/reth_config ports: - 127.0.0.1:8545:8545 diff --git a/docker-compose-gpu-runner.yml b/docker-compose-gpu-runner.yml index 35c6c3778f22..f95ae0d5f544 100644 --- a/docker-compose-gpu-runner.yml +++ b/docker-compose-gpu-runner.yml @@ -11,7 +11,7 @@ services: source: ./etc/reth/chaindata target: /chaindata - command: node --dev --datadir /rethdata --http --http.addr 0.0.0.0 --http.port 8545 --http.corsdomain "*" --dev.block-time 600ms --chain /chaindata/reth_config + command: node --dev --datadir /rethdata --http --http.addr 0.0.0.0 --http.port 8545 --http.corsdomain "*" --dev.block-time 300ms --chain /chaindata/reth_config ports: - 127.0.0.1:8545:8545 diff --git a/docker-compose.yml b/docker-compose.yml index 7e1b52f83347..1e3a273ec9a4 100644 --- a/docker-compose.yml +++ b/docker-compose.yml @@ -13,7 +13,7 @@ services: source: ./etc/reth/chaindata target: /chaindata - command: node --dev --datadir /rethdata --http --http.addr 0.0.0.0 --http.port 8545 --http.corsdomain "*" --dev.block-time 600ms --chain /chaindata/reth_config + command: node --dev --datadir /rethdata --http --http.addr 0.0.0.0 --http.port 8545 --http.corsdomain "*" --dev.block-time 300ms --chain /chaindata/reth_config postgres: diff --git a/docker/contract-verifier/Dockerfile b/docker/contract-verifier/Dockerfile index 7ed1906b8574..7fcc695bf70b 100644 --- a/docker/contract-verifier/Dockerfile +++ b/docker/contract-verifier/Dockerfile @@ -1,14 +1,27 @@ # syntax=docker/dockerfile:experimental -FROM matterlabs/zksync-build-base:latest as builder +FROM ghcr.io/matter-labs/zksync-build-base:latest AS builder + +ARG CUDA_ARCH=89 +ENV CUDAARCHS=${CUDA_ARCH} +# set of args for use of sccache +ARG SCCACHE_GCS_BUCKET="" +ARG SCCACHE_GCS_SERVICE_ACCOUNT="" +ARG SCCACHE_GCS_RW_MODE="" +ARG RUSTC_WRAPPER="" + +ENV SCCACHE_GCS_BUCKET=${SCCACHE_GCS_BUCKET} +ENV SCCACHE_GCS_SERVICE_ACCOUNT=${SCCACHE_GCS_SERVICE_ACCOUNT} +ENV SCCACHE_GCS_RW_MODE=${SCCACHE_GCS_RW_MODE} +ENV RUSTC_WRAPPER=${RUSTC_WRAPPER} WORKDIR /usr/src/zksync COPY . . RUN cargo build --release -FROM debian:bookworm-slim +FROM ghcr.io/matter-labs/zksync-runtime-base:latest -RUN apt-get update && apt-get install -y curl libpq5 ca-certificates wget python3 jq && rm -rf /var/lib/apt/lists/* +RUN apt-get update && apt-get install -y wget python3 jq && rm -rf /var/lib/apt/lists/* # install zksolc 1.3.x RUN skip_versions="v1.3.12 v1.3.15 v1.3.20" && \ diff --git a/docker/external-node/Dockerfile b/docker/external-node/Dockerfile index 7d276941dc42..79b79cbc5f68 100644 --- a/docker/external-node/Dockerfile +++ b/docker/external-node/Dockerfile @@ -1,14 +1,23 @@ # Will work locally only after prior contracts build -FROM matterlabs/zksync-build-base:latest as builder +FROM ghcr.io/matter-labs/zksync-build-base:latest AS builder + +# set of args for use of sccache +ARG SCCACHE_GCS_BUCKET="" +ARG SCCACHE_GCS_SERVICE_ACCOUNT="" +ARG SCCACHE_GCS_RW_MODE="" +ARG RUSTC_WRAPPER="" + +ENV SCCACHE_GCS_BUCKET=${SCCACHE_GCS_BUCKET} +ENV SCCACHE_GCS_SERVICE_ACCOUNT=${SCCACHE_GCS_SERVICE_ACCOUNT} +ENV SCCACHE_GCS_RW_MODE=${SCCACHE_GCS_RW_MODE} +ENV RUSTC_WRAPPER=${RUSTC_WRAPPER} WORKDIR /usr/src/zksync COPY . . 
RUN cargo build --release -FROM debian:bookworm-slim - -RUN apt-get update && apt-get install -y curl libpq5 ca-certificates && rm -rf /var/lib/apt/lists/* +FROM ghcr.io/matter-labs/zksync-runtime-base:latest COPY --from=builder /usr/src/zksync/target/release/zksync_external_node /usr/bin COPY --from=builder /usr/src/zksync/target/release/block_reverter /usr/bin diff --git a/docker/proof-fri-gpu-compressor/Dockerfile b/docker/proof-fri-gpu-compressor/Dockerfile index 45f2ffa51b04..e744787c8259 100644 --- a/docker/proof-fri-gpu-compressor/Dockerfile +++ b/docker/proof-fri-gpu-compressor/Dockerfile @@ -1,10 +1,20 @@ # Will work locally only after prior universal setup key download -FROM nvidia/cuda:12.2.0-devel-ubuntu22.04 as builder +FROM nvidia/cuda:12.2.0-devel-ubuntu22.04 AS builder ARG DEBIAN_FRONTEND=noninteractive ARG CUDA_ARCH=89 ENV CUDAARCHS=${CUDA_ARCH} +# set of args for use of sccache +ARG SCCACHE_GCS_BUCKET="" +ARG SCCACHE_GCS_SERVICE_ACCOUNT="" +ARG SCCACHE_GCS_RW_MODE="" +ARG RUSTC_WRAPPER="" + +ENV SCCACHE_GCS_BUCKET=${SCCACHE_GCS_BUCKET} +ENV SCCACHE_GCS_SERVICE_ACCOUNT=${SCCACHE_GCS_SERVICE_ACCOUNT} +ENV SCCACHE_GCS_RW_MODE=${SCCACHE_GCS_RW_MODE} +ENV RUSTC_WRAPPER=${RUSTC_WRAPPER} RUN apt-get update && apt-get install -y curl clang openssl libssl-dev gcc g++ git \ pkg-config build-essential libclang-dev && \ @@ -22,6 +32,14 @@ RUN curl -Lo cmake-3.24.2-linux-x86_64.sh https://github.com/Kitware/CMake/relea chmod +x cmake-3.24.2-linux-x86_64.sh && \ ./cmake-3.24.2-linux-x86_64.sh --skip-license --prefix=/usr/local +# install sccache +RUN curl -Lo sccache-v0.8.1-x86_64-unknown-linux-musl.tar.gz https://github.com/mozilla/sccache/releases/download/v0.8.1/sccache-v0.8.1-x86_64-unknown-linux-musl.tar.gz && \ + tar -xzf sccache-v0.8.1-x86_64-unknown-linux-musl.tar.gz && \ + cp sccache-v0.8.1-x86_64-unknown-linux-musl/sccache /usr/local/sbin/ && \ + rm -rf sccache-v0.8.1-x86_64-unknown-linux-musl.tar.gz && \ + rm -rf sccache-v0.8.1-x86_64-unknown-linux-musl && \ + chmod +x /usr/local/sbin/sccache + WORKDIR /usr/src/zksync COPY . . diff --git a/docker/prover-fri-gateway/Dockerfile b/docker/prover-fri-gateway/Dockerfile index de59451fee8f..3e631b35156e 100644 --- a/docker/prover-fri-gateway/Dockerfile +++ b/docker/prover-fri-gateway/Dockerfile @@ -1,14 +1,24 @@ -FROM matterlabs/zksync-build-base:latest as builder +FROM ghcr.io/matter-labs/zksync-build-base:latest AS builder ARG DEBIAN_FRONTEND=noninteractive +# set of args for use of sccache +ARG SCCACHE_GCS_BUCKET="" +ARG SCCACHE_GCS_SERVICE_ACCOUNT="" +ARG SCCACHE_GCS_RW_MODE="" +ARG RUSTC_WRAPPER="" + +ENV SCCACHE_GCS_BUCKET=${SCCACHE_GCS_BUCKET} +ENV SCCACHE_GCS_SERVICE_ACCOUNT=${SCCACHE_GCS_SERVICE_ACCOUNT} +ENV SCCACHE_GCS_RW_MODE=${SCCACHE_GCS_RW_MODE} +ENV RUSTC_WRAPPER=${RUSTC_WRAPPER} + WORKDIR /usr/src/zksync COPY . . 
RUN cd prover && cargo build --release --bin zksync_prover_fri_gateway -FROM debian:bookworm-slim -RUN apt-get update && apt-get install -y curl libpq5 ca-certificates && rm -rf /var/lib/apt/lists/* +FROM ghcr.io/matter-labs/zksync-runtime-base:latest # copy VK required for proof wrapping COPY prover/data/keys/ /prover/data/keys/ diff --git a/docker/prover-gpu-fri/Dockerfile b/docker/prover-gpu-fri/Dockerfile index ad3ff1ff7197..2a680a49c5de 100644 --- a/docker/prover-gpu-fri/Dockerfile +++ b/docker/prover-gpu-fri/Dockerfile @@ -1,10 +1,21 @@ -FROM nvidia/cuda:12.2.0-devel-ubuntu22.04 as builder +FROM nvidia/cuda:12.2.0-devel-ubuntu22.04 AS builder ARG DEBIAN_FRONTEND=noninteractive ARG CUDA_ARCH=89 ENV CUDAARCHS=${CUDA_ARCH} +# set of args for use of sccache +ARG SCCACHE_GCS_BUCKET="" +ARG SCCACHE_GCS_SERVICE_ACCOUNT="" +ARG SCCACHE_GCS_RW_MODE="" +ARG RUSTC_WRAPPER="" + +ENV SCCACHE_GCS_BUCKET=${SCCACHE_GCS_BUCKET} +ENV SCCACHE_GCS_SERVICE_ACCOUNT=${SCCACHE_GCS_SERVICE_ACCOUNT} +ENV SCCACHE_GCS_RW_MODE=${SCCACHE_GCS_RW_MODE} +ENV RUSTC_WRAPPER=${RUSTC_WRAPPER} + RUN apt-get update && apt-get install -y curl clang openssl libssl-dev gcc g++ \ pkg-config build-essential libclang-dev && \ rm -rf /var/lib/apt/lists/* @@ -21,6 +32,14 @@ RUN curl -Lo cmake-3.24.2-linux-x86_64.sh https://github.com/Kitware/CMake/relea chmod +x cmake-3.24.2-linux-x86_64.sh && \ ./cmake-3.24.2-linux-x86_64.sh --skip-license --prefix=/usr/local +# install sccache +RUN curl -Lo sccache-v0.8.1-x86_64-unknown-linux-musl.tar.gz https://github.com/mozilla/sccache/releases/download/v0.8.1/sccache-v0.8.1-x86_64-unknown-linux-musl.tar.gz && \ + tar -xzf sccache-v0.8.1-x86_64-unknown-linux-musl.tar.gz && \ + cp sccache-v0.8.1-x86_64-unknown-linux-musl/sccache /usr/local/sbin/ && \ + rm -rf sccache-v0.8.1-x86_64-unknown-linux-musl.tar.gz && \ + rm -rf sccache-v0.8.1-x86_64-unknown-linux-musl && \ + chmod +x /usr/local/sbin/sccache + WORKDIR /usr/src/zksync COPY . . diff --git a/docker/prover-job-monitor/Dockerfile b/docker/prover-job-monitor/Dockerfile index 25d5dcd3af95..88b46df27ffa 100644 --- a/docker/prover-job-monitor/Dockerfile +++ b/docker/prover-job-monitor/Dockerfile @@ -1,14 +1,24 @@ -FROM matterlabs/zksync-build-base:latest as builder +FROM ghcr.io/matter-labs/zksync-build-base:latest AS builder ARG DEBIAN_FRONTEND=noninteractive +# set of args for use of sccache +ARG SCCACHE_GCS_BUCKET="" +ARG SCCACHE_GCS_SERVICE_ACCOUNT="" +ARG SCCACHE_GCS_RW_MODE="" +ARG RUSTC_WRAPPER="" + +ENV SCCACHE_GCS_BUCKET=${SCCACHE_GCS_BUCKET} +ENV SCCACHE_GCS_SERVICE_ACCOUNT=${SCCACHE_GCS_SERVICE_ACCOUNT} +ENV SCCACHE_GCS_RW_MODE=${SCCACHE_GCS_RW_MODE} +ENV RUSTC_WRAPPER=${RUSTC_WRAPPER} + WORKDIR /usr/src/zksync COPY . . 
RUN cd prover && cargo build --release --bin zksync_prover_job_monitor -FROM debian:bookworm-slim -RUN apt-get update && apt-get install -y curl libpq5 ca-certificates && rm -rf /var/lib/apt/lists/* +FROM ghcr.io/matter-labs/zksync-runtime-base:latest COPY --from=builder /usr/src/zksync/prover/target/release/zksync_prover_job_monitor /usr/bin/ diff --git a/docker/runtime-base/Dockerfile b/docker/runtime-base/Dockerfile new file mode 100644 index 000000000000..09d920b1c436 --- /dev/null +++ b/docker/runtime-base/Dockerfile @@ -0,0 +1,9 @@ +FROM debian:bookworm-slim + +RUN apt-get update && \ + apt-get install -y \ + curl \ + libpq5 \ + ca-certificates \ + && \ + rm -rf /var/lib/apt/lists/* diff --git a/docker/server-v2/Dockerfile b/docker/server-v2/Dockerfile index 07611a1d7b4d..460ac70c622c 100644 --- a/docker/server-v2/Dockerfile +++ b/docker/server-v2/Dockerfile @@ -1,6 +1,17 @@ # Will work locally only after prior contracts build # syntax=docker/dockerfile:experimental -FROM matterlabs/zksync-build-base:latest as builder +FROM ghcr.io/matter-labs/zksync-build-base:latest AS builder + +# set of args for use of sccache +ARG SCCACHE_GCS_BUCKET="" +ARG SCCACHE_GCS_SERVICE_ACCOUNT="" +ARG SCCACHE_GCS_RW_MODE="" +ARG RUSTC_WRAPPER="" + +ENV SCCACHE_GCS_BUCKET=${SCCACHE_GCS_BUCKET} +ENV SCCACHE_GCS_SERVICE_ACCOUNT=${SCCACHE_GCS_SERVICE_ACCOUNT} +ENV SCCACHE_GCS_RW_MODE=${SCCACHE_GCS_RW_MODE} +ENV RUSTC_WRAPPER=${RUSTC_WRAPPER} WORKDIR /usr/src/zksync @@ -8,9 +19,9 @@ COPY . . RUN cargo build --release --features=rocksdb/io-uring -FROM debian:bookworm-slim +FROM ghcr.io/matter-labs/zksync-runtime-base:latest -RUN apt-get update && apt-get install -y curl libpq5 liburing-dev ca-certificates && \ +RUN apt-get update && apt-get install -y liburing-dev && \ rm -rf /var/lib/apt/lists/* EXPOSE 3000 diff --git a/docker/snapshots-creator/Dockerfile b/docker/snapshots-creator/Dockerfile index 10eef06dfbbc..2d3c83064981 100644 --- a/docker/snapshots-creator/Dockerfile +++ b/docker/snapshots-creator/Dockerfile @@ -1,14 +1,25 @@ # syntax=docker/dockerfile:experimental -FROM matterlabs/zksync-build-base:latest as builder +FROM ghcr.io/matter-labs/zksync-build-base:latest AS builder + +# set of args for use of sccache +ARG SCCACHE_GCS_BUCKET="" +ARG SCCACHE_GCS_SERVICE_ACCOUNT="" +ARG SCCACHE_GCS_RW_MODE="" +ARG RUSTC_WRAPPER="" + +ENV SCCACHE_GCS_BUCKET=${SCCACHE_GCS_BUCKET} +ENV SCCACHE_GCS_SERVICE_ACCOUNT=${SCCACHE_GCS_SERVICE_ACCOUNT} +ENV SCCACHE_GCS_RW_MODE=${SCCACHE_GCS_RW_MODE} +ENV RUSTC_WRAPPER=${RUSTC_WRAPPER} WORKDIR /usr/src/zksync COPY . . 
RUN cargo build --release --bin snapshots_creator -FROM debian:bookworm-slim +FROM ghcr.io/matter-labs/zksync-runtime-base:latest -RUN apt-get update && apt-get install -y curl libpq5 liburing-dev ca-certificates && \ +RUN apt-get update && apt-get install -y liburing-dev && \ rm -rf /var/lib/apt/lists/* COPY --from=builder /usr/src/zksync/target/release/snapshots_creator /usr/bin diff --git a/docker/verified-sources-fetcher/Dockerfile b/docker/verified-sources-fetcher/Dockerfile index 972f85d0faf5..87475f3187f3 100644 --- a/docker/verified-sources-fetcher/Dockerfile +++ b/docker/verified-sources-fetcher/Dockerfile @@ -1,14 +1,26 @@ -FROM matterlabs/zksync-build-base:latest as builder +FROM ghcr.io/matter-labs/zksync-build-base:latest AS builder ARG DEBIAN_FRONTEND=noninteractive +# set of args for use of sccache +ARG SCCACHE_GCS_BUCKET="" +ARG SCCACHE_GCS_SERVICE_ACCOUNT="" +ARG SCCACHE_GCS_RW_MODE="" +ARG RUSTC_WRAPPER="" + +ENV SCCACHE_GCS_BUCKET=${SCCACHE_GCS_BUCKET} +ENV SCCACHE_GCS_SERVICE_ACCOUNT=${SCCACHE_GCS_SERVICE_ACCOUNT} +ENV SCCACHE_GCS_RW_MODE=${SCCACHE_GCS_RW_MODE} +ENV RUSTC_WRAPPER=${RUSTC_WRAPPER} + WORKDIR /usr/src/zksync COPY . . RUN cargo build --release --bin verified_sources_fetcher -FROM debian:bookworm-slim -RUN apt-get update && apt-get install -y apt-transport-https ca-certificates gnupg curl git && rm -rf /var/lib/apt/lists/* +FROM ghcr.io/matter-labs/zksync-runtime-base:latest + +RUN apt-get update && apt-get install -y apt-transport-https gnupg git && rm -rf /var/lib/apt/lists/* RUN curl https://packages.cloud.google.com/apt/doc/apt-key.gpg | gpg --dearmor -o /usr/share/keyrings/cloud.google.gpg RUN echo "deb [signed-by=/usr/share/keyrings/cloud.google.gpg] https://packages.cloud.google.com/apt cloud-sdk main" | tee -a /etc/apt/sources.list.d/google-cloud-sdk.list diff --git a/docker/witness-generator/Dockerfile b/docker/witness-generator/Dockerfile index 2eebe07515e4..9211c3e23e53 100644 --- a/docker/witness-generator/Dockerfile +++ b/docker/witness-generator/Dockerfile @@ -1,17 +1,26 @@ -FROM matterlabs/zksync-build-base:latest AS builder +FROM ghcr.io/matter-labs/zksync-build-base:latest AS builder ARG DEBIAN_FRONTEND=noninteractive ARG RUST_FLAGS="" ENV RUSTFLAGS=${RUST_FLAGS} +# set of args for use of sccache +ARG SCCACHE_GCS_BUCKET="" +ARG SCCACHE_GCS_SERVICE_ACCOUNT="" +ARG SCCACHE_GCS_RW_MODE="" +ARG RUSTC_WRAPPER="" + +ENV SCCACHE_GCS_BUCKET=${SCCACHE_GCS_BUCKET} +ENV SCCACHE_GCS_SERVICE_ACCOUNT=${SCCACHE_GCS_SERVICE_ACCOUNT} +ENV SCCACHE_GCS_RW_MODE=${SCCACHE_GCS_RW_MODE} +ENV RUSTC_WRAPPER=${RUSTC_WRAPPER} + WORKDIR /usr/src/zksync COPY . . 
RUN cd prover && cargo build --release --bin zksync_witness_generator -FROM debian:bookworm-slim - -RUN apt-get update && apt-get install -y curl libpq5 ca-certificates && rm -rf /var/lib/apt/lists/* +FROM ghcr.io/matter-labs/zksync-runtime-base:latest COPY prover/data/keys/ /prover/data/keys/ diff --git a/docker/witness-vector-generator/Dockerfile b/docker/witness-vector-generator/Dockerfile index e315f670101a..93d8dd308a58 100644 --- a/docker/witness-vector-generator/Dockerfile +++ b/docker/witness-vector-generator/Dockerfile @@ -1,15 +1,24 @@ -FROM matterlabs/zksync-build-base:latest as builder +FROM ghcr.io/matter-labs/zksync-build-base:latest AS builder ARG DEBIAN_FRONTEND=noninteractive +# set of args for use of sccache +ARG SCCACHE_GCS_BUCKET="" +ARG SCCACHE_GCS_SERVICE_ACCOUNT="" +ARG SCCACHE_GCS_RW_MODE="" +ARG RUSTC_WRAPPER="" + +ENV SCCACHE_GCS_BUCKET=${SCCACHE_GCS_BUCKET} +ENV SCCACHE_GCS_SERVICE_ACCOUNT=${SCCACHE_GCS_SERVICE_ACCOUNT} +ENV SCCACHE_GCS_RW_MODE=${SCCACHE_GCS_RW_MODE} +ENV RUSTC_WRAPPER=${RUSTC_WRAPPER} + WORKDIR /usr/src/zksync COPY . . RUN cd prover && cargo build --release --bin zksync_witness_vector_generator -FROM debian:bookworm-slim - -RUN apt-get update && apt-get install -y curl libpq5 ca-certificates && rm -rf /var/lib/apt/lists/* +FROM ghcr.io/matter-labs/zksync-runtime-base:latest # copy finalization hints required for witness vector generation COPY prover/data/keys/ /prover/data/keys/ diff --git a/docs/guides/external-node/00_quick_start.md b/docs/guides/external-node/00_quick_start.md index 3902fdc15560..776e8a56e497 100644 --- a/docs/guides/external-node/00_quick_start.md +++ b/docs/guides/external-node/00_quick_start.md @@ -65,12 +65,12 @@ The HTTP JSON-RPC API can be accessed on port `3060` and WebSocket API can be ac > [!NOTE] > -> To stop state growth, you can enable state pruning by uncommenting `EN_PRUNING_ENABLED: true` in docker compose file, +> To stop historical DB growth, you can enable DB pruning by uncommenting `EN_PRUNING_ENABLED: true` in docker compose file, > you can read more about pruning in > [08_pruning.md](https://github.com/matter-labs/zksync-era/blob/main/docs/guides/external-node/08_pruning.md) - 32 GB of RAM and a relatively modern CPU -- 30 GB of storage for testnet nodes +- 50 GB of storage for testnet nodes - 300 GB of storage for mainnet nodes - 100 Mbps connection (1 Gbps+ recommended) diff --git a/etc/env/base/prover_job_monitor.toml b/etc/env/base/prover_job_monitor.toml index 40cdf76b8b10..ce206c74ffde 100644 --- a/etc/env/base/prover_job_monitor.toml +++ b/etc/env/base/prover_job_monitor.toml @@ -13,3 +13,4 @@ proof_compressor_queue_reporter_run_interval_ms = 10000 prover_queue_reporter_run_interval_ms = 10000 witness_generator_queue_reporter_run_interval_ms = 10000 witness_job_queuer_run_interval_ms = 10000 +http_port = 3074 diff --git a/etc/env/base/rust.toml b/etc/env/base/rust.toml index 1bb69374ab1a..d8bef020c642 100644 --- a/etc/env/base/rust.toml +++ b/etc/env/base/rust.toml @@ -7,7 +7,7 @@ RUST_LOG="""\ zksync_node_framework=info,\ zksync_block_reverter=info,\ -zksync_commitment_generator=info,\ +zksync_commitment_generator=debug,\ zksync_node_db_pruner=info,\ zksync_eth_sender=info,\ zksync_node_fee_model=info,\ diff --git a/etc/env/configs/ext-node.toml b/etc/env/configs/ext-node.toml index b2f740065591..a5eb22db5ec1 100644 --- a/etc/env/configs/ext-node.toml +++ b/etc/env/configs/ext-node.toml @@ -63,7 +63,7 @@ zksync_node_consensus=info,\ zksync_consensus_bft=info,\ zksync_consensus_network=info,\ 
zksync_consensus_storage=info,\ -zksync_commitment_generator=info,\ +zksync_commitment_generator=debug,\ zksync_core=debug,\ zksync_dal=info,\ zksync_db_connection=info,\ diff --git a/etc/env/file_based/general.yaml b/etc/env/file_based/general.yaml index 138905883e30..6a36f65c97c3 100644 --- a/etc/env/file_based/general.yaml +++ b/etc/env/file_based/general.yaml @@ -287,6 +287,7 @@ prover_job_monitor: prover_queue_reporter_run_interval_ms: 10000 witness_generator_queue_reporter_run_interval_ms: 10000 witness_job_queuer_run_interval_ms: 10000 + http_port: 3074 base_token_adjuster: @@ -311,7 +312,7 @@ prometheus: observability: log_format: plain - log_directives: "zksync_node_test_utils=info,zksync_state_keeper=info,zksync_reorg_detector=info,zksync_consistency_checker=info,zksync_metadata_calculator=info,zksync_node_sync=info,zksync_node_consensus=info,zksync_contract_verification_server=info,zksync_node_api_server=info,zksync_tee_verifier_input_producer=info,zksync_node_framework=info,zksync_block_reverter=info,zksync_commitment_generator=info,zksync_node_db_pruner=info,zksync_eth_sender=info,zksync_node_fee_model=info,zksync_node_genesis=info,zksync_house_keeper=info,zksync_proof_data_handler=info,zksync_shared_metrics=info,zksync_node_test_utils=info,zksync_vm_runner=info,zksync_consensus_bft=info,zksync_consensus_network=info,zksync_consensus_storage=info,zksync_core_leftovers=debug,zksync_server=debug,zksync_contract_verifier=debug,zksync_dal=info,zksync_db_connection=info,zksync_eth_client=info,zksync_eth_watch=debug,zksync_storage=info,zksync_db_manager=info,zksync_merkle_tree=info,zksync_state=debug,zksync_utils=debug,zksync_queued_job_processor=info,zksync_types=info,zksync_mempool=debug,loadnext=info,vm=info,zksync_object_store=info,zksync_external_node=info,zksync_witness_generator=info,zksync_prover_fri=info,zksync_witness_vector_generator=info,zksync_web3_decl=debug,zksync_health_check=debug,zksync_proof_fri_compressor=info,vise_exporter=error,snapshots_creator=debug,zksync_base_token_adjuster=debug,zksync_external_price_api=debug,zksync_external_proof_integration_api=info" + log_directives: 
"zksync_node_test_utils=info,zksync_state_keeper=info,zksync_reorg_detector=info,zksync_consistency_checker=info,zksync_metadata_calculator=info,zksync_node_sync=info,zksync_node_consensus=info,zksync_contract_verification_server=info,zksync_node_api_server=info,zksync_tee_verifier_input_producer=info,zksync_node_framework=info,zksync_block_reverter=info,zksync_commitment_generator=debug,zksync_node_db_pruner=info,zksync_eth_sender=info,zksync_node_fee_model=info,zksync_node_genesis=info,zksync_house_keeper=info,zksync_proof_data_handler=info,zksync_shared_metrics=info,zksync_node_test_utils=info,zksync_vm_runner=info,zksync_consensus_bft=info,zksync_consensus_network=info,zksync_consensus_storage=info,zksync_core_leftovers=debug,zksync_server=debug,zksync_contract_verifier=debug,zksync_dal=info,zksync_db_connection=info,zksync_eth_client=info,zksync_eth_watch=debug,zksync_storage=info,zksync_db_manager=info,zksync_merkle_tree=info,zksync_state=debug,zksync_utils=debug,zksync_queued_job_processor=info,zksync_types=info,zksync_mempool=debug,loadnext=info,vm=info,zksync_object_store=info,zksync_external_node=info,zksync_witness_generator=info,zksync_prover_fri=info,zksync_witness_vector_generator=info,zksync_web3_decl=debug,zksync_health_check=debug,zksync_proof_fri_compressor=info,vise_exporter=error,snapshots_creator=debug,zksync_base_token_adjuster=debug,zksync_external_price_api=debug,zksync_external_proof_integration_api=info" # Uncomment only if needed # sentry: # url: unset diff --git a/flake.nix b/flake.nix index cc14faebfed5..ef618816f9c9 100644 --- a/flake.nix +++ b/flake.nix @@ -67,7 +67,6 @@ }; craneLib = (crane.mkLib pkgs).overrideToolchain rustVersion; - NIX_OUTPATH_USED_AS_RANDOM_SEED = "aaaaaaaaaa"; commonArgs = { nativeBuildInputs = with pkgs;[ @@ -81,6 +80,8 @@ snappy.dev lz4.dev bzip2.dev + rocksdb + snappy.dev ]; src = with pkgs.lib.fileset; toSource { @@ -97,7 +98,9 @@ env = { OPENSSL_NO_VENDOR = "1"; - inherit NIX_OUTPATH_USED_AS_RANDOM_SEED; + ROCKSDB_LIB_DIR = "${pkgs.rocksdb.out}/lib"; + SNAPPY_LIB_DIR = "${pkgs.snappy.out}/lib"; + NIX_OUTPATH_USED_AS_RANDOM_SEED = "aaaaaaaaaa"; }; doCheck = false; diff --git a/prover/Cargo.lock b/prover/Cargo.lock index da49f0340b46..38c2ca162c43 100644 --- a/prover/Cargo.lock +++ b/prover/Cargo.lock @@ -313,6 +313,8 @@ dependencies = [ "http 1.1.0", "http-body 1.0.0", "http-body-util", + "hyper 1.3.1", + "hyper-util", "itoa", "matchit", "memchr", @@ -321,10 +323,15 @@ dependencies = [ "pin-project-lite", "rustversion", "serde", + "serde_json", + "serde_path_to_error", + "serde_urlencoded", "sync_wrapper 1.0.1", + "tokio", "tower", "tower-layer", "tower-service", + "tracing", ] [[package]] @@ -345,6 +352,7 @@ dependencies = [ "sync_wrapper 0.1.2", "tower-layer", "tower-service", + "tracing", ] [[package]] @@ -5112,9 +5120,9 @@ checksum = "a3f0bf26fd526d2a95683cd0f87bf103b8539e2ca1ef48ce002d67aad59aa0b4" [[package]] name = "serde" -version = "1.0.203" +version = "1.0.210" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7253ab4de971e72fb7be983802300c30b5a7f0c2e56fab8abfc6a214307c0094" +checksum = "c8e3592472072e6e22e0a54d5904d9febf8508f65fb8552499a1abc7d1078c3a" dependencies = [ "serde_derive", ] @@ -5131,9 +5139,9 @@ dependencies = [ [[package]] name = "serde_derive" -version = "1.0.203" +version = "1.0.210" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "500cbc0ebeb6f46627f50f3f5811ccf6bf00643be300b4c3eabc0ef55dc5b5ba" +checksum = 
"243902eda00fad750862fc144cea25caca5e20d615af0a81bee94ca738f1df1f" dependencies = [ "proc-macro2 1.0.85", "quote 1.0.36", @@ -5151,6 +5159,16 @@ dependencies = [ "serde", ] +[[package]] +name = "serde_path_to_error" +version = "0.1.16" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "af99884400da37c88f5e9146b7f1fd0fbcae8f6eec4e9da38b67d05486f814a6" +dependencies = [ + "itoa", + "serde", +] + [[package]] name = "serde_urlencoded" version = "0.7.1" @@ -7533,6 +7551,7 @@ dependencies = [ "tokio", "tracing", "vise", + "zksync_concurrency", "zksync_consensus_roles", "zksync_consensus_storage", "zksync_contracts", @@ -7927,13 +7946,16 @@ version = "0.1.0" dependencies = [ "anyhow", "async-trait", + "axum", "clap 4.5.4", "ctrlc", + "serde", "tokio", "tracing", "vise", "zksync_config", "zksync_core_leftovers", + "zksync_db_connection", "zksync_prover_dal", "zksync_types", "zksync_utils", @@ -8001,6 +8023,7 @@ dependencies = [ "secp256k1", "serde", "serde_json", + "serde_with", "strum", "thiserror", "tracing", @@ -8084,7 +8107,7 @@ dependencies = [ [[package]] name = "zksync_vm2" version = "0.1.0" -source = "git+https://github.com/matter-labs/vm2.git?rev=cd6136c42ec56856e0abcf2a98d1a9e120161482#cd6136c42ec56856e0abcf2a98d1a9e120161482" +source = "git+https://github.com/matter-labs/vm2.git?rev=74577d9be13b1bff9d1a712389731f669b179e47#74577d9be13b1bff9d1a712389731f669b179e47" dependencies = [ "enum_dispatch", "primitive-types", @@ -8096,7 +8119,7 @@ dependencies = [ [[package]] name = "zksync_vm2_interface" version = "0.1.0" -source = "git+https://github.com/matter-labs/vm2.git?rev=cd6136c42ec56856e0abcf2a98d1a9e120161482#cd6136c42ec56856e0abcf2a98d1a9e120161482" +source = "git+https://github.com/matter-labs/vm2.git?rev=74577d9be13b1bff9d1a712389731f669b179e47#74577d9be13b1bff9d1a712389731f669b179e47" dependencies = [ "primitive-types", ] diff --git a/prover/Cargo.toml b/prover/Cargo.toml index 624661adc8dc..fd171b254d5a 100644 --- a/prover/Cargo.toml +++ b/prover/Cargo.toml @@ -19,6 +19,7 @@ categories = ["cryptography"] [workspace.dependencies] # Common dependencies anyhow = "1.0" +axum = "0.7.5" async-trait = "0.1" bincode = "1" chrono = "0.4.38" diff --git a/prover/crates/bin/prover_job_monitor/Cargo.toml b/prover/crates/bin/prover_job_monitor/Cargo.toml index 160d3a603e36..a4bf8765a946 100644 --- a/prover/crates/bin/prover_job_monitor/Cargo.toml +++ b/prover/crates/bin/prover_job_monitor/Cargo.toml @@ -16,6 +16,7 @@ zksync_prover_dal.workspace = true zksync_utils.workspace = true zksync_types.workspace = true zksync_config = { workspace = true, features = ["observability_ext"] } +zksync_db_connection.workspace = true vise.workspace = true @@ -25,3 +26,5 @@ clap = { workspace = true, features = ["derive"] } ctrlc = { workspace = true, features = ["termination"] } tracing.workspace = true async-trait.workspace = true +serde.workspace = true +axum.workspace = true diff --git a/prover/crates/bin/prover_job_monitor/src/autoscaler_queue_reporter.rs b/prover/crates/bin/prover_job_monitor/src/autoscaler_queue_reporter.rs new file mode 100644 index 000000000000..aff78409dbb3 --- /dev/null +++ b/prover/crates/bin/prover_job_monitor/src/autoscaler_queue_reporter.rs @@ -0,0 +1,176 @@ +use std::collections::HashMap; + +use axum::{ + http::StatusCode, + response::{IntoResponse, Response}, + routing::get, + Json, Router, +}; +use serde::{Deserialize, Serialize}; +use zksync_db_connection::error::DalError; +use zksync_prover_dal::{ConnectionPool, Prover, ProverDal}; +use 
zksync_types::{ + basic_fri_types::AggregationRound, protocol_version::ProtocolSemanticVersion, + prover_dal::JobCountStatistics, +}; + +#[derive(Debug, Clone)] +pub struct AutoscalerQueueReporter { + connection_pool: ConnectionPool, +} + +#[derive(Default, Debug, Serialize, Deserialize)] +pub struct QueueReport { + pub basic_witness_jobs: JobCountStatistics, + pub leaf_witness_jobs: JobCountStatistics, + pub node_witness_jobs: JobCountStatistics, + pub recursion_tip_witness_jobs: JobCountStatistics, + pub scheduler_witness_jobs: JobCountStatistics, + pub prover_jobs: JobCountStatistics, + pub proof_compressor_jobs: JobCountStatistics, +} + +#[derive(Default, Debug, Serialize, Deserialize)] +pub struct VersionedQueueReport { + pub version: ProtocolSemanticVersion, + pub report: QueueReport, +} + +impl AutoscalerQueueReporter { + pub fn new(connection_pool: ConnectionPool) -> Self { + Self { connection_pool } + } + + pub async fn get_report(&self) -> Result>, ProcessorError> { + tracing::debug!("Received request to get queue report"); + + let mut result = HashMap::::new(); + + for round in AggregationRound::ALL_ROUNDS { + self.get_witness_jobs_report(round, &mut result).await?; + } + + self.get_prover_jobs_report(&mut result).await?; + self.get_proof_compressor_jobs_report(&mut result).await?; + + Ok(Json( + result + .into_iter() + .map(|(version, report)| VersionedQueueReport { version, report }) + .collect(), + )) + } + + async fn get_witness_jobs_report( + &self, + aggregation_round: AggregationRound, + state: &mut HashMap, + ) -> anyhow::Result<()> { + let stats = self + .connection_pool + .connection() + .await? + .fri_witness_generator_dal() + .get_witness_jobs_stats(aggregation_round) + .await; + + for (protocol_version, job_stats) in stats { + let report = state.entry(protocol_version).or_default(); + + match aggregation_round { + AggregationRound::BasicCircuits => report.basic_witness_jobs = job_stats, + AggregationRound::LeafAggregation => report.leaf_witness_jobs = job_stats, + AggregationRound::NodeAggregation => report.node_witness_jobs = job_stats, + AggregationRound::RecursionTip => report.recursion_tip_witness_jobs = job_stats, + AggregationRound::Scheduler => report.scheduler_witness_jobs = job_stats, + } + } + Ok(()) + } + + async fn get_prover_jobs_report( + &self, + state: &mut HashMap, + ) -> anyhow::Result<()> { + let stats = self + .connection_pool + .connection() + .await? + .fri_prover_jobs_dal() + .get_generic_prover_jobs_stats() + .await; + + for (protocol_version, stats) in stats { + let report = state.entry(protocol_version).or_default(); + + report.prover_jobs = stats; + } + Ok(()) + } + + async fn get_proof_compressor_jobs_report( + &self, + state: &mut HashMap, + ) -> anyhow::Result<()> { + let stats = self + .connection_pool + .connection() + .await? 
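Note: the new /queue_report route returns a JSON array of VersionedQueueReport values, one per protocol version. A minimal sketch of how an autoscaler-side client might poll it; reqwest is an assumed client dependency (it is not part of this crate), and the base URL/port are illustrative:

    // Hypothetical client for the endpoint above; `VersionedQueueReport`
    // is the serde-derived type from autoscaler_queue_reporter.rs.
    use zksync_prover_job_monitor::autoscaler_queue_reporter::VersionedQueueReport;

    async fn fetch_queue_report(base_url: &str) -> anyhow::Result<Vec<VersionedQueueReport>> {
        // GET /queue_report and deserialize the per-version reports.
        let url = format!("{base_url}/queue_report");
        let reports: Vec<VersionedQueueReport> = reqwest::get(&url).await?.json().await?;
        Ok(reports)
    }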
diff --git a/prover/crates/bin/prover_job_monitor/src/lib.rs b/prover/crates/bin/prover_job_monitor/src/lib.rs
index 60d8be297cfe..0d6a0ebe104c 100644
--- a/prover/crates/bin/prover_job_monitor/src/lib.rs
+++ b/prover/crates/bin/prover_job_monitor/src/lib.rs
@@ -1,4 +1,5 @@
 pub mod archiver;
+pub mod autoscaler_queue_reporter;
 pub mod job_requeuer;
 pub(crate) mod metrics;
 pub mod queue_reporter;
diff --git a/prover/crates/bin/prover_job_monitor/src/main.rs b/prover/crates/bin/prover_job_monitor/src/main.rs
index 734a4bac38a2..9195b92882dd 100644
--- a/prover/crates/bin/prover_job_monitor/src/main.rs
+++ b/prover/crates/bin/prover_job_monitor/src/main.rs
@@ -1,3 +1,5 @@
+use std::{future::IntoFuture, net::SocketAddr};
+
 use anyhow::Context as _;
 use clap::Parser;
 use tokio::{
@@ -12,6 +14,7 @@ use zksync_core_leftovers::temp_config_store::{load_database_secrets, load_gener
 use zksync_prover_dal::{ConnectionPool, Prover};
 use zksync_prover_job_monitor::{
     archiver::{GpuProverArchiver, ProverJobsArchiver},
+    autoscaler_queue_reporter::get_queue_reporter_router,
     job_requeuer::{ProofCompressorJobRequeuer, ProverJobRequeuer, WitnessGeneratorJobRequeuer},
     queue_reporter::{
         ProofCompressorQueueReporter, ProverQueueReporter, WitnessGeneratorQueueReporter,
@@ -85,21 +88,42 @@ async fn main() -> anyhow::Result<()> {
     let mut tasks = vec![tokio::spawn(exporter_config.run(stop_receiver.clone()))];
 
     tasks.extend(get_tasks(
-        connection_pool,
-        prover_job_monitor_config,
+        connection_pool.clone(),
+        prover_job_monitor_config.clone(),
         proof_compressor_config,
         prover_config,
         witness_generator_config,
         prover_group_config,
-        stop_receiver,
+        stop_receiver.clone(),
     )?);
     let mut tasks = ManagedTasks::new(tasks);
 
+    let bind_address = SocketAddr::from(([0, 0, 0, 0], prover_job_monitor_config.http_port));
+
+    tracing::info!("Starting PJM server on {bind_address}");
+
+    let listener = tokio::net::TcpListener::bind(bind_address)
+        .await
+        .with_context(|| format!("Failed binding PJM server to {bind_address}"))?;
+
+    let mut receiver = stop_receiver.clone();
+    let app = axum::serve(listener, get_queue_reporter_router(connection_pool))
+        .with_graceful_shutdown(async move {
+            if receiver.changed().await.is_err() {
+                tracing::warn!(
+                    "Stop signal sender for PJM server was dropped without sending a signal"
+                );
+            }
+            tracing::info!("Stop signal received, PJM server is shutting down");
+        })
+        .into_future();
+
     tokio::select! {
         _ = tasks.wait_single() => {},
         _ = stop_signal_receiver => {
             tracing::info!("Stop signal received, shutting down");
         }
+        _ = app => {}
     }
     stop_sender.send(true).ok();
 
     tasks.complete(graceful_shutdown_timeout).await;
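Note: the server wiring above follows the usual axum 0.7 shutdown pattern: axum::serve(...).with_graceful_shutdown(...) yields a future via IntoFuture, which is then raced against the other tasks in tokio::select!, with a watch channel carrying the stop signal. A condensed, self-contained sketch of the same pattern (port and names are illustrative):

    use std::future::IntoFuture;

    use axum::Router;
    use tokio::sync::watch;

    async fn serve_with_shutdown(
        router: Router,
        mut stop: watch::Receiver<bool>,
    ) -> anyhow::Result<()> {
        let listener = tokio::net::TcpListener::bind(("0.0.0.0", 3074)).await?;
        axum::serve(listener, router)
            .with_graceful_shutdown(async move {
                // Resolves when the stop flag changes (or its sender is dropped),
                // letting in-flight requests finish before the server exits.
                let _ = stop.changed().await;
            })
            .into_future()
            .await?;
        Ok(())
    }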
diff --git a/prover/crates/bin/prover_job_monitor/src/queue_reporter/witness_generator_queue_reporter.rs b/prover/crates/bin/prover_job_monitor/src/queue_reporter/witness_generator_queue_reporter.rs
index 5f507a753649..914f2e9ca856 100644
--- a/prover/crates/bin/prover_job_monitor/src/queue_reporter/witness_generator_queue_reporter.rs
+++ b/prover/crates/bin/prover_job_monitor/src/queue_reporter/witness_generator_queue_reporter.rs
@@ -58,7 +58,7 @@ impl Task for WitnessGeneratorQueueReporter {
                 .fri_witness_generator_dal()
                 .get_witness_jobs_stats(round)
                 .await;
-            for ((round, semantic_protocol_version), job_stats) in stats {
+            for (semantic_protocol_version, job_stats) in stats {
                 Self::emit_metrics_for_round(round, semantic_protocol_version, &job_stats);
             }
         }
diff --git a/prover/crates/bin/witness_generator/src/artifacts.rs b/prover/crates/bin/witness_generator/src/artifacts.rs
index f509d3b2f64a..7c444da047b2 100644
--- a/prover/crates/bin/witness_generator/src/artifacts.rs
+++ b/prover/crates/bin/witness_generator/src/artifacts.rs
@@ -6,45 +6,33 @@ use zksync_prover_dal::{ConnectionPool, Prover};
 
 #[derive(Debug)]
 pub(crate) struct AggregationBlobUrls {
-    pub aggregations_urls: String,
+    pub aggregation_urls: String,
     pub circuit_ids_and_urls: Vec<(u8, String)>,
 }
 
-#[derive(Debug)]
-pub(crate) struct SchedulerBlobUrls {
-    pub circuit_ids_and_urls: Vec<(u8, String)>,
-    pub closed_form_inputs_and_urls: Vec<(u8, String, usize)>,
-    pub scheduler_witness_url: String,
-}
-
-pub(crate) enum BlobUrls {
-    Url(String),
-    Aggregation(AggregationBlobUrls),
-    Scheduler(SchedulerBlobUrls),
-}
-
 #[async_trait]
 pub(crate) trait ArtifactsManager {
     type InputMetadata;
     type InputArtifacts;
     type OutputArtifacts;
+    type BlobUrls;
 
     async fn get_artifacts(
         metadata: &Self::InputMetadata,
         object_store: &dyn ObjectStore,
     ) -> anyhow::Result<Self::InputArtifacts>;
 
-    async fn save_artifacts(
+    async fn save_to_bucket(
         job_id: u32,
         artifacts: Self::OutputArtifacts,
         object_store: &dyn ObjectStore,
-    ) -> BlobUrls;
+    ) -> Self::BlobUrls;
 
-    async fn update_database(
+    async fn save_to_database(
         connection_pool: &ConnectionPool<Prover>,
         job_id: u32,
         started_at: Instant,
-        blob_urls: BlobUrls,
+        blob_urls: Self::BlobUrls,
         artifacts: Self::OutputArtifacts,
     ) -> anyhow::Result<()>;
 }
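Note: replacing the shared BlobUrls enum with an associated type moves the blob-URL shape into the type system, so the save_to_bucket -> save_to_database handoff no longer needs the run-time match / unreachable!() plumbing seen in the removed code. A condensed sketch of the idea (trait and types abbreviated, not the full interface):

    // Each implementor states its own blob-URL type statically.
    trait ArtifactsManagerSketch {
        type BlobUrls;
        fn save_to_bucket(&self) -> Self::BlobUrls;
        fn save_to_database(&self, blob_urls: Self::BlobUrls);
    }

    struct SchedulerLike;

    impl ArtifactsManagerSketch for SchedulerLike {
        type BlobUrls = String; // a single circuit URL, as for the scheduler round
        fn save_to_bucket(&self) -> String {
            "blob_url".to_owned()
        }
        fn save_to_database(&self, _blob_urls: String) {}
    }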
diff --git a/prover/crates/bin/witness_generator/src/basic_circuits/artifacts.rs b/prover/crates/bin/witness_generator/src/basic_circuits/artifacts.rs
index 3447659f8296..aa85d185e66b 100644
--- a/prover/crates/bin/witness_generator/src/basic_circuits/artifacts.rs
+++ b/prover/crates/bin/witness_generator/src/basic_circuits/artifacts.rs
@@ -8,7 +8,7 @@ use zksync_prover_fri_utils::get_recursive_layer_circuit_id_for_base_layer;
 use zksync_types::{basic_fri_types::AggregationRound, L1BatchNumber};
 
 use crate::{
-    artifacts::{ArtifactsManager, BlobUrls},
+    artifacts::ArtifactsManager,
     basic_circuits::{BasicCircuitArtifacts, BasicWitnessGenerator, BasicWitnessGeneratorJob},
     utils::SchedulerPartialInputWrapper,
 };
@@ -18,6 +18,7 @@ impl ArtifactsManager for BasicWitnessGenerator {
     type InputMetadata = L1BatchNumber;
     type InputArtifacts = BasicWitnessGeneratorJob;
     type OutputArtifacts = BasicCircuitArtifacts;
+    type BlobUrls = String;
 
     async fn get_artifacts(
         metadata: &Self::InputMetadata,
@@ -31,38 +32,31 @@ impl ArtifactsManager for BasicWitnessGenerator {
         })
     }
 
-    async fn save_artifacts(
+    async fn save_to_bucket(
         job_id: u32,
         artifacts: Self::OutputArtifacts,
         object_store: &dyn ObjectStore,
-    ) -> BlobUrls {
+    ) -> String {
         let aux_output_witness_wrapper = AuxOutputWitnessWrapper(artifacts.aux_output_witness);
         object_store
             .put(L1BatchNumber(job_id), &aux_output_witness_wrapper)
             .await
             .unwrap();
         let wrapper = SchedulerPartialInputWrapper(artifacts.scheduler_witness);
-        let url = object_store
+        object_store
             .put(L1BatchNumber(job_id), &wrapper)
             .await
-            .unwrap();
-
-        BlobUrls::Url(url)
+            .unwrap()
     }
 
     #[tracing::instrument(skip_all, fields(l1_batch = %job_id))]
-    async fn update_database(
+    async fn save_to_database(
         connection_pool: &ConnectionPool<Prover>,
         job_id: u32,
         started_at: Instant,
-        blob_urls: BlobUrls,
-        _artifacts: Self::OutputArtifacts,
+        blob_urls: String,
+        artifacts: Self::OutputArtifacts,
     ) -> anyhow::Result<()> {
-        let blob_urls = match blob_urls {
-            BlobUrls::Scheduler(blobs) => blobs,
-            _ => unreachable!(),
-        };
-
         let mut connection = connection_pool
             .connection()
             .await
@@ -79,7 +73,7 @@ impl ArtifactsManager for BasicWitnessGenerator {
             .fri_prover_jobs_dal()
             .insert_prover_jobs(
                 L1BatchNumber(job_id),
-                blob_urls.circuit_ids_and_urls,
+                artifacts.circuit_urls,
                 AggregationRound::BasicCircuits,
                 0,
                 protocol_version_id,
@@ -89,8 +83,8 @@ impl ArtifactsManager for BasicWitnessGenerator {
             .fri_witness_generator_dal()
             .create_aggregation_jobs(
                 L1BatchNumber(job_id),
-                &blob_urls.closed_form_inputs_and_urls,
-                &blob_urls.scheduler_witness_url,
+                &artifacts.queue_urls,
+                &blob_urls,
                 get_recursive_layer_circuit_id_for_base_layer,
                 protocol_version_id,
             )
diff --git a/prover/crates/bin/witness_generator/src/basic_circuits/job_processor.rs b/prover/crates/bin/witness_generator/src/basic_circuits/job_processor.rs
index 08732689e3a6..50e747b1ce1b 100644
--- a/prover/crates/bin/witness_generator/src/basic_circuits/job_processor.rs
+++ b/prover/crates/bin/witness_generator/src/basic_circuits/job_processor.rs
@@ -4,13 +4,15 @@ use anyhow::Context as _;
 use tracing::Instrument;
 use zksync_prover_dal::ProverDal;
 use zksync_prover_fri_types::{get_current_pod_name, AuxOutputWitnessWrapper};
+use zksync_prover_keystore::keystore::Keystore;
 use zksync_queued_job_processor::{async_trait, JobProcessor};
 use zksync_types::{basic_fri_types::AggregationRound, L1BatchNumber};
 
 use crate::{
-    artifacts::{ArtifactsManager, BlobUrls, SchedulerBlobUrls},
+    artifacts::ArtifactsManager,
     basic_circuits::{BasicCircuitArtifacts, BasicWitnessGenerator, BasicWitnessGeneratorJob},
     metrics::WITNESS_GENERATOR_METRICS,
+    witness_generator::WitnessGenerator,
 };
 
 #[async_trait]
@@ -35,19 +37,15 @@ impl JobProcessor for BasicWitnessGenerator {
         )
         .await
         {
-            Some(block_number) => {
-                tracing::info!(
-                    "Processing FRI basic witness-gen for block {}",
-                    block_number
-                );
-                let started_at = Instant::now();
-                let job = Self::get_artifacts(&block_number, &*self.object_store).await?;
-
-                WITNESS_GENERATOR_METRICS.blob_fetch_time[&AggregationRound::BasicCircuits.into()]
-                    .observe(started_at.elapsed());
-
-                Ok(Some((block_number, job)))
-            }
+            Some(block_number) => Ok(Some((
+                block_number,
+                <Self as WitnessGenerator>::prepare_job(
+                    block_number,
+                    &*self.object_store,
+                    Keystore::locate(), // todo: this should be removed
+                )
+                .await?,
+            ))),
             None => Ok(None),
         }
     }
@@ -73,11 +71,15 @@ impl JobProcessor for BasicWitnessGenerator {
         let max_circuits_in_flight = self.config.max_circuits_in_flight;
         tokio::spawn(async move {
             let block_number = job.block_number;
-            Ok(
-                Self::process_job_impl(object_store, job, started_at, max_circuits_in_flight)
-                    .instrument(tracing::info_span!("basic_circuit", %block_number))
-                    .await,
+            <Self as WitnessGenerator>::process_job(
+                job,
+                object_store,
+                Some(max_circuits_in_flight),
+                started_at,
             )
+            .instrument(tracing::info_span!("basic_circuit", %block_number))
+            .await
+            .map(Some)
         })
     }
 
@@ -92,8 +94,6 @@ impl JobProcessor for BasicWitnessGenerator {
             None => Ok(()),
             Some(artifacts) => {
                 let blob_started_at = Instant::now();
-                let circuit_urls = artifacts.circuit_urls.clone();
-                let queue_urls = artifacts.queue_urls.clone();
 
                 let aux_output_witness_wrapper =
                     AuxOutputWitnessWrapper(artifacts.aux_output_witness.clone());
@@ -105,26 +105,17 @@ impl JobProcessor for BasicWitnessGenerator {
                         .unwrap();
                 }
 
-                let scheduler_witness_url =
-                    match Self::save_artifacts(job_id.0, artifacts.clone(), &*self.object_store)
-                        .await
-                    {
-                        BlobUrls::Url(url) => url,
-                        _ => unreachable!(),
-                    };
+                let blob_urls =
+                    Self::save_to_bucket(job_id.0, artifacts.clone(), &*self.object_store).await;
 
                 WITNESS_GENERATOR_METRICS.blob_save_time[&AggregationRound::BasicCircuits.into()]
                     .observe(blob_started_at.elapsed());
 
-                Self::update_database(
+                Self::save_to_database(
                     &self.prover_connection_pool,
                     job_id.0,
                     started_at,
-                    BlobUrls::Scheduler(SchedulerBlobUrls {
-                        circuit_ids_and_urls: circuit_urls,
-                        closed_form_inputs_and_urls: queue_urls,
-                        scheduler_witness_url,
-                    }),
+                    blob_urls,
                     artifacts,
                 )
                 .await?;
diff --git a/prover/crates/bin/witness_generator/src/basic_circuits/mod.rs b/prover/crates/bin/witness_generator/src/basic_circuits/mod.rs
index c9755c333dad..e76ef180c526 100644
--- a/prover/crates/bin/witness_generator/src/basic_circuits/mod.rs
+++ b/prover/crates/bin/witness_generator/src/basic_circuits/mod.rs
@@ -5,6 +5,7 @@ use std::{
     time::Instant,
 };
 
+use async_trait::async_trait;
 use circuit_definitions::{
     circuit_definitions::base_layer::{ZkSyncBaseLayerCircuit, ZkSyncBaseLayerStorage},
     encodings::recursion_request::RecursionQueueSimulator,
@@ -35,12 +36,14 @@ use zksync_object_store::ObjectStore;
 use zksync_prover_dal::{ConnectionPool, Prover};
 use zksync_prover_fri_types::{keys::ClosedFormInputKey, CircuitAuxData};
 use zksync_prover_interface::inputs::WitnessInputData;
+use zksync_prover_keystore::keystore::Keystore;
 use zksync_system_constants::BOOTLOADER_ADDRESS;
 use zksync_types::{
     basic_fri_types::AggregationRound, protocol_version::ProtocolSemanticVersion, L1BatchNumber,
 };
 
 use crate::{
+    artifacts::ArtifactsManager,
     metrics::WITNESS_GENERATOR_METRICS,
     precalculated_merkle_paths_provider::PrecalculatedMerklePathsProvider,
     storage_oracle::StorageOracle,
@@ -49,6 +52,7 @@ use crate::{
         ClosedFormInputWrapper, KZG_TRUSTED_SETUP_FILE,
     },
     witness::WitnessStorage,
+    witness_generator::WitnessGenerator,
 };
 
 mod artifacts;
@@ -108,17 +112,24 @@ impl BasicWitnessGenerator {
             protocol_version,
         }
     }
+}
+
+#[async_trait]
+impl WitnessGenerator for BasicWitnessGenerator {
+    type Job = BasicWitnessGeneratorJob;
+    type Metadata = L1BatchNumber;
+    type Artifacts = BasicCircuitArtifacts;
 
-    async fn process_job_impl(
+    async fn process_job(
+        job: BasicWitnessGeneratorJob,
         object_store: Arc<dyn ObjectStore>,
-        basic_job: BasicWitnessGeneratorJob,
+        max_circuits_in_flight: Option<usize>,
         started_at: Instant,
-        max_circuits_in_flight: usize,
-    ) -> Option<BasicCircuitArtifacts> {
+    ) -> anyhow::Result<BasicCircuitArtifacts> {
         let BasicWitnessGeneratorJob {
             block_number,
             data: job,
-        } = basic_job;
+        } = job;
 
         tracing::info!(
             "Starting witness generation of type {:?} for block {}",
@@ -126,65 +137,43 @@ impl BasicWitnessGenerator {
             block_number.0
         );
 
-        Some(
-            process_basic_circuits_job(
-                object_store,
-                started_at,
-                block_number,
-                job,
-                max_circuits_in_flight,
-            )
-            .await,
+        let (circuit_urls, queue_urls, scheduler_witness, aux_output_witness) = generate_witness(
+            block_number,
+            object_store,
+            job,
+            max_circuits_in_flight.unwrap(),
         )
+        .await;
+        WITNESS_GENERATOR_METRICS.witness_generation_time[&AggregationRound::BasicCircuits.into()]
+            .observe(started_at.elapsed());
+        tracing::info!(
+            "Witness generation for block {} is complete in {:?}",
+            block_number.0,
+            started_at.elapsed()
+        );
+
+        Ok(BasicCircuitArtifacts {
+            circuit_urls,
+            queue_urls,
+            scheduler_witness,
+            aux_output_witness,
+        })
     }
-}
 
-#[tracing::instrument(skip_all, fields(l1_batch = %block_number))]
-pub(super) async fn process_basic_circuits_job(
-    object_store: Arc<dyn ObjectStore>,
-    started_at: Instant,
-    block_number: L1BatchNumber,
-    job: WitnessInputData,
-    max_circuits_in_flight: usize,
-) -> BasicCircuitArtifacts {
-    let (circuit_urls, queue_urls, scheduler_witness, aux_output_witness) =
-        generate_witness(block_number, object_store, job, max_circuits_in_flight).await;
-    WITNESS_GENERATOR_METRICS.witness_generation_time[&AggregationRound::BasicCircuits.into()]
-        .observe(started_at.elapsed());
-    tracing::info!(
-        "Witness generation for block {} is complete in {:?}",
-        block_number.0,
-        started_at.elapsed()
-    );
+    async fn prepare_job(
+        metadata: L1BatchNumber,
+        object_store: &dyn ObjectStore,
+        _keystore: Keystore,
+    ) -> anyhow::Result<Self::Job> {
+        tracing::info!("Processing FRI basic witness-gen for block {}", metadata.0);
+        let started_at = Instant::now();
+        let job = Self::get_artifacts(&metadata, object_store).await?;
 
-    BasicCircuitArtifacts {
-        circuit_urls,
-        queue_urls,
-        scheduler_witness,
-        aux_output_witness,
-    }
-}
+        WITNESS_GENERATOR_METRICS.blob_fetch_time[&AggregationRound::BasicCircuits.into()]
+            .observe(started_at.elapsed());
 
-#[tracing::instrument(skip_all, fields(l1_batch = %block_number, circuit_id = %circuit_id))]
-async fn save_recursion_queue(
-    block_number: L1BatchNumber,
-    circuit_id: u8,
-    recursion_queue_simulator: RecursionQueueSimulator<GoldilocksField>,
-    closed_form_inputs: Vec<ClosedFormInputCompactFormWitness<GoldilocksField>>,
-    object_store: Arc<dyn ObjectStore>,
-) -> (u8, String, usize) {
-    let key = ClosedFormInputKey {
-        block_number,
-        circuit_id,
-    };
-    let basic_circuit_count = closed_form_inputs.len();
-    let closed_form_inputs = closed_form_inputs
-        .iter()
-        .map(|x| ZkSyncBaseLayerStorage::from_inner(circuit_id, x.clone()))
-        .collect();
-    let wrapper = ClosedFormInputWrapper(closed_form_inputs, recursion_queue_simulator);
-    let blob_url = object_store.put(key, &wrapper).await.unwrap();
-    (circuit_id, blob_url, basic_circuit_count)
+        Ok(job)
+    }
 }
@@ -464,3 +453,25 @@ async fn generate_witness(
         block_aux_witness,
     )
 }
+
+#[tracing::instrument(skip_all, fields(l1_batch = %block_number, circuit_id = %circuit_id))]
+async fn save_recursion_queue(
+    block_number: L1BatchNumber,
+    circuit_id: u8,
+    recursion_queue_simulator: RecursionQueueSimulator<GoldilocksField>,
+    closed_form_inputs: Vec<ClosedFormInputCompactFormWitness<GoldilocksField>>,
+    object_store: Arc<dyn ObjectStore>,
+) -> (u8, String, usize) {
+    let key = ClosedFormInputKey {
+        block_number,
+        circuit_id,
+    };
+    let basic_circuit_count = closed_form_inputs.len();
+    let closed_form_inputs = closed_form_inputs
+        .iter()
+        .map(|x| ZkSyncBaseLayerStorage::from_inner(circuit_id, x.clone()))
+        .collect();
+    let wrapper = ClosedFormInputWrapper(closed_form_inputs, recursion_queue_simulator);
+    let blob_url = object_store.put(key, &wrapper).await.unwrap();
+    (circuit_id, blob_url, basic_circuit_count)
+}
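Note: with this refactor every aggregation round is driven through the same two WitnessGenerator calls. A hedged sketch of the generic sequence a job processor now performs; the helper below is not part of the PR, and Keystore::locate() mirrors the basic-circuits TODO above:

    use std::{sync::Arc, time::Instant};

    use zksync_object_store::ObjectStore;
    use zksync_prover_keystore::keystore::Keystore;
    use zksync_witness_generator::witness_generator::WitnessGenerator;

    // Illustrative driver: prepare a job from its metadata, then process it.
    async fn run_once<W: WitnessGenerator>(
        metadata: W::Metadata,
        object_store: Arc<dyn ObjectStore>,
        keystore: Keystore,
        max_circuits_in_flight: Option<usize>,
    ) -> anyhow::Result<W::Artifacts> {
        let job = W::prepare_job(metadata, &*object_store, keystore).await?;
        W::process_job(job, object_store, max_circuits_in_flight, Instant::now()).await
    }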
diff --git a/prover/crates/bin/witness_generator/src/leaf_aggregation/artifacts.rs b/prover/crates/bin/witness_generator/src/leaf_aggregation/artifacts.rs
index a94587d00ec6..c83997e36b80 100644
--- a/prover/crates/bin/witness_generator/src/leaf_aggregation/artifacts.rs
+++ b/prover/crates/bin/witness_generator/src/leaf_aggregation/artifacts.rs
@@ -3,15 +3,15 @@ use std::time::Instant;
 use async_trait::async_trait;
 use zksync_object_store::ObjectStore;
 use zksync_prover_dal::{ConnectionPool, Prover, ProverDal};
-use zksync_prover_fri_types::keys::ClosedFormInputKey;
+use zksync_prover_fri_types::keys::{AggregationsKey, ClosedFormInputKey};
 use zksync_prover_fri_utils::get_recursive_layer_circuit_id_for_base_layer;
 use zksync_types::{basic_fri_types::AggregationRound, prover_dal::LeafAggregationJobMetadata};
 
 use crate::{
-    artifacts::{AggregationBlobUrls, ArtifactsManager, BlobUrls},
+    artifacts::{AggregationBlobUrls, ArtifactsManager},
     leaf_aggregation::{LeafAggregationArtifacts, LeafAggregationWitnessGenerator},
     metrics::WITNESS_GENERATOR_METRICS,
-    utils::{save_node_aggregations_artifacts, ClosedFormInputWrapper},
+    utils::{AggregationWrapper, ClosedFormInputWrapper},
 };
 
 #[async_trait]
@@ -19,6 +19,7 @@ impl ArtifactsManager for LeafAggregationWitnessGenerator {
     type InputMetadata = LeafAggregationJobMetadata;
     type InputArtifacts = ClosedFormInputWrapper;
     type OutputArtifacts = LeafAggregationArtifacts;
+    type BlobUrls = AggregationBlobUrls;
 
     async fn get_artifacts(
         metadata: &Self::InputMetadata,
@@ -41,38 +42,40 @@ impl ArtifactsManager for LeafAggregationWitnessGenerator {
         skip_all,
         fields(l1_batch = %artifacts.block_number, circuit_id = %artifacts.circuit_id)
     )]
-    async fn save_artifacts(
+    async fn save_to_bucket(
         _job_id: u32,
         artifacts: Self::OutputArtifacts,
         object_store: &dyn ObjectStore,
-    ) -> BlobUrls {
+    ) -> AggregationBlobUrls {
         let started_at = Instant::now();
-        let aggregations_urls = save_node_aggregations_artifacts(
-            artifacts.block_number,
-            get_recursive_layer_circuit_id_for_base_layer(artifacts.circuit_id),
-            0,
-            artifacts.aggregations,
-            object_store,
-        )
-        .await;
+        let key = AggregationsKey {
+            block_number: artifacts.block_number,
+            circuit_id: get_recursive_layer_circuit_id_for_base_layer(artifacts.circuit_id),
+            depth: 0,
+        };
+        let aggregation_urls = object_store
+            .put(key, &AggregationWrapper(artifacts.aggregations))
+            .await
+            .unwrap();
+
         WITNESS_GENERATOR_METRICS.blob_save_time[&AggregationRound::LeafAggregation.into()]
             .observe(started_at.elapsed());
 
-        BlobUrls::Aggregation(AggregationBlobUrls {
-            aggregations_urls,
+        AggregationBlobUrls {
+            aggregation_urls,
             circuit_ids_and_urls: artifacts.circuit_ids_and_urls,
-        })
+        }
     }
 
     #[tracing::instrument(
         skip_all,
         fields(l1_batch = %job_id)
     )]
-    async fn update_database(
+    async fn save_to_database(
         connection_pool: &ConnectionPool<Prover>,
         job_id: u32,
         started_at: Instant,
-        blob_urls: BlobUrls,
+        blob_urls: AggregationBlobUrls,
         artifacts: Self::OutputArtifacts,
     ) -> anyhow::Result<()> {
         tracing::info!(
@@ -82,11 +85,6 @@ impl ArtifactsManager for LeafAggregationWitnessGenerator {
             artifacts.circuit_id,
         );
 
-        let blob_urls = match blob_urls {
-            BlobUrls::Aggregation(blob_urls) => blob_urls,
-            _ => panic!("Unexpected blob urls type"),
-        };
-
         let mut prover_connection = connection_pool.connection().await.unwrap();
         let mut transaction = prover_connection.start_transaction().await.unwrap();
         let number_of_dependent_jobs = blob_urls.circuit_ids_and_urls.len();
@@ -124,7 +122,7 @@ impl ArtifactsManager for LeafAggregationWitnessGenerator {
                 get_recursive_layer_circuit_id_for_base_layer(artifacts.circuit_id),
                 number_of_dependent_jobs,
                 0,
-                blob_urls.aggregations_urls,
+                blob_urls.aggregation_urls,
             )
             .await;
         tracing::info!(
diff --git a/prover/crates/bin/witness_generator/src/leaf_aggregation/job_processor.rs b/prover/crates/bin/witness_generator/src/leaf_aggregation/job_processor.rs
index e032084151eb..440636b85fae 100644
--- a/prover/crates/bin/witness_generator/src/leaf_aggregation/job_processor.rs
+++ b/prover/crates/bin/witness_generator/src/leaf_aggregation/job_processor.rs
@@ -10,10 +10,11 @@ use zksync_types::basic_fri_types::AggregationRound;
 use crate::{
     artifacts::ArtifactsManager,
     leaf_aggregation::{
-        prepare_leaf_aggregation_job, LeafAggregationArtifacts, LeafAggregationWitnessGenerator,
+        LeafAggregationArtifacts, LeafAggregationWitnessGenerator,
         LeafAggregationWitnessGeneratorJob,
     },
     metrics::WITNESS_GENERATOR_METRICS,
+    witness_generator::WitnessGenerator,
 };
 
 #[async_trait]
@@ -37,9 +38,13 @@ impl JobProcessor for LeafAggregationWitnessGenerator {
         tracing::info!("Processing leaf aggregation job {:?}", metadata.id);
         Ok(Some((
             metadata.id,
-            prepare_leaf_aggregation_job(metadata, &*self.object_store, self.keystore.clone())
-                .await
-                .context("prepare_leaf_aggregation_job()")?,
+            <Self as WitnessGenerator>::prepare_job(
+                metadata,
+                &*self.object_store,
+                self.keystore.clone(),
+            )
+            .await
+            .context("prepare_leaf_aggregation_job()")?,
         )))
     }
 
@@ -63,7 +68,13 @@ impl JobProcessor for LeafAggregationWitnessGenerator {
         let object_store = self.object_store.clone();
         let max_circuits_in_flight = self.config.max_circuits_in_flight;
         tokio::spawn(async move {
-            Ok(Self::process_job_impl(job, started_at, object_store, max_circuits_in_flight).await)
+            <Self as WitnessGenerator>::process_job(
+                job,
+                object_store,
+                Some(max_circuits_in_flight),
+                started_at,
+            )
+            .await
         })
     }
 
@@ -83,7 +94,7 @@ impl JobProcessor for LeafAggregationWitnessGenerator {
 
         let blob_save_started_at = Instant::now();
 
-        let blob_urls = Self::save_artifacts(job_id, artifacts.clone(), &*self.object_store).await;
+        let blob_urls = Self::save_to_bucket(job_id, artifacts.clone(), &*self.object_store).await;
 
         WITNESS_GENERATOR_METRICS.blob_save_time[&AggregationRound::LeafAggregation.into()]
             .observe(blob_save_started_at.elapsed());
@@ -93,7 +104,7 @@ impl JobProcessor for LeafAggregationWitnessGenerator {
             block_number.0,
             circuit_id,
         );
-        Self::update_database(
+        Self::save_to_database(
             &self.prover_connection_pool,
             job_id,
             started_at,
diff --git a/prover/crates/bin/witness_generator/src/leaf_aggregation/mod.rs b/prover/crates/bin/witness_generator/src/leaf_aggregation/mod.rs
index d669a4cc97e3..960843259c32 100644
--- a/prover/crates/bin/witness_generator/src/leaf_aggregation/mod.rs
+++ b/prover/crates/bin/witness_generator/src/leaf_aggregation/mod.rs
@@ -1,6 +1,7 @@
 use std::{sync::Arc, time::Instant};
 
 use anyhow::Context as _;
+use async_trait::async_trait;
 use circuit_definitions::circuit_definitions::recursion_layer::base_circuit_type_into_recursive_leaf_circuit_type;
 use tokio::sync::Semaphore;
 use zkevm_test_harness::{
@@ -36,6 +37,7 @@ use crate::{
         load_proofs_for_job_ids, save_recursive_layer_prover_input_artifacts,
         ClosedFormInputWrapper,
     },
+    witness_generator::WitnessGenerator,
 };
 
 mod artifacts;
@@ -85,69 +87,6 @@ impl LeafAggregationWitnessGenerator {
             keystore,
         }
     }
-
-    #[tracing::instrument(
-        skip_all,
-        fields(l1_batch = %leaf_job.block_number, circuit_id = %leaf_job.circuit_id)
-    )]
-    pub async fn process_job_impl(
-        leaf_job: LeafAggregationWitnessGeneratorJob,
-        started_at: Instant,
-        object_store: Arc<dyn ObjectStore>,
-        max_circuits_in_flight: usize,
-    ) -> LeafAggregationArtifacts {
-        tracing::info!(
-            "Starting witness generation of type {:?} for block {} with circuit {}",
-            AggregationRound::LeafAggregation,
-            leaf_job.block_number.0,
-            leaf_job.circuit_id,
-        );
-        process_leaf_aggregation_job(started_at, leaf_job, object_store, max_circuits_in_flight)
-            .await
-    }
-}
-
-#[tracing::instrument(
-    skip_all,
-    fields(l1_batch = %metadata.block_number, circuit_id = %metadata.circuit_id)
-)]
-pub async fn prepare_leaf_aggregation_job(
-    metadata: LeafAggregationJobMetadata,
-    object_store: &dyn ObjectStore,
-    keystore: Keystore,
-) -> anyhow::Result<LeafAggregationWitnessGeneratorJob> {
-    let started_at = Instant::now();
-    let closed_form_input =
-        LeafAggregationWitnessGenerator::get_artifacts(&metadata, object_store).await?;
-
-    WITNESS_GENERATOR_METRICS.blob_fetch_time[&AggregationRound::LeafAggregation.into()]
-        .observe(started_at.elapsed());
-
-    let started_at = Instant::now();
-    let base_vk = keystore
-        .load_base_layer_verification_key(metadata.circuit_id)
-        .context("get_base_layer_vk_for_circuit_type()")?;
-
-    let leaf_circuit_id = base_circuit_type_into_recursive_leaf_circuit_type(
-        BaseLayerCircuitType::from_numeric_value(metadata.circuit_id),
-    ) as u8;
-
-    let leaf_vk = keystore
-        .load_recursive_layer_verification_key(leaf_circuit_id)
-        .context("get_recursive_layer_vk_for_circuit_type()")?;
-    let leaf_params = compute_leaf_params(metadata.circuit_id, base_vk.clone(), leaf_vk);
-
-    WITNESS_GENERATOR_METRICS.prepare_job_time[&AggregationRound::LeafAggregation.into()]
-        .observe(started_at.elapsed());
-
-    Ok(LeafAggregationWitnessGeneratorJob {
-        circuit_id: metadata.circuit_id,
-        block_number: metadata.block_number,
-        closed_form_inputs: closed_form_input,
-        proofs_ids: metadata.prover_job_ids_for_proofs,
-        base_vk,
-        leaf_params,
-    })
 }
 
 #[tracing::instrument(
@@ -261,3 +200,78 @@ pub async fn process_leaf_aggregation_job(
         closed_form_inputs: job.closed_form_inputs.0,
     }
 }
+
+#[async_trait]
+impl WitnessGenerator for LeafAggregationWitnessGenerator {
+    type Job = LeafAggregationWitnessGeneratorJob;
+    type Metadata = LeafAggregationJobMetadata;
+    type Artifacts = LeafAggregationArtifacts;
+
+    #[tracing::instrument(
+        skip_all,
+        fields(l1_batch = %job.block_number, circuit_id = %job.circuit_id)
+    )]
+    async fn process_job(
+        job: LeafAggregationWitnessGeneratorJob,
+        object_store: Arc<dyn ObjectStore>,
+        max_circuits_in_flight: Option<usize>,
+        started_at: Instant,
+    ) -> anyhow::Result<LeafAggregationArtifacts> {
+        tracing::info!(
+            "Starting witness generation of type {:?} for block {} with circuit {}",
+            AggregationRound::LeafAggregation,
+            job.block_number.0,
+            job.circuit_id,
+        );
+        Ok(process_leaf_aggregation_job(
+            started_at,
+            job,
+            object_store,
+            max_circuits_in_flight.unwrap(),
+        )
+        .await)
+    }
+
+    #[tracing::instrument(
+        skip_all,
+        fields(l1_batch = %metadata.block_number, circuit_id = %metadata.circuit_id)
+    )]
+    async fn prepare_job(
+        metadata: LeafAggregationJobMetadata,
+        object_store: &dyn ObjectStore,
+        keystore: Keystore,
+    ) -> anyhow::Result<Self::Job> {
+        let started_at = Instant::now();
+        let closed_form_input =
+            LeafAggregationWitnessGenerator::get_artifacts(&metadata, object_store).await?;
+
+        WITNESS_GENERATOR_METRICS.blob_fetch_time[&AggregationRound::LeafAggregation.into()]
+            .observe(started_at.elapsed());
+
+        let started_at = Instant::now();
+        let base_vk = keystore
+            .load_base_layer_verification_key(metadata.circuit_id)
+            .context("get_base_layer_vk_for_circuit_type()")?;
+
+        let leaf_circuit_id = base_circuit_type_into_recursive_leaf_circuit_type(
+            BaseLayerCircuitType::from_numeric_value(metadata.circuit_id),
+        ) as u8;
+
+        let leaf_vk = keystore
+            .load_recursive_layer_verification_key(leaf_circuit_id)
+            .context("get_recursive_layer_vk_for_circuit_type()")?;
+        let leaf_params = compute_leaf_params(metadata.circuit_id, base_vk.clone(), leaf_vk);
+
+        WITNESS_GENERATOR_METRICS.prepare_job_time[&AggregationRound::LeafAggregation.into()]
+            .observe(started_at.elapsed());
+
+        Ok(LeafAggregationWitnessGeneratorJob {
+            circuit_id: metadata.circuit_id,
+            block_number: metadata.block_number,
+            closed_form_inputs: closed_form_input,
+            proofs_ids: metadata.prover_job_ids_for_proofs,
+            base_vk,
+            leaf_params,
+        })
+    }
+}
diff --git a/prover/crates/bin/witness_generator/src/lib.rs b/prover/crates/bin/witness_generator/src/lib.rs
index c0ac9718c6ee..b24b548a49ba 100644
--- a/prover/crates/bin/witness_generator/src/lib.rs
+++ b/prover/crates/bin/witness_generator/src/lib.rs
@@ -14,3 +14,4 @@ mod storage_oracle;
 mod tests;
 pub mod utils;
 mod witness;
+pub mod witness_generator;
diff --git a/prover/crates/bin/witness_generator/src/node_aggregation/artifacts.rs b/prover/crates/bin/witness_generator/src/node_aggregation/artifacts.rs
index 245027f0d677..09f01899bf3c 100644
--- a/prover/crates/bin/witness_generator/src/node_aggregation/artifacts.rs
+++ b/prover/crates/bin/witness_generator/src/node_aggregation/artifacts.rs
@@ -7,10 +7,10 @@ use zksync_prover_fri_types::keys::AggregationsKey;
 use zksync_types::{basic_fri_types::AggregationRound, prover_dal::NodeAggregationJobMetadata};
 
 use crate::{
-    artifacts::{AggregationBlobUrls, ArtifactsManager, BlobUrls},
+    artifacts::{AggregationBlobUrls, ArtifactsManager},
     metrics::WITNESS_GENERATOR_METRICS,
     node_aggregation::{NodeAggregationArtifacts, NodeAggregationWitnessGenerator},
-    utils::{save_node_aggregations_artifacts, AggregationWrapper},
+    utils::AggregationWrapper,
 };
 
 #[async_trait]
@@ -18,6 +18,7 @@ impl ArtifactsManager for NodeAggregationWitnessGenerator {
     type InputMetadata = NodeAggregationJobMetadata;
     type InputArtifacts = AggregationWrapper;
     type OutputArtifacts = NodeAggregationArtifacts;
+    type BlobUrls = AggregationBlobUrls;
 
     #[tracing::instrument(
         skip_all,
@@ -46,46 +47,43 @@ impl ArtifactsManager for NodeAggregationWitnessGenerator {
         skip_all,
         fields(l1_batch = %artifacts.block_number, circuit_id = %artifacts.circuit_id)
    )]
-    async fn save_artifacts(
+    async fn save_to_bucket(
         _job_id: u32,
         artifacts: Self::OutputArtifacts,
         object_store: &dyn ObjectStore,
-    ) -> BlobUrls {
+    ) -> AggregationBlobUrls {
         let started_at = Instant::now();
-        let aggregations_urls = save_node_aggregations_artifacts(
-            artifacts.block_number,
-            artifacts.circuit_id,
-            artifacts.depth,
-            artifacts.next_aggregations,
-            object_store,
-        )
-        .await;
+        let key = AggregationsKey {
+            block_number: artifacts.block_number,
+            circuit_id: artifacts.circuit_id,
+            depth: artifacts.depth,
+        };
+        let aggregation_urls = object_store
+            .put(key, &AggregationWrapper(artifacts.next_aggregations))
+            .await
+            .unwrap();
 
         WITNESS_GENERATOR_METRICS.blob_save_time[&AggregationRound::NodeAggregation.into()]
             .observe(started_at.elapsed());
 
-        BlobUrls::Aggregation(AggregationBlobUrls {
-            aggregations_urls,
+        AggregationBlobUrls {
+            aggregation_urls,
             circuit_ids_and_urls: artifacts.recursive_circuit_ids_and_urls,
-        })
+        }
     }
 
     #[tracing::instrument(
         skip_all,
         fields(l1_batch = % job_id)
     )]
-    async fn update_database(
+    async fn save_to_database(
         connection_pool: &ConnectionPool<Prover>,
         job_id: u32,
         started_at: Instant,
-        blob_urls: BlobUrls,
+        blob_urls: AggregationBlobUrls,
         artifacts: Self::OutputArtifacts,
     ) -> anyhow::Result<()> {
         let mut prover_connection = connection_pool.connection().await.unwrap();
-        let blob_urls = match blob_urls {
-            BlobUrls::Aggregation(blobs) => blobs,
-            _ => unreachable!(),
-        };
         let mut transaction = prover_connection.start_transaction().await.unwrap();
         let dependent_jobs = blob_urls.circuit_ids_and_urls.len();
         let protocol_version_id = transaction
@@ -111,7 +109,7 @@ impl ArtifactsManager for NodeAggregationWitnessGenerator {
                 artifacts.circuit_id,
                 Some(dependent_jobs as i32),
                 artifacts.depth,
-                &blob_urls.aggregations_urls,
+                &blob_urls.aggregation_urls,
                 protocol_version_id,
             )
             .await;
diff --git a/prover/crates/bin/witness_generator/src/node_aggregation/job_processor.rs b/prover/crates/bin/witness_generator/src/node_aggregation/job_processor.rs
index a015462cd6fe..0f66c988c10d 100644
--- a/prover/crates/bin/witness_generator/src/node_aggregation/job_processor.rs
+++ b/prover/crates/bin/witness_generator/src/node_aggregation/job_processor.rs
@@ -11,9 +11,10 @@ use crate::{
     artifacts::ArtifactsManager,
     metrics::WITNESS_GENERATOR_METRICS,
     node_aggregation::{
-        prepare_job, NodeAggregationArtifacts, NodeAggregationWitnessGenerator,
+        NodeAggregationArtifacts, NodeAggregationWitnessGenerator,
         NodeAggregationWitnessGeneratorJob,
     },
+    witness_generator::WitnessGenerator,
 };
 
 #[async_trait]
@@ -37,9 +38,13 @@ impl JobProcessor for NodeAggregationWitnessGenerator {
         tracing::info!("Processing node aggregation job {:?}", metadata.id);
         Ok(Some((
             metadata.id,
-            prepare_job(metadata, &*self.object_store, self.keystore.clone())
-                .await
-                .context("prepare_job()")?,
+            <Self as WitnessGenerator>::prepare_job(
+                metadata,
+                &*self.object_store,
+                self.keystore.clone(),
+            )
+            .await
+            .context("prepare_job()")?,
         )))
     }
 
@@ -63,7 +68,13 @@ impl JobProcessor for NodeAggregationWitnessGenerator {
         let object_store = self.object_store.clone();
         let max_circuits_in_flight = self.config.max_circuits_in_flight;
         tokio::spawn(async move {
-            Ok(Self::process_job_impl(job, started_at, object_store, max_circuits_in_flight).await)
+            <Self as WitnessGenerator>::process_job(
+                job,
+                object_store,
+                Some(max_circuits_in_flight),
+                started_at,
+            )
+            .await
         })
     }
 
@@ -79,12 +90,12 @@ impl JobProcessor for NodeAggregationWitnessGenerator {
     ) -> anyhow::Result<()> {
         let blob_save_started_at = Instant::now();
 
-        let blob_urls = Self::save_artifacts(job_id, artifacts.clone(), &*self.object_store).await;
+        let blob_urls = Self::save_to_bucket(job_id, artifacts.clone(), &*self.object_store).await;
 
         WITNESS_GENERATOR_METRICS.blob_save_time[&AggregationRound::NodeAggregation.into()]
             .observe(blob_save_started_at.elapsed());
 
-        Self::update_database(
+        Self::save_to_database(
             &self.prover_connection_pool,
             job_id,
             started_at,
diff --git a/prover/crates/bin/witness_generator/src/node_aggregation/mod.rs b/prover/crates/bin/witness_generator/src/node_aggregation/mod.rs
index 047caa363a89..f2c9a6fb8919 100644
--- a/prover/crates/bin/witness_generator/src/node_aggregation/mod.rs
+++ b/prover/crates/bin/witness_generator/src/node_aggregation/mod.rs
@@ -1,6 +1,7 @@
 use std::{sync::Arc, time::Instant};
 
 use anyhow::Context as _;
+use async_trait::async_trait;
 use circuit_definitions::circuit_definitions::recursion_layer::RECURSION_ARITY;
 use tokio::sync::Semaphore;
 use zkevm_test_harness::witness::recursive_aggregation::{
@@ -30,6 +31,7 @@ use crate::{
     artifacts::ArtifactsManager,
     metrics::WITNESS_GENERATOR_METRICS,
     utils::{load_proofs_for_job_ids, save_recursive_layer_prover_input_artifacts},
+    witness_generator::WitnessGenerator,
 };
 
 mod artifacts;
@@ -81,17 +83,24 @@ impl NodeAggregationWitnessGenerator {
             keystore,
         }
     }
+}
+
+#[async_trait]
+impl WitnessGenerator for NodeAggregationWitnessGenerator {
+    type Job = NodeAggregationWitnessGeneratorJob;
+    type Metadata = NodeAggregationJobMetadata;
+    type Artifacts = NodeAggregationArtifacts;
 
     #[tracing::instrument(
         skip_all,
         fields(l1_batch = % job.block_number, circuit_id = % job.circuit_id)
     )]
-    pub async fn process_job_impl(
+    async fn process_job(
         job: NodeAggregationWitnessGeneratorJob,
-        started_at: Instant,
         object_store: Arc<dyn ObjectStore>,
-        max_circuits_in_flight: usize,
-    ) -> NodeAggregationArtifacts {
+        max_circuits_in_flight: Option<usize>,
+        started_at: Instant,
+    ) -> anyhow::Result<NodeAggregationArtifacts> {
         let node_vk_commitment = compute_node_vk_commitment(job.node_vk.clone());
         tracing::info!(
             "Starting witness generation of type {:?} for block {} circuit id {} depth {}",
@@ -117,7 +126,7 @@ impl NodeAggregationWitnessGenerator {
             proofs_ids.len()
         );
 
-        let semaphore = Arc::new(Semaphore::new(max_circuits_in_flight));
+        let semaphore = Arc::new(Semaphore::new(max_circuits_in_flight.unwrap()));
 
         let mut handles = vec![];
         for (circuit_idx, (chunk, proofs_ids_for_chunk)) in job
@@ -205,52 +214,54 @@ impl NodeAggregationWitnessGenerator {
             started_at.elapsed(),
         );
 
-        NodeAggregationArtifacts {
+        Ok(NodeAggregationArtifacts {
             circuit_id: job.circuit_id,
             block_number: job.block_number,
             depth: job.depth + 1,
             next_aggregations,
             recursive_circuit_ids_and_urls,
-        }
+        })
     }
-}
 
-#[tracing::instrument(
-    skip_all,
-    fields(l1_batch = % metadata.block_number, circuit_id = % metadata.circuit_id)
-)]
-pub async fn prepare_job(
-    metadata: NodeAggregationJobMetadata,
-    object_store: &dyn ObjectStore,
-    keystore: Keystore,
-) -> anyhow::Result<NodeAggregationWitnessGeneratorJob> {
-    let started_at = Instant::now();
-    let artifacts = NodeAggregationWitnessGenerator::get_artifacts(&metadata, object_store).await?;
+    #[tracing::instrument(
+        skip_all,
+        fields(l1_batch = % metadata.block_number, circuit_id = % metadata.circuit_id)
+    )]
+    async fn prepare_job(
+        metadata: Self::Metadata,
+        object_store: &dyn ObjectStore,
+        keystore: Keystore,
+    ) -> anyhow::Result<Self::Job> {
+        let started_at = Instant::now();
+        let artifacts =
+            NodeAggregationWitnessGenerator::get_artifacts(&metadata, object_store).await?;
 
-    WITNESS_GENERATOR_METRICS.blob_fetch_time[&AggregationRound::NodeAggregation.into()]
-        .observe(started_at.elapsed());
+        WITNESS_GENERATOR_METRICS.blob_fetch_time[&AggregationRound::NodeAggregation.into()]
+            .observe(started_at.elapsed());
 
-    let started_at = Instant::now();
-    let leaf_vk = keystore
-        .load_recursive_layer_verification_key(metadata.circuit_id)
-        .context("get_recursive_layer_vk_for_circuit_type")?;
-    let node_vk = keystore
-        .load_recursive_layer_verification_key(
-            ZkSyncRecursionLayerStorageType::NodeLayerCircuit as u8,
-        )
-        .context("get_recursive_layer_vk_for_circuit_type()")?;
+        let started_at = Instant::now();
+        let leaf_vk = keystore
+            .load_recursive_layer_verification_key(metadata.circuit_id)
+            .context("get_recursive_layer_vk_for_circuit_type")?;
+        let node_vk = keystore
+            .load_recursive_layer_verification_key(
+                ZkSyncRecursionLayerStorageType::NodeLayerCircuit as u8,
+            )
+            .context("get_recursive_layer_vk_for_circuit_type()")?;
 
-    WITNESS_GENERATOR_METRICS.prepare_job_time[&AggregationRound::NodeAggregation.into()]
-        .observe(started_at.elapsed());
+        WITNESS_GENERATOR_METRICS.prepare_job_time[&AggregationRound::NodeAggregation.into()]
+            .observe(started_at.elapsed());
 
-    Ok(NodeAggregationWitnessGeneratorJob {
-        circuit_id: metadata.circuit_id,
-        block_number: metadata.block_number,
-        depth: metadata.depth,
-        aggregations: artifacts.0,
-        proofs_ids: metadata.prover_job_ids_for_proofs,
-        leaf_vk,
-        node_vk,
-        all_leafs_layer_params: get_leaf_vk_params(&keystore).context("get_leaf_vk_params()")?,
-    })
+        Ok(NodeAggregationWitnessGeneratorJob {
+            circuit_id: metadata.circuit_id,
+            block_number: metadata.block_number,
+            depth: metadata.depth,
+            aggregations: artifacts.0,
+            proofs_ids: metadata.prover_job_ids_for_proofs,
+            leaf_vk,
+            node_vk,
+            all_leafs_layer_params: get_leaf_vk_params(&keystore)
+                .context("get_leaf_vk_params()")?,
+        })
+    }
 }
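Note: max_circuits_in_flight becomes Option<usize> because the recursion-tip and scheduler rounds process a single circuit and pass None, while the basic/leaf/node rounds cap circuit concurrency with a semaphore (hence the .unwrap() on their path). A standalone sketch of that capping pattern:

    use std::sync::Arc;

    use tokio::sync::Semaphore;

    // Run one task per job, but at most `max_in_flight` bodies at a time.
    async fn for_each_capped(jobs: Vec<u32>, max_in_flight: usize) {
        let semaphore = Arc::new(Semaphore::new(max_in_flight));
        let mut handles = vec![];
        for job in jobs {
            let semaphore = semaphore.clone();
            handles.push(tokio::spawn(async move {
                // The permit is held for the whole body, bounding concurrency.
                let _permit = semaphore.acquire().await.expect("semaphore closed");
                let _ = job; // ... prove one circuit here ...
            }));
        }
        for handle in handles {
            handle.await.expect("task panicked");
        }
    }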
diff --git a/prover/crates/bin/witness_generator/src/recursion_tip/artifacts.rs b/prover/crates/bin/witness_generator/src/recursion_tip/artifacts.rs
index 8379fcf9f933..b61aa948100b 100644
--- a/prover/crates/bin/witness_generator/src/recursion_tip/artifacts.rs
+++ b/prover/crates/bin/witness_generator/src/recursion_tip/artifacts.rs
@@ -12,7 +12,7 @@ use zksync_prover_fri_types::{keys::FriCircuitKey, CircuitWrapper, FriProofWrapp
 use zksync_types::{basic_fri_types::AggregationRound, L1BatchNumber};
 
 use crate::{
-    artifacts::{ArtifactsManager, BlobUrls},
+    artifacts::ArtifactsManager,
     recursion_tip::{RecursionTipArtifacts, RecursionTipWitnessGenerator},
 };
 
@@ -21,6 +21,7 @@ impl ArtifactsManager for RecursionTipWitnessGenerator {
     type InputMetadata = Vec<(u8, u32)>;
     type InputArtifacts = Vec<ZkSyncRecursionLayerProof>;
     type OutputArtifacts = RecursionTipArtifacts;
+    type BlobUrls = String;
 
     /// Loads all proofs for a given recursion tip's job ids.
     /// Note that recursion tip may not have proofs for some specific circuits (because the batch didn't contain them).
@@ -73,11 +74,11 @@ impl ArtifactsManager for RecursionTipWitnessGenerator {
         Ok(proofs)
     }
 
-    async fn save_artifacts(
+    async fn save_to_bucket(
         job_id: u32,
         artifacts: Self::OutputArtifacts,
         object_store: &dyn ObjectStore,
-    ) -> BlobUrls {
+    ) -> String {
         let key = FriCircuitKey {
             block_number: L1BatchNumber(job_id),
             circuit_id: 255,
@@ -86,29 +87,22 @@ impl ArtifactsManager for RecursionTipWitnessGenerator {
             aggregation_round: AggregationRound::RecursionTip,
         };
 
-        let blob_url = object_store
+        object_store
             .put(
                 key,
                 &CircuitWrapper::Recursive(artifacts.recursion_tip_circuit.clone()),
             )
             .await
-            .unwrap();
-
-        BlobUrls::Url(blob_url)
+            .unwrap()
     }
 
-    async fn update_database(
+    async fn save_to_database(
        connection_pool: &ConnectionPool<Prover>,
         job_id: u32,
         started_at: Instant,
-        blob_urls: BlobUrls,
+        blob_urls: String,
         _artifacts: Self::OutputArtifacts,
     ) -> anyhow::Result<()> {
-        let blob_url = match blob_urls {
-            BlobUrls::Url(url) => url,
-            _ => panic!("Unexpected blob urls type"),
-        };
-
         let mut prover_connection = connection_pool.connection().await?;
         let mut transaction = prover_connection.start_transaction().await?;
         let protocol_version_id = transaction
@@ -123,7 +117,7 @@ impl ArtifactsManager for RecursionTipWitnessGenerator {
                 0,
                 0,
                 AggregationRound::RecursionTip,
-                &blob_url,
+                &blob_urls,
                 false,
                 protocol_version_id,
             )
diff --git a/prover/crates/bin/witness_generator/src/recursion_tip/job_processor.rs b/prover/crates/bin/witness_generator/src/recursion_tip/job_processor.rs
index f114724cfec4..9ab7d934a3e7 100644
--- a/prover/crates/bin/witness_generator/src/recursion_tip/job_processor.rs
+++ b/prover/crates/bin/witness_generator/src/recursion_tip/job_processor.rs
@@ -11,9 +11,10 @@ use crate::{
     artifacts::ArtifactsManager,
     metrics::WITNESS_GENERATOR_METRICS,
     recursion_tip::{
-        prepare_job, RecursionTipArtifacts, RecursionTipWitnessGenerator,
+        RecursionTipArtifacts, RecursionTipJobMetadata, RecursionTipWitnessGenerator,
         RecursionTipWitnessGeneratorJob,
     },
+    witness_generator::WitnessGenerator,
 };
 
 #[async_trait]
@@ -49,9 +50,11 @@ impl JobProcessor for RecursionTipWitnessGenerator {
 
         Ok(Some((
             l1_batch_number,
-            prepare_job(
-                l1_batch_number,
-                final_node_proof_job_ids,
+            <Self as WitnessGenerator>::prepare_job(
+                RecursionTipJobMetadata {
+                    l1_batch_number,
+                    final_node_proof_job_ids,
+                },
                 &*self.object_store,
                 self.keystore.clone(),
             )
@@ -77,7 +80,10 @@ impl JobProcessor for RecursionTipWitnessGenerator {
         job: RecursionTipWitnessGeneratorJob,
         started_at: Instant,
     ) -> tokio::task::JoinHandle<anyhow::Result<RecursionTipArtifacts>> {
-        tokio::task::spawn_blocking(move || Ok(Self::process_job_sync(job, started_at)))
+        let object_store = self.object_store.clone();
+        tokio::spawn(async move {
+            <Self as WitnessGenerator>::process_job(job, object_store, None, started_at).await
+        })
     }
 
     #[tracing::instrument(
@@ -93,12 +99,12 @@ impl JobProcessor for RecursionTipWitnessGenerator {
         let blob_save_started_at = Instant::now();
 
         let blob_urls =
-            Self::save_artifacts(job_id.0, artifacts.clone(), &*self.object_store).await;
+            Self::save_to_bucket(job_id.0, artifacts.clone(), &*self.object_store).await;
 
         WITNESS_GENERATOR_METRICS.blob_save_time[&AggregationRound::RecursionTip.into()]
             .observe(blob_save_started_at.elapsed());
 
-        Self::update_database(
+        Self::save_to_database(
             &self.prover_connection_pool,
             job_id.0,
             started_at,
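Note: because process_job is now async, the recursion-tip (and, below, the scheduler) handles are produced with tokio::spawn rather than tokio::task::spawn_blocking around a synchronous process_job_sync. A minimal sketch of the new shape; the helper and its bounds are illustrative, and None is passed because these rounds take no concurrency cap:

    use std::{sync::Arc, time::Instant};

    use zksync_object_store::ObjectStore;
    use zksync_witness_generator::witness_generator::WitnessGenerator;

    fn spawn_process<W>(
        job: W::Job,
        object_store: Arc<dyn ObjectStore>,
    ) -> tokio::task::JoinHandle<anyhow::Result<W::Artifacts>>
    where
        W: WitnessGenerator + 'static,
        W::Artifacts: Send + 'static,
    {
        // Async trait method spawned on the runtime directly.
        tokio::spawn(async move { W::process_job(job, object_store, None, Instant::now()).await })
    }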
b/prover/crates/bin/witness_generator/src/recursion_tip/mod.rs @@ -1,6 +1,7 @@ use std::{sync::Arc, time::Instant}; use anyhow::Context; +use async_trait::async_trait; use circuit_definitions::{ circuit_definitions::recursion_layer::{ recursion_tip::RecursionTipCircuit, ZkSyncRecursionLayerStorageType, @@ -45,6 +46,7 @@ use zksync_types::{ use crate::{ artifacts::ArtifactsManager, metrics::WITNESS_GENERATOR_METRICS, utils::ClosedFormInputWrapper, + witness_generator::WitnessGenerator, }; mod artifacts; @@ -66,6 +68,11 @@ pub struct RecursionTipArtifacts { pub recursion_tip_circuit: ZkSyncRecursiveLayerCircuit, } +pub struct RecursionTipJobMetadata { + pub l1_batch_number: L1BatchNumber, + pub final_node_proof_job_ids: Vec<(u8, u32)>, +} + #[derive(Debug)] pub struct RecursionTipWitnessGenerator { config: FriWitnessGeneratorConfig, @@ -91,15 +98,24 @@ impl RecursionTipWitnessGenerator { keystore, } } +} + +#[async_trait] +impl WitnessGenerator for RecursionTipWitnessGenerator { + type Job = RecursionTipWitnessGeneratorJob; + type Metadata = RecursionTipJobMetadata; + type Artifacts = RecursionTipArtifacts; #[tracing::instrument( skip_all, fields(l1_batch = %job.block_number) )] - pub fn process_job_sync( - job: RecursionTipWitnessGeneratorJob, + async fn process_job( + job: Self::Job, + _object_store: Arc, + _max_circuits_in_flight: Option, started_at: Instant, - ) -> RecursionTipArtifacts { + ) -> anyhow::Result { tracing::info!( "Starting fri witness generation of type {:?} for block {}", AggregationRound::RecursionTip, @@ -127,100 +143,102 @@ impl RecursionTipWitnessGenerator { started_at.elapsed() ); - RecursionTipArtifacts { + Ok(RecursionTipArtifacts { recursion_tip_circuit: ZkSyncRecursiveLayerCircuit::RecursionTipCircuit( recursive_tip_circuit, ), - } + }) } -} -#[tracing::instrument( - skip_all, - fields(l1_batch = %l1_batch_number) -)] -pub async fn prepare_job( - l1_batch_number: L1BatchNumber, - final_node_proof_job_ids: Vec<(u8, u32)>, - object_store: &dyn ObjectStore, - keystore: Keystore, -) -> anyhow::Result { - let started_at = Instant::now(); - let recursion_tip_proofs = - RecursionTipWitnessGenerator::get_artifacts(&final_node_proof_job_ids, object_store) - .await?; - WITNESS_GENERATOR_METRICS.blob_fetch_time[&AggregationRound::RecursionTip.into()] - .observe(started_at.elapsed()); - - let node_vk = keystore - .load_recursive_layer_verification_key( - ZkSyncRecursionLayerStorageType::NodeLayerCircuit as u8, + #[tracing::instrument( + skip_all, + fields(l1_batch = %metadata.l1_batch_number) + )] + async fn prepare_job( + metadata: RecursionTipJobMetadata, + object_store: &dyn ObjectStore, + keystore: Keystore, + ) -> anyhow::Result { + let started_at = Instant::now(); + let recursion_tip_proofs = RecursionTipWitnessGenerator::get_artifacts( + &metadata.final_node_proof_job_ids, + object_store, ) - .context("get_recursive_layer_vk_for_circuit_type()")?; + .await?; + WITNESS_GENERATOR_METRICS.blob_fetch_time[&AggregationRound::RecursionTip.into()] + .observe(started_at.elapsed()); + + let node_vk = keystore + .load_recursive_layer_verification_key( + ZkSyncRecursionLayerStorageType::NodeLayerCircuit as u8, + ) + .context("get_recursive_layer_vk_for_circuit_type()")?; + + let node_layer_vk_commitment = compute_node_vk_commitment(node_vk.clone()); + + let mut recursion_queues = vec![]; + for circuit_id in BaseLayerCircuitType::as_iter_u8() { + let key = ClosedFormInputKey { + block_number: metadata.l1_batch_number, + circuit_id, + }; + let ClosedFormInputWrapper(_, 
recursion_queue) = object_store.get(key).await?; + recursion_queues.push((circuit_id, recursion_queue)); + } + + // RECURSION_TIP_ARITY is the maximum amount of proof that a single recursion tip can support. + // Given recursion_tip has at most 1 proof per circuit, it implies we can't add more circuit types without bumping arity up. + assert!( + RECURSION_TIP_ARITY >= recursion_queues.len(), + "recursion tip received more circuits ({}) than supported ({})", + recursion_queues.len(), + RECURSION_TIP_ARITY + ); + let mut branch_circuit_type_set = [GoldilocksField::ZERO; RECURSION_TIP_ARITY]; + let mut queue_set: [_; RECURSION_TIP_ARITY] = + std::array::from_fn(|_| QueueState::placeholder_witness()); + + for (index, (circuit_id, recursion_queue)) in recursion_queues.iter().enumerate() { + branch_circuit_type_set[index] = + GoldilocksField::from_u64_unchecked(*circuit_id as u64); + queue_set[index] = take_sponge_like_queue_state_from_simulator(recursion_queue); + } - let node_layer_vk_commitment = compute_node_vk_commitment(node_vk.clone()); + let leaf_vk_commits = get_leaf_vk_params(&keystore).context("get_leaf_vk_params()")?; + assert_eq!( + leaf_vk_commits.len(), + 16, + "expected 16 leaf vk commits, which corresponds to the numebr of circuits, got {}", + leaf_vk_commits.len() + ); + let leaf_layer_parameters: [RecursionLeafParametersWitness; 16] = + leaf_vk_commits + .iter() + .map(|el| el.1.clone()) + .collect::>() + .try_into() + .unwrap(); + + let input = RecursionTipInputWitness { + leaf_layer_parameters, + node_layer_vk_commitment, + branch_circuit_type_set, + queue_set, + }; - let mut recursion_queues = vec![]; - for circuit_id in BaseLayerCircuitType::as_iter_u8() { - let key = ClosedFormInputKey { - block_number: l1_batch_number, - circuit_id, + let recursion_tip_witness = RecursionTipInstanceWitness { + input, + vk_witness: node_vk.clone().into_inner(), + proof_witnesses: recursion_tip_proofs.into(), }; - let ClosedFormInputWrapper(_, recursion_queue) = object_store.get(key).await?; - recursion_queues.push((circuit_id, recursion_queue)); - } - // RECURSION_TIP_ARITY is the maximum amount of proof that a single recursion tip can support. - // Given recursion_tip has at most 1 proof per circuit, it implies we can't add more circuit types without bumping arity up. 
- assert!( - RECURSION_TIP_ARITY >= recursion_queues.len(), - "recursion tip received more circuits ({}) than supported ({})", - recursion_queues.len(), - RECURSION_TIP_ARITY - ); - let mut branch_circuit_type_set = [GoldilocksField::ZERO; RECURSION_TIP_ARITY]; - let mut queue_set: [_; RECURSION_TIP_ARITY] = - std::array::from_fn(|_| QueueState::placeholder_witness()); - - for (index, (circuit_id, recursion_queue)) in recursion_queues.iter().enumerate() { - branch_circuit_type_set[index] = GoldilocksField::from_u64_unchecked(*circuit_id as u64); - queue_set[index] = take_sponge_like_queue_state_from_simulator(recursion_queue); - } + WITNESS_GENERATOR_METRICS.prepare_job_time[&AggregationRound::RecursionTip.into()] + .observe(started_at.elapsed()); - let leaf_vk_commits = get_leaf_vk_params(&keystore).context("get_leaf_vk_params()")?; - assert_eq!( - leaf_vk_commits.len(), - 16, - "expected 16 leaf vk commits, which corresponds to the numebr of circuits, got {}", - leaf_vk_commits.len() - ); - let leaf_layer_parameters: [RecursionLeafParametersWitness; 16] = - leaf_vk_commits - .iter() - .map(|el| el.1.clone()) - .collect::>() - .try_into() - .unwrap(); - - let input = RecursionTipInputWitness { - leaf_layer_parameters, - node_layer_vk_commitment, - branch_circuit_type_set, - queue_set, - }; - - let recursion_tip_witness = RecursionTipInstanceWitness { - input, - vk_witness: node_vk.clone().into_inner(), - proof_witnesses: recursion_tip_proofs.into(), - }; - - WITNESS_GENERATOR_METRICS.prepare_job_time[&AggregationRound::RecursionTip.into()] - .observe(started_at.elapsed()); - - Ok(RecursionTipWitnessGeneratorJob { - block_number: l1_batch_number, - recursion_tip_witness, - node_vk, - }) + Ok(RecursionTipWitnessGeneratorJob { + block_number: metadata.l1_batch_number, + recursion_tip_witness, + node_vk, + }) + } } diff --git a/prover/crates/bin/witness_generator/src/scheduler/artifacts.rs b/prover/crates/bin/witness_generator/src/scheduler/artifacts.rs index b20a97641887..77d1da685d0b 100644 --- a/prover/crates/bin/witness_generator/src/scheduler/artifacts.rs +++ b/prover/crates/bin/witness_generator/src/scheduler/artifacts.rs @@ -8,7 +8,7 @@ use zksync_prover_fri_types::{keys::FriCircuitKey, CircuitWrapper, FriProofWrapp use zksync_types::{basic_fri_types::AggregationRound, L1BatchNumber}; use crate::{ - artifacts::{ArtifactsManager, BlobUrls}, + artifacts::ArtifactsManager, scheduler::{SchedulerArtifacts, SchedulerWitnessGenerator}, }; @@ -17,6 +17,7 @@ impl ArtifactsManager for SchedulerWitnessGenerator { type InputMetadata = u32; type InputArtifacts = FriProofWrapper; type OutputArtifacts = SchedulerArtifacts; + type BlobUrls = String; async fn get_artifacts( metadata: &Self::InputMetadata, @@ -27,11 +28,11 @@ impl ArtifactsManager for SchedulerWitnessGenerator { Ok(artifacts) } - async fn save_artifacts( + async fn save_to_bucket( job_id: u32, artifacts: Self::OutputArtifacts, object_store: &dyn ObjectStore, - ) -> BlobUrls { + ) -> String { let key = FriCircuitKey { block_number: L1BatchNumber(job_id), circuit_id: 1, @@ -40,29 +41,22 @@ impl ArtifactsManager for SchedulerWitnessGenerator { aggregation_round: AggregationRound::Scheduler, }; - let blob_url = object_store + object_store .put( key, &CircuitWrapper::Recursive(artifacts.scheduler_circuit.clone()), ) .await - .unwrap(); - - BlobUrls::Url(blob_url) + .unwrap() } - async fn update_database( + async fn save_to_database( connection_pool: &ConnectionPool, job_id: u32, started_at: Instant, - blob_urls: BlobUrls, + blob_urls: 
String, _artifacts: Self::OutputArtifacts, ) -> anyhow::Result<()> { - let blob_url = match blob_urls { - BlobUrls::Url(url) => url, - _ => panic!("Unexpected blob urls type"), - }; - let mut prover_connection = connection_pool.connection().await?; let mut transaction = prover_connection.start_transaction().await?; let protocol_version_id = transaction @@ -77,7 +71,7 @@ impl ArtifactsManager for SchedulerWitnessGenerator { 0, 0, AggregationRound::Scheduler, - &blob_url, + &blob_urls, false, protocol_version_id, ) diff --git a/prover/crates/bin/witness_generator/src/scheduler/job_processor.rs b/prover/crates/bin/witness_generator/src/scheduler/job_processor.rs index fe4f2db4090a..b5745f980917 100644 --- a/prover/crates/bin/witness_generator/src/scheduler/job_processor.rs +++ b/prover/crates/bin/witness_generator/src/scheduler/job_processor.rs @@ -11,8 +11,10 @@ use crate::{ artifacts::ArtifactsManager, metrics::WITNESS_GENERATOR_METRICS, scheduler::{ - prepare_job, SchedulerArtifacts, SchedulerWitnessGenerator, SchedulerWitnessGeneratorJob, + SchedulerArtifacts, SchedulerWitnessGenerator, SchedulerWitnessGeneratorJob, + SchedulerWitnessJobMetadata, }, + witness_generator::WitnessGenerator, }; #[async_trait] @@ -44,9 +46,11 @@ impl JobProcessor for SchedulerWitnessGenerator { Ok(Some(( l1_batch_number, - prepare_job( - l1_batch_number, - recursion_tip_job_id, + ::prepare_job( + SchedulerWitnessJobMetadata { + l1_batch_number, + recursion_tip_job_id, + }, &*self.object_store, self.keystore.clone(), ) @@ -72,10 +76,9 @@ impl JobProcessor for SchedulerWitnessGenerator { job: SchedulerWitnessGeneratorJob, started_at: Instant, ) -> tokio::task::JoinHandle> { - tokio::task::spawn_blocking(move || { - let block_number = job.block_number; - let _span = tracing::info_span!("scheduler", %block_number).entered(); - Ok(Self::process_job_sync(job, started_at)) + let object_store = self.object_store.clone(); + tokio::spawn(async move { + ::process_job(job, object_store, None, started_at).await }) } @@ -92,12 +95,12 @@ impl JobProcessor for SchedulerWitnessGenerator { let blob_save_started_at = Instant::now(); let blob_urls = - Self::save_artifacts(job_id.0, artifacts.clone(), &*self.object_store).await; + Self::save_to_bucket(job_id.0, artifacts.clone(), &*self.object_store).await; WITNESS_GENERATOR_METRICS.blob_save_time[&AggregationRound::Scheduler.into()] .observe(blob_save_started_at.elapsed()); - Self::update_database( + Self::save_to_database( &self.prover_connection_pool, job_id.0, started_at, diff --git a/prover/crates/bin/witness_generator/src/scheduler/mod.rs b/prover/crates/bin/witness_generator/src/scheduler/mod.rs index 10230b35c4f6..7af3d68d5a75 100644 --- a/prover/crates/bin/witness_generator/src/scheduler/mod.rs +++ b/prover/crates/bin/witness_generator/src/scheduler/mod.rs @@ -1,6 +1,7 @@ use std::{convert::TryInto, sync::Arc, time::Instant}; use anyhow::Context as _; +use async_trait::async_trait; use zkevm_test_harness::zkevm_circuits::recursion::{ leaf_layer::input::RecursionLeafParametersWitness, NUM_BASE_LAYER_CIRCUITS, }; @@ -29,7 +30,7 @@ use zksync_types::{ use crate::{ artifacts::ArtifactsManager, metrics::WITNESS_GENERATOR_METRICS, - utils::SchedulerPartialInputWrapper, + utils::SchedulerPartialInputWrapper, witness_generator::WitnessGenerator, }; mod artifacts; @@ -54,6 +55,11 @@ pub struct SchedulerWitnessGeneratorJob { [RecursionLeafParametersWitness; NUM_BASE_LAYER_CIRCUITS], } +pub struct SchedulerWitnessJobMetadata { + pub l1_batch_number: L1BatchNumber, + pub 
diff --git a/prover/crates/bin/witness_generator/src/scheduler/mod.rs b/prover/crates/bin/witness_generator/src/scheduler/mod.rs
index 10230b35c4f6..7af3d68d5a75 100644
--- a/prover/crates/bin/witness_generator/src/scheduler/mod.rs
+++ b/prover/crates/bin/witness_generator/src/scheduler/mod.rs
@@ -1,6 +1,7 @@
 use std::{convert::TryInto, sync::Arc, time::Instant};
 
 use anyhow::Context as _;
+use async_trait::async_trait;
 use zkevm_test_harness::zkevm_circuits::recursion::{
     leaf_layer::input::RecursionLeafParametersWitness, NUM_BASE_LAYER_CIRCUITS,
 };
@@ -29,7 +30,7 @@ use zksync_types::{
 use crate::{
     artifacts::ArtifactsManager,
     metrics::WITNESS_GENERATOR_METRICS,
-    utils::SchedulerPartialInputWrapper,
+    utils::SchedulerPartialInputWrapper, witness_generator::WitnessGenerator,
 };
 
 mod artifacts;
@@ -54,6 +55,11 @@ pub struct SchedulerWitnessGeneratorJob {
         [RecursionLeafParametersWitness<GoldilocksField>; NUM_BASE_LAYER_CIRCUITS],
 }
 
+pub struct SchedulerWitnessJobMetadata {
+    pub l1_batch_number: L1BatchNumber,
+    pub recursion_tip_job_id: u32,
+}
+
 #[derive(Debug)]
 pub struct SchedulerWitnessGenerator {
     config: FriWitnessGeneratorConfig,
@@ -79,15 +85,24 @@ impl SchedulerWitnessGenerator {
             keystore,
         }
     }
+}
+
+#[async_trait]
+impl WitnessGenerator for SchedulerWitnessGenerator {
+    type Job = SchedulerWitnessGeneratorJob;
+    type Metadata = SchedulerWitnessJobMetadata;
+    type Artifacts = SchedulerArtifacts;
 
     #[tracing::instrument(
         skip_all,
         fields(l1_batch = %job.block_number)
     )]
-    pub fn process_job_sync(
+    async fn process_job(
         job: SchedulerWitnessGeneratorJob,
+        _object_store: Arc<dyn ObjectStore>,
+        _max_circuits_in_flight: Option<usize>,
         started_at: Instant,
-    ) -> SchedulerArtifacts {
+    ) -> anyhow::Result<SchedulerArtifacts> {
         tracing::info!(
             "Starting fri witness generation of type {:?} for block {}",
             AggregationRound::Scheduler,
@@ -118,66 +133,67 @@ impl SchedulerWitnessGenerator {
             started_at.elapsed()
         );
 
-        SchedulerArtifacts {
+        Ok(SchedulerArtifacts {
             scheduler_circuit: ZkSyncRecursiveLayerCircuit::SchedulerCircuit(scheduler_circuit),
-        }
+        })
     }
-}
 
-#[tracing::instrument(
-    skip_all,
-    fields(l1_batch = %l1_batch_number)
-)]
-pub async fn prepare_job(
-    l1_batch_number: L1BatchNumber,
-    recursion_tip_job_id: u32,
-    object_store: &dyn ObjectStore,
-    keystore: Keystore,
-) -> anyhow::Result<SchedulerWitnessGeneratorJob> {
-    let started_at = Instant::now();
-    let wrapper =
-        SchedulerWitnessGenerator::get_artifacts(&recursion_tip_job_id, object_store).await?;
-    let recursion_tip_proof = match wrapper {
-        FriProofWrapper::Base(_) => Err(anyhow::anyhow!(
-            "Expected only recursive proofs for scheduler l1 batch {l1_batch_number}, got Base"
-        )),
-        FriProofWrapper::Recursive(recursive_proof) => Ok(recursive_proof.into_inner()),
-    }?;
-    WITNESS_GENERATOR_METRICS.blob_fetch_time[&AggregationRound::Scheduler.into()]
-        .observe(started_at.elapsed());
-
-    let started_at = Instant::now();
-    let node_vk = keystore
-        .load_recursive_layer_verification_key(
-            ZkSyncRecursionLayerStorageType::NodeLayerCircuit as u8,
-        )
-        .context("get_recursive_layer_vk_for_circuit_type()")?;
-    let SchedulerPartialInputWrapper(mut scheduler_witness) =
-        object_store.get(l1_batch_number).await?;
-
-    let recursion_tip_vk = keystore
-        .load_recursive_layer_verification_key(
-            ZkSyncRecursionLayerStorageType::RecursionTipCircuit as u8,
-        )
-        .context("get_recursion_tip_vk()")?;
-    scheduler_witness.proof_witnesses = vec![recursion_tip_proof].into();
-
-    let leaf_vk_commits = get_leaf_vk_params(&keystore).context("get_leaf_vk_params()")?;
-    let leaf_layer_parameters = leaf_vk_commits
-        .iter()
-        .map(|el| el.1.clone())
-        .collect::<Vec<_>>()
-        .try_into()
-        .unwrap();
-
-    WITNESS_GENERATOR_METRICS.prepare_job_time[&AggregationRound::Scheduler.into()]
-        .observe(started_at.elapsed());
-
-    Ok(SchedulerWitnessGeneratorJob {
-        block_number: l1_batch_number,
-        scheduler_witness,
-        node_vk,
-        leaf_layer_parameters,
-        recursion_tip_vk,
-    })
+    #[tracing::instrument(
+        skip_all,
+        fields(l1_batch = %metadata.l1_batch_number)
+    )]
+    async fn prepare_job(
+        metadata: SchedulerWitnessJobMetadata,
+        object_store: &dyn ObjectStore,
+        keystore: Keystore,
+    ) -> anyhow::Result<SchedulerWitnessGeneratorJob> {
+        let started_at = Instant::now();
+        let wrapper =
+            SchedulerWitnessGenerator::get_artifacts(&metadata.recursion_tip_job_id, object_store)
+                .await?;
+        let recursion_tip_proof = match wrapper {
+            FriProofWrapper::Base(_) => Err(anyhow::anyhow!(
+                "Expected only recursive proofs for scheduler l1 batch {}, got Base",
+                metadata.l1_batch_number
+            )),
+            FriProofWrapper::Recursive(recursive_proof) => Ok(recursive_proof.into_inner()),
+        }?;
+        WITNESS_GENERATOR_METRICS.blob_fetch_time[&AggregationRound::Scheduler.into()]
+            .observe(started_at.elapsed());
+
+        let started_at = Instant::now();
+        let node_vk = keystore
+            .load_recursive_layer_verification_key(
+                ZkSyncRecursionLayerStorageType::NodeLayerCircuit as u8,
+            )
+            .context("get_recursive_layer_vk_for_circuit_type()")?;
+        let SchedulerPartialInputWrapper(mut scheduler_witness) =
+            object_store.get(metadata.l1_batch_number).await?;
+
+        let recursion_tip_vk = keystore
+            .load_recursive_layer_verification_key(
+                ZkSyncRecursionLayerStorageType::RecursionTipCircuit as u8,
+            )
+            .context("get_recursion_tip_vk()")?;
+        scheduler_witness.proof_witnesses = vec![recursion_tip_proof].into();
+
+        let leaf_vk_commits = get_leaf_vk_params(&keystore).context("get_leaf_vk_params()")?;
+        let leaf_layer_parameters = leaf_vk_commits
+            .iter()
+            .map(|el| el.1.clone())
+            .collect::<Vec<_>>()
+            .try_into()
+            .unwrap();
+
+        WITNESS_GENERATOR_METRICS.prepare_job_time[&AggregationRound::Scheduler.into()]
+            .observe(started_at.elapsed());
+
+        Ok(SchedulerWitnessGeneratorJob {
+            block_number: metadata.l1_batch_number,
+            scheduler_witness,
+            node_vk,
+            leaf_layer_parameters,
+            recursion_tip_vk,
+        })
+    }
 }
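Note: with the impl above, scheduler witness generation is driven entirely through the trait. A hypothetical end-to-end driver under assumed public import paths from `zksync_witness_generator` (the tests further below do exactly this for the leaf and node rounds):

    use std::{sync::Arc, time::Instant};

    use zksync_object_store::ObjectStore;
    use zksync_prover_keystore::keystore::Keystore;
    use zksync_types::L1BatchNumber;
    use zksync_witness_generator::{
        scheduler::{SchedulerArtifacts, SchedulerWitnessGenerator, SchedulerWitnessJobMetadata},
        witness_generator::WitnessGenerator,
    };

    async fn run_scheduler_round(
        object_store: Arc<dyn ObjectStore>,
        keystore: Keystore,
        l1_batch_number: L1BatchNumber,
        recursion_tip_job_id: u32,
    ) -> anyhow::Result<SchedulerArtifacts> {
        let job = SchedulerWitnessGenerator::prepare_job(
            SchedulerWitnessJobMetadata {
                l1_batch_number,
                recursion_tip_job_id,
            },
            &*object_store,
            keystore,
        )
        .await?;

        // The scheduler round ignores the store handle and the concurrency
        // cap, hence the `None`.
        SchedulerWitnessGenerator::process_job(job, object_store, None, Instant::now()).await
    }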
diff --git a/prover/crates/bin/witness_generator/src/utils.rs b/prover/crates/bin/witness_generator/src/utils.rs
index 3ea2b539773f..8524bdae9ff0 100644
--- a/prover/crates/bin/witness_generator/src/utils.rs
+++ b/prover/crates/bin/witness_generator/src/utils.rs
@@ -204,28 +204,6 @@ pub async fn save_recursive_layer_prover_input_artifacts(
     ids_and_urls
 }
 
-#[tracing::instrument(
-    skip_all,
-    fields(l1_batch = %block_number, circuit_id = %circuit_id)
-)]
-pub async fn save_node_aggregations_artifacts(
-    block_number: L1BatchNumber,
-    circuit_id: u8,
-    depth: u16,
-    aggregations: Vec<(u64, RecursionQueueSimulator<GoldilocksField>)>,
-    object_store: &dyn ObjectStore,
-) -> String {
-    let key = AggregationsKey {
-        block_number,
-        circuit_id,
-        depth,
-    };
-    object_store
-        .put(key, &AggregationWrapper(aggregations))
-        .await
-        .unwrap()
-}
-
 #[tracing::instrument(skip_all)]
 pub async fn load_proofs_for_job_ids(
     job_ids: &[u32],
diff --git a/prover/crates/bin/witness_generator/src/witness_generator.rs b/prover/crates/bin/witness_generator/src/witness_generator.rs
new file mode 100644
index 000000000000..eb9200d7950d
--- /dev/null
+++ b/prover/crates/bin/witness_generator/src/witness_generator.rs
@@ -0,0 +1,25 @@
+use std::{sync::Arc, time::Instant};
+
+use async_trait::async_trait;
+use zksync_object_store::ObjectStore;
+use zksync_prover_keystore::keystore::Keystore;
+
+#[async_trait]
+pub trait WitnessGenerator {
+    type Job: Send + 'static;
+    type Metadata;
+    type Artifacts;
+
+    async fn process_job(
+        job: Self::Job,
+        object_store: Arc<dyn ObjectStore>,
+        max_circuits_in_flight: Option<usize>,
+        started_at: Instant,
+    ) -> anyhow::Result<Self::Artifacts>;
+
+    async fn prepare_job(
+        metadata: Self::Metadata,
+        object_store: &dyn ObjectStore,
+        keystore: Keystore,
+    ) -> anyhow::Result<Self::Job>;
+}
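Note: on the new trait itself, the `Job: Send + 'static` bound is what lets a prepared job cross into `tokio::spawn`, whose future must be `Send + 'static`. A minimal illustration with a generic helper; the helper and its extra bounds are not in the repository, and `async_trait` boxes the returned future, which is what `tokio::spawn` receives:

    use std::{sync::Arc, time::Instant};

    use async_trait::async_trait;
    use zksync_object_store::ObjectStore;

    #[async_trait]
    pub trait WitnessGeneratorLike {
        type Job: Send + 'static;
        type Artifacts: Send + 'static;

        async fn process_job(
            job: Self::Job,
            object_store: Arc<dyn ObjectStore>,
            max_circuits_in_flight: Option<usize>,
            started_at: Instant,
        ) -> anyhow::Result<Self::Artifacts>;
    }

    pub fn spawn_job<G: WitnessGeneratorLike + 'static>(
        job: G::Job,
        store: Arc<dyn ObjectStore>,
    ) -> tokio::task::JoinHandle<anyhow::Result<G::Artifacts>> {
        // Without `Send + 'static` on `Job` (and `Artifacts`), the future
        // handed to `tokio::spawn` would not satisfy its bounds.
        tokio::spawn(G::process_job(job, store, None, Instant::now()))
    }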
diff --git a/prover/crates/bin/witness_generator/tests/basic_test.rs b/prover/crates/bin/witness_generator/tests/basic_test.rs
index 3323e3c681e4..379ddc3a4eb4 100644
--- a/prover/crates/bin/witness_generator/tests/basic_test.rs
+++ b/prover/crates/bin/witness_generator/tests/basic_test.rs
@@ -15,9 +15,9 @@ use zksync_types::{
     L1BatchNumber,
 };
 use zksync_witness_generator::{
-    leaf_aggregation::{prepare_leaf_aggregation_job, LeafAggregationWitnessGenerator},
-    node_aggregation::{self, NodeAggregationWitnessGenerator},
-    utils::AggregationWrapper,
+    leaf_aggregation::LeafAggregationWitnessGenerator,
+    node_aggregation::NodeAggregationWitnessGenerator, utils::AggregationWrapper,
+    witness_generator::WitnessGenerator,
 };
 
 fn compare_serialized<T: Serialize>(expected: &T, actual: &T) {
@@ -52,17 +52,22 @@ async fn test_leaf_witness_gen() {
         .unwrap();
 
     let keystore = Keystore::locate();
-    let job = prepare_leaf_aggregation_job(leaf_aggregation_job_metadata, &*object_store, keystore)
-        .await
-        .unwrap();
+    let job = LeafAggregationWitnessGenerator::prepare_job(
+        leaf_aggregation_job_metadata,
+        &*object_store,
+        keystore,
+    )
+    .await
+    .unwrap();
 
-    let artifacts = LeafAggregationWitnessGenerator::process_job_impl(
+    let artifacts = LeafAggregationWitnessGenerator::process_job(
         job,
-        Instant::now(),
         object_store.clone(),
-        500,
+        Some(500),
+        Instant::now(),
     )
-    .await;
+    .await
+    .unwrap();
 
     let aggregations = AggregationWrapper(artifacts.aggregations);
@@ -142,18 +147,23 @@ async fn test_node_witness_gen() {
     };
 
     let keystore = Keystore::locate();
-    let job =
-        node_aggregation::prepare_job(node_aggregation_job_metadata, &*object_store, keystore)
-            .await
-            .unwrap();
+    let job = NodeAggregationWitnessGenerator::prepare_job(
+        node_aggregation_job_metadata,
+        &*object_store,
+        keystore,
+    )
+    .await
+    .unwrap();
 
-    let artifacts = NodeAggregationWitnessGenerator::process_job_impl(
+    let artifacts = NodeAggregationWitnessGenerator::process_job(
         job,
-        Instant::now(),
         object_store.clone(),
-        500,
+        Some(500),
+        Instant::now(),
     )
-    .await;
+    .await
+    .unwrap();
+
     let aggregations = AggregationWrapper(artifacts.next_aggregations);
 
     let expected_results_object_store_config = ObjectStoreConfig {
diff --git a/prover/crates/lib/prover_dal/.sqlx/query-97adb49780c9edde6a3cfda09dadbd694e1781e013247d090a280a1f894de464.json b/prover/crates/lib/prover_dal/.sqlx/query-97adb49780c9edde6a3cfda09dadbd694e1781e013247d090a280a1f894de464.json
new file mode 100644
index 000000000000..ce9e492a7d4a
--- /dev/null
+++ b/prover/crates/lib/prover_dal/.sqlx/query-97adb49780c9edde6a3cfda09dadbd694e1781e013247d090a280a1f894de464.json
@@ -0,0 +1,38 @@
+{
+  "db_name": "PostgreSQL",
+  "query": "\n                SELECT\n                    protocol_version AS \"protocol_version!\",\n                    protocol_version_patch AS \"protocol_version_patch!\",\n                    COUNT(*) FILTER (WHERE status = 'queued') as queued,\n                    COUNT(*) FILTER (WHERE status = 'in_progress') as in_progress\n                FROM\n                    prover_jobs_fri\n                WHERE\n                    status IN ('queued', 'in_progress')\n                    AND protocol_version IS NOT NULL\n                GROUP BY\n                    protocol_version,\n                    protocol_version_patch\n                ",
+  "describe": {
+    "columns": [
+      {
+        "ordinal": 0,
+        "name": "protocol_version!",
+        "type_info": "Int4"
+      },
+      {
+        "ordinal": 1,
+        "name": "protocol_version_patch!",
+        "type_info": "Int4"
+      },
+      {
+        "ordinal": 2,
+        "name": "queued",
+        "type_info": "Int8"
+      },
+      {
+        "ordinal": 3,
+        "name": "in_progress",
+        "type_info": "Int8"
+      }
+    ],
+    "parameters": {
+      "Left": []
+    },
+    "nullable": [
+      true,
+      false,
+      null,
+      null
+    ]
+  },
+  "hash": "97adb49780c9edde6a3cfda09dadbd694e1781e013247d090a280a1f894de464"
+}
diff --git a/prover/crates/lib/prover_dal/src/fri_prover_dal.rs b/prover/crates/lib/prover_dal/src/fri_prover_dal.rs
index 4e68154290da..1b6c43f4c177 100644
--- a/prover/crates/lib/prover_dal/src/fri_prover_dal.rs
+++ b/prover/crates/lib/prover_dal/src/fri_prover_dal.rs
@@ -6,8 +6,10 @@ use zksync_basic_types::{
         AggregationRound, CircuitIdRoundTuple, CircuitProverStatsEntry,
         ProtocolVersionedCircuitProverStats,
     },
-    protocol_version::{ProtocolSemanticVersion, ProtocolVersionId},
-    prover_dal::{FriProverJobMetadata, ProverJobFriInfo, ProverJobStatus, StuckJobs},
+    protocol_version::{ProtocolSemanticVersion, ProtocolVersionId, VersionPatch},
+    prover_dal::{
+        FriProverJobMetadata, JobCountStatistics, ProverJobFriInfo, ProverJobStatus, StuckJobs,
+    },
     L1BatchNumber,
 };
 use zksync_db_connection::{
@@ -445,6 +447,47 @@ impl FriProverDal<'_, '_> {
         }
     }
 
+    pub async fn get_generic_prover_jobs_stats(
+        &mut self,
+    ) -> HashMap<ProtocolSemanticVersion, JobCountStatistics> {
+        {
+            sqlx::query!(
+                r#"
+                SELECT
+                    protocol_version AS "protocol_version!",
+                    protocol_version_patch AS "protocol_version_patch!",
+                    COUNT(*) FILTER (WHERE status = 'queued') as queued,
+                    COUNT(*) FILTER (WHERE status = 'in_progress') as in_progress
+                FROM
+                    prover_jobs_fri
+                WHERE
+                    status IN ('queued', 'in_progress')
+                    AND protocol_version IS NOT NULL
+                GROUP BY
+                    protocol_version,
+                    protocol_version_patch
+                "#
+            )
+            .fetch_all(self.storage.conn())
+            .await
+            .unwrap()
+            .into_iter()
+            .map(|row| {
+                let protocol_semantic_version = ProtocolSemanticVersion::new(
+                    ProtocolVersionId::try_from(row.protocol_version as u16).unwrap(),
+                    VersionPatch(row.protocol_version_patch as u32),
+                );
+                let key = protocol_semantic_version;
+                let value = JobCountStatistics {
+                    queued: row.queued.unwrap() as usize,
+                    in_progress: row.in_progress.unwrap() as usize,
+                };
+                (key, value)
+            })
+            .collect()
+        }
+    }
+
     pub async fn min_unproved_l1_batch_number(&mut self) -> HashMap<(u8, u8), L1BatchNumber> {
         {
             sqlx::query!(
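Note: `get_generic_prover_jobs_stats` aggregates across all circuits and rounds (the per-circuit breakdowns stay in the existing queries) and is keyed only by protocol semantic version. A hypothetical consumer, assuming the usual `ProverDal` accessor pattern used elsewhere in this crate and a `Display` impl on the version type:

    use zksync_prover_dal::{Connection, Prover, ProverDal};

    async fn report_prover_queue_sizes(conn: &mut Connection<'_, Prover>) {
        let stats = conn.fri_prover_jobs_dal().get_generic_prover_jobs_stats().await;
        for (version, counts) in stats {
            tracing::info!(
                "protocol {version}: {} queued, {} in progress",
                counts.queued,
                counts.in_progress
            );
        }
    }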
diff --git a/prover/crates/lib/prover_dal/src/fri_witness_generator_dal.rs b/prover/crates/lib/prover_dal/src/fri_witness_generator_dal.rs
index 9958527a98b0..791038be0bb8 100644
--- a/prover/crates/lib/prover_dal/src/fri_witness_generator_dal.rs
+++ b/prover/crates/lib/prover_dal/src/fri_witness_generator_dal.rs
@@ -1378,7 +1378,7 @@ impl FriWitnessGeneratorDal<'_, '_> {
     pub async fn get_witness_jobs_stats(
         &mut self,
         aggregation_round: AggregationRound,
-    ) -> HashMap<(AggregationRound, ProtocolSemanticVersion), JobCountStatistics> {
+    ) -> HashMap<ProtocolSemanticVersion, JobCountStatistics> {
         let table_name = Self::input_table_name_for(aggregation_round);
         let sql = format!(
             r#"
@@ -1407,7 +1407,7 @@ impl FriWitnessGeneratorDal<'_, '_> {
                 .unwrap(),
                 VersionPatch(row.get::<i32, &str>("protocol_version_patch") as u32),
             );
-            let key = (aggregation_round, protocol_semantic_version);
+            let key = protocol_semantic_version;
             let value = JobCountStatistics {
                 queued: row.get::<i64, &str>("queued") as usize,
                 in_progress: row.get::<i64, &str>("in_progress") as usize,
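Note: dropping `AggregationRound` from the key means each returned map is keyed by version alone; the round is already the query's argument. A sketch of how a caller can still build a cross-round total, assuming the usual `ProverDal` accessor and that `AggregationRound` has exactly the five variants this PR works with:

    use std::collections::HashMap;

    use zksync_basic_types::{
        basic_fri_types::AggregationRound,
        protocol_version::ProtocolSemanticVersion,
        prover_dal::JobCountStatistics,
    };
    use zksync_prover_dal::{Connection, Prover, ProverDal};

    async fn witness_stats_across_rounds(
        conn: &mut Connection<'_, Prover>,
    ) -> HashMap<ProtocolSemanticVersion, JobCountStatistics> {
        let mut totals: HashMap<ProtocolSemanticVersion, JobCountStatistics> = HashMap::new();
        for round in [
            AggregationRound::BasicCircuits,
            AggregationRound::LeafAggregation,
            AggregationRound::NodeAggregation,
            AggregationRound::RecursionTip,
            AggregationRound::Scheduler,
        ] {
            let per_round = conn
                .fri_witness_generator_dal()
                .get_witness_jobs_stats(round)
                .await;
            for (version, counts) in per_round {
                // Sum queued/in-progress counts per protocol version.
                let entry = totals
                    .entry(version)
                    .or_insert(JobCountStatistics { queued: 0, in_progress: 0 });
                entry.queued += counts.queued;
                entry.in_progress += counts.in_progress;
            }
        }
        totals
    }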
diff --git a/zk_toolbox/Cargo.lock b/zk_toolbox/Cargo.lock
index fd524865d567..296037094529 100644
--- a/zk_toolbox/Cargo.lock
+++ b/zk_toolbox/Cargo.lock
@@ -4607,6 +4607,7 @@ version = "1.14.0"
 source = "registry+https://github.com/rust-lang/crates.io-index"
 checksum = "678b5a069e50bf00ecd22d0cd8ddf7c236f68581b03db652061ed5eb13a312ff"
 dependencies = [
+ "hex",
  "serde",
  "serde_with_macros",
 ]
@@ -6710,6 +6711,7 @@ dependencies = [
  "secp256k1",
  "serde",
  "serde_json",
+ "serde_with",
  "strum",
  "thiserror",
  "tracing",
diff --git a/zk_toolbox/crates/common/src/cmd.rs b/zk_toolbox/crates/common/src/cmd.rs
index 7bf0147b69c0..130a3b2c100e 100644
--- a/zk_toolbox/crates/common/src/cmd.rs
+++ b/zk_toolbox/crates/common/src/cmd.rs
@@ -147,10 +147,7 @@ impl<'a> Cmd<'a> {
 fn check_output_status(command_text: &str, output: &std::process::Output) -> CmdResult<()> {
     if !output.status.success() {
         logger::new_line();
-        logger::error_note(
-            &format!("Command failed to run: {}", command_text),
-            &log_output(output),
-        );
+        logger::error_note("Command failed to run", &log_output(output));
         return Err(CmdError {
             stderr: Some(String::from_utf8(output.stderr.clone())?),
             source: anyhow::anyhow!("Command failed to run: {}", command_text),
diff --git a/zk_toolbox/crates/common/src/term/logger.rs b/zk_toolbox/crates/common/src/term/logger.rs
index 33a88bd961e2..17e518d9ad92 100644
--- a/zk_toolbox/crates/common/src/term/logger.rs
+++ b/zk_toolbox/crates/common/src/term/logger.rs
@@ -56,10 +56,9 @@ pub fn note(msg: impl Display, content: impl Display) {
 }
 
 pub fn error_note(msg: &str, content: &str) {
-    let symbol = CliclackTheme.state_symbol(&ThemeState::Submit);
-    let note = CliclackTheme
-        .format_note(msg, content)
-        .replace(&symbol, &CliclackTheme.error_symbol());
+    let note = CliclackTheme.format_log(msg, &CliclackTheme.error_symbol());
+    term_write(note);
+    let note = CliclackTheme.format_log(content, &CliclackTheme.error_symbol());
     term_write(note);
 }
 
diff --git a/zk_toolbox/crates/zk_inception/src/utils/consensus.rs b/zk_toolbox/crates/zk_inception/src/utils/consensus.rs
index 06848334a6e1..0b24bbe5cdc6 100644
--- a/zk_toolbox/crates/zk_inception/src/utils/consensus.rs
+++ b/zk_toolbox/crates/zk_inception/src/utils/consensus.rs
@@ -95,6 +95,7 @@ pub fn get_genesis_specs(
         attesters: vec![attester],
         leader,
         registry_address: None,
+        seed_peers: [].into(),
     }
 }
 
diff --git a/zk_toolbox/crates/zk_supervisor/src/commands/test/integration.rs b/zk_toolbox/crates/zk_supervisor/src/commands/test/integration.rs
index fc3ba974118c..fb3e1436acc3 100644
--- a/zk_toolbox/crates/zk_supervisor/src/commands/test/integration.rs
+++ b/zk_toolbox/crates/zk_supervisor/src/commands/test/integration.rs
@@ -42,7 +42,7 @@ pub async fn run(shell: &Shell, args: IntegrationArgs) -> anyhow::Result<()> {
     let test_pattern = args.test_pattern;
     let mut command = cmd!(
         shell,
-        "yarn jest api/contract-verification.test.ts --forceExit --testTimeout 120000 -t {test_pattern...}"
+        "yarn jest --forceExit --testTimeout 120000 -t {test_pattern...}"
    )
     .env("CHAIN_NAME", ecosystem_config.current_chain())
     .env("MASTER_WALLET_PK", wallets.get_test_pk(&chain_config)?);
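Note: with the hardcoded `api/contract-verification.test.ts` removed, the `-t` pattern now filters across the whole integration suite. For readers unfamiliar with the `{test_pattern...}` interpolation in `cmd!` above: it is xshell's splat syntax, which expands an `Option` or iterable into zero or more shell-escaped arguments. A standalone illustration with made-up values:

    use xshell::{cmd, Shell};

    fn main() -> anyhow::Result<()> {
        let sh = Shell::new()?;
        let flags = ["--forceExit", "--testTimeout", "120000"];
        // `None` would splice nothing; `Some(..)` splices a single argument,
        // kept intact even though it contains spaces.
        let test_pattern: Option<&str> = Some("Should deposit ETH");
        cmd!(sh, "yarn jest {flags...} -t {test_pattern...}").run()?;
        Ok(())
    }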