From 51e02cb78cd56ecd9f5df31f2b590ba19d866e1b Mon Sep 17 00:00:00 2001 From: Yury Akudovich Date: Mon, 2 Oct 2023 13:02:37 +0200 Subject: [PATCH 01/29] fix: Bumps webpki to fix RUSTSEC-2023-0052. (#130) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit # What ❔ Bumps webpki to 0.22.2 due to RUSTSEC-2023-0052. ## Why ❔ To fix vulnerability. ## Checklist - [x] PR title corresponds to the body of PR (we generate changelog entries from PRs). - [ ] Tests for the changes have been added / updated. - [ ] Documentation comments have been added / updated. - [ ] Code has been formatted via `zk fmt` and `zk lint`. --- Cargo.lock | 4 ++-- prover/Cargo.lock | 4 ++-- 2 files changed, 4 insertions(+), 4 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index e57439a3b244..e03ed8eaf61a 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -7176,9 +7176,9 @@ dependencies = [ [[package]] name = "webpki" -version = "0.22.1" +version = "0.22.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f0e74f82d49d545ad128049b7e88f6576df2da6b02e9ce565c6f533be576957e" +checksum = "07ecc0cd7cac091bf682ec5efa18b1cff79d617b84181f38b3951dbe135f607f" dependencies = [ "ring", "untrusted", diff --git a/prover/Cargo.lock b/prover/Cargo.lock index c5dde4ef70da..c1703e814745 100644 --- a/prover/Cargo.lock +++ b/prover/Cargo.lock @@ -6135,9 +6135,9 @@ dependencies = [ [[package]] name = "webpki" -version = "0.22.0" +version = "0.22.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f095d78192e208183081cc07bc5515ef55216397af48b873e5edcd72637fa1bd" +checksum = "07ecc0cd7cac091bf682ec5efa18b1cff79d617b84181f38b3951dbe135f607f" dependencies = [ "ring", "untrusted", From 38d66b1f8929d2381ae43fe388343c3e7e9a630e Mon Sep 17 00:00:00 2001 From: Aleksandr Stepanov Date: Mon, 2 Oct 2023 14:56:38 +0300 Subject: [PATCH 02/29] ci: Migration from ci-runner to matterlabs-ci-runner (#133) --- .github/workflows/build-external-node-docker.yml | 2 +- .github/workflows/build-gar-reusable.yml | 2 +- .github/workflows/build-local-node-docker.yml | 2 +- .github/workflows/build-prover-fri-gpu-gar.yml | 2 +- .github/workflows/coverage.yml | 2 +- .github/workflows/vm-perf-comparison.yml | 2 +- .github/workflows/vm-perf-to-prometheus.yml | 2 +- .github/workflows/zk-environment-cuda-12-0.publish.yml | 2 +- .github/workflows/zk-environment.publish.yml | 2 +- 9 files changed, 9 insertions(+), 9 deletions(-) diff --git a/.github/workflows/build-external-node-docker.yml b/.github/workflows/build-external-node-docker.yml index 31ec1e81c093..cccbc84c6c9b 100644 --- a/.github/workflows/build-external-node-docker.yml +++ b/.github/workflows/build-external-node-docker.yml @@ -11,7 +11,7 @@ on: jobs: build-images: name: External Node - Build and Push Docker Image - runs-on: [self-hosted, ci-runner] + runs-on: [matterlabs-ci-runner] steps: - uses: actions/checkout@ac593985615ec2ede58e132d2e21d2b1cbd6127c # v3 with: diff --git a/.github/workflows/build-gar-reusable.yml b/.github/workflows/build-gar-reusable.yml index 745ff6666e13..9a14508fdc7a 100644 --- a/.github/workflows/build-gar-reusable.yml +++ b/.github/workflows/build-gar-reusable.yml @@ -20,7 +20,7 @@ on: jobs: build-gar-prover: name: Build GAR prover - runs-on: [self-hosted, ci-runner] + runs-on: [matterlabs-ci-runner] strategy: fail-fast: false matrix: diff --git a/.github/workflows/build-local-node-docker.yml b/.github/workflows/build-local-node-docker.yml index 5f4cfa4547eb..9880361206c1 100644 --- 
a/.github/workflows/build-local-node-docker.yml +++ b/.github/workflows/build-local-node-docker.yml @@ -11,7 +11,7 @@ on: jobs: build-images: name: Local Node - Build and Push Docker Image - runs-on: [self-hosted, ci-runner] + runs-on: [matterlabs-ci-runner] steps: - uses: actions/checkout@ac593985615ec2ede58e132d2e21d2b1cbd6127c # v3 with: diff --git a/.github/workflows/build-prover-fri-gpu-gar.yml b/.github/workflows/build-prover-fri-gpu-gar.yml index 61304a5eb2c9..9643d9433188 100644 --- a/.github/workflows/build-prover-fri-gpu-gar.yml +++ b/.github/workflows/build-prover-fri-gpu-gar.yml @@ -15,7 +15,7 @@ on: jobs: build-gar-prover-fri-gpu: name: Build prover FRI GPU GAR - runs-on: [self-hosted, ci-runner] + runs-on: [matterlabs-ci-runner] steps: - uses: actions/checkout@ac593985615ec2ede58e132d2e21d2b1cbd6127c # v3 with: diff --git a/.github/workflows/coverage.yml b/.github/workflows/coverage.yml index 5064bfe22722..e7ed89eb760a 100644 --- a/.github/workflows/coverage.yml +++ b/.github/workflows/coverage.yml @@ -7,7 +7,7 @@ on: jobs: generate: - runs-on: [self-hosted, ci-runner] + runs-on: [matterlabs-ci-runner] steps: - uses: actions/checkout@ac593985615ec2ede58e132d2e21d2b1cbd6127c # v3 diff --git a/.github/workflows/vm-perf-comparison.yml b/.github/workflows/vm-perf-comparison.yml index 3078c9bfa8a1..1e5b65a35989 100644 --- a/.github/workflows/vm-perf-comparison.yml +++ b/.github/workflows/vm-perf-comparison.yml @@ -6,7 +6,7 @@ on: jobs: vm-benchmarks: name: Run VM benchmarks - runs-on: [self-hosted, ci-runner] + runs-on: [matterlabs-ci-runner] steps: - uses: actions/checkout@v3 diff --git a/.github/workflows/vm-perf-to-prometheus.yml b/.github/workflows/vm-perf-to-prometheus.yml index c5fe27632736..d2a6594ffca2 100644 --- a/.github/workflows/vm-perf-to-prometheus.yml +++ b/.github/workflows/vm-perf-to-prometheus.yml @@ -12,7 +12,7 @@ concurrency: vm-benchmarks jobs: vm-benchmarks: name: Run VM benchmarks - runs-on: [self-hosted, ci-runner] + runs-on: [matterlabs-ci-runner] steps: - uses: actions/checkout@v3 diff --git a/.github/workflows/zk-environment-cuda-12-0.publish.yml b/.github/workflows/zk-environment-cuda-12-0.publish.yml index f6471582bdf0..7018a61117ff 100644 --- a/.github/workflows/zk-environment-cuda-12-0.publish.yml +++ b/.github/workflows/zk-environment-cuda-12-0.publish.yml @@ -16,7 +16,7 @@ jobs: push_to_registry: if: github.event.pull_request.merged == true || github.event_name == 'workflow_dispatch' name: Push Docker image to Docker Hub - runs-on: [self-hosted, ci-runner] + runs-on: [matterlabs-ci-runner] steps: - uses: actions/checkout@ac593985615ec2ede58e132d2e21d2b1cbd6127c # v3 with: diff --git a/.github/workflows/zk-environment.publish.yml b/.github/workflows/zk-environment.publish.yml index 561da6190210..6070d37c7071 100644 --- a/.github/workflows/zk-environment.publish.yml +++ b/.github/workflows/zk-environment.publish.yml @@ -16,7 +16,7 @@ jobs: push_to_registry: if: github.event.pull_request.merged == true || github.event_name == 'workflow_dispatch' name: Push Docker image to Docker Hub - runs-on: [self-hosted, ci-runner] + runs-on: [matterlabs-ci-runner] steps: - uses: actions/checkout@ac593985615ec2ede58e132d2e21d2b1cbd6127c # v3 with: From 8df11278ca76000d842fc0f73f5233dfc85ef77e Mon Sep 17 00:00:00 2001 From: Maksym Date: Mon, 2 Oct 2023 15:48:39 +0300 Subject: [PATCH 03/29] ci: refactor zk env workflow (#135) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit # What ❔ ## Why ❔ Merge jobs which has something to 
do with zk-environment to one workflow ## Checklist - [x] PR title corresponds to the body of PR (we generate changelog entries from PRs). - [x] Tests for the changes have been added / updated. - [x] Documentation comments have been added / updated. - [x] Code has been formatted via `zk fmt` and `zk lint`. --- .github/workflows/ci.yml | 5 + .../zk-environment-cuda-12-0.publish.yml | 50 -------- .github/workflows/zk-environment.publish.yml | 108 +++++++++++++++++- 3 files changed, 108 insertions(+), 55 deletions(-) delete mode 100644 .github/workflows/zk-environment-cuda-12-0.publish.yml diff --git a/.github/workflows/ci.yml b/.github/workflows/ci.yml index 091941a66aa9..2de28c23d154 100644 --- a/.github/workflows/ci.yml +++ b/.github/workflows/ci.yml @@ -2,6 +2,11 @@ name: CI on: pull_request: + paths-ignore: + - ".github/workflows/zk-environment.publish.yml" + - "docker/zk-environment/Dockerfile" + - "docker/zk-environment-cuda-12-0/Dockerfile" + - "docker/zk-rust-nightly-environment/Dockerfile" merge_group: push: branches: diff --git a/.github/workflows/zk-environment-cuda-12-0.publish.yml b/.github/workflows/zk-environment-cuda-12-0.publish.yml deleted file mode 100644 index 7018a61117ff..000000000000 --- a/.github/workflows/zk-environment-cuda-12-0.publish.yml +++ /dev/null @@ -1,50 +0,0 @@ -name: publish zk-environment with cuda 12.0 docker image - -on: - pull_request: - branches: - - main - types: [closed] - paths: - - "docker/zk-environment-cuda-12-0/Dockerfile" - - ".github/workflows/zk-environment-cuda-12-0.publish.yml" - workflow_dispatch: - branches: - - "main" - -jobs: - push_to_registry: - if: github.event.pull_request.merged == true || github.event_name == 'workflow_dispatch' - name: Push Docker image to Docker Hub - runs-on: [matterlabs-ci-runner] - steps: - - uses: actions/checkout@ac593985615ec2ede58e132d2e21d2b1cbd6127c # v3 - with: - submodules: "recursive" - - - name: Login to us-central1 GAR - run: | - gcloud auth print-access-token --lifetime=7200 --impersonate-service-account=gha-ci-runners@matterlabs-infra.iam.gserviceaccount.com | docker login -u oauth2accesstoken --password-stdin https://us-docker.pkg.dev - - - name: Log in to Docker Hub - uses: docker/login-action@f4ef78c080cd8ba55a85445d5b36e214a81df20a # v2.1.0 - with: - username: ${{ secrets.DOCKERHUB_USER }} - password: ${{ secrets.DOCKERHUB_TOKEN }} - - - name: Set up QEMU - uses: docker/setup-qemu-action@v2 - - - name: Set up Docker Buildx - uses: docker/setup-buildx-action@v2 - - - name: Build and push - uses: docker/build-push-action@v4 - with: - context: . 
- file: docker/zk-environment-cuda-12-0/Dockerfile - push: true - target: nvidia-tools - tags: | - us-docker.pkg.dev/matterlabs-infra/matterlabs-docker/zk-environment-cuda-12-0:latest - matterlabs/zk-environment:cuda-12-0-latest diff --git a/.github/workflows/zk-environment.publish.yml b/.github/workflows/zk-environment.publish.yml index 6070d37c7071..2fb5c1d88135 100644 --- a/.github/workflows/zk-environment.publish.yml +++ b/.github/workflows/zk-environment.publish.yml @@ -1,4 +1,4 @@ -name: publish zk-environment docker image +name: publish zk-environment docker images on: pull_request: @@ -8,15 +8,47 @@ on: paths: - "docker/zk-environment/Dockerfile" - ".github/workflows/zk-environment.publish.yml" + - "docker/zk-environment-cuda-12-0/Dockerfile" + - ".github/workflows/zk-environment-cuda-12-0.publish.yml" + - "docker/zk-rust-nightly-environment/Dockerfile" + - ".github/workflows/rust-nightly-environment.publish.yml" workflow_dispatch: branches: - "main" jobs: - push_to_registry: - if: github.event.pull_request.merged == true || github.event_name == 'workflow_dispatch' - name: Push Docker image to Docker Hub - runs-on: [matterlabs-ci-runner] + changed_files: + name: Changed files + outputs: + zk_environment: ${{ steps.changed-files-yaml.outputs.zk_env_any_changed }} + zk_environment_cuda_12: ${{ steps.changed-files-yaml.outputs.zk_env_cuda_12_any_changed }} + rust_nightly: ${{ steps.changed-files-yaml.outputs.rust_nightly_any_changed }} + runs-on: ubuntu-latest + steps: + - uses: actions/checkout@ac593985615ec2ede58e132d2e21d2b1cbd6127c # v3 + with: + submodules: "recursive" + + - name: Get changed files + id: changed-files-yaml + uses: tj-actions/changed-files@v39 + with: + files_yaml: | + zk_env: + - docker/zk-environment/Dockerfile + - .github/workflows/zk-environment.publish.yml + zk_env_cuda_12: + - docker/zk-environment-cuda-12-0/Dockerfile + - .github/workflows/zk-environment-cuda-12-0.publish.yml + rust_nightly: + - docker/zk-rust-nightly-environment/Dockerfile + - .github/workflows/rust-nightly-environment.publish.yml + + zk_environment: + if: needs.changed_files.outputs.zk_environment == 'true' && github.event.pull_request.merged == true || github.event_name == 'workflow_dispatch' + name: Push zk-environment docker image to Docker Hub + runs-on: ubuntu-latest + needs: changed_files steps: - uses: actions/checkout@ac593985615ec2ede58e132d2e21d2b1cbd6127c # v3 with: @@ -51,3 +83,69 @@ jobs: tags: "matterlabs/zk-environment:latest2.0" file: docker/zk-environment/Dockerfile no-cache: true + + rust_nightly: + if: needs.changed_files.outputs.rust_nightly == 'true' && github.event.pull_request.merged == true || github.event_name == 'workflow_dispatch' + name: Push rust nightly docker image to Docker Hub + needs: changed_files + runs-on: ubuntu-latest + steps: + - uses: actions/checkout@ac593985615ec2ede58e132d2e21d2b1cbd6127c # v3 + with: + submodules: "recursive" + + - name: Set up Docker Buildx + uses: docker/setup-buildx-action@f03ac48505955848960e80bbb68046aa35c7b9e7 # v2 + + - name: Log in to Docker Hub + uses: docker/login-action@f4ef78c080cd8ba55a85445d5b36e214a81df20a # v2.1.0 + with: + username: ${{ secrets.DOCKERHUB_USER }} + password: ${{ secrets.DOCKERHUB_TOKEN }} + + - name: Build and push Docker image + uses: docker/build-push-action@3b5e8027fcad23fda98b2e3ac259d8d67585f671 # v4.0.0 + with: + context: . 
+ push: true + tags: "matterlabs/zksync_rust:nightly" + file: docker/zk-rust-nightly-environment/Dockerfile + no-cache: true + + + zk_environment_cuda_12: + if: needs.changed_files.outputs.zk_environment_cuda_12 == 'true' github.event.pull_request.merged == true || github.event_name == 'workflow_dispatch' + name: Push zk-environment cuda 12 docker image to Docker Hub + runs-on: [matterlabs-ci-runner] + needs: changed_files + steps: + - uses: actions/checkout@ac593985615ec2ede58e132d2e21d2b1cbd6127c # v3 + with: + submodules: "recursive" + + - name: Login to us-central1 GAR + run: | + gcloud auth print-access-token --lifetime=7200 --impersonate-service-account=gha-ci-runners@matterlabs-infra.iam.gserviceaccount.com | docker login -u oauth2accesstoken --password-stdin https://us-docker.pkg.dev + + - name: Log in to Docker Hub + uses: docker/login-action@f4ef78c080cd8ba55a85445d5b36e214a81df20a # v2.1.0 + with: + username: ${{ secrets.DOCKERHUB_USER }} + password: ${{ secrets.DOCKERHUB_TOKEN }} + + - name: Set up QEMU + uses: docker/setup-qemu-action@v2 + + - name: Set up Docker Buildx + uses: docker/setup-buildx-action@v2 + + - name: Build and push + uses: docker/build-push-action@v4 + with: + context: . + file: docker/zk-environment-cuda-12-0/Dockerfile + push: true + target: nvidia-tools + tags: | + us-docker.pkg.dev/matterlabs-infra/matterlabs-docker/zk-environment-cuda-12-0:latest + matterlabs/zk-environment:cuda-12-0-latest From c340a17cf347340b816222f72dbf76d113f03adb Mon Sep 17 00:00:00 2001 From: Maksym Date: Tue, 3 Oct 2023 15:25:20 +0300 Subject: [PATCH 04/29] ci: fix zk-env dockerfile (#143) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit # What ❔ Refactoring rust nightly docker image ## Checklist - [x] PR title corresponds to the body of PR (we generate changelog entries from PRs). - [x] Tests for the changes have been added / updated. - [x] Documentation comments have been added / updated. - [x] Code has been formatted via `zk fmt` and `zk lint`. 
--- .github/workflows/ci.yml | 8 +++----- .github/workflows/zk-environment.publish.yml | 11 +++-------- docker/zk-environment/Dockerfile | 5 +++++ docker/zk-rust-nightly-environment/Dockerfile | 19 ------------------- 4 files changed, 11 insertions(+), 32 deletions(-) delete mode 100644 docker/zk-rust-nightly-environment/Dockerfile diff --git a/.github/workflows/ci.yml b/.github/workflows/ci.yml index 2de28c23d154..1afb539d0d9d 100644 --- a/.github/workflows/ci.yml +++ b/.github/workflows/ci.yml @@ -2,11 +2,6 @@ name: CI on: pull_request: - paths-ignore: - - ".github/workflows/zk-environment.publish.yml" - - "docker/zk-environment/Dockerfile" - - "docker/zk-environment-cuda-12-0/Dockerfile" - - "docker/zk-rust-nightly-environment/Dockerfile" merge_group: push: branches: @@ -46,6 +41,9 @@ jobs: all: - '!core/**' - '!prover/**' + - '!.github/workflows/zk-environment.publish.yml' + - '!docker/zk-environment/Dockerfile' + - '!docker/zk-environment-cuda-12-0/Dockerfile' ci-for-core: name: CI for Core Components needs: changed_files diff --git a/.github/workflows/zk-environment.publish.yml b/.github/workflows/zk-environment.publish.yml index 2fb5c1d88135..b733fab9b176 100644 --- a/.github/workflows/zk-environment.publish.yml +++ b/.github/workflows/zk-environment.publish.yml @@ -10,8 +10,6 @@ on: - ".github/workflows/zk-environment.publish.yml" - "docker/zk-environment-cuda-12-0/Dockerfile" - ".github/workflows/zk-environment-cuda-12-0.publish.yml" - - "docker/zk-rust-nightly-environment/Dockerfile" - - ".github/workflows/rust-nightly-environment.publish.yml" workflow_dispatch: branches: - "main" @@ -22,7 +20,6 @@ jobs: outputs: zk_environment: ${{ steps.changed-files-yaml.outputs.zk_env_any_changed }} zk_environment_cuda_12: ${{ steps.changed-files-yaml.outputs.zk_env_cuda_12_any_changed }} - rust_nightly: ${{ steps.changed-files-yaml.outputs.rust_nightly_any_changed }} runs-on: ubuntu-latest steps: - uses: actions/checkout@ac593985615ec2ede58e132d2e21d2b1cbd6127c # v3 @@ -40,9 +37,6 @@ jobs: zk_env_cuda_12: - docker/zk-environment-cuda-12-0/Dockerfile - .github/workflows/zk-environment-cuda-12-0.publish.yml - rust_nightly: - - docker/zk-rust-nightly-environment/Dockerfile - - .github/workflows/rust-nightly-environment.publish.yml zk_environment: if: needs.changed_files.outputs.zk_environment == 'true' && github.event.pull_request.merged == true || github.event_name == 'workflow_dispatch' @@ -85,7 +79,7 @@ jobs: no-cache: true rust_nightly: - if: needs.changed_files.outputs.rust_nightly == 'true' && github.event.pull_request.merged == true || github.event_name == 'workflow_dispatch' + if: needs.changed_files.outputs.zk_environment == 'true' && github.event.pull_request.merged == true || github.event_name == 'workflow_dispatch' name: Push rust nightly docker image to Docker Hub needs: changed_files runs-on: ubuntu-latest @@ -108,8 +102,9 @@ jobs: with: context: . 
push: true + target: rust-nightly tags: "matterlabs/zksync_rust:nightly" - file: docker/zk-rust-nightly-environment/Dockerfile + file: docker/zk-environment/Dockerfile no-cache: true diff --git a/docker/zk-environment/Dockerfile b/docker/zk-environment/Dockerfile index 573d42b79ea2..c9871f98afb8 100644 --- a/docker/zk-environment/Dockerfile +++ b/docker/zk-environment/Dockerfile @@ -193,3 +193,8 @@ RUN apt-get update && apt-get install -y --no-install-recommends \ && rm -rf /var/lib/apt/lists/* ENV LIBRARY_PATH /usr/local/cuda/lib64/stubs + + +FROM base as rust-nightly + +RUN rustup default nightly-2023-07-21 diff --git a/docker/zk-rust-nightly-environment/Dockerfile b/docker/zk-rust-nightly-environment/Dockerfile deleted file mode 100644 index db3c8515c36b..000000000000 --- a/docker/zk-rust-nightly-environment/Dockerfile +++ /dev/null @@ -1,19 +0,0 @@ -FROM debian:bookworm-slim - -ARG DEBIAN_FRONTEND=noninteractive - -RUN apt update && apt install git curl clang openssl libssl-dev gcc g++ pkg-config build-essential libclang-dev -y - -ENV RUSTUP_HOME=/usr/local/rustup \ - CARGO_HOME=/usr/local/cargo \ - PATH=/usr/local/cargo/bin:$PATH - -# Setup rust nightly -RUN curl https://sh.rustup.rs -sSf | bash -s -- -y && \ - rustup install nightly-2023-07-21 && \ - rustup default nightly-2023-07-21 - -# Setup cmake -RUN curl -Lo cmake-3.24.2-linux-x86_64.sh https://github.com/Kitware/CMake/releases/download/v3.24.2/cmake-3.24.2-linux-x86_64.sh && \ - chmod +x cmake-3.24.2-linux-x86_64.sh && \ - ./cmake-3.24.2-linux-x86_64.sh --skip-license --prefix=/usr/local From 5e61bdc75b2baa03004d4d3e801170c094766964 Mon Sep 17 00:00:00 2001 From: Danil Date: Tue, 3 Oct 2023 14:26:33 +0200 Subject: [PATCH 05/29] feat(vm): Restore system-constants-generator (#115) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit # What ❔ Restore system-constants-generator ## Why ❔ This crate is rarely used, but it is important to keep it alive: it's used sometimes to tune our fee-related constants. Making sure that the code at least compiles is the smallest yet essential part of maintenance. ## Checklist - [ ] PR title corresponds to the body of PR (we generate changelog entries from PRs). - [ ] Tests for the changes have been added / updated. - [ ] Documentation comments have been added / updated. - [ ] Code has been formatted via `zk fmt` and `zk lint`. 
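For orientation before the diff: the restored crate is ported from the old `init_vm_inner` / `push_raw_transaction_to_bootloader_memory` / `execute_till_block_end` flow to the refactored `Vm` interface. Below is a condensed sketch of the new per-transaction flow, using the types and calls as they appear in the diff that follows; construction of `l1_batch`, `system_env`, and `storage_view` is elided (see `utils.rs` in this patch for the full setup), so this is an illustration rather than a complete program:

```rust
use std::cell::RefCell;
use std::rc::Rc;
use vm::{HistoryEnabled, Vm, VmExecutionMode};

// Minimal sketch of the refactored VM flow this patch migrates to; the
// argument types and method names mirror the diff below.
fn run_one_tx(
    l1_batch: vm::L1BatchEnv,
    system_env: vm::SystemEnv,
    storage_view: zksync_state::StorageView<zksync_state::InMemoryStorage>,
    tx: zksync_types::Transaction,
) {
    let mut vm = Vm::new(
        l1_batch,
        system_env,
        Rc::new(RefCell::new(storage_view)),
        HistoryEnabled,
    );
    // Transactions are pushed and executed one at a time...
    vm.push_transaction(tx);
    let result = vm.execute(VmExecutionMode::OneTx);
    assert!(!result.result.is_failed(), "A transaction has failed");
    // ...and custom behavior is attached via boxed tracers passed to
    // `vm.inspect(...)` instead of mutating VM state directly.
}
```

Custom bootloader runs follow the same pattern, except that seeding the bootloader heap and reading the result back move into a `VmTracer` implementation (the `SpecialBootloaderTracer` below) handed to `vm.inspect(...)`, replacing the old direct `populate_page`/`read_slot` calls around `execute_till_block_end`.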
--- Cargo.lock | 15 ++ Cargo.toml | 2 +- .../src/intrinsic_costs.rs | 2 +- .../system-constants-generator/src/main.rs | 21 +- .../system-constants-generator/src/utils.rs | 214 ++++++++++-------- core/lib/vm/src/constants.rs | 4 +- core/lib/vm/src/tracers/traits.rs | 2 +- 7 files changed, 149 insertions(+), 111 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index e03ed8eaf61a..0b584dc9a2ea 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -6208,6 +6208,21 @@ version = "0.1.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "2047c6ded9c721764247e62cd3b03c09ffc529b2ba5b10ec482ae507a4a70160" +[[package]] +name = "system-constants-generator" +version = "0.1.0" +dependencies = [ + "codegen 0.2.0", + "once_cell", + "serde", + "serde_json", + "vm", + "zksync_contracts", + "zksync_state", + "zksync_types", + "zksync_utils", +] + [[package]] name = "tagptr" version = "0.2.0" diff --git a/Cargo.toml b/Cargo.toml index 9a62cde88d73..9211084819da 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -7,7 +7,7 @@ members = [ "core/bin/merkle_tree_consistency_checker", "core/bin/rocksdb_util", "core/bin/storage_logs_dedup_migration", - # "core/bin/system-constants-generator", + "core/bin/system-constants-generator", "core/bin/verification_key_generator_and_server", "core/bin/verified_sources_fetcher", "core/bin/zksync_server", diff --git a/core/bin/system-constants-generator/src/intrinsic_costs.rs b/core/bin/system-constants-generator/src/intrinsic_costs.rs index c663939db6ee..0491be494ab8 100644 --- a/core/bin/system-constants-generator/src/intrinsic_costs.rs +++ b/core/bin/system-constants-generator/src/intrinsic_costs.rs @@ -9,7 +9,7 @@ use crate::utils::{ get_l2_txs, }; use crate::utils::{metrics_from_txs, TransactionGenerator}; -use vm::vm_with_bootloader::BOOTLOADER_TX_ENCODING_SPACE; +use vm::constants::BOOTLOADER_TX_ENCODING_SPACE; use zksync_types::{ethabi::Address, IntrinsicSystemGasConstants, U256}; #[derive(Debug, Clone, Copy, PartialEq)] diff --git a/core/bin/system-constants-generator/src/main.rs b/core/bin/system-constants-generator/src/main.rs index d101647134f8..f076eadd8c8d 100644 --- a/core/bin/system-constants-generator/src/main.rs +++ b/core/bin/system-constants-generator/src/main.rs @@ -1,16 +1,6 @@ use std::fs; use serde::{Deserialize, Serialize}; -use vm::{ - vm_with_bootloader::{BLOCK_OVERHEAD_GAS, BLOCK_OVERHEAD_L1_GAS, BOOTLOADER_TX_ENCODING_SPACE}, - zk_evm::zkevm_opcode_defs::{ - circuit_prices::{ - ECRECOVER_CIRCUIT_COST_IN_ERGS, KECCAK256_CIRCUIT_COST_IN_ERGS, - SHA256_CIRCUIT_COST_IN_ERGS, - }, - system_params::{MAX_PUBDATA_PER_BLOCK, MAX_TX_ERGS_LIMIT}, - }, -}; use zksync_types::{ IntrinsicSystemGasConstants, GUARANTEED_PUBDATA_IN_TX, L1_GAS_PER_PUBDATA_BYTE, MAX_GAS_PER_PUBDATA_BYTE, MAX_NEW_FACTORY_DEPS, MAX_TXS_IN_BLOCK, @@ -21,6 +11,13 @@ mod utils; use codegen::Block; use codegen::Scope; +use vm::constants::{ + BLOCK_OVERHEAD_GAS, BLOCK_OVERHEAD_L1_GAS, BOOTLOADER_TX_ENCODING_SPACE, MAX_PUBDATA_PER_BLOCK, +}; +use zksync_types::zkevm_test_harness::zk_evm::zkevm_opcode_defs::circuit_prices::{ + ECRECOVER_CIRCUIT_COST_IN_ERGS, KECCAK256_CIRCUIT_COST_IN_ERGS, SHA256_CIRCUIT_COST_IN_ERGS, +}; +use zksync_types::zkevm_test_harness::zk_evm::zkevm_opcode_defs::system_params::MAX_TX_ERGS_LIMIT; // Params needed for L1 contracts #[derive(Copy, Clone, Debug, Serialize, Deserialize)] @@ -128,7 +125,7 @@ fn generate_rust_fee_constants(intrinsic_gas_constants: &IntrinsicSystemGasConst scope.import("super", "IntrinsicSystemGasConstants"); scope.raw( - vec![ + [ "// 
TODO (SMA-1699): Use this method to ensure that the transactions provide enough", "// intrinsic gas on the API level.", ] @@ -193,7 +190,7 @@ fn generate_rust_fee_constants(intrinsic_gas_constants: &IntrinsicSystemGasConst get_intrinsic_constants_fn.push_block(struct_block); } - vec![ + [ "//! THIS FILE IS AUTOGENERATED: DO NOT EDIT MANUALLY!\n".to_string(), "//! The file with constants related to fees most of which need to be computed\n" .to_string(), diff --git a/core/bin/system-constants-generator/src/utils.rs b/core/bin/system-constants-generator/src/utils.rs index 3af5df328f42..afb00b5cda7d 100644 --- a/core/bin/system-constants-generator/src/utils.rs +++ b/core/bin/system-constants-generator/src/utils.rs @@ -1,36 +1,62 @@ use once_cell::sync::Lazy; +use std::cell::RefCell; +use std::rc::Rc; +use vm::constants::{BLOCK_GAS_LIMIT, BOOTLOADER_HEAP_PAGE}; use vm::{ - utils::{create_test_block_params, read_bootloader_test_code, BLOCK_GAS_LIMIT}, - vm_with_bootloader::{ - init_vm_inner, push_raw_transaction_to_bootloader_memory, BlockContextMode, - BootloaderJobType, DerivedBlockContext, TxExecutionMode, - }, - zk_evm::{aux_structures::Timestamp, zkevm_opcode_defs::BOOTLOADER_HEAP_PAGE}, - HistoryEnabled, OracleTools, + BootloaderState, BoxedTracer, DynTracer, ExecutionEndTracer, ExecutionProcessing, + HistoryEnabled, HistoryMode, L1BatchEnv, L2BlockEnv, SystemEnv, TxExecutionMode, Vm, + VmExecutionMode, VmExecutionStopReason, VmTracer, ZkSyncVmState, }; use zksync_contracts::{ - load_sys_contract, read_bootloader_code, read_sys_contract_bytecode, BaseSystemContracts, - ContractLanguage, SystemContractCode, + load_sys_contract, read_bootloader_code, read_sys_contract_bytecode, read_zbin_bytecode, + BaseSystemContracts, ContractLanguage, SystemContractCode, }; use zksync_state::{InMemoryStorage, StorageView, WriteStorage}; +use zksync_types::block::legacy_miniblock_hash; use zksync_types::{ - ethabi::Token, - fee::Fee, - l1::L1Tx, - l2::L2Tx, - tx::{ - tx_execution_info::{TxExecutionStatus, VmExecutionLogs}, - ExecutionMetrics, - }, - utils::storage_key_for_eth_balance, - AccountTreeId, Address, Execute, L1TxCommonData, L2ChainId, Nonce, StorageKey, Transaction, - BOOTLOADER_ADDRESS, H256, SYSTEM_CONTEXT_ADDRESS, SYSTEM_CONTEXT_GAS_PRICE_POSITION, - SYSTEM_CONTEXT_TX_ORIGIN_POSITION, U256, + ethabi::Token, fee::Fee, l1::L1Tx, l2::L2Tx, utils::storage_key_for_eth_balance, AccountTreeId, + Address, Execute, L1BatchNumber, L1TxCommonData, L2ChainId, MiniblockNumber, Nonce, + ProtocolVersionId, StorageKey, Timestamp, Transaction, BOOTLOADER_ADDRESS, H256, + SYSTEM_CONTEXT_ADDRESS, SYSTEM_CONTEXT_GAS_PRICE_POSITION, SYSTEM_CONTEXT_TX_ORIGIN_POSITION, + U256, ZKPORTER_IS_AVAILABLE, }; use zksync_utils::{bytecode::hash_bytecode, bytes_to_be_words, u256_to_h256}; use crate::intrinsic_costs::VmSpentResourcesResult; +/// Tracer for setting the data for bootloader with custom input +/// and receive an output from this custom bootloader +struct SpecialBootloaderTracer { + input: Vec<(usize, U256)>, + output: Rc>, +} + +impl DynTracer for SpecialBootloaderTracer {} + +impl ExecutionEndTracer for SpecialBootloaderTracer {} + +impl ExecutionProcessing for SpecialBootloaderTracer { + fn initialize_tracer(&mut self, state: &mut ZkSyncVmState) { + state.memory.populate_page( + BOOTLOADER_HEAP_PAGE as usize, + self.input.clone(), + Timestamp(0), + ); + } + fn after_vm_execution( + &mut self, + state: &mut ZkSyncVmState, + _bootloader_state: &BootloaderState, + _stop_reason: VmExecutionStopReason, + ) { + let 
value_recorded_from_test = state.memory.read_slot(BOOTLOADER_HEAP_PAGE as usize, 0); + let mut res = self.output.borrow_mut(); + *res = value_recorded_from_test.value.as_u32(); + } +} + +impl VmTracer for SpecialBootloaderTracer {} + pub static GAS_TEST_SYSTEM_CONTRACTS: Lazy = Lazy::new(|| { let bytecode = read_bootloader_code("gas_test"); let hash = hash_bytecode(&bytecode); @@ -135,19 +161,39 @@ pub(super) fn get_l1_txs(number_of_txs: usize) -> (Vec, Vec Vec { + read_zbin_bytecode(format!( + "etc/system-contracts/bootloader/tests/artifacts/{}.yul/{}.yul.zbin", + test, test + )) +} + +fn default_l1_batch() -> L1BatchEnv { + L1BatchEnv { + previous_batch_hash: None, + number: L1BatchNumber(1), + timestamp: 100, + l1_gas_price: 50_000_000_000, // 50 gwei + fair_l2_gas_price: 250_000_000, // 0.25 gwei + fee_account: Address::random(), + enforced_base_fee: None, + first_l2_block: L2BlockEnv { + number: 1, + timestamp: 100, + prev_block_hash: legacy_miniblock_hash(MiniblockNumber(0)), + max_virtual_blocks_to_create: 100, + }, + } +} + /// Executes the "internal transfer test" of the bootloader -- the test that /// returns the amount of gas needed to perform and internal transfer, assuming no gas price /// per pubdata, i.e. under assumption that the refund will not touch any new slots. pub(super) fn execute_internal_transfer_test() -> u32 { - let (block_context, block_properties) = create_test_block_params(); - let block_context: DerivedBlockContext = block_context.into(); - let raw_storage = InMemoryStorage::with_system_contracts(hash_bytecode); let mut storage_view = StorageView::new(raw_storage); let bootloader_balance_key = storage_key_for_eth_balance(&BOOTLOADER_ADDRESS); storage_view.set_value(bootloader_balance_key, u256_to_h256(U256([0, 0, 1, 0]))); - let mut oracle_tools = OracleTools::new(&mut storage_view, HistoryEnabled); - let bytecode = read_bootloader_test_code("transfer_test"); let hash = hash_bytecode(&bytecode); let bootloader = SystemContractCode { @@ -155,6 +201,8 @@ pub(super) fn execute_internal_transfer_test() -> u32 { hash, }; + let l1_batch = default_l1_batch(); + let bytecode = read_sys_contract_bytecode("", "DefaultAccount", ContractLanguage::Sol); let hash = hash_bytecode(&bytecode); let default_aa = SystemContractCode { @@ -162,19 +210,20 @@ pub(super) fn execute_internal_transfer_test() -> u32 { hash, }; - let base_system_contract = BaseSystemContracts { + let base_system_smart_contracts = BaseSystemContracts { bootloader, default_aa, }; - let mut vm = init_vm_inner( - &mut oracle_tools, - BlockContextMode::NewBlock(block_context, Default::default()), - &block_properties, - BLOCK_GAS_LIMIT, - &base_system_contract, - TxExecutionMode::VerifyExecute, - ); + let system_env = SystemEnv { + zk_porter_available: ZKPORTER_IS_AVAILABLE, + version: ProtocolVersionId::latest(), + base_system_smart_contracts, + gas_limit: BLOCK_GAS_LIMIT, + execution_mode: TxExecutionMode::VerifyExecute, + default_validation_computational_gas_limit: BLOCK_GAS_LIMIT, + chain_id: L2ChainId::default(), + }; let eth_token_sys_contract = load_sys_contract("L2EthToken"); let transfer_from_to = ð_token_sys_contract @@ -197,24 +246,22 @@ pub(super) fn execute_internal_transfer_test() -> u32 { input }; let input: Vec<_> = bytes_to_be_words(input).into_iter().enumerate().collect(); - vm.state - .memory - .populate_page(BOOTLOADER_HEAP_PAGE as usize, input, Timestamp(0)); - - let result = vm.execute_till_block_end(BootloaderJobType::BlockPostprocessing); - assert!( - 
result.block_tip_result.revert_reason.is_none(), - "The internal call has reverted" - ); - assert!( - result.full_result.revert_reason.is_none(), - "The internal call has reverted" + let tracer_result = Rc::new(RefCell::new(0)); + let tracer = SpecialBootloaderTracer { + input, + output: tracer_result.clone(), + }; + let mut vm = Vm::new( + l1_batch, + system_env, + Rc::new(RefCell::new(storage_view)), + HistoryEnabled, ); + let result = vm.inspect(vec![tracer.into_boxed()], VmExecutionMode::Bootloader); - let value_recorded_from_test = vm.state.memory.read_slot(BOOTLOADER_HEAP_PAGE as usize, 0); - - value_recorded_from_test.value.as_u32() + assert!(!result.result.is_failed(), "The internal call has reverted"); + tracer_result.take() } // Executes an array of transactions in the VM. @@ -226,9 +273,6 @@ pub(super) fn execute_user_txs_in_test_gas_vm( .iter() .fold(U256::zero(), |sum, elem| sum + elem.gas_limit()); - let (block_context, block_properties) = create_test_block_params(); - let block_context: DerivedBlockContext = block_context.into(); - let raw_storage = InMemoryStorage::with_system_contracts(hash_bytecode); let mut storage_view = StorageView::new(raw_storage); @@ -256,61 +300,43 @@ pub(super) fn execute_user_txs_in_test_gas_vm( storage_view.set_value(tx_gas_price_key, u256_to_h256(U256([1, 0, 0, 0]))); } - let mut oracle_tools = OracleTools::new(&mut storage_view, HistoryEnabled); + let l1_batch = default_l1_batch(); + let system_env = SystemEnv { + zk_porter_available: ZKPORTER_IS_AVAILABLE, + version: ProtocolVersionId::latest(), + base_system_smart_contracts: GAS_TEST_SYSTEM_CONTRACTS.clone(), + gas_limit: BLOCK_GAS_LIMIT, + execution_mode: TxExecutionMode::VerifyExecute, + default_validation_computational_gas_limit: BLOCK_GAS_LIMIT, + chain_id: L2ChainId::default(), + }; - let mut vm = init_vm_inner( - &mut oracle_tools, - BlockContextMode::NewBlock(block_context, Default::default()), - &block_properties, - BLOCK_GAS_LIMIT, - &GAS_TEST_SYSTEM_CONTRACTS, - TxExecutionMode::VerifyExecute, + let mut vm = Vm::new( + l1_batch, + system_env, + Rc::new(RefCell::new(storage_view)), + HistoryEnabled, ); - vm.start_next_l2_block(vm.get_current_l2_block_info().dummy_next_block_info()); let mut total_gas_refunded = 0; for tx in txs { - push_raw_transaction_to_bootloader_memory( - &mut vm, - tx.clone().into(), - TxExecutionMode::VerifyExecute, - 0, - None, - ); - let tx_execution_result = vm - .execute_next_tx(u32::MAX, false) - .expect("Bootloader failed while processing transaction"); + vm.push_transaction(tx); + let tx_execution_result = vm.execute(VmExecutionMode::OneTx); - total_gas_refunded += tx_execution_result.gas_refunded; + total_gas_refunded += tx_execution_result.refunds.gas_refunded; if !accept_failure { - assert_eq!( - tx_execution_result.status, - TxExecutionStatus::Success, + assert!( + !tx_execution_result.result.is_failed(), "A transaction has failed" ); } } - let result = vm.execute_till_block_end(BootloaderJobType::BlockPostprocessing); - let execution_logs = VmExecutionLogs { - storage_logs: result.full_result.storage_log_queries, - events: result.full_result.events, - l2_to_l1_logs: result.full_result.l2_to_l1_logs, - total_log_queries_count: result.full_result.total_log_queries, - }; - - let metrics = ExecutionMetrics::new( - &execution_logs, - result.full_result.gas_used as usize, - 0, // The number of contracts deployed is irrelevant for our needs - result.full_result.contracts_used, - result.full_result.cycles_used, - result.full_result.computational_gas_used, 
- result.full_result.total_log_queries, - ); + let result = vm.execute(VmExecutionMode::Bootloader); + let metrics = result.get_execution_metrics(None); VmSpentResourcesResult { - gas_consumed: vm.gas_consumed(), + gas_consumed: result.statistics.gas_used, total_gas_paid: total_gas_paid_upfront.as_u32() - total_gas_refunded, pubdata_published: metrics.size() as u32, total_pubdata_paid: 0, diff --git a/core/lib/vm/src/constants.rs b/core/lib/vm/src/constants.rs index a51688b851e7..1c1cb3d5017f 100644 --- a/core/lib/vm/src/constants.rs +++ b/core/lib/vm/src/constants.rs @@ -70,8 +70,8 @@ pub(crate) const TX_GAS_LIMIT_OFFSET: usize = 4; const INITIAL_BASE_PAGE: u32 = 8; pub const BOOTLOADER_HEAP_PAGE: u32 = heap_page_from_base(MemoryPage(INITIAL_BASE_PAGE)).0; -pub(crate) const BLOCK_OVERHEAD_GAS: u32 = 1200000; -pub(crate) const BLOCK_OVERHEAD_L1_GAS: u32 = 1000000; +pub const BLOCK_OVERHEAD_GAS: u32 = 1200000; +pub const BLOCK_OVERHEAD_L1_GAS: u32 = 1000000; pub const BLOCK_OVERHEAD_PUBDATA: u32 = BLOCK_OVERHEAD_L1_GAS / L1_GAS_PER_PUBDATA_BYTE; /// VM Hooks are used for communication between bootloader and tracers. diff --git a/core/lib/vm/src/tracers/traits.rs b/core/lib/vm/src/tracers/traits.rs index 6e76a041fabc..33e149066b16 100644 --- a/core/lib/vm/src/tracers/traits.rs +++ b/core/lib/vm/src/tracers/traits.rs @@ -69,7 +69,7 @@ pub trait DynTracer { /// Save the results of the vm execution. pub trait VmTracer: - DynTracer + ExecutionEndTracer + ExecutionProcessing + Send + DynTracer + ExecutionEndTracer + ExecutionProcessing { fn save_results(&mut self, _result: &mut VmExecutionResultAndLogs) {} } From cf44a491a324199b4cf457d28658da44b6dafc61 Mon Sep 17 00:00:00 2001 From: Danil Date: Tue, 3 Oct 2023 14:45:52 +0200 Subject: [PATCH 06/29] feat(vm): Introduce new way of returning from the tracer #2569 (#116) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit … private at a4d5ca6625210471d9de66d61e7c9a41a336afb8 # What ❔ ## Why ❔ ## Checklist - [ ] PR title corresponds to the body of PR (we generate changelog entries from PRs). - [ ] Tests for the changes have been added / updated. - [ ] Documentation comments have been added / updated. - [ ] Code has been formatted via `zk fmt` and `zk lint`. --------- Signed-off-by: Danil --- core/lib/vm/src/errors/halt.rs | 9 ++++ core/lib/vm/src/implementation/execution.rs | 10 +++-- core/lib/vm/src/tracers/default_tracers.rs | 42 +++++++++++++------ core/lib/vm/src/tracers/result_tracer.rs | 13 +++++- .../lib/vm/src/tracers/storage_invocations.rs | 15 +++++-- core/lib/vm/src/tracers/traits.rs | 30 +++++++++---- core/lib/vm/src/tracers/utils.rs | 5 ++- core/lib/vm/src/tracers/validation/mod.rs | 19 +++++++-- .../src/api_server/execution_sandbox/error.rs | 4 ++ .../api_server/execution_sandbox/validate.rs | 2 +- 10 files changed, 112 insertions(+), 37 deletions(-) diff --git a/core/lib/vm/src/errors/halt.rs b/core/lib/vm/src/errors/halt.rs index 10c8a8d702b9..0a5057a0616f 100644 --- a/core/lib/vm/src/errors/halt.rs +++ b/core/lib/vm/src/errors/halt.rs @@ -26,6 +26,8 @@ pub enum Halt { UnexpectedVMBehavior(String), // Bootloader is out of gas. BootloaderOutOfGas, + // Validation step is out of gas + ValidationOutOfGas, // Transaction has a too big gas limit and will not be executed by the server. 
TooBigGasLimit, // The bootloader did not have enough gas to start the transaction in the first place @@ -37,6 +39,7 @@ pub enum Halt { // Failed to publish information about the batch and the L2 block onto L1 FailedToAppendTransactionToL2Block(String), VMPanic, + TracerCustom(String), } impl Display for Halt { @@ -102,6 +105,12 @@ impl Display for Halt { reason ) } + Halt::TracerCustom(reason) => { + write!(f, "Tracer aborted execution: {}", reason) + } + Halt::ValidationOutOfGas => { + write!(f, "Validation run out of gas") + } } } } diff --git a/core/lib/vm/src/implementation/execution.rs b/core/lib/vm/src/implementation/execution.rs index 9944a37f7e83..52c4ff0cb0da 100644 --- a/core/lib/vm/src/implementation/execution.rs +++ b/core/lib/vm/src/implementation/execution.rs @@ -6,7 +6,9 @@ use crate::old_vm::{ utils::{vm_may_have_ended_inner, VmExecutionResult}, }; use crate::tracers::{ - traits::{BoxedTracer, ExecutionEndTracer, ExecutionProcessing, VmTracer}, + traits::{ + BoxedTracer, ExecutionEndTracer, ExecutionProcessing, TracerExecutionStatus, VmTracer, + }, DefaultExecutionTracer, RefundsTracer, }; use crate::types::{inputs::VmExecutionMode, outputs::VmExecutionResultAndLogs}; @@ -104,11 +106,11 @@ impl Vm { break VmExecutionStopReason::VmFinished; } - if tracer.should_stop_execution() { - break VmExecutionStopReason::TracerRequestedStop; + if let TracerExecutionStatus::Stop(reason) = tracer.should_stop_execution() { + break VmExecutionStopReason::TracerRequestedStop(reason); } }; - tracer.after_vm_execution(&mut self.state, &self.bootloader_state, result); + tracer.after_vm_execution(&mut self.state, &self.bootloader_state, result.clone()); result } diff --git a/core/lib/vm/src/tracers/default_tracers.rs b/core/lib/vm/src/tracers/default_tracers.rs index 7cc1e19869cf..4df00193265d 100644 --- a/core/lib/vm/src/tracers/default_tracers.rs +++ b/core/lib/vm/src/tracers/default_tracers.rs @@ -16,14 +16,17 @@ use crate::bootloader_state::BootloaderState; use crate::constants::BOOTLOADER_HEAP_PAGE; use crate::old_vm::history_recorder::HistoryMode; use crate::old_vm::memory::SimpleMemory; -use crate::tracers::traits::{DynTracer, ExecutionEndTracer, ExecutionProcessing, VmTracer}; +use crate::tracers::traits::{ + DynTracer, ExecutionEndTracer, ExecutionProcessing, TracerExecutionStatus, + TracerExecutionStopReason, VmTracer, +}; use crate::tracers::utils::{ computational_gas_price, gas_spent_on_bytecodes_and_long_messages_this_opcode, print_debug_if_needed, VmHook, }; use crate::tracers::ResultTracer; use crate::types::internals::ZkSyncVmState; -use crate::{VmExecutionMode, VmExecutionStopReason}; +use crate::{Halt, VmExecutionMode, VmExecutionStopReason}; /// Default tracer for the VM. It manages the other tracers execution and stop the vm when needed. 
pub(crate) struct DefaultExecutionTracer { @@ -141,17 +144,32 @@ impl Tracer for DefaultExecutionTracer { } impl ExecutionEndTracer for DefaultExecutionTracer { - fn should_stop_execution(&self) -> bool { - let mut should_stop = match self.execution_mode { - VmExecutionMode::OneTx => self.tx_has_been_processed(), - VmExecutionMode::Batch => false, - VmExecutionMode::Bootloader => self.ret_from_the_bootloader == Some(RetOpcode::Ok), + fn should_stop_execution(&self) -> TracerExecutionStatus { + match self.execution_mode { + VmExecutionMode::OneTx => { + if self.tx_has_been_processed() { + return TracerExecutionStatus::Stop(TracerExecutionStopReason::Finish); + } + } + VmExecutionMode::Bootloader => { + if self.ret_from_the_bootloader == Some(RetOpcode::Ok) { + return TracerExecutionStatus::Stop(TracerExecutionStopReason::Finish); + } + } + VmExecutionMode::Batch => {} }; - should_stop = should_stop || self.validation_run_out_of_gas(); + if self.validation_run_out_of_gas() { + return TracerExecutionStatus::Stop(TracerExecutionStopReason::Abort( + Halt::ValidationOutOfGas, + )); + } for tracer in self.custom_tracers.iter() { - should_stop = should_stop || tracer.should_stop_execution(); + let reason = tracer.should_stop_execution(); + if TracerExecutionStatus::Continue != reason { + return reason; + } } - should_stop + TracerExecutionStatus::Continue } } @@ -244,9 +262,9 @@ impl ExecutionProcessing for DefaultExecu stop_reason: VmExecutionStopReason, ) { self.result_tracer - .after_vm_execution(state, bootloader_state, stop_reason); + .after_vm_execution(state, bootloader_state, stop_reason.clone()); for processor in self.custom_tracers.iter_mut() { - processor.after_vm_execution(state, bootloader_state, stop_reason); + processor.after_vm_execution(state, bootloader_state, stop_reason.clone()); } } } diff --git a/core/lib/vm/src/tracers/result_tracer.rs b/core/lib/vm/src/tracers/result_tracer.rs index b8e089493565..dd61ea49cea6 100644 --- a/core/lib/vm/src/tracers/result_tracer.rs +++ b/core/lib/vm/src/tracers/result_tracer.rs @@ -24,6 +24,7 @@ use crate::types::{ }; use crate::constants::{BOOTLOADER_HEAP_PAGE, RESULT_SUCCESS_FIRST_SLOT}; +use crate::tracers::traits::TracerExecutionStopReason; use crate::{Halt, TxRevertReason}; use crate::{VmExecutionMode, VmExecutionStopReason}; @@ -120,9 +121,11 @@ impl ExecutionProcessing for ResultTracer // One of the tracers above has requested to stop the execution. 
// If it was the correct stop we already have the result, // otherwise it can be out of gas error - VmExecutionStopReason::TracerRequestedStop => { + VmExecutionStopReason::TracerRequestedStop(reason) => { match self.execution_mode { - VmExecutionMode::OneTx => self.vm_stopped_execution(state, bootloader_state), + VmExecutionMode::OneTx => { + self.vm_stopped_execution(state, bootloader_state, reason) + } VmExecutionMode::Batch => self.vm_finished_execution(state), VmExecutionMode::Bootloader => self.vm_finished_execution(state), }; @@ -188,7 +191,13 @@ impl ResultTracer { &mut self, state: &ZkSyncVmState, bootloader_state: &BootloaderState, + reason: TracerExecutionStopReason, ) { + if let TracerExecutionStopReason::Abort(halt) = reason { + self.result = Some(Result::Halt { reason: halt }); + return; + } + if self.bootloader_out_of_gas { self.result = Some(Result::Halt { reason: Halt::BootloaderOutOfGas, diff --git a/core/lib/vm/src/tracers/storage_invocations.rs b/core/lib/vm/src/tracers/storage_invocations.rs index ef4b59c60a88..bd6f419eddfb 100644 --- a/core/lib/vm/src/tracers/storage_invocations.rs +++ b/core/lib/vm/src/tracers/storage_invocations.rs @@ -1,7 +1,11 @@ use crate::bootloader_state::BootloaderState; use crate::old_vm::history_recorder::HistoryMode; -use crate::tracers::traits::{DynTracer, ExecutionEndTracer, ExecutionProcessing, VmTracer}; +use crate::tracers::traits::{ + DynTracer, ExecutionEndTracer, ExecutionProcessing, TracerExecutionStatus, + TracerExecutionStopReason, VmTracer, +}; use crate::types::internals::ZkSyncVmState; +use crate::Halt; use zksync_state::WriteStorage; #[derive(Debug, Default, Clone)] @@ -21,8 +25,13 @@ impl StorageInvocations { impl DynTracer for StorageInvocations {} impl ExecutionEndTracer for StorageInvocations { - fn should_stop_execution(&self) -> bool { - self.current >= self.limit + fn should_stop_execution(&self) -> TracerExecutionStatus { + if self.current >= self.limit { + return TracerExecutionStatus::Stop(TracerExecutionStopReason::Abort( + Halt::TracerCustom("Storage invocations limit reached".to_string()), + )); + } + TracerExecutionStatus::Continue } } diff --git a/core/lib/vm/src/tracers/traits.rs b/core/lib/vm/src/tracers/traits.rs index 33e149066b16..4e76ed1fa15d 100644 --- a/core/lib/vm/src/tracers/traits.rs +++ b/core/lib/vm/src/tracers/traits.rs @@ -8,7 +8,7 @@ use crate::old_vm::history_recorder::HistoryMode; use crate::old_vm::memory::SimpleMemory; use crate::types::internals::ZkSyncVmState; use crate::types::outputs::VmExecutionResultAndLogs; -use crate::VmExecutionStopReason; +use crate::{Halt, VmExecutionStopReason}; /// Run tracer for collecting data during the vm execution cycles pub trait ExecutionProcessing: @@ -31,14 +31,6 @@ pub trait ExecutionProcessing: } } -/// Stop the vm execution if the tracer conditions are met -pub trait ExecutionEndTracer { - // Returns whether the vm execution should stop. - fn should_stop_execution(&self) -> bool { - false - } -} - /// Version of zk_evm::Tracer suitable for dynamic dispatch. pub trait DynTracer { fn before_decoding(&mut self, _state: VmLocalStateData<'_>, _memory: &SimpleMemory) {} @@ -83,3 +75,23 @@ impl + 'static> BoxedTracer { + // Returns whether the vm execution should stop. 
+ fn should_stop_execution(&self) -> TracerExecutionStatus { + TracerExecutionStatus::Continue + } +} diff --git a/core/lib/vm/src/tracers/utils.rs b/core/lib/vm/src/tracers/utils.rs index f86b496b0787..5f9090d6180c 100644 --- a/core/lib/vm/src/tracers/utils.rs +++ b/core/lib/vm/src/tracers/utils.rs @@ -18,6 +18,7 @@ use crate::constants::{ use crate::old_vm::history_recorder::HistoryMode; use crate::old_vm::memory::SimpleMemory; use crate::old_vm::utils::{aux_heap_page_from_base, heap_page_from_base}; +use crate::tracers::traits::TracerExecutionStopReason; #[derive(Clone, Debug, Copy)] pub(crate) enum VmHook { @@ -217,8 +218,8 @@ pub(crate) fn get_vm_hook_params(memory: &SimpleMemory) -> Ve ) } -#[derive(Debug, Clone, Copy, PartialEq)] +#[derive(Debug, Clone, PartialEq)] pub enum VmExecutionStopReason { VmFinished, - TracerRequestedStop, + TracerRequestedStop(TracerExecutionStopReason), } diff --git a/core/lib/vm/src/tracers/validation/mod.rs b/core/lib/vm/src/tracers/validation/mod.rs index 4b94e3f177b5..d85d031665ac 100644 --- a/core/lib/vm/src/tracers/validation/mod.rs +++ b/core/lib/vm/src/tracers/validation/mod.rs @@ -27,7 +27,10 @@ use zksync_utils::{ use crate::old_vm::history_recorder::HistoryMode; use crate::old_vm::memory::SimpleMemory; -use crate::tracers::traits::{DynTracer, ExecutionEndTracer, ExecutionProcessing, VmTracer}; +use crate::tracers::traits::{ + DynTracer, ExecutionEndTracer, ExecutionProcessing, TracerExecutionStatus, + TracerExecutionStopReason, VmTracer, +}; use crate::tracers::utils::{ computational_gas_price, get_calldata_page_via_abi, print_debug_if_needed, VmHook, }; @@ -38,7 +41,7 @@ pub use params::ValidationTracerParams; use types::NewTrustedValidationItems; use types::ValidationTracerMode; -use crate::VmExecutionResultAndLogs; +use crate::{Halt, VmExecutionResultAndLogs}; /// Tracer that is used to ensure that the validation adheres to all the rules /// to prevent DDoS attacks on the server. 
@@ -341,8 +344,16 @@ impl DynTracer for ValidationTracer { } impl ExecutionEndTracer for ValidationTracer { - fn should_stop_execution(&self) -> bool { - self.should_stop_execution || self.result.get().is_some() + fn should_stop_execution(&self) -> TracerExecutionStatus { + if self.should_stop_execution { + return TracerExecutionStatus::Stop(TracerExecutionStopReason::Finish); + } + if let Some(result) = self.result.get() { + return TracerExecutionStatus::Stop(TracerExecutionStopReason::Abort( + Halt::TracerCustom(format!("Validation error: {:#?}", result)), + )); + } + TracerExecutionStatus::Continue } } diff --git a/core/lib/zksync_core/src/api_server/execution_sandbox/error.rs b/core/lib/zksync_core/src/api_server/execution_sandbox/error.rs index abc50af37a5f..b4f04e2e5d60 100644 --- a/core/lib/zksync_core/src/api_server/execution_sandbox/error.rs +++ b/core/lib/zksync_core/src/api_server/execution_sandbox/error.rs @@ -61,6 +61,10 @@ impl From for SandboxExecutionError { Halt::FailedToAppendTransactionToL2Block(reason) => { SandboxExecutionError::Revert(reason, vec![]) } + Halt::TracerCustom(reason) => SandboxExecutionError::Revert(reason, vec![]), + Halt::ValidationOutOfGas => Self::AccountValidationFailed( + "The validation of the transaction ran out of gas".to_string(), + ), } } } diff --git a/core/lib/zksync_core/src/api_server/execution_sandbox/validate.rs b/core/lib/zksync_core/src/api_server/execution_sandbox/validate.rs index 05eb7f3ce2d9..2dd5ae7b9c25 100644 --- a/core/lib/zksync_core/src/api_server/execution_sandbox/validate.rs +++ b/core/lib/zksync_core/src/api_server/execution_sandbox/validate.rs @@ -86,8 +86,8 @@ impl TxSharedArgs { ]); let result = match (result.result, validation_result.get()) { - (ExecutionResult::Halt { reason }, _) => Err(ValidationError::FailedTx(reason)), (_, Some(err)) => Err(ValidationError::ViolatedRule(err.clone())), + (ExecutionResult::Halt { reason }, _) => Err(ValidationError::FailedTx(reason)), (_, None) => Ok(()), }; From f6284bdb627c1c3b42d587e173476a4598f5609b Mon Sep 17 00:00:00 2001 From: Maksym Date: Tue, 3 Oct 2023 15:59:03 +0300 Subject: [PATCH 07/29] ci: fix zk environment workflow condition (#147) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit # What ❔ Fix for zk env workflow ## Checklist - [x] PR title corresponds to the body of PR (we generate changelog entries from PRs). - [x] Tests for the changes have been added / updated. - [x] Documentation comments have been added / updated. - [x] Code has been formatted via `zk fmt` and `zk lint`. 
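The one-character root cause: the `zk_environment_cuda_12` job condition introduced in the earlier refactor read `... == 'true' github.event.pull_request.merged == true || ...`, i.e. it was missing the `&&` between the changed-files check and the merged check. With the `&&` restored, the expression groups as `(changed && merged) || dispatch`, because `&&` binds tighter than `||` in GitHub Actions expressions, just as it does in Rust. A minimal illustration of the resulting grouping (Rust used purely for illustration, since the precedence rules match):

```rust
// `&&` binds tighter than `||`, so this is (changed && merged) || dispatch.
fn should_run(changed: bool, merged: bool, dispatch: bool) -> bool {
    changed && merged || dispatch
}

fn main() {
    assert!(should_run(false, false, true)); // manual dispatch always runs
    assert!(should_run(true, true, false)); // merged PR runs if watched files changed
    assert!(!should_run(false, true, false)); // merged PR, no relevant changes: skipped
}
```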
--- .github/workflows/zk-environment.publish.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.github/workflows/zk-environment.publish.yml b/.github/workflows/zk-environment.publish.yml index b733fab9b176..bf2ec7fa18bf 100644 --- a/.github/workflows/zk-environment.publish.yml +++ b/.github/workflows/zk-environment.publish.yml @@ -109,7 +109,7 @@ jobs: zk_environment_cuda_12: - if: needs.changed_files.outputs.zk_environment_cuda_12 == 'true' github.event.pull_request.merged == true || github.event_name == 'workflow_dispatch' + if: needs.changed_files.outputs.zk_environment_cuda_12 == 'true' && github.event.pull_request.merged == true || github.event_name == 'workflow_dispatch' name: Push zk-environment cuda 12 docker image to Docker Hub runs-on: [matterlabs-ci-runner] needs: changed_files From 6a2367698ececdbae85dbb4ae173b8a2a537d9bd Mon Sep 17 00:00:00 2001 From: Yury Akudovich Date: Tue, 3 Oct 2023 15:56:57 +0200 Subject: [PATCH 08/29] ci: Makes TruffleHog run in merge queue. (#149) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit # What ❔ Makes TruffleHog run in merge queue. ## Why ❔ To prevent any secrets to be merged. ## Checklist - [x] PR title corresponds to the body of PR (we generate changelog entries from PRs). - [ ] Tests for the changes have been added / updated. - [ ] Documentation comments have been added / updated. - [ ] Code has been formatted via `zk fmt` and `zk lint`. --- .github/workflows/secrets_scanner.yaml | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/.github/workflows/secrets_scanner.yaml b/.github/workflows/secrets_scanner.yaml index 54054cf7cc12..6a1faa200cc4 100644 --- a/.github/workflows/secrets_scanner.yaml +++ b/.github/workflows/secrets_scanner.yaml @@ -1,5 +1,7 @@ name: Leaked Secrets Scan -on: [pull_request] +on: + pull_request: + merge_group: jobs: TruffleHog: runs-on: ubuntu-latest From 0dec553804858a435151bae98930d2f70c1ae596 Mon Sep 17 00:00:00 2001 From: Shahar Kaminsky Date: Tue, 3 Oct 2023 17:31:52 +0300 Subject: [PATCH 09/29] fix(prover): Add Prover Readme (#146) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit # What ❔ This PR adds all the prover readmes from the private repo. ## Why ❔ The mirroring script ignored all readme files. ## Checklist - [x] PR title corresponds to the body of PR (we generate changelog entries from PRs). - [ ] Tests for the changes have been added / updated. - [x] Documentation comments have been added / updated. - [x] Code has been formatted via `zk fmt` and `zk lint`. 
--- prover/proof_fri_compressor/README.md | 7 + prover/prover/README.md | 8 + prover/prover_fri/README.md | 150 ++++++++++++++++++ prover/prover_fri_gateway/README.md | 11 ++ prover/prover_fri_types/README.md | 8 + .../README.md | 22 +++ prover/witness_generator/README.md | 52 ++++++ prover/witness_vector_generator/README.md | 7 + 8 files changed, 265 insertions(+) create mode 100644 prover/proof_fri_compressor/README.md create mode 100644 prover/prover/README.md create mode 100644 prover/prover_fri/README.md create mode 100644 prover/prover_fri_gateway/README.md create mode 100644 prover/prover_fri_types/README.md create mode 100644 prover/vk_setup_data_generator_server_fri/README.md create mode 100644 prover/witness_generator/README.md create mode 100644 prover/witness_vector_generator/README.md diff --git a/prover/proof_fri_compressor/README.md b/prover/proof_fri_compressor/README.md new file mode 100644 index 000000000000..4b0fa52ed9fd --- /dev/null +++ b/prover/proof_fri_compressor/README.md @@ -0,0 +1,7 @@ +# Witness vector generator + +Used to compress FRI proof to Bellman proof that gets sent to L1. + +## running + +`zk f cargo +nightly-2023-07-21 run --release --bin zksync_proof_fri_compressor` diff --git a/prover/prover/README.md b/prover/prover/README.md new file mode 100644 index 000000000000..2d70cf056d4b --- /dev/null +++ b/prover/prover/README.md @@ -0,0 +1,8 @@ +# Readme + +For compiling locally (no cuda) set `features=["legacy"], default-features=false` for: + +- `./Cargo.toml`: `heavy-ops-service` dependency. +- `../setup_key_generator_and_server/Cargo.toml`: `api` and `prover-service` dependencies. + +**! Don't push those changes !** diff --git a/prover/prover_fri/README.md b/prover/prover_fri/README.md new file mode 100644 index 000000000000..0b4d17def736 --- /dev/null +++ b/prover/prover_fri/README.md @@ -0,0 +1,150 @@ +# FRI Prover + +## running cpu prover + +`zk f cargo +nightly-2023-07-21 run --release --bin zksync_prover_fri` + +## running gpu prover(requires CUDA 12.0+) + +`zk f cargo +nightly-2023-07-21 run --release --features "gpu" --bin zksync_prover_fri` + +## Proving a block using CPU prover locally + +Below steps can be used to prove a block on local machine using CPU prover. This is useful for debugging and testing +Machine specs: + +- CPU: At least 8 physical cores +- RAM: 60GB of RAM(if you have lower RAM machine enable swap) +- Disk: 400GB of free disk + +1. Install the correct nightly version using command: `rustup install nightly-2023-07-21` +2. Generate the cpu setup data (no need to regenerate if it's already there). This will consume around 300Gb of disk. + Use these commands: + + ```markdown + for i in {1..13}; do zk f cargo run --release --bin zksync_setup_data_generator_fri -- --numeric-circuit $i + --is_base_layer done + + for i in {1..15}; do zk f cargo run --release --bin zksync_setup_data_generator_fri -- --numeric-circuit $i done + ``` + +3. Initialize DB and run migrations: `zk init` + +4. Override the following configuration in your `dev.env`: + + ``` + ETH_SENDER_SENDER_PROOF_SENDING_MODE=OnlyRealProofs + ETH_SENDER_SENDER_PROOF_LOADING_MODE=FriProofFromGcs + OBJECT_STORE_FILE_BACKED_BASE_PATH=/path/to/server/artifacts + PROVER_OBJECT_STORE_FILE_BACKED_BASE_PATH=/path/to/prover/artifacts + FRI_PROVER_SETUP_DATA_PATH=/path/to/above-generated/cpu-setup-data + ``` + +5. Run server `zk server --components=api,eth,tree,state_keeper,housekeeper,proof_data_handler` to produce blocks to be + proven +6. 
Run the prover gateway to fetch blocks to be proven from the server: + `zk f cargo run --release --bin zksync_prover_fri_gateway` +7. Run 4 witness generators to generate witnesses for each round: + + ``` + API_PROMETHEUS_LISTENER_PORT=3116 zk f cargo run --release --bin zksync_witness_generator -- --round=basic_circuits + API_PROMETHEUS_LISTENER_PORT=3117 zk f cargo run --release --bin zksync_witness_generator -- --round=leaf_aggregation + API_PROMETHEUS_LISTENER_PORT=3118 zk f cargo run --release --bin zksync_witness_generator -- --round=node_aggregation + API_PROMETHEUS_LISTENER_PORT=3119 zk f cargo run --release --bin zksync_witness_generator -- --round=scheduler + ``` + +8. Run the prover to perform the actual proving: `zk f cargo run --release --bin zksync_prover_fri` +9. Finally, run the proof compressor to compress the proof to be sent to L1: + `zk f cargo run --release --bin zksync_proof_fri_compressor` + +## Proving a block using GPU prover locally + +The steps below can be used to prove a block on a local machine using the GPU prover. It requires a CUDA 12.0 installation as a +prerequisite. This is useful for debugging and testing. Machine specs: + +- CPU: At least 8 physical cores +- RAM: 16GB of RAM (if your machine has less RAM, enable swap) +- Disk: 30GB of free disk +- GPU: 1x Nvidia L4/T4 with 16GB of GPU RAM + +1. Install the correct nightly version using command: `rustup install nightly-2023-07-21` +2. Generate the gpu setup data (no need to regenerate if it's already there). This will consume around 300GB of disk. + Use these commands: + + ```shell + for i in {1..13}; do zk f cargo run --features "gpu" --release --bin zksync_setup_data_generator_fri -- + --numeric-circuit $i --is_base_layer done + + for i in {1..15}; do zk f cargo run --features "gpu" --release --bin zksync_setup_data_generator_fri -- + --numeric-circuit $i done + ``` + +3. Initialize DB and run migrations: `zk init` + +4. Override the following configuration in your `dev.env`: + + ``` + ETH_SENDER_SENDER_PROOF_SENDING_MODE=OnlyRealProofs + ETH_SENDER_SENDER_PROOF_LOADING_MODE=FriProofFromGcs + OBJECT_STORE_FILE_BACKED_BASE_PATH=/path/to/server/artifacts + PROVER_OBJECT_STORE_FILE_BACKED_BASE_PATH=/path/to/prover/artifacts + FRI_PROVER_SETUP_DATA_PATH=/path/to/above-generated/gpu-setup-data + ``` + +5. Run the server `zk server --components=api,eth,tree,state_keeper,housekeeper,proof_data_handler` to produce blocks to be + proven +6. Run the prover gateway to fetch blocks to be proven from the server: + `zk f cargo run --release --bin zksync_prover_fri_gateway` +7. Run 4 witness generators to generate witnesses for each round: + + ``` + API_PROMETHEUS_LISTENER_PORT=3116 zk f cargo run --release --bin zksync_witness_generator -- --round=basic_circuits + API_PROMETHEUS_LISTENER_PORT=3117 zk f cargo run --release --bin zksync_witness_generator -- --round=leaf_aggregation + API_PROMETHEUS_LISTENER_PORT=3118 zk f cargo run --release --bin zksync_witness_generator -- --round=node_aggregation + API_PROMETHEUS_LISTENER_PORT=3119 zk f cargo run --release --bin zksync_witness_generator -- --round=scheduler + ``` + +8. Run the prover to perform the actual proving: `zk f cargo run --features "gpu" --release --bin zksync_prover_fri` +9. 
Run 5 witness vector generators to feed jobs to the GPU prover: + + ``` + FRI_WITNESS_VECTOR_GENERATOR_PROMETHEUS_LISTENER_PORT=3416 zk f cargo run --release --bin zksync_witness_vector_generator + FRI_WITNESS_VECTOR_GENERATOR_PROMETHEUS_LISTENER_PORT=3417 zk f cargo run --release --bin zksync_witness_vector_generator + FRI_WITNESS_VECTOR_GENERATOR_PROMETHEUS_LISTENER_PORT=3418 zk f cargo run --release --bin zksync_witness_vector_generator + FRI_WITNESS_VECTOR_GENERATOR_PROMETHEUS_LISTENER_PORT=3419 zk f cargo run --release --bin zksync_witness_vector_generator + FRI_WITNESS_VECTOR_GENERATOR_PROMETHEUS_LISTENER_PORT=3420 zk f cargo run --release --bin zksync_witness_vector_generator + ``` + +10. Finally, run the proof compressor to compress the proof to be sent to L1: + `zk f cargo run --release --bin zksync_proof_fri_compressor` + +## Performing circuit upgrade + +Performing a circuit upgrade requires updating the crypto library and generating new setup data, verification keys, and +finalization hints if the circuit changes. The steps below can be used to perform a circuit upgrade: + +1. check whether the circuit geometry has changed in the new version of the circuit by running the + [workflow](https://github.com/matter-labs/zkevm_test_harness/actions/workflows/geometry-config-generator.yml) in + harness, and merge the generated PR. +2. update the relevant crypto dependencies (boojum, zkevm_circuit, harness, etc.) in `Cargo.lock`, for example: + `cargo update -p zkevm_test_harness@1.4.0` +3. prepare a PR with the updated dependencies ([sample PR](https://github.com/matter-labs/zksync-2-dev/pull/2481)). +4. Run the verification key + [workflow](https://github.com/matter-labs/zksync-2-dev/actions/workflows/fri-vk-generator.yaml) against the PR to + generate the verification key and finalization hints for the new circuit. +5. Start the setup-data generation only once the above verification key workflow is successful (CPU and GPU setup data + generation can be done in parallel). This step is important: since the setup data requires the new VK, we need to + wait for it to finish. +6. Run the cpu setup data generation + [workflow](https://github.com/matter-labs/zksync-2-dev/actions/workflows/fri-setup-data-generator.yml) against the PR + to generate the cpu setup data. +7. Run the gpu setup data generation + [workflow](https://github.com/matter-labs/zksync-2-dev/actions/workflows/fri-gpu-setup-data-generator.yml) against + the PR to generate the gpu setup data. +8. Once the setup data generation workflows are successful, update the PR with the new `setup_keys_id` in + [build-docker-from-tag.yml](../../.github/workflows/build-docker-from-tag.yml) and in + [fri-gpu-prover-integration-test.yml](../../.github/workflows/fri-gpu-prover-integration-test.yml), making sure to only + take it from the `FRI prover` workflows, not the old ones. +9. Run the GPU integration test + [workflow](https://github.com/matter-labs/zksync-2-dev/actions/workflows/fri-gpu-prover-integration-test.yml) against + the PR to verify the GPU prover is working fine with the new circuits. diff --git a/prover/prover_fri_gateway/README.md b/prover/prover_fri_gateway/README.md new file mode 100644 index 000000000000..bfe04e1f6511 --- /dev/null +++ b/prover/prover_fri_gateway/README.md @@ -0,0 +1,11 @@ +# FRI Prover Gateway + +The Prover Gateway is a service component in our system infrastructure that acts as an intermediary between the prover +and the server's HTTP API. It regularly invokes the server's HTTP API to get proof-related data, and it submits proofs. 
+Its primary functions include: + +- **GetProofGenerationData**: This function is responsible for pulling proof generation data from the server's HTTP API. It + obtains the necessary data required to generate proofs in our system. The retrieved data is then used as input by the + prover for the proof generation process. +- **SubmitProof**: Once the proof is generated by the prover, this function is used to submit the resulting proof back to + the server. diff --git a/prover/prover_fri_types/README.md b/prover/prover_fri_types/README.md new file mode 100644 index 000000000000..7485656110e7 --- /dev/null +++ b/prover/prover_fri_types/README.md @@ -0,0 +1,8 @@ +# FRI Prover types + +This lib contains types used by the FRI prover and shared among: + +- FRI prover +- witness generator +- vk and setup data generator +- witness vector generator diff --git a/prover/vk_setup_data_generator_server_fri/README.md b/prover/vk_setup_data_generator_server_fri/README.md new file mode 100644 index 000000000000..1dc8b5c0fa2c --- /dev/null +++ b/prover/vk_setup_data_generator_server_fri/README.md @@ -0,0 +1,22 @@ +# Setup data and VK generator and server + +The SNARK VK generation requires the `CRS_FILE` environment variable to be present and point to the correct file. The +file can be downloaded from the following +[link](https://storage.googleapis.com/matterlabs-setup-keys-us/setup-keys/setup_2^26.key); it is also present at +`keys/setup/setup_2^26.key` after `zk init`. + +## generating setup-data for specific circuit type + +`zk f cargo +nightly-2023-07-21 run --release --bin zksync_setup_data_generator_fri -- --numeric-circuit 1 --is_base_layer` + +## generating GPU setup-data for specific circuit type + +`zk f cargo +nightly-2023-07-21 run --features "gpu" --release --bin zksync_setup_data_generator_fri -- --numeric-circuit 1 --is_base_layer` + +## Generating VKs + +`cargo +nightly-2023-07-21 run --release --bin zksync_vk_generator_fri` + +## generating VK commitment for existing VKs + +`cargo +nightly-2023-07-21 run --release --bin zksync_commitment_generator_fri` diff --git a/prover/witness_generator/README.md b/prover/witness_generator/README.md new file mode 100644 index 000000000000..9d35fe7e054a --- /dev/null +++ b/prover/witness_generator/README.md @@ -0,0 +1,52 @@ +# WitnessGenerator + +Please read this +[doc](https://www.notion.so/matterlabs/Draft-FRI-Prover-Integration-Prover-Shadowing-c4b1373786eb43779a93118be4be5d99) +for the rationale of this binary, alongside the existing one in zk-core. + +The component is responsible for generating prover jobs and saving artifacts needed for the next round of proof +aggregation. That is, every aggregation round needs two sets of input: + +- computed proofs from the previous round +- some artifacts that the witness generator of previous round(s) returns. 
There are four rounds of proofs for every + block, each of which starts with an invocation of `{Round}WitnessGenerator` with a corresponding + `WitnessGeneratorJobType`: + +## BasicCircuitsWitnessGenerator + +- generates basic circuits (circuits like `Main VM` - up to 50 \* 48 = 2400 circuits): +- input table: `basic_circuit_witness_jobs` (todo SMA-1362: will be renamed from `witness_inputs`) +- artifact/output table: `leaf_aggregation_jobs` (also creates job stubs in `node_aggregation_jobs` and + `scheduler_aggregation_jobs`) value in `aggregation_round` field of `prover_jobs` table: 0 + +## LeafAggregationWitnessGenerator + +- generates leaf aggregation circuits (up to 48 circuits of type `LeafAggregation`) +- input table: `leaf_aggregation_jobs` +- artifact/output table: `node_aggregation_jobs` +- value in `aggregation_round` field of `prover_jobs` table: 1 + +## NodeAggregationWitnessGenerator + +- generates one circuit of type `NodeAggregation` +- input table: `node_aggregation_jobs` +- value in `aggregation_round` field of `prover_jobs` table: 2 + +## SchedulerWitnessGenerator + +- generates one circuit of type `Scheduler` +- input table: `scheduler_witness_jobs` +- value in `aggregation_round` field of `prover_jobs` table: 3 + +One round of prover generation consists of: + +- `WitnessGenerator` picks up the next `queued` job in its input table and processes it (invoking the corresponding + helper function in the `zkevm_test_harness` repo) +- it saves the generated circuits to the `prover_jobs` table and the other artifacts to its output table +- the individual proofs are picked up by the provers, processed, and marked as complete. +- when the last proof for this round is computed, the prover updates the row in the output table setting its status to + `queued` +- `WitnessGenerator` picks up such a job and proceeds to the next round + +Note that the very first input table (`witness_inputs`) is populated by the tree (as the input artifacts for +`WitnessGeneratorJobType::BasicCircuits` are the Merkle proofs). diff --git a/prover/witness_vector_generator/README.md b/prover/witness_vector_generator/README.md new file mode 100644 index 000000000000..8c4328afe8cf --- /dev/null +++ b/prover/witness_vector_generator/README.md @@ -0,0 +1,7 @@ +# Witness vector generator + +Used to generate witness vectors using circuits and send them to the prover over TCP. + +## running + +`zk f cargo +nightly-2023-07-21 run --release --bin zksync_witness_vector_generator` From 7dfbc5eddab94cd24f96912e0d43ba36e1cf363f Mon Sep 17 00:00:00 2001 From: Alex Ostrovski Date: Tue, 3 Oct 2023 17:39:17 +0300 Subject: [PATCH 10/29] feat: Implement dynamic L2-to-L1 log tree depth (#126) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit # What ❔ Implements dynamic depth of the in-memory L2-to-L1 log Merkle tree. Previously, this tree always had 512 items (if necessary, additional zero items were added at the end). With these changes, the tree has *at least* 512 items (with padding); the actual number of items is `max(512, items.len().next_power_of_two())`. This makes the change backward-compatible without needing any logic tied to L1 batch number etc. ## Why ❔ We want to allow larger Merkle tree depths than previously. ## Checklist - [x] PR title corresponds to the body of PR (we generate changelog entries from PRs). - [x] Tests for the changes have been added / updated. - [x] Documentation comments have been added / updated. - [x] Code has been formatted via `zk fmt` and `zk lint`. 
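To make the sizing rule concrete, here is a minimal standalone sketch. The `binary_tree_size` helper function and the sample sizes are hypothetical; only the rounding rule and the power-of-two assertion follow `MiniMerkleTree::with_hasher` in the diff below.

```rust
/// Mirrors the padding rule described above: the tree is sized to the leaf
/// count rounded up to a power of two, but never below `min_tree_size`.
fn binary_tree_size(leaf_count: usize, min_tree_size: Option<usize>) -> usize {
    let mut size = leaf_count.next_power_of_two();
    if let Some(min_size) = min_tree_size {
        assert!(min_size.is_power_of_two(), "tree size must be a power of 2");
        size = size.max(min_size);
    }
    size
}

fn main() {
    // 50 logs with the legacy minimum of 512: padded up to 512, as before.
    assert_eq!(binary_tree_size(50, Some(512)), 512);
    // 15_000 logs: the tree now grows past 512 instead of being rejected.
    assert_eq!(binary_tree_size(15_000, Some(512)), 16_384);
    // No minimum supplied: just the next power of two.
    assert_eq!(binary_tree_size(50, None), 64);
}
```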
--- core/lib/mini_merkle_tree/benches/tree.rs | 4 +- core/lib/mini_merkle_tree/src/lib.rs | 60 ++++---- core/lib/mini_merkle_tree/src/tests.rs | 60 ++++++-- core/lib/types/src/commitment.rs | 11 +- core/lib/types/src/l2_to_l1_log.rs | 5 + .../src/api_server/web3/namespaces/zks.rs | 129 +++++++++--------- 6 files changed, 155 insertions(+), 114 deletions(-) diff --git a/core/lib/mini_merkle_tree/benches/tree.rs b/core/lib/mini_merkle_tree/benches/tree.rs index 7206b64e7c4a..a964456bfb45 100644 --- a/core/lib/mini_merkle_tree/benches/tree.rs +++ b/core/lib/mini_merkle_tree/benches/tree.rs @@ -10,7 +10,7 @@ const TREE_SIZES: &[usize] = &[32, 64, 128, 256, 512, 1_024]; fn compute_merkle_root(bencher: &mut Bencher<'_>, tree_size: usize) { let leaves = (0..tree_size).map(|i| [i as u8; 88]); - let tree = MiniMerkleTree::new(leaves, tree_size); + let tree = MiniMerkleTree::new(leaves, None); bencher.iter_batched( || tree.clone(), MiniMerkleTree::merkle_root, @@ -20,7 +20,7 @@ fn compute_merkle_path(bencher: &mut Bencher<'_>, tree_size: usize) { let leaves = (0..tree_size).map(|i| [i as u8; 88]); - let tree = MiniMerkleTree::new(leaves, tree_size); + let tree = MiniMerkleTree::new(leaves, None); bencher.iter_batched( || tree.clone(), |tree| tree.merkle_root_and_path(tree_size / 3), diff --git a/core/lib/mini_merkle_tree/src/lib.rs b/core/lib/mini_merkle_tree/src/lib.rs index a4e9552aad6f..18bb343bc701 100644 --- a/core/lib/mini_merkle_tree/src/lib.rs +++ b/core/lib/mini_merkle_tree/src/lib.rs @@ -15,8 +15,9 @@ mod tests; use zksync_basic_types::H256; use zksync_crypto::hasher::{keccak::KeccakHasher, Hasher}; -/// Maximum supported depth of Merkle trees. 10 means that the tree must have <=1,024 leaves. -const MAX_TREE_DEPTH: usize = 10; +/// Maximum supported depth of the tree. 32 corresponds to `2^32` elements in the tree, which +/// we are unlikely to ever hit. +const MAX_TREE_DEPTH: usize = 32; /// In-memory Merkle tree of bounded depth (no more than 32). /// @@ -27,61 +28,61 @@ pub struct MiniMerkleTree<'a, const LEAF_SIZE: usize> { hasher: &'a dyn HashEmptySubtree, hashes: Box<[H256]>, - tree_size: usize, + binary_tree_size: usize, } impl MiniMerkleTree<'static, LEAF_SIZE> where KeccakHasher: HashEmptySubtree, { - /// Creates a new Merkle tree from the supplied leaves. If `tree_size` is larger than the - /// number of the supplied leaves, the remaining leaves are `[0_u8; LEAF_SIZE]`. + /// Creates a new Merkle tree from the supplied leaves. If `min_tree_size` is supplied and is larger + /// than the number of the supplied leaves, the leaves are padded to `min_tree_size` with `[0_u8; LEAF_SIZE]` entries. /// The hash function used is keccak-256. /// /// # Panics /// /// Panics in the same situations as [`Self::with_hasher()`]. - pub fn new(leaves: impl Iterator, tree_size: usize) -> Self { - Self::with_hasher(&KeccakHasher, leaves, tree_size) + pub fn new( + leaves: impl Iterator, + min_tree_size: Option, + ) -> Self { + Self::with_hasher(&KeccakHasher, leaves, min_tree_size) } } impl<'a, const LEAF_SIZE: usize> MiniMerkleTree<'a, LEAF_SIZE> { - /// Creates a new Merkle tree from the supplied leaves. If `tree_size` is larger than the - /// number of the supplied leaves, the remaining leaves are `[0_u8; LEAF_SIZE]`. + /// Creates a new Merkle tree from the supplied leaves. 
If `min_tree_size` is supplied and is larger than the + /// number of the supplied leaves, the leaves are padded to `min_tree_size` with `[0_u8; LEAF_SIZE]` entries. /// /// # Panics /// /// Panics if any of the following conditions applies: /// - /// - The number of `leaves` is greater than `tree_size`. - /// - `tree_size > 1_024`. - /// - `tree_size` is not a power of 2. + /// - `min_tree_size` (if supplied) is not a power of 2. pub fn with_hasher( hasher: &'a dyn HashEmptySubtree, leaves: impl Iterator, - tree_size: usize, + min_tree_size: Option, ) -> Self { - assert!( - tree_size <= 1 << MAX_TREE_DEPTH, - "tree size must be <={}", - 1 << MAX_TREE_DEPTH - ); - assert!( - tree_size.is_power_of_two(), - "tree size must be a power of 2" - ); - let hashes: Box<[H256]> = leaves.map(|bytes| hasher.hash_bytes(&bytes)).collect(); + let mut binary_tree_size = hashes.len().next_power_of_two(); + if let Some(min_tree_size) = min_tree_size { + assert!( + min_tree_size.is_power_of_two(), + "tree size must be a power of 2" + ); + binary_tree_size = min_tree_size.max(binary_tree_size); + } assert!( - hashes.len() <= tree_size, - "tree size must be greater or equal the number of supplied leaves" + tree_depth_by_size(binary_tree_size) <= MAX_TREE_DEPTH, + "Tree contains more than {} items; this is not supported", + 1 << MAX_TREE_DEPTH ); Self { hasher, hashes, - tree_size, + binary_tree_size, } } @@ -97,7 +98,7 @@ impl<'a, const LEAF_SIZE: usize> MiniMerkleTree<'a, LEAF_SIZE> { /// Returns the root hash and the Merkle proof for a leaf with the specified 0-based `index`. pub fn merkle_root_and_path(self, index: usize) -> (H256, Vec) { - let mut merkle_path = Vec::with_capacity(MAX_TREE_DEPTH); + let mut merkle_path = vec![]; let root_hash = self.compute_merkle_root_and_path(index, Some(&mut merkle_path)); (root_hash, merkle_path) } @@ -109,7 +110,10 @@ impl<'a, const LEAF_SIZE: usize> MiniMerkleTree<'a, LEAF_SIZE> { ) -> H256 { assert!(index < self.hashes.len(), "invalid tree leaf index"); - let depth = tree_depth_by_size(self.tree_size); + let depth = tree_depth_by_size(self.binary_tree_size); + if let Some(merkle_path) = merkle_path.as_deref_mut() { + merkle_path.reserve(depth); + } let mut hashes = self.hashes; let mut level_len = hashes.len(); diff --git a/core/lib/mini_merkle_tree/src/tests.rs b/core/lib/mini_merkle_tree/src/tests.rs index f5745cf43aab..c534c87523cd 100644 --- a/core/lib/mini_merkle_tree/src/tests.rs +++ b/core/lib/mini_merkle_tree/src/tests.rs @@ -26,7 +26,7 @@ fn hash_of_empty_tree_with_single_item() { for depth in 0..=5 { let len = 1 << depth; println!("checking tree with {len} items"); - let tree = MiniMerkleTree::new(iter::once([0_u8; 88]), len); + let tree = MiniMerkleTree::new(iter::once([0_u8; 88]), Some(len)); assert_eq!(tree.merkle_root(), KeccakHasher.empty_subtree_hash(depth)); } } @@ -38,16 +38,18 @@ fn hash_of_large_empty_tree_with_multiple_items() { let leaves = iter::repeat([0_u8; 88]).take(len); let tree_size = len.next_power_of_two(); - let tree = MiniMerkleTree::new(leaves, tree_size); + let tree = MiniMerkleTree::new(leaves.clone(), Some(tree_size)); + let depth = tree_depth_by_size(tree_size); + assert_eq!(tree.merkle_root(), KeccakHasher.empty_subtree_hash(depth)); + let tree = MiniMerkleTree::new(leaves, None); let depth = tree_depth_by_size(tree_size); - assert!(depth <= MAX_TREE_DEPTH); assert_eq!(tree.merkle_root(), KeccakHasher.empty_subtree_hash(depth)); } } #[test] fn single_item_tree_snapshot() { - let tree = MiniMerkleTree::new(iter::once([1_u8; 88]), 
32); + let tree = MiniMerkleTree::new(iter::once([1_u8; 88]), Some(32)); let (root_hash, path) = tree.merkle_root_and_path(0); let expected_root_hash: H256 = @@ -70,7 +72,7 @@ fn single_item_tree_snapshot() { #[test] fn full_tree_snapshot() { let leaves = (1_u8..=32).map(|byte| [byte; 88]); - let tree = MiniMerkleTree::new(leaves, 32); + let tree = MiniMerkleTree::new(leaves, None); let (root_hash, path) = tree.merkle_root_and_path(2); let expected_root_hash: H256 = @@ -93,7 +95,7 @@ fn full_tree_snapshot() { #[test] fn partial_tree_snapshot() { let leaves = (1_u8..=50).map(|byte| [byte; 88]); - let tree = MiniMerkleTree::new(leaves.clone(), 64); + let tree = MiniMerkleTree::new(leaves.clone(), None); let (root_hash, path) = tree.merkle_root_and_path(10); let expected_root_hash: H256 = @@ -113,7 +115,7 @@ fn partial_tree_snapshot() { .map(|s| s.parse::().unwrap()); assert_eq!(path, expected_path); - let tree = MiniMerkleTree::new(leaves, 64); + let tree = MiniMerkleTree::new(leaves, None); let (root_hash, path) = tree.merkle_root_and_path(49); assert_eq!(root_hash, expected_root_hash); @@ -157,7 +159,7 @@ fn verify_merkle_proof( #[test] fn merkle_proofs_are_valid_in_small_tree() { let leaves = (1_u8..=50).map(|byte| [byte; 88]); - let tree = MiniMerkleTree::new(leaves.clone(), 64); + let tree = MiniMerkleTree::new(leaves.clone(), None); for (i, item) in leaves.enumerate() { let (merkle_root, path) = tree.clone().merkle_root_and_path(i); @@ -168,10 +170,50 @@ fn merkle_proofs_are_valid_in_small_tree() { #[test] fn merkle_proofs_are_valid_in_larger_tree() { let leaves = (1_u8..=255).map(|byte| [byte; 88]); - let tree = MiniMerkleTree::new(leaves.clone(), 512); + let tree = MiniMerkleTree::new(leaves.clone(), Some(512)); for (i, item) in leaves.enumerate() { let (merkle_root, path) = tree.clone().merkle_root_and_path(i); verify_merkle_proof(&item, i, 512, &path, merkle_root); } } + +#[test] +#[allow(clippy::cast_possible_truncation)] // truncation is intentional +fn merkle_proofs_are_valid_in_very_large_tree() { + let leaves = (1_u32..=15_000).map(|byte| [byte as u8; 88]); + + let tree = MiniMerkleTree::new(leaves.clone(), None); + for (i, item) in leaves.clone().enumerate().step_by(61) { + let (merkle_root, path) = tree.clone().merkle_root_and_path(i); + verify_merkle_proof(&item, i, 1 << 14, &path, merkle_root); + } + + let tree_with_min_size = MiniMerkleTree::new(leaves.clone(), Some(512)); + assert_eq!(tree_with_min_size.clone().merkle_root(), tree.merkle_root()); + for (i, item) in leaves.enumerate().step_by(61) { + let (merkle_root, path) = tree_with_min_size.clone().merkle_root_and_path(i); + verify_merkle_proof(&item, i, 1 << 14, &path, merkle_root); + } +} + +#[test] +fn merkle_proofs_are_valid_in_very_small_trees() { + for item_count in 1..=20 { + let leaves = (1..=item_count).map(|byte| [byte; 88]); + + let tree = MiniMerkleTree::new(leaves.clone(), None); + let item_count = usize::from(item_count).next_power_of_two(); + for (i, item) in leaves.clone().enumerate() { + let (merkle_root, path) = tree.clone().merkle_root_and_path(i); + verify_merkle_proof(&item, i, item_count, &path, merkle_root); + } + + let tree_with_min_size = MiniMerkleTree::new(leaves.clone(), Some(512)); + assert_ne!(tree_with_min_size.clone().merkle_root(), tree.merkle_root()); + for (i, item) in leaves.enumerate() { + let (merkle_root, path) = tree_with_min_size.clone().merkle_root_and_path(i); + verify_merkle_proof(&item, i, 512, &path, merkle_root); + } + } +} diff --git a/core/lib/types/src/commitment.rs 
b/core/lib/types/src/commitment.rs index 89207309f206..abc0946fa34f 100644 --- a/core/lib/types/src/commitment.rs +++ b/core/lib/types/src/commitment.rs @@ -15,7 +15,6 @@ use zksync_mini_merkle_tree::MiniMerkleTree; use crate::{ block::L1BatchHeader, - circuit::GEOMETRY_CONFIG, ethabi::Token, l2_to_l1_log::L2ToL1Log, web3::signing::keccak256, @@ -27,8 +26,6 @@ use crate::{ pub trait SerializeCommitment { /// Size of the structure in bytes. const SERIALIZED_SIZE: usize; - /// The number of objects of this type that can be included in a single L1 batch. - const LIMIT_PER_L1_BATCH: usize; /// Serializes this struct into the provided buffer, which is guaranteed to have byte length /// [`Self::SERIALIZED_SIZE`]. fn serialize_commitment(&self, buffer: &mut [u8]); @@ -167,7 +164,6 @@ impl L1BatchWithMetadata { impl SerializeCommitment for L2ToL1Log { const SERIALIZED_SIZE: usize = 88; - const LIMIT_PER_L1_BATCH: usize = GEOMETRY_CONFIG.limit_for_l1_messages_merklizer as usize; fn serialize_commitment(&self, buffer: &mut [u8]) { buffer[0] = self.shard_id; @@ -181,8 +177,6 @@ impl SerializeCommitment for L2ToL1Log { impl SerializeCommitment for InitialStorageWrite { const SERIALIZED_SIZE: usize = 64; - const LIMIT_PER_L1_BATCH: usize = - GEOMETRY_CONFIG.limit_for_initial_writes_pubdata_hasher as usize; fn serialize_commitment(&self, buffer: &mut [u8]) { self.key.to_little_endian(&mut buffer[0..32]); @@ -192,8 +186,6 @@ impl SerializeCommitment for InitialStorageWrite { impl SerializeCommitment for RepeatedStorageWrite { const SERIALIZED_SIZE: usize = 40; - const LIMIT_PER_L1_BATCH: usize = - GEOMETRY_CONFIG.limit_for_repeated_writes_pubdata_hasher as usize; fn serialize_commitment(&self, buffer: &mut [u8]) { buffer[..8].copy_from_slice(&self.index.to_be_bytes()); @@ -238,8 +230,9 @@ impl L1BatchAuxiliaryOutput { .chunks(L2ToL1Log::SERIALIZED_SIZE) .map(|chunk| <[u8; L2ToL1Log::SERIALIZED_SIZE]>::try_from(chunk).unwrap()); // ^ Skip first 4 bytes of the serialized logs (i.e., the number of logs). + let min_tree_size = Some(L2ToL1Log::LEGACY_LIMIT_PER_L1_BATCH); let l2_l1_logs_merkle_root = - MiniMerkleTree::new(merkle_tree_leaves, L2ToL1Log::LIMIT_PER_L1_BATCH).merkle_root(); + MiniMerkleTree::new(merkle_tree_leaves, min_tree_size).merkle_root(); Self { l2_l1_logs_compressed, diff --git a/core/lib/types/src/l2_to_l1_log.rs b/core/lib/types/src/l2_to_l1_log.rs index 57338c4766c3..8ad01b6f272f 100644 --- a/core/lib/types/src/l2_to_l1_log.rs +++ b/core/lib/types/src/l2_to_l1_log.rs @@ -15,6 +15,11 @@ pub struct L2ToL1Log { } impl L2ToL1Log { + /// Legacy upper bound of L2-to-L1 logs per single L1 batch. This is not used as a limit now, + /// but still determines the minimum number of items in the Merkle tree built from L2-to-L1 logs + /// for a certain batch. 
+ pub const LEGACY_LIMIT_PER_L1_BATCH: usize = 512; + pub fn from_slice(data: &[u8]) -> Self { assert_eq!(data.len(), Self::SERIALIZED_SIZE); Self { diff --git a/core/lib/zksync_core/src/api_server/web3/namespaces/zks.rs b/core/lib/zksync_core/src/api_server/web3/namespaces/zks.rs index 54e58187afea..a9a8ee435481 100644 --- a/core/lib/zksync_core/src/api_server/web3/namespaces/zks.rs +++ b/core/lib/zksync_core/src/api_server/web3/namespaces/zks.rs @@ -1,6 +1,7 @@ use std::{collections::HashMap, convert::TryInto, time::Instant}; use bigdecimal::{BigDecimal, Zero}; +use zksync_dal::StorageProcessor; use zksync_mini_merkle_tree::MiniMerkleTree; use zksync_types::{ @@ -8,7 +9,6 @@ use zksync_types::{ BlockDetails, BridgeAddresses, GetLogsFilter, L1BatchDetails, L2ToL1LogProof, ProtocolVersion, TransactionDetails, }, - commitment::SerializeCommitment, fee::Fee, l1::L1Tx, l2::L2Tx, @@ -266,15 +266,9 @@ impl ZksNamespace { .map_err(|err| internal_error(METHOD_NAME, err))? .expect("L1 batch should contain at least one miniblock"); - let all_l1_logs_in_batch = storage - .blocks_web3_dal() - .get_l2_to_l1_logs(l1_batch_number) - .await - .map_err(|err| internal_error(METHOD_NAME, err))?; - // Position of l1 log in L1 batch relative to logs with identical data let l1_log_relative_position = if let Some(l2_log_position) = l2_log_position { - let pos = storage + let logs = storage .events_web3_dal() .get_logs( GetLogsFilter { @@ -286,48 +280,69 @@ impl ZksNamespace { self.state.api_config.req_entities_limit, ) .await - .map_err(|err| internal_error(METHOD_NAME, err))? - .iter() - .position(|event| { - event.block_number == Some(block_number.0.into()) - && event.log_index == Some(l2_log_position.into()) - }); - match pos { + .map_err(|err| internal_error(METHOD_NAME, err))?; + let maybe_pos = logs.iter().position(|event| { + event.block_number == Some(block_number.0.into()) + && event.log_index == Some(l2_log_position.into()) + }); + match maybe_pos { Some(pos) => pos, - None => { - return Ok(None); - } + None => return Ok(None), } } else { 0 }; - let l1_log_index = match all_l1_logs_in_batch + let log_proof = self + .get_l2_to_l1_log_proof_inner( + METHOD_NAME, + &mut storage, + l1_batch_number, + l1_log_relative_position, + |log| { + log.sender == L1_MESSENGER_ADDRESS + && log.key == address_to_h256(&sender) + && log.value == msg + }, + ) + .await?; + + metrics::histogram!("api.web3.call", start.elapsed(), "method" => METHOD_NAME); + Ok(log_proof) + } + + async fn get_l2_to_l1_log_proof_inner( + &self, + method_name: &'static str, + storage: &mut StorageProcessor<'_>, + l1_batch_number: L1BatchNumber, + index_in_filtered_logs: usize, + log_filter: impl Fn(&L2ToL1Log) -> bool, + ) -> Result, Web3Error> { + let all_l1_logs_in_batch = storage + .blocks_web3_dal() + .get_l2_to_l1_logs(l1_batch_number) + .await + .map_err(|err| internal_error(method_name, err))?; + + let Some((l1_log_index, _)) = all_l1_logs_in_batch .iter() .enumerate() - .filter(|(_, log)| { - log.sender == L1_MESSENGER_ADDRESS - && log.key == address_to_h256(&sender) - && log.value == msg - }) - .nth(l1_log_relative_position) - { - Some(nth_elem) => nth_elem.0, - None => { - return Ok(None); - } + .filter(|(_, log)| log_filter(log)) + .nth(index_in_filtered_logs) + else { + return Ok(None); }; let merkle_tree_leaves = all_l1_logs_in_batch.iter().map(L2ToL1Log::to_bytes); - let (root, proof) = MiniMerkleTree::new(merkle_tree_leaves, L2ToL1Log::LIMIT_PER_L1_BATCH) + let min_tree_size = Some(L2ToL1Log::LEGACY_LIMIT_PER_L1_BATCH); + let 
(root, proof) = MiniMerkleTree::new(merkle_tree_leaves, min_tree_size) .merkle_root_and_path(l1_log_index); - let msg_proof = L2ToL1LogProof { + Ok(Some(L2ToL1LogProof { proof, root, id: l1_log_index as u32, - }; - metrics::histogram!("api.web3.call", start.elapsed(), "method" => METHOD_NAME); - Ok(Some(msg_proof)) + })) } #[tracing::instrument(skip(self))] @@ -345,45 +360,27 @@ impl ZksNamespace { .access_storage_tagged("api") .await .unwrap(); - let (l1_batch_number, l1_batch_tx_index) = match storage + let Some((l1_batch_number, l1_batch_tx_index)) = storage .blocks_web3_dal() .get_l1_batch_info_for_tx(tx_hash) .await .map_err(|err| internal_error(METHOD_NAME, err))? - { - Some(x) => x, - None => return Ok(None), - }; - - let all_l1_logs_in_batch = storage - .blocks_web3_dal() - .get_l2_to_l1_logs(l1_batch_number) - .await - .map_err(|err| internal_error(METHOD_NAME, err))?; - - let l1_log_index = match all_l1_logs_in_batch - .iter() - .enumerate() - .filter(|(_, log)| log.tx_number_in_block == l1_batch_tx_index) - .nth(index.unwrap_or(0)) - { - Some(nth_elem) => nth_elem.0, - None => { - return Ok(None); - } + else { + return Ok(None); }; - let merkle_tree_leaves = all_l1_logs_in_batch.iter().map(L2ToL1Log::to_bytes); - let (root, proof) = MiniMerkleTree::new(merkle_tree_leaves, L2ToL1Log::LIMIT_PER_L1_BATCH) - .merkle_root_and_path(l1_log_index); - let msg_proof = L2ToL1LogProof { - proof, - root, - id: l1_log_index as u32, - }; + let log_proof = self + .get_l2_to_l1_log_proof_inner( + METHOD_NAME, + &mut storage, + l1_batch_number, + index.unwrap_or(0), + |log| log.tx_number_in_block == l1_batch_tx_index, + ) + .await?; metrics::histogram!("api.web3.call", start.elapsed(), "method" => METHOD_NAME); - Ok(Some(msg_proof)) + Ok(log_proof) } #[tracing::instrument(skip(self))] From 7d017f431b886d7cbd190f13e8874f5032128185 Mon Sep 17 00:00:00 2001 From: Shahar Kaminsky Date: Tue, 3 Oct 2023 18:43:29 +0300 Subject: [PATCH 11/29] chore(codeowners): Update Owner Team (#136) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit # What ❔ This PR replaces the core team in codeowners with the newly created era-reviewers team. ## Why ❔ To tighten who can approve PRs and not spam the rest. ## Checklist - [x] PR title corresponds to the body of PR (we generate changelog entries from PRs). - [ ] Tests for the changes have been added / updated. - [x] Documentation comments have been added / updated. - [x] Code has been formatted via `zk fmt` and `zk lint`. --- CODEOWNERS | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/CODEOWNERS b/CODEOWNERS index 981a2db39116..8cde1cc1ade7 100644 --- a/CODEOWNERS +++ b/CODEOWNERS @@ -1,4 +1,4 @@ -* @matter-labs/core +* @matter-labs/era-reviewers .github/release-please/** @RomanBrodetski @perekopskiy @Deniallugo @popzxc **/CHANGELOG.md @RomanBrodetski @perekopskiy @Deniallugo @popzxc CODEOWNERS @RomanBrodetski @perekopskiy @Deniallugo @popzxc From aa60ecd1eb0cd54fe7955f57419729ef0cdc1831 Mon Sep 17 00:00:00 2001 From: Maksym Date: Tue, 3 Oct 2023 22:05:17 +0300 Subject: [PATCH 12/29] ci: switch default rust to nightly for prover builds (#139) Switch default rust target to nightly for prover builds. 
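For context, a short usage sketch of the compose-file selection this introduces; the exact commands are illustrative, but the fallback logic matches the updated `bin/ci_run` in the diff below.

```bash
# Default: RUNNER_COMPOSE_FILE is unset, so bin/ci_run falls back to
# docker-compose-runner.yml and the command executes in the stable-toolchain container.
ci_run zk db setup

# Prover jobs export RUNNER_COMPOSE_FILE at the job level, so the very same
# wrapper runs inside the nightly image from docker-compose-runner-nightly.yml.
export RUNNER_COMPOSE_FILE=docker-compose-runner-nightly.yml
ci_run bash -c "cd prover && zk f cargo test --release -p zksync_prover_fri_utils"
```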
--- .github/workflows/build-prover-template.yml | 2 +- .github/workflows/ci-prover-reusable.yml | 34 +++++---------------- bin/ci_run | 4 +-- docker-compose-runner-nightly.yml | 17 +++++++++++ 4 files changed, 28 insertions(+), 29 deletions(-) create mode 100644 docker-compose-runner-nightly.yml diff --git a/.github/workflows/build-prover-template.yml b/.github/workflows/build-prover-template.yml index f084bb3c382b..b3a0c262503e 100644 --- a/.github/workflows/build-prover-template.yml +++ b/.github/workflows/build-prover-template.yml @@ -46,6 +46,7 @@ jobs: env: image_tag: ${{ inputs.image_tag }} IMAGE_TAG_SUFFIX: ${{ inputs.image_tag_suffix }} + RUNNER_COMPOSE_FILE: "docker-compose-runner-nightly.yml" runs-on: [matterlabs-ci-runner] needs: [era-bellman-cuda] strategy: @@ -126,7 +127,6 @@ jobs: if [ "$underscored_name" == "prover_gpu_fri" ]; then underscored_name="prover_fri" fi - ci_run rustup default nightly-2023-07-21 ci_run echo [workspace] > Cargo.toml ci_run echo members = [\"prover/${underscored_name}\"] >> Cargo.toml ci_run cp prover/Cargo.lock Cargo.lock diff --git a/.github/workflows/ci-prover-reusable.yml b/.github/workflows/ci-prover-reusable.yml index e51ec94a81c3..0626457761ed 100644 --- a/.github/workflows/ci-prover-reusable.yml +++ b/.github/workflows/ci-prover-reusable.yml @@ -5,51 +5,33 @@ on: jobs: unit-tests: runs-on: [matterlabs-ci-runner] + env: + RUNNER_COMPOSE_FILE: "docker-compose-runner-nightly.yml" steps: - - name: Prepare environment - run: | - sudo apt update && sudo apt install -y \ - pkg-config libclang-dev build-essential lldb lld \ - clang openssl libssl-dev gcc g++ pkg-config libclang-dev curl wget - - uses: actions/checkout@ac593985615ec2ede58e132d2e21d2b1cbd6127c # v3 with: submodules: "recursive" - - name: Use Node.js 18.18.0 - uses: actions/setup-node@v2 - with: - node-version: '18.18.0' - - - name: Install Rust nightly-2023-07-21 - uses: actions-rs/toolchain@v1 - with: - toolchain: nightly-2023-07-21 - override: true - - name: Setup environment run: | echo ZKSYNC_HOME=$(pwd) >> $GITHUB_ENV echo $(pwd)/bin >> $GITHUB_PATH echo IN_DOCKER=1 >> .env - npm install -g yarn - cargo install sqlx-cli --version 0.5.13 - name: Start services run: | - docker-compose -f docker-compose-runner.yml pull - docker-compose -f docker-compose-runner.yml up --build -d zk + docker-compose -f ${RUNNER_COMPOSE_FILE} pull + docker-compose -f ${RUNNER_COMPOSE_FILE} up --build -d geth zk postgres ci_run sccache --start-server - name: Init run: | - zk - zk config compile - zk db setup + ci_run zk + ci_run zk config compile + ci_run zk db setup - name: Prover unit tests run: | - cd prover # Not all tests are enabled since, prover and setup_key_generator_and_server requires bellman-cuda to be present - zk f cargo +nightly-2023-07-21 test --release -p zksync_witness_generator -p vk_setup_data_generator_server_fri -p zksync_prover_fri -p zksync_witness_vector_generator -p zksync_prover_fri_utils + ci_run bash -c "cd prover && zk f cargo test --release -p zksync_witness_generator -p vk_setup_data_generator_server_fri -p zksync_prover_fri -p zksync_witness_vector_generator -p zksync_prover_fri_utils" diff --git a/bin/ci_run b/bin/ci_run index b76fce10ac70..0f578106f467 100755 --- a/bin/ci_run +++ b/bin/ci_run @@ -2,5 +2,5 @@ # Runs the command from within CI docker-compose environment. 
cd $ZKSYNC_HOME - -docker-compose -f docker-compose-runner.yml exec -T zk $@ +compose_file="${RUNNER_COMPOSE_FILE:-docker-compose-runner.yml}" +docker-compose -f $compose_file exec -T zk $@ diff --git a/docker-compose-runner-nightly.yml b/docker-compose-runner-nightly.yml new file mode 100644 index 000000000000..2d60a0325f63 --- /dev/null +++ b/docker-compose-runner-nightly.yml @@ -0,0 +1,17 @@ +version: '3.2' +services: + zk: + image: matterlabs/zksync_rust:nightly + extends: + file: docker-compose-runner.yml + service: zk + + postgres: + extends: + file: docker-compose-runner.yml + service: postgres + + geth: + extends: + file: docker-compose-runner.yml + service: geth From 4e2b011d13d913a667f08ea314b1d088205e08c9 Mon Sep 17 00:00:00 2001 From: "github-actions[bot]" <41898282+github-actions[bot]@users.noreply.github.com> Date: Tue, 3 Oct 2023 21:37:07 +0200 Subject: [PATCH 13/29] chore(main): release core 15.1.0 (#148) :robot: I have created a release *beep* *boop* --- ## [15.1.0](https://github.com/matter-labs/zksync-era/compare/core-v15.0.2...core-v15.1.0) (2023-10-03) ### Features * Implement dynamic L2-to-L1 log tree depth ([#126](https://github.com/matter-labs/zksync-era/issues/126)) ([7dfbc5e](https://github.com/matter-labs/zksync-era/commit/7dfbc5eddab94cd24f96912e0d43ba36e1cf363f)) * **vm:** Introduce new way of returning from the tracer [#2569](https://github.com/matter-labs/zksync-era/issues/2569) ([#116](https://github.com/matter-labs/zksync-era/issues/116)) ([cf44a49](https://github.com/matter-labs/zksync-era/commit/cf44a491a324199b4cf457d28658da44b6dafc61)) * **vm:** Restore system-constants-generator ([#115](https://github.com/matter-labs/zksync-era/issues/115)) ([5e61bdc](https://github.com/matter-labs/zksync-era/commit/5e61bdc75b2baa03004d4d3e801170c094766964)) --- This PR was generated with [Release Please](https://github.com/googleapis/release-please). See [documentation](https://github.com/googleapis/release-please#release-please). 
Co-authored-by: github-actions[bot] <41898282+github-actions[bot]@users.noreply.github.com> Co-authored-by: Danil --- .github/release-please/manifest.json | 2 +- core/CHANGELOG.md | 9 +++++++++ 2 files changed, 10 insertions(+), 1 deletion(-) diff --git a/.github/release-please/manifest.json b/.github/release-please/manifest.json index d3fd86e709c3..995633511099 100644 --- a/.github/release-please/manifest.json +++ b/.github/release-please/manifest.json @@ -1,6 +1,6 @@ { "sdk/zksync-web3.js": "0.15.4", "sdk/zksync-rs": "0.4.0", - "core": "15.0.2", + "core": "15.1.0", "prover": "7.1.1" } diff --git a/core/CHANGELOG.md b/core/CHANGELOG.md index 1b8d1857453b..36060639819d 100644 --- a/core/CHANGELOG.md +++ b/core/CHANGELOG.md @@ -1,5 +1,14 @@ # Changelog +## [15.1.0](https://github.com/matter-labs/zksync-era/compare/core-v15.0.2...core-v15.1.0) (2023-10-03) + + +### Features + +* Implement dynamic L2-to-L1 log tree depth ([#126](https://github.com/matter-labs/zksync-era/issues/126)) ([7dfbc5e](https://github.com/matter-labs/zksync-era/commit/7dfbc5eddab94cd24f96912e0d43ba36e1cf363f)) +* **vm:** Introduce new way of returning from the tracer [#2569](https://github.com/matter-labs/zksync-era/issues/2569) ([#116](https://github.com/matter-labs/zksync-era/issues/116)) ([cf44a49](https://github.com/matter-labs/zksync-era/commit/cf44a491a324199b4cf457d28658da44b6dafc61)) +* **vm:** Restore system-constants-generator ([#115](https://github.com/matter-labs/zksync-era/issues/115)) ([5e61bdc](https://github.com/matter-labs/zksync-era/commit/5e61bdc75b2baa03004d4d3e801170c094766964)) + ## [15.0.1](https://github.com/matter-labs/zksync-2-dev/compare/core-v15.0.0...core-v15.0.1) (2023-09-27) From 2db848998c3428664d140f1b086ada0e6f255edd Mon Sep 17 00:00:00 2001 From: agolajko <57454127+agolajko@users.noreply.github.com> Date: Wed, 4 Oct 2023 10:58:47 +0100 Subject: [PATCH 14/29] fix(hyperchain_wizard): clean up init-hyperchain (#127) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit feat: hyperchain wizard - cleaned up the `init` function - added `required` for empty information inputs - Two more things I’ll do today: - validate the private keys and addresses - Check deployer and governor keys are the same --- infrastructure/zk/src/env.ts | 2 +- infrastructure/zk/src/hyperchain_wizard.ts | 227 +++++++++++---------- infrastructure/zk/src/init.ts | 74 ++++++- infrastructure/zk/src/run/run.ts | 4 + 4 files changed, 187 insertions(+), 120 deletions(-) diff --git a/infrastructure/zk/src/env.ts b/infrastructure/zk/src/env.ts index cda73ddf6f5a..9b6eb443636d 100644 --- a/infrastructure/zk/src/env.ts +++ b/infrastructure/zk/src/env.ts @@ -57,7 +57,7 @@ export function set(env: string, print: boolean = false) { const envFile = (process.env.ENV_FILE = `etc/env/${env}.env`); if (!fs.existsSync(envFile)) { // No .env file found - we should compile it! 
- config.compileConfig(); + config.compileConfig(env); } reload(); get(print); diff --git a/infrastructure/zk/src/hyperchain_wizard.ts b/infrastructure/zk/src/hyperchain_wizard.ts index f588084ee0d1..40f692137eb2 100644 --- a/infrastructure/zk/src/hyperchain_wizard.ts +++ b/infrastructure/zk/src/hyperchain_wizard.ts @@ -1,14 +1,10 @@ import { Command } from 'commander'; import enquirer from 'enquirer'; -import { BigNumber, ethers } from 'ethers'; +import { BigNumber, ethers, utils } from 'ethers'; import chalk from 'chalk'; -import { announced, submoduleUpdate } from './init'; +import { announced, init, InitArgs } from './init'; import * as server from './server'; -import * as contract from './contract'; -import * as run from './run/run'; -import * as compiler from './compiler'; import * as db from './database'; -import { clean } from './clean'; import * as env from './env'; import { compileConfig } from './config'; import * as fs from 'fs'; @@ -38,34 +34,33 @@ interface BasePromptOptions { skip?: ((state: object) => boolean | Promise) | boolean; } -// An init command that allows configuring and spinning up a new Hyperchain network +// An init command that allows configuring and spinning up a new Hyperchain network. async function initHyperchain() { await announced('Initializing Hyperchain creation', setupConfiguration()); - await announced('Drop postgres db', db.drop()); - await announced('Setup postgres db', db.setup()); - await announced('Clean rocksdb', clean('db')); - await announced('Clean backups', clean('backups')); - await announced('Building L1 and L2 contracts', contract.build()); - - await announced('Deploy test tokens', initializeTestERC20s()); - await announced('Deploying L1 verifier', contract.deployVerifier([])); - await announced('Running server genesis setup', server.genesisFromSources()); - const deployerPrivateKey = process.env.DEPLOYER_PRIVATE_KEY; const governorPrivateKey = process.env.GOVERNOR_PRIVATE_KEY; const governorAddress = process.env.GOVERNOR_ADDRESS; + const deployL2Weth = Boolean(process.env.DEPLOY_L2_WETH || false); + const deployTestTokens = Boolean(process.env.DEPLOY_TEST_TOKENS || false); + + const initArgs: InitArgs = { + skipSubmodulesCheckout: false, + skipEnvSetup: false, + deployerL1ContractInputArgs: ['--private-key', deployerPrivateKey, '--governor-address', governorAddress], + governorPrivateKeyArgs: ['--private-key', governorPrivateKey], + deployerL2ContractInput: { + args: ['--private-key', deployerPrivateKey], + includePaymaster: false, + includeL2WETH: deployL2Weth + }, + testTokens: { + deploy: deployTestTokens, + args: ['--private-key', deployerPrivateKey, '--envFile', process.env.CHAIN_ETH_NETWORK!] 
+ } + }; - await announced( - 'Deploying L1 contracts', - contract.redeployL1(['--private-key', deployerPrivateKey, '--governor-address', governorAddress]) - ); - - await announced('Initializing validator', contract.initializeValidator(['--private-key', governorPrivateKey])); - await announced('Initialize L1 allow list', contract.initializeL1AllowList(['--private-key', governorPrivateKey])); - await announced('Deploying L2 contracts', contract.deployL2(['--private-key', deployerPrivateKey], false)); - - await announced('Initialize WETH Token', initializeWethTokenForHyperchain()); + await init(initArgs); env.mergeInitToEnv(); @@ -89,7 +84,7 @@ async function setupConfiguration() { const results: any = await enquirer.prompt(questions); if (results.config === CONFIGURE) { - await announced('Setting Hyperchain metadata', setHyperchainMetadata()); + await announced('Setting Hyperchain configuration', setHyperchainMetadata()); await announced('Validating information and balances to deploy Hyperchain', checkReadinessToDeploy()); } else { const envs = env.getAvailableEnvsFromFiles(); @@ -106,10 +101,6 @@ async function setupConfiguration() { const envResults: any = await enquirer.prompt(envQuestions); env.set(envResults.env); } - await announced('Checkout system-contracts submodule', submoduleUpdate()); - await announced('Compiling JS packages', run.yarn()); - await announced('Compiling system contracts', compiler.compileSystemContracts()); - await announced('Compile l2 contracts', compiler.compileAll()); } async function setHyperchainMetadata() { @@ -124,19 +115,22 @@ async function setHyperchainMetadata() { const INSERT_KEYS = 'Insert keys'; const questions: BasePromptOptions[] = [ { - message: 'What is your hyperchain name?', + message: 'What is your Hyperchain name?', name: 'chainName', - type: 'input' + type: 'input', + required: true }, { - message: 'What is your hyperchain id? Make sure this is not used by other chains.', + message: 'What is your Hyperchain id? 
Make sure this is not used by other chains.', name: 'chainId', - type: 'input' + type: 'numeral', + required: true }, { - message: 'To which L1 Network will your hyperchain rollup to?', + message: 'To which L1 Network will your Hyperchain rollup to?', name: 'l1Chain', type: 'select', + required: true, choices: BASE_NETWORKS } ]; @@ -146,20 +140,25 @@ async function setHyperchainMetadata() { let deployer, governor, ethOperator, feeReceiver: ethers.Wallet | undefined; let feeReceiverAddress, l1Rpc, l1Id; + await initializeTestERC20s(); + await initializeWethTokenForHyperchain(); + if (results.l1Chain !== BaseNetwork.LOCALHOST) { const rpcQuestions: BasePromptOptions[] = [ { message: 'What is the RPC url for the L1 Network?', name: 'l1Rpc', - type: 'input' + type: 'input', + required: true } ]; if (results.l1Chain === BaseNetwork.LOCALHOST_CUSTOM) { rpcQuestions.push({ - message: 'What is netowrk id of your L1 Network?', + message: 'What is network id of your L1 Network?', name: 'l1NetworkId', - type: 'input' + type: 'numeral', + required: true }); } @@ -192,27 +191,48 @@ async function setHyperchainMetadata() { { message: 'Private key of the L1 Deployer (the one that deploys the contracts)', name: 'deployerKey', - type: 'password' + type: 'password', + required: true }, { message: 'Private key of the L1 Governor (the one that can upgrade the contracts)', name: 'governorKey', - type: 'password' + type: 'password', + required: true }, { message: 'Private key of the L1 ETH Operator (the one that rolls up the batches)', name: 'ethOperator', - type: 'password' + type: 'password', + required: true }, { message: 'Address of L2 fee receiver (the one that collects fees)', name: 'feeReceiver', - type: 'input' + type: 'input', + required: true } ]; const keyResults: any = await enquirer.prompt(keyQuestions); + if (!utils.isAddress(keyResults.deployerKey)) { + throw Error(error('Deployer address is not a valid address')); + } + if (!utils.isAddress(keyResults.governorKey)) { + throw Error(error('Governor address is not a valid address')); + } + if (!utils.isAddress(keyResults.ethOperator)) { + throw Error(error('ETH Operator address is not a valid address')); + } + if (!utils.isAddress(keyResults.feeReceiver)) { + throw Error(error('Fee Receiver address is not a valid address')); + } + + if (keyResults.deployerKey == keyResults.governorKey) { + throw Error(error('Governor and Deployer should not be the same')); + } + deployer = new ethers.Wallet(keyResults.deployerKey); governor = new ethers.Wallet(keyResults.governorKey); ethOperator = new ethers.Wallet(keyResults.ethOperator); @@ -252,10 +272,6 @@ async function setHyperchainMetadata() { ) ); - if (governor.address == deployer.address) { - throw Error(error('Governor and Deployer cannot be the same')); - } - if (results.l1Chain !== BaseNetwork.LOCALHOST_CUSTOM && results.l1Chain !== BaseNetwork.LOCALHOST) { const verifyQuestions: BasePromptOptions[] = [ { @@ -272,7 +288,8 @@ async function setHyperchainMetadata() { { message: 'Please provide your Etherscan API Key.', name: 'etherscanKey', - type: 'input' + type: 'input', + required: true } ]; @@ -302,8 +319,8 @@ async function setHyperchainMetadata() { wrapEnvModify('FEE_RECEIVER_PRIVATE_KEY', feeReceiver.privateKey); } - // For now force delay to 20 seconds to ensure batch execution doesn't not happen in same block as batch proving - // This bug will be fixed on the smart contract soon + // For now force delay to 20 seconds to ensure batch execution doesn't not happen in same block as batch proving. 
+ // This bug will be fixed on the smart contract soon. wrapEnvModify('CONTRACTS_VALIDATOR_TIMELOCK_EXECUTION_DELAY', '0'); wrapEnvModify('ETH_SENDER_SENDER_L1_BATCH_MIN_AGE_BEFORE_EXECUTE_SECONDS', '20'); @@ -328,21 +345,12 @@ async function initializeTestERC20s() { const results: any = await enquirer.prompt(questions); if (results.deployERC20s) { - const privateKey = process.env.DEPLOYER_PRIVATE_KEY; - await announced( - 'Deploying localhost ERC20 tokens', - run.deployERC20('dev', '', '', '', [ - '--private-key', - privateKey, - '--envFile', - process.env.CHAIN_ETH_NETWORK! - ]) - ); + wrapEnvModify('DEPLOY_TEST_TOKENS', 'true'); console.log( warning( - `The addresses for the tokens can be found on the /etc/tokens/${getEnv( + `The addresses for the tokens will be available at the /etc/tokens/${getEnv( process.env.CHAIN_ETH_NETWORK! - )}.json file. The deployer address is the owner of the token contracts.` + )}.json file.` ) ); } @@ -351,7 +359,7 @@ async function initializeTestERC20s() { async function initializeWethTokenForHyperchain() { const questions: BasePromptOptions[] = [ { - message: 'Do you want to deploy a Wrapped ETH Bridge?', + message: 'Do you want to deploy Wrapped ETH to your Hyperchain?', name: 'deployWeth', type: 'confirm' } @@ -360,45 +368,44 @@ async function initializeWethTokenForHyperchain() { const results: any = await enquirer.prompt(questions); if (results.deployWeth) { - const tokens = getTokens(process.env.CHAIN_ETH_NETWORK!); - - let baseWethToken = tokens.find((token: { symbol: string }) => token.symbol == 'WETH')?.address; - - if (!baseWethToken) { - const wethQuestions = [ - { - message: 'What is the address of the Wrapped ETH on the base chain?', - name: 'l1Weth', - type: 'input' + wrapEnvModify('DEPLOY_L2_WETH', 'true'); + + if (!process.env.DEPLOY_TEST_TOKENS) { + // Only try to fetch this info if no test tokens will be deployed, otherwise WETH address will be defined later. + const tokens = getTokens(process.env.CHAIN_ETH_NETWORK!); + + let baseWethToken = tokens.find((token: { symbol: string }) => token.symbol == 'WETH')?.address; + + if (!baseWethToken) { + const wethQuestions = [ + { + message: 'What is the address of the Wrapped ETH on the base chain?', + name: 'l1Weth', + type: 'input', + required: true + } + ]; + + const wethResults: any = await enquirer.prompt(wethQuestions); + + baseWethToken = wethResults.l1Weth; + + if (fs.existsSync(`/etc/tokens/${getEnv(process.env.ZKSYNC_ENV!)}.json`)) { + tokens.push({ + name: 'Wrapped Ether', + symbol: 'WETH', + decimals: 18, + address: baseWethToken! + }); + fs.writeFileSync( + `/etc/tokens/${getEnv(process.env.ZKSYNC_ENV!)}.json`, + JSON.stringify(tokens, null, 4) + ); } - ]; - - const wethResults: any = await enquirer.prompt(wethQuestions); - - baseWethToken = wethResults.l1Weth; - - if (fs.existsSync(`/etc/tokens/${getEnv(process.env.ZKSYNC_ENV!)}.json`)) { - tokens.push({ - name: 'Wrapped Ether', - symbol: 'WETH', - decimals: 18, - address: baseWethToken! 
- }); - fs.writeFileSync( - `/etc/tokens/${getEnv(process.env.ZKSYNC_ENV!)}.json`, - JSON.stringify(tokens, null, 4) - ); } - } - - wrapEnvModify('CONTRACTS_L1_WETH_TOKEN_ADDR', baseWethToken!); - const governorPrivateKey = process.env.GOVERNOR_PRIVATE_KEY; - - await announced( - 'Initializing L2 WETH token', - contract.initializeWethToken(['--private-key', governorPrivateKey]) - ); + wrapEnvModify('CONTRACTS_L1_WETH_TOKEN_ADDR', baseWethToken!); + } } } @@ -441,12 +448,12 @@ async function startServer() { await server.server(false, false, components.join(',')); } -// The current env.modify requires to write down the variable name twice. This wraps it so the caller only writes the name and the value +// The current env.modify requires to write down the variable name twice. This wraps it so the caller only writes the name and the value. function wrapEnvModify(variable: string, assignedVariable: string) { env.modify(variable, `${variable}=${assignedVariable}`); } -// Make sure all env information is available and wallets are funded +// Make sure all env information is available and wallets are funded. async function checkReadinessToDeploy() { const provider = new ethers.providers.JsonRpcProvider(process.env.ETH_CLIENT_WEB3_URL!); @@ -531,13 +538,13 @@ function getL1Name(baseChain: BaseNetwork) { function getEnv(chainName: string) { return String(chainName) - .normalize('NFKD') // split accented characters into their base characters and diacritical marks - .replace(/[\u0300-\u036f]/g, '') // remove all the accents, which happen to be all in the \u03xx UNICODE block. - .trim() // trim leading or trailing whitespace - .toLowerCase() // convert to lowercase - .replace(/[^a-z0-9 -]/g, '') // remove non-alphanumeric characters - .replace(/\s+/g, '-') // replace spaces with hyphens - .replace(/-+/g, '-'); // remove consecutive hyphens + .normalize('NFKD') // Split accented characters into their base characters and diacritical marks. + .replace(/[\u0300-\u036f]/g, '') // Remove all the accents, which happen to be all in the \u03xx UNICODE block. + .trim() // Trim leading or trailing whitespace. + .toLowerCase() // Convert to lowercase. + .replace(/[^a-z0-9 -]/g, '') // Remove non-alphanumeric characters. + .replace(/\s+/g, '-') // Replace spaces with hyphens. + .replace(/-+/g, '-'); // Remove consecutive hyphens. 
} type L1Token = { @@ -561,5 +568,5 @@ export function getTokens(network: string): L1Token[] { } export const initHyperchainCommand = new Command('init-hyperchain') - .description('Initializes a new hyperchain network') + .description('Initializes a new Hyperchain network') .action(initHyperchain); diff --git a/infrastructure/zk/src/init.ts b/infrastructure/zk/src/init.ts index d44be33da13a..e2c65168461d 100644 --- a/infrastructure/zk/src/init.ts +++ b/infrastructure/zk/src/init.ts @@ -17,8 +17,17 @@ const announce = chalk.yellow; const success = chalk.green; const timestamp = chalk.grey; -export async function init(skipSubmodulesCheckout: boolean) { - if (!process.env.CI) { +export async function init(initArgs: InitArgs = DEFAULT_ARGS) { + const { + skipSubmodulesCheckout, + skipEnvSetup, + testTokens, + deployerL1ContractInputArgs, + governorPrivateKeyArgs, + deployerL2ContractInput + } = initArgs; + + if (!process.env.CI && !skipEnvSetup) { await announced('Pulling images', docker.pull()); await announced('Checking environment', checkEnv()); await announced('Checking git hooks', env.gitHooks()); @@ -28,6 +37,7 @@ export async function init(skipSubmodulesCheckout: boolean) { if (!skipSubmodulesCheckout) { await announced('Checkout system-contracts submodule', submoduleUpdate()); } + await announced('Compiling JS packages', run.yarn()); await announced('Compile l2 contracts', compiler.compileAll()); await announced('Drop postgres db', db.drop()); @@ -35,15 +45,27 @@ export async function init(skipSubmodulesCheckout: boolean) { await announced('Clean rocksdb', clean('db')); await announced('Clean backups', clean('backups')); await announced('Building contracts', contract.build()); - await announced('Deploying localhost ERC20 tokens', run.deployERC20('dev')); + if (testTokens.deploy) { + await announced('Deploying localhost ERC20 tokens', run.deployERC20('dev', '', '', '', testTokens.args)); + } await announced('Deploying L1 verifier', contract.deployVerifier([])); await announced('Reloading env', env.reload()); await announced('Running server genesis setup', server.genesisFromSources()); - await announced('Deploying L1 contracts', contract.redeployL1([])); - await announced('Initializing validator', contract.initializeValidator()); - await announced('Initialize L1 allow list', contract.initializeL1AllowList()); - await announced('Deploying L2 contracts', contract.deployL2([], true, true)); - await announced('Initializing L2 WETH token', contract.initializeWethToken()); + await announced('Deploying L1 contracts', contract.redeployL1(deployerL1ContractInputArgs)); + await announced('Initializing validator', contract.initializeValidator(governorPrivateKeyArgs)); + await announced('Initialize L1 allow list ', contract.initializeL1AllowList(governorPrivateKeyArgs)); + await announced( + 'Deploying L2 contracts', + contract.deployL2( + deployerL2ContractInput.args, + deployerL2ContractInput.includePaymaster, + deployerL2ContractInput.includeL2WETH + ) + ); + + if (deployerL2ContractInput.includeL2WETH) { + await announced('Initializing L2 WETH token', contract.initializeWethToken(governorPrivateKeyArgs)); + } } // A smaller version of `init` that "resets" the localhost environment, for which `init` was already called before. 
@@ -115,11 +137,45 @@ async function checkEnv() {
     }
 }
 
+export interface InitArgs {
+    skipSubmodulesCheckout: boolean;
+    skipEnvSetup: boolean;
+    deployerL1ContractInputArgs: any[];
+    governorPrivateKeyArgs: any[];
+    deployerL2ContractInput: {
+        args: any[];
+        includePaymaster: boolean;
+        includeL2WETH: boolean;
+    };
+    testTokens: {
+        deploy: boolean;
+        args: any[];
+    };
+}
+
+const DEFAULT_ARGS: InitArgs = {
+    skipSubmodulesCheckout: false,
+    skipEnvSetup: false,
+    deployerL1ContractInputArgs: [],
+    governorPrivateKeyArgs: [],
+    deployerL2ContractInput: { args: [], includePaymaster: true, includeL2WETH: true },
+    testTokens: { deploy: true, args: [] }
+};
+
 export const initCommand = new Command('init')
     .option('--skip-submodules-checkout')
+    .option('--skip-env-setup')
     .description('perform zksync network initialization for development')
     .action(async (cmd: Command) => {
-        await init(cmd.skipSubmodulesCheckout);
+        const initArgs: InitArgs = {
+            skipSubmodulesCheckout: cmd.skipSubmodulesCheckout,
+            skipEnvSetup: cmd.skipEnvSetup,
+            deployerL1ContractInputArgs: [],
+            governorPrivateKeyArgs: [],
+            deployerL2ContractInput: { args: [], includePaymaster: true, includeL2WETH: true },
+            testTokens: { deploy: true, args: [] }
+        };
+        await init(initArgs);
     });
 
 export const reinitCommand = new Command('reinit')
     .description('"reinitializes" network. Runs faster than `init`, but requires `init` to be executed prior')
diff --git a/infrastructure/zk/src/run/run.ts b/infrastructure/zk/src/run/run.ts
index e895fb23a02c..ff4092d79005 100644
--- a/infrastructure/zk/src/run/run.ts
+++ b/infrastructure/zk/src/run/run.ts
@@ -4,6 +4,8 @@ import { Wallet } from 'ethers';
 import fs from 'fs';
 import * as path from 'path';
 import * as dataRestore from './data-restore';
+import { getTokens } from '../hyperchain_wizard';
+import * as env from '../env';
 
 export { dataRestore };
 
@@ -39,6 +41,8 @@ export async function deployERC20(
         { "name": "MLTTL", "symbol": "MLTTW", "decimals": 18 },
         { "name": "Wrapped Ether", "symbol": "WETH", "decimals": 18, "implementation": "WETH9"}
     ]' ${args.join(' ')} > ./etc/tokens/${destinationFile}.json`);
+        const WETH = getTokens(destinationFile).find((token) => token.symbol === 'WETH')!;
+        env.modify('CONTRACTS_L1_WETH_TOKEN_ADDR', `CONTRACTS_L1_WETH_TOKEN_ADDR=${WETH.address}`);
     } else if (command == 'new') {
         await utils.spawn(
             `yarn --silent --cwd contracts/ethereum deploy-erc20 add --token-name ${name} --symbol ${symbol} --decimals ${decimals}`

From d6a1399e356b7f5b26c25092235d727816f7f6c2 Mon Sep 17 00:00:00 2001
From: Yury Akudovich
Date: Wed, 4 Oct 2023 15:03:36 +0200
Subject: [PATCH 15/29] ci: Adds call of build-docker-from-tag.yml from release-please. (#153)
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

# What ❔

Adds call of build-docker-from-tag.yml from release-please.
Allows manual invocation of build-docker-from-tag.yml.

## Why ❔

To create releases, working around a GitHub issue that doesn't allow workflows to automatically trigger other workflows.

## Checklist

- [ ] PR title corresponds to the body of PR (we generate changelog entries from PRs).
- [ ] Tests for the changes have been added / updated.
- [ ] Documentation comments have been added / updated.
- [ ] Code has been formatted via `zk fmt` and `zk lint`.
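As a side note on the workflow below: the image tag suffix is derived from the git tag via `cut -d "-" -f2`, so a tag like `core-v15.1.1` yields `v15.1.1`. A tiny illustration of that derivation, sketched in Rust (the function name is ours, purely illustrative, not part of the patch):

```rust
// Illustrative sketch only: mirrors the workflow's shell step
// `version=$(cut -d "-" -f2 <<< ${git_tag})`.
fn image_tag_suffix(git_tag: &str) -> Option<&str> {
    // `cut -d "-" -f2` takes the second dash-separated field.
    git_tag.split('-').nth(1)
}

fn main() {
    assert_eq!(image_tag_suffix("core-v15.1.1"), Some("v15.1.1"));
    assert_eq!(image_tag_suffix("prover-v7.1.1"), Some("v7.1.1"));
}
```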
---
 .github/workflows/build-docker-from-tag.yml | 21 +++++++++++++++++++--
 .github/workflows/release-please.yml | 4 ++++
 2 files changed, 23 insertions(+), 2 deletions(-)

diff --git a/.github/workflows/build-docker-from-tag.yml b/.github/workflows/build-docker-from-tag.yml
index 95f6e5998bc1..d499a7afaada 100644
--- a/.github/workflows/build-docker-from-tag.yml
+++ b/.github/workflows/build-docker-from-tag.yml
@@ -1,5 +1,17 @@
 name: Build Image from tag
 on:
+  workflow_call:
+    inputs:
+      tag_name:
+        description: "Tag of an image to be built"
+        type: string
+        required: true
+  workflow_dispatch:
+    inputs:
+      tag_name:
+        description: "Tag of an image to be built"
+        type: string
+        required: true
   push:
     tags:
       - core-v**
@@ -10,7 +22,7 @@ concurrency: docker-build
 jobs:
   setup:
     name: Setup
-    runs-on: [k8s, stage]
+    runs-on: [ubuntu-latest]
     outputs:
       image_tag_suffix: ${{ steps.set.outputs.image_tag_suffix }}
     steps:
@@ -21,7 +33,12 @@ jobs:
       - name: Generate output with git tag
         id: set
         run: |
-          git_tag="${GITHUB_REF#refs/*/}"
+          git_tag=""
+          if [[ -z "${{ inputs.tag_name }}" ]]; then
+            git_tag="${GITHUB_REF#refs/*/}"
+          else
+            git_tag="${{ inputs.tag_name }}"
+          fi
           version=$(cut -d "-" -f2 <<< ${git_tag})
           echo "image_tag_suffix=${version}" >> $GITHUB_OUTPUT

diff --git a/.github/workflows/release-please.yml b/.github/workflows/release-please.yml
index d9f04a40e9d4..1194864aa80b 100644
--- a/.github/workflows/release-please.yml
+++ b/.github/workflows/release-please.yml
@@ -3,6 +3,10 @@ on:
     branches:
       - main
 
+permissions:
+  contents: write
+  pull-requests: write
+
 name: release-please
 jobs:
   release-please:

From 0d952d43a021c2fbf18920da3e7d770a6309d990 Mon Sep 17 00:00:00 2001
From: AnastasiiaVashchuk <72273339+AnastasiiaVashchuk@users.noreply.github.com>
Date: Thu, 5 Oct 2023 12:50:02 +0300
Subject: [PATCH 16/29] fix: use gauge instead histogram for replication lag metric (#159)

---
 core/lib/circuit_breaker/src/replication_lag.rs | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/core/lib/circuit_breaker/src/replication_lag.rs b/core/lib/circuit_breaker/src/replication_lag.rs
index df8e886258be..244c53349cee 100644
--- a/core/lib/circuit_breaker/src/replication_lag.rs
+++ b/core/lib/circuit_breaker/src/replication_lag.rs
@@ -20,7 +20,7 @@ impl CircuitBreaker for ReplicationLagChecker {
             .get_replication_lag_sec()
             .await;
 
-        metrics::histogram!("circuit_breaker.replication_lag", lag as f64);
+        metrics::gauge!("circuit_breaker.replication_lag", lag as f64);
 
         match self.replication_lag_limit_sec {
             Some(replication_lag_limit_sec) if lag > replication_lag_limit_sec => Err(
                 CircuitBreakerError::ReplicationLag(lag, replication_lag_limit_sec),

From 5531be357978a89a5143813b2a3b9700d0aa09e4 Mon Sep 17 00:00:00 2001
From: Yury Akudovich
Date: Thu, 5 Oct 2023 19:07:37 +0200
Subject: [PATCH 17/29] ci: Uses PAT token for release please (#165)
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

# What ❔

Uses PAT token for release please.

## Why ❔

To allow other workflows to be triggered by tag creation.

## Checklist

- [x] PR title corresponds to the body of PR (we generate changelog entries from PRs).
- [ ] Tests for the changes have been added / updated.
- [ ] Documentation comments have been added / updated.
- [ ] Code has been formatted via `zk fmt` and `zk lint`.
--- .github/workflows/release-please.yml | 1 + 1 file changed, 1 insertion(+) diff --git a/.github/workflows/release-please.yml b/.github/workflows/release-please.yml index 1194864aa80b..266a4db8158e 100644 --- a/.github/workflows/release-please.yml +++ b/.github/workflows/release-please.yml @@ -18,6 +18,7 @@ jobs: id: release uses: google-github-actions/release-please-action@v3 with: + token: ${{ secrets.RELEASE_TOKEN }} command: manifest config-file: .github/release-please/config.json manifest-file: .github/release-please/manifest.json From 0418be11faec444762f344266e9b0d1c3f238c33 Mon Sep 17 00:00:00 2001 From: "github-actions[bot]" <41898282+github-actions[bot]@users.noreply.github.com> Date: Fri, 6 Oct 2023 10:13:41 +0200 Subject: [PATCH 18/29] chore(main): release core 15.1.1 (#161) :robot: I have created a release *beep* *boop* --- ## [15.1.1](https://github.com/matter-labs/zksync-era/compare/core-v15.1.0...core-v15.1.1) (2023-10-05) ### Bug Fixes * use gauge instead histogram for replication lag metric ([#159](https://github.com/matter-labs/zksync-era/issues/159)) ([0d952d4](https://github.com/matter-labs/zksync-era/commit/0d952d43a021c2fbf18920da3e7d770a6309d990)) --- This PR was generated with [Release Please](https://github.com/googleapis/release-please). See [documentation](https://github.com/googleapis/release-please#release-please). Co-authored-by: github-actions[bot] <41898282+github-actions[bot]@users.noreply.github.com> Co-authored-by: Danil --- .github/release-please/manifest.json | 2 +- core/CHANGELOG.md | 7 +++++++ 2 files changed, 8 insertions(+), 1 deletion(-) diff --git a/.github/release-please/manifest.json b/.github/release-please/manifest.json index 995633511099..3cf890dfd6e1 100644 --- a/.github/release-please/manifest.json +++ b/.github/release-please/manifest.json @@ -1,6 +1,6 @@ { "sdk/zksync-web3.js": "0.15.4", "sdk/zksync-rs": "0.4.0", - "core": "15.1.0", + "core": "15.1.1", "prover": "7.1.1" } diff --git a/core/CHANGELOG.md b/core/CHANGELOG.md index 36060639819d..a2bf915ae1b7 100644 --- a/core/CHANGELOG.md +++ b/core/CHANGELOG.md @@ -1,5 +1,12 @@ # Changelog +## [15.1.1](https://github.com/matter-labs/zksync-era/compare/core-v15.1.0...core-v15.1.1) (2023-10-05) + + +### Bug Fixes + +* use gauge instead histogram for replication lag metric ([#159](https://github.com/matter-labs/zksync-era/issues/159)) ([0d952d4](https://github.com/matter-labs/zksync-era/commit/0d952d43a021c2fbf18920da3e7d770a6309d990)) + ## [15.1.0](https://github.com/matter-labs/zksync-era/compare/core-v15.0.2...core-v15.1.0) (2023-10-03) From f98c4fab0f10d190ceb2ae9bfa77929bf793a6ea Mon Sep 17 00:00:00 2001 From: Marcin M <128217157+mm-zk@users.noreply.github.com> Date: Fri, 6 Oct 2023 11:33:49 +0200 Subject: [PATCH 19/29] fix(vm): Make execution status and stop reason public (#169) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit # What ❔ * TracerExecutionStatus and TracerExecutionStopReason are part of the ExecutionEndTracer trait, but they were not publicly available. 
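For context, here is the kind of out-of-crate tracer this unblocks (see the diff below). The sketch assumes the crate is imported as `vm`, and the method shape is illustrative rather than copied from the trait definitions:

```rust
// Hypothetical external tracer; `vm` as the crate name is an assumption.
use vm::{TracerExecutionStatus, TracerExecutionStopReason};

struct StopAfterFirstTx {
    seen_tx: bool,
}

impl StopAfterFirstTx {
    // Without the re-export below, this return type could not even be
    // named outside the crate.
    fn should_stop_execution(&self) -> TracerExecutionStatus {
        if self.seen_tx {
            TracerExecutionStatus::Stop(TracerExecutionStopReason::Finish)
        } else {
            TracerExecutionStatus::Continue
        }
    }
}
```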
## Why ❔

* Keeping these types private breaks external implementations of the `ExecutionEndTracer` trait.
---
 core/lib/vm/src/lib.rs | 5 ++++-
 1 file changed, 4 insertions(+), 1 deletion(-)

diff --git a/core/lib/vm/src/lib.rs b/core/lib/vm/src/lib.rs
index 34c25f4addcf..38e6982ce818 100644
--- a/core/lib/vm/src/lib.rs
+++ b/core/lib/vm/src/lib.rs
@@ -15,7 +15,10 @@ pub use errors::{
 pub use tracers::{
     call::CallTracer,
-    traits::{BoxedTracer, DynTracer, ExecutionEndTracer, ExecutionProcessing, VmTracer},
+    traits::{
+        BoxedTracer, DynTracer, ExecutionEndTracer, ExecutionProcessing, TracerExecutionStatus,
+        TracerExecutionStopReason, VmTracer,
+    },
     utils::VmExecutionStopReason,
     validation::ViolatedValidationRule,
     StorageInvocations, ValidationError, ValidationTracer, ValidationTracerParams,

From f94b8192c9a20259f692f77f87eb0dc9bc7e3418 Mon Sep 17 00:00:00 2001
From: Igor Borodin
Date: Fri, 6 Oct 2023 12:23:47 +0200
Subject: [PATCH 20/29] fix: Add exec to replace shell inside entrypoint with the actual binary (#134)
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

# What ❔

Replaces container shell with EN and local-env executables in corresponding Docker images to, e.g., properly handle termination signals.

More details in the original PR: https://github.com/matter-labs/zksync-era/pull/76

Courtesy of https://github.com/voron

Thanks for the contribution, and sorry it took so long to review - we've been busy with FOSS'ing our repos.

## Why ❔

https://github.com/matter-labs/zksync-era/pull/76#issue-1849818996

## Checklist

- [x] PR title corresponds to the body of PR (we generate changelog entries from PRs).
- [ ] Tests for the changes have been added / updated.
- [ ] Documentation comments have been added / updated.
- [ ] Code has been formatted via `zk fmt` and `zk lint`.

Co-authored-by: Alex Vorona
Co-authored-by: Roman Brodetski
---
 docker/external-node/entrypoint.sh | 2 +-
 docker/local-node/entrypoint.sh | 2 +-
 2 files changed, 2 insertions(+), 2 deletions(-)

diff --git a/docker/external-node/entrypoint.sh b/docker/external-node/entrypoint.sh
index a64390145cab..bf6e98616f39 100644
--- a/docker/external-node/entrypoint.sh
+++ b/docker/external-node/entrypoint.sh
@@ -5,4 +5,4 @@ set -e
 # Prepare the database if it's not ready. No-op if the DB is prepared.
 sqlx database setup
 # Run the external node.
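+# `exec` replaces this shell with the binary, so the binary runs as PID 1 and
+# receives termination signals from the container runtime directly.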
-zksync_external_node +exec zksync_external_node diff --git a/docker/local-node/entrypoint.sh b/docker/local-node/entrypoint.sh index 440dd3fa318d..664cf4b3b6d0 100755 --- a/docker/local-node/entrypoint.sh +++ b/docker/local-node/entrypoint.sh @@ -43,4 +43,4 @@ fi # start server source /etc/env/dev.env source /etc/env/.init.env -zksync_server +exec zksync_server From f14bf6851059a7add6677c89b3192e1b23cbf3c5 Mon Sep 17 00:00:00 2001 From: AnastasiiaVashchuk <72273339+AnastasiiaVashchuk@users.noreply.github.com> Date: Fri, 6 Oct 2023 15:18:24 +0300 Subject: [PATCH 21/29] feat: change chainId to u64 (#167) --- Cargo.lock | 5 +- core/bin/external_node/src/config/mod.rs | 15 +- .../system-constants-generator/src/utils.rs | 2 +- core/lib/basic_types/Cargo.toml | 1 + core/lib/basic_types/src/lib.rs | 162 ++++++++++++++++-- core/lib/config/src/configs/chain.rs | 6 +- core/lib/dal/src/blocks_web3_dal.rs | 4 +- .../lib/dal/src/models/storage_transaction.rs | 4 +- core/lib/dal/src/tests/mod.rs | 2 +- core/lib/dal/src/transactions_web3_dal.rs | 6 +- core/lib/state/src/in_memory.rs | 4 +- core/lib/test_account/src/lib.rs | 2 +- core/lib/types/src/api/mod.rs | 2 +- core/lib/types/src/l2/mod.rs | 8 +- core/lib/types/src/storage/mod.rs | 2 +- core/lib/types/src/transaction_request.rs | 79 +++++---- .../eip712_signature/typed_structure.rs | 2 +- .../src/tx/primitives/packed_eth_signature.rs | 11 +- core/lib/vm/src/tests/l1_tx_execution.rs | 2 +- core/lib/vm/src/tests/require_eip712.rs | 11 +- core/lib/vm/src/tests/tester/vm_tester.rs | 2 +- .../src/api_server/web3/namespaces/eth.rs | 2 +- .../src/api_server/web3/namespaces/net.rs | 2 +- .../zksync_core/src/api_server/web3/state.rs | 4 +- core/lib/zksync_core/src/genesis.rs | 30 +++- core/lib/zksync_core/src/lib.rs | 6 +- .../src/metadata_calculator/helpers.rs | 8 +- .../src/metadata_calculator/tests.rs | 4 +- .../batch_executor/tests/tester.rs | 4 +- .../src/state_keeper/io/tests/tester.rs | 4 +- core/lib/zksync_core/src/state_keeper/mod.rs | 3 +- .../zksync_core/src/state_keeper/tests/mod.rs | 4 +- .../src/state_keeper/tests/tester.rs | 4 +- core/multivm_deps/vm_1_3_2/src/test_utils.rs | 8 +- core/multivm_deps/vm_m5/src/test_utils.rs | 8 +- core/multivm_deps/vm_m6/src/test_utils.rs | 8 +- core/tests/loadnext/src/account_pool.rs | 4 +- core/tests/loadnext/src/config.rs | 6 +- core/tests/vm-benchmark/harness/src/lib.rs | 4 +- 39 files changed, 306 insertions(+), 139 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 0b584dc9a2ea..45d34bfcd8fb 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -5645,9 +5645,9 @@ dependencies = [ [[package]] name = "serde_json" -version = "1.0.97" +version = "1.0.107" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "bdf3bf93142acad5821c99197022e170842cdbc1c30482b98750c688c640842a" +checksum = "6b420ce6e3d8bd882e9b243c6eed35dbc9a6110c9769e74b584e0d68d1f20c65" dependencies = [ "itoa", "ryu", @@ -7640,6 +7640,7 @@ name = "zksync_basic_types" version = "0.1.0" dependencies = [ "serde", + "serde_json", "web3", ] diff --git a/core/bin/external_node/src/config/mod.rs b/core/bin/external_node/src/config/mod.rs index 35b1e91bc08a..66f4e54ff571 100644 --- a/core/bin/external_node/src/config/mod.rs +++ b/core/bin/external_node/src/config/mod.rs @@ -52,13 +52,14 @@ impl RemoteENConfig { .get_main_contract() .await .context("Failed to fetch L1 contract address")?; - let l2_chain_id = L2ChainId( + let l2_chain_id = L2ChainId::try_from( client .chain_id() .await .context("Failed to fetch L2 chain ID")? 
-                .as_u64() as u16,
-        );
+                .as_u64(),
+        )
+        .unwrap();
         let l1_chain_id = L1ChainId(
             client
                 .l1_chain_id()
@@ -396,14 +397,14 @@ impl ExternalNodeConfig {
             .await
             .context("Unable to check L1 chain ID through the configured L1 client")?;
 
-        let l2_chain_id: u16 = env_var("EN_L2_CHAIN_ID");
+        let l2_chain_id: L2ChainId = env_var("EN_L2_CHAIN_ID");
         let l1_chain_id: u64 = env_var("EN_L1_CHAIN_ID");
-        if l2_chain_id != remote.l2_chain_id.0 {
+        if l2_chain_id != remote.l2_chain_id {
             anyhow::bail!(
                 "Configured L2 chain id doesn't match the one from main node.
                 Make sure your configuration is correct and you are connected to the right main node.
-                Main node L2 chain id: {}. Local config value: {}",
-                remote.l2_chain_id.0, l2_chain_id
+                Main node L2 chain id: {:?}. Local config value: {:?}",
+                remote.l2_chain_id, l2_chain_id
             );
         }
         if l1_chain_id != remote.l1_chain_id.0 {
diff --git a/core/bin/system-constants-generator/src/utils.rs b/core/bin/system-constants-generator/src/utils.rs
index afb00b5cda7d..d55a73d4e8f7 100644
--- a/core/bin/system-constants-generator/src/utils.rs
+++ b/core/bin/system-constants-generator/src/utils.rs
@@ -92,7 +92,7 @@ pub(super) fn get_l2_tx(contract_address: Address, signer: &H256, pubdata_price:
             gas_per_pubdata_limit: pubdata_price.into(),
         },
         U256::from(0),
-        L2ChainId(270),
+        L2ChainId::from(270),
         signer,
         None,
         Default::default(),
diff --git a/core/lib/basic_types/Cargo.toml b/core/lib/basic_types/Cargo.toml
index e96dd0c0ce2b..4e8d8af8c15a 100644
--- a/core/lib/basic_types/Cargo.toml
+++ b/core/lib/basic_types/Cargo.toml
@@ -12,3 +12,4 @@ categories = ["cryptography"]
 [dependencies]
 web3 = { version= "0.19.0", default-features = false, features = ["http-rustls-tls", "test", "signing"] }
 serde = { version = "1.0", features = ["derive"] }
+serde_json = "1.0"
diff --git a/core/lib/basic_types/src/lib.rs b/core/lib/basic_types/src/lib.rs
index a8f7cacbae5f..3223dfddf590 100644
--- a/core/lib/basic_types/src/lib.rs
+++ b/core/lib/basic_types/src/lib.rs
@@ -7,7 +7,7 @@ mod macros;
 pub mod network;
 
-use serde::{Deserialize, Serialize};
+use serde::{de, Deserialize, Deserializer, Serialize};
 use std::convert::{Infallible, TryFrom, TryInto};
 use std::fmt;
 use std::num::ParseIntError;
@@ -76,6 +76,85 @@ impl TryFrom for AccountTreeId {
     }
 }
 
+/// ChainId in the ZkSync network.
+#[derive(Copy, Clone, Debug, Serialize, PartialEq, Eq, PartialOrd, Ord, Hash)]
+pub struct L2ChainId(u64);
+
+impl<'de> Deserialize<'de> for L2ChainId {
+    fn deserialize<D>(deserializer: D) -> Result<Self, D::Error>
+    where
+        D: Deserializer<'de>,
+    {
+        let s: String = Deserialize::deserialize(deserializer)?;
+        s.parse().map_err(de::Error::custom)
+    }
+}
+
+impl FromStr for L2ChainId {
+    type Err = String;
+
+    fn from_str(s: &str) -> Result<Self, Self::Err> {
+        // Parse the string as a U64.
+        // Try to parse as decimal first.
+        let number = match U64::from_dec_str(s) {
+            Ok(u) => u,
+            Err(_) => {
+                // Try to parse as hex.
+                s.parse::<U64>()
+                    .map_err(|err| format!("Failed to parse L2ChainId: Err {err}"))?
+            }
+        };
+
+        if number.as_u64() > L2ChainId::max().0 {
+            return Err(format!("Too big chain ID. MAX: {}", L2ChainId::max().0));
+        }
+        Ok(L2ChainId(number.as_u64()))
+    }
+}
+
+impl L2ChainId {
+    /// The maximum value of the L2 chain ID.
+    // 2^53 - 1 is the max safe integer in JS. In Ethereum JS libs the chain ID should be a safe integer.
+    // The next arithmetic operation (subtract 36 and divide by 2) comes from the `v` calculation:
+    // v = 2*chainId + 36, which should be a safe integer as well.
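+    // Concretely, MAX = floor((2^53 - 1 - 36) / 2), so v = 2 * MAX + 36 = 2^53 - 2,
+    // which still fits in the JS safe-integer range.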
+ const MAX: u64 = ((1 << 53) - 1 - 36) / 2; + + pub fn max() -> Self { + Self(Self::MAX) + } + + pub fn as_u64(&self) -> u64 { + self.0 + } +} + +impl Default for L2ChainId { + fn default() -> Self { + Self(270) + } +} + +impl TryFrom for L2ChainId { + type Error = String; + + fn try_from(val: u64) -> Result { + if val > L2ChainId::max().0 { + return Err(format!( + "Cannot convert given value {} into L2ChainId. It's greater than MAX: {},", + val, + L2ChainId::max().0, + )); + } + Ok(Self(val)) + } +} + +impl From for L2ChainId { + fn from(value: u32) -> Self { + Self(value as u64) + } +} + basic_type!( /// zkSync network block sequential index. MiniblockNumber, @@ -112,12 +191,6 @@ basic_type!( u64 ); -basic_type!( - /// ChainId in the ZkSync network. - L2ChainId, - u16 -); - #[allow(clippy::derivable_impls)] impl Default for MiniblockNumber { fn default() -> Self { @@ -139,15 +212,78 @@ impl Default for L1BlockNumber { } } -impl Default for L2ChainId { - fn default() -> Self { - Self(270) - } -} - #[allow(clippy::derivable_impls)] impl Default for PriorityOpId { fn default() -> Self { Self(0) } } + +#[cfg(test)] +mod tests { + use super::*; + use serde_json::from_str; + + #[test] + fn test_from_str_valid_decimal() { + let input = "42"; + let result = L2ChainId::from_str(input); + assert_eq!(result.unwrap().as_u64(), 42); + } + + #[test] + fn test_from_str_valid_hexadecimal() { + let input = "0x2A"; + let result = L2ChainId::from_str(input); + assert_eq!(result.unwrap().as_u64(), 42); + } + + #[test] + fn test_from_str_too_big_chain_id() { + let input = "18446744073709551615"; // 2^64 - 1 + let result = L2ChainId::from_str(input); + assert_eq!( + result, + Err(format!("Too big chain ID. MAX: {}", L2ChainId::max().0)) + ); + } + + #[test] + fn test_from_str_invalid_input() { + let input = "invalid"; // Invalid input that cannot be parsed as a number + let result = L2ChainId::from_str(input); + + assert!(result.is_err()); + assert!(result + .unwrap_err() + .contains("Failed to parse L2ChainId: Err ")); + } + + #[test] + fn test_deserialize_valid_decimal() { + let input_json = "\"42\""; + + let result: Result = from_str(input_json); + assert_eq!(result.unwrap().as_u64(), 42); + } + + #[test] + fn test_deserialize_valid_hex() { + let input_json = "\"0x2A\""; + + let result: Result = from_str(input_json); + assert_eq!(result.unwrap().as_u64(), 42); + } + + #[test] + fn test_deserialize_invalid() { + let input_json = "\"invalid\""; + + let result: Result = from_str(input_json); + assert!(result.is_err()); + assert!(result + .unwrap_err() + .to_string() + .contains("Failed to parse L2ChainId: Err Invalid character ")); + } +} diff --git a/core/lib/config/src/configs/chain.rs b/core/lib/config/src/configs/chain.rs index 3e680b435e4c..afb928716946 100644 --- a/core/lib/config/src/configs/chain.rs +++ b/core/lib/config/src/configs/chain.rs @@ -5,7 +5,7 @@ use serde::Deserialize; use std::time::Duration; // Local uses use zksync_basic_types::network::Network; -use zksync_basic_types::{Address, H256}; +use zksync_basic_types::{Address, L2ChainId, H256}; use zksync_contracts::BaseSystemContractsHashes; use super::envy_load; @@ -47,7 +47,7 @@ pub struct NetworkConfig { pub zksync_network: String, /// ID of current zkSync network treated as ETH network ID. /// Used to distinguish zkSync from other Web3-capable networks. 
- pub zksync_network_id: u16, + pub zksync_network_id: L2ChainId, } impl NetworkConfig { @@ -202,7 +202,7 @@ mod tests { network: NetworkConfig { network: "localhost".parse().unwrap(), zksync_network: "localhost".to_string(), - zksync_network_id: 270, + zksync_network_id: L2ChainId::from(270), }, state_keeper: StateKeeperConfig { transaction_slots: 50, diff --git a/core/lib/dal/src/blocks_web3_dal.rs b/core/lib/dal/src/blocks_web3_dal.rs index 301f6940d1a4..03ec1c1930fb 100644 --- a/core/lib/dal/src/blocks_web3_dal.rs +++ b/core/lib/dal/src/blocks_web3_dal.rs @@ -623,7 +623,7 @@ mod tests { for block_id in block_ids { let block = conn .blocks_web3_dal() - .get_block_by_web3_block_id(block_id, false, L2ChainId(270)) + .get_block_by_web3_block_id(block_id, false, L2ChainId::from(270)) .await; let block = block.unwrap().unwrap(); assert!(block.transactions.is_empty()); @@ -650,7 +650,7 @@ mod tests { for block_id in non_existing_block_ids { let block = conn .blocks_web3_dal() - .get_block_by_web3_block_id(block_id, false, L2ChainId(270)) + .get_block_by_web3_block_id(block_id, false, L2ChainId::from(270)) .await; assert!(block.unwrap().is_none()); diff --git a/core/lib/dal/src/models/storage_transaction.rs b/core/lib/dal/src/models/storage_transaction.rs index 738cf3356f35..554d33649f2d 100644 --- a/core/lib/dal/src/models/storage_transaction.rs +++ b/core/lib/dal/src/models/storage_transaction.rs @@ -389,7 +389,7 @@ impl<'r> FromRow<'r, PgRow> for StorageApiTransaction { .unwrap_or_default() .map(U64::from), access_list: None, - chain_id: U256::from(0), + chain_id: 0, l1_batch_number: db_row .try_get::("l1_batch_number_tx") .ok() @@ -502,7 +502,7 @@ pub fn web3_transaction_select_sql() -> &'static str { pub fn extract_web3_transaction(db_row: PgRow, chain_id: L2ChainId) -> api::Transaction { let mut storage_api_tx = StorageApiTransaction::from_row(&db_row).unwrap(); - storage_api_tx.inner_api_transaction.chain_id = U256::from(chain_id.0); + storage_api_tx.inner_api_transaction.chain_id = chain_id.as_u64(); storage_api_tx.into() } diff --git a/core/lib/dal/src/tests/mod.rs b/core/lib/dal/src/tests/mod.rs index 3f39f98a45a3..fecd33f47615 100644 --- a/core/lib/dal/src/tests/mod.rs +++ b/core/lib/dal/src/tests/mod.rs @@ -59,7 +59,7 @@ pub(crate) fn mock_l2_transaction() -> L2Tx { zksync_types::Nonce(0), fee, Default::default(), - L2ChainId(270), + L2ChainId::from(270), &H256::random(), None, Default::default(), diff --git a/core/lib/dal/src/transactions_web3_dal.rs b/core/lib/dal/src/transactions_web3_dal.rs index c9a6ee8bf765..8ad983a2218e 100644 --- a/core/lib/dal/src/transactions_web3_dal.rs +++ b/core/lib/dal/src/transactions_web3_dal.rs @@ -417,7 +417,7 @@ mod tests { for transaction_id in transaction_ids { let web3_tx = conn .transactions_web3_dal() - .get_transaction(transaction_id, L2ChainId(270)) + .get_transaction(transaction_id, L2ChainId::from(270)) .await; let web3_tx = web3_tx.unwrap().unwrap(); assert_eq!(web3_tx.hash, tx_hash); @@ -431,7 +431,7 @@ mod tests { for transaction_id in transactions_with_bogus_index { let web3_tx = conn .transactions_web3_dal() - .get_transaction(transaction_id, L2ChainId(270)) + .get_transaction(transaction_id, L2ChainId::from(270)) .await; assert!(web3_tx.unwrap().is_none()); } @@ -448,7 +448,7 @@ mod tests { for transaction_id in transactions_with_bogus_block { let web3_tx = conn .transactions_web3_dal() - .get_transaction(transaction_id, L2ChainId(270)) + .get_transaction(transaction_id, L2ChainId::from(270)) .await; 
        assert!(web3_tx.unwrap().is_none());
    }
diff --git a/core/lib/state/src/in_memory.rs b/core/lib/state/src/in_memory.rs
index 3ae72a9f4e9f..e44187e34d95 100644
--- a/core/lib/state/src/in_memory.rs
+++ b/core/lib/state/src/in_memory.rs
@@ -9,7 +9,7 @@ use zksync_types::{
 use zksync_utils::u256_to_h256;
 
 /// Network ID we use by default for in-memory storage.
-pub const IN_MEMORY_STORAGE_DEFAULT_NETWORK_ID: u16 = 270;
+pub const IN_MEMORY_STORAGE_DEFAULT_NETWORK_ID: u32 = 270;
 
 /// In-memory storage.
 #[derive(Debug, Default)]
@@ -22,7 +22,7 @@ impl InMemoryStorage {
     /// Constructs a storage that contains system smart contracts.
     pub fn with_system_contracts(bytecode_hasher: impl Fn(&[u8]) -> H256) -> Self {
         Self::with_system_contracts_and_chain_id(
-            L2ChainId(IN_MEMORY_STORAGE_DEFAULT_NETWORK_ID),
+            L2ChainId::from(IN_MEMORY_STORAGE_DEFAULT_NETWORK_ID),
             bytecode_hasher,
         )
     }
diff --git a/core/lib/test_account/src/lib.rs b/core/lib/test_account/src/lib.rs
index 9c94e3f49cf7..509402b7b6b8 100644
--- a/core/lib/test_account/src/lib.rs
+++ b/core/lib/test_account/src/lib.rs
@@ -77,7 +77,7 @@ impl Account {
             nonce,
             fee.unwrap_or_else(|| self.default_fee()),
             value,
-            L2ChainId(270),
+            L2ChainId::default(),
             &self.private_key,
             factory_deps,
             Default::default(),
diff --git a/core/lib/types/src/api/mod.rs b/core/lib/types/src/api/mod.rs
index c9b157c6629d..6c65356081b4 100644
--- a/core/lib/types/src/api/mod.rs
+++ b/core/lib/types/src/api/mod.rs
@@ -505,7 +505,7 @@ pub struct Transaction {
     pub max_priority_fee_per_gas: Option<U256>,
     /// Id of the current chain
     #[serde(rename = "chainId")]
-    pub chain_id: U256,
+    pub chain_id: u64,
     /// Number of the l1 batch this transaction was included within.
     #[serde(
         rename = "l1BatchNumber",
diff --git a/core/lib/types/src/l2/mod.rs b/core/lib/types/src/l2/mod.rs
index 973c5a30b106..b1ef8ca07a71 100644
--- a/core/lib/types/src/l2/mod.rs
+++ b/core/lib/types/src/l2/mod.rs
@@ -93,7 +93,7 @@ impl L2TxCommonData {
         self.input = Some(InputData { hash, data: input })
     }
 
-    pub fn extract_chain_id(&self) -> Option<u16> {
+    pub fn extract_chain_id(&self) -> Option<u64> {
         let bytes = self.input_data()?;
         let chain_id = match bytes.first() {
             Some(x) if *x >= 0x80 => {
@@ -226,7 +226,7 @@ impl L2Tx {
     pub fn get_rlp_bytes(&self, chain_id: L2ChainId) -> Bytes {
         let mut rlp_stream = RlpStream::new();
         let tx: TransactionRequest = self.clone().into();
-        tx.rlp(&mut rlp_stream, chain_id.0, None);
+        tx.rlp(&mut rlp_stream, chain_id.as_u64(), None);
         Bytes(rlp_stream.as_raw().to_vec())
     }
 
@@ -329,7 +329,7 @@ impl From<L2Tx> for TransactionRequest {
             transaction_type: None,
             access_list: None,
             eip712_meta: None,
-            chain_id: tx.common_data.extract_chain_id().unwrap_or_default().into(),
+            chain_id: tx.common_data.extract_chain_id(),
         };
         match tx_type as u8 {
             LEGACY_TX_TYPE => {}
@@ -389,7 +389,7 @@ impl From<L2Tx> for api::Transaction {
         Self {
             hash: tx.hash(),
-            chain_id: tx.common_data.extract_chain_id().unwrap_or_default().into(),
+            chain_id: tx.common_data.extract_chain_id().unwrap_or_default(),
             nonce: U256::from(tx.common_data.nonce.0),
             from: Some(tx.common_data.initiator_address),
             to: Some(tx.recipient_account()),
diff --git a/core/lib/types/src/storage/mod.rs b/core/lib/types/src/storage/mod.rs
index b1ed25dad970..bf790b58d3d2 100644
--- a/core/lib/types/src/storage/mod.rs
+++ b/core/lib/types/src/storage/mod.rs
@@ -103,7 +103,7 @@ pub fn get_system_context_init_logs(chain_id: L2ChainId) -> Vec<StorageLog> {
     vec![
         StorageLog::new_write_log(
             get_system_context_key(SYSTEM_CONTEXT_CHAIN_ID_POSITION),
-            H256::from_low_u64_be(chain_id.0
as u64), + H256::from_low_u64_be(chain_id.as_u64()), ), StorageLog::new_write_log( get_system_context_key(SYSTEM_CONTEXT_BLOCK_GAS_LIMIT_POSITION), diff --git a/core/lib/types/src/transaction_request.rs b/core/lib/types/src/transaction_request.rs index 0516081434d8..c9af634c3e47 100644 --- a/core/lib/types/src/transaction_request.rs +++ b/core/lib/types/src/transaction_request.rs @@ -168,7 +168,7 @@ pub enum SerializationTransactionError { #[error("invalid signature")] MalformedSignature, #[error("wrong chain id {}", .0.unwrap_or_default())] - WrongChainId(Option), + WrongChainId(Option), #[error("malformed paymaster params")] MalforedPaymasterParams, #[error("factory dependency #{0} is invalid: {1}")] @@ -233,7 +233,7 @@ pub struct TransactionRequest { pub eip712_meta: Option, /// Chain ID #[serde(default, skip_serializing_if = "Option::is_none")] - pub chain_id: Option, + pub chain_id: Option, } #[derive(Default, Serialize, Deserialize, Clone, PartialEq, Debug, Eq)] @@ -426,7 +426,7 @@ impl TransactionRequest { pub fn get_signed_bytes(&self, signature: &PackedEthSignature, chain_id: L2ChainId) -> Vec { let mut rlp = RlpStream::new(); - self.rlp(&mut rlp, *chain_id, Some(signature)); + self.rlp(&mut rlp, chain_id.as_u64(), Some(signature)); let mut data = rlp.out().to_vec(); if let Some(tx_type) = self.transaction_type { data.insert(0, tx_type.as_u64() as u8); @@ -438,7 +438,7 @@ impl TransactionRequest { self.transaction_type.is_none() || self.transaction_type == Some(LEGACY_TX_TYPE.into()) } - pub fn rlp(&self, rlp: &mut RlpStream, chain_id: u16, signature: Option<&PackedEthSignature>) { + pub fn rlp(&self, rlp: &mut RlpStream, chain_id: u64, signature: Option<&PackedEthSignature>) { rlp.begin_unbounded_list(); match self.transaction_type { @@ -553,7 +553,7 @@ impl TransactionRequest { pub fn from_bytes( bytes: &[u8], - chain_id: u16, + chain_id: L2ChainId, ) -> Result<(Self, H256), SerializationTransactionError> { let rlp; let mut tx = match bytes.first() { @@ -567,7 +567,7 @@ impl TransactionRequest { let v = rlp.val_at(6)?; let (_, tx_chain_id) = PackedEthSignature::unpack_v(v) .map_err(|_| SerializationTransactionError::MalformedSignature)?; - if tx_chain_id.is_some() && tx_chain_id != Some(chain_id) { + if tx_chain_id.is_some() && tx_chain_id != Some(chain_id.as_u64()) { return Err(SerializationTransactionError::WrongChainId(tx_chain_id)); } Self { @@ -592,7 +592,7 @@ impl TransactionRequest { } let tx_chain_id = rlp.val_at(0).ok(); - if tx_chain_id != Some(chain_id) { + if tx_chain_id != Some(chain_id.as_u64()) { return Err(SerializationTransactionError::WrongChainId(tx_chain_id)); } Self { @@ -613,7 +613,7 @@ impl TransactionRequest { )); } let tx_chain_id = rlp.val_at(10).ok(); - if tx_chain_id.is_some() && tx_chain_id != Some(chain_id) { + if tx_chain_id.is_some() && tx_chain_id != Some(chain_id.as_u64()) { return Err(SerializationTransactionError::WrongChainId(tx_chain_id)); } @@ -658,21 +658,20 @@ impl TransactionRequest { None => tx.recover_default_signer(default_signed_message).ok(), }; - let hash = - tx.get_tx_hash_with_signed_message(&default_signed_message, L2ChainId(chain_id))?; + let hash = tx.get_tx_hash_with_signed_message(&default_signed_message, chain_id)?; Ok((tx, hash)) } fn get_default_signed_message( &self, - chain_id: Option, + chain_id: Option, ) -> Result { if self.is_eip712_tx() { let tx_chain_id = chain_id.ok_or(SerializationTransactionError::WrongChainId(chain_id))?; Ok(PackedEthSignature::typed_data_to_signed_bytes( - 
&Eip712Domain::new(L2ChainId(tx_chain_id)), + &Eip712Domain::new(L2ChainId::try_from(tx_chain_id).unwrap()), self, )) } else { @@ -707,7 +706,7 @@ impl TransactionRequest { } pub fn get_tx_hash(&self, chain_id: L2ChainId) -> Result { - let default_signed_message = self.get_default_signed_message(Some(chain_id.0))?; + let default_signed_message = self.get_default_signed_message(Some(chain_id.as_u64()))?; self.get_tx_hash_with_signed_message(&default_signed_message, chain_id) } @@ -979,8 +978,11 @@ mod tests { access_list: None, }; let signed_tx = accounts.sign_transaction(tx.clone(), &key).await.unwrap(); - let (tx2, _) = - TransactionRequest::from_bytes(signed_tx.raw_transaction.0.as_slice(), 270).unwrap(); + let (tx2, _) = TransactionRequest::from_bytes( + signed_tx.raw_transaction.0.as_slice(), + L2ChainId::from(270), + ) + .unwrap(); assert_eq!(tx.gas, tx2.gas); assert_eq!(tx.gas_price.unwrap(), tx2.gas_price); assert_eq!(tx.nonce.unwrap(), tx2.nonce); @@ -1013,16 +1015,13 @@ mod tests { let mut rlp = RlpStream::new(); tx.rlp(&mut rlp, 270, Some(&signature)); let data = rlp.out().to_vec(); - let (tx2, _) = TransactionRequest::from_bytes(&data, 270).unwrap(); + let (tx2, _) = TransactionRequest::from_bytes(&data, L2ChainId::from(270)).unwrap(); assert_eq!(tx.gas, tx2.gas); assert_eq!(tx.gas_price, tx2.gas_price); assert_eq!(tx.nonce, tx2.nonce); assert_eq!(tx.input, tx2.input); assert_eq!(tx.value, tx2.value); - assert_eq!( - tx2.v.unwrap().as_u32() as u16, - signature.v_with_chain_id(270) - ); + assert_eq!(tx2.v.unwrap().as_u64(), signature.v_with_chain_id(270)); assert_eq!(tx2.s.unwrap(), signature.s().into()); assert_eq!(tx2.r.unwrap(), signature.r().into()); assert_eq!(address, tx2.from.unwrap()); @@ -1056,8 +1055,10 @@ mod tests { ..Default::default() }; - let msg = - PackedEthSignature::typed_data_to_signed_bytes(&Eip712Domain::new(L2ChainId(270)), &tx); + let msg = PackedEthSignature::typed_data_to_signed_bytes( + &Eip712Domain::new(L2ChainId::from(270)), + &tx, + ); let signature = PackedEthSignature::sign_raw(&private_key, &msg).unwrap(); let mut rlp = RlpStream::new(); @@ -1069,7 +1070,7 @@ mod tests { tx.r = Some(U256::from_big_endian(signature.r())); tx.s = Some(U256::from_big_endian(signature.s())); - let (tx2, _) = TransactionRequest::from_bytes(&data, 270).unwrap(); + let (tx2, _) = TransactionRequest::from_bytes(&data, L2ChainId::from(270)).unwrap(); assert_eq!(tx, tx2); } @@ -1098,14 +1099,15 @@ mod tests { chain_id: Some(270), ..Default::default() }; - let domain = Eip712Domain::new(L2ChainId(270)); + let domain = Eip712Domain::new(L2ChainId::from(270)); let signature = PackedEthSignature::sign_typed_data(&private_key, &domain, &transaction_request) .unwrap(); - let encoded_tx = transaction_request.get_signed_bytes(&signature, L2ChainId(270)); + let encoded_tx = transaction_request.get_signed_bytes(&signature, L2ChainId::from(270)); - let (decoded_tx, _) = TransactionRequest::from_bytes(encoded_tx.as_slice(), 270).unwrap(); + let (decoded_tx, _) = + TransactionRequest::from_bytes(encoded_tx.as_slice(), L2ChainId::from(270)).unwrap(); let recovered_signer = decoded_tx.from.unwrap(); assert_eq!(address, recovered_signer); } @@ -1137,14 +1139,15 @@ mod tests { chain_id: Some(270), ..Default::default() }; - let domain = Eip712Domain::new(L2ChainId(270)); + let domain = Eip712Domain::new(L2ChainId::from(270)); let signature = PackedEthSignature::sign_typed_data(&private_key, &domain, &transaction_request) .unwrap(); - let encoded_tx = 
transaction_request.get_signed_bytes(&signature, L2ChainId(270)); + let encoded_tx = transaction_request.get_signed_bytes(&signature, L2ChainId::from(270)); - let decoded_tx = TransactionRequest::from_bytes(encoded_tx.as_slice(), 272); + let decoded_tx = + TransactionRequest::from_bytes(encoded_tx.as_slice(), L2ChainId::from(272)); assert_eq!( decoded_tx, Err(SerializationTransactionError::WrongChainId(Some(270))) @@ -1184,7 +1187,8 @@ mod tests { let mut data = rlp.out().to_vec(); data.insert(0, EIP_1559_TX_TYPE); - let (decoded_tx, _) = TransactionRequest::from_bytes(data.as_slice(), 270).unwrap(); + let (decoded_tx, _) = + TransactionRequest::from_bytes(data.as_slice(), L2ChainId::from(270)).unwrap(); let recovered_signer = decoded_tx.from.unwrap(); assert_eq!(address, recovered_signer); } @@ -1221,7 +1225,7 @@ mod tests { let mut data = rlp.out().to_vec(); data.insert(0, EIP_1559_TX_TYPE); - let decoded_tx = TransactionRequest::from_bytes(data.as_slice(), 270); + let decoded_tx = TransactionRequest::from_bytes(data.as_slice(), L2ChainId::from(270)); assert_eq!( decoded_tx, Err(SerializationTransactionError::WrongChainId(Some(272))) @@ -1261,7 +1265,7 @@ mod tests { let mut data = rlp.out().to_vec(); data.insert(0, EIP_1559_TX_TYPE); - let res = TransactionRequest::from_bytes(data.as_slice(), 270); + let res = TransactionRequest::from_bytes(data.as_slice(), L2ChainId::from(270)); assert_eq!( res, Err(SerializationTransactionError::AccessListsNotSupported) @@ -1298,7 +1302,7 @@ mod tests { let mut data = rlp.out().to_vec(); data.insert(0, EIP_2930_TX_TYPE); - let res = TransactionRequest::from_bytes(data.as_slice(), 270); + let res = TransactionRequest::from_bytes(data.as_slice(), L2ChainId::from(270)); assert_eq!( res, Err(SerializationTransactionError::AccessListsNotSupported) @@ -1419,8 +1423,10 @@ mod tests { ..Default::default() }; - let msg = - PackedEthSignature::typed_data_to_signed_bytes(&Eip712Domain::new(L2ChainId(270)), &tx); + let msg = PackedEthSignature::typed_data_to_signed_bytes( + &Eip712Domain::new(L2ChainId::from(270)), + &tx, + ); let signature = PackedEthSignature::sign_raw(&private_key, &msg).unwrap(); let mut rlp = RlpStream::new(); @@ -1431,7 +1437,8 @@ mod tests { tx.v = Some(U64::from(signature.v())); tx.r = Some(U256::from_big_endian(signature.r())); tx.s = Some(U256::from_big_endian(signature.s())); - let request = TransactionRequest::from_bytes(data.as_slice(), 270).unwrap(); + let request = + TransactionRequest::from_bytes(data.as_slice(), L2ChainId::from(270)).unwrap(); assert!(matches!( L2Tx::from_request(request.0, random_tx_max_size), Err(SerializationTransactionError::OversizedData(_, _)) diff --git a/core/lib/types/src/tx/primitives/eip712_signature/typed_structure.rs b/core/lib/types/src/tx/primitives/eip712_signature/typed_structure.rs index 5ad48995a5c7..999afbbe604d 100644 --- a/core/lib/types/src/tx/primitives/eip712_signature/typed_structure.rs +++ b/core/lib/types/src/tx/primitives/eip712_signature/typed_structure.rs @@ -170,7 +170,7 @@ impl Eip712Domain { Self { name: Self::NAME.to_string(), version: Self::VERSION.to_string(), - chain_id: U256::from(*chain_id), + chain_id: U256::from(chain_id.as_u64()), } } } diff --git a/core/lib/types/src/tx/primitives/packed_eth_signature.rs b/core/lib/types/src/tx/primitives/packed_eth_signature.rs index 63f4911ea47f..b249d151ef56 100644 --- a/core/lib/types/src/tx/primitives/packed_eth_signature.rs +++ b/core/lib/types/src/tx/primitives/packed_eth_signature.rs @@ -150,12 +150,10 @@ impl PackedEthSignature 
{ pub fn v(&self) -> u8 { self.0.v() } - pub fn v_with_chain_id(&self, chain_id: u16) -> u16 { - self.0.v() as u16 + 35 + chain_id * 2 + pub fn v_with_chain_id(&self, chain_id: u64) -> u64 { + self.0.v() as u64 + 35 + chain_id * 2 } - pub fn unpack_v(v: u64) -> Result<(u8, Option), ParityCryptoError> { - use std::convert::TryInto; - + pub fn unpack_v(v: u64) -> Result<(u8, Option), ParityCryptoError> { if v == 27 { return Ok((0, None)); } else if v == 28 { @@ -163,9 +161,6 @@ impl PackedEthSignature { } else if v >= 35 { let chain_id = (v - 35) >> 1; let v = v - 35 - chain_id * 2; - let chain_id = chain_id - .try_into() - .map_err(|_| ParityCryptoError::Custom("Invalid chain_id".to_string()))?; if v == 0 { return Ok((0, Some(chain_id))); } else if v == 1 { diff --git a/core/lib/vm/src/tests/l1_tx_execution.rs b/core/lib/vm/src/tests/l1_tx_execution.rs index 5afe6af7918a..a231d8aba0b6 100644 --- a/core/lib/vm/src/tests/l1_tx_execution.rs +++ b/core/lib/vm/src/tests/l1_tx_execution.rs @@ -41,7 +41,7 @@ fn test_l1_tx_execution() { is_service: true, tx_number_in_block: 0, sender: BOOTLOADER_ADDRESS, - key: tx_data.tx_hash(L2ChainId(0)), + key: tx_data.tx_hash(L2ChainId::from(0)), value: u256_to_h256(U256::from(1u32)), }]; diff --git a/core/lib/vm/src/tests/require_eip712.rs b/core/lib/vm/src/tests/require_eip712.rs index d77e4d6a33a3..4c2515ae2eff 100644 --- a/core/lib/vm/src/tests/require_eip712.rs +++ b/core/lib/vm/src/tests/require_eip712.rs @@ -52,7 +52,7 @@ async fn test_require_eip712() { assert_eq!(vm.get_eth_balance(beneficiary.address), U256::from(0)); - let chain_id: u16 = 270; + let chain_id: u32 = 270; // First, let's set the owners of the AA account to the private_address. // (so that messages signed by private_address, are authorized to act on behalf of the AA account). 
@@ -94,7 +94,7 @@ async fn test_require_eip712() { }; let aa_tx = private_account.sign_legacy_tx(aa_raw_tx).await; - let (tx_request, hash) = TransactionRequest::from_bytes(&aa_tx, 270).unwrap(); + let (tx_request, hash) = TransactionRequest::from_bytes(&aa_tx, L2ChainId::from(270)).unwrap(); let mut l2_tx: L2Tx = L2Tx::from_request(tx_request, 10000).unwrap(); l2_tx.set_input(aa_tx, hash); @@ -134,15 +134,16 @@ async fn test_require_eip712() { let transaction_request: TransactionRequest = tx_712.into(); - let domain = Eip712Domain::new(L2ChainId(chain_id)); + let domain = Eip712Domain::new(L2ChainId::from(chain_id)); let signature = private_account .get_pk_signer() .sign_typed_data(&domain, &transaction_request) .await .unwrap(); - let encoded_tx = transaction_request.get_signed_bytes(&signature, L2ChainId(chain_id)); + let encoded_tx = transaction_request.get_signed_bytes(&signature, L2ChainId::from(chain_id)); - let (aa_txn_request, aa_hash) = TransactionRequest::from_bytes(&encoded_tx, chain_id).unwrap(); + let (aa_txn_request, aa_hash) = + TransactionRequest::from_bytes(&encoded_tx, L2ChainId::from(chain_id)).unwrap(); let mut l2_tx = L2Tx::from_request(aa_txn_request, 100000).unwrap(); l2_tx.set_input(encoded_tx, aa_hash); diff --git a/core/lib/vm/src/tests/tester/vm_tester.rs b/core/lib/vm/src/tests/tester/vm_tester.rs index 19450244120a..07dbf89a8ebb 100644 --- a/core/lib/vm/src/tests/tester/vm_tester.rs +++ b/core/lib/vm/src/tests/tester/vm_tester.rs @@ -142,7 +142,7 @@ impl VmTesterBuilder { gas_limit: BLOCK_GAS_LIMIT, execution_mode: TxExecutionMode::VerifyExecute, default_validation_computational_gas_limit: BLOCK_GAS_LIMIT, - chain_id: L2ChainId(270), + chain_id: L2ChainId::from(270), }, deployer: None, rich_accounts: vec![], diff --git a/core/lib/zksync_core/src/api_server/web3/namespaces/eth.rs b/core/lib/zksync_core/src/api_server/web3/namespaces/eth.rs index 89434de79110..b68cfc247be1 100644 --- a/core/lib/zksync_core/src/api_server/web3/namespaces/eth.rs +++ b/core/lib/zksync_core/src/api_server/web3/namespaces/eth.rs @@ -369,7 +369,7 @@ impl EthNamespace { #[tracing::instrument(skip(self))] pub fn chain_id_impl(&self) -> U64 { - self.state.api_config.l2_chain_id.0.into() + self.state.api_config.l2_chain_id.as_u64().into() } #[tracing::instrument(skip(self))] diff --git a/core/lib/zksync_core/src/api_server/web3/namespaces/net.rs b/core/lib/zksync_core/src/api_server/web3/namespaces/net.rs index b31279ab6935..88a732505ab4 100644 --- a/core/lib/zksync_core/src/api_server/web3/namespaces/net.rs +++ b/core/lib/zksync_core/src/api_server/web3/namespaces/net.rs @@ -11,7 +11,7 @@ impl NetNamespace { } pub fn version_impl(&self) -> String { - self.zksync_network_id.to_string() + self.zksync_network_id.as_u64().to_string() } pub fn peer_count_impl(&self) -> U256 { diff --git a/core/lib/zksync_core/src/api_server/web3/state.rs b/core/lib/zksync_core/src/api_server/web3/state.rs index 8ea44db4a63b..6ed90ec1d3cb 100644 --- a/core/lib/zksync_core/src/api_server/web3/state.rs +++ b/core/lib/zksync_core/src/api_server/web3/state.rs @@ -65,7 +65,7 @@ impl InternalApiConfig { ) -> Self { Self { l1_chain_id: eth_config.network.chain_id(), - l2_chain_id: L2ChainId(eth_config.zksync_network_id), + l2_chain_id: eth_config.zksync_network_id, max_tx_size: web3_config.max_tx_size, estimate_gas_scale_factor: web3_config.estimate_gas_scale_factor, estimate_gas_acceptable_overestimation: web3_config @@ -195,7 +195,7 @@ impl Clone for RpcState { impl RpcState { pub fn parse_transaction_bytes(&self, 
bytes: &[u8]) -> Result<(L2Tx, H256), Web3Error> { let chain_id = self.api_config.l2_chain_id; - let (tx_request, hash) = api::TransactionRequest::from_bytes(bytes, chain_id.0)?; + let (tx_request, hash) = api::TransactionRequest::from_bytes(bytes, chain_id)?; Ok(( L2Tx::from_request(tx_request, self.api_config.max_tx_size)?, diff --git a/core/lib/zksync_core/src/genesis.rs b/core/lib/zksync_core/src/genesis.rs index f613b2b6a480..ccc9e949d2d4 100644 --- a/core/lib/zksync_core/src/genesis.rs +++ b/core/lib/zksync_core/src/genesis.rs @@ -389,7 +389,7 @@ mod tests { first_l1_verifier_config: L1VerifierConfig::default(), first_verifier_address: Address::random(), }; - ensure_genesis_state(&mut conn, L2ChainId(270), ¶ms) + ensure_genesis_state(&mut conn, L2ChainId::from(270), ¶ms) .await .unwrap(); @@ -403,8 +403,34 @@ mod tests { assert_ne!(root_hash, H256::zero()); // Check that `ensure_genesis_state()` doesn't panic on repeated runs. - ensure_genesis_state(&mut conn, L2ChainId(270), ¶ms) + ensure_genesis_state(&mut conn, L2ChainId::from(270), ¶ms) .await .unwrap(); } + + #[db_test] + async fn running_genesis_with_big_chain_id(pool: ConnectionPool) { + let mut conn: StorageProcessor<'_> = pool.access_storage().await.unwrap(); + conn.blocks_dal().delete_genesis().await.unwrap(); + + let params = GenesisParams { + protocol_version: ProtocolVersionId::latest(), + first_validator: Address::random(), + base_system_contracts: BaseSystemContracts::load_from_disk(), + system_contracts: get_system_smart_contracts(), + first_l1_verifier_config: L1VerifierConfig::default(), + first_verifier_address: Address::random(), + }; + ensure_genesis_state(&mut conn, L2ChainId::max(), ¶ms) + .await + .unwrap(); + + assert!(!conn.blocks_dal().is_genesis_needed().await.unwrap()); + let metadata = conn + .blocks_dal() + .get_l1_batch_metadata(L1BatchNumber(0)) + .await; + let root_hash = metadata.unwrap().unwrap().metadata.root_hash; + assert_ne!(root_hash, H256::zero()); + } } diff --git a/core/lib/zksync_core/src/lib.rs b/core/lib/zksync_core/src/lib.rs index f40074d600c7..028a746ced07 100644 --- a/core/lib/zksync_core/src/lib.rs +++ b/core/lib/zksync_core/src/lib.rs @@ -42,7 +42,7 @@ use zksync_types::{ proofs::AggregationRound, protocol_version::{L1VerifierConfig, VerifierParams}, system_contracts::get_system_smart_contracts, - Address, L2ChainId, PackedEthSignature, ProtocolVersionId, + Address, PackedEthSignature, ProtocolVersionId, }; use zksync_verification_key_server::get_cached_commitments; @@ -124,7 +124,7 @@ pub async fn genesis_init( genesis::ensure_genesis_state( &mut storage, - L2ChainId(network_config.zksync_network_id), + network_config.zksync_network_id, &genesis::GenesisParams { // We consider the operator to be the first validator for now. 
first_validator: operator_address, @@ -365,7 +365,7 @@ pub async fn initialize_components( let tx_sender_config = TxSenderConfig::new( &state_keeper_config, &api_config.web3_json_rpc, - L2ChainId(network_config.zksync_network_id), + network_config.zksync_network_id, ); let internal_api_config = InternalApiConfig::new( &network_config, diff --git a/core/lib/zksync_core/src/metadata_calculator/helpers.rs b/core/lib/zksync_core/src/metadata_calculator/helpers.rs index bd79b8866f4b..0abcc30c6444 100644 --- a/core/lib/zksync_core/src/metadata_calculator/helpers.rs +++ b/core/lib/zksync_core/src/metadata_calculator/helpers.rs @@ -366,7 +366,7 @@ mod tests { async fn loaded_logs_equivalence_basics(pool: ConnectionPool) { ensure_genesis_state( &mut pool.access_storage().await.unwrap(), - L2ChainId(270), + L2ChainId::from(270), &mock_genesis_params(), ) .await @@ -389,7 +389,7 @@ mod tests { #[db_test] async fn loaded_logs_equivalence_with_zero_no_op_logs(pool: ConnectionPool) { let mut storage = pool.access_storage().await.unwrap(); - ensure_genesis_state(&mut storage, L2ChainId(270), &mock_genesis_params()) + ensure_genesis_state(&mut storage, L2ChainId::from(270), &mock_genesis_params()) .await .unwrap(); @@ -467,7 +467,7 @@ mod tests { #[db_test] async fn loaded_logs_equivalence_with_non_zero_no_op_logs(pool: ConnectionPool) { let mut storage = pool.access_storage().await.unwrap(); - ensure_genesis_state(&mut storage, L2ChainId(270), &mock_genesis_params()) + ensure_genesis_state(&mut storage, L2ChainId::from(270), &mock_genesis_params()) .await .unwrap(); @@ -514,7 +514,7 @@ mod tests { #[db_test] async fn loaded_logs_equivalence_with_protective_reads(pool: ConnectionPool) { let mut storage = pool.access_storage().await.unwrap(); - ensure_genesis_state(&mut storage, L2ChainId(270), &mock_genesis_params()) + ensure_genesis_state(&mut storage, L2ChainId::from(270), &mock_genesis_params()) .await .unwrap(); diff --git a/core/lib/zksync_core/src/metadata_calculator/tests.rs b/core/lib/zksync_core/src/metadata_calculator/tests.rs index 00d34d7f870a..e5e6e1f43ba5 100644 --- a/core/lib/zksync_core/src/metadata_calculator/tests.rs +++ b/core/lib/zksync_core/src/metadata_calculator/tests.rs @@ -397,7 +397,7 @@ async fn setup_calculator_with_options( let mut storage = pool.access_storage().await.unwrap(); if storage.blocks_dal().is_genesis_needed().await.unwrap() { - let chain_id = L2ChainId(270); + let chain_id = L2ChainId::from(270); let protocol_version = ProtocolVersionId::latest(); let base_system_contracts = BaseSystemContracts::load_from_disk(); let system_contracts = get_system_smart_contracts(); @@ -650,7 +650,7 @@ async fn deduplication_works_as_expected(pool: ConnectionPool) { let first_verifier_address = Address::zero(); ensure_genesis_state( &mut storage, - L2ChainId(270), + L2ChainId::from(270), &GenesisParams { protocol_version, first_validator, diff --git a/core/lib/zksync_core/src/state_keeper/batch_executor/tests/tester.rs b/core/lib/zksync_core/src/state_keeper/batch_executor/tests/tester.rs index 2fd2df20e6cc..d41b0c98a82a 100644 --- a/core/lib/zksync_core/src/state_keeper/batch_executor/tests/tester.rs +++ b/core/lib/zksync_core/src/state_keeper/batch_executor/tests/tester.rs @@ -28,7 +28,7 @@ use crate::state_keeper::{ }; const DEFAULT_GAS_PER_PUBDATA: u32 = 100; -const CHAIN_ID: L2ChainId = L2ChainId(270); +const CHAIN_ID: u32 = 270; /// Representation of configuration parameters used by the state keeper. /// Has sensible defaults for most tests, each of which can be overridden. 
@@ -144,7 +144,7 @@ impl Tester { create_genesis_l1_batch( &mut storage, self.fee_account, - CHAIN_ID, + L2ChainId::from(CHAIN_ID), ProtocolVersionId::latest(), &BASE_SYSTEM_CONTRACTS, &get_system_smart_contracts(), diff --git a/core/lib/zksync_core/src/state_keeper/io/tests/tester.rs b/core/lib/zksync_core/src/state_keeper/io/tests/tester.rs index 5f1881afb3f7..fb9ec33c54b9 100644 --- a/core/lib/zksync_core/src/state_keeper/io/tests/tester.rs +++ b/core/lib/zksync_core/src/state_keeper/io/tests/tester.rs @@ -91,7 +91,7 @@ impl Tester { Duration::from_secs(1), l2_erc20_bridge_addr, BLOCK_GAS_LIMIT, - L2ChainId(270), + L2ChainId::from(270), ) .await; @@ -108,7 +108,7 @@ impl Tester { create_genesis_l1_batch( &mut storage, Address::repeat_byte(0x01), - L2ChainId(270), + L2ChainId::from(270), ProtocolVersionId::latest(), &self.base_system_contracts, &get_system_smart_contracts(), diff --git a/core/lib/zksync_core/src/state_keeper/mod.rs b/core/lib/zksync_core/src/state_keeper/mod.rs index 5ccae06a3f4b..8eef5d6adbc3 100644 --- a/core/lib/zksync_core/src/state_keeper/mod.rs +++ b/core/lib/zksync_core/src/state_keeper/mod.rs @@ -8,7 +8,6 @@ use zksync_config::{ ContractsConfig, DBConfig, }; use zksync_dal::ConnectionPool; -use zksync_types::L2ChainId; mod batch_executor; pub(crate) mod extractors; @@ -71,7 +70,7 @@ where mempool_config.delay_interval(), contracts_config.l2_erc20_bridge_addr, state_keeper_config.validation_computational_gas_limit, - L2ChainId(network_config.zksync_network_id), + network_config.zksync_network_id, ) .await; diff --git a/core/lib/zksync_core/src/state_keeper/tests/mod.rs b/core/lib/zksync_core/src/state_keeper/tests/mod.rs index 4f8f1fe364d3..d269b1fea67a 100644 --- a/core/lib/zksync_core/src/state_keeper/tests/mod.rs +++ b/core/lib/zksync_core/src/state_keeper/tests/mod.rs @@ -58,7 +58,7 @@ pub(super) fn default_system_env() -> SystemEnv { gas_limit: BLOCK_GAS_LIMIT, execution_mode: TxExecutionMode::VerifyExecute, default_validation_computational_gas_limit: BLOCK_GAS_LIMIT, - chain_id: L2ChainId(270), + chain_id: L2ChainId::from(270), } } @@ -147,7 +147,7 @@ pub(super) fn create_l2_transaction(fee_per_gas: u64, gas_per_pubdata: u32) -> L Nonce(0), fee, U256::zero(), - L2ChainId(271), + L2ChainId::from(271), &H256::repeat_byte(0x11), None, PaymasterParams::default(), diff --git a/core/lib/zksync_core/src/state_keeper/tests/tester.rs b/core/lib/zksync_core/src/state_keeper/tests/tester.rs index b855ce54560b..62bd4307b4e6 100644 --- a/core/lib/zksync_core/src/state_keeper/tests/tester.rs +++ b/core/lib/zksync_core/src/state_keeper/tests/tester.rs @@ -286,7 +286,7 @@ pub(crate) fn pending_batch_data( gas_limit: BLOCK_GAS_LIMIT, execution_mode: TxExecutionMode::VerifyExecute, default_validation_computational_gas_limit: BLOCK_GAS_LIMIT, - chain_id: L2ChainId(270), + chain_id: L2ChainId::from(270), }, pending_miniblocks, } @@ -601,7 +601,7 @@ impl StateKeeperIO for TestIO { gas_limit: BLOCK_GAS_LIMIT, execution_mode: TxExecutionMode::VerifyExecute, default_validation_computational_gas_limit: BLOCK_GAS_LIMIT, - chain_id: L2ChainId(270), + chain_id: L2ChainId::from(270), }, L1BatchEnv { previous_batch_hash: Some(H256::zero()), diff --git a/core/multivm_deps/vm_1_3_2/src/test_utils.rs b/core/multivm_deps/vm_1_3_2/src/test_utils.rs index 2ebaebb4e379..5acefe94a4b3 100644 --- a/core/multivm_deps/vm_1_3_2/src/test_utils.rs +++ b/core/multivm_deps/vm_1_3_2/src/test_utils.rs @@ -172,7 +172,7 @@ pub fn mock_loadnext_test_call( nonce, fee, Default::default(), - L2ChainId(270), + 
L2ChainId::from(270), ð_private_key, None, Default::default(), @@ -209,7 +209,7 @@ pub fn mock_loadnext_gas_burn_call( nonce, fee, Default::default(), - L2ChainId(270), + L2ChainId::from(270), ð_private_key, None, Default::default(), @@ -276,7 +276,7 @@ pub fn get_deploy_tx( nonce, fee, U256::zero(), - L2ChainId(270), + L2ChainId::from(270), &account_private_key, Some(factory_deps), Default::default(), @@ -303,7 +303,7 @@ pub fn get_error_tx( nonce, fee, U256::zero(), - L2ChainId(270), + L2ChainId::from(270), &account_private_key, Some(factory_deps), Default::default(), diff --git a/core/multivm_deps/vm_m5/src/test_utils.rs b/core/multivm_deps/vm_m5/src/test_utils.rs index 13cb91a57829..83ef7575805e 100644 --- a/core/multivm_deps/vm_m5/src/test_utils.rs +++ b/core/multivm_deps/vm_m5/src/test_utils.rs @@ -171,7 +171,7 @@ pub fn mock_loadnext_test_call( nonce, fee, Default::default(), - L2ChainId(270), + L2ChainId::from(270), ð_private_key, None, Default::default(), @@ -208,7 +208,7 @@ pub fn mock_loadnext_gas_burn_call( nonce, fee, Default::default(), - L2ChainId(270), + L2ChainId::from(270), ð_private_key, None, Default::default(), @@ -275,7 +275,7 @@ pub fn get_deploy_tx( nonce, fee, U256::zero(), - L2ChainId(270), + L2ChainId::from(270), &account_private_key, Some(factory_deps), Default::default(), @@ -302,7 +302,7 @@ pub fn get_error_tx( nonce, fee, U256::zero(), - L2ChainId(270), + L2ChainId::from(270), &account_private_key, Some(factory_deps), Default::default(), diff --git a/core/multivm_deps/vm_m6/src/test_utils.rs b/core/multivm_deps/vm_m6/src/test_utils.rs index b196ed9e357f..7d7b98685ef1 100644 --- a/core/multivm_deps/vm_m6/src/test_utils.rs +++ b/core/multivm_deps/vm_m6/src/test_utils.rs @@ -171,7 +171,7 @@ pub fn mock_loadnext_test_call( nonce, fee, Default::default(), - L2ChainId(270), + L2ChainId::from(270), ð_private_key, None, Default::default(), @@ -208,7 +208,7 @@ pub fn mock_loadnext_gas_burn_call( nonce, fee, Default::default(), - L2ChainId(270), + L2ChainId::from(270), ð_private_key, None, Default::default(), @@ -275,7 +275,7 @@ pub fn get_deploy_tx( nonce, fee, U256::zero(), - L2ChainId(270), + L2ChainId::from(270), &account_private_key, Some(factory_deps), Default::default(), @@ -302,7 +302,7 @@ pub fn get_error_tx( nonce, fee, U256::zero(), - L2ChainId(270), + L2ChainId::from(270), &account_private_key, Some(factory_deps), Default::default(), diff --git a/core/tests/loadnext/src/account_pool.rs b/core/tests/loadnext/src/account_pool.rs index c19765d2bb32..556bee7f402f 100644 --- a/core/tests/loadnext/src/account_pool.rs +++ b/core/tests/loadnext/src/account_pool.rs @@ -1,4 +1,4 @@ -use std::{collections::VecDeque, str::FromStr, sync::Arc, time::Duration}; +use std::{collections::VecDeque, convert::TryFrom, str::FromStr, sync::Arc, time::Duration}; use once_cell::sync::OnceCell; use rand::Rng; @@ -90,7 +90,7 @@ pub struct AccountPool { impl AccountPool { /// Generates all the required test accounts and prepares `Wallet` objects. pub async fn new(config: &LoadtestConfig) -> anyhow::Result { - let l2_chain_id = L2ChainId(config.l2_chain_id); + let l2_chain_id = L2ChainId::try_from(config.l2_chain_id).unwrap(); // Create a client for pinging the rpc. 
let client = HttpClientBuilder::default() .build(&config.l2_rpc_address) diff --git a/core/tests/loadnext/src/config.rs b/core/tests/loadnext/src/config.rs index 1c4b5ae77339..d62f4cdb63e5 100644 --- a/core/tests/loadnext/src/config.rs +++ b/core/tests/loadnext/src/config.rs @@ -103,7 +103,7 @@ pub struct LoadtestConfig { /// Chain id of L2 node. #[serde(default = "default_l2_chain_id")] - pub l2_chain_id: u16, + pub l2_chain_id: u64, /// RPC address of L2 node. #[serde(default = "default_l2_rpc_address")] @@ -227,9 +227,9 @@ fn default_seed() -> Option { result } -fn default_l2_chain_id() -> u16 { +fn default_l2_chain_id() -> u64 { // 270 for rinkeby - let result = *L2ChainId::default(); + let result = L2ChainId::default().as_u64(); tracing::info!("Using default L2_CHAIN_ID: {result}"); result } diff --git a/core/tests/vm-benchmark/harness/src/lib.rs b/core/tests/vm-benchmark/harness/src/lib.rs index 55b4eb238d7d..e439e1359fbb 100644 --- a/core/tests/vm-benchmark/harness/src/lib.rs +++ b/core/tests/vm-benchmark/harness/src/lib.rs @@ -84,7 +84,7 @@ impl BenchmarkingVm { gas_limit: BLOCK_GAS_LIMIT, execution_mode: TxExecutionMode::VerifyExecute, default_validation_computational_gas_limit: BLOCK_GAS_LIMIT, - chain_id: L2ChainId(270), + chain_id: L2ChainId::from(270), }, Rc::new(RefCell::new(StorageView::new(&*STORAGE))), HistoryEnabled, @@ -120,7 +120,7 @@ pub fn get_deploy_tx(code: &[u8]) -> Transaction { gas_per_pubdata_limit: U256::from(MAX_GAS_PER_PUBDATA_BYTE), }, U256::zero(), - L2ChainId(270), + L2ChainId::from(270), &PRIVATE_KEY, Some(vec![code.to_vec()]), // maybe not needed? Default::default(), From 1e30d0ba8d243f41ad1e86e77d24848d64bd11e6 Mon Sep 17 00:00:00 2001 From: Alex Ostrovski Date: Fri, 6 Oct 2023 17:05:49 +0300 Subject: [PATCH 22/29] feat(merkle tree): Provide Merkle proofs for tree entries and entry ranges (#119) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit # What ❔ - Enables the Merkle tree to provide proofs for entries. The procedure efficiently handles batched requests. - Allows to verify range proofs using a streaming approach. ## Why ❔ These are preparation steps for snapshot syncing. "Plain" Merkle tree proofs could be used in `eth_getProof` implementation. ## Checklist - [x] PR title corresponds to the body of PR (we generate changelog entries from PRs). - [x] Tests for the changes have been added / updated. - [x] Documentation comments have been added / updated. - [x] Code has been formatted via `zk fmt` and `zk lint`. 
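## Usage sketch

A minimal, illustrative sketch of the APIs this PR introduces (it is not part of the diff below, and it makes a few assumptions: a fresh tree built via `MerkleTree::new(PatchSet::default())`, version `0` after the first `extend()`, and a synthetic set of 100 small keys). `entries()`, `entries_with_proofs()`, `TreeEntryWithProof::verify()` and `TreeRangeDigest` are the items added by this patch:

```rust
use zksync_crypto::hasher::blake2::Blake2Hasher;
use zksync_merkle_tree::{Key, MerkleTree, PatchSet, TreeRangeDigest, ValueHash};

fn main() {
    // Build a small in-memory tree; the first `extend()` creates version 0.
    let mut tree = MerkleTree::new(PatchSet::default());
    let kvs: Vec<_> = (0_u32..100)
        .map(|i| (Key::from(i), ValueHash::repeat_byte(i as u8 + 1)))
        .collect();
    let output = tree.extend(kvs.clone());

    // "Plain" Merkle proofs: each entry is verified against the trusted root hash.
    let keys: Vec<_> = kvs.iter().map(|(key, _)| *key).collect();
    let entries = tree.entries_with_proofs(0, &keys).unwrap();
    for (key, entry) in keys.iter().zip(&entries) {
        entry.verify(&Blake2Hasher, *key, output.root_hash);
    }

    // Range proof: only the two boundary entries carry Merkle paths; entries
    // in between are streamed through the digest in ascending key order.
    let mut sorted_keys = keys.clone();
    sorted_keys.sort_unstable();
    let (first_key, last_key) = (sorted_keys[0], *sorted_keys.last().unwrap());
    let mut boundary = tree
        .entries_with_proofs(0, &[first_key, last_key])
        .unwrap();
    let last_entry = boundary.pop().unwrap();
    let first_entry = boundary.pop().unwrap();
    let middle_keys = &sorted_keys[1..sorted_keys.len() - 1];
    let middle_entries = tree.entries(0, middle_keys).unwrap();

    let mut digest = TreeRangeDigest::new(&Blake2Hasher, first_key, &first_entry);
    for (key, entry) in middle_keys.iter().zip(middle_entries) {
        digest.update(*key, entry);
    }
    // Since this range covers every key in the tree, the range hash must equal the root hash.
    let range_hash = digest.finalize(last_key, &last_entry);
    assert_eq!(range_hash, output.root_hash);
}
```

Because only the first and the last entry of a range need full Merkle paths, a range proof for `n` entries has `O(1)` proof overhead rather than `O(n)`, and the digest itself uses `O(1)` RAM regardless of the range size.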
--- core/lib/merkle_tree/src/domain.rs | 13 +- core/lib/merkle_tree/src/errors.rs | 29 ++ core/lib/merkle_tree/src/getters.rs | 130 +++++++- core/lib/merkle_tree/src/hasher/mod.rs | 272 ++++++++++++++++ .../src/{hasher.rs => hasher/nodes.rs} | 308 +----------------- core/lib/merkle_tree/src/hasher/proofs.rs | 219 +++++++++++++ core/lib/merkle_tree/src/lib.rs | 7 +- core/lib/merkle_tree/src/storage/mod.rs | 19 +- core/lib/merkle_tree/src/storage/patch.rs | 87 ++++- core/lib/merkle_tree/src/storage/proofs.rs | 59 +--- .../src/{types.rs => types/internal.rs} | 120 +------ core/lib/merkle_tree/src/types/mod.rs | 163 +++++++++ .../tests/integration/merkle_tree.rs | 181 +++++++++- 13 files changed, 1082 insertions(+), 525 deletions(-) create mode 100644 core/lib/merkle_tree/src/hasher/mod.rs rename core/lib/merkle_tree/src/{hasher.rs => hasher/nodes.rs} (56%) create mode 100644 core/lib/merkle_tree/src/hasher/proofs.rs rename core/lib/merkle_tree/src/{types.rs => types/internal.rs} (83%) create mode 100644 core/lib/merkle_tree/src/types/mod.rs diff --git a/core/lib/merkle_tree/src/domain.rs b/core/lib/merkle_tree/src/domain.rs index add622f91395..6b26bbd873f4 100644 --- a/core/lib/merkle_tree/src/domain.rs +++ b/core/lib/merkle_tree/src/domain.rs @@ -4,7 +4,7 @@ use rayon::{ThreadPool, ThreadPoolBuilder}; use crate::{ storage::{MerkleTreeColumnFamily, PatchSet, Patched, RocksDBWrapper}, - types::{Key, LeafData, Root, TreeInstruction, TreeLogEntry, ValueHash, TREE_DEPTH}, + types::{Key, Root, TreeInstruction, TreeLogEntry, ValueHash, TREE_DEPTH}, BlockOutput, HashTree, MerkleTree, }; use zksync_crypto::hasher::blake2::Blake2Hasher; @@ -159,17 +159,6 @@ impl ZkSyncTree { }); } - /// Reads leaf nodes with the specified keys from the tree storage. The nodes - /// are returned in a `Vec` in the same order as requested. - pub fn read_leaves( - &self, - l1_batch_number: L1BatchNumber, - leaf_keys: &[Key], - ) -> Vec> { - let version = u64::from(l1_batch_number.0); - self.tree.read_leaves(version, leaf_keys) - } - /// Processes an iterator of storage logs comprising a single L1 batch. pub fn process_l1_batch(&mut self, storage_logs: &[StorageLog]) -> TreeMetadata { match self.mode { diff --git a/core/lib/merkle_tree/src/errors.rs b/core/lib/merkle_tree/src/errors.rs index b2aba5c12843..a30b0b98f5be 100644 --- a/core/lib/merkle_tree/src/errors.rs +++ b/core/lib/merkle_tree/src/errors.rs @@ -135,6 +135,35 @@ impl fmt::Display for DeserializeError { impl error::Error for DeserializeError {} +/// Error accessing a specific tree version. +#[derive(Debug)] +pub struct NoVersionError { + pub(crate) missing_version: u64, + pub(crate) version_count: u64, +} + +impl fmt::Display for NoVersionError { + fn fmt(&self, formatter: &mut fmt::Formatter<'_>) -> fmt::Result { + let &Self { + missing_version, + version_count, + } = self; + if missing_version >= version_count { + write!( + formatter, + "Version {missing_version} does not exist in Merkle tree; it has {version_count} versions" + ) + } else { + write!( + formatter, + "Version {missing_version} was pruned from Merkle tree" + ) + } + } +} + +impl error::Error for NoVersionError {} + #[cfg(test)] mod tests { use super::*; diff --git a/core/lib/merkle_tree/src/getters.rs b/core/lib/merkle_tree/src/getters.rs index 3c32b64b8d8f..66b9c303c506 100644 --- a/core/lib/merkle_tree/src/getters.rs +++ b/core/lib/merkle_tree/src/getters.rs @@ -1,37 +1,135 @@ //! Getters for the Merkle tree. 
use crate::{ + hasher::HasherWithStats, storage::{LoadAncestorsResult, SortedKeys, WorkingPatchSet}, - types::{LeafData, Node}, - Database, Key, MerkleTree, + types::{Nibbles, Node, TreeEntry, TreeEntryWithProof}, + Database, Key, MerkleTree, NoVersionError, ValueHash, }; impl<DB> MerkleTree<'_, DB> where DB: Database, { - /// Reads leaf nodes with the specified keys from the tree storage. The nodes - /// are returned in a `Vec` in the same order as requested. - pub fn read_leaves(&self, version: u64, leaf_keys: &[Key]) -> Vec<Option<LeafData>> { - let Some(root) = self.db.root(version) else { - return vec![None; leaf_keys.len()]; - }; + /// Reads entries with the specified keys from the tree. The entries are returned in the same order + /// as requested. + /// + /// # Errors + /// + /// Returns an error if the tree `version` is missing. + pub fn entries( + &self, + version: u64, + leaf_keys: &[Key], + ) -> Result<Vec<TreeEntry>, NoVersionError> { + self.load_and_transform_entries( + version, + leaf_keys, + |patch_set, leaf_key, longest_prefix| { + let node = patch_set.get(longest_prefix); + match node { + Some(Node::Leaf(leaf)) if &leaf.full_key == leaf_key => (*leaf).into(), + _ => TreeEntry::empty(), + } + }, + ) + } + + fn load_and_transform_entries<T>( + &self, + version: u64, + leaf_keys: &[Key], + mut transform: impl FnMut(&mut WorkingPatchSet, &Key, &Nibbles) -> T, + ) -> Result<Vec<T>, NoVersionError> { + let root = self.db.root(version).ok_or_else(|| { + let manifest = self.db.manifest().unwrap_or_default(); + NoVersionError { + missing_version: version, + version_count: manifest.version_count, + } + })?; let sorted_keys = SortedKeys::new(leaf_keys.iter().copied()); let mut patch_set = WorkingPatchSet::new(version, root); let LoadAncestorsResult { longest_prefixes, .. } = patch_set.load_ancestors(&sorted_keys, &self.db); - leaf_keys + Ok(leaf_keys .iter() .zip(&longest_prefixes) - .map(|(leaf_key, longest_prefix)| { - let node = patch_set.get(longest_prefix); - match node { - Some(Node::Leaf(leaf)) if &leaf.full_key == leaf_key => Some((*leaf).into()), - _ => None, + .map(|(leaf_key, longest_prefix)| transform(&mut patch_set, leaf_key, longest_prefix)) + .collect()) + } + + /// Reads entries with the specified keys from the tree, together with Merkle proofs. The entries are returned + /// in the same order as requested. + /// + /// # Errors + /// + /// Returns an error if the tree `version` is missing. 
+ pub fn entries_with_proofs( + &self, + version: u64, + leaf_keys: &[Key], + ) -> Result<Vec<TreeEntryWithProof>, NoVersionError> { + let mut hasher = HasherWithStats::from(self.hasher); + self.load_and_transform_entries( + version, + leaf_keys, + |patch_set, &leaf_key, longest_prefix| { + let (leaf, merkle_path) = + patch_set.create_proof(&mut hasher, leaf_key, longest_prefix, 0); + let value_hash = leaf + .as_ref() + .map_or_else(ValueHash::zero, |leaf| leaf.value_hash); + TreeEntry { + value_hash, + leaf_index: leaf.map_or(0, |leaf| leaf.leaf_index), } - }) - .collect() + .with_merkle_path(merkle_path.into_inner()) + }, + ) + } +} + +#[cfg(test)] +mod tests { + use super::*; + use crate::PatchSet; + + #[test] + fn entries_in_empty_tree() { + let mut tree = MerkleTree::new(PatchSet::default()); + tree.extend(vec![]); + let missing_key = Key::from(123); + + let entries = tree.entries(0, &[missing_key]).unwrap(); + assert_eq!(entries.len(), 1); + assert!(entries[0].is_empty()); + + let entries = tree.entries_with_proofs(0, &[missing_key]).unwrap(); + assert_eq!(entries.len(), 1); + assert!(entries[0].base.is_empty()); + entries[0].verify(tree.hasher, missing_key, tree.hasher.empty_tree_hash()); + } + + #[test] + fn entries_in_single_node_tree() { + let mut tree = MerkleTree::new(PatchSet::default()); + let key = Key::from(987_654); + let output = tree.extend(vec![(key, ValueHash::repeat_byte(1))]); + let missing_key = Key::from(123); + + let entries = tree.entries(0, &[key, missing_key]).unwrap(); + assert_eq!(entries.len(), 2); + assert_eq!(entries[0].value_hash, ValueHash::repeat_byte(1)); + assert_eq!(entries[0].leaf_index, 1); + + let entries = tree.entries_with_proofs(0, &[key, missing_key]).unwrap(); + assert_eq!(entries.len(), 2); + assert!(!entries[0].base.is_empty()); + entries[0].verify(tree.hasher, key, output.root_hash); + assert!(entries[1].base.is_empty()); + entries[1].verify(tree.hasher, missing_key, output.root_hash); } } diff --git a/core/lib/merkle_tree/src/hasher/mod.rs b/core/lib/merkle_tree/src/hasher/mod.rs new file mode 100644 index 000000000000..cf64c5ec3aea --- /dev/null +++ b/core/lib/merkle_tree/src/hasher/mod.rs @@ -0,0 +1,272 @@ +//! Hashing operations on the Merkle tree. + +use once_cell::sync::Lazy; + +use std::{fmt, iter}; + +mod nodes; +mod proofs; + +pub(crate) use self::nodes::{InternalNodeCache, MerklePath}; +pub use self::proofs::TreeRangeDigest; +use crate::{ + metrics::HashingStats, + types::{Key, ValueHash, TREE_DEPTH}, +}; +use zksync_crypto::hasher::{blake2::Blake2Hasher, Hasher}; + +/// Tree hashing functionality. +pub trait HashTree: Send + Sync { + /// Returns the unique name of the hasher. This is used in Merkle tree tags to ensure + /// that the tree remains consistent. + fn name(&self) -> &'static str; + + /// Hashes a leaf node. + fn hash_leaf(&self, value_hash: &ValueHash, leaf_index: u64) -> ValueHash; + /// Compresses hashes in an intermediate node of a binary Merkle tree. + fn hash_branch(&self, lhs: &ValueHash, rhs: &ValueHash) -> ValueHash; + + /// Returns the hash of an empty subtree with the given depth. Implementations + /// are encouraged to cache the returned values. + fn empty_subtree_hash(&self, depth: usize) -> ValueHash; +} + +impl dyn HashTree + '_ { + pub(crate) fn empty_tree_hash(&self) -> ValueHash { + self.empty_subtree_hash(TREE_DEPTH) + } + + /// Extends the provided `path` to length `TREE_DEPTH`. 
+ fn extend_merkle_path<'a>( + &'a self, + path: &'a [ValueHash], + ) -> impl Iterator<Item = ValueHash> + 'a { + let empty_hash_count = TREE_DEPTH - path.len(); + let empty_hashes = (0..empty_hash_count).map(|depth| self.empty_subtree_hash(depth)); + empty_hashes.chain(path.iter().copied()) + } + + fn fold_merkle_path( + &self, + path: &[ValueHash], + key: Key, + value_hash: ValueHash, + leaf_index: u64, + ) -> ValueHash { + let mut hash = self.hash_leaf(&value_hash, leaf_index); + let full_path = self.extend_merkle_path(path); + for (depth, adjacent_hash) in full_path.enumerate() { + hash = if key.bit(depth) { + self.hash_branch(&adjacent_hash, &hash) + } else { + self.hash_branch(&hash, &adjacent_hash) + }; + } + hash + } + + pub(crate) fn with_stats<'a>(&'a self, stats: &'a HashingStats) -> HasherWithStats<'a> { + HasherWithStats { + shared_metrics: Some(stats), + ..HasherWithStats::from(self) + } + } +} + +impl fmt::Debug for dyn HashTree + '_ { + fn fmt(&self, formatter: &mut fmt::Formatter<'_>) -> fmt::Result { + formatter.debug_struct("HashTree").finish_non_exhaustive() + } +} + +/// No-op hasher that returns `H256::zero()` for all operations. +impl HashTree for () { + fn name(&self) -> &'static str { + "no_op256" + } + + fn hash_leaf(&self, _value_hash: &ValueHash, _leaf_index: u64) -> ValueHash { + ValueHash::zero() + } + + fn hash_branch(&self, _lhs: &ValueHash, _rhs: &ValueHash) -> ValueHash { + ValueHash::zero() + } + + fn empty_subtree_hash(&self, _depth: usize) -> ValueHash { + ValueHash::zero() + } +} + +impl HashTree for Blake2Hasher { + fn name(&self) -> &'static str { + "blake2s256" + } + + fn hash_leaf(&self, value_hash: &ValueHash, leaf_index: u64) -> ValueHash { + let mut bytes = [0_u8; 40]; + bytes[..8].copy_from_slice(&leaf_index.to_be_bytes()); + bytes[8..].copy_from_slice(value_hash.as_ref()); + self.hash_bytes(&bytes) + } + + /// Compresses the hashes of 2 children in a branch node. + fn hash_branch(&self, lhs: &ValueHash, rhs: &ValueHash) -> ValueHash { + self.compress(lhs, rhs) + } + + /// Returns the hash of an empty subtree with the given depth. + fn empty_subtree_hash(&self, depth: usize) -> ValueHash { + static EMPTY_TREE_HASHES: Lazy<Vec<ValueHash>> = Lazy::new(compute_empty_tree_hashes); + EMPTY_TREE_HASHES[depth] + } +} + +fn compute_empty_tree_hashes() -> Vec<ValueHash> { + let empty_leaf_hash = Blake2Hasher.hash_bytes(&[0_u8; 40]); + iter::successors(Some(empty_leaf_hash), |hash| { + Some(Blake2Hasher.hash_branch(hash, hash)) + }) + .take(TREE_DEPTH + 1) + .collect() +} + +/// Hasher that keeps track of hashing metrics. +/// +/// On drop, the metrics are merged into `shared_stats` (if present). Such roundabout handling +/// is motivated by efficiency; if atomics were to be used to track metrics (e.g., +/// via a wrapping `HashTree` implementation), this would tank performance because of contention. 
+#[derive(Debug)] +pub(crate) struct HasherWithStats<'a> { + inner: &'a dyn HashTree, + shared_metrics: Option<&'a HashingStats>, + local_hashed_bytes: u64, +} + +impl<'a> From<&'a dyn HashTree> for HasherWithStats<'a> { + fn from(inner: &'a dyn HashTree) -> Self { + Self { + inner, + shared_metrics: None, + local_hashed_bytes: 0, + } + } +} + +impl<'a> AsRef<dyn HashTree + 'a> for HasherWithStats<'a> { + fn as_ref(&self) -> &(dyn HashTree + 'a) { + self.inner + } +} + +impl Drop for HasherWithStats<'_> { + fn drop(&mut self) { + if let Some(shared_stats) = self.shared_metrics { + shared_stats.add_hashed_bytes(self.local_hashed_bytes); + } + } +} + +impl HasherWithStats<'_> { + fn hash_leaf(&mut self, value_hash: &ValueHash, leaf_index: u64) -> ValueHash { + const HASHED_BYTES: u64 = 8 + ValueHash::len_bytes() as u64; + + self.local_hashed_bytes += HASHED_BYTES; + self.inner.hash_leaf(value_hash, leaf_index) + } + + fn hash_branch(&mut self, lhs: &ValueHash, rhs: &ValueHash) -> ValueHash { + const HASHED_BYTES: u64 = 2 * ValueHash::len_bytes() as u64; + + self.local_hashed_bytes += HASHED_BYTES; + self.inner.hash_branch(lhs, rhs) + } + + fn hash_optional_branch( + &mut self, + subtree_depth: usize, + lhs: Option<ValueHash>, + rhs: Option<ValueHash>, + ) -> Option<ValueHash> { + match (lhs, rhs) { + (None, None) => None, + (Some(lhs), None) => { + let empty_hash = self.empty_subtree_hash(subtree_depth); + Some(self.hash_branch(&lhs, &empty_hash)) + } + (None, Some(rhs)) => { + let empty_hash = self.empty_subtree_hash(subtree_depth); + Some(self.hash_branch(&empty_hash, &rhs)) + } + (Some(lhs), Some(rhs)) => Some(self.hash_branch(&lhs, &rhs)), + } + } + + pub fn empty_subtree_hash(&self, depth: usize) -> ValueHash { + self.inner.empty_subtree_hash(depth) + } +} + +#[cfg(test)] +mod tests { + use super::*; + use crate::types::LeafNode; + use zksync_types::{AccountTreeId, Address, StorageKey, H256}; + + #[test] + fn empty_tree_hash_is_as_expected() { + const EXPECTED_HASH: ValueHash = H256([ + 152, 164, 142, 78, 209, 115, 97, 136, 56, 74, 232, 167, 157, 210, 28, 77, 102, 135, + 229, 253, 34, 202, 24, 20, 137, 6, 215, 135, 54, 192, 216, 106, + ]); + + let hasher: &dyn HashTree = &Blake2Hasher; + assert_eq!(hasher.empty_tree_hash(), EXPECTED_HASH); + } + + #[test] + fn leaf_is_hashed_as_expected() { + // Reference value taken from the previous implementation. 
+ const EXPECTED_HASH: ValueHash = H256([ + 127, 0, 166, 178, 238, 222, 150, 8, 87, 112, 60, 140, 185, 233, 111, 40, 185, 16, 230, + 105, 52, 18, 206, 164, 176, 6, 242, 66, 57, 182, 129, 224, + ]); + + let address: Address = "4b3af74f66ab1f0da3f2e4ec7a3cb99baf1af7b2".parse().unwrap(); + let key = StorageKey::new(AccountTreeId::new(address), H256::zero()); + let key = key.hashed_key_u256(); + let leaf = LeafNode::new(key, H256([1; 32]), 1); + + let stats = HashingStats::default(); + let mut hasher = (&Blake2Hasher as &dyn HashTree).with_stats(&stats); + let leaf_hash = leaf.hash(&mut hasher, 0); + assert_eq!(leaf_hash, EXPECTED_HASH); + + drop(hasher); + assert!(stats.hashed_bytes.into_inner() > 100); + + let hasher: &dyn HashTree = &Blake2Hasher; + let folded_hash = hasher.fold_merkle_path(&[], key, H256([1; 32]), 1); + assert_eq!(folded_hash, EXPECTED_HASH); + } + + #[test] + fn folding_merkle_path() { + let address: Address = "4b3af74f66ab1f0da3f2e4ec7a3cb99baf1af7b2".parse().unwrap(); + let key = StorageKey::new(AccountTreeId::new(address), H256::zero()); + let key = key.hashed_key_u256(); + let leaf = LeafNode::new(key, H256([1; 32]), 1); + + let mut hasher = (&Blake2Hasher as &dyn HashTree).into(); + let leaf_hash = leaf.hash(&mut hasher, 2); + assert!(key.bit(254) && !key.bit(255)); + let merkle_path = [H256([2; 32]), H256([3; 32])]; + let expected_hash = hasher.hash_branch(&merkle_path[0], &leaf_hash); + let expected_hash = hasher.hash_branch(&expected_hash, &merkle_path[1]); + + let folded_hash = hasher + .inner + .fold_merkle_path(&merkle_path, key, H256([1; 32]), 1); + assert_eq!(folded_hash, expected_hash); + } +} diff --git a/core/lib/merkle_tree/src/hasher.rs b/core/lib/merkle_tree/src/hasher/nodes.rs similarity index 56% rename from core/lib/merkle_tree/src/hasher.rs rename to core/lib/merkle_tree/src/hasher/nodes.rs index 5c06f2e35610..715e0c958fa4 100644 --- a/core/lib/merkle_tree/src/hasher.rs +++ b/core/lib/merkle_tree/src/hasher/nodes.rs @@ -1,201 +1,11 @@ -//! Hashing operations on the Merkle tree. +//! Hash helpers for tree nodes. -use once_cell::sync::Lazy; - -use std::{fmt, iter, slice}; +use std::slice; use crate::{ - metrics::HashingStats, - types::{ - BlockOutputWithProofs, ChildRef, InternalNode, Key, LeafNode, Node, TreeInstruction, - TreeLogEntry, ValueHash, TREE_DEPTH, - }, + hasher::HasherWithStats, + types::{ChildRef, InternalNode, LeafNode, Node, ValueHash, TREE_DEPTH}, }; -use zksync_crypto::hasher::{blake2::Blake2Hasher, Hasher}; - -/// Tree hashing functionality. -pub trait HashTree: Send + Sync { - /// Returns the unique name of the hasher. This is used in Merkle tree tags to ensure - /// that the tree remains consistent. - fn name(&self) -> &'static str; - - /// Hashes a leaf node. - fn hash_leaf(&self, value_hash: &ValueHash, leaf_index: u64) -> ValueHash; - /// Compresses hashes in an intermediate node of a binary Merkle tree. - fn hash_branch(&self, lhs: &ValueHash, rhs: &ValueHash) -> ValueHash; - - /// Returns the hash of an empty subtree with the given depth. Implementations - /// are encouraged to cache the returned values. 
- fn empty_subtree_hash(&self, depth: usize) -> ValueHash; -} - -impl dyn HashTree + '_ { - pub(crate) fn empty_tree_hash(&self) -> ValueHash { - self.empty_subtree_hash(TREE_DEPTH) - } - - fn fold_merkle_path( - &self, - path: &[ValueHash], - key: Key, - value_hash: ValueHash, - leaf_index: u64, - ) -> ValueHash { - let mut hash = self.hash_leaf(&value_hash, leaf_index); - let empty_hash_count = TREE_DEPTH - path.len(); - let empty_hashes = (0..empty_hash_count).map(|depth| self.empty_subtree_hash(depth)); - let full_path = empty_hashes.chain(path.iter().copied()); - for (depth, adjacent_hash) in full_path.enumerate() { - hash = if key.bit(depth) { - self.hash_branch(&adjacent_hash, &hash) - } else { - self.hash_branch(&hash, &adjacent_hash) - }; - } - hash - } - - pub(crate) fn with_stats<'a>(&'a self, stats: &'a HashingStats) -> HasherWithStats<'a> { - HasherWithStats { - shared_metrics: Some(stats), - ..HasherWithStats::from(self) - } - } -} - -impl fmt::Debug for dyn HashTree + '_ { - fn fmt(&self, formatter: &mut fmt::Formatter<'_>) -> fmt::Result { - formatter.debug_struct("HashTree").finish_non_exhaustive() - } -} - -/// No-op hasher that returns `H256::zero()` for all operations. -impl HashTree for () { - fn name(&self) -> &'static str { - "no_op256" - } - - fn hash_leaf(&self, _value_hash: &ValueHash, _leaf_index: u64) -> ValueHash { - ValueHash::zero() - } - - fn hash_branch(&self, _lhs: &ValueHash, _rhs: &ValueHash) -> ValueHash { - ValueHash::zero() - } - - fn empty_subtree_hash(&self, _depth: usize) -> ValueHash { - ValueHash::zero() - } -} - -impl HashTree for Blake2Hasher { - fn name(&self) -> &'static str { - "blake2s256" - } - - fn hash_leaf(&self, value_hash: &ValueHash, leaf_index: u64) -> ValueHash { - let mut bytes = [0_u8; 40]; - bytes[..8].copy_from_slice(&leaf_index.to_be_bytes()); - bytes[8..].copy_from_slice(value_hash.as_ref()); - self.hash_bytes(&bytes) - } - - /// Compresses the hashes of 2 children in a branch node. - fn hash_branch(&self, lhs: &ValueHash, rhs: &ValueHash) -> ValueHash { - self.compress(lhs, rhs) - } - - /// Returns the hash of an empty subtree with the given depth. - fn empty_subtree_hash(&self, depth: usize) -> ValueHash { - static EMPTY_TREE_HASHES: Lazy> = Lazy::new(compute_empty_tree_hashes); - EMPTY_TREE_HASHES[depth] - } -} - -fn compute_empty_tree_hashes() -> Vec { - let empty_leaf_hash = Blake2Hasher.hash_bytes(&[0_u8; 40]); - iter::successors(Some(empty_leaf_hash), |hash| { - Some(Blake2Hasher.hash_branch(hash, hash)) - }) - .take(TREE_DEPTH + 1) - .collect() -} - -/// Hasher that keeps track of hashing metrics. -/// -/// On drop, the metrics are merged into `shared_stats` (if present). Such roundabout handling -/// is motivated by efficiency; if atomics were to be used to track metrics (e.g., -/// via a wrapping `HashTree` implementation), this would tank performance because of contention. 
-#[derive(Debug)] -pub(crate) struct HasherWithStats<'a> { - inner: &'a dyn HashTree, - shared_metrics: Option<&'a HashingStats>, - local_hashed_bytes: u64, -} - -impl<'a> From<&'a dyn HashTree> for HasherWithStats<'a> { - fn from(inner: &'a dyn HashTree) -> Self { - Self { - inner, - shared_metrics: None, - local_hashed_bytes: 0, - } - } -} - -impl<'a> AsRef for HasherWithStats<'a> { - fn as_ref(&self) -> &(dyn HashTree + 'a) { - self.inner - } -} - -impl Drop for HasherWithStats<'_> { - fn drop(&mut self) { - if let Some(shared_stats) = self.shared_metrics { - shared_stats.add_hashed_bytes(self.local_hashed_bytes); - } - } -} - -impl HasherWithStats<'_> { - fn hash_leaf(&mut self, value_hash: &ValueHash, leaf_index: u64) -> ValueHash { - const HASHED_BYTES: u64 = 8 + ValueHash::len_bytes() as u64; - - self.local_hashed_bytes += HASHED_BYTES; - self.inner.hash_leaf(value_hash, leaf_index) - } - - fn hash_branch(&mut self, lhs: &ValueHash, rhs: &ValueHash) -> ValueHash { - const HASHED_BYTES: u64 = 2 * ValueHash::len_bytes() as u64; - - self.local_hashed_bytes += HASHED_BYTES; - self.inner.hash_branch(lhs, rhs) - } - - fn hash_optional_branch( - &mut self, - subtree_depth: usize, - lhs: Option, - rhs: Option, - ) -> Option { - match (lhs, rhs) { - (None, None) => None, - (Some(lhs), None) => { - let empty_hash = self.empty_subtree_hash(subtree_depth); - Some(self.hash_branch(&lhs, &empty_hash)) - } - (None, Some(rhs)) => { - let empty_hash = self.empty_subtree_hash(subtree_depth); - Some(self.hash_branch(&empty_hash, &rhs)) - } - (Some(lhs), Some(rhs)) => Some(self.hash_branch(&lhs, &rhs)), - } - } - - pub fn empty_subtree_hash(&self, depth: usize) -> ValueHash { - self.inner.empty_subtree_hash(depth) - } -} impl LeafNode { pub(crate) fn hash(&self, hasher: &mut HasherWithStats<'_>, level: usize) -> ValueHash { @@ -446,116 +256,12 @@ impl Node { } } -impl BlockOutputWithProofs { - /// Verifies this output against the trusted old root hash of the tree and - /// the applied instructions. - /// - /// # Panics - /// - /// Panics if the proof doesn't verify. 
- pub fn verify_proofs( - &self, - hasher: &dyn HashTree, - old_root_hash: ValueHash, - instructions: &[(Key, TreeInstruction)], - ) { - assert_eq!(instructions.len(), self.logs.len()); - - let mut root_hash = old_root_hash; - for (op, &(key, instruction)) in self.logs.iter().zip(instructions) { - assert!(op.merkle_path.len() <= TREE_DEPTH); - if matches!(instruction, TreeInstruction::Read) { - assert_eq!(op.root_hash, root_hash); - assert!(op.base.is_read()); - } else { - assert!(!op.base.is_read()); - } - - let (prev_leaf_index, leaf_index, prev_value) = match op.base { - TreeLogEntry::Inserted { leaf_index } => (0, leaf_index, ValueHash::zero()), - TreeLogEntry::Updated { - leaf_index, - previous_value, - } => (leaf_index, leaf_index, previous_value), - - TreeLogEntry::Read { leaf_index, value } => (leaf_index, leaf_index, value), - TreeLogEntry::ReadMissingKey => (0, 0, ValueHash::zero()), - }; - - let prev_hash = - hasher.fold_merkle_path(&op.merkle_path, key, prev_value, prev_leaf_index); - assert_eq!(prev_hash, root_hash); - if let TreeInstruction::Write(value) = instruction { - let next_hash = hasher.fold_merkle_path(&op.merkle_path, key, value, leaf_index); - assert_eq!(next_hash, op.root_hash); - } - root_hash = op.root_hash; - } - } -} - #[cfg(test)] mod tests { use super::*; - use crate::types::ChildRef; - use zksync_types::{AccountTreeId, Address, StorageKey, H256}; - - #[test] - fn empty_tree_hash_is_as_expected() { - const EXPECTED_HASH: ValueHash = H256([ - 152, 164, 142, 78, 209, 115, 97, 136, 56, 74, 232, 167, 157, 210, 28, 77, 102, 135, - 229, 253, 34, 202, 24, 20, 137, 6, 215, 135, 54, 192, 216, 106, - ]); - - let hasher: &dyn HashTree = &Blake2Hasher; - assert_eq!(hasher.empty_tree_hash(), EXPECTED_HASH); - } - - #[test] - fn leaf_is_hashed_as_expected() { - // Reference value taken from the previous implementation. 
- const EXPECTED_HASH: ValueHash = H256([ - 127, 0, 166, 178, 238, 222, 150, 8, 87, 112, 60, 140, 185, 233, 111, 40, 185, 16, 230, - 105, 52, 18, 206, 164, 176, 6, 242, 66, 57, 182, 129, 224, - ]); - - let address: Address = "4b3af74f66ab1f0da3f2e4ec7a3cb99baf1af7b2".parse().unwrap(); - let key = StorageKey::new(AccountTreeId::new(address), H256::zero()); - let key = key.hashed_key_u256(); - let leaf = LeafNode::new(key, H256([1; 32]), 1); - - let stats = HashingStats::default(); - let mut hasher = (&Blake2Hasher as &dyn HashTree).with_stats(&stats); - let leaf_hash = leaf.hash(&mut hasher, 0); - assert_eq!(leaf_hash, EXPECTED_HASH); - - drop(hasher); - assert!(stats.hashed_bytes.into_inner() > 100); - - let hasher: &dyn HashTree = &Blake2Hasher; - let folded_hash = hasher.fold_merkle_path(&[], key, H256([1; 32]), 1); - assert_eq!(folded_hash, EXPECTED_HASH); - } - - #[test] - fn folding_merkle_path() { - let address: Address = "4b3af74f66ab1f0da3f2e4ec7a3cb99baf1af7b2".parse().unwrap(); - let key = StorageKey::new(AccountTreeId::new(address), H256::zero()); - let key = key.hashed_key_u256(); - let leaf = LeafNode::new(key, H256([1; 32]), 1); - - let mut hasher = (&Blake2Hasher as &dyn HashTree).into(); - let leaf_hash = leaf.hash(&mut hasher, 2); - assert!(key.bit(254) && !key.bit(255)); - let merkle_path = [H256([2; 32]), H256([3; 32])]; - let expected_hash = hasher.hash_branch(&merkle_path[0], &leaf_hash); - let expected_hash = hasher.hash_branch(&expected_hash, &merkle_path[1]); - - let folded_hash = hasher - .inner - .fold_merkle_path(&merkle_path, key, H256([1; 32]), 1); - assert_eq!(folded_hash, expected_hash); - } + use crate::hasher::HashTree; + use zksync_crypto::hasher::{blake2::Blake2Hasher, Hasher}; + use zksync_types::H256; fn test_internal_node_hashing(child_indexes: &[u8]) { println!("Testing indices: {child_indexes:?}"); diff --git a/core/lib/merkle_tree/src/hasher/proofs.rs b/core/lib/merkle_tree/src/hasher/proofs.rs new file mode 100644 index 000000000000..e496acb3f882 --- /dev/null +++ b/core/lib/merkle_tree/src/hasher/proofs.rs @@ -0,0 +1,219 @@ +//! Merkle proof-related hashing logic. + +use std::mem; + +use crate::{ + hasher::{HashTree, HasherWithStats}, + types::{ + BlockOutputWithProofs, Key, LeafNode, TreeEntry, TreeEntryWithProof, TreeInstruction, + TreeLogEntry, ValueHash, TREE_DEPTH, + }, + utils, +}; + +impl BlockOutputWithProofs { + /// Verifies this output against the trusted old root hash of the tree and + /// the applied instructions. + /// + /// # Panics + /// + /// Panics if the proof doesn't verify. 
+ pub fn verify_proofs( + &self, + hasher: &dyn HashTree, + old_root_hash: ValueHash, + instructions: &[(Key, TreeInstruction)], + ) { + assert_eq!(instructions.len(), self.logs.len()); + + let mut root_hash = old_root_hash; + for (op, &(key, instruction)) in self.logs.iter().zip(instructions) { + assert!(op.merkle_path.len() <= TREE_DEPTH); + if matches!(instruction, TreeInstruction::Read) { + assert_eq!(op.root_hash, root_hash); + assert!(op.base.is_read()); + } else { + assert!(!op.base.is_read()); + } + + let (prev_leaf_index, leaf_index, prev_value) = match op.base { + TreeLogEntry::Inserted { leaf_index } => (0, leaf_index, ValueHash::zero()), + TreeLogEntry::Updated { + leaf_index, + previous_value, + } => (leaf_index, leaf_index, previous_value), + + TreeLogEntry::Read { leaf_index, value } => (leaf_index, leaf_index, value), + TreeLogEntry::ReadMissingKey => (0, 0, ValueHash::zero()), + }; + + let prev_hash = + hasher.fold_merkle_path(&op.merkle_path, key, prev_value, prev_leaf_index); + assert_eq!(prev_hash, root_hash); + if let TreeInstruction::Write(value) = instruction { + let next_hash = hasher.fold_merkle_path(&op.merkle_path, key, value, leaf_index); + assert_eq!(next_hash, op.root_hash); + } + root_hash = op.root_hash; + } + } +} + +impl TreeEntryWithProof { + /// Verifies this proof. + /// + /// # Panics + /// + /// Panics if the proof doesn't verify. + pub fn verify(&self, hasher: &dyn HashTree, key: Key, trusted_root_hash: ValueHash) { + if self.base.leaf_index == 0 { + assert!( + self.base.value_hash.is_zero(), + "Invalid missing value specification: leaf index is zero, but value is non-default" + ); + } + let root_hash = hasher.fold_merkle_path( + &self.merkle_path, + key, + self.base.value_hash, + self.base.leaf_index, + ); + assert_eq!(root_hash, trusted_root_hash, "Root hash mismatch"); + } +} + +/// Range digest in a Merkle tree allowing to compute its root hash based on the provided entries. +/// +/// - The entries must be ordered by key. I.e., the first entry must have the numerically smallest key, +/// and the last entry must have the numerically greatest key among all provided entries. +/// - The first and the last entries must be provided together with a Merkle proof; other entries +/// do not need proofs. +/// - Any entry can be [empty](TreeEntry::is_empty()). I.e., there's no requirement to only +/// provide existing entries. +/// +/// This construction is useful for verifying *Merkle range proofs*. Such a proof proves that +/// a certain key range in the Merkle tree contains the specified entries and no other entries. +/// +/// # Implementation details +/// +/// A streaming approach is used. `TreeRange` occupies `O(1)` RAM w.r.t. the number of entries. +/// `TreeRange` consists of `TREE_DEPTH = 256` hashes and a constant amount of other data. +// +// We keep a *left contour* of hashes, i.e., known hashes to the left of the last processed key. +// Initially, the left contour is a filtered Merkle path for the start entry; we only take into +// account left hashes in the path (ones for which the corresponding start key bit is 1), and +// discard right hashes. +// +// When a `TreeRange` is updated, we find the first diverging bit between the last processed key +// and the new key. (This bit is always 0 in the last processed key and 1 in the new key.) +// +// ```text +// ... +// diverging_level: / \ +// ... 
| (only empty subtrees) | +// TREE_DEPTH: current_leaf next_leaf +// ``` +// +// We update the left contour by collapsing the last processed entry up to the diverging bit. +// When collapsing, we take advantage of the fact that all right hashes in the collapsed part +// of the Merkle path correspond to empty subtrees. We also clean all hashes in the left contour +// further down the tree; it's guaranteed that the next key will only have empty subtrees +// to the left of it until the diverging level. +// +// When we want to finalize a range, we update the left contour one final time, and then collapse +// the Merkle path for the final key all the way to the root hash. When doing this, we take +// right hashes from the provided path, and left hashes from the left contour (left hashes from +// the final entry Merkle path are discarded). +#[derive(Debug)] +pub struct TreeRangeDigest<'a> { + hasher: HasherWithStats<'a>, + current_leaf: LeafNode, + left_contour: Box<[ValueHash; TREE_DEPTH]>, +} + +impl<'a> TreeRangeDigest<'a> { + /// Starts a new Merkle tree range. + #[allow(clippy::missing_panics_doc)] // false positive + pub fn new(hasher: &'a dyn HashTree, start_key: Key, start_entry: &TreeEntryWithProof) -> Self { + let full_path = hasher.extend_merkle_path(&start_entry.merkle_path); + let left_contour = full_path.enumerate().map(|(depth, adjacent_hash)| { + if start_key.bit(depth) { + adjacent_hash // `adjacent_hash` is to the left of the `start_key`; take it + } else { + hasher.empty_subtree_hash(depth) + } + }); + let left_contour: Vec<_> = left_contour.collect(); + Self { + hasher: hasher.into(), + current_leaf: LeafNode::new( + start_key, + start_entry.base.value_hash, + start_entry.base.leaf_index, + ), + left_contour: left_contour.try_into().unwrap(), + // ^ `unwrap()` is safe by construction; `left_contour` will always have necessary length + } + } + + /// Updates this digest with a new entry. + /// + /// # Panics + /// + /// Panics if the provided `key` is not greater than the previous key provided to this digest. + pub fn update(&mut self, key: Key, entry: TreeEntry) { + assert!( + key > self.current_leaf.full_key, + "Keys provided to a digest must be monotonically increasing" + ); + + let diverging_level = utils::find_diverging_bit(self.current_leaf.full_key, key) + 1; + + // Hash the current leaf up to the `diverging_level`, taking current `left_contour` into account. + let mut hash = self + .hasher + .hash_leaf(&self.current_leaf.value_hash, self.current_leaf.leaf_index); + for depth in 0..(TREE_DEPTH - diverging_level) { + let empty_subtree_hash = self.hasher.empty_subtree_hash(depth); + // Replace the left contour value with the default one. + let left_hash = mem::replace(&mut self.left_contour[depth], empty_subtree_hash); + + hash = if self.current_leaf.full_key.bit(depth) { + self.hasher.hash_branch(&left_hash, &hash) + } else { + // We don't take right contour into account, since by construction (because we iterate + // over keys in ascending order) it's always empty. + self.hasher.hash_branch(&hash, &empty_subtree_hash) + }; + } + // Record the computed hash. + self.left_contour[TREE_DEPTH - diverging_level] = hash; + self.current_leaf = LeafNode::new(key, entry.value_hash, entry.leaf_index); + } + + /// Finalizes this digest and returns the root hash of the tree. + /// + /// # Panics + /// + /// Panics if the provided `final_key` is not greater than the previous key provided to this digest. 
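+ /// (This is the same monotonicity check as in [`Self::update()`], which `finalize()` invokes internally.)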
+ pub fn finalize(mut self, final_key: Key, final_entry: &TreeEntryWithProof) -> ValueHash { + self.update(final_key, final_entry.base); + + let full_path = self + .hasher + .inner + .extend_merkle_path(&final_entry.merkle_path); + let zipped_paths = self.left_contour.into_iter().zip(full_path); + let mut hash = self + .hasher + .hash_leaf(&final_entry.base.value_hash, final_entry.base.leaf_index); + for (depth, (left, right)) in zipped_paths.enumerate() { + hash = if final_key.bit(depth) { + self.hasher.hash_branch(&left, &hash) + } else { + self.hasher.hash_branch(&hash, &right) + }; + } + hash + } +} diff --git a/core/lib/merkle_tree/src/lib.rs b/core/lib/merkle_tree/src/lib.rs index 8296e1598ff4..a3344d1d6704 100644 --- a/core/lib/merkle_tree/src/lib.rs +++ b/core/lib/merkle_tree/src/lib.rs @@ -64,15 +64,16 @@ pub mod unstable { } pub use crate::{ - hasher::HashTree, + errors::NoVersionError, + hasher::{HashTree, TreeRangeDigest}, pruning::{MerkleTreePruner, MerkleTreePrunerHandle}, storage::{ Database, MerkleTreeColumnFamily, PatchSet, Patched, PruneDatabase, PrunePatchSet, RocksDBWrapper, }, types::{ - BlockOutput, BlockOutputWithProofs, Key, TreeInstruction, TreeLogEntry, - TreeLogEntryWithProof, ValueHash, + BlockOutput, BlockOutputWithProofs, Key, TreeEntry, TreeEntryWithProof, TreeInstruction, + TreeLogEntry, TreeLogEntryWithProof, ValueHash, }, }; diff --git a/core/lib/merkle_tree/src/storage/mod.rs b/core/lib/merkle_tree/src/storage/mod.rs index 3c653d161769..a7553727467a 100644 --- a/core/lib/merkle_tree/src/storage/mod.rs +++ b/core/lib/merkle_tree/src/storage/mod.rs @@ -92,23 +92,6 @@ impl TreeUpdater { longest_prefixes } - fn traverse(&self, key: Key, parent_nibbles: &Nibbles) -> TraverseOutcome { - for nibble_idx in parent_nibbles.nibble_count().. { - let nibbles = Nibbles::new(&key, nibble_idx); - match self.patch_set.get(&nibbles) { - Some(Node::Internal(_)) => { /* continue descent */ } - Some(Node::Leaf(leaf)) if leaf.full_key == key => { - return TraverseOutcome::LeafMatch(nibbles, *leaf); - } - Some(Node::Leaf(leaf)) => { - return TraverseOutcome::LeafMismatch(nibbles, *leaf); - } - None => return TraverseOutcome::MissingChild(nibbles), - } - } - unreachable!("We must have encountered a leaf or missing node when traversing"); - } - /// Inserts or updates a value hash for the specified `key`. This implementation /// is almost verbatim the algorithm described in the Jellyfish Merkle tree white paper. 
/// The algorithm from the paper is as follows: @@ -138,7 +121,7 @@ impl TreeUpdater { leaf_index_fn: impl FnOnce() -> u64, ) -> (TreeLogEntry, NewLeafData) { let version = self.patch_set.version(); - let traverse_outcome = self.traverse(key, parent_nibbles); + let traverse_outcome = self.patch_set.traverse(key, parent_nibbles); let (log, leaf_data) = match traverse_outcome { TraverseOutcome::LeafMatch(nibbles, mut leaf) => { let log = TreeLogEntry::update(leaf.value_hash, leaf.leaf_index); diff --git a/core/lib/merkle_tree/src/storage/patch.rs b/core/lib/merkle_tree/src/storage/patch.rs index 2cfe2c9375b1..5f98f3a2a667 100644 --- a/core/lib/merkle_tree/src/storage/patch.rs +++ b/core/lib/merkle_tree/src/storage/patch.rs @@ -5,14 +5,14 @@ use rayon::prelude::*; use std::collections::{hash_map::Entry, HashMap}; use crate::{ - hasher::HashTree, + hasher::{HashTree, HasherWithStats, MerklePath}, metrics::HashingStats, - storage::{proofs::SUBTREE_COUNT, SortedKeys}, + storage::{proofs::SUBTREE_COUNT, SortedKeys, TraverseOutcome}, types::{ - ChildRef, InternalNode, Manifest, Nibbles, NibblesBytes, Node, NodeKey, Root, ValueHash, - KEY_SIZE, + ChildRef, InternalNode, Key, LeafNode, Manifest, Nibbles, NibblesBytes, Node, NodeKey, + Root, ValueHash, KEY_SIZE, }, - Database, + utils, Database, }; /// Raw set of database changes. @@ -512,6 +512,83 @@ impl WorkingPatchSet { db_reads, } } + + pub(super) fn traverse(&self, key: Key, parent_nibbles: &Nibbles) -> TraverseOutcome { + for nibble_idx in parent_nibbles.nibble_count().. { + let nibbles = Nibbles::new(&key, nibble_idx); + match self.get(&nibbles) { + Some(Node::Internal(_)) => { /* continue descent */ } + Some(Node::Leaf(leaf)) if leaf.full_key == key => { + return TraverseOutcome::LeafMatch(nibbles, *leaf); + } + Some(Node::Leaf(leaf)) => { + return TraverseOutcome::LeafMismatch(nibbles, *leaf); + } + None => return TraverseOutcome::MissingChild(nibbles), + } + } + unreachable!("We must have encountered a leaf or missing node when traversing"); + } + + /// Creates a Merkle proof for the specified `key`, which has given `parent_nibbles` + /// in this patch set. `root_nibble_count` specifies to which level the proof needs to be constructed. + pub(crate) fn create_proof( + &mut self, + hasher: &mut HasherWithStats<'_>, + key: Key, + parent_nibbles: &Nibbles, + root_nibble_count: usize, + ) -> (Option, MerklePath) { + let traverse_outcome = self.traverse(key, parent_nibbles); + let merkle_path = match traverse_outcome { + TraverseOutcome::MissingChild(_) | TraverseOutcome::LeafMatch(..) => None, + TraverseOutcome::LeafMismatch(nibbles, leaf) => { + // Find the level at which `leaf.full_key` and `key` diverge. + // Note the addition of 1; e.g., if the keys differ at 0th bit, they + // differ at level 1 of the tree. + let diverging_level = utils::find_diverging_bit(key, leaf.full_key) + 1; + let nibble_count = nibbles.nibble_count(); + debug_assert!(diverging_level > 4 * nibble_count); + let mut path = MerklePath::new(diverging_level); + // Find the hash of the existing `leaf` at the level, and include it + // as the first hash on the Merkle path. + let adjacent_hash = leaf.hash(hasher, diverging_level); + path.push(hasher, Some(adjacent_hash)); + // Fill the path with empty hashes until we've reached the leaf level. 
+ for _ in (4 * nibble_count + 1)..diverging_level { + path.push(hasher, None); + } + Some(path) + } + }; + + let mut nibbles = traverse_outcome.position(); + let leaf_level = nibbles.nibble_count() * 4; + debug_assert!(leaf_level >= root_nibble_count); + + let mut merkle_path = merkle_path.unwrap_or_else(|| MerklePath::new(leaf_level)); + while let Some((parent_nibbles, last_nibble)) = nibbles.split_last() { + if parent_nibbles.nibble_count() < root_nibble_count { + break; + } + + let parent = self.get_mut_without_updating(&parent_nibbles); + let Some(Node::Internal(parent)) = parent else { + unreachable!() + }; + let parent_level = parent_nibbles.nibble_count() * 4; + parent + .updater(hasher, parent_level, last_nibble) + .extend_merkle_path(&mut merkle_path); + nibbles = parent_nibbles; + } + + let leaf = match traverse_outcome { + TraverseOutcome::MissingChild(_) | TraverseOutcome::LeafMismatch(..) => None, + TraverseOutcome::LeafMatch(_, leaf) => Some(leaf), + }; + (leaf, merkle_path) + } } #[cfg(test)] diff --git a/core/lib/merkle_tree/src/storage/proofs.rs b/core/lib/merkle_tree/src/storage/proofs.rs index a854ee5e6277..a9ad624225d1 100644 --- a/core/lib/merkle_tree/src/storage/proofs.rs +++ b/core/lib/merkle_tree/src/storage/proofs.rs @@ -63,12 +63,12 @@ use rayon::prelude::*; use crate::{ hasher::{HasherWithStats, MerklePath}, metrics::{HashingStats, TreeUpdaterStats, BLOCK_TIMINGS, GENERAL_METRICS}, - storage::{Database, NewLeafData, PatchSet, SortedKeys, Storage, TraverseOutcome, TreeUpdater}, + storage::{Database, NewLeafData, PatchSet, SortedKeys, Storage, TreeUpdater}, types::{ BlockOutputWithProofs, InternalNode, Key, Nibbles, Node, TreeInstruction, TreeLogEntry, TreeLogEntryWithProof, ValueHash, }, - utils::{find_diverging_bit, increment_counter, merge_by_index}, + utils::{increment_counter, merge_by_index}, }; /// Number of subtrees used for parallel computations. @@ -179,61 +179,18 @@ impl TreeUpdater { key: Key, parent_nibbles: &Nibbles, ) -> (TreeLogEntry, MerklePath) { - let traverse_outcome = self.traverse(key, parent_nibbles); - let (operation, merkle_path) = match traverse_outcome { - TraverseOutcome::MissingChild(_) => (TreeLogEntry::ReadMissingKey, None), - TraverseOutcome::LeafMatch(_, leaf) => { - let log = TreeLogEntry::read(leaf.value_hash, leaf.leaf_index); - (log, None) - } - TraverseOutcome::LeafMismatch(nibbles, leaf) => { - // Find the level at which `leaf.full_key` and `key` diverge. - // Note the addition of 1; e.g., if the keys differ at 0th bit, they - // differ at level 1 of the tree. - let diverging_level = find_diverging_bit(key, leaf.full_key) + 1; - let nibble_count = nibbles.nibble_count(); - debug_assert!(diverging_level > 4 * nibble_count); - let mut path = MerklePath::new(diverging_level); - // Find the hash of the existing `leaf` at the level, and include it - // as the first hash on the Merkle path. - let adjacent_hash = leaf.hash(hasher, diverging_level); - path.push(hasher, Some(adjacent_hash)); - // Fill the path with empty hashes until we've reached the leaf level. 
- for _ in (4 * nibble_count + 1)..diverging_level { - path.push(hasher, None); - } - (TreeLogEntry::ReadMissingKey, Some(path)) - } - }; + let (leaf, merkle_path) = + self.patch_set + .create_proof(hasher, key, parent_nibbles, SUBTREE_ROOT_LEVEL / 4); + let operation = leaf.map_or(TreeLogEntry::ReadMissingKey, |leaf| { + TreeLogEntry::read(leaf.value_hash, leaf.leaf_index) + }); if matches!(operation, TreeLogEntry::ReadMissingKey) { self.metrics.missing_key_reads += 1; } else { self.metrics.key_reads += 1; } - - let mut nibbles = traverse_outcome.position(); - let leaf_level = nibbles.nibble_count() * 4; - debug_assert!(leaf_level >= SUBTREE_ROOT_LEVEL); - // ^ Because we've ensured an internal root node, all found positions have at least - // 1 nibble. - - let mut merkle_path = merkle_path.unwrap_or_else(|| MerklePath::new(leaf_level)); - while let Some((parent_nibbles, last_nibble)) = nibbles.split_last() { - if parent_nibbles.nibble_count() == 0 { - break; - } - - let parent = self.patch_set.get_mut_without_updating(&parent_nibbles); - let Some(Node::Internal(parent)) = parent else { - unreachable!() - }; - let parent_level = parent_nibbles.nibble_count() * 4; - parent - .updater(hasher, parent_level, last_nibble) - .extend_merkle_path(&mut merkle_path); - nibbles = parent_nibbles; - } (operation, merkle_path) } diff --git a/core/lib/merkle_tree/src/types.rs b/core/lib/merkle_tree/src/types/internal.rs similarity index 83% rename from core/lib/merkle_tree/src/types.rs rename to core/lib/merkle_tree/src/types/internal.rs index 7aa0c1d023e3..86568da7f5de 100644 --- a/core/lib/merkle_tree/src/types.rs +++ b/core/lib/merkle_tree/src/types/internal.rs @@ -1,4 +1,6 @@ -//! Basic storage types. +//! Internal types, mostly related to Merkle tree nodes. Note that because of the public `Database` trait, +//! some of these types are declared as public and can be even exported using the `unstable` module. +//! Still, logically these types are private, so adding them to new public APIs etc. is a logical error. use std::{fmt, num::NonZeroU64}; @@ -16,15 +18,6 @@ pub(crate) const TREE_DEPTH: usize = KEY_SIZE * 8; /// Size of a hashed value in bytes. pub(crate) const HASH_SIZE: usize = 32; -/// Instruction to read or write a tree value at a certain key. -#[derive(Debug, Clone, Copy, PartialEq, Eq)] -pub enum TreeInstruction { - /// Read the current tree value. - Read, - /// Write the specified value. - Write(ValueHash), -} - /// Tags associated with a tree. #[derive(Debug, Clone)] #[cfg_attr(test, derive(PartialEq))] @@ -320,22 +313,6 @@ impl LeafNode { } } -/// Data of a leaf node of the tree. -#[derive(Debug, Clone, Copy)] -pub struct LeafData { - pub value_hash: ValueHash, - pub leaf_index: u64, -} - -impl From for LeafData { - fn from(leaf: LeafNode) -> Self { - Self { - value_hash: leaf.value_hash, - leaf_index: leaf.leaf_index, - } - } -} - /// Reference to a child in an [`InternalNode`]. #[derive(Debug, Clone, Copy)] #[cfg_attr(test, derive(PartialEq, Eq))] @@ -532,97 +509,6 @@ impl StaleNodeKey { } } -/// Output of inserting a block of entries into a Merkle tree. -#[derive(Debug, PartialEq, Eq)] -pub struct BlockOutput { - /// The new hash of the tree. - pub root_hash: ValueHash, - /// The number of leaves in the tree after the update. - pub leaf_count: u64, - /// Information about each insertion / update operation in the order of application. - pub logs: Vec, -} - -/// Information about an the effect of a [`TreeInstruction`]. 
-#[derive(Debug, Clone, Copy, PartialEq, Eq)] -pub enum TreeLogEntry { - /// A node was inserted into the tree. - Inserted { - /// Index of the inserted node. - leaf_index: u64, - }, - /// A node with the specified index was updated. - Updated { - /// Index of the updated node. - leaf_index: u64, - /// Hash of the previous value. - previous_value: ValueHash, - }, - /// A node was read from the tree. - Read { - /// Index of the read node. - leaf_index: u64, - /// Hash of the read value. - value: ValueHash, - }, - /// A missing key was read. - ReadMissingKey, -} - -impl TreeLogEntry { - pub(crate) fn insert(leaf_index: u64) -> Self { - Self::Inserted { leaf_index } - } - - pub(crate) fn update(previous_value: ValueHash, leaf_index: u64) -> Self { - Self::Updated { - leaf_index, - previous_value, - } - } - - pub(crate) fn read(value: ValueHash, leaf_index: u64) -> Self { - Self::Read { leaf_index, value } - } - - pub(crate) fn is_read(&self) -> bool { - matches!(self, Self::Read { .. } | Self::ReadMissingKey) - } -} - -/// Extended output of inserting a block of entries into a Merkle tree that contains -/// Merkle proofs for each operation. -#[derive(Debug)] -pub struct BlockOutputWithProofs { - /// Extended information about each insertion / update operation in the order of application. - pub logs: Vec, - /// The number of leaves in the tree after the update. - pub leaf_count: u64, -} - -impl BlockOutputWithProofs { - /// Returns the final root hash of the Merkle tree. - pub fn root_hash(&self) -> Option { - Some(self.logs.last()?.root_hash) - } -} - -/// [`TreeLogEntry`] together with its authenticity proof. -#[derive(Debug)] -pub struct TreeLogEntryWithProof
<P = Vec<ValueHash>
> { - /// Log entry about an atomic operation on the tree. - pub base: TreeLogEntry, - /// Merkle path to prove the log authenticity. The path consists of up to 256 hashes - /// ordered starting the bottommost level of the tree (one with leaves) and ending before - /// the root level. - /// - /// If the path is not full (contains <256 hashes), it means that the hashes at the beginning - /// corresponding to the empty subtrees are skipped. This allows compacting the proof ~10x. - pub merkle_path: P, - /// Root tree hash after the operation. - pub root_hash: ValueHash, -} - #[cfg(test)] mod tests { use super::*; diff --git a/core/lib/merkle_tree/src/types/mod.rs b/core/lib/merkle_tree/src/types/mod.rs new file mode 100644 index 000000000000..6988735ec021 --- /dev/null +++ b/core/lib/merkle_tree/src/types/mod.rs @@ -0,0 +1,163 @@ +//! Basic storage types. + +mod internal; + +pub(crate) use self::internal::{ + ChildRef, Nibbles, NibblesBytes, StaleNodeKey, TreeTags, HASH_SIZE, KEY_SIZE, TREE_DEPTH, +}; +pub use self::internal::{InternalNode, Key, LeafNode, Manifest, Node, NodeKey, Root, ValueHash}; + +/// Instruction to read or write a tree value at a certain key. +#[derive(Debug, Clone, Copy, PartialEq, Eq)] +pub enum TreeInstruction { + /// Read the current tree value. + Read, + /// Write the specified value. + Write(ValueHash), +} + +/// Entry in a Merkle tree associated with a key. +#[derive(Debug, Clone, Copy)] +pub struct TreeEntry { + /// Value associated with the key. + pub value_hash: ValueHash, + /// Enumeration index of the key. + pub leaf_index: u64, +} + +impl From for TreeEntry { + fn from(leaf: LeafNode) -> Self { + Self { + value_hash: leaf.value_hash, + leaf_index: leaf.leaf_index, + } + } +} + +impl TreeEntry { + pub(crate) fn empty() -> Self { + Self { + value_hash: ValueHash::zero(), + leaf_index: 0, + } + } + + /// Returns `true` iff this entry encodes lack of a value. + pub fn is_empty(&self) -> bool { + self.leaf_index == 0 && self.value_hash.is_zero() + } + + pub(crate) fn with_merkle_path(self, merkle_path: Vec) -> TreeEntryWithProof { + TreeEntryWithProof { + base: self, + merkle_path, + } + } +} + +/// Entry in a Merkle tree together with a proof of authenticity. +#[derive(Debug, Clone)] +pub struct TreeEntryWithProof { + /// Entry in a Merkle tree. + pub base: TreeEntry, + /// Proof of the value authenticity. + /// + /// If specified, a proof is the Merkle path consisting of up to 256 hashes + /// ordered starting the bottommost level of the tree (one with leaves) and ending before + /// the root level. + /// + /// If the path is not full (contains <256 hashes), it means that the hashes at the beginning + /// corresponding to the empty subtrees are skipped. This allows compacting the proof ~10x. + pub merkle_path: Vec, +} + +/// Output of inserting a block of entries into a Merkle tree. +#[derive(Debug, PartialEq, Eq)] +pub struct BlockOutput { + /// The new hash of the tree. + pub root_hash: ValueHash, + /// The number of leaves in the tree after the update. + pub leaf_count: u64, + /// Information about each insertion / update operation in the order of application. + pub logs: Vec, +} + +/// Information about an the effect of a [`TreeInstruction`]. +#[derive(Debug, Clone, Copy, PartialEq, Eq)] +pub enum TreeLogEntry { + /// A node was inserted into the tree. + Inserted { + /// Index of the inserted node. + leaf_index: u64, + }, + /// A node with the specified index was updated. + Updated { + /// Index of the updated node. 
+ leaf_index: u64, + /// Hash of the previous value. + previous_value: ValueHash, + }, + /// A node was read from the tree. + Read { + /// Index of the read node. + leaf_index: u64, + /// Hash of the read value. + value: ValueHash, + }, + /// A missing key was read. + ReadMissingKey, +} + +impl TreeLogEntry { + pub(crate) fn insert(leaf_index: u64) -> Self { + Self::Inserted { leaf_index } + } + + pub(crate) fn update(previous_value: ValueHash, leaf_index: u64) -> Self { + Self::Updated { + leaf_index, + previous_value, + } + } + + pub(crate) fn read(value: ValueHash, leaf_index: u64) -> Self { + Self::Read { leaf_index, value } + } + + pub(crate) fn is_read(&self) -> bool { + matches!(self, Self::Read { .. } | Self::ReadMissingKey) + } +} + +/// Extended output of inserting a block of entries into a Merkle tree that contains +/// Merkle proofs for each operation. +#[derive(Debug)] +pub struct BlockOutputWithProofs { + /// Extended information about each insertion / update operation in the order of application. + pub logs: Vec, + /// The number of leaves in the tree after the update. + pub leaf_count: u64, +} + +impl BlockOutputWithProofs { + /// Returns the final root hash of the Merkle tree. + pub fn root_hash(&self) -> Option { + Some(self.logs.last()?.root_hash) + } +} + +/// [`TreeLogEntry`] together with its authenticity proof. +#[derive(Debug)] +pub struct TreeLogEntryWithProof
<P = Vec<ValueHash>
> { + /// Log entry about an atomic operation on the tree. + pub base: TreeLogEntry, + /// Merkle path to prove log authenticity. The path consists of up to 256 hashes + /// ordered starting the bottommost level of the tree (one with leaves) and ending before + /// the root level. + /// + /// If the path is not full (contains <256 hashes), it means that the hashes at the beginning + /// corresponding to the empty subtrees are skipped. This allows compacting the proof ~10x. + pub merkle_path: P, + /// Root tree hash after the operation. + pub root_hash: ValueHash, +} diff --git a/core/lib/merkle_tree/tests/integration/merkle_tree.rs b/core/lib/merkle_tree/tests/integration/merkle_tree.rs index 96e36f34d1b0..f94335390eea 100644 --- a/core/lib/merkle_tree/tests/integration/merkle_tree.rs +++ b/core/lib/merkle_tree/tests/integration/merkle_tree.rs @@ -3,9 +3,12 @@ use once_cell::sync::Lazy; use rand::{rngs::StdRng, seq::SliceRandom, Rng, SeedableRng}; +use std::{cmp, mem}; + use zksync_crypto::hasher::{blake2::Blake2Hasher, Hasher}; use zksync_merkle_tree::{ Database, HashTree, MerkleTree, PatchSet, Patched, TreeInstruction, TreeLogEntry, + TreeRangeDigest, }; use zksync_types::{AccountTreeId, Address, StorageKey, H256, U256}; @@ -91,7 +94,7 @@ fn root_hash_is_computed_correctly_on_empty_tree() { } #[test] -fn proofs_are_computed_correctly_on_empty_tree() { +fn output_proofs_are_computed_correctly_on_empty_tree() { const RNG_SEED: u64 = 123; let mut rng = StdRng::seed_from_u64(RNG_SEED); @@ -121,6 +124,50 @@ fn proofs_are_computed_correctly_on_empty_tree() { } } +#[test] +fn entry_proofs_are_computed_correctly_on_empty_tree() { + const RNG_SEED: u64 = 123; + + let mut rng = StdRng::seed_from_u64(RNG_SEED); + for kv_count in [1, 2, 3, 5, 8, 13, 21, 100] { + println!("Inserting {kv_count} key-value pairs"); + + let mut tree = MerkleTree::new(PatchSet::default()); + let kvs = generate_key_value_pairs(0..kv_count); + let expected_hash = compute_tree_hash(&kvs); + tree.extend(kvs.clone()); + + let existing_keys: Vec<_> = kvs.iter().map(|(key, _)| *key).collect(); + let entries = tree.entries_with_proofs(0, &existing_keys).unwrap(); + assert_eq!(entries.len(), existing_keys.len()); + for ((key, value), entry) in kvs.iter().zip(entries) { + entry.verify(&Blake2Hasher, *key, expected_hash); + assert_eq!(entry.base.value_hash, *value); + } + + // Test some keys adjacent to existing ones. 
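+ // Flipping a single bit of an existing key yields a key that is almost surely absent from the tree, + // so the returned entry must be empty and its non-inclusion proof must still verify against the root hash.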
+ let adjacent_keys = kvs.iter().flat_map(|(key, _)| { + [ + *key ^ (U256::one() << rng.gen_range(0..256)), + *key ^ (U256::one() << rng.gen_range(0..256)), + *key ^ (U256::one() << rng.gen_range(0..256)), + ] + }); + let random_keys = generate_key_value_pairs(kv_count..(kv_count * 2)) + .into_iter() + .map(|(key, _)| key); + let mut missing_keys: Vec<_> = adjacent_keys.chain(random_keys).collect(); + missing_keys.shuffle(&mut rng); + + let entries = tree.entries_with_proofs(0, &missing_keys).unwrap(); + assert_eq!(entries.len(), missing_keys.len()); + for (key, entry) in missing_keys.iter().zip(entries) { + assert!(entry.base.is_empty()); + entry.verify(&Blake2Hasher, *key, expected_hash); + } + } +} + #[test] fn proofs_are_computed_correctly_for_mixed_instructions() { const RNG_SEED: u64 = 123; @@ -206,7 +253,7 @@ fn root_hash_is_computed_correctly_with_intermediate_commits() { } #[test] -fn proofs_are_computed_correctly_with_intermediate_commits() { +fn output_proofs_are_computed_correctly_with_intermediate_commits() { let (kvs, expected_hash) = &*KVS_AND_HASH; for chunk_size in [3, 5, 10, 17, 28, 42] { println!("Inserting 100 key-value pairs in {chunk_size}-sized chunks"); @@ -223,6 +270,40 @@ fn proofs_are_computed_correctly_with_intermediate_commits() { } } +#[test] +fn entry_proofs_are_computed_correctly_with_intermediate_commits() { + let (kvs, _) = &*KVS_AND_HASH; + let all_keys: Vec<_> = kvs.iter().map(|(key, _)| *key).collect(); + for chunk_size in [10, 17, 28, 42] { + println!("Inserting 100 key-value pairs in {chunk_size}-sized chunks"); + + let mut tree = MerkleTree::new(PatchSet::default()); + let mut root_hashes = vec![]; + for chunk in kvs.chunks(chunk_size) { + let output = tree.extend(chunk.to_vec()); + root_hashes.push(output.root_hash); + + let version = root_hashes.len() - 1; + let entries = tree.entries_with_proofs(version as u64, &all_keys).unwrap(); + assert_eq!(entries.len(), all_keys.len()); + for (i, (key, entry)) in all_keys.iter().zip(entries).enumerate() { + assert_eq!(entry.base.is_empty(), i >= (version + 1) * chunk_size); + entry.verify(&Blake2Hasher, *key, output.root_hash); + } + } + + // Check all tree versions. 
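The loop below relies on the tree's versioning contract: each `extend()` call creates exactly one new version, and proofs can be requested against any past version. A sketch of that contract in isolation (illustrative only, not patch code):

```rust
// Sketch of the versioning behavior the surrounding test relies on:
// every `extend()` bumps the version by one, and historical versions
// stay queryable afterwards. Illustrative only.
use zksync_merkle_tree::{MerkleTree, PatchSet};
use zksync_types::{H256, U256};

fn root_hash_per_version(chunks: Vec<Vec<(U256, H256)>>) -> Vec<H256> {
    let mut tree = MerkleTree::new(PatchSet::default());
    chunks
        .into_iter()
        // After processing chunk `i`, the tree is at version `i`.
        .map(|chunk| tree.extend(chunk).root_hash)
        .collect()
}
```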
+ for (version, root_hash) in root_hashes.into_iter().enumerate() { + let entries = tree.entries_with_proofs(version as u64, &all_keys).unwrap(); + assert_eq!(entries.len(), all_keys.len()); + for (i, (key, entry)) in all_keys.iter().zip(entries).enumerate() { + assert_eq!(entry.base.is_empty(), i >= (version + 1) * chunk_size); + entry.verify(&Blake2Hasher, *key, root_hash); + } + } + } +} + fn test_accumulated_commits(db: DB, chunk_size: usize) -> DB { let (kvs, expected_hash) = &*KVS_AND_HASH; let mut db = Patched::new(db); @@ -388,6 +469,13 @@ fn proofs_are_computed_correctly_with_key_updates() { let output = tree.extend_with_proofs(instructions.clone()); assert_eq!(output.root_hash(), Some(*expected_hash)); output.verify_proofs(&Blake2Hasher, root_hash, &instructions); + + let keys: Vec<_> = kvs.iter().map(|(key, _)| *key).collect(); + let proofs = tree.entries_with_proofs(1, &keys).unwrap(); + for ((key, value), proof) in kvs.iter().zip(proofs) { + assert_eq!(proof.base.value_hash, *value); + proof.verify(&Blake2Hasher, *key, *expected_hash); + } } } @@ -432,6 +520,95 @@ fn root_hash_equals_to_previous_implementation() { test_root_hash_equals_to_previous_implementation(&mut PatchSet::default()); } +fn test_range_proofs_simple(range_size: usize) { + let (kvs, expected_hash) = &*KVS_AND_HASH; + assert!(range_size >= 2 && range_size <= kvs.len()); + + let mut tree = MerkleTree::new(PatchSet::default()); + tree.extend(kvs.clone()); + + let mut sorted_keys: Vec<_> = kvs.iter().map(|(key, _)| *key).collect(); + sorted_keys.sort_unstable(); + + for start_idx in 0..(sorted_keys.len() - range_size) { + let key_range = &sorted_keys[start_idx..(start_idx + range_size)]; + let [first_key, other_keys @ .., last_key] = key_range else { + unreachable!(); + }; + + let mut proven_entries = tree + .entries_with_proofs(0, &[*first_key, *last_key]) + .unwrap(); + let last_entry = proven_entries.pop().unwrap(); + let first_entry = proven_entries.pop().unwrap(); + let other_entries = tree.entries(0, other_keys).unwrap(); + + let mut range = TreeRangeDigest::new(&Blake2Hasher, *first_key, &first_entry); + for (key, entry) in other_keys.iter().zip(other_entries) { + range.update(*key, entry); + } + let range_hash = range.finalize(*last_key, &last_entry); + assert_eq!(range_hash, *expected_hash); + } +} + +#[test] +fn range_proofs_with_multiple_existing_items() { + for range_size in [2, 3, 5, 10, 17, 28, 42] { + println!("Testing range proofs with {range_size} items"); + test_range_proofs_simple(range_size); + } +} + +#[test] +fn range_proofs_for_almost_full_range() { + for range_size in 95..=100 { + println!("Testing range proofs with {range_size} items"); + test_range_proofs_simple(range_size); + } +} + +#[test] +fn range_proofs_with_random_ranges() { + const ITER_COUNT: usize = 100; + const RNG_SEED: u64 = 321; + + let mut rng = StdRng::seed_from_u64(RNG_SEED); + let (kvs, expected_hash) = &*KVS_AND_HASH; + let mut tree = MerkleTree::new(PatchSet::default()); + tree.extend(kvs.clone()); + + for _ in 0..ITER_COUNT { + let mut start_key = U256([rng.gen(), rng.gen(), rng.gen(), rng.gen()]); + let mut end_key = U256([rng.gen(), rng.gen(), rng.gen(), rng.gen()]); + match start_key.cmp(&end_key) { + cmp::Ordering::Less => { /* ok */ } + cmp::Ordering::Equal => continue, + cmp::Ordering::Greater => mem::swap(&mut start_key, &mut end_key), + } + + // Find out keys falling into the range. 
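`test_range_proofs_simple` above encodes the general range-proof recipe: prove only the two boundary keys, stream the interior entries in key order, and compare the digest with the root hash. Isolated, it looks roughly like this (a sketch against the same API; the `&MerkleTree<PatchSet>` signature is an assumption):

```rust
// The range-proof recipe from `test_range_proofs_simple`, in isolation.
// Illustrative only; the receiver type is an assumption.
use zksync_crypto::hasher::blake2::Blake2Hasher;
use zksync_merkle_tree::{MerkleTree, PatchSet, TreeRangeDigest};
use zksync_types::{H256, U256};

fn range_hash_at_version_0(tree: &MerkleTree<PatchSet>, sorted_keys: &[U256]) -> H256 {
    let [first_key, interior @ .., last_key] = sorted_keys else {
        panic!("need at least the two boundary keys");
    };
    // Only the two boundary keys need full Merkle proofs.
    let mut proven = tree
        .entries_with_proofs(0, &[*first_key, *last_key])
        .unwrap();
    let last_entry = proven.pop().unwrap();
    let first_entry = proven.pop().unwrap();

    let mut digest = TreeRangeDigest::new(&Blake2Hasher, *first_key, &first_entry);
    for (key, entry) in interior.iter().zip(tree.entries(0, interior).unwrap()) {
        digest.update(*key, entry);
    }
    // Equals the tree's root hash iff the supplied range is complete and untampered.
    digest.finalize(*last_key, &last_entry)
}
```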
+ let keys_in_range = kvs + .iter() + .filter_map(|&(key, _)| (key > start_key && key < end_key).then_some(key)); + let mut keys_in_range: Vec<_> = keys_in_range.collect(); + keys_in_range.sort_unstable(); + println!("Proving range with {} keys", keys_in_range.len()); + + let mut proven_entries = tree.entries_with_proofs(0, &[start_key, end_key]).unwrap(); + let last_entry = proven_entries.pop().unwrap(); + let first_entry = proven_entries.pop().unwrap(); + let other_entries = tree.entries(0, &keys_in_range).unwrap(); + + let mut range = TreeRangeDigest::new(&Blake2Hasher, start_key, &first_entry); + for (key, entry) in keys_in_range.iter().zip(other_entries) { + range.update(*key, entry); + } + let range_hash = range.finalize(end_key, &last_entry); + assert_eq!(range_hash, *expected_hash); + } +} + /// RocksDB-specific tests. mod rocksdb { use serde::{Deserialize, Serialize}; From 99b8aad24376d9beee0ff260a48fa52dad1b5c4e Mon Sep 17 00:00:00 2001 From: Igor Borodin Date: Sun, 8 Oct 2023 21:13:53 +0200 Subject: [PATCH 23/29] feat: Post FOSS Dockerfiles refactoring, take 1 (#166) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit # What ❔ - Prettifies Dockerfiles - Removes some unnecessary dependencies on running something outside of Docker build (there are still some, though) ## Why ❔ To make a simple `docker build` command work without any additional hacks ## Checklist - [x] PR title corresponds to the body of PR (we generate changelog entries from PRs). - [ ] Tests for the changes have been added / updated. - [x] Documentation comments have been added / updated. - [x] Code has been formatted via `zk fmt` and `zk lint`. --------- Co-authored-by: Yury Akudovich Co-authored-by: github-actions[bot] <41898282+github-actions[bot]@users.noreply.github.com> Co-authored-by: Danil Co-authored-by: Marcin M <128217157+mm-zk@users.noreply.github.com> --- .github/workflows/build-prover-template.yml | 21 ++---------- bin/ci_run | 14 +++++++- docker/circuit-synthesizer/Dockerfile | 9 +++-- docker/contract-verifier/Dockerfile | 11 ++++--- .../cross-external-nodes-checker/Dockerfile | 13 +++----- docker/external-node/Dockerfile | 16 +++++---- docker/local-node/Dockerfile | 2 -- docker/local-node/entrypoint.sh | 2 +- docker/proof-fri-compressor/Dockerfile | 8 ++--- docker/prover-fri-gateway/Dockerfile | 8 ++--- docker/prover-fri/Dockerfile | 8 ++--- docker/prover-gar/Dockerfile | 5 +-- docker/prover-gpu-fri-gar/Dockerfile | 3 +- docker/prover-gpu-fri/Dockerfile | 14 ++++---- docker/prover/Dockerfile | 33 +++++++++++++++---- docker/runner/Dockerfile | 5 --- docker/server-v2/Dockerfile | 12 ++++--- docker/witness-generator/Dockerfile | 8 ++--- docker/witness-vector-generator/Dockerfile | 8 ++--- etc/scripts/prepare_bellman_cuda.sh | 7 ---- etc/scripts/prepare_era_bellman_cuda.sh | 11 ------- infrastructure/zk/src/docker.ts | 18 +++++++--- 22 files changed, 112 insertions(+), 124 deletions(-) delete mode 100644 docker/runner/Dockerfile delete mode 100755 etc/scripts/prepare_bellman_cuda.sh delete mode 100755 etc/scripts/prepare_era_bellman_cuda.sh diff --git a/.github/workflows/build-prover-template.yml b/.github/workflows/build-prover-template.yml index b3a0c262503e..a93a890622a0 100644 --- a/.github/workflows/build-prover-template.yml +++ b/.github/workflows/build-prover-template.yml @@ -28,27 +28,14 @@ on: required: false jobs: - era-bellman-cuda: - name: Get era-bellman-cuda release URLs. 
- runs-on: [ubuntu-latest] - outputs: - source: ${{ steps.release.outputs.source }} - binary: ${{ steps.release.outputs.binary }} - steps: - - id: release - run: | - release=($(curl -v --silent https://api.github.com/repos/matter-labs/era-bellman-cuda/releases/tags/${{ inputs.ERA_BELLMAN_CUDA_RELEASE }} | jq -r ".name, .tarball_url, .assets[0].browser_download_url")) - echo "source=${release[1]}" >> "$GITHUB_OUTPUT" - echo "binary=${release[2]}" >> "$GITHUB_OUTPUT" - build-images: name: Build and Push Docker Images env: image_tag: ${{ inputs.image_tag }} IMAGE_TAG_SUFFIX: ${{ inputs.image_tag_suffix }} RUNNER_COMPOSE_FILE: "docker-compose-runner-nightly.yml" + ERA_BELLMAN_CUDA_RELEASE: ${{ inputs.ERA_BELLMAN_CUDA_RELEASE }} runs-on: [matterlabs-ci-runner] - needs: [era-bellman-cuda] strategy: matrix: component: @@ -65,9 +52,6 @@ jobs: with: submodules: "recursive" - - name: Prepare bellman-cuda directory - run: etc/scripts/prepare_era_bellman_cuda.sh ${{ needs.era-bellman-cuda.outputs.source }} ${{ needs.era-bellman-cuda.outputs.binary }} - - name: setup-env run: | echo ZKSYNC_HOME=$(pwd) >> $GITHUB_ENV @@ -113,7 +97,7 @@ jobs: "proof-fri-compressor" ) run: | - nightly_components=${{env.RUST_NIGHTLY_COMPONENTS}} + nightly_components=${{ env.RUST_NIGHTLY_COMPONENTS }} ci_run docker login -u ${{ secrets.DOCKERHUB_USER }} -p ${{ secrets.DOCKERHUB_TOKEN }} ci_run gcloud auth configure-docker us-docker.pkg.dev,asia-docker.pkg.dev -q @@ -130,6 +114,7 @@ jobs: ci_run echo [workspace] > Cargo.toml ci_run echo members = [\"prover/${underscored_name}\"] >> Cargo.toml ci_run cp prover/Cargo.lock Cargo.lock + PASSED_ENV_VARS="ERA_BELLMAN_CUDA_RELEASE" \ ci_run zk docker $DOCKER_ACTION $COMPONENT else ci_run zk docker $DOCKER_ACTION $COMPONENT diff --git a/bin/ci_run b/bin/ci_run index 0f578106f467..d3fa5414b5fb 100755 --- a/bin/ci_run +++ b/bin/ci_run @@ -3,4 +3,16 @@ # Runs the command from within CI docker-compose environment. cd $ZKSYNC_HOME compose_file="${RUNNER_COMPOSE_FILE:-docker-compose-runner.yml}" -docker-compose -f $compose_file exec -T zk $@ + +# Pass environment variables explicitly if specified +if [ ! -z "$PASSED_ENV_VARS" ]; then + env_vars_option="" + IFS=',' read -ra ADDR <<<"$PASSED_ENV_VARS" + for var in "${ADDR[@]}"; do + value=$(eval echo \$$var) + env_vars_option="${env_vars_option} -e ${var}=${value}" + done + docker-compose -f $compose_file exec -T $env_vars_option zk $@ +else + docker-compose -f $compose_file exec -T zk $@ +fi diff --git a/docker/circuit-synthesizer/Dockerfile b/docker/circuit-synthesizer/Dockerfile index 811e73a19edf..e64ada1d1a89 100644 --- a/docker/circuit-synthesizer/Dockerfile +++ b/docker/circuit-synthesizer/Dockerfile @@ -1,7 +1,3 @@ -# For using private GitHub dependencies, CI downdloads all crates and bellman-cuda dependency outside of the contatiner -# Not expected to work locally - -# syntax=docker/dockerfile:experimental FROM debian:bookworm-slim as builder ARG DEBIAN_FRONTEND=noninteractive @@ -21,10 +17,13 @@ RUN curl https://sh.rustup.rs -sSf | bash -s -- -y && \ WORKDIR /usr/src/zksync COPY . . 
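For clarity, the `PASSED_ENV_VARS` handling that `bin/ci_run` gains above, restated in Rust (purely illustrative; the actual script stays bash):

```rust
// Rust restatement of the PASSED_ENV_VARS logic added to bin/ci_run:
// split a comma-separated list of variable names and expand each into a
// `-e NAME=value` option for `docker-compose exec`. Illustrative only.
fn env_var_options(passed_env_vars: &str) -> Vec<String> {
    passed_env_vars
        .split(',')
        .filter(|name| !name.is_empty())
        .flat_map(|name| {
            // Missing variables expand to empty values, as in the shell version.
            let value = std::env::var(name).unwrap_or_default();
            ["-e".to_owned(), format!("{name}={value}")]
        })
        .collect()
}
```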
-RUN CARGO_HOME=./cargo cargo build --release +RUN cargo build --release FROM debian:bookworm-slim + RUN apt-get update && apt-get install -y curl openssl libpq5 ca-certificates && rm -rf /var/lib/apt/lists/* + COPY core/bin/verification_key_generator_and_server/data/ /core/bin/verification_key_generator_and_server/data/ COPY --from=builder /usr/src/zksync/target/release/zksync_circuit_synthesizer /usr/bin/ + ENTRYPOINT ["zksync_circuit_synthesizer"] diff --git a/docker/contract-verifier/Dockerfile b/docker/contract-verifier/Dockerfile index 74976449ef4f..69e3a0a7df06 100644 --- a/docker/contract-verifier/Dockerfile +++ b/docker/contract-verifier/Dockerfile @@ -1,13 +1,15 @@ -# syntax=docker/dockerfile:experimental +# Will work locally only after building contracts first + FROM rust:1.72-bookworm as builder + RUN apt-get update && apt-get install -y clang && rm -rf /var/lib/apt/lists/* WORKDIR /usr/src/zksync COPY . . -#Needed to get zkEVM package from github link with auth embedded -# Doesn't expected to work local -RUN CARGO_HOME=./cargo cargo build --release + +RUN cargo build --release FROM debian:bookworm-slim + RUN apt-get update && apt-get install -y curl libpq5 ca-certificates wget python3 && rm -rf /var/lib/apt/lists/* # install zksolc @@ -95,5 +97,6 @@ RUN mkdir -p /etc/vyper-bin/0.3.9 \ COPY --from=builder /usr/src/zksync/target/release/zksync_contract_verifier /usr/bin/ COPY etc/system-contracts/bootloader/build/artifacts/ /etc/system-contracts/bootloader/build/artifacts/ COPY etc/system-contracts/artifacts-zk /etc/system-contracts/artifacts-zk + # CMD tail -f /dev/null ENTRYPOINT ["zksync_contract_verifier"] diff --git a/docker/cross-external-nodes-checker/Dockerfile b/docker/cross-external-nodes-checker/Dockerfile index 4f9fc72b9235..301f67e8b0a5 100644 --- a/docker/cross-external-nodes-checker/Dockerfile +++ b/docker/cross-external-nodes-checker/Dockerfile @@ -1,21 +1,16 @@ -# For using private GitHub dependencies, CI downdloads all crates outside of the contatiner -# Not expected to work locally - -# BUILDING STAGE -# syntax=docker/dockerfile:experimental FROM rust:1.72-bookworm as builder + RUN apt-get update && apt-get install -y clang && rm -rf /var/lib/apt/lists/* + WORKDIR /usr/src/zksync COPY . . -RUN CARGO_HOME=./cargo cargo build --release +RUN cargo build --release -# RUNNING STAGE FROM debian:bookworm-slim + RUN apt-get update && apt-get install -y curl ca-certificates && rm -rf /var/lib/apt/lists/* -# Bring the below from the building stage to the final image. COPY --from=builder /usr/src/zksync/target/release/cross_external_nodes_checker /usr/bin -# Run the entrypoint script as the default command. ENTRYPOINT ["cross_external_nodes_checker"] diff --git a/docker/external-node/Dockerfile b/docker/external-node/Dockerfile index 58a341b14a30..60375216ba09 100644 --- a/docker/external-node/Dockerfile +++ b/docker/external-node/Dockerfile @@ -1,23 +1,23 @@ -# For using private GitHub dependencies, CI downdloads all crates outside of the contatiner -# Not expected to work locally +# Will work locally only after prior contracts build -# syntax=docker/dockerfile:experimental FROM rust:1.72-bookworm as builder + RUN apt-get update && apt-get install -y clang && rm -rf /var/lib/apt/lists/* + WORKDIR /usr/src/zksync COPY . . 
-RUN CARGO_HOME=./cargo cargo build --release -RUN CARGO_HOME=./cargo cargo install sqlx-cli --version 0.5.13 +RUN cargo build --release +RUN cargo install sqlx-cli --version 0.5.13 FROM debian:bookworm-slim + RUN apt-get update && apt-get install -y curl libpq5 ca-certificates && rm -rf /var/lib/apt/lists/* COPY --from=builder /usr/src/zksync/target/release/zksync_external_node /usr/bin COPY --from=builder /usr/src/zksync/target/release/block_reverter /usr/bin -COPY --from=builder /usr/src/zksync/cargo/bin/sqlx /usr/bin +COPY --from=builder /usr/local/cargo/bin/sqlx /usr/bin COPY --from=builder /usr/src/zksync/docker/external-node/entrypoint.sh /usr/bin -RUN chmod +x /usr/bin/entrypoint.sh COPY etc/system-contracts/bootloader/build/artifacts/ /etc/system-contracts/bootloader/build/artifacts/ COPY etc/system-contracts/contracts/artifacts/ /etc/system-contracts/contracts/artifacts/ COPY etc/system-contracts/contracts/precompiles/artifacts/ /etc/system-contracts/contracts/precompiles/artifacts/ @@ -29,4 +29,6 @@ COPY etc/ERC20/ /etc/ERC20/ COPY etc/multivm_bootloaders/ /etc/multivm_bootloaders/ COPY core/lib/dal/migrations/ /migrations +RUN chmod +x /usr/bin/entrypoint.sh + ENTRYPOINT [ "sh", "/usr/bin/entrypoint.sh"] diff --git a/docker/local-node/Dockerfile b/docker/local-node/Dockerfile index e34dded01109..2826c4a14744 100644 --- a/docker/local-node/Dockerfile +++ b/docker/local-node/Dockerfile @@ -1,5 +1,3 @@ -# syntax=docker/dockerfile:experimental - # Image is always built from the server image to reuse the common parts # This image is expected to be built locally beforehand (implemented in the `zk` tool) FROM matterlabs/server-v2:latest2.0 diff --git a/docker/local-node/entrypoint.sh b/docker/local-node/entrypoint.sh index 664cf4b3b6d0..e96674d6bdcb 100755 --- a/docker/local-node/entrypoint.sh +++ b/docker/local-node/entrypoint.sh @@ -13,7 +13,7 @@ then echo "Initialing local environment" psql ${DATABASE_URL%/*} -c "create database ${DATABASE_URL##*/}" find /migrations -name "*up.sql" | sort | xargs printf -- ' -f %s' | xargs -t psql $DATABASE_URL - + cd /infrastructure/zk # Compile configs yarn start config compile diff --git a/docker/proof-fri-compressor/Dockerfile b/docker/proof-fri-compressor/Dockerfile index 7fd50a923f99..e60998fac70d 100644 --- a/docker/proof-fri-compressor/Dockerfile +++ b/docker/proof-fri-compressor/Dockerfile @@ -1,7 +1,5 @@ -# For using private GitHub dependencies, CI downdloads all crates outside of the contatiner -# Not expected to work locally +# Will work locally only after prior universal setup key download -# syntax=docker/dockerfile:experimental FROM debian:bookworm-slim as builder ARG DEBIAN_FRONTEND=noninteractive @@ -21,9 +19,10 @@ RUN curl https://sh.rustup.rs -sSf | bash -s -- -y && \ WORKDIR /usr/src/zksync COPY . . 
-RUN CARGO_HOME=./cargo cargo build --release +RUN cargo build --release FROM debian:bookworm-slim + RUN apt-get update && apt-get install -y curl libpq5 ca-certificates && rm -rf /var/lib/apt/lists/* # copy VK required for proof wrapping @@ -33,4 +32,5 @@ COPY prover/vk_setup_data_generator_server_fri/data/ /prover/vk_setup_data_gener COPY setup_2\^26.key /setup_2\^26.key COPY --from=builder /usr/src/zksync/target/release/zksync_proof_fri_compressor /usr/bin/ + ENTRYPOINT ["zksync_proof_fri_compressor"] diff --git a/docker/prover-fri-gateway/Dockerfile b/docker/prover-fri-gateway/Dockerfile index 03341d20df63..6a7b27637abb 100644 --- a/docker/prover-fri-gateway/Dockerfile +++ b/docker/prover-fri-gateway/Dockerfile @@ -1,7 +1,3 @@ -# For using private GitHub dependencies, CI downdloads all crates outside of the contatiner -# Not expected to work locally - -# syntax=docker/dockerfile:experimental FROM debian:bookworm-slim as builder ARG DEBIAN_FRONTEND=noninteractive @@ -21,7 +17,7 @@ RUN curl https://sh.rustup.rs -sSf | bash -s -- -y && \ WORKDIR /usr/src/zksync COPY . . -RUN CARGO_HOME=./cargo cargo build --release +RUN cargo build --release FROM debian:bookworm-slim RUN apt-get update && apt-get install -y curl libpq5 ca-certificates && rm -rf /var/lib/apt/lists/* @@ -29,6 +25,6 @@ RUN apt-get update && apt-get install -y curl libpq5 ca-certificates && rm -rf / # copy VK required for proof wrapping COPY prover/vk_setup_data_generator_server_fri/data/ /prover/vk_setup_data_generator_server_fri/data/ - COPY --from=builder /usr/src/zksync/target/release/zksync_prover_fri_gateway /usr/bin/ + ENTRYPOINT ["zksync_prover_fri_gateway"] diff --git a/docker/prover-fri/Dockerfile b/docker/prover-fri/Dockerfile index 16677d128ee6..1fda048ca33c 100644 --- a/docker/prover-fri/Dockerfile +++ b/docker/prover-fri/Dockerfile @@ -1,7 +1,3 @@ -# For using private GitHub dependencies, CI downdloads all crates outside of the contatiner -# Not expected to work locally - -# syntax=docker/dockerfile:experimental FROM debian:bookworm-slim as builder ARG DEBIAN_FRONTEND=noninteractive @@ -21,7 +17,7 @@ RUN curl https://sh.rustup.rs -sSf | bash -s -- -y && \ WORKDIR /usr/src/zksync COPY . . 
-RUN CARGO_HOME=./cargo cargo build --release +RUN cargo build --release FROM debian:bookworm-slim RUN apt-get update && apt-get install -y curl libpq5 ca-certificates && rm -rf /var/lib/apt/lists/* @@ -29,6 +25,6 @@ RUN apt-get update && apt-get install -y curl libpq5 ca-certificates && rm -rf / # copy VK required for protocol version COPY prover/vk_setup_data_generator_server_fri/data/ /prover/vk_setup_data_generator_server_fri/data/ - COPY --from=builder /usr/src/zksync/target/release/zksync_prover_fri /usr/bin/ + ENTRYPOINT ["zksync_prover_fri"] diff --git a/docker/prover-gar/Dockerfile b/docker/prover-gar/Dockerfile index 974679cbebfa..ced97d6d7e77 100644 --- a/docker/prover-gar/Dockerfile +++ b/docker/prover-gar/Dockerfile @@ -1,5 +1,6 @@ -# syntax=docker/dockerfile:1 -ARG PROVER_IMAGE +# Will work locally only after prior universal key download and Docker login to the private registry + +ARG PROVER_IMAGE=latest FROM us-docker.pkg.dev/matterlabs-infra/matterlabs-docker/prover-v2:2.0-$PROVER_IMAGE as prover FROM nvidia/cuda:11.8.0-runtime-ubuntu22.04 as app diff --git a/docker/prover-gpu-fri-gar/Dockerfile b/docker/prover-gpu-fri-gar/Dockerfile index 44577a79dc83..bd70be7ee4b4 100644 --- a/docker/prover-gpu-fri-gar/Dockerfile +++ b/docker/prover-gpu-fri-gar/Dockerfile @@ -1,8 +1,7 @@ -# syntax=docker/dockerfile:1 ARG PROVER_IMAGE FROM us-docker.pkg.dev/matterlabs-infra/matterlabs-docker/prover-gpu-fri:2.0-$PROVER_IMAGE as prover -FROM nvidia/cuda:12.0.0-runtime-ubuntu22.04 as app +FROM nvidia/cuda:12.2.0-runtime-ubuntu22.04 as app # HACK copying to root is the only way to make Docker layer caching work for these files for some reason COPY *.bin / diff --git a/docker/prover-gpu-fri/Dockerfile b/docker/prover-gpu-fri/Dockerfile index 8234dd2b6b0e..5e37c089ed95 100644 --- a/docker/prover-gpu-fri/Dockerfile +++ b/docker/prover-gpu-fri/Dockerfile @@ -1,8 +1,4 @@ -# For using private GitHub dependencies, CI downdloads all crates outside of the contatiner -# Not expected to work locally - -# syntax=docker/dockerfile:experimental -FROM nvidia/cuda:12.0.0-devel-ubuntu22.04 as builder +FROM nvidia/cuda:12.2.0-devel-ubuntu22.04 as builder ARG DEBIAN_FRONTEND=noninteractive @@ -14,13 +10,13 @@ ENV RUSTUP_HOME=/usr/local/rustup \ CARGO_HOME=/usr/local/cargo \ PATH=/usr/local/cargo/bin:$PATH -ENV CUDAARCHS=75 +# Building for Nvidia L4 +ENV CUDAARCHS=89 RUN curl https://sh.rustup.rs -sSf | bash -s -- -y && \ rustup install nightly-2023-07-21 && \ rustup default nightly-2023-07-21 -# Setup cmake RUN curl -Lo cmake-3.24.2-linux-x86_64.sh https://github.com/Kitware/CMake/releases/download/v3.24.2/cmake-3.24.2-linux-x86_64.sh && \ chmod +x cmake-3.24.2-linux-x86_64.sh && \ ./cmake-3.24.2-linux-x86_64.sh --skip-license --prefix=/usr/local @@ -28,13 +24,15 @@ RUN curl -Lo cmake-3.24.2-linux-x86_64.sh https://github.com/Kitware/CMake/relea WORKDIR /usr/src/zksync COPY . . 
-RUN CARGO_HOME=./cargo cargo build --release --features "gpu" +RUN cargo build --release --features "gpu" FROM nvidia/cuda:12.0.0-devel-ubuntu22.04 + RUN apt-get update && apt-get install -y curl libpq5 ca-certificates && rm -rf /var/lib/apt/lists/* # copy finalization hints required for assembly generation COPY prover/vk_setup_data_generator_server_fri/data/ /prover/vk_setup_data_generator_server_fri/data/ COPY --from=builder /usr/src/zksync/target/release/zksync_prover_fri /usr/bin/ + ENTRYPOINT ["zksync_prover_fri"] diff --git a/docker/prover/Dockerfile b/docker/prover/Dockerfile index 74ce3c12c100..25e0d8d8e0d1 100644 --- a/docker/prover/Dockerfile +++ b/docker/prover/Dockerfile @@ -1,12 +1,12 @@ -# For using private GitHub dependencies, CI downdloads all crates and bellman-cuda dependency outside of the contatiner -# Not expected to work locally +# Will work locally only after prior contracts build and universal setup key download -# syntax=docker/dockerfile:experimental FROM nvidia/cuda:11.8.0-devel-ubuntu22.04 as builder ARG DEBIAN_FRONTEND=noninteractive -RUN apt-get update && apt-get install -y curl clang openssl libssl-dev gcc g++ \ +ARG ERA_BELLMAN_CUDA_RELEASE=latest + +RUN apt-get update && apt-get install -y curl jq clang openssl libssl-dev gcc g++ \ pkg-config build-essential libclang-dev && \ rm -rf /var/lib/apt/lists/* @@ -19,11 +19,31 @@ RUN curl https://sh.rustup.rs -sSf | bash -s -- -y && \ rustup default nightly-2023-07-21 WORKDIR /usr/src/zksync -COPY . . + +ENV GITHUB_OWNER=matter-labs +ENV GITHUB_REPO=era-bellman-cuda + +RUN set -e; \ + if [ "$ERA_BELLMAN_CUDA_RELEASE" = "latest" ]; then \ + latest_release_data=$(curl --silent "https://api.github.com/repos/${GITHUB_OWNER}/${GITHUB_REPO}/releases"); \ + latest_tag=$(echo "$latest_release_data" | jq -r '.[0].tag_name'); \ + source_url="https://github.com/${GITHUB_OWNER}/${GITHUB_REPO}/archive/refs/tags/${latest_tag}.tar.gz"; \ + binary_url="https://github.com/${GITHUB_OWNER}/${GITHUB_REPO}/releases/download/${latest_tag}/bellman-cuda.tar.gz"; \ + else \ + source_url="https://github.com/${GITHUB_OWNER}/${GITHUB_REPO}/archive/refs/tags/${ERA_BELLMAN_CUDA_RELEASE}.tar.gz"; \ + binary_url="https://github.com/${GITHUB_OWNER}/${GITHUB_REPO}/releases/download/${ERA_BELLMAN_CUDA_RELEASE}/bellman-cuda.tar.gz"; \ + fi; \ + curl --silent --location "$source_url" --output bellman-cuda-source.tar.gz; \ + curl --silent --location "$binary_url" --output bellman-cuda.tar.gz; \ + mkdir -p bellman-cuda; \ + tar xvfz bellman-cuda.tar.gz -C ./bellman-cuda; \ + tar xvfz bellman-cuda-source.tar.gz -C ./bellman-cuda --strip-components=1 ENV BELLMAN_CUDA_DIR=/usr/src/zksync/bellman-cuda -RUN CARGO_HOME=./cargo cargo build --release +COPY . . 
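The release-resolution shell above is dense; the same URL selection, restated in Rust for readability. Assumed dependencies, not part of this patch: `reqwest` (with the `blocking` and `json` features), `serde_json`, and `anyhow`. The newest-first ordering of the GitHub releases list mirrors the `.[0].tag_name` jq expression in the Dockerfile.

```rust
// Rust restatement of the ERA_BELLMAN_CUDA_RELEASE resolution in the Dockerfile
// above. Assumed (not part of the patch): reqwest "blocking" + "json" features,
// serde_json, anyhow.
fn bellman_cuda_urls(release: &str) -> anyhow::Result<(String, String)> {
    const REPO: &str = "matter-labs/era-bellman-cuda";
    let tag = if release == "latest" {
        // GitHub lists releases newest-first; mirrors `.[0].tag_name` in the jq call.
        let releases: serde_json::Value = reqwest::blocking::Client::new()
            .get(format!("https://api.github.com/repos/{REPO}/releases"))
            .header("User-Agent", "bellman-cuda-fetch-example") // GitHub API requires a UA
            .send()?
            .json()?;
        releases[0]["tag_name"]
            .as_str()
            .unwrap_or_default()
            .to_owned()
    } else {
        release.to_owned()
    };
    let source = format!("https://github.com/{REPO}/archive/refs/tags/{tag}.tar.gz");
    let binary = format!("https://github.com/{REPO}/releases/download/{tag}/bellman-cuda.tar.gz");
    Ok((source, binary))
}
```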
+ +RUN cargo build --release FROM nvidia/cuda:11.8.0-runtime-ubuntu22.04 as runner @@ -40,4 +60,5 @@ COPY setup_2\^26.key /etc/ COPY core/bin/verification_key_generator_and_server/data/ /core/bin/verification_key_generator_and_server/data/ COPY --from=builder /usr/src/zksync/target/release/zksync_prover /usr/bin/ + ENTRYPOINT ["zksync_prover"] diff --git a/docker/runner/Dockerfile b/docker/runner/Dockerfile deleted file mode 100644 index bec7c1e015f1..000000000000 --- a/docker/runner/Dockerfile +++ /dev/null @@ -1,5 +0,0 @@ -FROM tcardonne/github-runner -FROM docker:dind -RUN apk update -RUN apk add py-pip python3-dev libffi-dev openssl-dev gcc libc-dev make -RUN pip install docker-compose diff --git a/docker/server-v2/Dockerfile b/docker/server-v2/Dockerfile index abd92a76320d..074e2e306ec9 100644 --- a/docker/server-v2/Dockerfile +++ b/docker/server-v2/Dockerfile @@ -1,22 +1,24 @@ -# For using private GitHub dependencies, CI downdloads all crates outside of the contatiner -# Not expected to work locally +# Will work locally only after prior contracts build -# syntax=docker/dockerfile:experimental FROM rust:1.72-bookworm as builder + RUN apt-get update && apt-get install -y linux-libc-dev liburing-dev clang && \ - # ^ We need a newer version of `linux-libc-dev` from backports than the one installed by default rm -rf /var/lib/apt/lists/* + WORKDIR /usr/src/zksync COPY . . -RUN CARGO_HOME=./cargo cargo build --release --features=rocksdb/io-uring +RUN cargo build --release --features=rocksdb/io-uring FROM debian:bookworm-slim + RUN apt-get update && apt-get install -y curl libpq5 liburing-dev ca-certificates && \ rm -rf /var/lib/apt/lists/* + EXPOSE 3000 EXPOSE 3031 EXPOSE 3030 + COPY --from=builder /usr/src/zksync/target/release/zksync_server /usr/bin COPY --from=builder /usr/src/zksync/target/release/block_reverter /usr/bin COPY --from=builder /usr/src/zksync/target/release/merkle_tree_consistency_checker /usr/bin diff --git a/docker/witness-generator/Dockerfile b/docker/witness-generator/Dockerfile index a917a73669fb..984d85203133 100644 --- a/docker/witness-generator/Dockerfile +++ b/docker/witness-generator/Dockerfile @@ -1,7 +1,3 @@ -# For using private GitHub dependencies, CI downdloads all crates outside of the contatiner -# Not expected to work locally - -# syntax=docker/dockerfile:experimental FROM debian:bookworm-slim as builder ARG DEBIAN_FRONTEND=noninteractive @@ -21,12 +17,14 @@ RUN curl https://sh.rustup.rs -sSf | bash -s -- -y && \ WORKDIR /usr/src/zksync COPY . . 
-RUN CARGO_HOME=./cargo cargo build --release +RUN cargo build --release FROM debian:bookworm-slim + RUN apt-get update && apt-get install -y curl libpq5 ca-certificates && rm -rf /var/lib/apt/lists/* COPY prover/vk_setup_data_generator_server_fri/data/ /prover/vk_setup_data_generator_server_fri/data/ COPY --from=builder /usr/src/zksync/target/release/zksync_witness_generator /usr/bin/ + ENTRYPOINT ["zksync_witness_generator"] diff --git a/docker/witness-vector-generator/Dockerfile b/docker/witness-vector-generator/Dockerfile index 4b26646801c5..b7e1c320cfbc 100644 --- a/docker/witness-vector-generator/Dockerfile +++ b/docker/witness-vector-generator/Dockerfile @@ -1,7 +1,3 @@ -# For using private GitHub dependencies, CI downdloads all crates outside of the contatiner -# Not expected to work locally - -# syntax=docker/dockerfile:experimental FROM debian:bookworm-slim as builder ARG DEBIAN_FRONTEND=noninteractive @@ -21,13 +17,15 @@ RUN curl https://sh.rustup.rs -sSf | bash -s -- -y && \ WORKDIR /usr/src/zksync COPY . . -RUN CARGO_HOME=./cargo cargo build --release +RUN cargo build --release FROM debian:bookworm-slim + RUN apt-get update && apt-get install -y curl libpq5 ca-certificates && rm -rf /var/lib/apt/lists/* # copy finalization hints required for witness vector generation COPY prover/vk_setup_data_generator_server_fri/data/ /prover/vk_setup_data_generator_server_fri/data/ COPY --from=builder /usr/src/zksync/target/release/zksync_witness_vector_generator /usr/bin/ + ENTRYPOINT ["zksync_witness_vector_generator"] diff --git a/etc/scripts/prepare_bellman_cuda.sh b/etc/scripts/prepare_bellman_cuda.sh deleted file mode 100755 index db0ba745bbab..000000000000 --- a/etc/scripts/prepare_bellman_cuda.sh +++ /dev/null @@ -1,7 +0,0 @@ -echo "preparing bellman cuda directory" -gh release -R github.com/matter-labs/bellman-cuda download "$1" -gh release -R github.com/matter-labs/bellman-cuda download "$1" -A tar.gz -mkdir -p bellman-cuda -tar xvf bellman-cuda.tar.gz -C ./bellman-cuda -tar xvf bellman-cuda-"$1".tar.gz -mv bellman-cuda-"$1"/* ./bellman-cuda/ diff --git a/etc/scripts/prepare_era_bellman_cuda.sh b/etc/scripts/prepare_era_bellman_cuda.sh deleted file mode 100755 index 270c326217bc..000000000000 --- a/etc/scripts/prepare_era_bellman_cuda.sh +++ /dev/null @@ -1,11 +0,0 @@ -#!/bin/bash -set -e - -source="$1" -binary="$2" - -curl --silent --location "${source}" --output bellman-cuda-source.tar.gz -curl --silent --location "${binary}" --output bellman-cuda.tar.gz -mkdir -p bellman-cuda -tar xvfz bellman-cuda.tar.gz -C ./bellman-cuda -tar xvfz bellman-cuda-source.tar.gz -C ./bellman-cuda --strip-components=1 diff --git a/infrastructure/zk/src/docker.ts b/infrastructure/zk/src/docker.ts index 64c23828d1f9..17e93117b965 100644 --- a/infrastructure/zk/src/docker.ts +++ b/infrastructure/zk/src/docker.ts @@ -86,22 +86,30 @@ function defaultTagList(image: string, imageTagSha: string, imageTagShaTS: strin } async function _build(image: string, tagList: string[]) { - if (image == 'server-v2' || image == 'external-node' || image == 'prover') { + if (image === 'server-v2' || image === 'external-node' || image === 'prover') { await contract.build(); } const tagsToBuild = tagList.map((tag) => `-t matterlabs/${image}:${tag}`).join(' '); - // generate list of tags for image - we want 3 tags (latest, SHA, SHA+TimeStamp) for listed components and only "latest" for everything else - await utils.spawn(`CARGO_HOME=./cargo cargo fetch`); + // Conditionally add build argument if image is prover-v2 + let 
buildArgs = ''; + if (image === 'prover-v2') { + const eraBellmanCudaRelease = process.env.ERA_BELLMAN_CUDA_RELEASE; + buildArgs = `--build-arg ERA_BELLMAN_CUDA_RELEASE=${eraBellmanCudaRelease}`; + } // HACK // For prover-v2 which is not a prover, but should be built from the prover dockerfile. So here we go. const imagePath = image == 'prover-v2' ? 'prover' : image; - // build image with needed tags - await utils.spawn(`DOCKER_BUILDKIT=1 docker build ${tagsToBuild} -f ./docker/${imagePath}/Dockerfile .`); + const buildCommand = + `DOCKER_BUILDKIT=1 docker build ${tagsToBuild}` + + (buildArgs ? ` ${buildArgs}` : '') + + ` -f ./docker/${imagePath}/Dockerfile .`; + + await utils.spawn(buildCommand); } async function _push(image: string, tagList: string[], publishPublic: boolean = false) { From fa7165002884e7137b623feec3721cbbe3332a40 Mon Sep 17 00:00:00 2001 From: perekopskiy <53865202+perekopskiy@users.noreply.github.com> Date: Mon, 9 Oct 2023 12:13:12 +0300 Subject: [PATCH 24/29] fix(db): drop constraint prover_jobs_fri_l1_batch_number_fkey (#173) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit # What ❔ `prover_jobs_fri_l1_batch_number_fkey` is dropped. ## Why ❔ `prover_jobs_fri` is filled only in prover DB, while `l1_batches` -- only in core DB, so we can't have the constraint. ## Checklist - [x] PR title corresponds to the body of PR (we generate changelog entries from PRs). - [ ] Tests for the changes have been added / updated. - [ ] Documentation comments have been added / updated. - [ ] Code has been formatted via `zk fmt` and `zk lint`. --- ...009073918_drop-prover_jobs_fri_l1_batch_number_fkey.down.sql | 2 ++ ...31009073918_drop-prover_jobs_fri_l1_batch_number_fkey.up.sql | 1 + 2 files changed, 3 insertions(+) create mode 100644 core/lib/dal/migrations/20231009073918_drop-prover_jobs_fri_l1_batch_number_fkey.down.sql create mode 100644 core/lib/dal/migrations/20231009073918_drop-prover_jobs_fri_l1_batch_number_fkey.up.sql diff --git a/core/lib/dal/migrations/20231009073918_drop-prover_jobs_fri_l1_batch_number_fkey.down.sql b/core/lib/dal/migrations/20231009073918_drop-prover_jobs_fri_l1_batch_number_fkey.down.sql new file mode 100644 index 000000000000..ca6f0f843025 --- /dev/null +++ b/core/lib/dal/migrations/20231009073918_drop-prover_jobs_fri_l1_batch_number_fkey.down.sql @@ -0,0 +1,2 @@ +ALTER TABLE prover_jobs_fri ADD CONSTRAINT prover_jobs_fri_l1_batch_number_fkey + FOREIGN KEY (l1_batch_number) REFERENCES l1_batches (number); diff --git a/core/lib/dal/migrations/20231009073918_drop-prover_jobs_fri_l1_batch_number_fkey.up.sql b/core/lib/dal/migrations/20231009073918_drop-prover_jobs_fri_l1_batch_number_fkey.up.sql new file mode 100644 index 000000000000..2835b15b64fa --- /dev/null +++ b/core/lib/dal/migrations/20231009073918_drop-prover_jobs_fri_l1_batch_number_fkey.up.sql @@ -0,0 +1 @@ +ALTER TABLE prover_jobs_fri DROP CONSTRAINT IF EXISTS prover_jobs_fri_l1_batch_number_fkey; From fe2d6ada50c59cbe6c917bb2bc1f26a96b6ddab0 Mon Sep 17 00:00:00 2001 From: "Ramon \"9Tails\" Canales" Date: Mon, 9 Oct 2023 11:30:04 +0100 Subject: [PATCH 25/29] chore(submodules): Updates contracts and system-contracts submodules (#175) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit # What ❔ Updates the era-contracts and era-system-contracts submodules to their latest versions ## Why ❔ Changes in those submodules are required for the hyperchain initializer to work properly ## Checklist - [X] PR title corresponds to 
the body of PR (we generate changelog entries from PRs). - [ ] Tests for the changes have been added / updated. - [ ] Documentation comments have been added / updated. - [X] Code has been formatted via `zk fmt` and `zk lint`. --- contracts | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/contracts b/contracts index 67f38daa255c..f06a58360a2b 160000 --- a/contracts +++ b/contracts @@ -1 +1 @@ -Subproject commit 67f38daa255c31e778e9a6f8c673f36914513fa1 +Subproject commit f06a58360a2b8e7129f64413998767ac169d1efd From 92b6f5999b66666f01b89b5ff188d220139751a2 Mon Sep 17 00:00:00 2001 From: Danil Date: Mon, 9 Oct 2023 23:22:10 +0200 Subject: [PATCH 26/29] feat(vm)!: Update Refund model (#181) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit # What ❔ ## Why ❔ ## Checklist - [ ] PR title corresponds to the body of PR (we generate changelog entries from PRs). - [ ] Tests for the changes have been added / updated. - [ ] Documentation comments have been added / updated. - [ ] Code has been formatted via `zk fmt` and `zk lint`. Signed-off-by: Danil --- Cargo.lock | 24 + core/lib/multivm/Cargo.toml | 1 + core/lib/multivm/src/glue/block_properties.rs | 4 +- core/lib/multivm/src/glue/history_mode.rs | 22 +- core/lib/multivm/src/glue/init_vm.rs | 18 +- core/lib/multivm/src/glue/oracle_tools.rs | 9 +- core/lib/multivm/src/glue/tracer.rs | 36 +- .../src/glue/tracer/implementations.rs | 48 ++ .../glue/types/vm/bytecompression_result.rs | 33 + .../glue/types/vm/current_execution_state.rs | 15 + .../src/glue/types/vm/execution_result.rs | 18 + core/lib/multivm/src/glue/types/vm/halt.rs | 44 + .../lib/multivm/src/glue/types/vm/l1_batch.rs | 16 + .../lib/multivm/src/glue/types/vm/l2_block.rs | 12 + core/lib/multivm/src/glue/types/vm/mod.rs | 12 + core/lib/multivm/src/glue/types/vm/refunds.rs | 11 + .../multivm/src/glue/types/vm/system_env.rs | 16 + .../src/glue/types/vm/tx_execution_mode.rs | 10 + .../src/glue/types/vm/vm_execution_mode.rs | 12 + .../types/vm/vm_execution_result_and_logs.rs | 13 + .../glue/types/vm/vm_execution_statistics.rs | 14 + .../src/glue/types/vm/vm_memory_metrics.rs | 17 + .../src/glue/types/vm/vm_revert_reason.rs | 17 + core/lib/multivm/src/lib.rs | 5 +- core/lib/multivm/src/vm_instance.rs | 122 ++- core/lib/test_account/src/lib.rs | 21 + core/lib/types/Cargo.toml | 2 +- core/lib/types/src/protocol_version.rs | 11 +- core/lib/types/src/vm_trace.rs | 36 + core/lib/types/src/vm_version.rs | 3 +- core/lib/vm/src/lib.rs | 5 +- core/lib/vm/src/old_vm/history_recorder.rs | 4 + core/lib/vm/src/old_vm/oracles/mod.rs | 1 - core/lib/vm/src/oracles/mod.rs | 1 + core/lib/vm/src/oracles/storage.rs | 414 +++++++++ core/lib/vm/src/tests/l1_tx_execution.rs | 4 +- core/lib/vm/src/tests/rollbacks.rs | 129 ++- core/lib/vm/src/tests/tester/inner_state.rs | 7 + core/lib/vm/src/tracers/call.rs | 2 +- core/lib/vm/src/tracers/refunds.rs | 81 +- .../lib/vm/src/tracers/storage_invocations.rs | 2 +- core/lib/vm/src/tracers/validation/error.rs | 37 +- core/lib/vm/src/tracers/validation/mod.rs | 18 +- core/lib/vm/src/types/internals/vm_state.rs | 2 +- .../api_server/execution_sandbox/tracers.rs | 5 +- .../src/api_server/tx_sender/mod.rs | 3 +- .../multivm_deps/vm_virtual_blocks/Cargo.toml | 33 + core/multivm_deps/vm_virtual_blocks/README.md | 44 + .../src/bootloader_state/l2_block.rs | 83 ++ .../src/bootloader_state/mod.rs | 8 + .../src/bootloader_state/snapshot.rs | 23 + .../src/bootloader_state/state.rs | 254 ++++++ .../src/bootloader_state/tx.rs | 48 ++ 
.../src/bootloader_state/utils.rs | 140 +++ .../vm_virtual_blocks/src/constants.rs | 111 +++ .../src/errors/bootloader_error.rs | 67 ++ .../src/errors/bytecode_compression.rs | 8 + .../vm_virtual_blocks/src/errors/halt.rs | 107 +++ .../vm_virtual_blocks/src/errors/mod.rs | 11 + .../src/errors/tx_revert_reason.rs | 138 +++ .../src/errors/vm_revert_reason.rs | 252 ++++++ .../src/implementation/bytecode.rs | 57 ++ .../src/implementation/execution.rs | 123 +++ .../src/implementation/gas.rs | 42 + .../src/implementation/logs.rs | 64 ++ .../src/implementation/mod.rs | 7 + .../src/implementation/snapshots.rs | 92 ++ .../src/implementation/statistics.rs | 87 ++ .../src/implementation/tx.rs | 65 ++ .../multivm_deps/vm_virtual_blocks/src/lib.rs | 49 ++ .../src/old_vm/event_sink.rs | 171 ++++ .../vm_virtual_blocks/src/old_vm/events.rs | 146 ++++ .../src/old_vm/history_recorder.rs | 805 ++++++++++++++++++ .../vm_virtual_blocks/src/old_vm/memory.rs | 323 +++++++ .../vm_virtual_blocks/src/old_vm/mod.rs | 8 + .../src/old_vm/oracles/decommitter.rs | 238 ++++++ .../src/old_vm/oracles/mod.rs | 9 + .../src/old_vm/oracles/precompile.rs | 75 ++ .../src/old_vm/oracles/storage.rs | 338 ++++++++ .../vm_virtual_blocks/src/old_vm/utils.rs | 222 +++++ .../vm_virtual_blocks/src/tests/bootloader.rs | 53 ++ .../src/tests/bytecode_publishing.rs | 37 + .../src/tests/call_tracer.rs | 86 ++ .../vm_virtual_blocks/src/tests/default_aa.rs | 68 ++ .../vm_virtual_blocks/src/tests/gas_limit.rs | 45 + .../src/tests/get_used_contracts.rs | 104 +++ .../src/tests/invalid_bytecode.rs | 120 +++ .../src/tests/is_write_initial.rs | 42 + .../src/tests/l1_tx_execution.rs | 123 +++ .../vm_virtual_blocks/src/tests/l2_blocks.rs | 500 +++++++++++ .../vm_virtual_blocks/src/tests/mod.rs | 20 + .../src/tests/nonce_holder.rs | 180 ++++ .../vm_virtual_blocks/src/tests/refunds.rs | 152 ++++ .../src/tests/require_eip712.rs | 161 ++++ .../vm_virtual_blocks/src/tests/rollbacks.rs | 146 ++++ .../src/tests/simple_execution.rs | 77 ++ .../src/tests/tester/inner_state.rs | 116 +++ .../vm_virtual_blocks/src/tests/tester/mod.rs | 7 + .../src/tests/tester/transaction_test_info.rs | 216 +++++ .../src/tests/tester/vm_tester.rs | 298 +++++++ .../src/tests/tracing_execution_error.rs | 49 ++ .../vm_virtual_blocks/src/tests/upgrade.rs | 341 ++++++++ .../vm_virtual_blocks/src/tests/utils.rs | 106 +++ .../vm_virtual_blocks/src/tracers/call.rs | 241 ++++++ .../src/tracers/default_tracers.rs | 259 ++++++ .../vm_virtual_blocks/src/tracers/mod.rs | 15 + .../vm_virtual_blocks/src/tracers/refunds.rs | 394 +++++++++ .../src/tracers/result_tracer.rs | 246 ++++++ .../src/tracers/storage_invocations.rs | 44 + .../vm_virtual_blocks/src/tracers/traits.rs | 85 ++ .../vm_virtual_blocks/src/tracers/utils.rs | 224 +++++ .../src/tracers/validation/error.rs | 22 + .../src/tracers/validation/mod.rs | 388 +++++++++ .../src/tracers/validation/params.rs | 18 + .../src/tracers/validation/types.rs | 18 + .../src/types/inputs/execution_mode.rs | 15 + .../src/types/inputs/l1_batch_env.rs | 76 ++ .../src/types/inputs/l2_block.rs | 9 + .../vm_virtual_blocks/src/types/inputs/mod.rs | 9 + .../src/types/inputs/system_env.rs | 52 ++ .../src/types/internals/mod.rs | 7 + .../src/types/internals/snapshot.rs | 11 + .../src/types/internals/transaction_data.rs | 344 ++++++++ .../src/types/internals/vm_state.rs | 175 ++++ .../vm_virtual_blocks/src/types/mod.rs | 3 + .../src/types/outputs/execution_result.rs | 83 ++ .../src/types/outputs/execution_state.rs | 22 + .../src/types/outputs/finished_l1batch.rs 
| 12 + .../src/types/outputs/l2_block.rs | 7 + .../src/types/outputs/mod.rs | 11 + .../src/types/outputs/statistic.rs | 26 + .../vm_virtual_blocks/src/utils/fee.rs | 29 + .../vm_virtual_blocks/src/utils/l2_blocks.rs | 93 ++ .../vm_virtual_blocks/src/utils/mod.rs | 5 + .../vm_virtual_blocks/src/utils/overhead.rs | 347 ++++++++ .../src/utils/transaction_encoding.rs | 15 + core/multivm_deps/vm_virtual_blocks/src/vm.rs | 158 ++++ 137 files changed, 11929 insertions(+), 166 deletions(-) create mode 100644 core/lib/multivm/src/glue/tracer/implementations.rs create mode 100644 core/lib/multivm/src/glue/types/vm/bytecompression_result.rs create mode 100644 core/lib/multivm/src/glue/types/vm/current_execution_state.rs create mode 100644 core/lib/multivm/src/glue/types/vm/execution_result.rs create mode 100644 core/lib/multivm/src/glue/types/vm/halt.rs create mode 100644 core/lib/multivm/src/glue/types/vm/l1_batch.rs create mode 100644 core/lib/multivm/src/glue/types/vm/l2_block.rs create mode 100644 core/lib/multivm/src/glue/types/vm/refunds.rs create mode 100644 core/lib/multivm/src/glue/types/vm/system_env.rs create mode 100644 core/lib/multivm/src/glue/types/vm/vm_execution_mode.rs create mode 100644 core/lib/multivm/src/glue/types/vm/vm_execution_result_and_logs.rs create mode 100644 core/lib/multivm/src/glue/types/vm/vm_execution_statistics.rs create mode 100644 core/lib/multivm/src/glue/types/vm/vm_memory_metrics.rs create mode 100644 core/lib/vm/src/oracles/mod.rs create mode 100644 core/lib/vm/src/oracles/storage.rs create mode 100644 core/multivm_deps/vm_virtual_blocks/Cargo.toml create mode 100644 core/multivm_deps/vm_virtual_blocks/README.md create mode 100644 core/multivm_deps/vm_virtual_blocks/src/bootloader_state/l2_block.rs create mode 100644 core/multivm_deps/vm_virtual_blocks/src/bootloader_state/mod.rs create mode 100644 core/multivm_deps/vm_virtual_blocks/src/bootloader_state/snapshot.rs create mode 100644 core/multivm_deps/vm_virtual_blocks/src/bootloader_state/state.rs create mode 100644 core/multivm_deps/vm_virtual_blocks/src/bootloader_state/tx.rs create mode 100644 core/multivm_deps/vm_virtual_blocks/src/bootloader_state/utils.rs create mode 100644 core/multivm_deps/vm_virtual_blocks/src/constants.rs create mode 100644 core/multivm_deps/vm_virtual_blocks/src/errors/bootloader_error.rs create mode 100644 core/multivm_deps/vm_virtual_blocks/src/errors/bytecode_compression.rs create mode 100644 core/multivm_deps/vm_virtual_blocks/src/errors/halt.rs create mode 100644 core/multivm_deps/vm_virtual_blocks/src/errors/mod.rs create mode 100644 core/multivm_deps/vm_virtual_blocks/src/errors/tx_revert_reason.rs create mode 100644 core/multivm_deps/vm_virtual_blocks/src/errors/vm_revert_reason.rs create mode 100644 core/multivm_deps/vm_virtual_blocks/src/implementation/bytecode.rs create mode 100644 core/multivm_deps/vm_virtual_blocks/src/implementation/execution.rs create mode 100644 core/multivm_deps/vm_virtual_blocks/src/implementation/gas.rs create mode 100644 core/multivm_deps/vm_virtual_blocks/src/implementation/logs.rs create mode 100644 core/multivm_deps/vm_virtual_blocks/src/implementation/mod.rs create mode 100644 core/multivm_deps/vm_virtual_blocks/src/implementation/snapshots.rs create mode 100644 core/multivm_deps/vm_virtual_blocks/src/implementation/statistics.rs create mode 100644 core/multivm_deps/vm_virtual_blocks/src/implementation/tx.rs create mode 100644 core/multivm_deps/vm_virtual_blocks/src/lib.rs create mode 100644 
core/multivm_deps/vm_virtual_blocks/src/old_vm/event_sink.rs create mode 100644 core/multivm_deps/vm_virtual_blocks/src/old_vm/events.rs create mode 100644 core/multivm_deps/vm_virtual_blocks/src/old_vm/history_recorder.rs create mode 100644 core/multivm_deps/vm_virtual_blocks/src/old_vm/memory.rs create mode 100644 core/multivm_deps/vm_virtual_blocks/src/old_vm/mod.rs create mode 100644 core/multivm_deps/vm_virtual_blocks/src/old_vm/oracles/decommitter.rs create mode 100644 core/multivm_deps/vm_virtual_blocks/src/old_vm/oracles/mod.rs create mode 100644 core/multivm_deps/vm_virtual_blocks/src/old_vm/oracles/precompile.rs create mode 100644 core/multivm_deps/vm_virtual_blocks/src/old_vm/oracles/storage.rs create mode 100644 core/multivm_deps/vm_virtual_blocks/src/old_vm/utils.rs create mode 100644 core/multivm_deps/vm_virtual_blocks/src/tests/bootloader.rs create mode 100644 core/multivm_deps/vm_virtual_blocks/src/tests/bytecode_publishing.rs create mode 100644 core/multivm_deps/vm_virtual_blocks/src/tests/call_tracer.rs create mode 100644 core/multivm_deps/vm_virtual_blocks/src/tests/default_aa.rs create mode 100644 core/multivm_deps/vm_virtual_blocks/src/tests/gas_limit.rs create mode 100644 core/multivm_deps/vm_virtual_blocks/src/tests/get_used_contracts.rs create mode 100644 core/multivm_deps/vm_virtual_blocks/src/tests/invalid_bytecode.rs create mode 100644 core/multivm_deps/vm_virtual_blocks/src/tests/is_write_initial.rs create mode 100644 core/multivm_deps/vm_virtual_blocks/src/tests/l1_tx_execution.rs create mode 100644 core/multivm_deps/vm_virtual_blocks/src/tests/l2_blocks.rs create mode 100644 core/multivm_deps/vm_virtual_blocks/src/tests/mod.rs create mode 100644 core/multivm_deps/vm_virtual_blocks/src/tests/nonce_holder.rs create mode 100644 core/multivm_deps/vm_virtual_blocks/src/tests/refunds.rs create mode 100644 core/multivm_deps/vm_virtual_blocks/src/tests/require_eip712.rs create mode 100644 core/multivm_deps/vm_virtual_blocks/src/tests/rollbacks.rs create mode 100644 core/multivm_deps/vm_virtual_blocks/src/tests/simple_execution.rs create mode 100644 core/multivm_deps/vm_virtual_blocks/src/tests/tester/inner_state.rs create mode 100644 core/multivm_deps/vm_virtual_blocks/src/tests/tester/mod.rs create mode 100644 core/multivm_deps/vm_virtual_blocks/src/tests/tester/transaction_test_info.rs create mode 100644 core/multivm_deps/vm_virtual_blocks/src/tests/tester/vm_tester.rs create mode 100644 core/multivm_deps/vm_virtual_blocks/src/tests/tracing_execution_error.rs create mode 100644 core/multivm_deps/vm_virtual_blocks/src/tests/upgrade.rs create mode 100644 core/multivm_deps/vm_virtual_blocks/src/tests/utils.rs create mode 100644 core/multivm_deps/vm_virtual_blocks/src/tracers/call.rs create mode 100644 core/multivm_deps/vm_virtual_blocks/src/tracers/default_tracers.rs create mode 100644 core/multivm_deps/vm_virtual_blocks/src/tracers/mod.rs create mode 100644 core/multivm_deps/vm_virtual_blocks/src/tracers/refunds.rs create mode 100644 core/multivm_deps/vm_virtual_blocks/src/tracers/result_tracer.rs create mode 100644 core/multivm_deps/vm_virtual_blocks/src/tracers/storage_invocations.rs create mode 100644 core/multivm_deps/vm_virtual_blocks/src/tracers/traits.rs create mode 100644 core/multivm_deps/vm_virtual_blocks/src/tracers/utils.rs create mode 100644 core/multivm_deps/vm_virtual_blocks/src/tracers/validation/error.rs create mode 100644 core/multivm_deps/vm_virtual_blocks/src/tracers/validation/mod.rs create mode 100644 
core/multivm_deps/vm_virtual_blocks/src/tracers/validation/params.rs create mode 100644 core/multivm_deps/vm_virtual_blocks/src/tracers/validation/types.rs create mode 100644 core/multivm_deps/vm_virtual_blocks/src/types/inputs/execution_mode.rs create mode 100644 core/multivm_deps/vm_virtual_blocks/src/types/inputs/l1_batch_env.rs create mode 100644 core/multivm_deps/vm_virtual_blocks/src/types/inputs/l2_block.rs create mode 100644 core/multivm_deps/vm_virtual_blocks/src/types/inputs/mod.rs create mode 100644 core/multivm_deps/vm_virtual_blocks/src/types/inputs/system_env.rs create mode 100644 core/multivm_deps/vm_virtual_blocks/src/types/internals/mod.rs create mode 100644 core/multivm_deps/vm_virtual_blocks/src/types/internals/snapshot.rs create mode 100644 core/multivm_deps/vm_virtual_blocks/src/types/internals/transaction_data.rs create mode 100644 core/multivm_deps/vm_virtual_blocks/src/types/internals/vm_state.rs create mode 100644 core/multivm_deps/vm_virtual_blocks/src/types/mod.rs create mode 100644 core/multivm_deps/vm_virtual_blocks/src/types/outputs/execution_result.rs create mode 100644 core/multivm_deps/vm_virtual_blocks/src/types/outputs/execution_state.rs create mode 100644 core/multivm_deps/vm_virtual_blocks/src/types/outputs/finished_l1batch.rs create mode 100644 core/multivm_deps/vm_virtual_blocks/src/types/outputs/l2_block.rs create mode 100644 core/multivm_deps/vm_virtual_blocks/src/types/outputs/mod.rs create mode 100644 core/multivm_deps/vm_virtual_blocks/src/types/outputs/statistic.rs create mode 100644 core/multivm_deps/vm_virtual_blocks/src/utils/fee.rs create mode 100644 core/multivm_deps/vm_virtual_blocks/src/utils/l2_blocks.rs create mode 100644 core/multivm_deps/vm_virtual_blocks/src/utils/mod.rs create mode 100644 core/multivm_deps/vm_virtual_blocks/src/utils/overhead.rs create mode 100644 core/multivm_deps/vm_virtual_blocks/src/utils/transaction_encoding.rs create mode 100644 core/multivm_deps/vm_virtual_blocks/src/vm.rs diff --git a/Cargo.lock b/Cargo.lock index 45d34bfcd8fb..e5c438cb51ec 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -3827,6 +3827,7 @@ dependencies = [ "vm_1_3_2", "vm_m5", "vm_m6", + "vm_virtual_blocks", "zksync_contracts", "zksync_state", "zksync_types", @@ -7032,6 +7033,29 @@ dependencies = [ "zksync_utils", ] +[[package]] +name = "vm_virtual_blocks" +version = "0.1.0" +dependencies = [ + "anyhow", + "ethabi", + "hex", + "itertools", + "once_cell", + "thiserror", + "tokio", + "tracing", + "vise", + "zk_evm 1.3.3", + "zksync_config", + "zksync_contracts", + "zksync_eth_signer", + "zksync_state", + "zksync_test_account", + "zksync_types", + "zksync_utils", +] + [[package]] name = "walkdir" version = "2.3.3" diff --git a/core/lib/multivm/Cargo.toml b/core/lib/multivm/Cargo.toml index 4abef1cc50b8..b2dd8396f157 100644 --- a/core/lib/multivm/Cargo.toml +++ b/core/lib/multivm/Cargo.toml @@ -13,6 +13,7 @@ categories = ["cryptography"] vm_m5 = { path = "../../multivm_deps/vm_m5" } vm_m6 = { path = "../../multivm_deps/vm_m6" } vm_1_3_2 = { path = "../../multivm_deps/vm_1_3_2" } +vm_virtual_blocks= { path = "../../multivm_deps/vm_virtual_blocks" } vm_latest = { path = "../vm", package = "vm" } zksync_types = { path = "../types" } diff --git a/core/lib/multivm/src/glue/block_properties.rs b/core/lib/multivm/src/glue/block_properties.rs index e37dbfd895b4..6dea6dddb1cc 100644 --- a/core/lib/multivm/src/glue/block_properties.rs +++ b/core/lib/multivm/src/glue/block_properties.rs @@ -33,9 +33,9 @@ impl BlockProperties { }; Self::Vm1_3_2(inner) } - 
VmVersion::VmVirtualBlocks => { + VmVersion::VmVirtualBlocks | VmVersion::VmVirtualBlocksRefundsEnhancement => { unreachable!( - "Vm with virtual blocks has another initialization logic, \ + "From VmVirtualBlocks we have another initialization logic, \ so it's not required to have BlockProperties for it" ) } diff --git a/core/lib/multivm/src/glue/history_mode.rs b/core/lib/multivm/src/glue/history_mode.rs index 2055e963ffe8..b40eec4f0898 100644 --- a/core/lib/multivm/src/glue/history_mode.rs +++ b/core/lib/multivm/src/glue/history_mode.rs @@ -5,10 +5,12 @@ pub trait HistoryMode: + GlueInto + GlueInto + GlueInto + + GlueInto { type VmM6Mode: vm_m6::HistoryMode; type Vm1_3_2Mode: vm_1_3_2::HistoryMode; - type VmVirtualBlocksMode: vm_latest::HistoryMode; + type VmVirtualBlocksMode: vm_virtual_blocks::HistoryMode; + type VmVirtualBlocksRefundsEnhancement: vm_latest::HistoryMode; } impl GlueFrom for vm_m6::HistoryEnabled { @@ -23,6 +25,12 @@ impl GlueFrom for vm_1_3_2::HistoryEnabled { } } +impl GlueFrom for vm_virtual_blocks::HistoryEnabled { + fn glue_from(_: vm_latest::HistoryEnabled) -> Self { + Self + } +} + impl GlueFrom for vm_m6::HistoryDisabled { fn glue_from(_: vm_latest::HistoryDisabled) -> Self { Self @@ -35,14 +43,22 @@ impl GlueFrom for vm_1_3_2::HistoryDisabled { } } +impl GlueFrom for vm_virtual_blocks::HistoryDisabled { + fn glue_from(_: vm_latest::HistoryDisabled) -> Self { + Self + } +} + impl HistoryMode for vm_latest::HistoryEnabled { type VmM6Mode = vm_m6::HistoryEnabled; type Vm1_3_2Mode = vm_1_3_2::HistoryEnabled; - type VmVirtualBlocksMode = vm_latest::HistoryEnabled; + type VmVirtualBlocksMode = vm_virtual_blocks::HistoryEnabled; + type VmVirtualBlocksRefundsEnhancement = vm_latest::HistoryEnabled; } impl HistoryMode for vm_latest::HistoryDisabled { type VmM6Mode = vm_m6::HistoryDisabled; type Vm1_3_2Mode = vm_1_3_2::HistoryDisabled; - type VmVirtualBlocksMode = vm_latest::HistoryDisabled; + type VmVirtualBlocksMode = vm_virtual_blocks::HistoryDisabled; + type VmVirtualBlocksRefundsEnhancement = vm_latest::HistoryDisabled; } diff --git a/core/lib/multivm/src/glue/init_vm.rs b/core/lib/multivm/src/glue/init_vm.rs index a2f251394036..bd75c0fbec8d 100644 --- a/core/lib/multivm/src/glue/init_vm.rs +++ b/core/lib/multivm/src/glue/init_vm.rs @@ -69,9 +69,9 @@ impl<'a, S: ReadStorage, H: HistoryMode> VmInstance<'a, S, H> { } } VmInstanceData::VmVirtualBlocks(data) => { - let vm = vm_latest::Vm::new( + let vm = vm_virtual_blocks::Vm::new( l1_batch_env.glue_into(), - system_env.clone(), + system_env.clone().glue_into(), data.storage_view.clone(), H::VmVirtualBlocksMode::default(), ); @@ -82,6 +82,20 @@ impl<'a, S: ReadStorage, H: HistoryMode> VmInstance<'a, S, H> { last_tx_compressed_bytecodes: vec![], } } + VmInstanceData::VmVirtualBlocksRefundsEnhancement(data) => { + let vm = vm_latest::Vm::new( + l1_batch_env.glue_into(), + system_env.clone(), + data.storage_view.clone(), + H::VmVirtualBlocksRefundsEnhancement::default(), + ); + let vm = VmInstanceVersion::VmVirtualBlocksRefundsEnhancement(Box::new(vm)); + Self { + vm, + system_env, + last_tx_compressed_bytecodes: vec![], + } + } } } } diff --git a/core/lib/multivm/src/glue/oracle_tools.rs b/core/lib/multivm/src/glue/oracle_tools.rs index b13c15af168f..1cd46d17692a 100644 --- a/core/lib/multivm/src/glue/oracle_tools.rs +++ b/core/lib/multivm/src/glue/oracle_tools.rs @@ -27,11 +27,10 @@ where let oracle_tools = vm_m6::OracleTools::new(state, history.glue_into()); OracleTools::M6(oracle_tools) } - VmVersion::Vm1_3_2 => { - 
panic!("oracle tools for vm1.3.2 do not exist") - } - VmVersion::VmVirtualBlocks => { - panic!("oracle tools for VmVirtualBlocks do not exist") + VmVersion::VmVirtualBlocks + | VmVersion::VmVirtualBlocksRefundsEnhancement + | VmVersion::Vm1_3_2 => { + panic!("oracle tools for after VM1.3.2 do not exist") } } } diff --git a/core/lib/multivm/src/glue/tracer.rs b/core/lib/multivm/src/glue/tracer.rs index d80a1585084e..78d991b29a47 100644 --- a/core/lib/multivm/src/glue/tracer.rs +++ b/core/lib/multivm/src/glue/tracer.rs @@ -15,6 +15,10 @@ //! into a form compatible with the latest VM version. //! It defines a method `latest` for obtaining a boxed tracer. //! +//! - `IntoVmVirtualBlocksTracer`:This trait is responsible for converting a tracer +//! into a form compatible with the vm_virtual_blocks version. +//! It defines a method `vm_virtual_blocks` for obtaining a boxed tracer. +//! //! For `MultivmTracer` to be implemented, Tracer must implement all N currently //! existing sub-traits. //! @@ -30,11 +34,15 @@ //! - Create a new trait performing conversion to the specified VM tracer, e.g. `IntoTracer`. //! - Provide implementations of this trait for all the structures that currently implement `MultivmTracer`. //! - Add this trait as a trait bound to the `MultivmTracer`. +//! - Add this trait as a trait bound for `T` in `MultivmTracer` implementation. //! - Integrate the newly added method to the MultiVM itself (e.g. add required tracer conversions where applicable). +mod implementations; + +use crate::HistoryMode; use zksync_state::WriteStorage; -pub trait MultivmTracer: - IntoLatestTracer +pub trait MultivmTracer: + IntoLatestTracer + IntoVmVirtualBlocksTracer { fn into_boxed(self) -> Box> where @@ -44,17 +52,21 @@ pub trait MultivmTracer: } } -pub trait IntoLatestTracer { - fn latest(&self) -> Box>; +pub trait IntoLatestTracer { + fn latest(&self) -> Box>; +} + +pub trait IntoVmVirtualBlocksTracer { + fn vm_virtual_blocks(&self) -> Box>; } -impl IntoLatestTracer for T +impl IntoLatestTracer for T where S: WriteStorage, - H: vm_latest::HistoryMode, - T: vm_latest::VmTracer + Clone + 'static, + H: HistoryMode, + T: vm_latest::VmTracer + Clone + 'static, { - fn latest(&self) -> Box> { + fn latest(&self) -> Box> { Box::new(self.clone()) } } @@ -62,7 +74,11 @@ where impl MultivmTracer for T where S: WriteStorage, - H: vm_latest::HistoryMode, - T: vm_latest::VmTracer + Clone + 'static, + H: HistoryMode, + T: vm_latest::VmTracer + + IntoLatestTracer + + IntoVmVirtualBlocksTracer + + Clone + + 'static, { } diff --git a/core/lib/multivm/src/glue/tracer/implementations.rs b/core/lib/multivm/src/glue/tracer/implementations.rs new file mode 100644 index 000000000000..d8f7056728bc --- /dev/null +++ b/core/lib/multivm/src/glue/tracer/implementations.rs @@ -0,0 +1,48 @@ +use crate::glue::tracer::IntoVmVirtualBlocksTracer; +use vm_latest::{CallTracer, StorageInvocations, ValidationTracer}; +use zksync_state::WriteStorage; + +impl IntoVmVirtualBlocksTracer for StorageInvocations +where + H: crate::HistoryMode, + S: WriteStorage, +{ + fn vm_virtual_blocks(&self) -> Box> { + Box::new(vm_virtual_blocks::StorageInvocations::new(self.limit)) + } +} + +impl IntoVmVirtualBlocksTracer for CallTracer +where + H: crate::HistoryMode + 'static, + S: WriteStorage, +{ + fn vm_virtual_blocks(&self) -> Box> { + Box::new(vm_virtual_blocks::CallTracer::new( + self.result.clone(), + H::VmVirtualBlocksMode::default(), + )) + } +} + +impl IntoVmVirtualBlocksTracer + for ValidationTracer +where + H: crate::HistoryMode + 'static, 
+ S: WriteStorage, +{ + fn vm_virtual_blocks(&self) -> Box> { + let params = self.params(); + Box::new(vm_virtual_blocks::ValidationTracer::new( + vm_virtual_blocks::ValidationTracerParams { + user_address: params.user_address, + paymaster_address: params.paymaster_address, + trusted_slots: params.trusted_slots, + trusted_addresses: params.trusted_addresses, + trusted_address_slots: params.trusted_address_slots, + computational_gas_limit: params.computational_gas_limit, + }, + self.result.clone(), + )) + } +} diff --git a/core/lib/multivm/src/glue/types/vm/bytecompression_result.rs b/core/lib/multivm/src/glue/types/vm/bytecompression_result.rs new file mode 100644 index 000000000000..53e65a36cdf5 --- /dev/null +++ b/core/lib/multivm/src/glue/types/vm/bytecompression_result.rs @@ -0,0 +1,33 @@ +use crate::glue::{GlueFrom, GlueInto}; +use vm_latest::{BytecodeCompressionError, VmExecutionResultAndLogs}; + +impl GlueFrom for BytecodeCompressionError { + fn glue_from(value: vm_virtual_blocks::BytecodeCompressionError) -> Self { + match value { + vm_virtual_blocks::BytecodeCompressionError::BytecodeCompressionFailed => { + Self::BytecodeCompressionFailed + } + } + } +} + +impl + GlueFrom< + Result< + vm_virtual_blocks::VmExecutionResultAndLogs, + vm_virtual_blocks::BytecodeCompressionError, + >, + > for Result +{ + fn glue_from( + value: Result< + vm_virtual_blocks::VmExecutionResultAndLogs, + vm_virtual_blocks::BytecodeCompressionError, + >, + ) -> Self { + match value { + Ok(result) => Ok(result.glue_into()), + Err(err) => Err(err.glue_into()), + } + } +} diff --git a/core/lib/multivm/src/glue/types/vm/current_execution_state.rs b/core/lib/multivm/src/glue/types/vm/current_execution_state.rs new file mode 100644 index 000000000000..41e77344da25 --- /dev/null +++ b/core/lib/multivm/src/glue/types/vm/current_execution_state.rs @@ -0,0 +1,15 @@ +use crate::glue::GlueFrom; +use vm_latest::CurrentExecutionState; + +impl GlueFrom for CurrentExecutionState { + fn glue_from(value: vm_virtual_blocks::CurrentExecutionState) -> Self { + Self { + events: value.events, + storage_log_queries: value.storage_log_queries, + used_contract_hashes: value.used_contract_hashes, + l2_to_l1_logs: value.l2_to_l1_logs, + total_log_queries: value.total_log_queries, + cycles_used: value.cycles_used, + } + } +} diff --git a/core/lib/multivm/src/glue/types/vm/execution_result.rs b/core/lib/multivm/src/glue/types/vm/execution_result.rs new file mode 100644 index 000000000000..7dd4b361ffc9 --- /dev/null +++ b/core/lib/multivm/src/glue/types/vm/execution_result.rs @@ -0,0 +1,18 @@ +use crate::glue::{GlueFrom, GlueInto}; +use vm_latest::ExecutionResult; + +impl GlueFrom for ExecutionResult { + fn glue_from(value: vm_virtual_blocks::ExecutionResult) -> Self { + match value { + vm_virtual_blocks::ExecutionResult::Success { output } => { + ExecutionResult::Success { output } + } + vm_virtual_blocks::ExecutionResult::Revert { output } => ExecutionResult::Revert { + output: output.glue_into(), + }, + vm_virtual_blocks::ExecutionResult::Halt { reason } => ExecutionResult::Halt { + reason: reason.glue_into(), + }, + } + } +} diff --git a/core/lib/multivm/src/glue/types/vm/halt.rs b/core/lib/multivm/src/glue/types/vm/halt.rs new file mode 100644 index 000000000000..d08f143f80e8 --- /dev/null +++ b/core/lib/multivm/src/glue/types/vm/halt.rs @@ -0,0 +1,44 @@ +use crate::glue::{GlueFrom, GlueInto}; +use vm_latest::Halt; + +impl GlueFrom for Halt { + fn glue_from(value: vm_virtual_blocks::Halt) -> Self { + match value { + 
vm_virtual_blocks::Halt::ValidationFailed(reason) => { + Self::ValidationFailed(reason.glue_into()) + } + vm_virtual_blocks::Halt::PaymasterValidationFailed(reason) => { + Self::PaymasterValidationFailed(reason.glue_into()) + } + vm_virtual_blocks::Halt::PrePaymasterPreparationFailed(reason) => { + Self::PrePaymasterPreparationFailed(reason.glue_into()) + } + vm_virtual_blocks::Halt::PayForTxFailed(reason) => { + Self::PayForTxFailed(reason.glue_into()) + } + vm_virtual_blocks::Halt::FailedToMarkFactoryDependencies(reason) => { + Self::FailedToMarkFactoryDependencies(reason.glue_into()) + } + vm_virtual_blocks::Halt::FailedToChargeFee(reason) => { + Self::FailedToChargeFee(reason.glue_into()) + } + vm_virtual_blocks::Halt::FromIsNotAnAccount => Self::FromIsNotAnAccount, + vm_virtual_blocks::Halt::InnerTxError => Self::InnerTxError, + vm_virtual_blocks::Halt::Unknown(reason) => Self::Unknown(reason.glue_into()), + vm_virtual_blocks::Halt::UnexpectedVMBehavior(reason) => { + Self::UnexpectedVMBehavior(reason) + } + vm_virtual_blocks::Halt::BootloaderOutOfGas => Self::BootloaderOutOfGas, + vm_virtual_blocks::Halt::TooBigGasLimit => Self::TooBigGasLimit, + vm_virtual_blocks::Halt::NotEnoughGasProvided => Self::NotEnoughGasProvided, + vm_virtual_blocks::Halt::MissingInvocationLimitReached => { + Self::MissingInvocationLimitReached + } + vm_virtual_blocks::Halt::FailedToSetL2Block(reason) => Self::FailedToSetL2Block(reason), + vm_virtual_blocks::Halt::FailedToAppendTransactionToL2Block(reason) => { + Self::FailedToAppendTransactionToL2Block(reason) + } + vm_virtual_blocks::Halt::VMPanic => Self::VMPanic, + } + } +} diff --git a/core/lib/multivm/src/glue/types/vm/l1_batch.rs b/core/lib/multivm/src/glue/types/vm/l1_batch.rs new file mode 100644 index 000000000000..7d1cd7584989 --- /dev/null +++ b/core/lib/multivm/src/glue/types/vm/l1_batch.rs @@ -0,0 +1,16 @@ +use crate::glue::{GlueFrom, GlueInto}; + +impl GlueFrom for vm_virtual_blocks::L1BatchEnv { + fn glue_from(value: vm_latest::L1BatchEnv) -> Self { + Self { + previous_batch_hash: value.previous_batch_hash, + number: value.number, + timestamp: value.timestamp, + l1_gas_price: value.l1_gas_price, + fair_l2_gas_price: value.fair_l2_gas_price, + fee_account: value.fee_account, + enforced_base_fee: value.enforced_base_fee, + first_l2_block: value.first_l2_block.glue_into(), + } + } +} diff --git a/core/lib/multivm/src/glue/types/vm/l2_block.rs b/core/lib/multivm/src/glue/types/vm/l2_block.rs new file mode 100644 index 000000000000..a12e5ec816b8 --- /dev/null +++ b/core/lib/multivm/src/glue/types/vm/l2_block.rs @@ -0,0 +1,12 @@ +use crate::glue::GlueFrom; + +impl GlueFrom for vm_virtual_blocks::L2BlockEnv { + fn glue_from(value: vm_latest::L2BlockEnv) -> Self { + Self { + number: value.number, + timestamp: value.timestamp, + prev_block_hash: value.prev_block_hash, + max_virtual_blocks_to_create: value.max_virtual_blocks_to_create, + } + } +} diff --git a/core/lib/multivm/src/glue/types/vm/mod.rs b/core/lib/multivm/src/glue/types/vm/mod.rs index a9e8d84ad702..0a416a221b5d 100644 --- a/core/lib/multivm/src/glue/types/vm/mod.rs +++ b/core/lib/multivm/src/glue/types/vm/mod.rs @@ -1,8 +1,20 @@ mod block_context_mode; +mod bytecompression_result; +mod current_execution_state; +mod execution_result; +mod halt; +mod l1_batch; +mod l2_block; +mod refunds; +mod system_env; mod tx_execution_mode; mod tx_revert_reason; mod vm_block_result; +mod vm_execution_mode; mod vm_execution_result; +mod vm_execution_result_and_logs; +mod vm_execution_statistics; +mod 
vm_memory_metrics; mod vm_partial_execution_result; mod vm_revert_reason; mod vm_tx_execution_result; diff --git a/core/lib/multivm/src/glue/types/vm/refunds.rs b/core/lib/multivm/src/glue/types/vm/refunds.rs new file mode 100644 index 000000000000..3127efbf6612 --- /dev/null +++ b/core/lib/multivm/src/glue/types/vm/refunds.rs @@ -0,0 +1,11 @@ +use crate::glue::GlueFrom; +use vm_latest::Refunds; + +impl GlueFrom for Refunds { + fn glue_from(value: vm_virtual_blocks::Refunds) -> Self { + Self { + gas_refunded: value.gas_refunded, + operator_suggested_refund: value.operator_suggested_refund, + } + } +} diff --git a/core/lib/multivm/src/glue/types/vm/system_env.rs b/core/lib/multivm/src/glue/types/vm/system_env.rs new file mode 100644 index 000000000000..0f3012287bfb --- /dev/null +++ b/core/lib/multivm/src/glue/types/vm/system_env.rs @@ -0,0 +1,16 @@ +use crate::glue::{GlueFrom, GlueInto}; + +impl GlueFrom for vm_virtual_blocks::SystemEnv { + fn glue_from(value: vm_latest::SystemEnv) -> Self { + Self { + zk_porter_available: value.zk_porter_available, + version: value.version, + base_system_smart_contracts: value.base_system_smart_contracts, + gas_limit: value.gas_limit, + execution_mode: value.execution_mode.glue_into(), + default_validation_computational_gas_limit: value + .default_validation_computational_gas_limit, + chain_id: value.chain_id, + } + } +} diff --git a/core/lib/multivm/src/glue/types/vm/tx_execution_mode.rs b/core/lib/multivm/src/glue/types/vm/tx_execution_mode.rs index e9b901ff9906..2f68ed5edc70 100644 --- a/core/lib/multivm/src/glue/types/vm/tx_execution_mode.rs +++ b/core/lib/multivm/src/glue/types/vm/tx_execution_mode.rs @@ -45,3 +45,13 @@ impl GlueFrom for vm_1_3_2::vm_with_bootloader::TxEx } } } + +impl GlueFrom for vm_virtual_blocks::TxExecutionMode { + fn glue_from(value: vm_latest::TxExecutionMode) -> Self { + match value { + vm_latest::TxExecutionMode::VerifyExecute => Self::VerifyExecute, + vm_latest::TxExecutionMode::EstimateFee => Self::EstimateFee, + vm_latest::TxExecutionMode::EthCall => Self::EthCall, + } + } +} diff --git a/core/lib/multivm/src/glue/types/vm/vm_execution_mode.rs b/core/lib/multivm/src/glue/types/vm/vm_execution_mode.rs new file mode 100644 index 000000000000..d02766c9fce5 --- /dev/null +++ b/core/lib/multivm/src/glue/types/vm/vm_execution_mode.rs @@ -0,0 +1,12 @@ +use crate::glue::GlueFrom; +use vm_latest::VmExecutionMode; + +impl GlueFrom for vm_virtual_blocks::VmExecutionMode { + fn glue_from(value: VmExecutionMode) -> Self { + match value { + VmExecutionMode::OneTx => vm_virtual_blocks::VmExecutionMode::OneTx, + VmExecutionMode::Batch => vm_virtual_blocks::VmExecutionMode::Batch, + VmExecutionMode::Bootloader => vm_virtual_blocks::VmExecutionMode::Bootloader, + } + } +} diff --git a/core/lib/multivm/src/glue/types/vm/vm_execution_result_and_logs.rs b/core/lib/multivm/src/glue/types/vm/vm_execution_result_and_logs.rs new file mode 100644 index 000000000000..e6e7c411fd64 --- /dev/null +++ b/core/lib/multivm/src/glue/types/vm/vm_execution_result_and_logs.rs @@ -0,0 +1,13 @@ +use crate::glue::{GlueFrom, GlueInto}; +use vm_latest::VmExecutionResultAndLogs; + +impl GlueFrom for VmExecutionResultAndLogs { + fn glue_from(value: vm_virtual_blocks::VmExecutionResultAndLogs) -> Self { + Self { + result: value.result.glue_into(), + logs: value.logs, + statistics: value.statistics.glue_into(), + refunds: value.refunds.glue_into(), + } + } +} diff --git a/core/lib/multivm/src/glue/types/vm/vm_execution_statistics.rs 
b/core/lib/multivm/src/glue/types/vm/vm_execution_statistics.rs new file mode 100644 index 000000000000..3b4951ca9f61 --- /dev/null +++ b/core/lib/multivm/src/glue/types/vm/vm_execution_statistics.rs @@ -0,0 +1,14 @@ +use crate::glue::GlueFrom; +use vm_latest::VmExecutionStatistics; + +impl GlueFrom for VmExecutionStatistics { + fn glue_from(value: vm_virtual_blocks::VmExecutionStatistics) -> Self { + Self { + contracts_used: value.contracts_used, + cycles_used: value.cycles_used, + gas_used: value.gas_used, + computational_gas_used: value.computational_gas_used, + total_log_queries: value.total_log_queries, + } + } +} diff --git a/core/lib/multivm/src/glue/types/vm/vm_memory_metrics.rs b/core/lib/multivm/src/glue/types/vm/vm_memory_metrics.rs new file mode 100644 index 000000000000..d81a67de3213 --- /dev/null +++ b/core/lib/multivm/src/glue/types/vm/vm_memory_metrics.rs @@ -0,0 +1,17 @@ +use crate::glue::GlueFrom; +use vm_latest::VmMemoryMetrics; + +impl GlueFrom for VmMemoryMetrics { + fn glue_from(value: vm_virtual_blocks::VmMemoryMetrics) -> Self { + Self { + event_sink_inner: value.event_sink_inner, + event_sink_history: value.event_sink_history, + memory_inner: value.memory_inner, + memory_history: value.memory_history, + decommittment_processor_inner: value.decommittment_processor_inner, + decommittment_processor_history: value.decommittment_processor_history, + storage_inner: value.storage_inner, + storage_history: value.storage_history, + } + } +} diff --git a/core/lib/multivm/src/glue/types/vm/vm_revert_reason.rs b/core/lib/multivm/src/glue/types/vm/vm_revert_reason.rs index ec38027d1147..691651e2baff 100644 --- a/core/lib/multivm/src/glue/types/vm/vm_revert_reason.rs +++ b/core/lib/multivm/src/glue/types/vm/vm_revert_reason.rs @@ -53,3 +53,20 @@ impl GlueFrom for vm_latest::VmRevertReason { } } } + +impl GlueFrom for vm_latest::VmRevertReason { + fn glue_from(value: vm_virtual_blocks::VmRevertReason) -> Self { + match value { + vm_virtual_blocks::VmRevertReason::General { msg, data } => Self::General { msg, data }, + vm_virtual_blocks::VmRevertReason::InnerTxError => Self::InnerTxError, + vm_virtual_blocks::VmRevertReason::VmError => Self::VmError, + vm_virtual_blocks::VmRevertReason::Unknown { + function_selector, + data, + } => Self::Unknown { + function_selector, + data, + }, + } + } +} diff --git a/core/lib/multivm/src/lib.rs b/core/lib/multivm/src/lib.rs index a45eacecfa31..fa9497c2153d 100644 --- a/core/lib/multivm/src/lib.rs +++ b/core/lib/multivm/src/lib.rs @@ -1,5 +1,8 @@ pub use crate::{ - glue::{block_properties::BlockProperties, oracle_tools::OracleTools, tracer::MultivmTracer}, + glue::{ + block_properties::BlockProperties, history_mode::HistoryMode, oracle_tools::OracleTools, + tracer::MultivmTracer, + }, vm_instance::{VmInstance, VmInstanceData}, }; pub use zksync_types::vm_version::VmVersion; diff --git a/core/lib/multivm/src/vm_instance.rs b/core/lib/multivm/src/vm_instance.rs index dcc4d393c536..30b410b52b53 100644 --- a/core/lib/multivm/src/vm_instance.rs +++ b/core/lib/multivm/src/vm_instance.rs @@ -24,7 +24,10 @@ pub(crate) enum VmInstanceVersion<'a, S: ReadStorage, H: HistoryMode> { VmM5(Box>>), VmM6(Box, H::VmM6Mode>>), Vm1_3_2(Box, H::Vm1_3_2Mode>>), - VmVirtualBlocks(Box, H::VmVirtualBlocksMode>>), + VmVirtualBlocks(Box, H::VmVirtualBlocksMode>>), + VmVirtualBlocksRefundsEnhancement( + Box, H::VmVirtualBlocksRefundsEnhancement>>, + ), } impl<'a, S: ReadStorage, H: HistoryMode> VmInstance<'a, S, H> { @@ -57,6 +60,9 @@ impl<'a, S: ReadStorage, H: 
HistoryMode> VmInstance<'a, S, H> { VmInstanceVersion::VmVirtualBlocks(vm) => { vm.push_transaction(tx.clone()); } + VmInstanceVersion::VmVirtualBlocksRefundsEnhancement(vm) => { + vm.push_transaction(tx.clone()); + } } } @@ -79,6 +85,16 @@ impl<'a, S: ReadStorage, H: HistoryMode> VmInstance<'a, S, H> { ) .glue_into(), VmInstanceVersion::VmVirtualBlocks(vm) => { + let result = vm.execute(VmExecutionMode::Batch.glue_into()); + let execution_state = vm.get_current_execution_state(); + let bootloader_memory = vm.get_bootloader_memory(); + FinishedL1Batch { + block_tip_execution_result: result.glue_into(), + final_execution_state: execution_state.glue_into(), + final_bootloader_memory: Some(bootloader_memory), + } + } + VmInstanceVersion::VmVirtualBlocksRefundsEnhancement(vm) => { let result = vm.execute(VmExecutionMode::Batch); let execution_state = vm.get_current_execution_state(); let bootloader_memory = vm.get_bootloader_memory(); @@ -98,7 +114,12 @@ impl<'a, S: ReadStorage, H: HistoryMode> VmInstance<'a, S, H> { VmInstanceVersion::VmM5(vm) => vm.execute_block_tip().glue_into(), VmInstanceVersion::VmM6(vm) => vm.execute_block_tip().glue_into(), VmInstanceVersion::Vm1_3_2(vm) => vm.execute_block_tip().glue_into(), - VmInstanceVersion::VmVirtualBlocks(vm) => vm.execute(VmExecutionMode::Bootloader), + VmInstanceVersion::VmVirtualBlocks(vm) => vm + .execute(VmExecutionMode::Bootloader.glue_into()) + .glue_into(), + VmInstanceVersion::VmVirtualBlocksRefundsEnhancement(vm) => { + vm.execute(VmExecutionMode::Bootloader) + } } } @@ -147,7 +168,12 @@ impl<'a, S: ReadStorage, H: HistoryMode> VmInstance<'a, S, H> { .glue_into(), } } - VmInstanceVersion::VmVirtualBlocks(vm) => vm.execute(VmExecutionMode::OneTx), + VmInstanceVersion::VmVirtualBlocks(vm) => { + vm.execute(VmExecutionMode::OneTx.glue_into()).glue_into() + } + VmInstanceVersion::VmVirtualBlocksRefundsEnhancement(vm) => { + vm.execute(VmExecutionMode::OneTx) + } } } @@ -155,6 +181,9 @@ impl<'a, S: ReadStorage, H: HistoryMode> VmInstance<'a, S, H> { pub fn get_last_tx_compressed_bytecodes(&self) -> Vec { match &self.vm { VmInstanceVersion::VmVirtualBlocks(vm) => vm.get_last_tx_compressed_bytecodes(), + VmInstanceVersion::VmVirtualBlocksRefundsEnhancement(vm) => { + vm.get_last_tx_compressed_bytecodes() + } _ => self.last_tx_compressed_bytecodes.clone(), } } @@ -162,10 +191,19 @@ impl<'a, S: ReadStorage, H: HistoryMode> VmInstance<'a, S, H> { /// Execute next transaction with custom tracers pub fn inspect_next_transaction( &mut self, - tracers: Vec, H::VmVirtualBlocksMode>>>, + tracers: Vec, H>>>, ) -> vm_latest::VmExecutionResultAndLogs { match &mut self.vm { - VmInstanceVersion::VmVirtualBlocks(vm) => vm.inspect( + VmInstanceVersion::VmVirtualBlocks(vm) => vm + .inspect( + tracers + .into_iter() + .map(|tracer| tracer.vm_virtual_blocks()) + .collect(), + VmExecutionMode::OneTx.glue_into(), + ) + .glue_into(), + VmInstanceVersion::VmVirtualBlocksRefundsEnhancement(vm) => vm.inspect( tracers.into_iter().map(|tracer| tracer.latest()).collect(), VmExecutionMode::OneTx, ), @@ -316,7 +354,10 @@ impl<'a, S: ReadStorage, H: HistoryMode> VmInstance<'a, S, H> { Ok(result) } } - VmInstanceVersion::VmVirtualBlocks(vm) => { + VmInstanceVersion::VmVirtualBlocks(vm) => vm + .execute_transaction_with_bytecode_compression(tx, with_compression) + .glue_into(), + VmInstanceVersion::VmVirtualBlocksRefundsEnhancement(vm) => { vm.execute_transaction_with_bytecode_compression(tx, with_compression) } } @@ -325,25 +366,43 @@ impl<'a, S: ReadStorage, H: 
HistoryMode> VmInstance<'a, S, H> { /// Inspect transaction with optional bytecode compression. pub fn inspect_transaction_with_bytecode_compression( &mut self, - tracers: Vec, H::VmVirtualBlocksMode>>>, + tracers: Vec, H>>>, tx: zksync_types::Transaction, with_compression: bool, ) -> Result { - if let VmInstanceVersion::VmVirtualBlocks(vm) = &mut self.vm { - vm.inspect_transaction_with_bytecode_compression( - tracers.into_iter().map(|tracer| tracer.latest()).collect(), - tx, - with_compression, - ) - } else { - self.last_tx_compressed_bytecodes = vec![]; - self.execute_transaction_with_bytecode_compression(tx, with_compression) + match &mut self.vm { + VmInstanceVersion::VmVirtualBlocks(vm) => vm + .inspect_transaction_with_bytecode_compression( + tracers + .into_iter() + .map(|tracer| tracer.vm_virtual_blocks()) + .collect(), + tx, + with_compression, + ) + .glue_into(), + VmInstanceVersion::VmVirtualBlocksRefundsEnhancement(vm) => vm + .inspect_transaction_with_bytecode_compression( + tracers.into_iter().map(|tracer| tracer.latest()).collect(), + tx, + with_compression, + ), + _ => { + self.last_tx_compressed_bytecodes = vec![]; + self.execute_transaction_with_bytecode_compression(tx, with_compression) + } } } pub fn start_new_l2_block(&mut self, l2_block_env: L2BlockEnv) { - if let VmInstanceVersion::VmVirtualBlocks(vm) = &mut self.vm { - vm.start_new_l2_block(l2_block_env); + match &mut self.vm { + VmInstanceVersion::VmVirtualBlocks(vm) => { + vm.start_new_l2_block(l2_block_env.glue_into()); + } + VmInstanceVersion::VmVirtualBlocksRefundsEnhancement(vm) => { + vm.start_new_l2_block(l2_block_env); + } + _ => {} } } @@ -376,7 +435,12 @@ impl<'a, S: ReadStorage, H: HistoryMode> VmInstance<'a, S, H> { storage_inner: vm.state.storage.get_size(), storage_history: vm.state.storage.get_history_size(), }), - VmInstanceVersion::VmVirtualBlocks(vm) => Some(vm.record_vm_memory_metrics()), + VmInstanceVersion::VmVirtualBlocks(vm) => { + Some(vm.record_vm_memory_metrics().glue_into()) + } + VmInstanceVersion::VmVirtualBlocksRefundsEnhancement(vm) => { + Some(vm.record_vm_memory_metrics()) + } } } } @@ -408,6 +472,7 @@ pub enum VmInstanceData { M6(M6NecessaryData), Vm1_3_2(Vm1_3_2NecessaryData), VmVirtualBlocks(VmVirtualBlocksNecessaryData), + VmVirtualBlocksRefundsEnhancement(VmVirtualBlocksNecessaryData), } impl VmInstanceData { @@ -435,6 +500,13 @@ impl VmInstanceData { } fn latest(storage_view: StoragePtr>, history_mode: H) -> Self { + Self::VmVirtualBlocksRefundsEnhancement(VmVirtualBlocksNecessaryData { + storage_view, + history_mode, + }) + } + + fn vm_virtual_blocks(storage_view: StoragePtr>, history_mode: H) -> Self { Self::VmVirtualBlocks(VmVirtualBlocksNecessaryData { storage_view, history_mode, @@ -515,7 +587,10 @@ impl VmInstanceData { ) } VmVersion::Vm1_3_2 => VmInstanceData::vm1_3_2(storage_view, history), - VmVersion::VmVirtualBlocks => VmInstanceData::latest(storage_view, history), + VmVersion::VmVirtualBlocks => VmInstanceData::vm_virtual_blocks(storage_view, history), + VmVersion::VmVirtualBlocksRefundsEnhancement => { + VmInstanceData::latest(storage_view, history) + } } } } @@ -527,6 +602,7 @@ impl VmInstance<'_, S, vm_latest::HistoryEnabled> { VmInstanceVersion::VmM6(vm) => vm.save_current_vm_as_snapshot(), VmInstanceVersion::Vm1_3_2(vm) => vm.save_current_vm_as_snapshot(), VmInstanceVersion::VmVirtualBlocks(vm) => vm.make_snapshot(), + VmInstanceVersion::VmVirtualBlocksRefundsEnhancement(vm) => vm.make_snapshot(), } } @@ -538,6 +614,9 @@ impl VmInstance<'_, S, 
vm_latest::HistoryEnabled> { VmInstanceVersion::VmVirtualBlocks(vm) => { vm.rollback_to_the_latest_snapshot(); } + VmInstanceVersion::VmVirtualBlocksRefundsEnhancement(vm) => { + vm.rollback_to_the_latest_snapshot(); + } } } @@ -550,6 +629,9 @@ impl VmInstance<'_, S, vm_latest::HistoryEnabled> { VmInstanceVersion::VmM6(vm) => vm.pop_snapshot_no_rollback(), VmInstanceVersion::Vm1_3_2(vm) => vm.pop_snapshot_no_rollback(), VmInstanceVersion::VmVirtualBlocks(vm) => vm.pop_snapshot_no_rollback(), + VmInstanceVersion::VmVirtualBlocksRefundsEnhancement(vm) => { + vm.pop_snapshot_no_rollback() + } } } } diff --git a/core/lib/test_account/src/lib.rs b/core/lib/test_account/src/lib.rs index 509402b7b6b8..abc57b937ea0 100644 --- a/core/lib/test_account/src/lib.rs +++ b/core/lib/test_account/src/lib.rs @@ -2,6 +2,7 @@ use ethabi::Token; use zksync_config::constants::{ CONTRACT_DEPLOYER_ADDRESS, MAX_GAS_PER_PUBDATA_BYTE, REQUIRED_L1_TO_L2_GAS_PER_PUBDATA_BYTE, }; +use zksync_contracts::test_contracts::LoadnextContractExecutionParams; use zksync_contracts::{deployer_contract, load_contract}; use zksync_types::fee::Fee; use zksync_types::l2::L2Tx; @@ -215,6 +216,26 @@ impl Account { } } + pub fn get_loadnext_transaction( + &mut self, + address: Address, + params: LoadnextContractExecutionParams, + tx_type: TxType, + ) -> Transaction { + let calldata = params.to_bytes(); + let execute = Execute { + contract_address: address, + calldata, + value: U256::zero(), + factory_deps: None, + }; + + match tx_type { + TxType::L2 => self.get_l2_tx_for_execute(execute, None), + TxType::L1 { serial_id } => self.get_l1_tx(execute, serial_id), + } + } + pub fn address(&self) -> Address { self.address } diff --git a/core/lib/types/Cargo.toml b/core/lib/types/Cargo.toml index 27220908aa2f..08c47dd99f0b 100644 --- a/core/lib/types/Cargo.toml +++ b/core/lib/types/Cargo.toml @@ -31,6 +31,7 @@ serde_with = { version = "1", features = ["base64"] } strum = { version = "0.24", features = ["derive"] } thiserror = "1.0" num_enum = "0.6" +hex = "0.4" # Crypto stuff # TODO (PLA-440): remove parity-crypto @@ -42,7 +43,6 @@ blake2 = "0.10" ethereum_types_old = { package = "ethereum-types", version = "0.12.0" } [dev-dependencies] -hex = "0.4" secp256k1 = { version = "0.27", features = ["recovery"] } tokio = { version = "1", features = ["rt", "macros"] } serde_with = { version = "1", features = ["hex"] } diff --git a/core/lib/types/src/protocol_version.rs b/core/lib/types/src/protocol_version.rs index a36fd003cfad..afc4868785a9 100644 --- a/core/lib/types/src/protocol_version.rs +++ b/core/lib/types/src/protocol_version.rs @@ -36,15 +36,16 @@ pub enum ProtocolVersionId { Version14, Version15, Version16, + Version17, } impl ProtocolVersionId { pub fn latest() -> Self { - Self::Version15 + Self::Version16 } pub fn next() -> Self { - Self::Version16 + Self::Version17 } /// Returns VM version to be used by API for this protocol version. 
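The hunks below wire the new `VmVirtualBlocksRefundsEnhancement` variant into the protocol-version mapping: `Version16` is re-pointed at the new VM, and the freshly added `Version17` follows it. A minimal, self-contained sketch of this dispatch pattern (only the variant names follow the diff; the stub enums and `main` are illustrative):

```rust
// Sketch of the protocol-version -> VM-version dispatch extended in this patch.
#[derive(Debug, PartialEq)]
enum VmVersion {
    VmVirtualBlocks,
    VmVirtualBlocksRefundsEnhancement,
}

enum ProtocolVersionId {
    Version15,
    Version16,
    Version17,
}

impl From<ProtocolVersionId> for VmVersion {
    fn from(id: ProtocolVersionId) -> Self {
        match id {
            // Versions up to 15 keep the previous VM...
            ProtocolVersionId::Version15 => VmVersion::VmVirtualBlocks,
            // ...while 16 and 17 run the refunds-enhancement VM.
            ProtocolVersionId::Version16 | ProtocolVersionId::Version17 => {
                VmVersion::VmVirtualBlocksRefundsEnhancement
            }
        }
    }
}

fn main() {
    assert_eq!(
        VmVersion::from(ProtocolVersionId::Version17),
        VmVersion::VmVirtualBlocksRefundsEnhancement
    );
}
```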
@@ -67,7 +68,8 @@ impl ProtocolVersionId { ProtocolVersionId::Version13 => VmVersion::VmVirtualBlocks, ProtocolVersionId::Version14 => VmVersion::VmVirtualBlocks, ProtocolVersionId::Version15 => VmVersion::VmVirtualBlocks, - ProtocolVersionId::Version16 => VmVersion::VmVirtualBlocks, + ProtocolVersionId::Version16 => VmVersion::VmVirtualBlocksRefundsEnhancement, + ProtocolVersionId::Version17 => VmVersion::VmVirtualBlocksRefundsEnhancement, } } } @@ -552,7 +554,8 @@ impl From for VmVersion { ProtocolVersionId::Version13 => VmVersion::VmVirtualBlocks, ProtocolVersionId::Version14 => VmVersion::VmVirtualBlocks, ProtocolVersionId::Version15 => VmVersion::VmVirtualBlocks, - ProtocolVersionId::Version16 => VmVersion::VmVirtualBlocks, + ProtocolVersionId::Version16 => VmVersion::VmVirtualBlocksRefundsEnhancement, + ProtocolVersionId::Version17 => VmVersion::VmVirtualBlocksRefundsEnhancement, } } } diff --git a/core/lib/types/src/vm_trace.rs b/core/lib/types/src/vm_trace.rs index 34ac1d77d63f..c1fafa088da0 100644 --- a/core/lib/types/src/vm_trace.rs +++ b/core/lib/types/src/vm_trace.rs @@ -2,8 +2,10 @@ use crate::{Address, U256}; use serde::{Deserialize, Deserializer, Serialize, Serializer}; use std::collections::{HashMap, HashSet}; use std::fmt; +use std::fmt::Display; use zk_evm::zkevm_opcode_defs::FarCallOpcode; use zksync_config::constants::BOOTLOADER_ADDRESS; +use zksync_utils::u256_to_h256; #[derive(Debug, Serialize, Deserialize, Clone, PartialEq)] pub enum VmTrace { @@ -194,3 +196,37 @@ impl fmt::Debug for Call { .finish() } } + +#[derive(Debug, Clone)] +pub enum ViolatedValidationRule { + TouchedUnallowedStorageSlots(Address, U256), + CalledContractWithNoCode(Address), + TouchedUnallowedContext, + TookTooManyComputationalGas(u32), +} + +impl Display for ViolatedValidationRule { + fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { + match self { + ViolatedValidationRule::TouchedUnallowedStorageSlots(contract, key) => write!( + f, + "Touched unallowed storage slots: address {}, key: {}", + hex::encode(contract), + hex::encode(u256_to_h256(*key)) + ), + ViolatedValidationRule::CalledContractWithNoCode(contract) => { + write!(f, "Called contract with no code: {}", hex::encode(contract)) + } + ViolatedValidationRule::TouchedUnallowedContext => { + write!(f, "Touched unallowed context") + } + ViolatedValidationRule::TookTooManyComputationalGas(gas_limit) => { + write!( + f, + "Took too many computational gas, allowed limit: {}", + gas_limit + ) + } + } + } +} diff --git a/core/lib/types/src/vm_version.rs b/core/lib/types/src/vm_version.rs index 410c85e899f0..7f043bb5552b 100644 --- a/core/lib/types/src/vm_version.rs +++ b/core/lib/types/src/vm_version.rs @@ -6,11 +6,12 @@ pub enum VmVersion { M6BugWithCompressionFixed, Vm1_3_2, VmVirtualBlocks, + VmVirtualBlocksRefundsEnhancement, } impl VmVersion { /// Returns the latest supported VM version. 
pub const fn latest() -> VmVersion { - Self::VmVirtualBlocks + Self::VmVirtualBlocksRefundsEnhancement } } diff --git a/core/lib/vm/src/lib.rs b/core/lib/vm/src/lib.rs index 38e6982ce818..f0a9d7c03301 100644 --- a/core/lib/vm/src/lib.rs +++ b/core/lib/vm/src/lib.rs @@ -6,9 +6,10 @@ pub use old_vm::{ history_recorder::{HistoryDisabled, HistoryEnabled, HistoryMode}, memory::SimpleMemory, - oracles::storage::StorageOracle, }; +pub use oracles::storage::StorageOracle; + pub use errors::{ BytecodeCompressionError, Halt, TxRevertReason, VmRevertReason, VmRevertReasonParsingError, }; @@ -20,7 +21,6 @@ pub use tracers::{ TracerExecutionStopReason, VmTracer, }, utils::VmExecutionStopReason, - validation::ViolatedValidationRule, StorageInvocations, ValidationError, ValidationTracer, ValidationTracerParams, }; @@ -42,6 +42,7 @@ mod bootloader_state; mod errors; mod implementation; mod old_vm; +mod oracles; mod tracers; mod types; mod vm; diff --git a/core/lib/vm/src/old_vm/history_recorder.rs b/core/lib/vm/src/old_vm/history_recorder.rs index 1a5f7db58664..31431a3cc7a4 100644 --- a/core/lib/vm/src/old_vm/history_recorder.rs +++ b/core/lib/vm/src/old_vm/history_recorder.rs @@ -330,6 +330,10 @@ impl HistoryRecorder Option { + self.apply_historic_record(HashMapHistoryEvent { key, value: None }, timestamp) + } } /// A stack of stacks. The inner stacks are called frames. diff --git a/core/lib/vm/src/old_vm/oracles/mod.rs b/core/lib/vm/src/old_vm/oracles/mod.rs index daa2e21672df..725272e7060a 100644 --- a/core/lib/vm/src/old_vm/oracles/mod.rs +++ b/core/lib/vm/src/old_vm/oracles/mod.rs @@ -2,7 +2,6 @@ use zk_evm::aux_structures::Timestamp; pub(crate) mod decommitter; pub(crate) mod precompile; -pub(crate) mod storage; pub(crate) trait OracleWithHistory { fn rollback_to_timestamp(&mut self, timestamp: Timestamp); diff --git a/core/lib/vm/src/oracles/mod.rs b/core/lib/vm/src/oracles/mod.rs new file mode 100644 index 000000000000..b21c842572fe --- /dev/null +++ b/core/lib/vm/src/oracles/mod.rs @@ -0,0 +1 @@ +pub(crate) mod storage; diff --git a/core/lib/vm/src/oracles/storage.rs b/core/lib/vm/src/oracles/storage.rs new file mode 100644 index 000000000000..42d4f802247d --- /dev/null +++ b/core/lib/vm/src/oracles/storage.rs @@ -0,0 +1,414 @@ +use std::collections::HashMap; + +use crate::old_vm::history_recorder::{ + AppDataFrameManagerWithHistory, HashMapHistoryEvent, HistoryEnabled, HistoryMode, + HistoryRecorder, StorageWrapper, WithHistory, +}; +use crate::old_vm::oracles::OracleWithHistory; + +use zk_evm::abstractions::RefundedAmounts; +use zk_evm::zkevm_opcode_defs::system_params::INITIAL_STORAGE_WRITE_PUBDATA_BYTES; +use zk_evm::{ + abstractions::{RefundType, Storage as VmStorageOracle}, + aux_structures::{LogQuery, Timestamp}, +}; + +use zksync_state::{StoragePtr, WriteStorage}; +use zksync_types::utils::storage_key_for_eth_balance; +use zksync_types::{ + AccountTreeId, Address, StorageKey, StorageLogQuery, StorageLogQueryType, BOOTLOADER_ADDRESS, + U256, +}; +use zksync_utils::u256_to_h256; + +// While the storage does not support different shards, it was decided to write the +// code of the StorageOracle with the shard parameters in mind. 
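The new `StorageOracle` defined below tracks, per storage slot, how many pubdata bytes have already been paid for, so repeated writes within a batch are not charged twice. A condensed, runnable model of that bookkeeping, using plain `HashMap`s and the 64/40-byte write costs implied by the comments further down in this file (all type names and numbers here are illustrative stand-ins, not the crate's API):

```rust
use std::collections::HashMap;

// Illustrative stand-in for StorageKey; the real oracle uses zksync_types.
type Slot = (u64 /* address */, u64 /* key */);

// Assumed costs, per the comments in this file: an initial write publishes a
// 32-byte key + 32-byte value (64 bytes); a repeated write publishes an
// 8-byte id + 32-byte value (40 bytes).
const INITIAL_WRITE_BYTES: u32 = 64;
const REPEATED_WRITE_BYTES: u32 = 40;

#[derive(Default)]
struct PubdataLedger {
    // Bytes paid for by previous transactions in the batch (`pre_paid_changes`).
    pre_paid: HashMap<Slot, u32>,
    // Bytes paid for by the current transaction (`paid_changes`).
    paid: HashMap<Slot, u32>,
}

impl PubdataLedger {
    // Mirrors `prepaid_for_write`: current-tx payments shadow earlier ones.
    fn prepaid_for_write(&self, slot: &Slot) -> u32 {
        self.paid
            .get(slot)
            .or_else(|| self.pre_paid.get(slot))
            .copied()
            .unwrap_or(0)
    }

    // Mirrors `value_update_price`: the user only pays the part of the base
    // cost that earlier transactions have not already covered.
    fn price_for_write(&self, slot: &Slot, is_initial: bool) -> u32 {
        let base = if is_initial { INITIAL_WRITE_BYTES } else { REPEATED_WRITE_BYTES };
        base.saturating_sub(self.prepaid_for_write(slot))
    }
}

fn main() {
    let mut ledger = PubdataLedger::default();
    let slot = (0xA11CE, 7);
    assert_eq!(ledger.price_for_write(&slot, true), 64); // first write: full price
    ledger.paid.insert(slot, 64);
    assert_eq!(ledger.price_for_write(&slot, true), 0); // already paid this tx
}
```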
+pub(crate) fn triplet_to_storage_key(_shard_id: u8, address: Address, key: U256) -> StorageKey { + StorageKey::new(AccountTreeId::new(address), u256_to_h256(key)) +} + +pub(crate) fn storage_key_of_log(query: &LogQuery) -> StorageKey { + triplet_to_storage_key(query.shard_id, query.address, query.key) +} + +#[derive(Debug)] +pub struct StorageOracle { + // Access to the persistent storage. Please note that it + // is used only for read access. All the actual writes happen + // after the execution ended. + pub(crate) storage: HistoryRecorder, H>, + + pub(crate) frames_stack: AppDataFrameManagerWithHistory, H>, + + // The changes that have been paid for in previous transactions. + // It is a mapping from storage key to the number of *bytes* that was paid by the user + // to cover this slot. + pub(crate) pre_paid_changes: HistoryRecorder, H>, + + // The changes that have been paid for in the current transaction + pub(crate) paid_changes: HistoryRecorder, H>, + + // The map that contains all the first values read from storage for each slot. + // While formally it does not have to be rollbackable, we still do it to avoid memory bloat + // for unused slots. + pub(crate) initial_values: HistoryRecorder, H>, +} + +impl OracleWithHistory for StorageOracle { + fn rollback_to_timestamp(&mut self, timestamp: Timestamp) { + self.storage.rollback_to_timestamp(timestamp); + self.frames_stack.rollback_to_timestamp(timestamp); + self.pre_paid_changes.rollback_to_timestamp(timestamp); + self.paid_changes.rollback_to_timestamp(timestamp); + self.initial_values.rollback_to_timestamp(timestamp); + } +} + +impl StorageOracle { + pub fn new(storage: StoragePtr) -> Self { + Self { + storage: HistoryRecorder::from_inner(StorageWrapper::new(storage)), + frames_stack: Default::default(), + pre_paid_changes: Default::default(), + paid_changes: Default::default(), + initial_values: Default::default(), + } + } + + pub fn delete_history(&mut self) { + self.storage.delete_history(); + self.frames_stack.delete_history(); + self.pre_paid_changes.delete_history(); + self.paid_changes.delete_history(); + self.initial_values.delete_history(); + } + + fn is_storage_key_free(&self, key: &StorageKey) -> bool { + key.address() == &zksync_config::constants::SYSTEM_CONTEXT_ADDRESS + || *key == storage_key_for_eth_balance(&BOOTLOADER_ADDRESS) + } + + pub fn read_value(&mut self, mut query: LogQuery) -> LogQuery { + let key = triplet_to_storage_key(query.shard_id, query.address, query.key); + let current_value = self.storage.read_from_storage(&key); + + query.read_value = current_value; + + self.frames_stack.push_forward( + Box::new(StorageLogQuery { + log_query: query, + log_type: StorageLogQueryType::Read, + }), + query.timestamp, + ); + + query + } + + pub fn write_value(&mut self, mut query: LogQuery) -> LogQuery { + let key = triplet_to_storage_key(query.shard_id, query.address, query.key); + let current_value = + self.storage + .write_to_storage(key, query.written_value, query.timestamp); + + let is_initial_write = self.storage.get_ptr().borrow_mut().is_write_initial(&key); + let log_query_type = if is_initial_write { + StorageLogQueryType::InitialWrite + } else { + StorageLogQueryType::RepeatedWrite + }; + + query.read_value = current_value; + + if !self.initial_values.inner().contains_key(&key) { + self.initial_values + .insert(key, current_value, query.timestamp); + } + + let mut storage_log_query = StorageLogQuery { + log_query: query, + log_type: log_query_type, + }; + self.frames_stack + 
.push_forward(Box::new(storage_log_query), query.timestamp); + storage_log_query.log_query.rollback = true; + self.frames_stack + .push_rollback(Box::new(storage_log_query), query.timestamp); + storage_log_query.log_query.rollback = false; + + query + } + + // Returns the amount of funds that has been already paid for writes into the storage slot + fn prepaid_for_write(&self, storage_key: &StorageKey) -> u32 { + self.paid_changes + .inner() + .get(storage_key) + .copied() + .unwrap_or_else(|| { + self.pre_paid_changes + .inner() + .get(storage_key) + .copied() + .unwrap_or(0) + }) + } + + // Remembers the changes that have been paid for in the current transaction. + // It also returns how much pubdata did the user pay for and how much was actually published. + pub(crate) fn save_paid_changes(&mut self, timestamp: Timestamp) -> u32 { + let mut published = 0; + + let modified_keys = self + .paid_changes + .inner() + .iter() + .map(|(k, v)| (*k, *v)) + .collect::>(); + + for (key, _) in modified_keys { + // It is expected that for each slot for which we have paid changes, there is some + // first slot value already read. + let first_slot_value = self.initial_values.inner().get(&key).copied().unwrap(); + + // This is the value has been written to the storage slot at the end. + let current_slot_value = self.storage.read_from_storage(&key); + + let required_pubdata = + self.base_price_for_write(&key, first_slot_value, current_slot_value); + + // We assume that "prepaid_for_slot" represents both the number of pubdata published and the number of bytes paid by the previous transactions + // as they should be identical. + let prepaid_for_slot = self + .pre_paid_changes + .inner() + .get(&key) + .copied() + .unwrap_or_default(); + + published += required_pubdata.saturating_sub(prepaid_for_slot); + + // We remove the slot from the paid changes and move to the pre-paid changes as + // the transaction ends. + self.paid_changes.remove(key, timestamp); + self.pre_paid_changes + .insert(key, prepaid_for_slot.max(required_pubdata), timestamp); + } + + published + } + + fn base_price_for_write_query(&self, query: &LogQuery) -> u32 { + let storage_key = storage_key_of_log(query); + + self.base_price_for_write(&storage_key, query.read_value, query.written_value) + } + + pub(crate) fn base_price_for_write( + &self, + storage_key: &StorageKey, + prev_value: U256, + new_value: U256, + ) -> u32 { + if self.is_storage_key_free(storage_key) || prev_value == new_value { + return 0; + } + + let is_initial_write = self + .storage + .get_ptr() + .borrow_mut() + .is_write_initial(storage_key); + + get_pubdata_price_bytes(is_initial_write) + } + + // Returns the price of the update in terms of pubdata bytes. + // TODO (SMA-1701): update VM to accept gas instead of pubdata. + fn value_update_price(&self, query: &LogQuery) -> u32 { + let storage_key = storage_key_of_log(query); + + let base_cost = self.base_price_for_write_query(query); + + let already_paid = self.prepaid_for_write(&storage_key); + + if base_cost <= already_paid { + // Some other transaction has already paid for this slot, no need to pay anything + 0u32 + } else { + base_cost - already_paid + } + } + + /// Returns storage log queries from current frame where `log.log_query.timestamp >= from_timestamp`. + pub(crate) fn storage_log_queries_after_timestamp( + &self, + from_timestamp: Timestamp, + ) -> &[Box] { + let logs = self.frames_stack.forward().current_frame(); + + // Select all of the last elements where l.log_query.timestamp >= from_timestamp. 
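The `storage_log_queries_after_timestamp` helper above returns the trailing run of logs recorded at or after `from_timestamp`. A standalone illustration of the same `rsplit` slice idiom on plain integers (hypothetical data, same trick):

```rust
fn main() {
    let timestamps = [5u32, 9, 3, 7, 8];
    let from = 6u32;
    let tail: &[u32] = timestamps
        .rsplit(|&t| t < from) // split from the end on "too old" elements
        .next()                // first piece = suffix after the last old element
        .unwrap_or(&[]);
    assert_eq!(tail, &[7, 8]);
}
```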
+ // Note, that using binary search here is dangerous, because the logs are not sorted by timestamp. + logs.rsplit(|l| l.log_query.timestamp < from_timestamp) + .next() + .unwrap_or(&[]) + } + + pub(crate) fn get_final_log_queries(&self) -> Vec { + assert_eq!( + self.frames_stack.len(), + 1, + "VM finished execution in unexpected state" + ); + + self.frames_stack + .forward() + .current_frame() + .iter() + .map(|x| **x) + .collect() + } + + pub(crate) fn get_size(&self) -> usize { + let frames_stack_size = self.frames_stack.get_size(); + let paid_changes_size = + self.paid_changes.inner().len() * std::mem::size_of::<(StorageKey, u32)>(); + + frames_stack_size + paid_changes_size + } + + pub(crate) fn get_history_size(&self) -> usize { + let storage_size = self.storage.borrow_history(|h| h.len(), 0) + * std::mem::size_of::< as WithHistory>::HistoryRecord>(); + let frames_stack_size = self.frames_stack.get_history_size(); + let paid_changes_size = self.paid_changes.borrow_history(|h| h.len(), 0) + * std::mem::size_of::< as WithHistory>::HistoryRecord>(); + storage_size + frames_stack_size + paid_changes_size + } +} + +impl VmStorageOracle for StorageOracle { + // Perform a storage read/write access by taking an partially filled query + // and returning filled query and cold/warm marker for pricing purposes + fn execute_partial_query( + &mut self, + _monotonic_cycle_counter: u32, + query: LogQuery, + ) -> LogQuery { + // tracing::trace!( + // "execute partial query cyc {:?} addr {:?} key {:?}, rw {:?}, wr {:?}, tx {:?}", + // _monotonic_cycle_counter, + // query.address, + // query.key, + // query.rw_flag, + // query.written_value, + // query.tx_number_in_block + // ); + assert!(!query.rollback); + if query.rw_flag { + // The number of bytes that have been compensated by the user to perform this write + let storage_key = storage_key_of_log(&query); + + // It is considered that the user has paid for the whole base price for the writes + let to_pay_by_user = self.base_price_for_write_query(&query); + let prepaid = self.prepaid_for_write(&storage_key); + + if to_pay_by_user > prepaid { + self.paid_changes.apply_historic_record( + HashMapHistoryEvent { + key: storage_key, + value: Some(to_pay_by_user), + }, + query.timestamp, + ); + } + self.write_value(query) + } else { + self.read_value(query) + } + } + + // We can return the size of the refund before each storage query. + // Note, that while the `RefundType` allows to provide refunds both in + // `ergs` and `pubdata`, only refunds in pubdata will be compensated for the users + fn estimate_refunds_for_write( + &mut self, // to avoid any hacks inside, like prefetch + _monotonic_cycle_counter: u32, + partial_query: &LogQuery, + ) -> RefundType { + let price_to_pay = self.value_update_price(partial_query); + + RefundType::RepeatedWrite(RefundedAmounts { + ergs: 0, + // `INITIAL_STORAGE_WRITE_PUBDATA_BYTES` is the default amount of pubdata bytes the user pays for. 
+ pubdata_bytes: (INITIAL_STORAGE_WRITE_PUBDATA_BYTES as u32) - price_to_pay, + }) + } + + // Indicate a start of execution frame for rollback purposes + fn start_frame(&mut self, timestamp: Timestamp) { + self.frames_stack.push_frame(timestamp); + } + + // Indicate that execution frame went out from the scope, so we can + // log the history and either rollback immediately or keep records to rollback later + fn finish_frame(&mut self, timestamp: Timestamp, panicked: bool) { + // If we panic then we append forward and rollbacks to the forward of parent, + // otherwise we place rollbacks of child before rollbacks of the parent + if panicked { + // perform actual rollback + for query in self.frames_stack.rollback().current_frame().iter().rev() { + let read_value = match query.log_type { + StorageLogQueryType::Read => { + // Having Read logs in rollback is not possible + tracing::warn!("Read log in rollback queue {:?}", query); + continue; + } + StorageLogQueryType::InitialWrite | StorageLogQueryType::RepeatedWrite => { + query.log_query.read_value + } + }; + + let LogQuery { written_value, .. } = query.log_query; + let key = triplet_to_storage_key( + query.log_query.shard_id, + query.log_query.address, + query.log_query.key, + ); + let current_value = self.storage.write_to_storage( + key, + // NOTE, that since it is a rollback query, + // the `read_value` is being set + read_value, timestamp, + ); + + // Additional validation that the current value was correct + // Unwrap is safe because the return value from write_inner is the previous value in this leaf. + // It is impossible to set leaf value to `None` + assert_eq!(current_value, written_value); + } + + self.frames_stack + .move_rollback_to_forward(|_| true, timestamp); + } + self.frames_stack.merge_frame(timestamp); + } +} + +/// Returns the number of bytes needed to publish a slot. +// Since we need to publish the state diffs onchain, for each of the updated storage slot +// we basically need to publish the following pair: (). +// While new_value is always 32 bytes long, for key we use the following optimization: +// - The first time we publish it, we use 32 bytes. +// Then, we remember a 8-byte id for this slot and assign it to it. We call this initial write. +// - The second time we publish it, we will use this 8-byte instead of the 32 bytes of the entire key. +// So the total size of the publish pubdata is 40 bytes. We call this kind of write the repeated one +fn get_pubdata_price_bytes(is_initial: bool) -> u32 { + // TODO (SMA-1702): take into account the content of the log query, i.e. values that contain mostly zeroes + // should cost less. 
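For intuition, here is a worked instance of the refund formula from `estimate_refunds_for_write` above, using the 64/40-byte write costs described in this file (the concrete values are assumptions for the example):

```rust
fn main() {
    // Assumed default, per the comments in this file: the user is charged for
    // an initial write (64 bytes) up front and refunded down to the real cost.
    const INITIAL_STORAGE_WRITE_PUBDATA_BYTES: u32 = 64;
    let price_to_pay = 40; // e.g. a repeated write with nothing prepaid
    let refunded_pubdata_bytes = INITIAL_STORAGE_WRITE_PUBDATA_BYTES - price_to_pay;
    assert_eq!(refunded_pubdata_bytes, 24);
}
```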
+ if is_initial { + zk_evm::zkevm_opcode_defs::system_params::INITIAL_STORAGE_WRITE_PUBDATA_BYTES as u32 + } else { + zk_evm::zkevm_opcode_defs::system_params::REPEATED_STORAGE_WRITE_PUBDATA_BYTES as u32 + } +} diff --git a/core/lib/vm/src/tests/l1_tx_execution.rs b/core/lib/vm/src/tests/l1_tx_execution.rs index a231d8aba0b6..cd1c8f2460cb 100644 --- a/core/lib/vm/src/tests/l1_tx_execution.rs +++ b/core/lib/vm/src/tests/l1_tx_execution.rs @@ -1,7 +1,7 @@ use zksync_config::constants::BOOTLOADER_ADDRESS; use zksync_types::l2_to_l1_log::L2ToL1Log; use zksync_types::storage_writes_deduplicator::StorageWritesDeduplicator; -use zksync_types::{get_code_key, get_known_code_key, L2ChainId, U256}; +use zksync_types::{get_code_key, get_known_code_key, U256}; use zksync_utils::u256_to_h256; use crate::tests::tester::{TxType, VmTesterBuilder}; @@ -41,7 +41,7 @@ fn test_l1_tx_execution() { is_service: true, tx_number_in_block: 0, sender: BOOTLOADER_ADDRESS, - key: tx_data.tx_hash(L2ChainId::from(0)), + key: tx_data.tx_hash(0.into()), value: u256_to_h256(U256::from(1u32)), }]; diff --git a/core/lib/vm/src/tests/rollbacks.rs b/core/lib/vm/src/tests/rollbacks.rs index 1fa6a2afe390..9d6c48b86908 100644 --- a/core/lib/vm/src/tests/rollbacks.rs +++ b/core/lib/vm/src/tests/rollbacks.rs @@ -3,14 +3,19 @@ use ethabi::Token; use zksync_contracts::get_loadnext_contract; use zksync_contracts::test_contracts::LoadnextContractExecutionParams; -use zksync_types::{Execute, U256}; +use zksync_state::WriteStorage; +use zksync_types::{get_nonce_key, Execute, U256}; use crate::tests::tester::{ DeployContractsTx, TransactionTestInfo, TxModifier, TxType, VmTesterBuilder, }; use crate::tests::utils::read_test_contract; use crate::types::inputs::system_env::TxExecutionMode; -use crate::HistoryEnabled; +use crate::{ + BootloaderState, DynTracer, ExecutionEndTracer, ExecutionProcessing, HistoryEnabled, + HistoryMode, TracerExecutionStatus, TracerExecutionStopReason, VmExecutionMode, VmTracer, + ZkSyncVmState, +}; #[test] fn test_vm_rollbacks() { @@ -144,3 +149,123 @@ fn test_vm_loadnext_rollbacks() { assert_eq!(result_without_rollbacks, result_with_rollbacks); } + +// Testing tracer that does not allow the recursion to go deeper than a certain limit +struct MaxRecursionTracer { + max_recursion_depth: usize, + should_stop_execution: bool, +} + +/// Tracer responsible for calculating the number of storage invocations and +/// stopping the VM execution if the limit is reached. +impl DynTracer for MaxRecursionTracer {} + +impl ExecutionEndTracer for MaxRecursionTracer { + fn should_stop_execution(&self) -> TracerExecutionStatus { + if self.should_stop_execution { + TracerExecutionStatus::Stop(TracerExecutionStopReason::Finish) + } else { + TracerExecutionStatus::Continue + } + } +} + +impl ExecutionProcessing for MaxRecursionTracer { + fn after_cycle( + &mut self, + state: &mut ZkSyncVmState, + _bootloader_state: &mut BootloaderState, + ) { + let current_depth = state.local_state.callstack.depth(); + + if current_depth > self.max_recursion_depth { + self.should_stop_execution = true; + } + } +} + +impl VmTracer for MaxRecursionTracer {} + +#[test] +fn test_layered_rollback() { + // This test checks that the layered rollbacks work correctly, i.e. 
+ // the rollback by the operator will always revert all the changes + + let mut vm = VmTesterBuilder::new(HistoryEnabled) + .with_empty_in_memory_storage() + .with_execution_mode(TxExecutionMode::VerifyExecute) + .with_random_rich_accounts(1) + .build(); + + let account = &mut vm.rich_accounts[0]; + let loadnext_contract = get_loadnext_contract().bytecode; + + let DeployContractsTx { + tx: deploy_tx, + address, + .. + } = account.get_deploy_tx( + &loadnext_contract, + Some(&[Token::Uint(0.into())]), + TxType::L2, + ); + vm.vm.push_transaction(deploy_tx); + let deployment_res = vm.vm.execute(VmExecutionMode::OneTx); + assert!(!deployment_res.result.is_failed(), "transaction failed"); + + let loadnext_transaction = account.get_loadnext_transaction( + address, + LoadnextContractExecutionParams { + writes: 1, + recursive_calls: 20, + ..LoadnextContractExecutionParams::empty() + }, + TxType::L2, + ); + + let nonce_val = vm + .vm + .state + .storage + .storage + .read_from_storage(&get_nonce_key(&account.address)); + + vm.vm.make_snapshot(); + + vm.vm.push_transaction(loadnext_transaction.clone()); + vm.vm.inspect( + vec![Box::new(MaxRecursionTracer { + max_recursion_depth: 15, + should_stop_execution: false, + })], + VmExecutionMode::OneTx, + ); + + let nonce_val2 = vm + .vm + .state + .storage + .storage + .read_from_storage(&get_nonce_key(&account.address)); + + // The tracer stopped after the validation has passed, so nonce has already been increased + assert_eq!(nonce_val + U256::one(), nonce_val2, "nonce did not change"); + + vm.vm.rollback_to_the_latest_snapshot(); + + let nonce_val_after_rollback = vm + .vm + .state + .storage + .storage + .read_from_storage(&get_nonce_key(&account.address)); + + assert_eq!( + nonce_val, nonce_val_after_rollback, + "nonce changed after rollback" + ); + + vm.vm.push_transaction(loadnext_transaction); + let result = vm.vm.inspect(vec![], VmExecutionMode::OneTx); + assert!(!result.result.is_failed(), "transaction must not fail"); +} diff --git a/core/lib/vm/src/tests/tester/inner_state.rs b/core/lib/vm/src/tests/tester/inner_state.rs index 08220724b4dd..24363743b9ee 100644 --- a/core/lib/vm/src/tests/tester/inner_state.rs +++ b/core/lib/vm/src/tests/tester/inner_state.rs @@ -47,6 +47,10 @@ pub(crate) struct StorageOracleInnerState { pub(crate) modified_storage_keys: ModifiedKeysMap, pub(crate) frames_stack: AppDataFrameManagerWithHistory, H>, + + pub(crate) pre_paid_changes: HistoryRecorder, H>, + pub(crate) paid_changes: HistoryRecorder, H>, + pub(crate) initial_values: HistoryRecorder, H>, } #[derive(Clone, PartialEq, Debug)] @@ -101,6 +105,9 @@ impl Vm { .clone(), ), frames_stack: self.state.storage.frames_stack.clone(), + pre_paid_changes: self.state.storage.pre_paid_changes.clone(), + paid_changes: self.state.storage.paid_changes.clone(), + initial_values: self.state.storage.initial_values.clone(), }; let local_state = self.state.local_state.clone(); diff --git a/core/lib/vm/src/tracers/call.rs b/core/lib/vm/src/tracers/call.rs index 127502476040..aa2041f0bea9 100644 --- a/core/lib/vm/src/tracers/call.rs +++ b/core/lib/vm/src/tracers/call.rs @@ -22,7 +22,7 @@ use crate::types::outputs::VmExecutionResultAndLogs; #[derive(Debug, Clone)] pub struct CallTracer { stack: Vec, - result: Arc>>, + pub result: Arc>>, _phantom: PhantomData H>, } diff --git a/core/lib/vm/src/tracers/refunds.rs b/core/lib/vm/src/tracers/refunds.rs index 47e3f83e20f8..d7f15d7c9b94 100644 --- a/core/lib/vm/src/tracers/refunds.rs +++ b/core/lib/vm/src/tracers/refunds.rs @@ -1,7 +1,5 
@@ use vise::{Buckets, EncodeLabelSet, EncodeLabelValue, Family, Histogram, Metrics}; -use std::collections::HashMap; - use zk_evm::{ aux_structures::Timestamp, tracing::{BeforeExecutionData, VmLocalStateData}, @@ -12,18 +10,18 @@ use zksync_state::{StoragePtr, WriteStorage}; use zksync_types::{ event::{extract_long_l2_to_l1_messages, extract_published_bytecodes}, l2_to_l1_log::L2ToL1Log, - zkevm_test_harness::witness::sort_storage_access::sort_storage_access_queries, - L1BatchNumber, StorageKey, U256, + L1BatchNumber, U256, }; use zksync_utils::bytecode::bytecode_len_in_bytes; use zksync_utils::{ceil_div_u256, u256_to_h256}; -use crate::bootloader_state::BootloaderState; use crate::constants::{BOOTLOADER_HEAP_PAGE, OPERATOR_REFUNDS_OFFSET, TX_GAS_LIMIT_OFFSET}; use crate::old_vm::{ events::merge_events, history_recorder::HistoryMode, memory::SimpleMemory, - oracles::storage::storage_key_of_log, utils::eth_price_per_pubdata_byte, + utils::eth_price_per_pubdata_byte, }; + +use crate::bootloader_state::BootloaderState; use crate::tracers::utils::gas_spent_on_bytecodes_and_long_messages_this_opcode; use crate::tracers::{ traits::{DynTracer, ExecutionEndTracer, ExecutionProcessing, VmTracer}, @@ -224,8 +222,16 @@ impl ExecutionProcessing for RefundsTrace .value .as_u32(); - let pubdata_published = - pubdata_published(state, self.timestamp_initial, self.l1_batch.number); + let used_published_storage_slots = state + .storage + .save_paid_changes(Timestamp(state.local_state.timestamp)); + + let pubdata_published = pubdata_published( + state, + used_published_storage_slots, + self.timestamp_initial, + self.l1_batch.number, + ); let current_ergs_per_pubdata_byte = state.local_state.current_ergs_per_pubdata_byte; let tx_body_refund = self.tx_body_refund( @@ -285,11 +291,10 @@ impl ExecutionProcessing for RefundsTrace /// Returns the given transactions' gas limit - by reading it directly from the VM memory. pub(crate) fn pubdata_published( state: &ZkSyncVmState, + storage_writes_pubdata_published: u32, from_timestamp: Timestamp, batch_number: L1BatchNumber, ) -> u32 { - let storage_writes_pubdata_published = pubdata_published_for_writes(state, from_timestamp); - let (raw_events, l1_messages) = state .event_sink .get_events_and_l2_l1_logs_after_timestamp(from_timestamp); @@ -328,62 +333,6 @@ pub(crate) fn pubdata_published( + published_bytecode_bytes } -fn pubdata_published_for_writes( - state: &ZkSyncVmState, - from_timestamp: Timestamp, -) -> u32 { - // This `HashMap` contains how much was already paid for every slot that was paid during the last tx execution. - // For the slots that weren't paid during the last tx execution we can just use - // `self.state.storage.paid_changes.inner().get(&key)` to get how much it was paid before. 
- let pre_paid_before_tx_map: HashMap = state - .storage - .paid_changes - .history() - .iter() - .rev() - .take_while(|history_elem| history_elem.0 >= from_timestamp) - .map(|history_elem| (history_elem.1.key, history_elem.1.value.unwrap_or(0))) - .collect(); - let pre_paid_before_tx = |key: &StorageKey| -> u32 { - if let Some(pre_paid) = pre_paid_before_tx_map.get(key) { - *pre_paid - } else { - state - .storage - .paid_changes - .inner() - .get(key) - .copied() - .unwrap_or(0) - } - }; - - let storage_logs = state - .storage - .storage_log_queries_after_timestamp(from_timestamp); - let (_, deduplicated_logs) = - sort_storage_access_queries(storage_logs.iter().map(|log| &log.log_query)); - - deduplicated_logs - .into_iter() - .filter_map(|log| { - if log.rw_flag { - let key = storage_key_of_log(&log); - let pre_paid = pre_paid_before_tx(&key); - let to_pay_by_user = state.storage.base_price_for_write(&log); - - if to_pay_by_user > pre_paid { - Some(to_pay_by_user - pre_paid) - } else { - None - } - } else { - None - } - }) - .sum() -} - impl VmTracer for RefundsTracer { fn save_results(&mut self, result: &mut VmExecutionResultAndLogs) { result.refunds = Refunds { diff --git a/core/lib/vm/src/tracers/storage_invocations.rs b/core/lib/vm/src/tracers/storage_invocations.rs index bd6f419eddfb..bd7bbeb25c45 100644 --- a/core/lib/vm/src/tracers/storage_invocations.rs +++ b/core/lib/vm/src/tracers/storage_invocations.rs @@ -10,7 +10,7 @@ use zksync_state::WriteStorage; #[derive(Debug, Default, Clone)] pub struct StorageInvocations { - limit: usize, + pub limit: usize, current: usize, } diff --git a/core/lib/vm/src/tracers/validation/error.rs b/core/lib/vm/src/tracers/validation/error.rs index 49afb22e10d8..8fb104cb67a3 100644 --- a/core/lib/vm/src/tracers/validation/error.rs +++ b/core/lib/vm/src/tracers/validation/error.rs @@ -1,15 +1,6 @@ use crate::Halt; use std::fmt::Display; -use zksync_types::{Address, U256}; -use zksync_utils::u256_to_h256; - -#[derive(Debug, Clone)] -pub enum ViolatedValidationRule { - TouchedUnallowedStorageSlots(Address, U256), - CalledContractWithNoCode(Address), - TouchedUnallowedContext, - TookTooManyComputationalGas(u32), -} +use zksync_types::vm_trace::ViolatedValidationRule; #[derive(Debug, Clone)] pub enum ValidationError { @@ -17,32 +8,6 @@ pub enum ValidationError { ViolatedRule(ViolatedValidationRule), } -impl Display for ViolatedValidationRule { - fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { - match self { - ViolatedValidationRule::TouchedUnallowedStorageSlots(contract, key) => write!( - f, - "Touched unallowed storage slots: address {}, key: {}", - hex::encode(contract), - hex::encode(u256_to_h256(*key)) - ), - ViolatedValidationRule::CalledContractWithNoCode(contract) => { - write!(f, "Called contract with no code: {}", hex::encode(contract)) - } - ViolatedValidationRule::TouchedUnallowedContext => { - write!(f, "Touched unallowed context") - } - ViolatedValidationRule::TookTooManyComputationalGas(gas_limit) => { - write!( - f, - "Took too many computational gas, allowed limit: {}", - gas_limit - ) - } - } - } -} - impl Display for ValidationError { fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { match self { diff --git a/core/lib/vm/src/tracers/validation/mod.rs b/core/lib/vm/src/tracers/validation/mod.rs index d85d031665ac..dd06fe8e5dbd 100644 --- a/core/lib/vm/src/tracers/validation/mod.rs +++ b/core/lib/vm/src/tracers/validation/mod.rs @@ -19,7 +19,8 @@ use zksync_config::constants::{ use 
zksync_state::{StoragePtr, WriteStorage}; use zksync_types::{ - get_code_key, web3::signing::keccak256, AccountTreeId, Address, StorageKey, H256, U256, + get_code_key, vm_trace::ViolatedValidationRule, web3::signing::keccak256, AccountTreeId, + Address, StorageKey, H256, U256, }; use zksync_utils::{ be_bytes_to_safe_address, h256_to_account_address, u256_to_account_address, u256_to_h256, @@ -35,7 +36,7 @@ use crate::tracers::utils::{ computational_gas_price, get_calldata_page_via_abi, print_debug_if_needed, VmHook, }; -pub use error::{ValidationError, ViolatedValidationRule}; +pub use error::ValidationError; pub use params::ValidationTracerParams; use types::NewTrustedValidationItems; @@ -59,7 +60,7 @@ pub struct ValidationTracer { trusted_address_slots: HashSet<(Address, U256)>, computational_gas_used: u32, computational_gas_limit: u32, - result: Arc>, + pub result: Arc>, _marker: PhantomData H>, } @@ -192,6 +193,17 @@ impl ValidationTracer { } } + pub fn params(&self) -> ValidationTracerParams { + ValidationTracerParams { + user_address: self.user_address, + paymaster_address: self.paymaster_address, + trusted_slots: self.trusted_slots.clone(), + trusted_addresses: self.trusted_addresses.clone(), + trusted_address_slots: self.trusted_address_slots.clone(), + computational_gas_limit: self.computational_gas_limit, + } + } + fn check_user_restrictions( &mut self, state: VmLocalStateData<'_>, diff --git a/core/lib/vm/src/types/internals/vm_state.rs b/core/lib/vm/src/types/internals/vm_state.rs index 60969241295c..fa4782515019 100644 --- a/core/lib/vm/src/types/internals/vm_state.rs +++ b/core/lib/vm/src/types/internals/vm_state.rs @@ -24,8 +24,8 @@ use crate::constants::BOOTLOADER_HEAP_PAGE; use crate::old_vm::{ event_sink::InMemoryEventSink, history_recorder::HistoryMode, memory::SimpleMemory, oracles::decommitter::DecommitterOracle, oracles::precompile::PrecompilesProcessorWithHistory, - oracles::storage::StorageOracle, }; +use crate::oracles::storage::StorageOracle; use crate::types::inputs::{L1BatchEnv, SystemEnv}; use crate::utils::l2_blocks::{assert_next_block, load_last_l2_block}; use crate::L2Block; diff --git a/core/lib/zksync_core/src/api_server/execution_sandbox/tracers.rs b/core/lib/zksync_core/src/api_server/execution_sandbox/tracers.rs index ad6a65f1373f..468bb4649e0f 100644 --- a/core/lib/zksync_core/src/api_server/execution_sandbox/tracers.rs +++ b/core/lib/zksync_core/src/api_server/execution_sandbox/tracers.rs @@ -12,7 +12,10 @@ pub(crate) enum ApiTracer { } impl ApiTracer { - pub fn into_boxed( + pub fn into_boxed< + S: WriteStorage, + H: HistoryMode + multivm::HistoryMode + 'static, + >( self, ) -> Box> { match self { diff --git a/core/lib/zksync_core/src/api_server/tx_sender/mod.rs b/core/lib/zksync_core/src/api_server/tx_sender/mod.rs index b51a094a8351..320345ac189a 100644 --- a/core/lib/zksync_core/src/api_server/tx_sender/mod.rs +++ b/core/lib/zksync_core/src/api_server/tx_sender/mod.rs @@ -90,7 +90,8 @@ impl MultiVMBaseSystemContracts { ProtocolVersionId::Version13 => self.post_virtual_blocks, ProtocolVersionId::Version14 | ProtocolVersionId::Version15 - | ProtocolVersionId::Version16 => self.post_virtual_blocks_finish_upgrade_fix, + | ProtocolVersionId::Version16 + | ProtocolVersionId::Version17 => self.post_virtual_blocks_finish_upgrade_fix, } } } diff --git a/core/multivm_deps/vm_virtual_blocks/Cargo.toml b/core/multivm_deps/vm_virtual_blocks/Cargo.toml new file mode 100644 index 000000000000..02237d6bf83e --- /dev/null +++ 
b/core/multivm_deps/vm_virtual_blocks/Cargo.toml @@ -0,0 +1,33 @@ +[package] +name = "vm_virtual_blocks" +version = "0.1.0" +edition = "2018" +authors = ["The Matter Labs Team "] +homepage = "https://zksync.io/" +repository = "https://github.com/matter-labs/zksync-era" +license = "MIT OR Apache-2.0" +keywords = ["blockchain", "zksync"] +categories = ["cryptography"] + +[dependencies] +vise = { git = "https://github.com/matter-labs/vise.git", version = "0.1.0", rev = "9d097ab747b037b6e62504df1db5b975425b6bdd" } +zk_evm = { git = "https://github.com/matter-labs/era-zk_evm.git", branch = "v1.3.3" } +zksync_config = { path = "../../lib/config" } +zksync_types = { path = "../../lib/types" } +zksync_utils = { path = "../../lib/utils" } +zksync_state = { path = "../../lib/state" } +zksync_contracts = { path = "../../lib/contracts" } + +anyhow = "1.0" +hex = "0.4" +itertools = "0.10" +once_cell = "1.7" +thiserror = "1.0" +tracing = "0.1" + +[dev-dependencies] +tokio = { version = "1", features = ["time"] } +zksync_test_account = { path = "../../lib/test_account" } +ethabi = "18.0.0" +zksync_eth_signer = { path = "../../lib/eth_signer" } + diff --git a/core/multivm_deps/vm_virtual_blocks/README.md b/core/multivm_deps/vm_virtual_blocks/README.md new file mode 100644 index 000000000000..d515df0dfc60 --- /dev/null +++ b/core/multivm_deps/vm_virtual_blocks/README.md @@ -0,0 +1,44 @@ +# VM Crate + +This crate contains code that interacts with the VM (Virtual Machine). The VM itself is in a separate repository +[era-zk_evm][zk_evm_repo_ext]. + +## VM Dependencies + +The VM relies on several subcomponents or traits, such as Memory and Storage. These traits are defined in the `zk_evm` +repository, while their implementations can be found in this crate, such as the storage implementation in +`oracles/storage.rs` and the Memory implementation in `memory.rs`. + +Many of these implementations also support easy rollbacks and history, which is useful when creating a block with +multiple transactions and needing to return the VM to a previous state if a transaction doesn't fit. + +## Running the VM + +To interact with the VM, you must initialize it with `L1BatchEnv`, which represents the initial parameters of the batch, +`SystemEnv`, that represents the system parameters, and a reference to the Storage. To execute a transaction, you have +to push the transaction into the bootloader memory and call the `execute_next_transaction` method. + +### Tracers + +The VM implementation allows for the addition of `Tracers`, which are activated before and after each instruction. This +provides a more in-depth look into the VM, collecting detailed debugging information and logs. More details can be found +in the `tracer/` directory. + +This VM also supports custom tracers. You can call the `inspect_next_transaction` method with a custom tracer and +receive the result of the execution. + +### Bootloader + +In the context of zkEVM, we usually think about transactions. However, from the VM's perspective, it runs a single +program called the bootloader, which internally processes multiple transactions. + +### Rollbacks + +The `VMInstance` in `vm.rs` allows for easy rollbacks. You can save the current state at any moment by calling +`make_snapshot()` and return to that state using `rollback_to_the_latest_snapshot()`. + +This rollback affects all subcomponents, such as memory, storage, and events, and is mainly used if a transaction +doesn't fit in a block. 
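For orientation, here is a minimal sketch of the lifecycle this README describes. Only `execute_next_transaction`, `inspect_next_transaction`, `make_snapshot()` and `rollback_to_the_latest_snapshot()` are named above; the constructor, the transaction-push helper and the fit check below are assumed names, used purely for illustration.

```rust
// A minimal sketch, assuming a `Vm::new(batch_env, system_env, storage)` constructor,
// a `push_transaction` helper and a `fits_into_block` check; these three names are
// hypothetical and not guaranteed by this crate's API.
fn process_batch<S: WriteStorage>(
    batch_env: L1BatchEnv,
    system_env: SystemEnv,
    storage: StoragePtr<S>,
    txs: Vec<Transaction>,
) {
    let mut vm = Vm::new(batch_env, system_env, storage); // hypothetical constructor
    for tx in txs {
        vm.make_snapshot();                         // checkpoint memory, storage and events
        vm.push_transaction(tx);                    // hypothetical: load the tx into bootloader memory
        let result = vm.execute_next_transaction(); // run the bootloader until the tx finishes
        if !fits_into_block(&result) {              // hypothetical fit check
            vm.rollback_to_the_latest_snapshot();   // undo all of the tx's side effects
        }
    }
}
```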
+ +[zk_evm_repo]: https://github.com/matter-labs/zk_evm 'internal zk EVM repo' +[zk_evm_repo_ext]: https://github.com/matter-labs/era-zk_evm 'external zk EVM repo' diff --git a/core/multivm_deps/vm_virtual_blocks/src/bootloader_state/l2_block.rs b/core/multivm_deps/vm_virtual_blocks/src/bootloader_state/l2_block.rs new file mode 100644 index 000000000000..8b08978a9adb --- /dev/null +++ b/core/multivm_deps/vm_virtual_blocks/src/bootloader_state/l2_block.rs @@ -0,0 +1,83 @@ +use std::cmp::Ordering; +use zksync_types::{MiniblockNumber, H256}; +use zksync_utils::concat_and_hash; + +use crate::bootloader_state::snapshot::L2BlockSnapshot; +use crate::bootloader_state::tx::BootloaderTx; +use crate::utils::l2_blocks::l2_block_hash; +use crate::{L2Block, L2BlockEnv}; + +const EMPTY_TXS_ROLLING_HASH: H256 = H256::zero(); + +#[derive(Debug, Clone)] +pub(crate) struct BootloaderL2Block { + pub(crate) number: u32, + pub(crate) timestamp: u64, + pub(crate) txs_rolling_hash: H256, // The rolling hash of all the transactions in the miniblock + pub(crate) prev_block_hash: H256, + // Number of the first l2 block tx in l1 batch + pub(crate) first_tx_index: usize, + pub(crate) max_virtual_blocks_to_create: u32, + pub(super) txs: Vec, +} + +impl BootloaderL2Block { + pub(crate) fn new(l2_block: L2BlockEnv, first_tx_place: usize) -> Self { + Self { + number: l2_block.number, + timestamp: l2_block.timestamp, + txs_rolling_hash: EMPTY_TXS_ROLLING_HASH, + prev_block_hash: l2_block.prev_block_hash, + first_tx_index: first_tx_place, + max_virtual_blocks_to_create: l2_block.max_virtual_blocks_to_create, + txs: vec![], + } + } + + pub(super) fn push_tx(&mut self, tx: BootloaderTx) { + self.update_rolling_hash(tx.hash); + self.txs.push(tx) + } + + pub(crate) fn get_hash(&self) -> H256 { + l2_block_hash( + MiniblockNumber(self.number), + self.timestamp, + self.prev_block_hash, + self.txs_rolling_hash, + ) + } + + fn update_rolling_hash(&mut self, tx_hash: H256) { + self.txs_rolling_hash = concat_and_hash(self.txs_rolling_hash, tx_hash) + } + + pub(crate) fn interim_version(&self) -> BootloaderL2Block { + let mut interim = self.clone(); + interim.max_virtual_blocks_to_create = 0; + interim + } + + pub(crate) fn make_snapshot(&self) -> L2BlockSnapshot { + L2BlockSnapshot { + txs_rolling_hash: self.txs_rolling_hash, + txs_len: self.txs.len(), + } + } + + pub(crate) fn apply_snapshot(&mut self, snapshot: L2BlockSnapshot) { + self.txs_rolling_hash = snapshot.txs_rolling_hash; + match self.txs.len().cmp(&snapshot.txs_len) { + Ordering::Greater => self.txs.truncate(snapshot.txs_len), + Ordering::Less => panic!("Applying snapshot from future is not supported"), + Ordering::Equal => {} + } + } + pub(crate) fn l2_block(&self) -> L2Block { + L2Block { + number: self.number, + timestamp: self.timestamp, + hash: self.get_hash(), + } + } +} diff --git a/core/multivm_deps/vm_virtual_blocks/src/bootloader_state/mod.rs b/core/multivm_deps/vm_virtual_blocks/src/bootloader_state/mod.rs new file mode 100644 index 000000000000..73830de2759b --- /dev/null +++ b/core/multivm_deps/vm_virtual_blocks/src/bootloader_state/mod.rs @@ -0,0 +1,8 @@ +mod l2_block; +mod snapshot; +mod state; +mod tx; + +pub(crate) mod utils; +pub(crate) use snapshot::BootloaderStateSnapshot; +pub use state::BootloaderState; diff --git a/core/multivm_deps/vm_virtual_blocks/src/bootloader_state/snapshot.rs b/core/multivm_deps/vm_virtual_blocks/src/bootloader_state/snapshot.rs new file mode 100644 index 000000000000..e417a3b9ee69 --- /dev/null +++ 
b/core/multivm_deps/vm_virtual_blocks/src/bootloader_state/snapshot.rs @@ -0,0 +1,23 @@ +use zksync_types::H256; + +#[derive(Debug, Clone)] +pub(crate) struct BootloaderStateSnapshot { + /// ID of the next transaction to be executed. + pub(crate) tx_to_execute: usize, + /// Stored l2 blocks in bootloader memory + pub(crate) l2_blocks_len: usize, + /// Snapshot of the last l2 block. Only this block can be changed during a rollback + pub(crate) last_l2_block: L2BlockSnapshot, + /// The number of 32-byte words spent on the already included compressed bytecodes. + pub(crate) compressed_bytecodes_encoding: usize, + /// Current offset of the free space in the bootloader memory. + pub(crate) free_tx_offset: usize, +} + +#[derive(Debug, Clone)] +pub(crate) struct L2BlockSnapshot { + /// The rolling hash of all the transactions in the miniblock + pub(crate) txs_rolling_hash: H256, + /// The number of transactions in the last l2 block + pub(crate) txs_len: usize, +} diff --git a/core/multivm_deps/vm_virtual_blocks/src/bootloader_state/state.rs b/core/multivm_deps/vm_virtual_blocks/src/bootloader_state/state.rs new file mode 100644 index 000000000000..ca6f54e233eb --- /dev/null +++ b/core/multivm_deps/vm_virtual_blocks/src/bootloader_state/state.rs @@ -0,0 +1,254 @@ +use crate::bootloader_state::l2_block::BootloaderL2Block; +use crate::bootloader_state::snapshot::BootloaderStateSnapshot; +use crate::bootloader_state::utils::{apply_l2_block, apply_tx_to_memory}; +use std::cmp::Ordering; +use zksync_types::{L2ChainId, U256}; +use zksync_utils::bytecode::CompressedBytecodeInfo; + +use crate::constants::TX_DESCRIPTION_OFFSET; +use crate::types::inputs::system_env::TxExecutionMode; +use crate::types::internals::TransactionData; +use crate::types::outputs::BootloaderMemory; +use crate::utils::l2_blocks::assert_next_block; +use crate::L2BlockEnv; + +use super::tx::BootloaderTx; +/// Intermediate bootloader-related VM state. +/// +/// Required to process transactions one by one (since we intercept the VM execution to execute +/// transactions and add new ones to the memory on the fly). +/// Keeps track of everything related to the bootloader memory and can restore the whole memory. +/// +/// Serves two purposes: +/// - Tracks where the next tx should be pushed to in the bootloader memory. +/// - Tracks which transaction should be executed next. +#[derive(Debug, Clone)] +pub struct BootloaderState { + /// ID of the next transaction to be executed. + /// See the structure doc-comment for a better explanation of purpose. + tx_to_execute: usize, + /// Stored L2 blocks in bootloader memory + l2_blocks: Vec<BootloaderL2Block>, + /// The number of 32-byte words spent on the already included compressed bytecodes. + compressed_bytecodes_encoding: usize, + /// Initial memory of bootloader + initial_memory: BootloaderMemory, + /// Mode of txs for execution; it can be changed once per VM launch + execution_mode: TxExecutionMode, + /// Current offset of the free space in the bootloader memory.
+ free_tx_offset: usize, +} + +impl BootloaderState { + pub(crate) fn new( + execution_mode: TxExecutionMode, + initial_memory: BootloaderMemory, + first_l2_block: L2BlockEnv, + ) -> Self { + let l2_block = BootloaderL2Block::new(first_l2_block, 0); + Self { + tx_to_execute: 0, + compressed_bytecodes_encoding: 0, + l2_blocks: vec![l2_block], + initial_memory, + execution_mode, + free_tx_offset: 0, + } + } + + pub(crate) fn set_refund_for_current_tx(&mut self, refund: u32) { + let current_tx = self.current_tx(); + // We can't use the latest tx or the latest l2_block to find the tx, + // because we may fill the whole batch first and only then execute the txs one by one + let tx = self.find_tx_mut(current_tx); + tx.refund = refund; + } + + pub(crate) fn start_new_l2_block(&mut self, l2_block: L2BlockEnv) { + let last_block = self.last_l2_block(); + assert!( + !last_block.txs.is_empty(), + "Cannot create new miniblocks on top of empty ones" + ); + assert_next_block(&last_block.l2_block(), &l2_block); + self.push_l2_block(l2_block); + } + + /// This method bypasses sanity checks and should be used carefully. + pub(crate) fn push_l2_block(&mut self, l2_block: L2BlockEnv) { + self.l2_blocks + .push(BootloaderL2Block::new(l2_block, self.free_tx_index())) + } + + pub(crate) fn push_tx( + &mut self, + tx: TransactionData, + predefined_overhead: u32, + predefined_refund: u32, + compressed_bytecodes: Vec<CompressedBytecodeInfo>, + trusted_ergs_limit: U256, + chain_id: L2ChainId, + ) -> BootloaderMemory { + let tx_offset = self.free_tx_offset(); + let bootloader_tx = BootloaderTx::new( + tx, + predefined_refund, + predefined_overhead, + trusted_ergs_limit, + compressed_bytecodes, + tx_offset, + chain_id, + ); + + let mut memory = vec![]; + let compressed_bytecode_size = apply_tx_to_memory( + &mut memory, + &bootloader_tx, + self.last_l2_block(), + self.free_tx_index(), + self.free_tx_offset(), + self.compressed_bytecodes_encoding, + self.execution_mode, + self.last_l2_block().txs.is_empty(), + ); + self.compressed_bytecodes_encoding += compressed_bytecode_size; + self.free_tx_offset = tx_offset + bootloader_tx.encoded_len(); + self.last_mut_l2_block().push_tx(bootloader_tx); + memory + } + + pub(crate) fn last_l2_block(&self) -> &BootloaderL2Block { + self.l2_blocks.last().unwrap() + } + + fn last_mut_l2_block(&mut self) -> &mut BootloaderL2Block { + self.l2_blocks.last_mut().unwrap() + } + + /// Applies all bootloader transactions to the initial memory + pub(crate) fn bootloader_memory(&self) -> BootloaderMemory { + let mut initial_memory = self.initial_memory.clone(); + let mut offset = 0; + let mut compressed_bytecodes_offset = 0; + let mut tx_index = 0; + for l2_block in &self.l2_blocks { + for (num, tx) in l2_block.txs.iter().enumerate() { + let compressed_bytecodes_size = apply_tx_to_memory( + &mut initial_memory, + tx, + l2_block, + tx_index, + offset, + compressed_bytecodes_offset, + self.execution_mode, + num == 0, + ); + offset += tx.encoded_len(); + compressed_bytecodes_offset += compressed_bytecodes_size; + tx_index += 1; + } + if l2_block.txs.is_empty() { + apply_l2_block(&mut initial_memory, l2_block, tx_index) + } + } + initial_memory + } + + fn free_tx_offset(&self) -> usize { + self.free_tx_offset + } + + pub(crate) fn free_tx_index(&self) -> usize { + let l2_block = self.last_l2_block(); + l2_block.first_tx_index + l2_block.txs.len() + } + + pub(crate) fn get_last_tx_compressed_bytecodes(&self) -> Vec<CompressedBytecodeInfo> { + if let Some(tx) = self.last_l2_block().txs.last() { + tx.compressed_bytecodes.clone() + } else {
vec![] + } + } + + /// Returns the ID of the current tx + pub(crate) fn current_tx(&self) -> usize { + self.tx_to_execute + .checked_sub(1) + .expect("There is no current tx to execute") + } + + /// Returns the ID of the next transaction to be executed and increments the local transaction counter. + pub(crate) fn move_tx_to_execute_pointer(&mut self) -> usize { + assert!( + self.tx_to_execute < self.free_tx_index(), + "Attempt to execute tx that was not pushed to memory. Tx ID: {}, txs in bootloader: {}", + self.tx_to_execute, + self.free_tx_index() + ); + + let old = self.tx_to_execute; + self.tx_to_execute += 1; + old + } + + /// Get the offset of the tx description + pub(crate) fn get_tx_description_offset(&self, tx_index: usize) -> usize { + TX_DESCRIPTION_OFFSET + self.find_tx(tx_index).offset + } + + pub(crate) fn insert_fictive_l2_block(&mut self) -> &BootloaderL2Block { + let block = self.last_l2_block(); + if !block.txs.is_empty() { + self.start_new_l2_block(L2BlockEnv { + timestamp: block.timestamp + 1, + number: block.number + 1, + prev_block_hash: block.get_hash(), + max_virtual_blocks_to_create: 1, + }); + } + self.last_l2_block() + } + + fn find_tx(&self, tx_index: usize) -> &BootloaderTx { + for block in self.l2_blocks.iter().rev() { + if tx_index >= block.first_tx_index { + return &block.txs[tx_index - block.first_tx_index]; + } + } + panic!("The tx with this index must exist") + } + + fn find_tx_mut(&mut self, tx_index: usize) -> &mut BootloaderTx { + for block in self.l2_blocks.iter_mut().rev() { + if tx_index >= block.first_tx_index { + return &mut block.txs[tx_index - block.first_tx_index]; + } + } + panic!("The tx with this index must exist") + } + + pub(crate) fn get_snapshot(&self) -> BootloaderStateSnapshot { + BootloaderStateSnapshot { + tx_to_execute: self.tx_to_execute, + l2_blocks_len: self.l2_blocks.len(), + last_l2_block: self.last_l2_block().make_snapshot(), + compressed_bytecodes_encoding: self.compressed_bytecodes_encoding, + free_tx_offset: self.free_tx_offset, + } + } + + pub(crate) fn apply_snapshot(&mut self, snapshot: BootloaderStateSnapshot) { + self.tx_to_execute = snapshot.tx_to_execute; + self.compressed_bytecodes_encoding = snapshot.compressed_bytecodes_encoding; + self.free_tx_offset = snapshot.free_tx_offset; + match self.l2_blocks.len().cmp(&snapshot.l2_blocks_len) { + Ordering::Greater => self.l2_blocks.truncate(snapshot.l2_blocks_len), + Ordering::Less => panic!("Applying snapshot from future is not supported"), + Ordering::Equal => {} + } + self.last_mut_l2_block() + .apply_snapshot(snapshot.last_l2_block); + } +} diff --git a/core/multivm_deps/vm_virtual_blocks/src/bootloader_state/tx.rs b/core/multivm_deps/vm_virtual_blocks/src/bootloader_state/tx.rs new file mode 100644 index 000000000000..ecf40eca824e --- /dev/null +++ b/core/multivm_deps/vm_virtual_blocks/src/bootloader_state/tx.rs @@ -0,0 +1,48 @@ +use crate::types::internals::TransactionData; +use zksync_types::{L2ChainId, H256, U256}; +use zksync_utils::bytecode::CompressedBytecodeInfo; + +/// Information about a tx necessary for execution in the bootloader. +#[derive(Debug, Clone)] +pub(super) struct BootloaderTx { + pub(super) hash: H256, + /// Encoded transaction + pub(super) encoded: Vec<U256>, + /// Compressed bytecodes, which have been published during this transaction + pub(super) compressed_bytecodes: Vec<CompressedBytecodeInfo>, + /// Refunds for this transaction + pub(super) refund: u32, + /// Gas overhead + pub(super) gas_overhead: u32, + /// Gas limit for this transaction.
It can be different from the gas limit inside the transaction + pub(super) trusted_gas_limit: U256, + /// Offset of the tx in bootloader memory + pub(super) offset: usize, +} + +impl BootloaderTx { + pub(super) fn new( + tx: TransactionData, + predefined_refund: u32, + predefined_overhead: u32, + trusted_gas_limit: U256, + compressed_bytecodes: Vec<CompressedBytecodeInfo>, + offset: usize, + chain_id: L2ChainId, + ) -> Self { + let hash = tx.tx_hash(chain_id); + Self { + hash, + encoded: tx.into_tokens(), + compressed_bytecodes, + refund: predefined_refund, + gas_overhead: predefined_overhead, + trusted_gas_limit, + offset, + } + } + + pub(super) fn encoded_len(&self) -> usize { + self.encoded.len() + } +} diff --git a/core/multivm_deps/vm_virtual_blocks/src/bootloader_state/utils.rs b/core/multivm_deps/vm_virtual_blocks/src/bootloader_state/utils.rs new file mode 100644 index 000000000000..31ec2ede5995 --- /dev/null +++ b/core/multivm_deps/vm_virtual_blocks/src/bootloader_state/utils.rs @@ -0,0 +1,140 @@ +use zksync_types::U256; +use zksync_utils::bytecode::CompressedBytecodeInfo; +use zksync_utils::{bytes_to_be_words, h256_to_u256}; + +use crate::bootloader_state::l2_block::BootloaderL2Block; +use crate::constants::{ + BOOTLOADER_TX_DESCRIPTION_OFFSET, BOOTLOADER_TX_DESCRIPTION_SIZE, COMPRESSED_BYTECODES_OFFSET, + OPERATOR_REFUNDS_OFFSET, TX_DESCRIPTION_OFFSET, TX_OPERATOR_L2_BLOCK_INFO_OFFSET, + TX_OPERATOR_SLOTS_PER_L2_BLOCK_INFO, TX_OVERHEAD_OFFSET, TX_TRUSTED_GAS_LIMIT_OFFSET, +}; +use crate::{BootloaderMemory, TxExecutionMode}; + +use super::tx::BootloaderTx; + +pub(super) fn get_memory_for_compressed_bytecodes( + compressed_bytecodes: &[CompressedBytecodeInfo], +) -> Vec<U256> { + let memory_addition: Vec<_> = compressed_bytecodes + .iter() + .flat_map(|x| x.encode_call()) + .collect(); + + bytes_to_be_words(memory_addition) +} + +#[allow(clippy::too_many_arguments)] +pub(super) fn apply_tx_to_memory( + memory: &mut BootloaderMemory, + bootloader_tx: &BootloaderTx, + bootloader_l2_block: &BootloaderL2Block, + tx_index: usize, + tx_offset: usize, + compressed_bytecodes_size: usize, + execution_mode: TxExecutionMode, + start_new_l2_block: bool, +) -> usize { + let bootloader_description_offset = + BOOTLOADER_TX_DESCRIPTION_OFFSET + BOOTLOADER_TX_DESCRIPTION_SIZE * tx_index; + let tx_description_offset = TX_DESCRIPTION_OFFSET + tx_offset; + + memory.push(( + bootloader_description_offset, + assemble_tx_meta(execution_mode, true), + )); + + memory.push(( + bootloader_description_offset + 1, + U256::from_big_endian(&(32 * tx_description_offset).to_be_bytes()), + )); + + let refund_offset = OPERATOR_REFUNDS_OFFSET + tx_index; + memory.push((refund_offset, bootloader_tx.refund.into())); + + let overhead_offset = TX_OVERHEAD_OFFSET + tx_index; + memory.push((overhead_offset, bootloader_tx.gas_overhead.into())); + + let trusted_gas_limit_offset = TX_TRUSTED_GAS_LIMIT_OFFSET + tx_index; + memory.push((trusted_gas_limit_offset, bootloader_tx.trusted_gas_limit)); + + memory.extend( + (tx_description_offset..tx_description_offset + bootloader_tx.encoded_len()) + .zip(bootloader_tx.encoded.clone()), + ); + + let bootloader_l2_block = if start_new_l2_block { + bootloader_l2_block.clone() + } else { + bootloader_l2_block.interim_version() + }; + apply_l2_block(memory, &bootloader_l2_block, tx_index); + + // Note: the +1 is for the pointer slot + let compressed_bytecodes_offset = COMPRESSED_BYTECODES_OFFSET + 1 + compressed_bytecodes_size; + + let encoded_compressed_bytecodes =
get_memory_for_compressed_bytecodes(&bootloader_tx.compressed_bytecodes); + let compressed_bytecodes_encoding = encoded_compressed_bytecodes.len(); + + memory.extend( + (compressed_bytecodes_offset + ..compressed_bytecodes_offset + encoded_compressed_bytecodes.len()) + .zip(encoded_compressed_bytecodes), + ); + compressed_bytecodes_encoding +} + +pub(crate) fn apply_l2_block( + memory: &mut BootloaderMemory, + bootloader_l2_block: &BootloaderL2Block, + txs_index: usize, +) { + // Since L2 block infos start from the TX_OPERATOR_L2_BLOCK_INFO_OFFSET and each + // L2 block info takes TX_OPERATOR_SLOTS_PER_L2_BLOCK_INFO slots, the position where the L2 block info + // for this transaction needs to be written is: + + let block_position = + TX_OPERATOR_L2_BLOCK_INFO_OFFSET + txs_index * TX_OPERATOR_SLOTS_PER_L2_BLOCK_INFO; + + memory.extend(vec![ + (block_position, bootloader_l2_block.number.into()), + (block_position + 1, bootloader_l2_block.timestamp.into()), + ( + block_position + 2, + h256_to_u256(bootloader_l2_block.prev_block_hash), + ), + ( + block_position + 3, + bootloader_l2_block.max_virtual_blocks_to_create.into(), + ), + ]) +} + +/// Forms a word that contains meta information for the transaction execution. +/// +/// # Current layout +/// +/// - 0 byte (MSB): server-side tx execution mode +/// In the server, we may want to execute different parts of the transaction in different contexts +/// For example, when checking validity, we don't want to actually execute the transaction and have side effects. +/// +/// Possible values: +/// - 0x00: validate & execute (normal mode) +/// - 0x02: execute but DO NOT validate +/// +/// - 31 byte (LSB): whether to execute transaction or not (at all). +pub(super) fn assemble_tx_meta(execution_mode: TxExecutionMode, execute_tx: bool) -> U256 { + let mut output = [0u8; 32]; + + // Set 0 byte (execution mode) + output[0] = match execution_mode { + TxExecutionMode::VerifyExecute => 0x00, + TxExecutionMode::EstimateFee { .. } => 0x00, + TxExecutionMode::EthCall { .. } => 0x02, + }; + + // Set 31 byte (marker for tx execution) + output[31] = u8::from(execute_tx); + + U256::from_big_endian(&output) +} diff --git a/core/multivm_deps/vm_virtual_blocks/src/constants.rs b/core/multivm_deps/vm_virtual_blocks/src/constants.rs new file mode 100644 index 000000000000..a51688b851e7 --- /dev/null +++ b/core/multivm_deps/vm_virtual_blocks/src/constants.rs @@ -0,0 +1,111 @@ +use zk_evm::aux_structures::MemoryPage; + +use zksync_config::constants::{ + L1_GAS_PER_PUBDATA_BYTE, MAX_L2_TX_GAS_LIMIT, MAX_NEW_FACTORY_DEPS, MAX_TXS_IN_BLOCK, + USED_BOOTLOADER_MEMORY_WORDS, +}; + +pub use zk_evm::zkevm_opcode_defs::system_params::{ + ERGS_PER_CIRCUIT, INITIAL_STORAGE_WRITE_PUBDATA_BYTES, MAX_PUBDATA_PER_BLOCK, +}; + +use crate::old_vm::utils::heap_page_from_base; + +/// Max cycles for a single transaction. +pub const MAX_CYCLES_FOR_TX: u32 = u32::MAX; + +/// The first 32 slots are reserved for debugging purposes +pub(crate) const DEBUG_SLOTS_OFFSET: usize = 8; +pub(crate) const DEBUG_FIRST_SLOTS: usize = 32; +/// The next 33 slots are reserved for dealing with the paymaster context (1 slot for storing length + 32 slots for storing the actual context). +pub(crate) const PAYMASTER_CONTEXT_SLOTS: usize = 32 + 1; +/// The next PAYMASTER_CONTEXT_SLOTS + 7 free slots are needed before each tx, so that the +/// postOp operation could be encoded correctly.
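+// Illustrative note, not part of the original patch: with PAYMASTER_CONTEXT_SLOTS = 32 + 1 = 33, the constant below reserves 33 + 7 = 40 free slots ahead of each transaction for postOp encoding.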
+pub(crate) const MAX_POSTOP_SLOTS: usize = PAYMASTER_CONTEXT_SLOTS + 7; + +/// Slots used to store the current L2 transaction's hash and the hash recommended +/// to be used for signing the transaction's content. +const CURRENT_L2_TX_HASHES_SLOTS: usize = 2; + +/// Slots used to store the calldata for the KnownCodesStorage to mark new factory +/// dependencies as known ones. Besides the slots for the new factory dependencies themselves +/// another 4 slots are needed for: selector, marker of whether the user should pay for the pubdata, +/// the offset for the encoding of the array as well as the length of the array. +const NEW_FACTORY_DEPS_RESERVED_SLOTS: usize = MAX_NEW_FACTORY_DEPS + 4; + +/// The operator can provide for each transaction the proposed minimal refund +pub(crate) const OPERATOR_REFUNDS_SLOTS: usize = MAX_TXS_IN_BLOCK; + +pub(crate) const OPERATOR_REFUNDS_OFFSET: usize = DEBUG_SLOTS_OFFSET + + DEBUG_FIRST_SLOTS + + PAYMASTER_CONTEXT_SLOTS + + CURRENT_L2_TX_HASHES_SLOTS + + NEW_FACTORY_DEPS_RESERVED_SLOTS; + +pub(crate) const TX_OVERHEAD_OFFSET: usize = OPERATOR_REFUNDS_OFFSET + OPERATOR_REFUNDS_SLOTS; +pub(crate) const TX_OVERHEAD_SLOTS: usize = MAX_TXS_IN_BLOCK; + +pub(crate) const TX_TRUSTED_GAS_LIMIT_OFFSET: usize = TX_OVERHEAD_OFFSET + TX_OVERHEAD_SLOTS; +pub(crate) const TX_TRUSTED_GAS_LIMIT_SLOTS: usize = MAX_TXS_IN_BLOCK; + +pub(crate) const COMPRESSED_BYTECODES_SLOTS: usize = 32768; + +pub(crate) const BOOTLOADER_TX_DESCRIPTION_OFFSET: usize = + COMPRESSED_BYTECODES_OFFSET + COMPRESSED_BYTECODES_SLOTS; + +/// The size of the bootloader memory dedicated to the encodings of transactions +pub const BOOTLOADER_TX_ENCODING_SPACE: u32 = + (USED_BOOTLOADER_MEMORY_WORDS - TX_DESCRIPTION_OFFSET - MAX_TXS_IN_BLOCK) as u32; + +// Size of the bootloader tx description in words +pub(crate) const BOOTLOADER_TX_DESCRIPTION_SIZE: usize = 2; + +/// The actual descriptions of transactions should start after the minor descriptions and MAX_POSTOP_SLOTS +/// free slots to allow postOp encoding. +pub(crate) const TX_DESCRIPTION_OFFSET: usize = BOOTLOADER_TX_DESCRIPTION_OFFSET + + BOOTLOADER_TX_DESCRIPTION_SIZE * MAX_TXS_IN_BLOCK + + MAX_POSTOP_SLOTS; + +pub(crate) const TX_GAS_LIMIT_OFFSET: usize = 4; + +const INITIAL_BASE_PAGE: u32 = 8; +pub const BOOTLOADER_HEAP_PAGE: u32 = heap_page_from_base(MemoryPage(INITIAL_BASE_PAGE)).0; +pub(crate) const BLOCK_OVERHEAD_GAS: u32 = 1200000; +pub(crate) const BLOCK_OVERHEAD_L1_GAS: u32 = 1000000; +pub const BLOCK_OVERHEAD_PUBDATA: u32 = BLOCK_OVERHEAD_L1_GAS / L1_GAS_PER_PUBDATA_BYTE; + +/// VM Hooks are used for communication between bootloader and tracers. +/// The 'type'/'opcode' is put into VM_HOOK_POSITION slot, +/// and VM_HOOKS_PARAMS_COUNT parameters (each 32 bytes) are put in the slots before. +/// So the layout looks like this: +/// [param 0][param 1][vmhook opcode] +pub const VM_HOOK_POSITION: u32 = RESULT_SUCCESS_FIRST_SLOT - 1; +pub const VM_HOOK_PARAMS_COUNT: u32 = 2; +pub const VM_HOOK_PARAMS_START_POSITION: u32 = VM_HOOK_POSITION - VM_HOOK_PARAMS_COUNT; + +pub(crate) const MAX_MEM_SIZE_BYTES: u32 = 16777216; // 2^24 + +/// Arbitrary space in memory closer to the end of the page +pub const RESULT_SUCCESS_FIRST_SLOT: u32 = + (MAX_MEM_SIZE_BYTES - (MAX_TXS_IN_BLOCK as u32) * 32) / 32; + +/// How much gas the bootloader is allowed to spend within one block.
+/// Note that this value doesn't correspond to the gas limit of any particular transaction +/// (except for the fact that, of course, gas limit for each transaction should be <= `BLOCK_GAS_LIMIT`). +pub const BLOCK_GAS_LIMIT: u32 = zk_evm::zkevm_opcode_defs::system_params::VM_INITIAL_FRAME_ERGS; + +/// How much gas a single transaction is allowed to spend in the eth_call method +pub const ETH_CALL_GAS_LIMIT: u32 = MAX_L2_TX_GAS_LIMIT as u32; + +/// ID of the transaction from L1 +pub const L1_TX_TYPE: u8 = 255; + +pub(crate) const TX_OPERATOR_L2_BLOCK_INFO_OFFSET: usize = + TX_TRUSTED_GAS_LIMIT_OFFSET + TX_TRUSTED_GAS_LIMIT_SLOTS; + +pub(crate) const TX_OPERATOR_SLOTS_PER_L2_BLOCK_INFO: usize = 4; +pub(crate) const TX_OPERATOR_L2_BLOCK_INFO_SLOTS: usize = + (MAX_TXS_IN_BLOCK + 1) * TX_OPERATOR_SLOTS_PER_L2_BLOCK_INFO; + +pub(crate) const COMPRESSED_BYTECODES_OFFSET: usize = + TX_OPERATOR_L2_BLOCK_INFO_OFFSET + TX_OPERATOR_L2_BLOCK_INFO_SLOTS; diff --git a/core/multivm_deps/vm_virtual_blocks/src/errors/bootloader_error.rs b/core/multivm_deps/vm_virtual_blocks/src/errors/bootloader_error.rs new file mode 100644 index 000000000000..07ed0899b220 --- /dev/null +++ b/core/multivm_deps/vm_virtual_blocks/src/errors/bootloader_error.rs @@ -0,0 +1,67 @@ +/// Error codes returned by the bootloader. +#[derive(Debug)] +pub(crate) enum BootloaderErrorCode { + EthCall, + AccountTxValidationFailed, + FailedToChargeFee, + FromIsNotAnAccount, + FailedToCheckAccount, + UnacceptableGasPrice, + PayForTxFailed, + PrePaymasterPreparationFailed, + PaymasterValidationFailed, + FailedToSendFeesToTheOperator, + FailedToSetPrevBlockHash, + UnacceptablePubdataPrice, + TxValidationError, + MaxPriorityFeeGreaterThanMaxFee, + BaseFeeGreaterThanMaxFeePerGas, + PaymasterReturnedInvalidContext, + PaymasterContextIsTooLong, + AssertionError, + FailedToMarkFactoryDeps, + TxValidationOutOfGas, + NotEnoughGasProvided, + AccountReturnedInvalidMagic, + PaymasterReturnedInvalidMagic, + MintEtherFailed, + FailedToAppendTransactionToL2Block, + FailedToSetL2Block, + FailedToPublishBlockDataToL1, + Unknown, +} + +impl From<u8> for BootloaderErrorCode { + fn from(code: u8) -> BootloaderErrorCode { + match code { + 0 => BootloaderErrorCode::EthCall, + 1 => BootloaderErrorCode::AccountTxValidationFailed, + 2 => BootloaderErrorCode::FailedToChargeFee, + 3 => BootloaderErrorCode::FromIsNotAnAccount, + 4 => BootloaderErrorCode::FailedToCheckAccount, + 5 => BootloaderErrorCode::UnacceptableGasPrice, + 6 => BootloaderErrorCode::FailedToSetPrevBlockHash, + 7 => BootloaderErrorCode::PayForTxFailed, + 8 => BootloaderErrorCode::PrePaymasterPreparationFailed, + 9 => BootloaderErrorCode::PaymasterValidationFailed, + 10 => BootloaderErrorCode::FailedToSendFeesToTheOperator, + 11 => BootloaderErrorCode::UnacceptablePubdataPrice, + 12 => BootloaderErrorCode::TxValidationError, + 13 => BootloaderErrorCode::MaxPriorityFeeGreaterThanMaxFee, + 14 => BootloaderErrorCode::BaseFeeGreaterThanMaxFeePerGas, + 15 => BootloaderErrorCode::PaymasterReturnedInvalidContext, + 16 => BootloaderErrorCode::PaymasterContextIsTooLong, + 17 => BootloaderErrorCode::AssertionError, + 18 => BootloaderErrorCode::FailedToMarkFactoryDeps, + 19 => BootloaderErrorCode::TxValidationOutOfGas, + 20 => BootloaderErrorCode::NotEnoughGasProvided, + 21 => BootloaderErrorCode::AccountReturnedInvalidMagic, + 22 => BootloaderErrorCode::PaymasterReturnedInvalidMagic, + 23 => BootloaderErrorCode::MintEtherFailed, + 24 => BootloaderErrorCode::FailedToAppendTransactionToL2Block, + 25 =>
BootloaderErrorCode::FailedToSetL2Block, + 26 => BootloaderErrorCode::FailedToPublishBlockDataToL1, + _ => BootloaderErrorCode::Unknown, + } + } +} diff --git a/core/multivm_deps/vm_virtual_blocks/src/errors/bytecode_compression.rs b/core/multivm_deps/vm_virtual_blocks/src/errors/bytecode_compression.rs new file mode 100644 index 000000000000..c6cd094ae948 --- /dev/null +++ b/core/multivm_deps/vm_virtual_blocks/src/errors/bytecode_compression.rs @@ -0,0 +1,8 @@ +use thiserror::Error; + +/// Errors related to bytecode compression. +#[derive(Debug, Error)] +pub enum BytecodeCompressionError { + #[error("Bytecode compression failed")] + BytecodeCompressionFailed, +} diff --git a/core/multivm_deps/vm_virtual_blocks/src/errors/halt.rs b/core/multivm_deps/vm_virtual_blocks/src/errors/halt.rs new file mode 100644 index 000000000000..10c8a8d702b9 --- /dev/null +++ b/core/multivm_deps/vm_virtual_blocks/src/errors/halt.rs @@ -0,0 +1,107 @@ +use crate::errors::VmRevertReason; +use std::fmt::{Display, Formatter}; + +/// Structure for non-contract errors from the Virtual Machine (EVM). +/// Differentiates VM-specific issues from contract-related errors. +#[derive(Debug, Clone, PartialEq)] +pub enum Halt { + // Can only be returned in VerifyAndExecute + ValidationFailed(VmRevertReason), + PaymasterValidationFailed(VmRevertReason), + PrePaymasterPreparationFailed(VmRevertReason), + PayForTxFailed(VmRevertReason), + FailedToMarkFactoryDependencies(VmRevertReason), + FailedToChargeFee(VmRevertReason), + // Emitted when trying to call a transaction from an account that has not + // been deployed as an account (i.e. the `from` is just a contract). + // Can only be returned in VerifyAndExecute + FromIsNotAnAccount, + // Currently cannot be returned. Should be removed when refactoring errors. + InnerTxError, + Unknown(VmRevertReason), + // Temporarily used instead of panics to provide a better experience for developers: + // their transaction would simply be rejected and they'll be able to provide + // information about the cause to us. + UnexpectedVMBehavior(String), + // Bootloader is out of gas. + BootloaderOutOfGas, + // Transaction has too big a gas limit and will not be executed by the server. + TooBigGasLimit, + // The bootloader did not have enough gas to start the transaction in the first place + NotEnoughGasProvided, + // The tx produced too many cold storage accesses, exceeding the missing invocation limit + MissingInvocationLimitReached, + // Failed to set information about the L2 block + FailedToSetL2Block(String), + // Failed to append the transaction to the current L2 block + FailedToAppendTransactionToL2Block(String), + VMPanic, +} + +impl Display for Halt { + fn fmt(&self, f: &mut Formatter<'_>) -> std::fmt::Result { + match self { + Halt::ValidationFailed(reason) => { + write!(f, "Account validation error: {}", reason) + } + Halt::FailedToChargeFee(reason) => { + write!(f, "Failed to charge fee: {}", reason) + } + // Emitted when trying to call a transaction from an account that has not + been deployed as an account (i.e. the `from` is just a contract).
+ Halt::FromIsNotAnAccount => write!(f, "Sender is not an account"), + Halt::InnerTxError => write!(f, "Bootloader-based tx failed"), + Halt::PaymasterValidationFailed(reason) => { + write!(f, "Paymaster validation error: {}", reason) + } + Halt::PrePaymasterPreparationFailed(reason) => { + write!(f, "Pre-paymaster preparation error: {}", reason) + } + Halt::Unknown(reason) => write!(f, "Unknown reason: {}", reason), + Halt::UnexpectedVMBehavior(problem) => { + write!(f, + "Virtual machine entered an unexpected state. Please contact developers and provide transaction details \ + that caused this error. Error description: {problem}" + ) + } + Halt::BootloaderOutOfGas => write!(f, "Bootloader out of gas"), + Halt::NotEnoughGasProvided => write!( + f, + "Bootloader did not have enough gas to start the transaction" + ), + Halt::FailedToMarkFactoryDependencies(reason) => { + write!(f, "Failed to mark factory dependencies: {}", reason) + } + Halt::PayForTxFailed(reason) => { + write!(f, "Failed to pay for the transaction: {}", reason) + } + Halt::TooBigGasLimit => { + write!( + f, + "Transaction has too big an ergs limit and will not be executed by the server" + ) + } + Halt::MissingInvocationLimitReached => { + write!(f, "Tx produced too many cold storage accesses") + } + Halt::VMPanic => { + write!(f, "VM panicked") + } + Halt::FailedToSetL2Block(reason) => { + write!( + f, + "Failed to set information about the L2 block: {}", + reason + ) + } + Halt::FailedToAppendTransactionToL2Block(reason) => { + write!( + f, + "Failed to append the transaction to the current L2 block: {}", + reason + ) + } + } + } +} diff --git a/core/multivm_deps/vm_virtual_blocks/src/errors/mod.rs b/core/multivm_deps/vm_virtual_blocks/src/errors/mod.rs new file mode 100644 index 000000000000..43aecf796013 --- /dev/null +++ b/core/multivm_deps/vm_virtual_blocks/src/errors/mod.rs @@ -0,0 +1,11 @@ +pub(crate) use bootloader_error::BootloaderErrorCode; +pub use bytecode_compression::BytecodeCompressionError; +pub use halt::Halt; +pub use tx_revert_reason::TxRevertReason; +pub use vm_revert_reason::{VmRevertReason, VmRevertReasonParsingError}; + +mod bootloader_error; +mod bytecode_compression; +mod halt; +mod tx_revert_reason; +mod vm_revert_reason; diff --git a/core/multivm_deps/vm_virtual_blocks/src/errors/tx_revert_reason.rs b/core/multivm_deps/vm_virtual_blocks/src/errors/tx_revert_reason.rs new file mode 100644 index 000000000000..8e65b15a0976 --- /dev/null +++ b/core/multivm_deps/vm_virtual_blocks/src/errors/tx_revert_reason.rs @@ -0,0 +1,138 @@ +use crate::errors::halt::Halt; + +use std::fmt::Display; + +use super::{BootloaderErrorCode, VmRevertReason}; + +#[derive(Debug, Clone, PartialEq)] +pub enum TxRevertReason { + // Returned when the execution of an L2 transaction has failed + // Or EthCall has failed + TxReverted(VmRevertReason), + // Returned when some validation has failed or some internal error occurred + Halt(Halt), +} + +impl TxRevertReason { + pub fn parse_error(bytes: &[u8]) -> Self { + // The first byte should correspond to the error code. + // If the error is shorter than that, we will use a standardized bootloader error. + if bytes.is_empty() { + return Self::Halt(Halt::UnexpectedVMBehavior( + "Bootloader returned an empty error".to_string(), + )); + } + + let (error_code, error_msg) = bytes.split_at(1); + let revert_reason = VmRevertReason::from(error_msg); + + // `error_code` is a big-endian number, so we can safely take the first byte of it.
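+ // Worked example, illustrative and not part of the original patch: for revert data whose first byte is 0x01, `BootloaderErrorCode::from(1)` yields `AccountTxValidationFailed`, so the match below returns `Self::Halt(Halt::ValidationFailed(revert_reason))`.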
match BootloaderErrorCode::from(error_code[0]) { + BootloaderErrorCode::EthCall => Self::TxReverted(revert_reason), + BootloaderErrorCode::AccountTxValidationFailed => Self::Halt(Halt::ValidationFailed(revert_reason)), + BootloaderErrorCode::FailedToChargeFee => Self::Halt(Halt::FailedToChargeFee(revert_reason)), + BootloaderErrorCode::FromIsNotAnAccount => Self::Halt(Halt::FromIsNotAnAccount), + BootloaderErrorCode::FailedToCheckAccount => Self::Halt(Halt::ValidationFailed(VmRevertReason::General { + msg: "Failed to check if `from` is an account. Most likely not enough gas provided".to_string(), + data: vec![], + })), + BootloaderErrorCode::UnacceptableGasPrice => Self::Halt(Halt::UnexpectedVMBehavior( + "The operator included transaction with an unacceptable gas price".to_owned(), + )), + BootloaderErrorCode::PrePaymasterPreparationFailed => { + Self::Halt(Halt::PrePaymasterPreparationFailed(revert_reason)) + } + BootloaderErrorCode::PaymasterValidationFailed => { + Self::Halt(Halt::PaymasterValidationFailed(revert_reason)) + } + BootloaderErrorCode::FailedToSendFeesToTheOperator => { + Self::Halt(Halt::UnexpectedVMBehavior("FailedToSendFeesToTheOperator".to_owned())) + } + BootloaderErrorCode::FailedToSetPrevBlockHash => { + panic!( + "The bootloader failed to set previous block hash. Reason: {}", + revert_reason + ) + } + BootloaderErrorCode::UnacceptablePubdataPrice => { + Self::Halt(Halt::UnexpectedVMBehavior("UnacceptablePubdataPrice".to_owned())) + } + // This differs from the AccountTxValidationFailed error in that the error + // was not produced by the account itself, but for some other unknown reason (most likely not enough gas) + BootloaderErrorCode::TxValidationError => Self::Halt(Halt::ValidationFailed(revert_reason)), + // Note that `InnerTxError` is derived only after the actual tx execution, so + it is not parsed here.
An unknown error means that the bootloader failed for a reason + that was not specified by the protocol: + BootloaderErrorCode::MaxPriorityFeeGreaterThanMaxFee => { + Self::Halt(Halt::UnexpectedVMBehavior("Max priority fee greater than max fee".to_owned())) + } + BootloaderErrorCode::PaymasterReturnedInvalidContext => { + Self::Halt(Halt::PaymasterValidationFailed(VmRevertReason::General { + msg: String::from("Paymaster returned invalid context"), + data: vec![], + })) + } + BootloaderErrorCode::PaymasterContextIsTooLong => { + Self::Halt(Halt::PaymasterValidationFailed(VmRevertReason::General { + msg: String::from("Paymaster returned context that is too long"), + data: vec![], + })) + } + BootloaderErrorCode::AssertionError => { + Self::Halt(Halt::UnexpectedVMBehavior(format!("Assertion error: {}", revert_reason))) + } + BootloaderErrorCode::BaseFeeGreaterThanMaxFeePerGas => Self::Halt(Halt::UnexpectedVMBehavior( + "Block.basefee is greater than max fee per gas".to_owned(), + )), + BootloaderErrorCode::PayForTxFailed => { + Self::Halt(Halt::PayForTxFailed(revert_reason)) + }, + BootloaderErrorCode::FailedToMarkFactoryDeps => { + let (msg, data) = if let VmRevertReason::General { msg, data } = revert_reason { + (msg, data) + } else { + (String::from("Most likely not enough gas provided"), vec![]) + }; + Self::Halt(Halt::FailedToMarkFactoryDependencies(VmRevertReason::General { + msg, data + })) + }, + BootloaderErrorCode::TxValidationOutOfGas => { + Self::Halt(Halt::ValidationFailed(VmRevertReason::General { msg: String::from("Not enough gas for transaction validation"), data: vec![] })) + }, + BootloaderErrorCode::NotEnoughGasProvided => { + Self::Halt(Halt::NotEnoughGasProvided) + }, + BootloaderErrorCode::AccountReturnedInvalidMagic => { + Self::Halt(Halt::ValidationFailed(VmRevertReason::General { msg: String::from("Account validation returned invalid magic value. Most often this means that the signature is incorrect"), data: vec![] })) + }, + BootloaderErrorCode::PaymasterReturnedInvalidMagic => { + Self::Halt(Halt::ValidationFailed(VmRevertReason::General { msg: String::from("Paymaster validation returned invalid magic value. Please refer to the documentation of the paymaster for more details"), data: vec![] })) + } + BootloaderErrorCode::Unknown => Self::Halt(Halt::UnexpectedVMBehavior(format!( + "Unsupported error code: {}.
Revert reason: {}", + error_code[0], revert_reason + ))), + BootloaderErrorCode::MintEtherFailed => Self::Halt(Halt::UnexpectedVMBehavior(format!("Failed to mint ether: {}", revert_reason))), + BootloaderErrorCode::FailedToAppendTransactionToL2Block => { + Self::Halt(Halt::FailedToAppendTransactionToL2Block(format!("Failed to append transaction to L2 block: {}", revert_reason))) + } + BootloaderErrorCode::FailedToSetL2Block => { + Self::Halt(Halt::FailedToSetL2Block(format!("{}", revert_reason))) + + } + BootloaderErrorCode::FailedToPublishBlockDataToL1 => { + Self::Halt(Halt::UnexpectedVMBehavior(format!("Failed to publish block data to L1: {}", revert_reason))) + } + } + } +} + +impl Display for TxRevertReason { + fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { + match &self { + TxRevertReason::TxReverted(reason) => write!(f, "{}", reason), + TxRevertReason::Halt(reason) => write!(f, "{}", reason), + } + } +} diff --git a/core/multivm_deps/vm_virtual_blocks/src/errors/vm_revert_reason.rs b/core/multivm_deps/vm_virtual_blocks/src/errors/vm_revert_reason.rs new file mode 100644 index 000000000000..531d8b5507f6 --- /dev/null +++ b/core/multivm_deps/vm_virtual_blocks/src/errors/vm_revert_reason.rs @@ -0,0 +1,252 @@ +use std::fmt::{Debug, Display}; + +use zksync_types::U256; + +#[derive(Debug, thiserror::Error)] +pub enum VmRevertReasonParsingError { + #[error("Incorrect data offset. Data: {0:?}")] + IncorrectDataOffset(Vec), + #[error("Input is too short. Data: {0:?}")] + InputIsTooShort(Vec), + #[error("Incorrect string length. Data: {0:?}")] + IncorrectStringLength(Vec), +} + +/// Rich Revert Reasons https://github.com/0xProject/ZEIPs/issues/32 +#[derive(Debug, Clone, PartialEq)] +pub enum VmRevertReason { + General { + msg: String, + data: Vec, + }, + InnerTxError, + VmError, + Unknown { + function_selector: Vec, + data: Vec, + }, +} + +impl VmRevertReason { + const GENERAL_ERROR_SELECTOR: &'static [u8] = &[0x08, 0xc3, 0x79, 0xa0]; + fn parse_general_error(raw_bytes: &[u8]) -> Result { + let bytes = &raw_bytes[4..]; + if bytes.len() < 32 { + return Err(VmRevertReasonParsingError::InputIsTooShort(bytes.to_vec())); + } + let data_offset = U256::from_big_endian(&bytes[0..32]).as_usize(); + + // Data offset couldn't be less than 32 because data offset size is 32 bytes + // and data offset bytes are part of the offset. Also data offset couldn't be greater than + // data length + if data_offset > bytes.len() || data_offset < 32 { + return Err(VmRevertReasonParsingError::IncorrectDataOffset( + bytes.to_vec(), + )); + }; + + let data = &bytes[data_offset..]; + + if data.len() < 32 { + return Err(VmRevertReasonParsingError::InputIsTooShort(bytes.to_vec())); + }; + + let string_length = U256::from_big_endian(&data[0..32]).as_usize(); + + if string_length + 32 > data.len() { + return Err(VmRevertReasonParsingError::IncorrectStringLength( + bytes.to_vec(), + )); + }; + + let raw_data = &data[32..32 + string_length]; + Ok(Self::General { + msg: String::from_utf8_lossy(raw_data).to_string(), + data: raw_bytes.to_vec(), + }) + } + + pub fn to_user_friendly_string(&self) -> String { + match self { + // In case of `Unknown` reason we suppress it to prevent verbose Error function_selector = 0x{} + // message shown to user. + VmRevertReason::Unknown { .. } => "".to_owned(), + _ => self.to_string(), + } + } + + pub fn encoded_data(&self) -> Vec { + match self { + VmRevertReason::Unknown { data, .. } => data.clone(), + VmRevertReason::General { data, .. 
} => data.clone(), + _ => vec![], + } + } + + fn try_from_bytes(bytes: &[u8]) -> Result { + if bytes.len() < 4 { + // Note, that when the method reverts with no data + // the selector is empty as well. + // For now, we only accept errors with either no data or + // the data with complete selectors. + if !bytes.is_empty() { + return Err(VmRevertReasonParsingError::IncorrectStringLength( + bytes.to_owned(), + )); + } + + let result = VmRevertReason::Unknown { + function_selector: vec![], + data: bytes.to_vec(), + }; + + return Ok(result); + } + + let function_selector = &bytes[0..4]; + match function_selector { + VmRevertReason::GENERAL_ERROR_SELECTOR => Self::parse_general_error(bytes), + _ => { + let result = VmRevertReason::Unknown { + function_selector: function_selector.to_vec(), + data: bytes.to_vec(), + }; + tracing::warn!("Unsupported error type: {}", result); + Ok(result) + } + } + } +} + +impl From<&[u8]> for VmRevertReason { + fn from(error_msg: &[u8]) -> Self { + match Self::try_from_bytes(error_msg) { + Ok(reason) => reason, + Err(_) => { + let function_selector = if error_msg.len() >= 4 { + error_msg[0..4].to_vec() + } else { + error_msg.to_vec() + }; + + let data = if error_msg.len() > 4 { + error_msg[4..].to_vec() + } else { + vec![] + }; + + VmRevertReason::Unknown { + function_selector, + data, + } + } + } + } +} + +impl Display for VmRevertReason { + fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { + use VmRevertReason::{General, InnerTxError, Unknown, VmError}; + + match self { + General { msg, .. } => write!(f, "{}", msg), + VmError => write!(f, "VM Error",), + InnerTxError => write!(f, "Bootloader-based tx failed"), + Unknown { + function_selector, + data, + } => write!( + f, + "Error function_selector = 0x{}, data = 0x{}", + hex::encode(function_selector), + hex::encode(data) + ), + } + } +} + +#[cfg(test)] +mod tests { + use super::VmRevertReason; + + #[test] + fn revert_reason_parsing() { + let msg = vec![ + 8, 195, 121, 160, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 32, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 38, 69, 82, 67, 50, 48, 58, 32, 116, 114, 97, 110, + 115, 102, 101, 114, 32, 97, 109, 111, 117, 110, 116, 32, 101, 120, 99, 101, 101, 100, + 115, 32, 98, 97, 108, 97, 110, 99, 101, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, + ]; + let reason = VmRevertReason::try_from_bytes(msg.as_slice()).expect("Shouldn't be error"); + assert_eq!( + reason, + VmRevertReason::General { + msg: "ERC20: transfer amount exceeds balance".to_string(), + data: msg + } + ); + } + + #[test] + fn revert_reason_with_wrong_function_selector() { + let msg = vec![ + 8, 195, 121, 161, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 32, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 38, 69, 82, 67, 50, 48, 58, 32, 116, 114, 97, 110, + 115, 102, 101, 114, 32, 97, 109, 111, 117, 110, 116, 32, 101, 120, 99, 101, 101, 100, + 115, 32, 98, 97, 108, 97, 110, 99, 101, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, + ]; + let reason = VmRevertReason::try_from_bytes(msg.as_slice()).expect("Shouldn't be error"); + assert!(matches!(reason, 
VmRevertReason::Unknown { .. })); + } + + #[test] + fn revert_reason_with_wrong_data_offset() { + let msg = vec![ + 8, 195, 121, 160, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 31, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 38, 69, 82, 67, 50, 48, 58, 32, 116, 114, 97, 110, + 115, 102, 101, 114, 32, 97, 109, 111, 117, 110, 116, 32, 101, 120, 99, 101, 101, 100, + 115, 32, 98, 97, 108, 97, 110, 99, 101, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, + ]; + let reason = VmRevertReason::try_from_bytes(msg.as_slice()); + assert!(reason.is_err()); + } + + #[test] + fn revert_reason_with_big_data_offset() { + let msg = vec![ + 8, 195, 121, 160, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 132, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 38, 69, 82, 67, 50, 48, 58, 32, 116, 114, 97, 110, + 115, 102, 101, 114, 32, 97, 109, 111, 117, 110, 116, 32, 101, 120, 99, 101, 101, 100, + 115, 32, 98, 97, 108, 97, 110, 99, 101, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, + ]; + let reason = VmRevertReason::try_from_bytes(msg.as_slice()); + assert!(reason.is_err()); + } + + #[test] + fn revert_reason_with_wrong_string_length() { + let msg = vec![ + 8, 195, 121, 160, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 32, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 158, 69, 82, 67, 50, 48, 58, 32, 116, 114, 97, 110, + 115, 102, 101, 114, 32, 97, 109, 111, 117, 110, 116, 32, 101, 120, 99, 101, 101, 100, + 115, 32, 98, 97, 108, 97, 110, 99, 101, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, + ]; + let reason = VmRevertReason::try_from_bytes(msg.as_slice()); + assert!(reason.is_err()); + } +} diff --git a/core/multivm_deps/vm_virtual_blocks/src/implementation/bytecode.rs b/core/multivm_deps/vm_virtual_blocks/src/implementation/bytecode.rs new file mode 100644 index 000000000000..053d980bad73 --- /dev/null +++ b/core/multivm_deps/vm_virtual_blocks/src/implementation/bytecode.rs @@ -0,0 +1,57 @@ +use itertools::Itertools; + +use zksync_state::{StoragePtr, WriteStorage}; +use zksync_types::U256; +use zksync_utils::bytecode::{compress_bytecode, hash_bytecode, CompressedBytecodeInfo}; +use zksync_utils::bytes_to_be_words; + +use crate::{HistoryMode, Vm}; + +impl Vm { + /// Checks the last transaction has successfully published compressed bytecodes and returns `true` if there is at least one is still unknown. + pub(crate) fn has_unpublished_bytecodes(&mut self) -> bool { + self.get_last_tx_compressed_bytecodes().iter().any(|info| { + !self + .state + .storage + .storage + .get_ptr() + .borrow_mut() + .is_bytecode_known(&hash_bytecode(&info.original)) + }) + } +} + +/// Converts bytecode to tokens and hashes it. 
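+// Illustrative note, not part of the original patch: `bytes_to_be_words` packs the bytecode into 32-byte big-endian words, so a 64-byte bytecode yields two U256 words, returned alongside its `hash_bytecode` digest.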
+pub(crate) fn bytecode_to_factory_dep(bytecode: Vec<u8>) -> (U256, Vec<U256>) { + let bytecode_hash = hash_bytecode(&bytecode); + let bytecode_hash = U256::from_big_endian(bytecode_hash.as_bytes()); + + let bytecode_words = bytes_to_be_words(bytecode); + + (bytecode_hash, bytecode_words) +} + +pub(crate) fn compress_bytecodes<S: WriteStorage>( + bytecodes: &[Vec<u8>], + storage: StoragePtr<S>, +) -> Vec<CompressedBytecodeInfo> { + bytecodes + .iter() + .enumerate() + .sorted_by_key(|(_idx, dep)| *dep) + .dedup_by(|x, y| x.1 == y.1) + .filter(|(_idx, dep)| !storage.borrow_mut().is_bytecode_known(&hash_bytecode(dep))) + .sorted_by_key(|(idx, _dep)| *idx) + .filter_map(|(_idx, dep)| { + let compressed_bytecode = compress_bytecode(dep); + + compressed_bytecode + .ok() + .map(|compressed| CompressedBytecodeInfo { + original: dep.clone(), + compressed, + }) + }) + .collect() +} diff --git a/core/multivm_deps/vm_virtual_blocks/src/implementation/execution.rs b/core/multivm_deps/vm_virtual_blocks/src/implementation/execution.rs new file mode 100644 index 000000000000..9944a37f7e83 --- /dev/null +++ b/core/multivm_deps/vm_virtual_blocks/src/implementation/execution.rs @@ -0,0 +1,123 @@ +use zk_evm::aux_structures::Timestamp; +use zksync_state::WriteStorage; + +use crate::old_vm::{ + history_recorder::HistoryMode, + utils::{vm_may_have_ended_inner, VmExecutionResult}, +}; +use crate::tracers::{ + traits::{BoxedTracer, ExecutionEndTracer, ExecutionProcessing, VmTracer}, + DefaultExecutionTracer, RefundsTracer, +}; +use crate::types::{inputs::VmExecutionMode, outputs::VmExecutionResultAndLogs}; +use crate::vm::Vm; +use crate::VmExecutionStopReason; + +impl Vm { + pub(crate) fn inspect_inner( + &mut self, + mut tracers: Vec>>, + execution_mode: VmExecutionMode, + ) -> VmExecutionResultAndLogs { + if let VmExecutionMode::OneTx = execution_mode { + // For correct results we have to include the refunds tracer in the list of desired tracers + tracers.push(RefundsTracer::new(self.batch_env.clone()).into_boxed()); + // Move the pointer to the next transaction + self.bootloader_state.move_tx_to_execute_pointer(); + } + let (_, result) = self.inspect_and_collect_results(tracers, execution_mode); + result + } + + /// Execute the VM with the given tracers until the stop reason is reached. + /// Collect the result from the default tracers.
+ fn inspect_and_collect_results( + &mut self, + tracers: Vec>>, + execution_mode: VmExecutionMode, + ) -> (VmExecutionStopReason, VmExecutionResultAndLogs) { + let mut tx_tracer: DefaultExecutionTracer = DefaultExecutionTracer::new( + self.system_env.default_validation_computational_gas_limit, + execution_mode, + tracers, + self.storage.clone(), + ); + + let timestamp_initial = Timestamp(self.state.local_state.timestamp); + let cycles_initial = self.state.local_state.monotonic_cycle_counter; + let gas_remaining_before = self.gas_remaining(); + let spent_pubdata_counter_before = self.state.local_state.spent_pubdata_counter; + + let stop_reason = self.execute_with_default_tracer(&mut tx_tracer); + + let gas_remaining_after = self.gas_remaining(); + + let logs = self.collect_execution_logs_after_timestamp(timestamp_initial); + + let statistics = self.get_statistics( + timestamp_initial, + cycles_initial, + &tx_tracer, + gas_remaining_before, + gas_remaining_after, + spent_pubdata_counter_before, + logs.total_log_queries_count, + ); + + let result = tx_tracer.result_tracer.into_result(); + + let mut result = VmExecutionResultAndLogs { + result, + logs, + statistics, + refunds: Default::default(), + }; + + for tracer in tx_tracer.custom_tracers.iter_mut() { + tracer.save_results(&mut result); + } + (stop_reason, result) + } + + /// Execute vm with given tracers until the stop reason is reached. + fn execute_with_default_tracer( + &mut self, + tracer: &mut DefaultExecutionTracer, + ) -> VmExecutionStopReason { + tracer.initialize_tracer(&mut self.state); + let result = loop { + // Sanity check: we should never reach the maximum value, because then we won't be able to process the next cycle. + assert_ne!( + self.state.local_state.monotonic_cycle_counter, + u32::MAX, + "VM reached maximum possible amount of cycles. Vm state: {:?}", + self.state + ); + + tracer.before_cycle(&mut self.state); + self.state + .cycle(tracer) + .expect("Failed execution VM cycle."); + + tracer.after_cycle(&mut self.state, &mut self.bootloader_state); + if self.has_ended() { + break VmExecutionStopReason::VmFinished; + } + + if tracer.should_stop_execution() { + break VmExecutionStopReason::TracerRequestedStop; + } + }; + tracer.after_vm_execution(&mut self.state, &self.bootloader_state, result); + result + } + + fn has_ended(&self) -> bool { + match vm_may_have_ended_inner(&self.state) { + None | Some(VmExecutionResult::MostLikelyDidNotFinish(_, _)) => false, + Some( + VmExecutionResult::Ok(_) | VmExecutionResult::Revert(_) | VmExecutionResult::Panic, + ) => true, + } + } +} diff --git a/core/multivm_deps/vm_virtual_blocks/src/implementation/gas.rs b/core/multivm_deps/vm_virtual_blocks/src/implementation/gas.rs new file mode 100644 index 000000000000..a7938125540e --- /dev/null +++ b/core/multivm_deps/vm_virtual_blocks/src/implementation/gas.rs @@ -0,0 +1,42 @@ +use zksync_state::WriteStorage; + +use crate::old_vm::history_recorder::HistoryMode; +use crate::tracers::DefaultExecutionTracer; +use crate::vm::Vm; + +impl Vm { + /// Returns the amount of gas remaining to the VM. + /// Note that this *does not* correspond to the gas limit of a transaction. + /// To calculate the amount of gas spent by transaction, you should call this method before and after + /// the execution, and subtract these values. + /// + /// Note: this method should only be called when either transaction is fully completed or VM completed + /// its execution. 
Remaining gas value is read from the current stack frame, so if you'll attempt to + /// read it during the transaction execution, you may receive invalid value. + pub(crate) fn gas_remaining(&self) -> u32 { + self.state.local_state.callstack.current.ergs_remaining + } + + pub(crate) fn calculate_computational_gas_used( + &self, + tracer: &DefaultExecutionTracer, + gas_remaining_before: u32, + spent_pubdata_counter_before: u32, + ) -> u32 { + let total_gas_used = gas_remaining_before + .checked_sub(self.gas_remaining()) + .expect("underflow"); + let gas_used_on_pubdata = + tracer.gas_spent_on_pubdata(&self.state.local_state) - spent_pubdata_counter_before; + total_gas_used + .checked_sub(gas_used_on_pubdata) + .unwrap_or_else(|| { + tracing::error!( + "Gas used on pubdata is greater than total gas used. On pubdata: {}, total: {}", + gas_used_on_pubdata, + total_gas_used + ); + 0 + }) + } +} diff --git a/core/multivm_deps/vm_virtual_blocks/src/implementation/logs.rs b/core/multivm_deps/vm_virtual_blocks/src/implementation/logs.rs new file mode 100644 index 000000000000..6bc095740ef1 --- /dev/null +++ b/core/multivm_deps/vm_virtual_blocks/src/implementation/logs.rs @@ -0,0 +1,64 @@ +use zk_evm::aux_structures::Timestamp; +use zksync_state::WriteStorage; + +use zksync_types::l2_to_l1_log::L2ToL1Log; +use zksync_types::tx::tx_execution_info::VmExecutionLogs; +use zksync_types::VmEvent; + +use crate::old_vm::events::merge_events; +use crate::old_vm::history_recorder::HistoryMode; +use crate::old_vm::utils::precompile_calls_count_after_timestamp; +use crate::vm::Vm; + +impl Vm { + pub(crate) fn collect_execution_logs_after_timestamp( + &self, + from_timestamp: Timestamp, + ) -> VmExecutionLogs { + let storage_logs: Vec<_> = self + .state + .storage + .storage_log_queries_after_timestamp(from_timestamp) + .iter() + .map(|log| **log) + .collect(); + let storage_logs_count = storage_logs.len(); + + let (events, l2_to_l1_logs) = + self.collect_events_and_l1_logs_after_timestamp(from_timestamp); + + let log_queries = self + .state + .event_sink + .log_queries_after_timestamp(from_timestamp); + + let precompile_calls_count = precompile_calls_count_after_timestamp( + self.state.precompiles_processor.timestamp_history.inner(), + from_timestamp, + ); + + let total_log_queries_count = + storage_logs_count + log_queries.len() + precompile_calls_count; + VmExecutionLogs { + storage_logs, + events, + l2_to_l1_logs, + total_log_queries_count, + } + } + + pub(crate) fn collect_events_and_l1_logs_after_timestamp( + &self, + from_timestamp: Timestamp, + ) -> (Vec, Vec) { + let (raw_events, l1_messages) = self + .state + .event_sink + .get_events_and_l2_l1_logs_after_timestamp(from_timestamp); + let events = merge_events(raw_events) + .into_iter() + .map(|e| e.into_vm_event(self.batch_env.number)) + .collect(); + (events, l1_messages.into_iter().map(Into::into).collect()) + } +} diff --git a/core/multivm_deps/vm_virtual_blocks/src/implementation/mod.rs b/core/multivm_deps/vm_virtual_blocks/src/implementation/mod.rs new file mode 100644 index 000000000000..161732cf0348 --- /dev/null +++ b/core/multivm_deps/vm_virtual_blocks/src/implementation/mod.rs @@ -0,0 +1,7 @@ +mod bytecode; +mod execution; +mod gas; +mod logs; +mod snapshots; +mod statistics; +mod tx; diff --git a/core/multivm_deps/vm_virtual_blocks/src/implementation/snapshots.rs b/core/multivm_deps/vm_virtual_blocks/src/implementation/snapshots.rs new file mode 100644 index 000000000000..e3ddb14a59ee --- /dev/null +++ 
b/core/multivm_deps/vm_virtual_blocks/src/implementation/snapshots.rs @@ -0,0 +1,92 @@ +use vise::{Buckets, EncodeLabelSet, EncodeLabelValue, Family, Histogram, Metrics}; + +use std::time::Duration; + +use zk_evm::aux_structures::Timestamp; +use zksync_state::WriteStorage; + +use crate::{ + old_vm::{history_recorder::HistoryEnabled, oracles::OracleWithHistory}, + types::internals::VmSnapshot, + vm::Vm, +}; + +#[derive(Debug, Clone, Copy, PartialEq, Eq, Hash, EncodeLabelSet, EncodeLabelValue)] +#[metrics(label = "stage", rename_all = "snake_case")] +enum RollbackStage { + DecommitmentProcessorRollback, + EventSinkRollback, + StorageRollback, + MemoryRollback, + PrecompilesProcessorRollback, + ApplyBootloaderSnapshot, +} + +#[derive(Debug, Metrics)] +#[metrics(prefix = "server_vm_virtual_blocks")] +struct VmMetrics { + #[metrics(buckets = Buckets::LATENCIES)] + rollback_time: Family>, +} + +#[vise::register] +static METRICS: vise::Global = vise::Global::new(); + +/// Implementation of VM related to rollbacks inside virtual machine +impl Vm { + pub(crate) fn make_snapshot_inner(&mut self) { + self.snapshots.push(VmSnapshot { + // Vm local state contains O(1) various parameters (registers/etc). + // The only "expensive" copying here is copying of the callstack. + // It will take O(callstack_depth) to copy it. + // So it is generally recommended to get snapshots of the bootloader frame, + // where the depth is 1. + local_state: self.state.local_state.clone(), + bootloader_state: self.bootloader_state.get_snapshot(), + }); + } + + pub(crate) fn rollback_to_snapshot(&mut self, snapshot: VmSnapshot) { + let VmSnapshot { + local_state, + bootloader_state, + } = snapshot; + + let stage_latency = + METRICS.rollback_time[&RollbackStage::DecommitmentProcessorRollback].start(); + let timestamp = Timestamp(local_state.timestamp); + tracing::trace!("Rolling back decomitter"); + self.state + .decommittment_processor + .rollback_to_timestamp(timestamp); + stage_latency.observe(); + + let stage_latency = METRICS.rollback_time[&RollbackStage::EventSinkRollback].start(); + tracing::trace!("Rolling back event_sink"); + self.state.event_sink.rollback_to_timestamp(timestamp); + stage_latency.observe(); + + let stage_latency = METRICS.rollback_time[&RollbackStage::StorageRollback].start(); + tracing::trace!("Rolling back storage"); + self.state.storage.rollback_to_timestamp(timestamp); + stage_latency.observe(); + + let stage_latency = METRICS.rollback_time[&RollbackStage::MemoryRollback].start(); + tracing::trace!("Rolling back memory"); + self.state.memory.rollback_to_timestamp(timestamp); + stage_latency.observe(); + + let stage_latency = + METRICS.rollback_time[&RollbackStage::PrecompilesProcessorRollback].start(); + tracing::trace!("Rolling back precompiles_processor"); + self.state + .precompiles_processor + .rollback_to_timestamp(timestamp); + stage_latency.observe(); + + self.state.local_state = local_state; + let stage_latency = METRICS.rollback_time[&RollbackStage::ApplyBootloaderSnapshot].start(); + self.bootloader_state.apply_snapshot(bootloader_state); + stage_latency.observe(); + } +} diff --git a/core/multivm_deps/vm_virtual_blocks/src/implementation/statistics.rs b/core/multivm_deps/vm_virtual_blocks/src/implementation/statistics.rs new file mode 100644 index 000000000000..54b77d574940 --- /dev/null +++ b/core/multivm_deps/vm_virtual_blocks/src/implementation/statistics.rs @@ -0,0 +1,87 @@ +use zk_evm::aux_structures::Timestamp; +use zksync_state::WriteStorage; + +use zksync_types::U256; + +use 
crate::old_vm::history_recorder::HistoryMode; +use crate::tracers::DefaultExecutionTracer; +use crate::types::outputs::VmExecutionStatistics; +use crate::vm::Vm; + +use crate::VmMemoryMetrics; + +/// Module responsible for observing the VM behavior, i.e. calculating the statistics of the VM runs +/// or reporting the VM memory usage. + +impl Vm { + /// Get statistics about TX execution. + #[allow(clippy::too_many_arguments)] + pub(crate) fn get_statistics( + &self, + timestamp_initial: Timestamp, + cycles_initial: u32, + tracer: &DefaultExecutionTracer, + gas_remaining_before: u32, + gas_remaining_after: u32, + spent_pubdata_counter_before: u32, + total_log_queries_count: usize, + ) -> VmExecutionStatistics { + let computational_gas_used = self.calculate_computational_gas_used( + tracer, + gas_remaining_before, + spent_pubdata_counter_before, + ); + VmExecutionStatistics { + contracts_used: self + .state + .decommittment_processor + .get_decommitted_bytecodes_after_timestamp(timestamp_initial), + cycles_used: self.state.local_state.monotonic_cycle_counter - cycles_initial, + gas_used: gas_remaining_before - gas_remaining_after, + computational_gas_used, + total_log_queries: total_log_queries_count, + } + } + + /// Returns the hashes the bytecodes that have been decommitted by the decomittment processor. + pub(crate) fn get_used_contracts(&self) -> Vec { + self.state + .decommittment_processor + .decommitted_code_hashes + .inner() + .keys() + .cloned() + .collect() + } + + /// Returns the info about all oracles' sizes. + pub fn record_vm_memory_metrics(&self) -> VmMemoryMetrics { + VmMemoryMetrics { + event_sink_inner: self.state.event_sink.get_size(), + event_sink_history: self.state.event_sink.get_history_size(), + memory_inner: self.state.memory.get_size(), + memory_history: self.state.memory.get_history_size(), + decommittment_processor_inner: self.state.decommittment_processor.get_size(), + decommittment_processor_history: self.state.decommittment_processor.get_history_size(), + storage_inner: self.state.storage.get_size(), + storage_history: self.state.storage.get_history_size(), + } + } +} + +impl VmMemoryMetrics { + pub fn full_size(&self) -> usize { + [ + self.event_sink_inner, + self.event_sink_history, + self.memory_inner, + self.memory_history, + self.decommittment_processor_inner, + self.decommittment_processor_history, + self.storage_inner, + self.storage_history, + ] + .iter() + .sum::() + } +} diff --git a/core/multivm_deps/vm_virtual_blocks/src/implementation/tx.rs b/core/multivm_deps/vm_virtual_blocks/src/implementation/tx.rs new file mode 100644 index 000000000000..8341782d8abf --- /dev/null +++ b/core/multivm_deps/vm_virtual_blocks/src/implementation/tx.rs @@ -0,0 +1,65 @@ +use crate::constants::BOOTLOADER_HEAP_PAGE; +use crate::implementation::bytecode::{bytecode_to_factory_dep, compress_bytecodes}; +use zk_evm::aux_structures::Timestamp; +use zksync_state::WriteStorage; +use zksync_types::l1::is_l1_tx_type; +use zksync_types::Transaction; + +use crate::old_vm::history_recorder::HistoryMode; +use crate::types::internals::TransactionData; +use crate::vm::Vm; + +impl Vm { + pub(crate) fn push_raw_transaction( + &mut self, + tx: TransactionData, + predefined_overhead: u32, + predefined_refund: u32, + with_compression: bool, + ) { + let timestamp = Timestamp(self.state.local_state.timestamp); + let codes_for_decommiter = tx + .factory_deps + .iter() + .map(|dep| bytecode_to_factory_dep(dep.clone())) + .collect(); + + let compressed_bytecodes = if is_l1_tx_type(tx.tx_type) 
|| !with_compression { + // L1 transactions do not need compression + vec![] + } else { + compress_bytecodes(&tx.factory_deps, self.state.storage.storage.get_ptr()) + }; + + self.state + .decommittment_processor + .populate(codes_for_decommiter, timestamp); + + let trusted_ergs_limit = + tx.trusted_ergs_limit(self.batch_env.block_gas_price_per_pubdata()); + + let memory = self.bootloader_state.push_tx( + tx, + predefined_overhead, + predefined_refund, + compressed_bytecodes, + trusted_ergs_limit, + self.system_env.chain_id, + ); + + self.state + .memory + .populate_page(BOOTLOADER_HEAP_PAGE as usize, memory, timestamp); + } + + pub(crate) fn push_transaction_with_compression( + &mut self, + tx: Transaction, + with_compression: bool, + ) { + let tx: TransactionData = tx.into(); + let block_gas_per_pubdata_byte = self.batch_env.block_gas_price_per_pubdata(); + let overhead = tx.overhead_gas(block_gas_per_pubdata_byte as u32); + self.push_raw_transaction(tx, overhead, 0, with_compression); + } +} diff --git a/core/multivm_deps/vm_virtual_blocks/src/lib.rs b/core/multivm_deps/vm_virtual_blocks/src/lib.rs new file mode 100644 index 000000000000..6c356dbdff90 --- /dev/null +++ b/core/multivm_deps/vm_virtual_blocks/src/lib.rs @@ -0,0 +1,49 @@ +#![deny(unreachable_pub)] +#![deny(unused_crate_dependencies)] +#![warn(unused_extern_crates)] +#![warn(unused_imports)] + +pub use old_vm::{ + history_recorder::{HistoryDisabled, HistoryEnabled, HistoryMode}, + memory::SimpleMemory, + oracles::storage::StorageOracle, +}; + +pub use errors::{ + BytecodeCompressionError, Halt, TxRevertReason, VmRevertReason, VmRevertReasonParsingError, +}; + +pub use tracers::{ + call::CallTracer, + traits::{BoxedTracer, DynTracer, ExecutionEndTracer, ExecutionProcessing, VmTracer}, + utils::VmExecutionStopReason, + StorageInvocations, ValidationError, ValidationTracer, ValidationTracerParams, +}; + +pub use types::{ + inputs::{L1BatchEnv, L2BlockEnv, SystemEnv, TxExecutionMode, VmExecutionMode}, + internals::ZkSyncVmState, + outputs::{ + BootloaderMemory, CurrentExecutionState, ExecutionResult, FinishedL1Batch, L2Block, + Refunds, VmExecutionResultAndLogs, VmExecutionStatistics, VmMemoryMetrics, + }, +}; +pub use utils::transaction_encoding::TransactionVmExt; + +pub use bootloader_state::BootloaderState; + +pub use crate::vm::Vm; + +mod bootloader_state; +mod errors; +mod implementation; +mod old_vm; +mod tracers; +mod types; +mod vm; + +pub mod constants; +pub mod utils; + +#[cfg(test)] +mod tests; diff --git a/core/multivm_deps/vm_virtual_blocks/src/old_vm/event_sink.rs b/core/multivm_deps/vm_virtual_blocks/src/old_vm/event_sink.rs new file mode 100644 index 000000000000..03156e83b9fb --- /dev/null +++ b/core/multivm_deps/vm_virtual_blocks/src/old_vm/event_sink.rs @@ -0,0 +1,171 @@ +use crate::old_vm::{ + history_recorder::{AppDataFrameManagerWithHistory, HistoryEnabled, HistoryMode}, + oracles::OracleWithHistory, +}; +use std::collections::HashMap; +use zk_evm::{ + abstractions::EventSink, + aux_structures::{LogQuery, Timestamp}, + reference_impls::event_sink::EventMessage, + zkevm_opcode_defs::system_params::{ + BOOTLOADER_FORMAL_ADDRESS, EVENT_AUX_BYTE, L1_MESSAGE_AUX_BYTE, + }, +}; + +#[derive(Debug, Clone, PartialEq, Default)] +pub struct InMemoryEventSink { + frames_stack: AppDataFrameManagerWithHistory, H>, +} + +impl OracleWithHistory for InMemoryEventSink { + fn rollback_to_timestamp(&mut self, timestamp: Timestamp) { + self.frames_stack.rollback_to_timestamp(timestamp); + } +} + +// as usual, if we rollback the 
current frame then we apply changes to storage immediately, +// otherwise we carry rollbacks to the parent's frames + +impl InMemoryEventSink { + pub fn flatten(&self) -> (Vec, Vec, Vec) { + assert_eq!( + self.frames_stack.len(), + 1, + "there must exist an initial keeper frame" + ); + // we forget rollbacks as we have finished the execution and can just apply them + let history = self.frames_stack.forward().current_frame(); + + let (events, l1_messages) = Self::events_and_l1_messages_from_history(history); + (history.iter().map(|x| **x).collect(), events, l1_messages) + } + + pub fn get_log_queries(&self) -> usize { + self.frames_stack.forward().current_frame().len() + } + + /// Returns the log queries in the current frame where `log_query.timestamp >= from_timestamp`. + pub fn log_queries_after_timestamp(&self, from_timestamp: Timestamp) -> &[Box] { + let events = self.frames_stack.forward().current_frame(); + + // Select all of the last elements where e.timestamp >= from_timestamp. + // Note, that using binary search here is dangerous, because the logs are not sorted by timestamp. + events + .rsplit(|e| e.timestamp < from_timestamp) + .next() + .unwrap_or(&[]) + } + + pub fn get_events_and_l2_l1_logs_after_timestamp( + &self, + from_timestamp: Timestamp, + ) -> (Vec, Vec) { + Self::events_and_l1_messages_from_history(self.log_queries_after_timestamp(from_timestamp)) + } + + fn events_and_l1_messages_from_history( + history: &[Box], + ) -> (Vec, Vec) { + let mut tmp = HashMap::::with_capacity(history.len()); + + // note that we only use "forward" part and discard the rollbacks at the end, + // since if rollbacks of parents were not appended anywhere we just still keep them + for el in history { + // we are time ordered here in terms of rollbacks + if tmp.get(&el.timestamp.0).is_some() { + assert!(el.rollback); + tmp.remove(&el.timestamp.0); + } else { + assert!(!el.rollback); + tmp.insert(el.timestamp.0, **el); + } + } + + // naturally sorted by timestamp + let mut keys: Vec<_> = tmp.keys().cloned().collect(); + keys.sort_unstable(); + + let mut events = vec![]; + let mut l1_messages = vec![]; + + for k in keys.into_iter() { + let el = tmp.remove(&k).unwrap(); + let LogQuery { + shard_id, + is_service, + tx_number_in_block, + address, + key, + written_value, + aux_byte, + .. 
+ } = el; + + let event = EventMessage { + shard_id, + is_first: is_service, + tx_number_in_block, + address, + key, + value: written_value, + }; + + if aux_byte == EVENT_AUX_BYTE { + events.push(event); + } else { + l1_messages.push(event); + } + } + + (events, l1_messages) + } + + pub(crate) fn get_size(&self) -> usize { + self.frames_stack.get_size() + } + + pub fn get_history_size(&self) -> usize { + self.frames_stack.get_history_size() + } + + pub fn delete_history(&mut self) { + self.frames_stack.delete_history(); + } +} + +impl EventSink for InMemoryEventSink { + // when we enter a new frame we should remember all our current applications and rollbacks + // when we exit the current frame then if we did panic we should concatenate all current + // forward and rollback cases + + fn add_partial_query(&mut self, _monotonic_cycle_counter: u32, mut query: LogQuery) { + assert!(query.rw_flag); + assert!(query.aux_byte == EVENT_AUX_BYTE || query.aux_byte == L1_MESSAGE_AUX_BYTE); + assert!(!query.rollback); + + // just append to rollbacks and a full history + + self.frames_stack + .push_forward(Box::new(query), query.timestamp); + // we do not need it explicitly here, but let's be consistent with circuit counterpart + query.rollback = true; + self.frames_stack + .push_rollback(Box::new(query), query.timestamp); + } + + fn start_frame(&mut self, timestamp: Timestamp) { + self.frames_stack.push_frame(timestamp) + } + + fn finish_frame(&mut self, panicked: bool, timestamp: Timestamp) { + // if we panic then we append forward and rollbacks to the forward of parent, + // otherwise we place rollbacks of child before rollbacks of the parent + if panicked { + self.frames_stack.move_rollback_to_forward( + |q| q.address != *BOOTLOADER_FORMAL_ADDRESS || q.aux_byte != EVENT_AUX_BYTE, + timestamp, + ); + } + self.frames_stack.merge_frame(timestamp); + } +} diff --git a/core/multivm_deps/vm_virtual_blocks/src/old_vm/events.rs b/core/multivm_deps/vm_virtual_blocks/src/old_vm/events.rs new file mode 100644 index 000000000000..384a0eb86d67 --- /dev/null +++ b/core/multivm_deps/vm_virtual_blocks/src/old_vm/events.rs @@ -0,0 +1,146 @@ +use zk_evm::{ethereum_types::Address, reference_impls::event_sink::EventMessage}; +use zksync_types::{L1BatchNumber, VmEvent, EVENT_WRITER_ADDRESS, H256}; +use zksync_utils::{be_chunks_to_h256_words, h256_to_account_address}; + +#[derive(Clone)] +pub(crate) struct SolidityLikeEvent { + pub(crate) shard_id: u8, + pub(crate) tx_number_in_block: u16, + pub(crate) address: Address, + pub(crate) topics: Vec<[u8; 32]>, + pub(crate) data: Vec, +} + +impl SolidityLikeEvent { + pub(crate) fn into_vm_event(self, block_number: L1BatchNumber) -> VmEvent { + VmEvent { + location: (block_number, self.tx_number_in_block as u32), + address: self.address, + indexed_topics: be_chunks_to_h256_words(self.topics), + value: self.data, + } + } +} + +fn merge_events_inner(events: Vec) -> Vec { + let mut result = vec![]; + let mut current: Option<(usize, u32, SolidityLikeEvent)> = None; + + for message in events.into_iter() { + if !message.is_first { + let EventMessage { + shard_id, + is_first: _, + tx_number_in_block, + address, + key, + value, + } = message; + + if let Some((mut remaining_data_length, mut remaining_topics, mut event)) = + current.take() + { + if event.address != address + || event.shard_id != shard_id + || event.tx_number_in_block != tx_number_in_block + { + continue; + } + let mut data_0 = [0u8; 32]; + let mut data_1 = [0u8; 32]; + key.to_big_endian(&mut data_0); + 
value.to_big_endian(&mut data_1); + for el in [data_0, data_1].iter() { + if remaining_topics != 0 { + event.topics.push(*el); + remaining_topics -= 1; + } else if remaining_data_length != 0 { + if remaining_data_length >= 32 { + event.data.extend_from_slice(el); + remaining_data_length -= 32; + } else { + event.data.extend_from_slice(&el[..remaining_data_length]); + remaining_data_length = 0; + } + } + } + + if remaining_data_length != 0 || remaining_topics != 0 { + current = Some((remaining_data_length, remaining_topics, event)) + } else { + result.push(event); + } + } + } else { + // start new one. First take the old one only if it's well formed + if let Some((remaining_data_length, remaining_topics, event)) = current.take() { + if remaining_data_length == 0 && remaining_topics == 0 { + result.push(event); + } + } + + let EventMessage { + shard_id, + is_first: _, + tx_number_in_block, + address, + key, + value, + } = message; + // split key as our internal marker. Ignore higher bits + let mut num_topics = key.0[0] as u32; + let mut data_length = (key.0[0] >> 32) as usize; + let mut buffer = [0u8; 32]; + value.to_big_endian(&mut buffer); + + let (topics, data) = if num_topics == 0 && data_length == 0 { + (vec![], vec![]) + } else if num_topics == 0 { + data_length -= 32; + (vec![], buffer.to_vec()) + } else { + num_topics -= 1; + (vec![buffer], vec![]) + }; + + let new_event = SolidityLikeEvent { + shard_id, + tx_number_in_block, + address, + topics, + data, + }; + + current = Some((data_length, num_topics, new_event)) + } + } + + // add the last one + if let Some((remaining_data_length, remaining_topics, event)) = current.take() { + if remaining_data_length == 0 && remaining_topics == 0 { + result.push(event); + } + } + + result +} + +pub(crate) fn merge_events(events: Vec) -> Vec { + let raw_events = merge_events_inner(events); + + raw_events + .into_iter() + .filter(|e| e.address == EVENT_WRITER_ADDRESS) + .map(|event| { + // The events writer events where the first topic is the actual address of the event and the rest of the topics are real topics + let address = h256_to_account_address(&H256(event.topics[0])); + let topics = event.topics.into_iter().skip(1).collect(); + + SolidityLikeEvent { + topics, + address, + ..event + } + }) + .collect() +} diff --git a/core/multivm_deps/vm_virtual_blocks/src/old_vm/history_recorder.rs b/core/multivm_deps/vm_virtual_blocks/src/old_vm/history_recorder.rs new file mode 100644 index 000000000000..1a5f7db58664 --- /dev/null +++ b/core/multivm_deps/vm_virtual_blocks/src/old_vm/history_recorder.rs @@ -0,0 +1,805 @@ +use std::{collections::HashMap, fmt::Debug, hash::Hash}; + +use zk_evm::{ + aux_structures::Timestamp, + vm_state::PrimitiveValue, + zkevm_opcode_defs::{self}, +}; + +use zksync_state::{StoragePtr, WriteStorage}; +use zksync_types::{StorageKey, U256}; +use zksync_utils::{h256_to_u256, u256_to_h256}; + +pub(crate) type MemoryWithHistory = HistoryRecorder; +pub(crate) type IntFrameManagerWithHistory = HistoryRecorder, H>; + +// Within the same cycle, timestamps in range timestamp..timestamp+TIME_DELTA_PER_CYCLE-1 +// can be used. This can sometimes violate monotonicity of the timestamp within the +// same cycle, so it should be normalized. +#[inline] +fn normalize_timestamp(timestamp: Timestamp) -> Timestamp { + let timestamp = timestamp.0; + + // Making sure it is divisible by TIME_DELTA_PER_CYCLE + Timestamp(timestamp - timestamp % zkevm_opcode_defs::TIME_DELTA_PER_CYCLE) +} + +/// Accepts history item as its parameter and applies it. 
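The `WithHistory` trait defined next is the heart of the rollback machinery: applying a history record must hand back the record that undoes it. A toy implementation (purely illustrative, not part of this patch) makes the contract concrete:

```rust
// A counter whose history record is "add this delta". Applying a record
// returns its negation, so applying the returned record later rolls the
// change back exactly.
struct Counter(i64);

impl WithHistory for Counter {
    type HistoryRecord = i64; // the delta to add
    type ReturnValue = i64; // the value after the change

    fn apply_historic_record(&mut self, delta: i64) -> (i64, i64) {
        self.0 += delta;
        (-delta, self.0)
    }
}
```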
+pub trait WithHistory { + type HistoryRecord; + type ReturnValue; + + // Applies an action and returns the action that would + // rollback its effect as well as some returned value + fn apply_historic_record( + &mut self, + item: Self::HistoryRecord, + ) -> (Self::HistoryRecord, Self::ReturnValue); +} + +type EventList = Vec<(Timestamp, ::HistoryRecord)>; + +/// Controls if rolling back is possible or not. +/// Either [HistoryEnabled] or [HistoryDisabled]. +pub trait HistoryMode: private::Sealed + Debug + Clone + Default { + type History: Default; + + fn clone_history(history: &Self::History) -> Self::History + where + T::HistoryRecord: Clone; + fn mutate_history)>( + recorder: &mut HistoryRecorder, + f: F, + ); + fn borrow_history) -> R, R>( + recorder: &HistoryRecorder, + f: F, + default: R, + ) -> R; +} + +mod private { + pub trait Sealed {} + impl Sealed for super::HistoryEnabled {} + impl Sealed for super::HistoryDisabled {} +} + +// derives require that all type parameters implement the trait, which is why +// HistoryEnabled/Disabled derive so many traits even though they mostly don't +// exist at runtime. + +/// A data structure with this parameter can be rolled back. +/// See also: [HistoryDisabled] +#[derive(Debug, Clone, Default, PartialEq)] +pub struct HistoryEnabled; + +/// A data structure with this parameter cannot be rolled back. +/// It won't even have rollback methods. +/// See also: [HistoryEnabled] +#[derive(Debug, Clone, Default)] +pub struct HistoryDisabled; + +impl HistoryMode for HistoryEnabled { + type History = EventList; + + fn clone_history(history: &Self::History) -> Self::History + where + T::HistoryRecord: Clone, + { + history.clone() + } + fn mutate_history)>( + recorder: &mut HistoryRecorder, + f: F, + ) { + f(&mut recorder.inner, &mut recorder.history) + } + fn borrow_history) -> R, R>( + recorder: &HistoryRecorder, + f: F, + _: R, + ) -> R { + f(&recorder.history) + } +} + +impl HistoryMode for HistoryDisabled { + type History = (); + + fn clone_history(_: &Self::History) -> Self::History {} + fn mutate_history)>( + _: &mut HistoryRecorder, + _: F, + ) { + } + fn borrow_history) -> R, R>( + _: &HistoryRecorder, + _: F, + default: R, + ) -> R { + default + } +} + +/// A struct responsible for tracking history for +/// a component that is passed as a generic parameter to it (`inner`). +#[derive(Default)] +pub struct HistoryRecorder { + inner: T, + history: H::History, +} + +impl PartialEq for HistoryRecorder +where + T::HistoryRecord: PartialEq, +{ + fn eq(&self, other: &Self) -> bool { + self.inner == other.inner + && self.borrow_history(|h1| other.borrow_history(|h2| h1 == h2, true), true) + } +} + +impl Debug for HistoryRecorder +where + T::HistoryRecord: Debug, +{ + fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { + let mut debug_struct = f.debug_struct("HistoryRecorder"); + debug_struct.field("inner", &self.inner); + self.borrow_history( + |h| { + debug_struct.field("history", h); + }, + (), + ); + debug_struct.finish() + } +} + +impl Clone for HistoryRecorder +where + T::HistoryRecord: Clone, + H: HistoryMode, +{ + fn clone(&self) -> Self { + Self { + inner: self.inner.clone(), + history: H::clone_history(&self.history), + } + } +} + +impl HistoryRecorder { + pub fn from_inner(inner: T) -> Self { + Self { + inner, + history: Default::default(), + } + } + + pub fn inner(&self) -> &T { + &self.inner + } + + /// If history exists, modify it using `f`. 
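A usage sketch before the accessors below (illustrative timestamps; `push` and `rollback_to_timestamp` are defined further down this file). With `HistoryEnabled` every mutation is journaled; the same calls on a `HistoryDisabled` recorder keep no journal at all:

```rust
// Assumes TIME_DELTA_PER_CYCLE divides the chosen timestamps, so the
// normalized values stay monotonic.
let mut stack: HistoryRecorder<Vec<u32>, HistoryEnabled> = Default::default();
stack.push(1, Timestamp(1024));
stack.push(2, Timestamp(2048));
// Undo every record made at or after timestamp 2048, i.e. the second push.
stack.rollback_to_timestamp(Timestamp(2048));
assert_eq!(stack.inner(), &vec![1]);
```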
+ pub fn mutate_history)>(&mut self, f: F) { + H::mutate_history(self, f); + } + + /// If history exists, feed it into `f`. Otherwise return `default`. + pub fn borrow_history) -> R, R>(&self, f: F, default: R) -> R { + H::borrow_history(self, f, default) + } + + pub fn apply_historic_record( + &mut self, + item: T::HistoryRecord, + timestamp: Timestamp, + ) -> T::ReturnValue { + let (reversed_item, return_value) = self.inner.apply_historic_record(item); + + self.mutate_history(|_, history| { + let last_recorded_timestamp = history.last().map(|(t, _)| *t).unwrap_or(Timestamp(0)); + let timestamp = normalize_timestamp(timestamp); + assert!( + last_recorded_timestamp <= timestamp, + "Timestamps are not monotonic" + ); + history.push((timestamp, reversed_item)); + }); + + return_value + } + + /// Deletes all the history for its component, making + /// its current state irreversible + pub fn delete_history(&mut self) { + self.mutate_history(|_, h| h.clear()) + } +} + +impl HistoryRecorder { + pub fn history(&self) -> &Vec<(Timestamp, T::HistoryRecord)> { + &self.history + } + + pub(crate) fn rollback_to_timestamp(&mut self, timestamp: Timestamp) { + loop { + let should_undo = self + .history + .last() + .map(|(item_timestamp, _)| *item_timestamp >= timestamp) + .unwrap_or(false); + if !should_undo { + break; + } + + let (_, item_to_apply) = self.history.pop().unwrap(); + self.inner.apply_historic_record(item_to_apply); + } + } +} + +#[derive(Debug, Clone, PartialEq)] +pub enum VectorHistoryEvent { + Push(X), + Pop, +} + +impl WithHistory for Vec { + type HistoryRecord = VectorHistoryEvent; + type ReturnValue = Option; + fn apply_historic_record( + &mut self, + item: VectorHistoryEvent, + ) -> (Self::HistoryRecord, Self::ReturnValue) { + match item { + VectorHistoryEvent::Pop => { + // Note, that here we assume that the users + // will check themselves whether this vector is empty + // prior to popping from it. + let poped_item = self.pop().unwrap(); + + (VectorHistoryEvent::Push(poped_item), Some(poped_item)) + } + VectorHistoryEvent::Push(x) => { + self.push(x); + + (VectorHistoryEvent::Pop, None) + } + } + } +} + +impl HistoryRecorder, H> { + pub fn push(&mut self, elem: T, timestamp: Timestamp) { + self.apply_historic_record(VectorHistoryEvent::Push(elem), timestamp); + } + + pub fn pop(&mut self, timestamp: Timestamp) -> T { + self.apply_historic_record(VectorHistoryEvent::Pop, timestamp) + .unwrap() + } + + pub fn len(&self) -> usize { + self.inner.len() + } + + pub fn is_empty(&self) -> bool { + self.len() == 0 + } +} + +#[derive(Debug, Clone, PartialEq)] +pub struct HashMapHistoryEvent { + pub key: K, + pub value: Option, +} + +impl WithHistory for HashMap { + type HistoryRecord = HashMapHistoryEvent; + type ReturnValue = Option; + fn apply_historic_record( + &mut self, + item: Self::HistoryRecord, + ) -> (Self::HistoryRecord, Self::ReturnValue) { + let HashMapHistoryEvent { key, value } = item; + + let prev_value = match value { + Some(x) => self.insert(key, x), + None => self.remove(&key), + }; + + ( + HashMapHistoryEvent { + key, + value: prev_value.clone(), + }, + prev_value, + ) + } +} + +impl HistoryRecorder, H> { + pub fn insert(&mut self, key: K, value: V, timestamp: Timestamp) -> Option { + self.apply_historic_record( + HashMapHistoryEvent { + key, + value: Some(value), + }, + timestamp, + ) + } +} + +/// A stack of stacks. The inner stacks are called frames. +/// +/// Does not support popping from the outer stack. 
Instead, the outer stack can +/// push its topmost frame's contents onto the previous frame. +#[derive(Debug, Clone, PartialEq)] +pub struct FramedStack { + data: Vec, + frame_start_indices: Vec, +} + +impl Default for FramedStack { + fn default() -> Self { + // We typically require at least the first frame to be there + // since the last user-provided frame might be reverted + Self { + data: vec![], + frame_start_indices: vec![0], + } + } +} + +#[derive(Debug, Clone, PartialEq)] +pub enum FramedStackEvent { + Push(T), + Pop, + PushFrame(usize), + MergeFrame, +} + +impl WithHistory for FramedStack { + type HistoryRecord = FramedStackEvent; + type ReturnValue = (); + + fn apply_historic_record( + &mut self, + item: Self::HistoryRecord, + ) -> (Self::HistoryRecord, Self::ReturnValue) { + use FramedStackEvent::*; + match item { + Push(x) => { + self.data.push(x); + (Pop, ()) + } + Pop => { + let x = self.data.pop().unwrap(); + (Push(x), ()) + } + PushFrame(i) => { + self.frame_start_indices.push(i); + (MergeFrame, ()) + } + MergeFrame => { + let pos = self.frame_start_indices.pop().unwrap(); + (PushFrame(pos), ()) + } + } + } +} + +impl FramedStack { + fn push_frame(&self) -> FramedStackEvent { + FramedStackEvent::PushFrame(self.data.len()) + } + + pub fn current_frame(&self) -> &[T] { + &self.data[*self.frame_start_indices.last().unwrap()..self.data.len()] + } + + fn len(&self) -> usize { + self.frame_start_indices.len() + } + + /// Returns the amount of memory taken up by the stored items + pub fn get_size(&self) -> usize { + self.data.len() * std::mem::size_of::() + } +} + +impl HistoryRecorder, H> { + pub fn push_to_frame(&mut self, x: T, timestamp: Timestamp) { + self.apply_historic_record(FramedStackEvent::Push(x), timestamp); + } + pub fn clear_frame(&mut self, timestamp: Timestamp) { + let start = *self.inner.frame_start_indices.last().unwrap(); + while self.inner.data.len() > start { + self.apply_historic_record(FramedStackEvent::Pop, timestamp); + } + } + pub fn extend_frame(&mut self, items: impl IntoIterator, timestamp: Timestamp) { + for x in items { + self.push_to_frame(x, timestamp); + } + } + pub fn push_frame(&mut self, timestamp: Timestamp) { + self.apply_historic_record(self.inner.push_frame(), timestamp); + } + pub fn merge_frame(&mut self, timestamp: Timestamp) { + self.apply_historic_record(FramedStackEvent::MergeFrame, timestamp); + } +} + +#[derive(Debug, Clone, PartialEq)] +pub(crate) struct AppDataFrameManagerWithHistory { + forward: HistoryRecorder, H>, + rollback: HistoryRecorder, H>, +} + +impl Default for AppDataFrameManagerWithHistory { + fn default() -> Self { + Self { + forward: Default::default(), + rollback: Default::default(), + } + } +} + +impl AppDataFrameManagerWithHistory { + pub(crate) fn delete_history(&mut self) { + self.forward.delete_history(); + self.rollback.delete_history(); + } + + pub(crate) fn push_forward(&mut self, item: T, timestamp: Timestamp) { + self.forward.push_to_frame(item, timestamp); + } + pub(crate) fn push_rollback(&mut self, item: T, timestamp: Timestamp) { + self.rollback.push_to_frame(item, timestamp); + } + pub(crate) fn push_frame(&mut self, timestamp: Timestamp) { + self.forward.push_frame(timestamp); + self.rollback.push_frame(timestamp); + } + pub(crate) fn merge_frame(&mut self, timestamp: Timestamp) { + self.forward.merge_frame(timestamp); + self.rollback.merge_frame(timestamp); + } + + pub(crate) fn len(&self) -> usize { + self.forward.inner.len() + } + pub(crate) fn forward(&self) -> &FramedStack { + 
&self.forward.inner + } + pub(crate) fn rollback(&self) -> &FramedStack { + &self.rollback.inner + } + + /// Returns the amount of memory taken up by the stored items + pub(crate) fn get_size(&self) -> usize { + self.forward().get_size() + self.rollback().get_size() + } + + pub(crate) fn get_history_size(&self) -> usize { + (self.forward.borrow_history(|h| h.len(), 0) + self.rollback.borrow_history(|h| h.len(), 0)) + * std::mem::size_of::< as WithHistory>::HistoryRecord>() + } +} + +impl AppDataFrameManagerWithHistory { + pub(crate) fn move_rollback_to_forward bool>( + &mut self, + filter: F, + timestamp: Timestamp, + ) { + for x in self.rollback.inner.current_frame().iter().rev() { + if filter(x) { + self.forward.push_to_frame(x.clone(), timestamp); + } + } + self.rollback.clear_frame(timestamp); + } +} + +impl AppDataFrameManagerWithHistory { + pub(crate) fn rollback_to_timestamp(&mut self, timestamp: Timestamp) { + self.forward.rollback_to_timestamp(timestamp); + self.rollback.rollback_to_timestamp(timestamp); + } +} + +const PRIMITIVE_VALUE_EMPTY: PrimitiveValue = PrimitiveValue::empty(); +const PAGE_SUBDIVISION_LEN: usize = 64; + +#[derive(Debug, Default, Clone)] +struct MemoryPage { + root: Vec>>, +} + +impl MemoryPage { + fn get(&self, slot: usize) -> &PrimitiveValue { + self.root + .get(slot / PAGE_SUBDIVISION_LEN) + .and_then(|inner| inner.as_ref()) + .map(|leaf| &leaf[slot % PAGE_SUBDIVISION_LEN]) + .unwrap_or(&PRIMITIVE_VALUE_EMPTY) + } + fn set(&mut self, slot: usize, value: PrimitiveValue) -> PrimitiveValue { + let root_index = slot / PAGE_SUBDIVISION_LEN; + let leaf_index = slot % PAGE_SUBDIVISION_LEN; + + if self.root.len() <= root_index { + self.root.resize_with(root_index + 1, || None); + } + let node = &mut self.root[root_index]; + + if let Some(leaf) = node { + let old = leaf[leaf_index]; + leaf[leaf_index] = value; + old + } else { + let mut leaf = [PrimitiveValue::empty(); PAGE_SUBDIVISION_LEN]; + leaf[leaf_index] = value; + self.root[root_index] = Some(Box::new(leaf)); + PrimitiveValue::empty() + } + } + + fn get_size(&self) -> usize { + self.root.iter().filter_map(|x| x.as_ref()).count() + * PAGE_SUBDIVISION_LEN + * std::mem::size_of::() + } +} + +impl PartialEq for MemoryPage { + fn eq(&self, other: &Self) -> bool { + for slot in 0..self.root.len().max(other.root.len()) * PAGE_SUBDIVISION_LEN { + if self.get(slot) != other.get(slot) { + return false; + } + } + true + } +} + +#[derive(Debug, Default, Clone)] +pub struct MemoryWrapper { + memory: Vec, +} + +impl PartialEq for MemoryWrapper { + fn eq(&self, other: &Self) -> bool { + let empty_page = MemoryPage::default(); + let empty_pages = std::iter::repeat(&empty_page); + self.memory + .iter() + .chain(empty_pages.clone()) + .zip(other.memory.iter().chain(empty_pages)) + .take(self.memory.len().max(other.memory.len())) + .all(|(a, b)| a == b) + } +} + +#[derive(Debug, Clone, PartialEq)] +pub struct MemoryHistoryRecord { + pub page: usize, + pub slot: usize, + pub set_value: PrimitiveValue, +} + +impl MemoryWrapper { + pub fn ensure_page_exists(&mut self, page: usize) { + if self.memory.len() <= page { + // We don't need to record such events in history + // because all these vectors will be empty + self.memory.resize_with(page + 1, MemoryPage::default); + } + } + + pub fn dump_page_content_as_u256_words( + &self, + page_number: u32, + range: std::ops::Range, + ) -> Vec { + if let Some(page) = self.memory.get(page_number as usize) { + let mut result = vec![]; + for i in range { + result.push(*page.get(i as usize)); + 
} + result + } else { + vec![PrimitiveValue::empty(); range.len()] + } + } + + pub fn read_slot(&self, page: usize, slot: usize) -> &PrimitiveValue { + self.memory + .get(page) + .map(|page| page.get(slot)) + .unwrap_or(&PRIMITIVE_VALUE_EMPTY) + } + + pub fn get_size(&self) -> usize { + self.memory.iter().map(|page| page.get_size()).sum() + } +} + +impl WithHistory for MemoryWrapper { + type HistoryRecord = MemoryHistoryRecord; + type ReturnValue = PrimitiveValue; + + fn apply_historic_record( + &mut self, + item: MemoryHistoryRecord, + ) -> (Self::HistoryRecord, Self::ReturnValue) { + let MemoryHistoryRecord { + page, + slot, + set_value, + } = item; + + self.ensure_page_exists(page); + let page_handle = self.memory.get_mut(page).unwrap(); + let prev_value = page_handle.set(slot, set_value); + + let undo = MemoryHistoryRecord { + page, + slot, + set_value: prev_value, + }; + + (undo, prev_value) + } +} + +impl HistoryRecorder { + pub fn write_to_memory( + &mut self, + page: usize, + slot: usize, + value: PrimitiveValue, + timestamp: Timestamp, + ) -> PrimitiveValue { + self.apply_historic_record( + MemoryHistoryRecord { + page, + slot, + set_value: value, + }, + timestamp, + ) + } + + pub fn clear_page(&mut self, page: usize, timestamp: Timestamp) { + self.mutate_history(|inner, history| { + if let Some(page_handle) = inner.memory.get(page) { + for (i, x) in page_handle.root.iter().enumerate() { + if let Some(slots) = x { + for (j, value) in slots.iter().enumerate() { + if *value != PrimitiveValue::empty() { + history.push(( + timestamp, + MemoryHistoryRecord { + page, + slot: PAGE_SUBDIVISION_LEN * i + j, + set_value: *value, + }, + )) + } + } + } + } + inner.memory[page] = MemoryPage::default(); + } + }); + } +} + +#[derive(Debug)] +pub struct StorageWrapper { + storage_ptr: StoragePtr, +} + +impl StorageWrapper { + pub fn new(storage_ptr: StoragePtr) -> Self { + Self { storage_ptr } + } + + pub fn get_ptr(&self) -> StoragePtr { + self.storage_ptr.clone() + } + + pub fn read_from_storage(&self, key: &StorageKey) -> U256 { + h256_to_u256(self.storage_ptr.borrow_mut().read_value(key)) + } +} + +#[derive(Debug, Clone)] +pub struct StorageHistoryRecord { + pub key: StorageKey, + pub value: U256, +} + +impl WithHistory for StorageWrapper { + type HistoryRecord = StorageHistoryRecord; + type ReturnValue = U256; + + fn apply_historic_record( + &mut self, + item: Self::HistoryRecord, + ) -> (Self::HistoryRecord, Self::ReturnValue) { + let prev_value = h256_to_u256( + self.storage_ptr + .borrow_mut() + .set_value(item.key, u256_to_h256(item.value)), + ); + + let reverse_item = StorageHistoryRecord { + key: item.key, + value: prev_value, + }; + + (reverse_item, prev_value) + } +} + +impl HistoryRecorder, H> { + pub fn read_from_storage(&self, key: &StorageKey) -> U256 { + self.inner.read_from_storage(key) + } + + pub fn write_to_storage(&mut self, key: StorageKey, value: U256, timestamp: Timestamp) -> U256 { + self.apply_historic_record(StorageHistoryRecord { key, value }, timestamp) + } + + /// Returns a pointer to the storage. + /// Note, that any changes done to the storage via this pointer + /// will NOT be recorded as its history. 
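The `get_ptr` accessor below hands out that raw pointer, so the warning is easy to demonstrate. A sketch, assuming a hypothetical `recorder` over `HistoryEnabled` and some `key`:

```rust
// A write through the raw pointer bypasses the journal entirely...
recorder
    .get_ptr()
    .borrow_mut()
    .set_value(key, u256_to_h256(U256::from(42)));
// ...so no rollback can undo it: nothing was recorded.
recorder.rollback_to_timestamp(Timestamp(0));
assert_eq!(recorder.read_from_storage(&key), U256::from(42));
```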
+ pub fn get_ptr(&self) -> StoragePtr { + self.inner.get_ptr() + } +} + +#[cfg(test)] +mod tests { + use crate::old_vm::history_recorder::{HistoryRecorder, MemoryWrapper}; + use crate::HistoryDisabled; + use zk_evm::{aux_structures::Timestamp, vm_state::PrimitiveValue}; + use zksync_types::U256; + + #[test] + fn memory_equality() { + let mut a: HistoryRecorder = Default::default(); + let mut b = a.clone(); + let nonzero = U256::from_dec_str("123").unwrap(); + let different_value = U256::from_dec_str("1234").unwrap(); + + let write = |memory: &mut HistoryRecorder, value| { + memory.write_to_memory( + 17, + 34, + PrimitiveValue { + value, + is_pointer: false, + }, + Timestamp::empty(), + ); + }; + + assert_eq!(a, b); + + write(&mut b, nonzero); + assert_ne!(a, b); + + write(&mut a, different_value); + assert_ne!(a, b); + + write(&mut a, nonzero); + assert_eq!(a, b); + } +} diff --git a/core/multivm_deps/vm_virtual_blocks/src/old_vm/memory.rs b/core/multivm_deps/vm_virtual_blocks/src/old_vm/memory.rs new file mode 100644 index 000000000000..8569c135d1e2 --- /dev/null +++ b/core/multivm_deps/vm_virtual_blocks/src/old_vm/memory.rs @@ -0,0 +1,323 @@ +use zk_evm::abstractions::{Memory, MemoryType}; +use zk_evm::aux_structures::{MemoryPage, MemoryQuery, Timestamp}; +use zk_evm::vm_state::PrimitiveValue; +use zk_evm::zkevm_opcode_defs::FatPointer; +use zksync_types::U256; + +use crate::old_vm::history_recorder::{ + FramedStack, HistoryEnabled, HistoryMode, IntFrameManagerWithHistory, MemoryWithHistory, + MemoryWrapper, WithHistory, +}; +use crate::old_vm::oracles::OracleWithHistory; +use crate::old_vm::utils::{aux_heap_page_from_base, heap_page_from_base, stack_page_from_base}; + +#[derive(Debug, Clone, PartialEq)] +pub struct SimpleMemory { + memory: MemoryWithHistory, + observable_pages: IntFrameManagerWithHistory, +} + +impl Default for SimpleMemory { + fn default() -> Self { + let mut memory: MemoryWithHistory = Default::default(); + memory.mutate_history(|_, h| h.reserve(607)); + Self { + memory, + observable_pages: Default::default(), + } + } +} + +impl OracleWithHistory for SimpleMemory { + fn rollback_to_timestamp(&mut self, timestamp: Timestamp) { + self.memory.rollback_to_timestamp(timestamp); + self.observable_pages.rollback_to_timestamp(timestamp); + } +} + +impl SimpleMemory { + pub fn populate(&mut self, elements: Vec<(u32, Vec)>, timestamp: Timestamp) { + for (page, values) in elements.into_iter() { + for (i, value) in values.into_iter().enumerate() { + let value = PrimitiveValue { + value, + is_pointer: false, + }; + self.memory + .write_to_memory(page as usize, i, value, timestamp); + } + } + } + + pub fn populate_page( + &mut self, + page: usize, + elements: Vec<(usize, U256)>, + timestamp: Timestamp, + ) { + elements.into_iter().for_each(|(offset, value)| { + let value = PrimitiveValue { + value, + is_pointer: false, + }; + + self.memory.write_to_memory(page, offset, value, timestamp); + }); + } + + pub fn dump_page_content_as_u256_words( + &self, + page: u32, + range: std::ops::Range, + ) -> Vec { + self.memory + .inner() + .dump_page_content_as_u256_words(page, range) + .into_iter() + .map(|v| v.value) + .collect() + } + + pub fn read_slot(&self, page: usize, slot: usize) -> &PrimitiveValue { + self.memory.inner().read_slot(page, slot) + } + + // This method should be used with relatively small lengths, since + // we don't heavily optimize here for cases with long lengths + pub fn read_unaligned_bytes(&self, page: usize, start: usize, length: usize) -> Vec { + if length == 0 { 
+ return vec![]; + } + + let end = start + length - 1; + + let mut current_word = start / 32; + let mut result = vec![]; + while current_word * 32 <= end { + let word_value = self.read_slot(page, current_word).value; + let word_value = { + let mut bytes: Vec = vec![0u8; 32]; + word_value.to_big_endian(&mut bytes); + bytes + }; + + result.extend(extract_needed_bytes_from_word( + word_value, + current_word, + start, + end, + )); + + current_word += 1; + } + + assert_eq!(result.len(), length); + + result + } + + pub(crate) fn get_size(&self) -> usize { + // Hashmap memory overhead is neglected. + let memory_size = self.memory.inner().get_size(); + let observable_pages_size = self.observable_pages.inner().get_size(); + + memory_size + observable_pages_size + } + + pub fn get_history_size(&self) -> usize { + let memory_size = self.memory.borrow_history(|h| h.len(), 0) + * std::mem::size_of::<::HistoryRecord>(); + let observable_pages_size = self.observable_pages.borrow_history(|h| h.len(), 0) + * std::mem::size_of::< as WithHistory>::HistoryRecord>(); + + memory_size + observable_pages_size + } + + pub fn delete_history(&mut self) { + self.memory.delete_history(); + self.observable_pages.delete_history(); + } +} + +impl Memory for SimpleMemory { + fn execute_partial_query( + &mut self, + _monotonic_cycle_counter: u32, + mut query: MemoryQuery, + ) -> MemoryQuery { + match query.location.memory_type { + MemoryType::Stack => {} + MemoryType::Heap | MemoryType::AuxHeap => { + // The following assertion works fine even when doing a read + // from heap through pointer, since `value_is_pointer` can only be set to + // `true` during memory writes. + assert!( + !query.value_is_pointer, + "Pointers can only be stored on stack" + ); + } + MemoryType::FatPointer => { + assert!(!query.rw_flag); + assert!( + !query.value_is_pointer, + "Pointers can only be stored on stack" + ); + } + MemoryType::Code => { + unreachable!("code should be through specialized query"); + } + } + + let page = query.location.page.0 as usize; + let slot = query.location.index.0 as usize; + + if query.rw_flag { + self.memory.write_to_memory( + page, + slot, + PrimitiveValue { + value: query.value, + is_pointer: query.value_is_pointer, + }, + query.timestamp, + ); + } else { + let current_value = self.read_slot(page, slot); + query.value = current_value.value; + query.value_is_pointer = current_value.is_pointer; + } + + query + } + + fn specialized_code_query( + &mut self, + _monotonic_cycle_counter: u32, + mut query: MemoryQuery, + ) -> MemoryQuery { + assert_eq!(query.location.memory_type, MemoryType::Code); + assert!( + !query.value_is_pointer, + "Pointers are not used for decommmits" + ); + + let page = query.location.page.0 as usize; + let slot = query.location.index.0 as usize; + + if query.rw_flag { + self.memory.write_to_memory( + page, + slot, + PrimitiveValue { + value: query.value, + is_pointer: query.value_is_pointer, + }, + query.timestamp, + ); + } else { + let current_value = self.read_slot(page, slot); + query.value = current_value.value; + query.value_is_pointer = current_value.is_pointer; + } + + query + } + + fn read_code_query( + &self, + _monotonic_cycle_counter: u32, + mut query: MemoryQuery, + ) -> MemoryQuery { + assert_eq!(query.location.memory_type, MemoryType::Code); + assert!( + !query.value_is_pointer, + "Pointers are not used for decommmits" + ); + assert!(!query.rw_flag, "Only read queries can be processed"); + + let page = query.location.page.0 as usize; + let slot = query.location.index.0 as usize; + 
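+        // Read-only path: the query is filled from the current memory state;
+        // unlike the write branches above, no history record is produced.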
+ let current_value = self.read_slot(page, slot); + query.value = current_value.value; + query.value_is_pointer = current_value.is_pointer; + + query + } + + fn start_global_frame( + &mut self, + _current_base_page: MemoryPage, + new_base_page: MemoryPage, + calldata_fat_pointer: FatPointer, + timestamp: Timestamp, + ) { + // Besides the calldata page, we also formally include the current stack + // page, heap page and aux heap page. + // The code page will be always left observable, so we don't include it here. + self.observable_pages.push_frame(timestamp); + self.observable_pages.extend_frame( + vec![ + calldata_fat_pointer.memory_page, + stack_page_from_base(new_base_page).0, + heap_page_from_base(new_base_page).0, + aux_heap_page_from_base(new_base_page).0, + ], + timestamp, + ); + } + + fn finish_global_frame( + &mut self, + base_page: MemoryPage, + returndata_fat_pointer: FatPointer, + timestamp: Timestamp, + ) { + // Safe to unwrap here, since `finish_global_frame` is never called with empty stack + let current_observable_pages = self.observable_pages.inner().current_frame(); + let returndata_page = returndata_fat_pointer.memory_page; + + for &page in current_observable_pages { + // If the page's number is greater than or equal to the base_page, + // it means that it was created by the internal calls of this contract. + // We need to add this check as the calldata pointer is also part of the + // observable pages. + if page >= base_page.0 && page != returndata_page { + self.memory.clear_page(page as usize, timestamp); + } + } + + self.observable_pages.clear_frame(timestamp); + self.observable_pages.merge_frame(timestamp); + + self.observable_pages + .push_to_frame(returndata_page, timestamp); + } +} + +// It is expected that there is some intersection between [word_number*32..word_number*32+31] and [start, end] +fn extract_needed_bytes_from_word( + word_value: Vec, + word_number: usize, + start: usize, + end: usize, +) -> Vec { + let word_start = word_number * 32; + let word_end = word_start + 31; // Note, that at word_start + 32 a new word already starts + + let intersection_left = std::cmp::max(word_start, start); + let intersection_right = std::cmp::min(word_end, end); + + if intersection_right < intersection_left { + vec![] + } else { + let start_bytes = intersection_left - word_start; + let to_take = intersection_right - intersection_left + 1; + + word_value + .into_iter() + .skip(start_bytes) + .take(to_take) + .collect() + } +} diff --git a/core/multivm_deps/vm_virtual_blocks/src/old_vm/mod.rs b/core/multivm_deps/vm_virtual_blocks/src/old_vm/mod.rs new file mode 100644 index 000000000000..afade1984614 --- /dev/null +++ b/core/multivm_deps/vm_virtual_blocks/src/old_vm/mod.rs @@ -0,0 +1,8 @@ +/// This module contains the parts from old VM implementation, which were not changed during the vm implementation. +/// It should be refactored and removed in the future. 
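Stepping back to `memory.rs` above: the word-intersection arithmetic in `extract_needed_bytes_from_word` is easiest to see with concrete numbers (standalone, illustrative values only):

```rust
// Reading 5 bytes at offsets 30..=34 touches two 32-byte words:
//   word 0 covers bytes 0..=31  -> intersection [30, 31]: skip 30, take 2
//   word 1 covers bytes 32..=63 -> intersection [32, 34]: skip 0,  take 3
fn main() {
    let (start, end) = (30usize, 34usize); // inclusive bounds, as in the helper
    for word_number in 0..=1usize {
        let word_start = word_number * 32;
        let word_end = word_start + 31;
        let left = word_start.max(start);
        let right = word_end.min(end);
        println!(
            "word {word_number}: skip {} bytes, take {}",
            left - word_start,
            right - left + 1
        );
    }
}
```

Each word contributes exactly the slice that overlaps the requested range, so the concatenated result always has length `end - start + 1`.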
+pub(crate) mod event_sink;
+pub(crate) mod events;
+pub(crate) mod history_recorder;
+pub(crate) mod memory;
+pub(crate) mod oracles;
+pub(crate) mod utils;
diff --git a/core/multivm_deps/vm_virtual_blocks/src/old_vm/oracles/decommitter.rs b/core/multivm_deps/vm_virtual_blocks/src/old_vm/oracles/decommitter.rs
new file mode 100644
index 000000000000..e91380a6d385
--- /dev/null
+++ b/core/multivm_deps/vm_virtual_blocks/src/old_vm/oracles/decommitter.rs
@@ -0,0 +1,238 @@
+use std::collections::HashMap;
+use std::fmt::Debug;
+
+use crate::old_vm::history_recorder::{HistoryEnabled, HistoryMode, HistoryRecorder, WithHistory};
+
+use zk_evm::abstractions::MemoryType;
+use zk_evm::aux_structures::Timestamp;
+use zk_evm::{
+    abstractions::{DecommittmentProcessor, Memory},
+    aux_structures::{DecommittmentQuery, MemoryIndex, MemoryLocation, MemoryPage, MemoryQuery},
+};
+
+use zksync_state::{ReadStorage, StoragePtr};
+use zksync_types::U256;
+use zksync_utils::bytecode::bytecode_len_in_words;
+use zksync_utils::{bytes_to_be_words, u256_to_h256};
+
+use super::OracleWithHistory;
+
+/// The main job of the DecommitterOracle is to implement the DecommittmentProcessor trait, which is
+/// used by the VM to 'load' bytecodes into memory.
+#[derive(Debug)]
+pub struct DecommitterOracle<const B: bool, S, H: HistoryMode> {
+    /// Pointer that enables reading contract bytecodes from the database.
+    storage: StoragePtr<S>,
+    /// The cache of bytecodes that the bootloader "knows", but that are not necessarily in the database.
+    /// It is also used as a database cache.
+    pub known_bytecodes: HistoryRecorder<HashMap<U256, Vec<U256>>, H>,
+    /// Stores pages of memory where certain code hashes have already been decommitted.
+    /// It is expected that they all are present in the DB.
+    // `decommitted_code_hashes` history is necessary
+    pub decommitted_code_hashes: HistoryRecorder<HashMap<U256, u32>, HistoryEnabled>,
+    /// Stores the history of decommitment requests.
+    decommitment_requests: HistoryRecorder<Vec<()>, H>,
+}
+
+impl<const B: bool, S: ReadStorage, H: HistoryMode> DecommitterOracle<B, S, H> {
+    pub fn new(storage: StoragePtr<S>) -> Self {
+        Self {
+            storage,
+            known_bytecodes: HistoryRecorder::default(),
+            decommitted_code_hashes: HistoryRecorder::default(),
+            decommitment_requests: HistoryRecorder::default(),
+        }
+    }
+
+    /// Gets the bytecode for a given hash (either from storage, or from 'known_bytecodes' that were populated by the `populate` method).
+    /// Panics if the bytecode doesn't exist.
+    pub fn get_bytecode(&mut self, hash: U256, timestamp: Timestamp) -> Vec<U256> {
+        let entry = self.known_bytecodes.inner().get(&hash);
+
+        match entry {
+            Some(x) => x.clone(),
+            None => {
+                // It is ok to panic here, since the decommitter is never called directly by
+                // the users and always called by the VM. The VM will never decommit a
+                // code hash which we didn't previously claim to know the preimage of.
+                let value = self
+                    .storage
+                    .borrow_mut()
+                    .load_factory_dep(u256_to_h256(hash))
+                    .expect("Trying to decode nonexistent hash");
+
+                let value = bytes_to_be_words(value);
+                self.known_bytecodes.insert(hash, value.clone(), timestamp);
+                value
+            }
+        }
+    }
+
+    /// Adds additional bytecodes. They take precedence over the bytecodes from storage.
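To make that precedence concrete, a hypothetical use of the `populate` method below together with `get_bytecode` (`storage_ptr` and `raw_bytecode` are assumed to exist, the hash/word helpers come from `zksync_utils`, and `HistoryDisabled` would need to be imported):

```rust
// Bytecodes inserted via `populate` are served from `known_bytecodes`,
// so the database is never consulted for them.
let mut oracle: DecommitterOracle<false, _, HistoryDisabled> =
    DecommitterOracle::new(storage_ptr);
let hash = h256_to_u256(hash_bytecode(&raw_bytecode));
let words = bytes_to_be_words(raw_bytecode);
oracle.populate(vec![(hash, words.clone())], Timestamp(0));
assert_eq!(oracle.get_bytecode(hash, Timestamp(0)), words);
```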
+    pub fn populate(&mut self, bytecodes: Vec<(U256, Vec<U256>)>, timestamp: Timestamp) {
+        for (hash, bytecode) in bytecodes {
+            self.known_bytecodes.insert(hash, bytecode, timestamp);
+        }
+    }
+
+    pub fn get_used_bytecode_hashes(&self) -> Vec<U256> {
+        self.decommitted_code_hashes
+            .inner()
+            .iter()
+            .map(|item| *item.0)
+            .collect()
+    }
+
+    pub fn get_decommitted_bytecodes_after_timestamp(&self, timestamp: Timestamp) -> usize {
+        // Note that here we rely on the fact that for each used bytecode
+        // there is one and only one corresponding event in its history.
+        self.decommitted_code_hashes
+            .history()
+            .iter()
+            .rev()
+            .take_while(|(t, _)| *t >= timestamp)
+            .count()
+    }
+
+    pub fn get_decommitted_code_hashes_with_history(
+        &self,
+    ) -> &HistoryRecorder<HashMap<U256, u32>, HistoryEnabled> {
+        &self.decommitted_code_hashes
+    }
+
+    /// Returns the storage handle. Used only in tests.
+    pub fn get_storage(&self) -> StoragePtr<S> {
+        self.storage.clone()
+    }
+
+    /// Measures the amount of memory used by this oracle (used for metrics only).
+    pub(crate) fn get_size(&self) -> usize {
+        // Hashmap memory overhead is neglected.
+        let known_bytecodes_size = self
+            .known_bytecodes
+            .inner()
+            .iter()
+            .map(|(_, value)| value.len() * std::mem::size_of::<U256>())
+            .sum::<usize>();
+        let decommitted_code_hashes_size =
+            self.decommitted_code_hashes.inner().len() * std::mem::size_of::<(U256, u32)>();
+
+        known_bytecodes_size + decommitted_code_hashes_size
+    }
+
+    pub(crate) fn get_history_size(&self) -> usize {
+        let known_bytecodes_stack_size = self.known_bytecodes.borrow_history(|h| h.len(), 0)
+            * std::mem::size_of::<<HashMap<U256, Vec<U256>> as WithHistory>::HistoryRecord>();
+        let known_bytecodes_heap_size = self.known_bytecodes.borrow_history(
+            |h| {
+                h.iter()
+                    .map(|(_, event)| {
+                        if let Some(bytecode) = event.value.as_ref() {
+                            bytecode.len() * std::mem::size_of::<U256>()
+                        } else {
+                            0
+                        }
+                    })
+                    .sum::<usize>()
+            },
+            0,
+        );
+        let decommitted_code_hashes_size =
+            self.decommitted_code_hashes.borrow_history(|h| h.len(), 0)
+                * std::mem::size_of::<<HashMap<U256, u32> as WithHistory>::HistoryRecord>();
+
+        known_bytecodes_stack_size + known_bytecodes_heap_size + decommitted_code_hashes_size
+    }
+
+    pub fn delete_history(&mut self) {
+        self.decommitted_code_hashes.delete_history();
+        self.known_bytecodes.delete_history();
+        self.decommitment_requests.delete_history();
+    }
+}
+
+impl<const B: bool, S: ReadStorage, H: HistoryMode> OracleWithHistory
+    for DecommitterOracle<B, S, H>
+{
+    fn rollback_to_timestamp(&mut self, timestamp: Timestamp) {
+        self.decommitted_code_hashes
+            .rollback_to_timestamp(timestamp);
+        self.known_bytecodes.rollback_to_timestamp(timestamp);
+        self.decommitment_requests.rollback_to_timestamp(timestamp);
+    }
+}
+
+impl<const B: bool, S: ReadStorage + Debug, H: HistoryMode> DecommittmentProcessor
+    for DecommitterOracle<B, S, H>
+{
+    /// Loads a given bytecode hash into memory (see trait description for more details).
+    fn decommit_into_memory<M: Memory>(
+        &mut self,
+        monotonic_cycle_counter: u32,
+        mut partial_query: DecommittmentQuery,
+        memory: &mut M,
+    ) -> Result<
+        (
+            zk_evm::aux_structures::DecommittmentQuery,
+            Option<Vec<U256>>,
+        ),
+        anyhow::Error,
+    > {
+        self.decommitment_requests.push((), partial_query.timestamp);
+        // First, check whether we fetched this bytecode in the past.
+        // If we did, we can just return the page that we used before (as the memory is read-only).
+        if let Some(memory_page) = self
+            .decommitted_code_hashes
+            .inner()
+            .get(&partial_query.hash)
+            .copied()
+        {
+            partial_query.is_fresh = false;
+            partial_query.memory_page = MemoryPage(memory_page);
+            partial_query.decommitted_length =
+                bytecode_len_in_words(&u256_to_h256(partial_query.hash));
+
+            Ok((partial_query, None))
+        } else {
+            // We are fetching a fresh bytecode that we didn't read before.
+            let values = self.get_bytecode(partial_query.hash, partial_query.timestamp);
+            let page_to_use = partial_query.memory_page;
+            let timestamp = partial_query.timestamp;
+            partial_query.decommitted_length = values.len() as u16;
+            partial_query.is_fresh = true;
+
+            // Create a template query that we'll use for writing into memory.
+            // `value` and `index` are set to 0, as they will be updated in the inner loop below.
+            let mut tmp_q = MemoryQuery {
+                timestamp,
+                location: MemoryLocation {
+                    memory_type: MemoryType::Code,
+                    page: page_to_use,
+                    index: MemoryIndex(0),
+                },
+                value: U256::zero(),
+                value_is_pointer: false,
+                rw_flag: true,
+            };
+            self.decommitted_code_hashes
+                .insert(partial_query.hash, page_to_use.0, timestamp);
+
+            // Copy the bytecode (stored in the `values` Vec) into the memory page.
+            if B {
+                for (i, value) in values.iter().enumerate() {
+                    tmp_q.location.index = MemoryIndex(i as u32);
+                    tmp_q.value = *value;
+                    memory.specialized_code_query(monotonic_cycle_counter, tmp_q);
+                }
+                // If we're in the witness mode, we also have to return the values.
+                Ok((partial_query, Some(values)))
+            } else {
+                for (i, value) in values.into_iter().enumerate() {
+                    tmp_q.location.index = MemoryIndex(i as u32);
+                    tmp_q.value = value;
+                    memory.specialized_code_query(monotonic_cycle_counter, tmp_q);
+                }
+
+                Ok((partial_query, None))
+            }
+        }
+    }
+}
diff --git a/core/multivm_deps/vm_virtual_blocks/src/old_vm/oracles/mod.rs b/core/multivm_deps/vm_virtual_blocks/src/old_vm/oracles/mod.rs
new file mode 100644
index 000000000000..daa2e21672df
--- /dev/null
+++ b/core/multivm_deps/vm_virtual_blocks/src/old_vm/oracles/mod.rs
@@ -0,0 +1,9 @@
+use zk_evm::aux_structures::Timestamp;
+
+pub(crate) mod decommitter;
+pub(crate) mod precompile;
+pub(crate) mod storage;
+
+pub(crate) trait OracleWithHistory {
+    fn rollback_to_timestamp(&mut self, timestamp: Timestamp);
+}
diff --git a/core/multivm_deps/vm_virtual_blocks/src/old_vm/oracles/precompile.rs b/core/multivm_deps/vm_virtual_blocks/src/old_vm/oracles/precompile.rs
new file mode 100644
index 000000000000..72b751c75d49
--- /dev/null
+++ b/core/multivm_deps/vm_virtual_blocks/src/old_vm/oracles/precompile.rs
@@ -0,0 +1,75 @@
+use zk_evm::{
+    abstractions::Memory,
+    abstractions::PrecompileCyclesWitness,
+    abstractions::PrecompilesProcessor,
+    aux_structures::{LogQuery, MemoryQuery, Timestamp},
+    precompiles::DefaultPrecompilesProcessor,
+};
+
+use crate::old_vm::history_recorder::{HistoryEnabled, HistoryMode, HistoryRecorder};
+
+use super::OracleWithHistory;
+
+/// A wrapper around `DefaultPrecompilesProcessor` that stores a queue of the
+/// timestamps at which precompiles were called.
+/// The number of precompile calls per block is strictly limited, and saving
+/// the timestamps allows us to check the exact number of log queries that
+/// were used during the tx execution.
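Because the timestamps are pushed in call order, the recorded queue is a sorted vector, and counting the precompile calls made at or after a given timestamp later reduces to a binary search. A minimal sketch of that consumer side (mirroring `precompile_calls_count_after_timestamp` in `old_vm/utils.rs`; the sample timestamps are hypothetical):

    fn calls_count_after(sorted_timestamps: &[u32], from: u32) -> usize {
        // `partition_point` returns the index of the first timestamp >= `from`,
        // so everything at or after that index counts.
        sorted_timestamps.len() - sorted_timestamps.partition_point(|&t| t < from)
    }

    fn main() {
        let history = [1, 4, 4, 9, 12]; // hypothetical precompile call timestamps
        assert_eq!(calls_count_after(&history, 4), 4);
        assert_eq!(calls_count_after(&history, 13), 0);
    }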
+#[derive(Debug, Clone)]
+pub struct PrecompilesProcessorWithHistory<const B: bool, H: HistoryMode> {
+    pub timestamp_history: HistoryRecorder<Vec<Timestamp>, H>,
+    pub default_precompiles_processor: DefaultPrecompilesProcessor<B>,
+}
+
+impl<const B: bool, H: HistoryMode> Default for PrecompilesProcessorWithHistory<B, H> {
+    fn default() -> Self {
+        Self {
+            timestamp_history: Default::default(),
+            default_precompiles_processor: DefaultPrecompilesProcessor,
+        }
+    }
+}
+
+impl<const B: bool> OracleWithHistory for PrecompilesProcessorWithHistory<B, HistoryEnabled> {
+    fn rollback_to_timestamp(&mut self, timestamp: Timestamp) {
+        self.timestamp_history.rollback_to_timestamp(timestamp);
+    }
+}
+
+impl<const B: bool, H: HistoryMode> PrecompilesProcessorWithHistory<B, H> {
+    pub fn get_timestamp_history(&self) -> &Vec<Timestamp> {
+        self.timestamp_history.inner()
+    }
+
+    pub fn delete_history(&mut self) {
+        self.timestamp_history.delete_history();
+    }
+}
+
+impl<const B: bool, H: HistoryMode> PrecompilesProcessor for PrecompilesProcessorWithHistory<B, H> {
+    fn start_frame(&mut self) {
+        self.default_precompiles_processor.start_frame();
+    }
+    fn execute_precompile<M: Memory>(
+        &mut self,
+        monotonic_cycle_counter: u32,
+        query: LogQuery,
+        memory: &mut M,
+    ) -> Option<(Vec<MemoryQuery>, Vec<MemoryQuery>, PrecompileCyclesWitness)> {
+        // In the next line we save `query.timestamp` as both
+        // an operation in the history of the precompiles processor and
+        // the time when this operation occurred.
+        // While slightly weird, it is done for consistency with other oracles
+        // where operations and timestamps have different types.
+        self.timestamp_history
+            .push(query.timestamp, query.timestamp);
+        self.default_precompiles_processor.execute_precompile(
+            monotonic_cycle_counter,
+            query,
+            memory,
+        )
+    }
+    fn finish_frame(&mut self, _panicked: bool) {
+        self.default_precompiles_processor.finish_frame(_panicked);
+    }
+}
diff --git a/core/multivm_deps/vm_virtual_blocks/src/old_vm/oracles/storage.rs b/core/multivm_deps/vm_virtual_blocks/src/old_vm/oracles/storage.rs
new file mode 100644
index 000000000000..482cc69bbbd8
--- /dev/null
+++ b/core/multivm_deps/vm_virtual_blocks/src/old_vm/oracles/storage.rs
@@ -0,0 +1,338 @@
+use std::collections::HashMap;
+
+use crate::old_vm::history_recorder::{
+    AppDataFrameManagerWithHistory, HashMapHistoryEvent, HistoryEnabled, HistoryMode,
+    HistoryRecorder, StorageWrapper, WithHistory,
+};
+
+use zk_evm::abstractions::RefundedAmounts;
+use zk_evm::zkevm_opcode_defs::system_params::INITIAL_STORAGE_WRITE_PUBDATA_BYTES;
+use zk_evm::{
+    abstractions::{RefundType, Storage as VmStorageOracle},
+    aux_structures::{LogQuery, Timestamp},
+};
+
+use zksync_state::{StoragePtr, WriteStorage};
+use zksync_types::utils::storage_key_for_eth_balance;
+use zksync_types::{
+    AccountTreeId, Address, StorageKey, StorageLogQuery, StorageLogQueryType, BOOTLOADER_ADDRESS,
+    U256,
+};
+use zksync_utils::u256_to_h256;
+
+use super::OracleWithHistory;
+
+// While the storage does not support different shards, it was decided to write the
+// code of the StorageOracle with the shard parameters in mind.
+pub(crate) fn triplet_to_storage_key(_shard_id: u8, address: Address, key: U256) -> StorageKey {
+    StorageKey::new(AccountTreeId::new(address), u256_to_h256(key))
+}
+
+pub(crate) fn storage_key_of_log(query: &LogQuery) -> StorageKey {
+    triplet_to_storage_key(query.shard_id, query.address, query.key)
+}
+
+#[derive(Debug)]
+pub struct StorageOracle<S: WriteStorage, H: HistoryMode> {
+    // Access to the persistent storage. Please note that it
+    // is used only for read access. All the actual writes happen
+    // after the execution has ended.
+    pub(crate) storage: HistoryRecorder<StorageWrapper<S>, H>,
+
+    pub(crate) frames_stack: AppDataFrameManagerWithHistory<Box<StorageLogQuery>, H>,
+
+    // The changes that have been paid for in previous transactions.
+    // It is a mapping from storage key to the number of *bytes* that were paid by the user
+    // to cover this slot.
+    // `paid_changes` history is necessary
+    pub(crate) paid_changes: HistoryRecorder<HashMap<StorageKey, u32>, HistoryEnabled>,
+}
+
+impl<S: WriteStorage, H: HistoryMode> OracleWithHistory for StorageOracle<S, H> {
+    fn rollback_to_timestamp(&mut self, timestamp: Timestamp) {
+        self.frames_stack.rollback_to_timestamp(timestamp);
+        self.storage.rollback_to_timestamp(timestamp);
+        self.paid_changes.rollback_to_timestamp(timestamp);
+    }
+}
+
+impl<S: WriteStorage, H: HistoryMode> StorageOracle<S, H> {
+    pub fn new(storage: StoragePtr<S>) -> Self {
+        Self {
+            storage: HistoryRecorder::from_inner(StorageWrapper::new(storage)),
+            frames_stack: Default::default(),
+            paid_changes: Default::default(),
+        }
+    }
+
+    pub fn delete_history(&mut self) {
+        self.frames_stack.delete_history();
+        self.storage.delete_history();
+        self.paid_changes.delete_history();
+    }
+
+    fn is_storage_key_free(&self, key: &StorageKey) -> bool {
+        key.address() == &zksync_config::constants::SYSTEM_CONTEXT_ADDRESS
+            || *key == storage_key_for_eth_balance(&BOOTLOADER_ADDRESS)
+    }
+
+    pub fn read_value(&mut self, mut query: LogQuery) -> LogQuery {
+        let key = triplet_to_storage_key(query.shard_id, query.address, query.key);
+        let current_value = self.storage.read_from_storage(&key);
+
+        query.read_value = current_value;
+
+        self.frames_stack.push_forward(
+            Box::new(StorageLogQuery {
+                log_query: query,
+                log_type: StorageLogQueryType::Read,
+            }),
+            query.timestamp,
+        );
+
+        query
+    }
+
+    pub fn write_value(&mut self, mut query: LogQuery) -> LogQuery {
+        let key = triplet_to_storage_key(query.shard_id, query.address, query.key);
+        let current_value =
+            self.storage
+                .write_to_storage(key, query.written_value, query.timestamp);
+
+        let is_initial_write = self.storage.get_ptr().borrow_mut().is_write_initial(&key);
+        let log_query_type = if is_initial_write {
+            StorageLogQueryType::InitialWrite
+        } else {
+            StorageLogQueryType::RepeatedWrite
+        };
+
+        query.read_value = current_value;
+
+        let mut storage_log_query = StorageLogQuery {
+            log_query: query,
+            log_type: log_query_type,
+        };
+        self.frames_stack
+            .push_forward(Box::new(storage_log_query), query.timestamp);
+        storage_log_query.log_query.rollback = true;
+        self.frames_stack
+            .push_rollback(Box::new(storage_log_query), query.timestamp);
+        storage_log_query.log_query.rollback = false;
+
+        query
+    }
+
+    // Returns the amount of funds that has already been paid for writes into this storage slot.
+    fn prepaid_for_write(&self, storage_key: &StorageKey) -> u32 {
+        self.paid_changes
+            .inner()
+            .get(storage_key)
+            .copied()
+            .unwrap_or_default()
+    }
+
+    pub(crate) fn base_price_for_write(&self, query: &LogQuery) -> u32 {
+        let storage_key = storage_key_of_log(query);
+
+        if self.is_storage_key_free(&storage_key) {
+            return 0;
+        }
+
+        let is_initial_write = self
+            .storage
+            .get_ptr()
+            .borrow_mut()
+            .is_write_initial(&storage_key);
+
+        get_pubdata_price_bytes(query, is_initial_write)
+    }
+
+    // Returns the price of the update in terms of pubdata bytes.
+    // TODO (SMA-1701): update VM to accept gas instead of pubdata.
+ fn value_update_price(&self, query: &LogQuery) -> u32 { + let storage_key = storage_key_of_log(query); + + let base_cost = self.base_price_for_write(query); + + let already_paid = self.prepaid_for_write(&storage_key); + + if base_cost <= already_paid { + // Some other transaction has already paid for this slot, no need to pay anything + 0u32 + } else { + base_cost - already_paid + } + } + + /// Returns storage log queries from current frame where `log.log_query.timestamp >= from_timestamp`. + pub(crate) fn storage_log_queries_after_timestamp( + &self, + from_timestamp: Timestamp, + ) -> &[Box] { + let logs = self.frames_stack.forward().current_frame(); + + // Select all of the last elements where l.log_query.timestamp >= from_timestamp. + // Note, that using binary search here is dangerous, because the logs are not sorted by timestamp. + logs.rsplit(|l| l.log_query.timestamp < from_timestamp) + .next() + .unwrap_or(&[]) + } + + pub(crate) fn get_final_log_queries(&self) -> Vec { + assert_eq!( + self.frames_stack.len(), + 1, + "VM finished execution in unexpected state" + ); + + self.frames_stack + .forward() + .current_frame() + .iter() + .map(|x| **x) + .collect() + } + + pub(crate) fn get_size(&self) -> usize { + let frames_stack_size = self.frames_stack.get_size(); + let paid_changes_size = + self.paid_changes.inner().len() * std::mem::size_of::<(StorageKey, u32)>(); + + frames_stack_size + paid_changes_size + } + + pub(crate) fn get_history_size(&self) -> usize { + let storage_size = self.storage.borrow_history(|h| h.len(), 0) + * std::mem::size_of::< as WithHistory>::HistoryRecord>(); + let frames_stack_size = self.frames_stack.get_history_size(); + let paid_changes_size = self.paid_changes.borrow_history(|h| h.len(), 0) + * std::mem::size_of::< as WithHistory>::HistoryRecord>(); + storage_size + frames_stack_size + paid_changes_size + } +} + +impl VmStorageOracle for StorageOracle { + // Perform a storage read/write access by taking an partially filled query + // and returning filled query and cold/warm marker for pricing purposes + fn execute_partial_query( + &mut self, + _monotonic_cycle_counter: u32, + query: LogQuery, + ) -> LogQuery { + // tracing::trace!( + // "execute partial query cyc {:?} addr {:?} key {:?}, rw {:?}, wr {:?}, tx {:?}", + // _monotonic_cycle_counter, + // query.address, + // query.key, + // query.rw_flag, + // query.written_value, + // query.tx_number_in_block + // ); + assert!(!query.rollback); + if query.rw_flag { + // The number of bytes that have been compensated by the user to perform this write + let storage_key = storage_key_of_log(&query); + + // It is considered that the user has paid for the whole base price for the writes + let to_pay_by_user = self.base_price_for_write(&query); + let prepaid = self.prepaid_for_write(&storage_key); + + if to_pay_by_user > prepaid { + self.paid_changes.apply_historic_record( + HashMapHistoryEvent { + key: storage_key, + value: Some(to_pay_by_user), + }, + query.timestamp, + ); + } + self.write_value(query) + } else { + self.read_value(query) + } + } + + // We can return the size of the refund before each storage query. 
+ // Note, that while the `RefundType` allows to provide refunds both in + // `ergs` and `pubdata`, only refunds in pubdata will be compensated for the users + fn estimate_refunds_for_write( + &mut self, // to avoid any hacks inside, like prefetch + _monotonic_cycle_counter: u32, + partial_query: &LogQuery, + ) -> RefundType { + let price_to_pay = self.value_update_price(partial_query); + + RefundType::RepeatedWrite(RefundedAmounts { + ergs: 0, + // `INITIAL_STORAGE_WRITE_PUBDATA_BYTES` is the default amount of pubdata bytes the user pays for. + pubdata_bytes: (INITIAL_STORAGE_WRITE_PUBDATA_BYTES as u32) - price_to_pay, + }) + } + + // Indicate a start of execution frame for rollback purposes + fn start_frame(&mut self, timestamp: Timestamp) { + self.frames_stack.push_frame(timestamp); + } + + // Indicate that execution frame went out from the scope, so we can + // log the history and either rollback immediately or keep records to rollback later + fn finish_frame(&mut self, timestamp: Timestamp, panicked: bool) { + // If we panic then we append forward and rollbacks to the forward of parent, + // otherwise we place rollbacks of child before rollbacks of the parent + if panicked { + // perform actual rollback + for query in self.frames_stack.rollback().current_frame().iter().rev() { + let read_value = match query.log_type { + StorageLogQueryType::Read => { + // Having Read logs in rollback is not possible + tracing::warn!("Read log in rollback queue {:?}", query); + continue; + } + StorageLogQueryType::InitialWrite | StorageLogQueryType::RepeatedWrite => { + query.log_query.read_value + } + }; + + let LogQuery { written_value, .. } = query.log_query; + let key = triplet_to_storage_key( + query.log_query.shard_id, + query.log_query.address, + query.log_query.key, + ); + let current_value = self.storage.write_to_storage( + key, + // NOTE, that since it is a rollback query, + // the `read_value` is being set + read_value, timestamp, + ); + + // Additional validation that the current value was correct + // Unwrap is safe because the return value from write_inner is the previous value in this leaf. + // It is impossible to set leaf value to `None` + assert_eq!(current_value, written_value); + } + + self.frames_stack + .move_rollback_to_forward(|_| true, timestamp); + } + self.frames_stack.merge_frame(timestamp); + } +} + +/// Returns the number of bytes needed to publish a slot. +// Since we need to publish the state diffs onchain, for each of the updated storage slot +// we basically need to publish the following pair: (). +// While new_value is always 32 bytes long, for key we use the following optimization: +// - The first time we publish it, we use 32 bytes. +// Then, we remember a 8-byte id for this slot and assign it to it. We call this initial write. +// - The second time we publish it, we will use this 8-byte instead of the 32 bytes of the entire key. +// So the total size of the publish pubdata is 40 bytes. We call this kind of write the repeated one +fn get_pubdata_price_bytes(_query: &LogQuery, is_initial: bool) -> u32 { + // TODO (SMA-1702): take into account the content of the log query, i.e. values that contain mostly zeroes + // should cost less. 
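// A worked example, assuming the byte counts described in the doc comment above:
// the first paid write to a slot publishes a 32-byte key + 32-byte value, while a
// repeated write publishes an 8-byte slot id + 32-byte value = 40 bytes. Since
// `value_update_price` charges only `base_cost - already_paid`, a slot whose full
// base price was prepaid earlier in the batch costs nothing to write again.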
+ if is_initial { + zk_evm::zkevm_opcode_defs::system_params::INITIAL_STORAGE_WRITE_PUBDATA_BYTES as u32 + } else { + zk_evm::zkevm_opcode_defs::system_params::REPEATED_STORAGE_WRITE_PUBDATA_BYTES as u32 + } +} diff --git a/core/multivm_deps/vm_virtual_blocks/src/old_vm/utils.rs b/core/multivm_deps/vm_virtual_blocks/src/old_vm/utils.rs new file mode 100644 index 000000000000..5df4c6aa8012 --- /dev/null +++ b/core/multivm_deps/vm_virtual_blocks/src/old_vm/utils.rs @@ -0,0 +1,222 @@ +use crate::old_vm::memory::SimpleMemory; + +use crate::types::internals::ZkSyncVmState; +use crate::HistoryMode; + +use zk_evm::zkevm_opcode_defs::decoding::{AllowedPcOrImm, EncodingModeProduction, VmEncodingMode}; +use zk_evm::zkevm_opcode_defs::RET_IMPLICIT_RETURNDATA_PARAMS_REGISTER; +use zk_evm::{ + aux_structures::{MemoryPage, Timestamp}, + vm_state::PrimitiveValue, + zkevm_opcode_defs::FatPointer, +}; +use zksync_config::constants::L1_GAS_PER_PUBDATA_BYTE; +use zksync_state::WriteStorage; + +use zksync_types::{Address, U256}; + +#[derive(Debug, Clone)] +pub(crate) enum VmExecutionResult { + Ok(Vec), + Revert(Vec), + Panic, + MostLikelyDidNotFinish(Address, u16), +} + +pub(crate) const fn stack_page_from_base(base: MemoryPage) -> MemoryPage { + MemoryPage(base.0 + 1) +} + +pub(crate) const fn heap_page_from_base(base: MemoryPage) -> MemoryPage { + MemoryPage(base.0 + 2) +} + +pub(crate) const fn aux_heap_page_from_base(base: MemoryPage) -> MemoryPage { + MemoryPage(base.0 + 3) +} + +pub(crate) trait FixedLengthIterator<'a, I: 'a, const N: usize>: Iterator +where + Self: 'a, +{ + fn next(&mut self) -> Option<::Item> { + ::next(self) + } +} + +pub(crate) trait IntoFixedLengthByteIterator { + type IntoIter: FixedLengthIterator<'static, u8, N>; + fn into_le_iter(self) -> Self::IntoIter; + fn into_be_iter(self) -> Self::IntoIter; +} + +pub(crate) struct FixedBufferValueIterator { + iter: std::array::IntoIter, +} + +impl Iterator for FixedBufferValueIterator { + type Item = T; + fn next(&mut self) -> Option { + self.iter.next() + } +} + +impl FixedLengthIterator<'static, T, N> + for FixedBufferValueIterator +{ +} + +impl IntoFixedLengthByteIterator<32> for U256 { + type IntoIter = FixedBufferValueIterator; + fn into_le_iter(self) -> Self::IntoIter { + let mut buffer = [0u8; 32]; + self.to_little_endian(&mut buffer); + + FixedBufferValueIterator { + iter: IntoIterator::into_iter(buffer), + } + } + + fn into_be_iter(self) -> Self::IntoIter { + let mut buffer = [0u8; 32]; + self.to_big_endian(&mut buffer); + + FixedBufferValueIterator { + iter: IntoIterator::into_iter(buffer), + } + } +} + +/// Receives sorted slice of timestamps. +/// Returns count of timestamps that are greater than or equal to `from_timestamp`. +/// Works in O(log(sorted_timestamps.len())). 
+pub(crate) fn precompile_calls_count_after_timestamp( + sorted_timestamps: &[Timestamp], + from_timestamp: Timestamp, +) -> usize { + sorted_timestamps.len() - sorted_timestamps.partition_point(|t| *t < from_timestamp) +} + +pub(crate) fn eth_price_per_pubdata_byte(l1_gas_price: u64) -> u64 { + // This value will typically be a lot less than u64 + // unless the gas price on L1 goes beyond tens of millions of gwei + l1_gas_price * (L1_GAS_PER_PUBDATA_BYTE as u64) +} + +pub(crate) fn vm_may_have_ended_inner( + vm: &ZkSyncVmState, +) -> Option { + let execution_has_ended = vm.execution_has_ended(); + + let r1 = vm.local_state.registers[RET_IMPLICIT_RETURNDATA_PARAMS_REGISTER as usize]; + let current_address = vm.local_state.callstack.get_current_stack().this_address; + + let outer_eh_location = >::PcOrImm::MAX.as_u64(); + match ( + execution_has_ended, + vm.local_state.callstack.get_current_stack().pc.as_u64(), + ) { + (true, 0) => { + let returndata = dump_memory_page_using_primitive_value(&vm.memory, r1); + + Some(VmExecutionResult::Ok(returndata)) + } + (false, _) => None, + (true, l) if l == outer_eh_location => { + // check r1,r2,r3 + if vm.local_state.flags.overflow_or_less_than_flag { + Some(VmExecutionResult::Panic) + } else { + let returndata = dump_memory_page_using_primitive_value(&vm.memory, r1); + Some(VmExecutionResult::Revert(returndata)) + } + } + (_, a) => Some(VmExecutionResult::MostLikelyDidNotFinish( + current_address, + a as u16, + )), + } +} + +pub(crate) fn dump_memory_page_using_primitive_value( + memory: &SimpleMemory, + ptr: PrimitiveValue, +) -> Vec { + if !ptr.is_pointer { + return vec![]; + } + let fat_ptr = FatPointer::from_u256(ptr.value); + dump_memory_page_using_fat_pointer(memory, fat_ptr) +} + +pub(crate) fn dump_memory_page_using_fat_pointer( + memory: &SimpleMemory, + fat_ptr: FatPointer, +) -> Vec { + dump_memory_page_by_offset_and_length( + memory, + fat_ptr.memory_page, + (fat_ptr.start + fat_ptr.offset) as usize, + (fat_ptr.length - fat_ptr.offset) as usize, + ) +} + +pub(crate) fn dump_memory_page_by_offset_and_length( + memory: &SimpleMemory, + page: u32, + offset: usize, + length: usize, +) -> Vec { + assert!(offset < (1u32 << 24) as usize); + assert!(length < (1u32 << 24) as usize); + let mut dump = Vec::with_capacity(length); + if length == 0 { + return dump; + } + + let first_word = offset / 32; + let end_byte = offset + length; + let mut last_word = end_byte / 32; + if end_byte % 32 != 0 { + last_word += 1; + } + + let unalignment = offset % 32; + + let page_part = + memory.dump_page_content_as_u256_words(page, (first_word as u32)..(last_word as u32)); + + let mut is_first = true; + let mut remaining = length; + for word in page_part.into_iter() { + let it = word.into_be_iter(); + if is_first { + is_first = false; + let it = it.skip(unalignment); + for next in it { + if remaining > 0 { + dump.push(next); + remaining -= 1; + } + } + } else { + for next in it { + if remaining > 0 { + dump.push(next); + remaining -= 1; + } + } + } + } + + assert_eq!( + dump.len(), + length, + "tried to dump with offset {}, length {}, got a bytestring of length {}", + offset, + length, + dump.len() + ); + + dump +} diff --git a/core/multivm_deps/vm_virtual_blocks/src/tests/bootloader.rs b/core/multivm_deps/vm_virtual_blocks/src/tests/bootloader.rs new file mode 100644 index 000000000000..0479672a6ef2 --- /dev/null +++ b/core/multivm_deps/vm_virtual_blocks/src/tests/bootloader.rs @@ -0,0 +1,53 @@ +use zksync_types::U256; + +use 
crate::constants::BOOTLOADER_HEAP_PAGE; +use crate::errors::Halt; +use crate::tests::tester::VmTesterBuilder; +use crate::tests::utils::{get_bootloader, verify_required_memory, BASE_SYSTEM_CONTRACTS}; +use crate::types::inputs::system_env::TxExecutionMode; + +use crate::types::outputs::ExecutionResult; +use crate::{HistoryEnabled, VmExecutionMode}; + +#[test] +fn test_dummy_bootloader() { + let mut base_system_contracts = BASE_SYSTEM_CONTRACTS.clone(); + base_system_contracts.bootloader = get_bootloader("dummy"); + + let mut vm = VmTesterBuilder::new(HistoryEnabled) + .with_empty_in_memory_storage() + .with_base_system_smart_contracts(base_system_contracts) + .with_execution_mode(TxExecutionMode::VerifyExecute) + .build(); + + let result = vm.vm.execute(VmExecutionMode::Batch); + assert!(!result.result.is_failed()); + + let correct_first_cell = U256::from_str_radix("123123123", 16).unwrap(); + verify_required_memory( + &vm.vm.state, + vec![(correct_first_cell, BOOTLOADER_HEAP_PAGE, 0)], + ); +} + +#[test] +fn test_bootloader_out_of_gas() { + let mut base_system_contracts = BASE_SYSTEM_CONTRACTS.clone(); + base_system_contracts.bootloader = get_bootloader("dummy"); + + let mut vm = VmTesterBuilder::new(HistoryEnabled) + .with_empty_in_memory_storage() + .with_base_system_smart_contracts(base_system_contracts) + .with_gas_limit(10) + .with_execution_mode(TxExecutionMode::VerifyExecute) + .build(); + + let res = vm.vm.execute(VmExecutionMode::Batch); + + assert!(matches!( + res.result, + ExecutionResult::Halt { + reason: Halt::BootloaderOutOfGas + } + )); +} diff --git a/core/multivm_deps/vm_virtual_blocks/src/tests/bytecode_publishing.rs b/core/multivm_deps/vm_virtual_blocks/src/tests/bytecode_publishing.rs new file mode 100644 index 000000000000..60e45e252570 --- /dev/null +++ b/core/multivm_deps/vm_virtual_blocks/src/tests/bytecode_publishing.rs @@ -0,0 +1,37 @@ +use zksync_types::event::extract_long_l2_to_l1_messages; +use zksync_utils::bytecode::compress_bytecode; + +use crate::tests::tester::{DeployContractsTx, TxType, VmTesterBuilder}; +use crate::tests::utils::read_test_contract; +use crate::types::inputs::system_env::TxExecutionMode; +use crate::{HistoryEnabled, VmExecutionMode}; + +#[test] +fn test_bytecode_publishing() { + // In this test, we aim to ensure that the contents of the compressed bytecodes + // are included as part of the L2->L1 long messages + let mut vm = VmTesterBuilder::new(HistoryEnabled) + .with_empty_in_memory_storage() + .with_execution_mode(TxExecutionMode::VerifyExecute) + .with_random_rich_accounts(1) + .build(); + + let counter = read_test_contract(); + let account = &mut vm.rich_accounts[0]; + + let compressed_bytecode = compress_bytecode(&counter).unwrap(); + + let DeployContractsTx { tx, .. 
} = account.get_deploy_tx(&counter, None, TxType::L2); + vm.vm.push_transaction(tx); + let result = vm.vm.execute(VmExecutionMode::OneTx); + assert!(!result.result.is_failed(), "Transaction wasn't successful"); + + vm.vm.execute(VmExecutionMode::Batch); + + let state = vm.vm.get_current_execution_state(); + let long_messages = extract_long_l2_to_l1_messages(&state.events); + assert!( + long_messages.contains(&compressed_bytecode), + "Bytecode not published" + ); +} diff --git a/core/multivm_deps/vm_virtual_blocks/src/tests/call_tracer.rs b/core/multivm_deps/vm_virtual_blocks/src/tests/call_tracer.rs new file mode 100644 index 000000000000..d55ba826030f --- /dev/null +++ b/core/multivm_deps/vm_virtual_blocks/src/tests/call_tracer.rs @@ -0,0 +1,86 @@ +use crate::constants::BLOCK_GAS_LIMIT; +use crate::tests::tester::VmTesterBuilder; +use crate::tests::utils::{read_max_depth_contract, read_test_contract}; +use crate::{CallTracer, HistoryEnabled, TxExecutionMode, VmExecutionMode}; +use once_cell::sync::OnceCell; +use std::sync::Arc; +use zksync_types::{Address, Execute}; + +// This test is ultra slow, so it's ignored by default. +#[test] +#[ignore] +fn test_max_depth() { + let contarct = read_max_depth_contract(); + let address = Address::random(); + let mut vm = VmTesterBuilder::new(HistoryEnabled) + .with_empty_in_memory_storage() + .with_random_rich_accounts(1) + .with_deployer() + .with_gas_limit(BLOCK_GAS_LIMIT) + .with_execution_mode(TxExecutionMode::VerifyExecute) + .with_custom_contracts(vec![(contarct, address, true)]) + .build(); + + let account = &mut vm.rich_accounts[0]; + let tx = account.get_l2_tx_for_execute( + Execute { + contract_address: address, + calldata: vec![], + value: Default::default(), + factory_deps: None, + }, + None, + ); + + let result = Arc::new(OnceCell::new()); + let call_tracer = CallTracer::new(result.clone(), HistoryEnabled); + vm.vm.push_transaction(tx); + let res = vm + .vm + .inspect(vec![Box::new(call_tracer)], VmExecutionMode::OneTx); + assert!(result.get().is_some()); + assert!(res.result.is_failed()); +} + +#[test] +fn test_basic_behavior() { + let contarct = read_test_contract(); + let address = Address::random(); + let mut vm = VmTesterBuilder::new(HistoryEnabled) + .with_empty_in_memory_storage() + .with_random_rich_accounts(1) + .with_deployer() + .with_gas_limit(BLOCK_GAS_LIMIT) + .with_execution_mode(TxExecutionMode::VerifyExecute) + .with_custom_contracts(vec![(contarct, address, true)]) + .build(); + + let increment_by_6_calldata = + "7cf5dab00000000000000000000000000000000000000000000000000000000000000006"; + + let account = &mut vm.rich_accounts[0]; + let tx = account.get_l2_tx_for_execute( + Execute { + contract_address: address, + calldata: hex::decode(increment_by_6_calldata).unwrap(), + value: Default::default(), + factory_deps: None, + }, + None, + ); + + let result = Arc::new(OnceCell::new()); + let call_tracer = CallTracer::new(result.clone(), HistoryEnabled); + vm.vm.push_transaction(tx); + let res = vm + .vm + .inspect(vec![Box::new(call_tracer)], VmExecutionMode::OneTx); + + let call_tracer_result = result.get().unwrap(); + + assert_eq!(call_tracer_result.len(), 1); + // Expect that there are a plenty of subcalls underneath. 
+ let subcall = &call_tracer_result[0].calls; + assert!(subcall.len() > 10); + assert!(!res.result.is_failed()); +} diff --git a/core/multivm_deps/vm_virtual_blocks/src/tests/default_aa.rs b/core/multivm_deps/vm_virtual_blocks/src/tests/default_aa.rs new file mode 100644 index 000000000000..b4c5f6b58323 --- /dev/null +++ b/core/multivm_deps/vm_virtual_blocks/src/tests/default_aa.rs @@ -0,0 +1,68 @@ +use zksync_config::constants::L2_ETH_TOKEN_ADDRESS; +use zksync_types::system_contracts::{DEPLOYMENT_NONCE_INCREMENT, TX_NONCE_INCREMENT}; + +use zksync_types::{get_code_key, get_known_code_key, get_nonce_key, AccountTreeId, U256}; +use zksync_utils::u256_to_h256; + +use crate::tests::tester::{DeployContractsTx, TxType, VmTesterBuilder}; +use crate::tests::utils::{get_balance, read_test_contract, verify_required_storage}; +use crate::types::inputs::system_env::TxExecutionMode; +use crate::{HistoryEnabled, VmExecutionMode}; + +#[test] +fn test_default_aa_interaction() { + // In this test, we aim to test whether a simple account interaction (without any fee logic) + // will work. The account will try to deploy a simple contract from integration tests. + let mut vm = VmTesterBuilder::new(HistoryEnabled) + .with_empty_in_memory_storage() + .with_execution_mode(TxExecutionMode::VerifyExecute) + .with_random_rich_accounts(1) + .build(); + + let counter = read_test_contract(); + let account = &mut vm.rich_accounts[0]; + let DeployContractsTx { + tx, + bytecode_hash, + address, + } = account.get_deploy_tx(&counter, None, TxType::L2); + let maximal_fee = tx.gas_limit() * vm.vm.batch_env.base_fee(); + + vm.vm.push_transaction(tx); + let result = vm.vm.execute(VmExecutionMode::OneTx); + assert!(!result.result.is_failed(), "Transaction wasn't successful"); + + vm.vm.execute(VmExecutionMode::Batch); + vm.vm.get_current_execution_state(); + + // Both deployment and ordinary nonce should be incremented by one. + let account_nonce_key = get_nonce_key(&account.address); + let expected_nonce = TX_NONCE_INCREMENT + DEPLOYMENT_NONCE_INCREMENT; + + // The code hash of the deployed contract should be marked as republished. + let known_codes_key = get_known_code_key(&bytecode_hash); + + // The contract should be deployed successfully. + let account_code_key = get_code_key(&address); + + let expected_slots = vec![ + (u256_to_h256(expected_nonce), account_nonce_key), + (u256_to_h256(U256::from(1u32)), known_codes_key), + (bytecode_hash, account_code_key), + ]; + + verify_required_storage(&vm.vm.state, expected_slots); + + let expected_fee = maximal_fee + - U256::from(result.refunds.gas_refunded) * U256::from(vm.vm.batch_env.base_fee()); + let operator_balance = get_balance( + AccountTreeId::new(L2_ETH_TOKEN_ADDRESS), + &vm.fee_account, + vm.vm.state.storage.storage.get_ptr(), + ); + + assert_eq!( + operator_balance, expected_fee, + "Operator did not receive his fee" + ); +} diff --git a/core/multivm_deps/vm_virtual_blocks/src/tests/gas_limit.rs b/core/multivm_deps/vm_virtual_blocks/src/tests/gas_limit.rs new file mode 100644 index 000000000000..c439b6d89b20 --- /dev/null +++ b/core/multivm_deps/vm_virtual_blocks/src/tests/gas_limit.rs @@ -0,0 +1,45 @@ +use zksync_types::fee::Fee; +use zksync_types::Execute; + +use crate::constants::{BOOTLOADER_HEAP_PAGE, TX_DESCRIPTION_OFFSET, TX_GAS_LIMIT_OFFSET}; +use crate::tests::tester::VmTesterBuilder; + +use crate::types::inputs::system_env::TxExecutionMode; +use crate::HistoryDisabled; + +/// Checks that `TX_GAS_LIMIT_OFFSET` constant is correct. 
+#[test] +fn test_tx_gas_limit_offset() { + let mut vm = VmTesterBuilder::new(HistoryDisabled) + .with_empty_in_memory_storage() + .with_execution_mode(TxExecutionMode::VerifyExecute) + .with_random_rich_accounts(1) + .build(); + + let gas_limit = 9999.into(); + let tx = vm.rich_accounts[0].get_l2_tx_for_execute( + Execute { + contract_address: Default::default(), + calldata: vec![], + value: Default::default(), + factory_deps: None, + }, + Some(Fee { + gas_limit, + ..Default::default() + }), + ); + + vm.vm.push_transaction(tx); + + let gas_limit_from_memory = vm + .vm + .state + .memory + .read_slot( + BOOTLOADER_HEAP_PAGE as usize, + TX_DESCRIPTION_OFFSET + TX_GAS_LIMIT_OFFSET, + ) + .value; + assert_eq!(gas_limit_from_memory, gas_limit); +} diff --git a/core/multivm_deps/vm_virtual_blocks/src/tests/get_used_contracts.rs b/core/multivm_deps/vm_virtual_blocks/src/tests/get_used_contracts.rs new file mode 100644 index 000000000000..90a8816eb55d --- /dev/null +++ b/core/multivm_deps/vm_virtual_blocks/src/tests/get_used_contracts.rs @@ -0,0 +1,104 @@ +use std::collections::{HashMap, HashSet}; + +use itertools::Itertools; + +use zksync_config::constants::CONTRACT_DEPLOYER_ADDRESS; +use zksync_state::WriteStorage; +use zksync_test_account::Account; +use zksync_types::{Execute, U256}; +use zksync_utils::bytecode::hash_bytecode; +use zksync_utils::h256_to_u256; + +use crate::tests::tester::{TxType, VmTesterBuilder}; +use crate::tests::utils::{read_test_contract, BASE_SYSTEM_CONTRACTS}; +use crate::types::inputs::system_env::TxExecutionMode; +use crate::{HistoryDisabled, HistoryMode, Vm, VmExecutionMode}; + +#[test] +fn test_get_used_contracts() { + let mut vm = VmTesterBuilder::new(HistoryDisabled) + .with_empty_in_memory_storage() + .with_execution_mode(TxExecutionMode::VerifyExecute) + .build(); + + assert!(known_bytecodes_without_aa_code(&vm.vm).is_empty()); + + // create and push and execute some not-empty factory deps transaction with success status + // to check that get_used_contracts() updates + let contract_code = read_test_contract(); + let mut account = Account::random(); + let tx = account.get_deploy_tx(&contract_code, None, TxType::L1 { serial_id: 0 }); + vm.vm.push_transaction(tx.tx.clone()); + let result = vm.vm.execute(VmExecutionMode::OneTx); + assert!(!result.result.is_failed()); + + assert!(vm + .vm + .get_used_contracts() + .contains(&h256_to_u256(tx.bytecode_hash))); + + // Note: Default_AA will be in the list of used contracts if l2 tx is used + assert_eq!( + vm.vm + .get_used_contracts() + .into_iter() + .collect::>(), + known_bytecodes_without_aa_code(&vm.vm) + .keys() + .cloned() + .collect::>() + ); + + // create push and execute some non-empty factory deps transaction that fails + // (known_bytecodes will be updated but we expect get_used_contracts() to not be updated) + + let calldata = [1, 2, 3]; + let big_calldata: Vec = calldata + .iter() + .cycle() + .take(calldata.len() * 1024) + .cloned() + .collect(); + let account2 = Account::random(); + let tx2 = account2.get_l1_tx( + Execute { + contract_address: CONTRACT_DEPLOYER_ADDRESS, + calldata: big_calldata, + value: Default::default(), + factory_deps: Some(vec![vec![1; 32]]), + }, + 1, + ); + + vm.vm.push_transaction(tx2.clone()); + + let res2 = vm.vm.execute(VmExecutionMode::OneTx); + + assert!(res2.result.is_failed()); + + for factory_dep in tx2.execute.factory_deps.unwrap() { + let hash = hash_bytecode(&factory_dep); + let hash_to_u256 = h256_to_u256(hash); + assert!(known_bytecodes_without_aa_code(&vm.vm) + 
.keys() + .contains(&hash_to_u256)); + assert!(!vm.vm.get_used_contracts().contains(&hash_to_u256)); + } +} + +fn known_bytecodes_without_aa_code( + vm: &Vm, +) -> HashMap> { + let mut known_bytecodes_without_aa_code = vm + .state + .decommittment_processor + .known_bytecodes + .inner() + .clone(); + + known_bytecodes_without_aa_code + .remove(&h256_to_u256(BASE_SYSTEM_CONTRACTS.default_aa.hash)) + .unwrap(); + + known_bytecodes_without_aa_code +} diff --git a/core/multivm_deps/vm_virtual_blocks/src/tests/invalid_bytecode.rs b/core/multivm_deps/vm_virtual_blocks/src/tests/invalid_bytecode.rs new file mode 100644 index 000000000000..6353d445e718 --- /dev/null +++ b/core/multivm_deps/vm_virtual_blocks/src/tests/invalid_bytecode.rs @@ -0,0 +1,120 @@ +use zksync_types::H256; +use zksync_utils::h256_to_u256; + +use crate::tests::tester::VmTesterBuilder; +use crate::types::inputs::system_env::TxExecutionMode; +use crate::{HistoryEnabled, TxRevertReason}; + +// TODO this test requires a lot of hacks for bypassing the bytecode checks in the VM. +// Port it later, it's not significant. for now + +#[test] +fn test_invalid_bytecode() { + let mut vm_builder = VmTesterBuilder::new(HistoryEnabled) + .with_in_memory_storage() + .with_execution_mode(TxExecutionMode::VerifyExecute) + .with_random_rich_accounts(1); + let mut storage = vm_builder.take_storage(); + let mut vm = vm_builder.build(&mut storage); + + let block_gas_per_pubdata = vm_test_env + .block_context + .context + .block_gas_price_per_pubdata(); + + let mut test_vm_with_custom_bytecode_hash = + |bytecode_hash: H256, expected_revert_reason: Option| { + let mut oracle_tools = + OracleTools::new(vm_test_env.storage_ptr.as_mut(), HistoryEnabled); + + let (encoded_tx, predefined_overhead) = get_l1_tx_with_custom_bytecode_hash( + h256_to_u256(bytecode_hash), + block_gas_per_pubdata as u32, + ); + + run_vm_with_custom_factory_deps( + &mut oracle_tools, + vm_test_env.block_context.context, + &vm_test_env.block_properties, + encoded_tx, + predefined_overhead, + expected_revert_reason, + ); + }; + + let failed_to_mark_factory_deps = |msg: &str, data: Vec| { + TxRevertReason::FailedToMarkFactoryDependencies(VmRevertReason::General { + msg: msg.to_string(), + data, + }) + }; + + // Here we provide the correctly-formatted bytecode hash of + // odd length, so it should work. + test_vm_with_custom_bytecode_hash( + H256([ + 1, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, + ]), + None, + ); + + // Here we provide correctly formatted bytecode of even length, so + // it should fail. + test_vm_with_custom_bytecode_hash( + H256([ + 1, 0, 2, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, + ]), + Some(failed_to_mark_factory_deps( + "Code length in words must be odd", + vec![ + 8, 195, 121, 160, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 32, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 32, 67, 111, 100, 101, 32, 108, 101, 110, + 103, 116, 104, 32, 105, 110, 32, 119, 111, 114, 100, 115, 32, 109, 117, 115, 116, + 32, 98, 101, 32, 111, 100, 100, + ], + )), + ); + + // Here we provide incorrectly formatted bytecode of odd length, so + // it should fail. 
+ test_vm_with_custom_bytecode_hash( + H256([ + 1, 1, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, + ]), + Some(failed_to_mark_factory_deps( + "Incorrectly formatted bytecodeHash", + vec![ + 8, 195, 121, 160, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 32, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 34, 73, 110, 99, 111, 114, 114, 101, 99, + 116, 108, 121, 32, 102, 111, 114, 109, 97, 116, 116, 101, 100, 32, 98, 121, 116, + 101, 99, 111, 100, 101, 72, 97, 115, 104, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + ], + )), + ); + + // Here we provide incorrectly formatted bytecode of odd length, so + // it should fail. + test_vm_with_custom_bytecode_hash( + H256([ + 2, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, + ]), + Some(failed_to_mark_factory_deps( + "Incorrectly formatted bytecodeHash", + vec![ + 8, 195, 121, 160, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 32, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 34, 73, 110, 99, 111, 114, 114, 101, 99, + 116, 108, 121, 32, 102, 111, 114, 109, 97, 116, 116, 101, 100, 32, 98, 121, 116, + 101, 99, 111, 100, 101, 72, 97, 115, 104, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + ], + )), + ); +} diff --git a/core/multivm_deps/vm_virtual_blocks/src/tests/is_write_initial.rs b/core/multivm_deps/vm_virtual_blocks/src/tests/is_write_initial.rs new file mode 100644 index 000000000000..7ccdf072744a --- /dev/null +++ b/core/multivm_deps/vm_virtual_blocks/src/tests/is_write_initial.rs @@ -0,0 +1,42 @@ +use zksync_state::ReadStorage; +use zksync_types::get_nonce_key; + +use crate::tests::tester::{Account, TxType, VmTesterBuilder}; +use crate::tests::utils::read_test_contract; +use crate::types::inputs::system_env::TxExecutionMode; +use crate::{HistoryDisabled, VmExecutionMode}; + +#[test] +fn test_is_write_initial_behaviour() { + // In this test, we check result of `is_write_initial` at different stages. + // The main idea is to check that `is_write_initial` storage uses the correct cache for initial_writes and doesn't + // messed up it with the repeated writes during the one batch execution. + + let mut account = Account::random(); + let mut vm = VmTesterBuilder::new(HistoryDisabled) + .with_empty_in_memory_storage() + .with_execution_mode(TxExecutionMode::VerifyExecute) + .with_rich_accounts(vec![account.clone()]) + .build(); + + let nonce_key = get_nonce_key(&account.address); + // Check that the next write to the nonce key will be initial. + assert!(vm + .storage + .as_ref() + .borrow_mut() + .is_write_initial(&nonce_key)); + + let contract_code = read_test_contract(); + let tx = account.get_deploy_tx(&contract_code, None, TxType::L2).tx; + + vm.vm.push_transaction(tx); + vm.vm.execute(VmExecutionMode::OneTx); + + // Check that `is_write_initial` still returns true for the nonce key. 
+ assert!(vm + .storage + .as_ref() + .borrow_mut() + .is_write_initial(&nonce_key)); +} diff --git a/core/multivm_deps/vm_virtual_blocks/src/tests/l1_tx_execution.rs b/core/multivm_deps/vm_virtual_blocks/src/tests/l1_tx_execution.rs new file mode 100644 index 000000000000..cd1c8f2460cb --- /dev/null +++ b/core/multivm_deps/vm_virtual_blocks/src/tests/l1_tx_execution.rs @@ -0,0 +1,123 @@ +use zksync_config::constants::BOOTLOADER_ADDRESS; +use zksync_types::l2_to_l1_log::L2ToL1Log; +use zksync_types::storage_writes_deduplicator::StorageWritesDeduplicator; +use zksync_types::{get_code_key, get_known_code_key, U256}; +use zksync_utils::u256_to_h256; + +use crate::tests::tester::{TxType, VmTesterBuilder}; +use crate::tests::utils::{read_test_contract, verify_required_storage, BASE_SYSTEM_CONTRACTS}; +use crate::types::inputs::system_env::TxExecutionMode; +use crate::types::internals::TransactionData; +use crate::{HistoryEnabled, VmExecutionMode}; + +#[test] +fn test_l1_tx_execution() { + // In this test, we try to execute a contract deployment from L1 + // Here instead of marking code hash via the bootloader means, we will be + // using L1->L2 communication, the same it would likely be done during the priority mode. + + // There are always at least 3 initial writes here, because we pay fees from l1: + // - totalSupply of ETH token + // - balance of the refund recipient + // - balance of the bootloader + // - tx_rollout hash + + let basic_initial_writes = 1; + + let mut vm = VmTesterBuilder::new(HistoryEnabled) + .with_empty_in_memory_storage() + .with_base_system_smart_contracts(BASE_SYSTEM_CONTRACTS.clone()) + .with_execution_mode(TxExecutionMode::VerifyExecute) + .with_random_rich_accounts(1) + .build(); + + let contract_code = read_test_contract(); + let account = &mut vm.rich_accounts[0]; + let deploy_tx = account.get_deploy_tx(&contract_code, None, TxType::L1 { serial_id: 1 }); + let tx_data: TransactionData = deploy_tx.tx.clone().into(); + + let required_l2_to_l1_logs = vec![L2ToL1Log { + shard_id: 0, + is_service: true, + tx_number_in_block: 0, + sender: BOOTLOADER_ADDRESS, + key: tx_data.tx_hash(0.into()), + value: u256_to_h256(U256::from(1u32)), + }]; + + vm.vm.push_transaction(deploy_tx.tx.clone()); + + let res = vm.vm.execute(VmExecutionMode::OneTx); + + // The code hash of the deployed contract should be marked as republished. + let known_codes_key = get_known_code_key(&deploy_tx.bytecode_hash); + + // The contract should be deployed successfully. 
+ let account_code_key = get_code_key(&deploy_tx.address); + + let expected_slots = vec![ + (u256_to_h256(U256::from(1u32)), known_codes_key), + (deploy_tx.bytecode_hash, account_code_key), + ]; + assert!(!res.result.is_failed()); + + verify_required_storage(&vm.vm.state, expected_slots); + + assert_eq!(res.logs.l2_to_l1_logs, required_l2_to_l1_logs); + + let tx = account.get_test_contract_transaction( + deploy_tx.address, + true, + None, + false, + TxType::L1 { serial_id: 0 }, + ); + vm.vm.push_transaction(tx); + let res = vm.vm.execute(VmExecutionMode::OneTx); + let storage_logs = res.logs.storage_logs; + let res = StorageWritesDeduplicator::apply_on_empty_state(&storage_logs); + + // Tx panicked + assert_eq!(res.initial_storage_writes - basic_initial_writes, 0); + + let tx = account.get_test_contract_transaction( + deploy_tx.address, + false, + None, + false, + TxType::L1 { serial_id: 0 }, + ); + vm.vm.push_transaction(tx.clone()); + let res = vm.vm.execute(VmExecutionMode::OneTx); + let storage_logs = res.logs.storage_logs; + let res = StorageWritesDeduplicator::apply_on_empty_state(&storage_logs); + // We changed one slot inside contract + assert_eq!(res.initial_storage_writes - basic_initial_writes, 1); + + // No repeated writes + let repeated_writes = res.repeated_storage_writes; + assert_eq!(res.repeated_storage_writes, 0); + + vm.vm.push_transaction(tx); + let storage_logs = vm.vm.execute(VmExecutionMode::OneTx).logs.storage_logs; + let res = StorageWritesDeduplicator::apply_on_empty_state(&storage_logs); + // We do the same storage write, it will be deduplicated, so still 4 initial write and 0 repeated + assert_eq!(res.initial_storage_writes - basic_initial_writes, 1); + assert_eq!(res.repeated_storage_writes, repeated_writes); + + let tx = account.get_test_contract_transaction( + deploy_tx.address, + false, + Some(10.into()), + false, + TxType::L1 { serial_id: 1 }, + ); + vm.vm.push_transaction(tx); + let result = vm.vm.execute(VmExecutionMode::OneTx); + // Method is not payable tx should fail + assert!(result.result.is_failed(), "The transaction should fail"); + + let res = StorageWritesDeduplicator::apply_on_empty_state(&result.logs.storage_logs); + // There are only basic initial writes + assert_eq!(res.initial_storage_writes - basic_initial_writes, 2); +} diff --git a/core/multivm_deps/vm_virtual_blocks/src/tests/l2_blocks.rs b/core/multivm_deps/vm_virtual_blocks/src/tests/l2_blocks.rs new file mode 100644 index 000000000000..9deac837f902 --- /dev/null +++ b/core/multivm_deps/vm_virtual_blocks/src/tests/l2_blocks.rs @@ -0,0 +1,500 @@ +//! +//! Tests for the bootloader +//! The description for each of the tests can be found in the corresponding `.yul` file. +//! 
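Several of the tests below seed the system-context storage slots directly with packed block info via `pack_block_info`. A minimal sketch of the packing convention assumed here, namely one 256-bit word holding `number * 2^128 + timestamp` (the layout is an assumption inferred from the `pack_block_info`/`unpack_block_info` usage in these tests, and `primitive_types::U256` stands in for the re-exported zksync `U256`):

    use primitive_types::U256;

    fn pack_block_info(number: u64, timestamp: u64) -> U256 {
        // Upper 128 bits: block number; lower 128 bits: timestamp.
        (U256::from(number) << 128) + U256::from(timestamp)
    }

    fn unpack_block_info(packed: U256) -> (u64, u64) {
        let number = (packed >> 128).as_u64();
        let timestamp = (packed & ((U256::one() << 128) - U256::one())).as_u64();
        (number, timestamp)
    }

    fn main() {
        let packed = pack_block_info(7, 1_696_000_000);
        assert_eq!(unpack_block_info(packed), (7, 1_696_000_000));
    }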
+ +use crate::constants::{ + BOOTLOADER_HEAP_PAGE, TX_OPERATOR_L2_BLOCK_INFO_OFFSET, TX_OPERATOR_SLOTS_PER_L2_BLOCK_INFO, +}; +use crate::tests::tester::default_l1_batch; +use crate::tests::tester::VmTesterBuilder; +use crate::utils::l2_blocks::get_l2_block_hash_key; +use crate::{ + ExecutionResult, Halt, HistoryEnabled, HistoryMode, L2BlockEnv, TxExecutionMode, Vm, + VmExecutionMode, +}; +use zk_evm::aux_structures::Timestamp; +use zksync_config::constants::{ + CURRENT_VIRTUAL_BLOCK_INFO_POSITION, REQUIRED_L1_TO_L2_GAS_PER_PUBDATA_BYTE, +}; +use zksync_state::{ReadStorage, WriteStorage}; +use zksync_types::block::{pack_block_info, unpack_block_info}; +use zksync_types::{ + block::{legacy_miniblock_hash, miniblock_hash}, + get_code_key, AccountTreeId, Execute, ExecuteTransactionCommon, L1BatchNumber, L1TxCommonData, + MiniblockNumber, StorageKey, Transaction, H160, H256, SYSTEM_CONTEXT_ADDRESS, + SYSTEM_CONTEXT_BLOCK_INFO_POSITION, SYSTEM_CONTEXT_CURRENT_L2_BLOCK_INFO_POSITION, + SYSTEM_CONTEXT_CURRENT_TX_ROLLING_HASH_POSITION, U256, +}; +use zksync_utils::{h256_to_u256, u256_to_h256}; + +fn get_l1_noop() -> Transaction { + Transaction { + common_data: ExecuteTransactionCommon::L1(L1TxCommonData { + sender: H160::random(), + gas_limit: U256::from(2000000u32), + gas_per_pubdata_limit: REQUIRED_L1_TO_L2_GAS_PER_PUBDATA_BYTE.into(), + ..Default::default() + }), + execute: Execute { + contract_address: H160::zero(), + calldata: vec![], + value: U256::zero(), + factory_deps: None, + }, + received_timestamp_ms: 0, + raw_bytes: None, + } +} + +#[test] +fn test_l2_block_initialization_timestamp() { + // This test checks that the L2 block initialization works correctly. + // Here we check that that the first block must have timestamp that is greater or equal to the timestamp + // of the current batch. + + let mut vm = VmTesterBuilder::new(HistoryEnabled) + .with_empty_in_memory_storage() + .with_execution_mode(TxExecutionMode::VerifyExecute) + .with_random_rich_accounts(1) + .build(); + + // Override the timestamp of the current miniblock to be 0. + vm.vm.bootloader_state.push_l2_block(L2BlockEnv { + number: 1, + timestamp: 0, + prev_block_hash: legacy_miniblock_hash(MiniblockNumber(0)), + max_virtual_blocks_to_create: 1, + }); + let l1_tx = get_l1_noop(); + + vm.vm.push_transaction(l1_tx); + let res = vm.vm.execute(VmExecutionMode::OneTx); + + assert_eq!( + res.result, + ExecutionResult::Halt {reason: Halt::FailedToSetL2Block("The timestamp of the L2 block must be greater than or equal to the timestamp of the current batch".to_string())} + ); +} + +#[test] +fn test_l2_block_initialization_number_non_zero() { + // This test checks that the L2 block initialization works correctly. + // Here we check that the first miniblock number can not be zero. 
+ + let l1_batch = default_l1_batch(L1BatchNumber(1)); + let first_l2_block = L2BlockEnv { + number: 0, + timestamp: l1_batch.timestamp, + prev_block_hash: legacy_miniblock_hash(MiniblockNumber(0)), + max_virtual_blocks_to_create: 1, + }; + + let mut vm = VmTesterBuilder::new(HistoryEnabled) + .with_empty_in_memory_storage() + .with_execution_mode(TxExecutionMode::VerifyExecute) + .with_l1_batch_env(l1_batch) + .with_random_rich_accounts(1) + .build(); + + let l1_tx = get_l1_noop(); + + vm.vm.push_transaction(l1_tx); + + let timestamp = Timestamp(vm.vm.state.local_state.timestamp); + set_manual_l2_block_info(&mut vm.vm, 0, first_l2_block, timestamp); + + let res = vm.vm.execute(VmExecutionMode::OneTx); + + assert_eq!( + res.result, + ExecutionResult::Halt { + reason: Halt::FailedToSetL2Block( + "L2 block number is never expected to be zero".to_string() + ) + } + ); +} + +fn test_same_l2_block( + expected_error: Option, + override_timestamp: Option, + override_prev_block_hash: Option, +) { + let mut l1_batch = default_l1_batch(L1BatchNumber(1)); + l1_batch.timestamp = 1; + let mut vm = VmTesterBuilder::new(HistoryEnabled) + .with_empty_in_memory_storage() + .with_execution_mode(TxExecutionMode::VerifyExecute) + .with_l1_batch_env(l1_batch) + .with_random_rich_accounts(1) + .build(); + + let l1_tx = get_l1_noop(); + vm.vm.push_transaction(l1_tx.clone()); + let res = vm.vm.execute(VmExecutionMode::OneTx); + assert!(!res.result.is_failed()); + + let mut current_l2_block = vm.vm.batch_env.first_l2_block; + + if let Some(timestamp) = override_timestamp { + current_l2_block.timestamp = timestamp; + } + if let Some(prev_block_hash) = override_prev_block_hash { + current_l2_block.prev_block_hash = prev_block_hash; + } + + if (None, None) == (override_timestamp, override_prev_block_hash) { + current_l2_block.max_virtual_blocks_to_create = 0; + } + + vm.vm.push_transaction(l1_tx); + let timestamp = Timestamp(vm.vm.state.local_state.timestamp); + set_manual_l2_block_info(&mut vm.vm, 1, current_l2_block, timestamp); + + let result = vm.vm.execute(VmExecutionMode::OneTx); + + if let Some(err) = expected_error { + assert_eq!(result.result, ExecutionResult::Halt { reason: err }); + } else { + assert_eq!(result.result, ExecutionResult::Success { output: vec![] }); + } +} + +#[test] +fn test_l2_block_same_l2_block() { + // This test aims to test the case when there are multiple transactions inside the same L2 block. 
+ + // Case 1: Incorrect timestamp + test_same_l2_block( + Some(Halt::FailedToSetL2Block( + "The timestamp of the same L2 block must be same".to_string(), + )), + Some(0), + None, + ); + + // Case 2: Incorrect previous block hash + test_same_l2_block( + Some(Halt::FailedToSetL2Block( + "The previous hash of the same L2 block must be same".to_string(), + )), + None, + Some(H256::zero()), + ); + + // Case 3: Correct continuation of the same L2 block + test_same_l2_block(None, None, None); +} + +fn test_new_l2_block( + first_l2_block: L2BlockEnv, + overriden_second_block_number: Option, + overriden_second_block_timestamp: Option, + overriden_second_block_prev_block_hash: Option, + expected_error: Option, +) { + let mut l1_batch = default_l1_batch(L1BatchNumber(1)); + l1_batch.timestamp = 1; + l1_batch.first_l2_block = first_l2_block; + + let mut vm = VmTesterBuilder::new(HistoryEnabled) + .with_empty_in_memory_storage() + .with_l1_batch_env(l1_batch) + .with_execution_mode(TxExecutionMode::VerifyExecute) + .with_random_rich_accounts(1) + .build(); + + let l1_tx = get_l1_noop(); + + // Firstly we execute the first transaction + vm.vm.push_transaction(l1_tx.clone()); + vm.vm.execute(VmExecutionMode::OneTx); + + let mut second_l2_block = vm.vm.batch_env.first_l2_block; + second_l2_block.number += 1; + second_l2_block.timestamp += 1; + second_l2_block.prev_block_hash = vm.vm.bootloader_state.last_l2_block().get_hash(); + + if let Some(block_number) = overriden_second_block_number { + second_l2_block.number = block_number; + } + if let Some(timestamp) = overriden_second_block_timestamp { + second_l2_block.timestamp = timestamp; + } + if let Some(prev_block_hash) = overriden_second_block_prev_block_hash { + second_l2_block.prev_block_hash = prev_block_hash; + } + + vm.vm.bootloader_state.push_l2_block(second_l2_block); + + vm.vm.push_transaction(l1_tx); + + let result = vm.vm.execute(VmExecutionMode::OneTx); + if let Some(err) = expected_error { + assert_eq!(result.result, ExecutionResult::Halt { reason: err }); + } else { + assert_eq!(result.result, ExecutionResult::Success { output: vec![] }); + } +} + +#[test] +fn test_l2_block_new_l2_block() { + // This test is aimed to cover potential issue + + let correct_first_block = L2BlockEnv { + number: 1, + timestamp: 1, + prev_block_hash: legacy_miniblock_hash(MiniblockNumber(0)), + max_virtual_blocks_to_create: 1, + }; + + // Case 1: Block number increasing by more than 1 + test_new_l2_block( + correct_first_block, + Some(3), + None, + None, + Some(Halt::FailedToSetL2Block( + "Invalid new L2 block number".to_string(), + )), + ); + + // Case 2: Timestamp not increasing + test_new_l2_block( + correct_first_block, + None, + Some(1), + None, + Some(Halt::FailedToSetL2Block("The timestamp of the new L2 block must be greater than the timestamp of the previous L2 block".to_string())), + ); + + // Case 3: Incorrect previous block hash + test_new_l2_block( + correct_first_block, + None, + None, + Some(H256::zero()), + Some(Halt::FailedToSetL2Block( + "The current L2 block hash is incorrect".to_string(), + )), + ); + + // Case 4: Correct new block + test_new_l2_block(correct_first_block, None, None, None, None); +} + +#[allow(clippy::too_many_arguments)] +fn test_first_in_batch( + miniblock_timestamp: u64, + miniblock_number: u32, + pending_txs_hash: H256, + batch_timestamp: u64, + new_batch_timestamp: u64, + batch_number: u32, + proposed_block: L2BlockEnv, + expected_error: Option, +) { + let mut l1_batch = default_l1_batch(L1BatchNumber(1)); + l1_batch.number 
+= 1; + l1_batch.timestamp = new_batch_timestamp; + + let mut vm = VmTesterBuilder::new(HistoryEnabled) + .with_empty_in_memory_storage() + .with_l1_batch_env(l1_batch) + .with_execution_mode(TxExecutionMode::VerifyExecute) + .with_random_rich_accounts(1) + .build(); + let l1_tx = get_l1_noop(); + + // Setting the values provided. + let storage_ptr = vm.vm.state.storage.storage.get_ptr(); + let miniblock_info_slot = StorageKey::new( + AccountTreeId::new(SYSTEM_CONTEXT_ADDRESS), + SYSTEM_CONTEXT_CURRENT_L2_BLOCK_INFO_POSITION, + ); + let pending_txs_hash_slot = StorageKey::new( + AccountTreeId::new(SYSTEM_CONTEXT_ADDRESS), + SYSTEM_CONTEXT_CURRENT_TX_ROLLING_HASH_POSITION, + ); + let batch_info_slot = StorageKey::new( + AccountTreeId::new(SYSTEM_CONTEXT_ADDRESS), + SYSTEM_CONTEXT_BLOCK_INFO_POSITION, + ); + let prev_block_hash_position = get_l2_block_hash_key(miniblock_number - 1); + + storage_ptr.borrow_mut().set_value( + miniblock_info_slot, + u256_to_h256(pack_block_info( + miniblock_number as u64, + miniblock_timestamp, + )), + ); + storage_ptr + .borrow_mut() + .set_value(pending_txs_hash_slot, pending_txs_hash); + storage_ptr.borrow_mut().set_value( + batch_info_slot, + u256_to_h256(pack_block_info(batch_number as u64, batch_timestamp)), + ); + storage_ptr.borrow_mut().set_value( + prev_block_hash_position, + legacy_miniblock_hash(MiniblockNumber(miniblock_number - 1)), + ); + + // In order to skip checks from the Rust side of the VM, we firstly use some definitely correct L2 block info. + // And then override it with the user-provided value + + let last_l2_block = vm.vm.bootloader_state.last_l2_block(); + let new_l2_block = L2BlockEnv { + number: last_l2_block.number + 1, + timestamp: last_l2_block.timestamp + 1, + prev_block_hash: last_l2_block.get_hash(), + max_virtual_blocks_to_create: last_l2_block.max_virtual_blocks_to_create, + }; + + vm.vm.bootloader_state.push_l2_block(new_l2_block); + vm.vm.push_transaction(l1_tx); + let timestamp = Timestamp(vm.vm.state.local_state.timestamp); + set_manual_l2_block_info(&mut vm.vm, 0, proposed_block, timestamp); + + let result = vm.vm.execute(VmExecutionMode::OneTx); + if let Some(err) = expected_error { + assert_eq!(result.result, ExecutionResult::Halt { reason: err }); + } else { + assert_eq!(result.result, ExecutionResult::Success { output: vec![] }); + } +} + +#[test] +fn test_l2_block_first_in_batch() { + test_first_in_batch( + 1, + 1, + H256::zero(), + 1, + 2, + 1, + L2BlockEnv { + number: 2, + timestamp: 2, + prev_block_hash: miniblock_hash( + MiniblockNumber(1), + 1, + legacy_miniblock_hash(MiniblockNumber(0)), + H256::zero(), + ), + max_virtual_blocks_to_create: 1, + }, + None, + ); + + test_first_in_batch( + 8, + 1, + H256::zero(), + 5, + 12, + 1, + L2BlockEnv { + number: 2, + timestamp: 9, + prev_block_hash: miniblock_hash(MiniblockNumber(1), 8, legacy_miniblock_hash(MiniblockNumber(0)), H256::zero()), + max_virtual_blocks_to_create: 1 + }, + Some(Halt::FailedToSetL2Block("The timestamp of the L2 block must be greater than or equal to the timestamp of the current batch".to_string())), + ); +} + +#[test] +fn test_l2_block_upgrade() { + let mut vm = VmTesterBuilder::new(HistoryEnabled) + .with_empty_in_memory_storage() + .with_execution_mode(TxExecutionMode::VerifyExecute) + .with_random_rich_accounts(1) + .build(); + + vm.vm + .state + .storage + .storage + .get_ptr() + .borrow_mut() + .set_value(get_code_key(&SYSTEM_CONTEXT_ADDRESS), H256::default()); + + let l1_tx = get_l1_noop(); + // Firstly we execute the first transaction + 
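+    // (A hedged aside: the L1 no-op is convenient here presumably because
+    // L1->L2 transactions skip account validation, so the test isolates the
+    // upgrade behavior triggered by zeroing the system context code hash above.)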
vm.vm.push_transaction(l1_tx); + let result = vm.vm.execute(VmExecutionMode::OneTx); + assert!(!result.result.is_failed(), "No revert reason expected"); + let result = vm.vm.execute(VmExecutionMode::Batch); + assert!(!result.result.is_failed(), "No revert reason expected"); +} + +#[test] +fn test_l2_block_upgrade_ending() { + let mut l1_batch = default_l1_batch(L1BatchNumber(1)); + l1_batch.timestamp = 1; + let mut vm = VmTesterBuilder::new(HistoryEnabled) + .with_empty_in_memory_storage() + .with_execution_mode(TxExecutionMode::VerifyExecute) + .with_l1_batch_env(l1_batch.clone()) + .with_random_rich_accounts(1) + .build(); + + let l1_tx = get_l1_noop(); + + let storage = vm.storage.clone(); + + storage + .borrow_mut() + .set_value(get_code_key(&SYSTEM_CONTEXT_ADDRESS), H256::default()); + + vm.vm.push_transaction(l1_tx.clone()); + let result = vm.vm.execute(VmExecutionMode::OneTx); + + assert!(!result.result.is_failed(), "No revert reason expected"); + + let virtual_block_info = storage.borrow_mut().read_value(&StorageKey::new( + AccountTreeId::new(SYSTEM_CONTEXT_ADDRESS), + CURRENT_VIRTUAL_BLOCK_INFO_POSITION, + )); + + let (virtual_block_number, virtual_block_timestamp) = + unpack_block_info(h256_to_u256(virtual_block_info)); + + assert_eq!(virtual_block_number as u32, l1_batch.first_l2_block.number); + assert_eq!(virtual_block_timestamp, l1_batch.first_l2_block.timestamp); + vm.vm.push_transaction(l1_tx); + let result = vm.vm.execute(VmExecutionMode::OneTx); + assert!(!result.result.is_failed(), "No revert reason expected"); + let result = vm.vm.execute(VmExecutionMode::Batch); + assert!(!result.result.is_failed(), "No revert reason expected"); +} + +fn set_manual_l2_block_info( + vm: &mut Vm, + tx_number: usize, + block_info: L2BlockEnv, + timestamp: Timestamp, +) { + let fictive_miniblock_position = + TX_OPERATOR_L2_BLOCK_INFO_OFFSET + TX_OPERATOR_SLOTS_PER_L2_BLOCK_INFO * tx_number; + + vm.state.memory.populate_page( + BOOTLOADER_HEAP_PAGE as usize, + vec![ + (fictive_miniblock_position, block_info.number.into()), + (fictive_miniblock_position + 1, block_info.timestamp.into()), + ( + fictive_miniblock_position + 2, + h256_to_u256(block_info.prev_block_hash), + ), + ( + fictive_miniblock_position + 3, + block_info.max_virtual_blocks_to_create.into(), + ), + ], + timestamp, + ) +} diff --git a/core/multivm_deps/vm_virtual_blocks/src/tests/mod.rs b/core/multivm_deps/vm_virtual_blocks/src/tests/mod.rs new file mode 100644 index 000000000000..ffb38dd3725a --- /dev/null +++ b/core/multivm_deps/vm_virtual_blocks/src/tests/mod.rs @@ -0,0 +1,20 @@ +mod bootloader; +mod default_aa; +// TODO - fix this test +// mod invalid_bytecode; +mod bytecode_publishing; +mod call_tracer; +mod gas_limit; +mod get_used_contracts; +mod is_write_initial; +mod l1_tx_execution; +mod l2_blocks; +mod nonce_holder; +mod refunds; +mod require_eip712; +mod rollbacks; +mod simple_execution; +mod tester; +mod tracing_execution_error; +mod upgrade; +mod utils; diff --git a/core/multivm_deps/vm_virtual_blocks/src/tests/nonce_holder.rs b/core/multivm_deps/vm_virtual_blocks/src/tests/nonce_holder.rs new file mode 100644 index 000000000000..35af6ad15f43 --- /dev/null +++ b/core/multivm_deps/vm_virtual_blocks/src/tests/nonce_holder.rs @@ -0,0 +1,180 @@ +use zksync_types::{Execute, Nonce}; + +use crate::errors::VmRevertReason; +use crate::tests::tester::{Account, VmTesterBuilder}; +use crate::tests::utils::read_nonce_holder_tester; +use crate::types::inputs::system_env::TxExecutionMode; +use 
crate::types::internals::TransactionData; +use crate::{ExecutionResult, Halt, HistoryEnabled, TxRevertReason, VmExecutionMode}; + +pub enum NonceHolderTestMode { + SetValueUnderNonce, + IncreaseMinNonceBy5, + IncreaseMinNonceTooMuch, + LeaveNonceUnused, + IncreaseMinNonceBy1, + SwitchToArbitraryOrdering, +} + +impl From for u8 { + fn from(mode: NonceHolderTestMode) -> u8 { + match mode { + NonceHolderTestMode::SetValueUnderNonce => 0, + NonceHolderTestMode::IncreaseMinNonceBy5 => 1, + NonceHolderTestMode::IncreaseMinNonceTooMuch => 2, + NonceHolderTestMode::LeaveNonceUnused => 3, + NonceHolderTestMode::IncreaseMinNonceBy1 => 4, + NonceHolderTestMode::SwitchToArbitraryOrdering => 5, + } + } +} + +#[test] +fn test_nonce_holder() { + let mut account = Account::random(); + + let mut vm = VmTesterBuilder::new(HistoryEnabled) + .with_empty_in_memory_storage() + .with_execution_mode(TxExecutionMode::VerifyExecute) + .with_deployer() + .with_custom_contracts(vec![( + read_nonce_holder_tester().to_vec(), + account.address, + true, + )]) + .with_rich_accounts(vec![account.clone()]) + .build(); + + let mut run_nonce_test = |nonce: u32, + test_mode: NonceHolderTestMode, + error_message: Option, + comment: &'static str| { + // In this test we have to reset VM state after each test case. Because once bootloader failed during the validation of the transaction, + // it will fail again and again. At the same time we have to keep the same storage, because we want to keep the nonce holder contract state. + // The easiest way in terms of lifetimes is to reuse vm_builder to achieve it. + vm.reset_state(true); + let mut transaction_data: TransactionData = account + .get_l2_tx_for_execute_with_nonce( + Execute { + contract_address: account.address, + calldata: vec![12], + value: Default::default(), + factory_deps: None, + }, + None, + Nonce(nonce), + ) + .into(); + + transaction_data.signature = vec![test_mode.into()]; + vm.vm.push_raw_transaction(transaction_data, 0, 0, true); + let result = vm.vm.execute(VmExecutionMode::OneTx); + + if let Some(msg) = error_message { + let expected_error = + TxRevertReason::Halt(Halt::ValidationFailed(VmRevertReason::General { + msg, + data: vec![], + })); + let ExecutionResult::Halt { reason } = result.result else { + panic!("Expected revert, got {:?}", result.result); + }; + assert_eq!( + reason.to_string(), + expected_error.to_string(), + "{}", + comment + ); + } else { + assert!(!result.result.is_failed(), "{}", comment); + } + }; + // Test 1: trying to set value under non sequential nonce value. 
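+    // (The mode is smuggled in through the signature field set above,
+    // `transaction_data.signature = vec![test_mode.into()]`, so the nonce
+    // holder tester contract is assumed to read signature[0] as the mode.)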
+ run_nonce_test( + 1u32, + NonceHolderTestMode::SetValueUnderNonce, + Some("Previous nonce has not been used".to_string()), + "Allowed to set value under non sequential value", + ); + + // Test 2: increase min nonce by 1 with sequential nonce ordering: + run_nonce_test( + 0u32, + NonceHolderTestMode::IncreaseMinNonceBy1, + None, + "Failed to increment nonce by 1 for sequential account", + ); + + // Test 3: correctly set value under nonce with sequential nonce ordering: + run_nonce_test( + 1u32, + NonceHolderTestMode::SetValueUnderNonce, + None, + "Failed to set value under nonce sequential value", + ); + + // Test 5: migrate to the arbitrary nonce ordering: + run_nonce_test( + 2u32, + NonceHolderTestMode::SwitchToArbitraryOrdering, + None, + "Failed to switch to arbitrary ordering", + ); + + // Test 6: increase min nonce by 5 + run_nonce_test( + 6u32, + NonceHolderTestMode::IncreaseMinNonceBy5, + None, + "Failed to increase min nonce by 5", + ); + + // Test 7: since the nonces in range [6,10] are no longer allowed, the + // tx with nonce 10 should not be allowed + run_nonce_test( + 10u32, + NonceHolderTestMode::IncreaseMinNonceBy5, + Some("Reusing the same nonce twice".to_string()), + "Allowed to reuse nonce below the minimal one", + ); + + // Test 8: we should be able to use nonce 13 + run_nonce_test( + 13u32, + NonceHolderTestMode::SetValueUnderNonce, + None, + "Did not allow to use unused nonce 10", + ); + + // Test 9: we should not be able to reuse nonce 13 + run_nonce_test( + 13u32, + NonceHolderTestMode::IncreaseMinNonceBy5, + Some("Reusing the same nonce twice".to_string()), + "Allowed to reuse the same nonce twice", + ); + + // Test 10: we should be able to simply use nonce 14, while bumping the minimal nonce by 5 + run_nonce_test( + 14u32, + NonceHolderTestMode::IncreaseMinNonceBy5, + None, + "Did not allow to use a bumped nonce", + ); + + // Test 11: Do not allow bumping nonce by too much + run_nonce_test( + 16u32, + NonceHolderTestMode::IncreaseMinNonceTooMuch, + Some("The value for incrementing the nonce is too high".to_string()), + "Allowed for incrementing min nonce too much", + ); + + // Test 12: Do not allow not setting a nonce as used + run_nonce_test( + 16u32, + NonceHolderTestMode::LeaveNonceUnused, + Some("The nonce was not set as used".to_string()), + "Allowed to leave nonce as unused", + ); +} diff --git a/core/multivm_deps/vm_virtual_blocks/src/tests/refunds.rs b/core/multivm_deps/vm_virtual_blocks/src/tests/refunds.rs new file mode 100644 index 000000000000..4314f57489eb --- /dev/null +++ b/core/multivm_deps/vm_virtual_blocks/src/tests/refunds.rs @@ -0,0 +1,152 @@ +use crate::tests::tester::{DeployContractsTx, TxType, VmTesterBuilder}; +use crate::tests::utils::read_test_contract; +use crate::types::inputs::system_env::TxExecutionMode; + +use crate::types::internals::TransactionData; +use crate::{HistoryEnabled, VmExecutionMode}; + +#[test] +fn test_predetermined_refunded_gas() { + // In this test, we compare the execution of the bootloader with the predefined + // refunded gas and without them + + let mut vm = VmTesterBuilder::new(HistoryEnabled) + .with_empty_in_memory_storage() + .with_execution_mode(TxExecutionMode::VerifyExecute) + .with_random_rich_accounts(1) + .build(); + let l1_batch = vm.vm.batch_env.clone(); + + let counter = read_test_contract(); + let account = &mut vm.rich_accounts[0]; + + let DeployContractsTx { + tx, + bytecode_hash: _, + address: _, + } = account.get_deploy_tx(&counter, None, TxType::L2); + vm.vm.push_transaction(tx.clone()); + let 
result = vm.vm.execute(VmExecutionMode::OneTx); + + assert!(!result.result.is_failed()); + + // If the refund provided by the operator or the final refund are the 0 + // there is no impact of the operator's refund at all and so this test does not + // make much sense. + assert!( + result.refunds.operator_suggested_refund > 0, + "The operator's refund is 0" + ); + assert!(result.refunds.gas_refunded > 0, "The final refund is 0"); + + let result_without_predefined_refunds = vm.vm.execute(VmExecutionMode::Batch); + let mut current_state_without_predefined_refunds = vm.vm.get_current_execution_state(); + assert!(!result_without_predefined_refunds.result.is_failed(),); + + // Here we want to provide the same refund from the operator and check that it's the correct one. + // We execute the whole block without refund tracer, because refund tracer will eventually override the provided refund. + // But the overall result should be the same + + let mut vm = VmTesterBuilder::new(HistoryEnabled) + .with_empty_in_memory_storage() + .with_l1_batch_env(l1_batch.clone()) + .with_execution_mode(TxExecutionMode::VerifyExecute) + .with_rich_accounts(vec![account.clone()]) + .build(); + + let tx: TransactionData = tx.into(); + let block_gas_per_pubdata_byte = vm.vm.batch_env.block_gas_price_per_pubdata(); + // Overhead + let overhead = tx.overhead_gas(block_gas_per_pubdata_byte as u32); + vm.vm + .push_raw_transaction(tx.clone(), overhead, result.refunds.gas_refunded, true); + + let result_with_predefined_refunds = vm.vm.execute(VmExecutionMode::Batch); + let mut current_state_with_predefined_refunds = vm.vm.get_current_execution_state(); + + assert!(!result_with_predefined_refunds.result.is_failed()); + + // We need to sort these lists as those are flattened from HashMaps + current_state_with_predefined_refunds + .used_contract_hashes + .sort(); + current_state_without_predefined_refunds + .used_contract_hashes + .sort(); + + assert_eq!( + current_state_with_predefined_refunds.events, + current_state_without_predefined_refunds.events + ); + + assert_eq!( + current_state_with_predefined_refunds.l2_to_l1_logs, + current_state_without_predefined_refunds.l2_to_l1_logs + ); + + assert_eq!( + current_state_with_predefined_refunds.storage_log_queries, + current_state_without_predefined_refunds.storage_log_queries + ); + assert_eq!( + current_state_with_predefined_refunds.used_contract_hashes, + current_state_without_predefined_refunds.used_contract_hashes + ); + + // In this test we put the different refund from the operator. + // We still can't use the refund tracer, because it will override the refund. + // But we can check that the logs and events have changed. 
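+    // (Assumption behind the checks below: the refund value feeds into
+    // bootloader events and storage log values, so we expect equal counts but
+    // different contents for events and storage queries, while L2->L1 logs
+    // and used contract hashes stay identical.)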
+ let mut vm = VmTesterBuilder::new(HistoryEnabled) + .with_empty_in_memory_storage() + .with_l1_batch_env(l1_batch) + .with_execution_mode(TxExecutionMode::VerifyExecute) + .with_rich_accounts(vec![account.clone()]) + .build(); + + let changed_operator_suggested_refund = result.refunds.gas_refunded + 1000; + vm.vm + .push_raw_transaction(tx, overhead, changed_operator_suggested_refund, true); + let result = vm.vm.execute(VmExecutionMode::Batch); + let mut current_state_with_changed_predefined_refunds = vm.vm.get_current_execution_state(); + + assert!(!result.result.is_failed()); + current_state_with_changed_predefined_refunds + .used_contract_hashes + .sort(); + current_state_without_predefined_refunds + .used_contract_hashes + .sort(); + + assert_eq!( + current_state_with_changed_predefined_refunds.events.len(), + current_state_without_predefined_refunds.events.len() + ); + + assert_ne!( + current_state_with_changed_predefined_refunds.events, + current_state_without_predefined_refunds.events + ); + + assert_eq!( + current_state_with_changed_predefined_refunds.l2_to_l1_logs, + current_state_without_predefined_refunds.l2_to_l1_logs + ); + + assert_eq!( + current_state_with_changed_predefined_refunds + .storage_log_queries + .len(), + current_state_without_predefined_refunds + .storage_log_queries + .len() + ); + + assert_ne!( + current_state_with_changed_predefined_refunds.storage_log_queries, + current_state_without_predefined_refunds.storage_log_queries + ); + assert_eq!( + current_state_with_changed_predefined_refunds.used_contract_hashes, + current_state_without_predefined_refunds.used_contract_hashes + ); +} diff --git a/core/multivm_deps/vm_virtual_blocks/src/tests/require_eip712.rs b/core/multivm_deps/vm_virtual_blocks/src/tests/require_eip712.rs new file mode 100644 index 000000000000..6538318c26f4 --- /dev/null +++ b/core/multivm_deps/vm_virtual_blocks/src/tests/require_eip712.rs @@ -0,0 +1,161 @@ +use std::convert::TryInto; + +use ethabi::Token; + +use zksync_config::constants::L2_ETH_TOKEN_ADDRESS; +use zksync_eth_signer::raw_ethereum_tx::TransactionParameters; +use zksync_eth_signer::EthereumSigner; +use zksync_types::fee::Fee; +use zksync_types::l2::L2Tx; +use zksync_types::transaction_request::TransactionRequest; +use zksync_types::utils::storage_key_for_standard_token_balance; +use zksync_types::{AccountTreeId, Address, Eip712Domain, Execute, Nonce, Transaction, U256}; + +use crate::tests::tester::{Account, VmTester, VmTesterBuilder}; +use crate::tests::utils::read_many_owners_custom_account_contract; +use crate::types::inputs::system_env::TxExecutionMode; +use crate::{HistoryDisabled, VmExecutionMode}; + +impl VmTester { + pub(crate) fn get_eth_balance(&mut self, address: Address) -> U256 { + let key = storage_key_for_standard_token_balance( + AccountTreeId::new(L2_ETH_TOKEN_ADDRESS), + &address, + ); + self.vm.state.storage.storage.read_from_storage(&key) + } +} + +// TODO refactor this test it use too much internal details of the VM +#[tokio::test] +/// This test deploys 'buggy' account abstraction code, and then tries accessing it both with legacy +/// and EIP712 transactions. +/// Currently we support both, but in the future, we should allow only EIP712 transactions to access the AA accounts. +async fn test_require_eip712() { + // Use 3 accounts: + // - private_address - EOA account, where we have the key + // - account_address - AA account, where the contract is deployed + // - beneficiary - an EOA account, where we'll try to transfer the tokens. 
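+    // (Sketch of the flow below: deploy the custom AA code, authorize
+    // private_account via setOwners, then drain the AA account first with a
+    // raw legacy transaction and then with a signed EIP-712 transaction.)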
+ let account_abstraction = Account::random(); + let mut private_account = Account::random(); + let beneficiary = Account::random(); + + let (bytecode, contract) = read_many_owners_custom_account_contract(); + let mut vm = VmTesterBuilder::new(HistoryDisabled) + .with_empty_in_memory_storage() + .with_custom_contracts(vec![(bytecode, account_abstraction.address, true)]) + .with_execution_mode(TxExecutionMode::VerifyExecute) + .with_rich_accounts(vec![account_abstraction.clone(), private_account.clone()]) + .build(); + + assert_eq!(vm.get_eth_balance(beneficiary.address), U256::from(0)); + + let chain_id: u32 = 270; + + // First, let's set the owners of the AA account to the private_address. + // (so that messages signed by private_address, are authorized to act on behalf of the AA account). + let set_owners_function = contract.function("setOwners").unwrap(); + let encoded_input = set_owners_function + .encode_input(&[Token::Array(vec![Token::Address(private_account.address)])]) + .unwrap(); + + let tx = private_account.get_l2_tx_for_execute( + Execute { + contract_address: account_abstraction.address, + calldata: encoded_input, + value: Default::default(), + factory_deps: None, + }, + None, + ); + + vm.vm.push_transaction(tx); + let result = vm.vm.execute(VmExecutionMode::OneTx); + assert!(!result.result.is_failed()); + + let private_account_balance = vm.get_eth_balance(private_account.address); + + // And now let's do the transfer from the 'account abstraction' to 'beneficiary' (using 'legacy' transaction). + // Normally this would not work - unless the operator is malicious. + let aa_raw_tx = TransactionParameters { + nonce: U256::from(0), + to: Some(beneficiary.address), + gas: U256::from(100000000), + gas_price: Some(U256::from(10000000)), + value: U256::from(888000088), + data: vec![], + chain_id: 270, + transaction_type: None, + access_list: None, + max_fee_per_gas: U256::from(1000000000), + max_priority_fee_per_gas: U256::from(1000000000), + }; + + let aa_tx = private_account.sign_legacy_tx(aa_raw_tx).await; + let (tx_request, hash) = TransactionRequest::from_bytes(&aa_tx, 270.into()).unwrap(); + + let mut l2_tx: L2Tx = L2Tx::from_request(tx_request, 10000).unwrap(); + l2_tx.set_input(aa_tx, hash); + // Pretend that operator is malicious and sets the initiator to the AA account. + l2_tx.common_data.initiator_address = account_abstraction.address; + let transaction: Transaction = l2_tx.try_into().unwrap(); + + vm.vm.push_transaction(transaction); + let result = vm.vm.execute(VmExecutionMode::OneTx); + assert!(!result.result.is_failed()); + assert_eq!( + vm.get_eth_balance(beneficiary.address), + U256::from(888000088) + ); + // Make sure that the tokens were transfered from the AA account. 
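+    // (That is, the EOA balance must be unchanged: only the AA account paid,
+    // which is what makes the malicious-operator scenario above observable.)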
+ assert_eq!( + private_account_balance, + vm.get_eth_balance(private_account.address) + ); + + // // Now send the 'classic' EIP712 transaction + let tx_712 = L2Tx::new( + beneficiary.address, + vec![], + Nonce(1), + Fee { + gas_limit: U256::from(1000000000), + max_fee_per_gas: U256::from(1000000000), + max_priority_fee_per_gas: U256::from(1000000000), + gas_per_pubdata_limit: U256::from(1000000000), + }, + account_abstraction.address, + U256::from(28374938), + None, + Default::default(), + ); + + let transaction_request: TransactionRequest = tx_712.into(); + + let domain = Eip712Domain::new(chain_id.into()); + let signature = private_account + .get_pk_signer() + .sign_typed_data(&domain, &transaction_request) + .await + .unwrap(); + let encoded_tx = transaction_request.get_signed_bytes(&signature, chain_id.into()); + + let (aa_txn_request, aa_hash) = + TransactionRequest::from_bytes(&encoded_tx, chain_id.into()).unwrap(); + + let mut l2_tx = L2Tx::from_request(aa_txn_request, 100000).unwrap(); + l2_tx.set_input(encoded_tx, aa_hash); + + let transaction: Transaction = l2_tx.try_into().unwrap(); + vm.vm.push_transaction(transaction); + vm.vm.execute(VmExecutionMode::OneTx); + + assert_eq!( + vm.get_eth_balance(beneficiary.address), + U256::from(916375026) + ); + assert_eq!( + private_account_balance, + vm.get_eth_balance(private_account.address) + ); +} diff --git a/core/multivm_deps/vm_virtual_blocks/src/tests/rollbacks.rs b/core/multivm_deps/vm_virtual_blocks/src/tests/rollbacks.rs new file mode 100644 index 000000000000..1fa6a2afe390 --- /dev/null +++ b/core/multivm_deps/vm_virtual_blocks/src/tests/rollbacks.rs @@ -0,0 +1,146 @@ +use ethabi::Token; + +use zksync_contracts::get_loadnext_contract; +use zksync_contracts::test_contracts::LoadnextContractExecutionParams; + +use zksync_types::{Execute, U256}; + +use crate::tests::tester::{ + DeployContractsTx, TransactionTestInfo, TxModifier, TxType, VmTesterBuilder, +}; +use crate::tests::utils::read_test_contract; +use crate::types::inputs::system_env::TxExecutionMode; +use crate::HistoryEnabled; + +#[test] +fn test_vm_rollbacks() { + let mut vm = VmTesterBuilder::new(HistoryEnabled) + .with_empty_in_memory_storage() + .with_execution_mode(TxExecutionMode::VerifyExecute) + .with_random_rich_accounts(1) + .build(); + + let mut account = vm.rich_accounts[0].clone(); + let counter = read_test_contract(); + let tx_0 = account.get_deploy_tx(&counter, None, TxType::L2).tx; + let tx_1 = account.get_deploy_tx(&counter, None, TxType::L2).tx; + let tx_2 = account.get_deploy_tx(&counter, None, TxType::L2).tx; + + let result_without_rollbacks = vm.execute_and_verify_txs(&vec![ + TransactionTestInfo::new_processed(tx_0.clone(), false), + TransactionTestInfo::new_processed(tx_1.clone(), false), + TransactionTestInfo::new_processed(tx_2.clone(), false), + ]); + + // reset vm + vm.reset_with_empty_storage(); + + let result_with_rollbacks = vm.execute_and_verify_txs(&vec![ + TransactionTestInfo::new_rejected(tx_0.clone(), TxModifier::WrongSignatureLength.into()), + TransactionTestInfo::new_rejected(tx_0.clone(), TxModifier::WrongMagicValue.into()), + TransactionTestInfo::new_rejected(tx_0.clone(), TxModifier::WrongSignature.into()), + // The correct nonce is 0, this tx will fail + TransactionTestInfo::new_rejected(tx_2.clone(), TxModifier::WrongNonce.into()), + // This tx will succeed + TransactionTestInfo::new_processed(tx_0.clone(), false), + // The correct nonce is 1, this tx will fail + TransactionTestInfo::new_rejected(tx_0.clone(), 
TxModifier::NonceReused.into()), + // The correct nonce is 1, this tx will fail + TransactionTestInfo::new_rejected(tx_2.clone(), TxModifier::WrongNonce.into()), + // This tx will succeed + TransactionTestInfo::new_processed(tx_1, false), + // The correct nonce is 2, this tx will fail + TransactionTestInfo::new_rejected(tx_0.clone(), TxModifier::NonceReused.into()), + // This tx will succeed + TransactionTestInfo::new_processed(tx_2.clone(), false), + // This tx will fail + TransactionTestInfo::new_rejected(tx_2, TxModifier::NonceReused.into()), + TransactionTestInfo::new_rejected(tx_0, TxModifier::NonceReused.into()), + ]); + + assert_eq!(result_without_rollbacks, result_with_rollbacks); +} + +#[test] +fn test_vm_loadnext_rollbacks() { + let mut vm = VmTesterBuilder::new(HistoryEnabled) + .with_empty_in_memory_storage() + .with_execution_mode(TxExecutionMode::VerifyExecute) + .with_random_rich_accounts(1) + .build(); + let mut account = vm.rich_accounts[0].clone(); + + let loadnext_contract = get_loadnext_contract(); + let loadnext_constructor_data = &[Token::Uint(U256::from(100))]; + let DeployContractsTx { + tx: loadnext_deploy_tx, + address, + .. + } = account.get_deploy_tx_with_factory_deps( + &loadnext_contract.bytecode, + Some(loadnext_constructor_data), + loadnext_contract.factory_deps.clone(), + TxType::L2, + ); + + let loadnext_tx_1 = account.get_l2_tx_for_execute( + Execute { + contract_address: address, + calldata: LoadnextContractExecutionParams { + reads: 100, + writes: 100, + events: 100, + hashes: 500, + recursive_calls: 10, + deploys: 60, + } + .to_bytes(), + value: Default::default(), + factory_deps: None, + }, + None, + ); + + let loadnext_tx_2 = account.get_l2_tx_for_execute( + Execute { + contract_address: address, + calldata: LoadnextContractExecutionParams { + reads: 100, + writes: 100, + events: 100, + hashes: 500, + recursive_calls: 10, + deploys: 60, + } + .to_bytes(), + value: Default::default(), + factory_deps: None, + }, + None, + ); + + let result_without_rollbacks = vm.execute_and_verify_txs(&vec![ + TransactionTestInfo::new_processed(loadnext_deploy_tx.clone(), false), + TransactionTestInfo::new_processed(loadnext_tx_1.clone(), false), + TransactionTestInfo::new_processed(loadnext_tx_2.clone(), false), + ]); + + // reset vm + vm.reset_with_empty_storage(); + + let result_with_rollbacks = vm.execute_and_verify_txs(&vec![ + TransactionTestInfo::new_processed(loadnext_deploy_tx.clone(), false), + TransactionTestInfo::new_processed(loadnext_tx_1.clone(), true), + TransactionTestInfo::new_rejected( + loadnext_deploy_tx.clone(), + TxModifier::NonceReused.into(), + ), + TransactionTestInfo::new_processed(loadnext_tx_1, false), + TransactionTestInfo::new_processed(loadnext_tx_2.clone(), true), + TransactionTestInfo::new_processed(loadnext_tx_2.clone(), true), + TransactionTestInfo::new_rejected(loadnext_deploy_tx, TxModifier::NonceReused.into()), + TransactionTestInfo::new_processed(loadnext_tx_2, false), + ]); + + assert_eq!(result_without_rollbacks, result_with_rollbacks); +} diff --git a/core/multivm_deps/vm_virtual_blocks/src/tests/simple_execution.rs b/core/multivm_deps/vm_virtual_blocks/src/tests/simple_execution.rs new file mode 100644 index 000000000000..40e51739b072 --- /dev/null +++ b/core/multivm_deps/vm_virtual_blocks/src/tests/simple_execution.rs @@ -0,0 +1,77 @@ +use crate::tests::tester::{TxType, VmTesterBuilder}; +use crate::types::outputs::ExecutionResult; +use crate::{HistoryDisabled, VmExecutionMode}; + +#[test] +fn estimate_fee() { + let mut 
vm_tester = VmTesterBuilder::new(HistoryDisabled) + .with_empty_in_memory_storage() + .with_deployer() + .with_random_rich_accounts(1) + .build(); + + vm_tester.deploy_test_contract(); + let account = &mut vm_tester.rich_accounts[0]; + + let tx = account.get_test_contract_transaction( + vm_tester.test_contract.unwrap(), + false, + Default::default(), + false, + TxType::L2, + ); + + vm_tester.vm.push_transaction(tx); + + let result = vm_tester.vm.execute(VmExecutionMode::OneTx); + assert!(matches!(result.result, ExecutionResult::Success { .. })); +} + +#[test] +fn simple_execute() { + let mut vm_tester = VmTesterBuilder::new(HistoryDisabled) + .with_empty_in_memory_storage() + .with_deployer() + .with_random_rich_accounts(1) + .build(); + + vm_tester.deploy_test_contract(); + + let account = &mut vm_tester.rich_accounts[0]; + + let tx1 = account.get_test_contract_transaction( + vm_tester.test_contract.unwrap(), + false, + Default::default(), + false, + TxType::L1 { serial_id: 1 }, + ); + + let tx2 = account.get_test_contract_transaction( + vm_tester.test_contract.unwrap(), + true, + Default::default(), + false, + TxType::L1 { serial_id: 1 }, + ); + + let tx3 = account.get_test_contract_transaction( + vm_tester.test_contract.unwrap(), + false, + Default::default(), + false, + TxType::L1 { serial_id: 1 }, + ); + let vm = &mut vm_tester.vm; + vm.push_transaction(tx1); + vm.push_transaction(tx2); + vm.push_transaction(tx3); + let tx = vm.execute(VmExecutionMode::OneTx); + assert!(matches!(tx.result, ExecutionResult::Success { .. })); + let tx = vm.execute(VmExecutionMode::OneTx); + assert!(matches!(tx.result, ExecutionResult::Revert { .. })); + let tx = vm.execute(VmExecutionMode::OneTx); + assert!(matches!(tx.result, ExecutionResult::Success { .. })); + let block_tip = vm.execute(VmExecutionMode::Batch); + assert!(matches!(block_tip.result, ExecutionResult::Success { .. })); +} diff --git a/core/multivm_deps/vm_virtual_blocks/src/tests/tester/inner_state.rs b/core/multivm_deps/vm_virtual_blocks/src/tests/tester/inner_state.rs new file mode 100644 index 000000000000..08220724b4dd --- /dev/null +++ b/core/multivm_deps/vm_virtual_blocks/src/tests/tester/inner_state.rs @@ -0,0 +1,116 @@ +use std::collections::HashMap; + +use zk_evm::aux_structures::Timestamp; +use zk_evm::vm_state::VmLocalState; +use zksync_state::WriteStorage; + +use zksync_types::{StorageKey, StorageLogQuery, StorageValue, U256}; + +use crate::old_vm::event_sink::InMemoryEventSink; +use crate::old_vm::history_recorder::{AppDataFrameManagerWithHistory, HistoryRecorder}; +use crate::{HistoryEnabled, HistoryMode, SimpleMemory, Vm}; + +#[derive(Clone, Debug)] +pub(crate) struct ModifiedKeysMap(HashMap); + +// We consider hashmaps to be equal even if there is a key +// that is not present in one but has zero value in another. +impl PartialEq for ModifiedKeysMap { + fn eq(&self, other: &Self) -> bool { + for (key, value) in self.0.iter() { + if *value != other.0.get(key).cloned().unwrap_or_default() { + return false; + } + } + for (key, value) in other.0.iter() { + if *value != self.0.get(key).cloned().unwrap_or_default() { + return false; + } + } + true + } +} + +#[derive(Clone, PartialEq, Debug)] +pub(crate) struct DecommitterTestInnerState { + /// There is no way to "trully" compare the storage pointer, + /// so we just compare the modified keys. This is reasonable enough. 
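+    /// (Note that ModifiedKeysMap's PartialEq above treats a missing key and
+    /// an explicitly zeroed value as equal, so a rollback that leaves zeroed
+    /// slots behind still compares equal to the pre-rollback state.)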
+ pub(crate) modified_storage_keys: ModifiedKeysMap, + pub(crate) known_bytecodes: HistoryRecorder>, H>, + pub(crate) decommitted_code_hashes: HistoryRecorder, HistoryEnabled>, +} + +#[derive(Clone, PartialEq, Debug)] +pub(crate) struct StorageOracleInnerState { + /// There is no way to "trully" compare the storage pointer, + /// so we just compare the modified keys. This is reasonable enough. + pub(crate) modified_storage_keys: ModifiedKeysMap, + + pub(crate) frames_stack: AppDataFrameManagerWithHistory, H>, +} + +#[derive(Clone, PartialEq, Debug)] +pub(crate) struct PrecompileProcessorTestInnerState { + pub(crate) timestamp_history: HistoryRecorder, H>, +} + +/// A struct that encapsulates the state of the VM's oracles +/// The state is to be used in tests. +#[derive(Clone, PartialEq, Debug)] +pub(crate) struct VmInstanceInnerState { + event_sink: InMemoryEventSink, + precompile_processor_state: PrecompileProcessorTestInnerState, + memory: SimpleMemory, + decommitter_state: DecommitterTestInnerState, + storage_oracle_state: StorageOracleInnerState, + local_state: VmLocalState, +} + +impl Vm { + // Dump inner state of the VM. + pub(crate) fn dump_inner_state(&self) -> VmInstanceInnerState { + let event_sink = self.state.event_sink.clone(); + let precompile_processor_state = PrecompileProcessorTestInnerState { + timestamp_history: self.state.precompiles_processor.timestamp_history.clone(), + }; + let memory = self.state.memory.clone(); + let decommitter_state = DecommitterTestInnerState { + modified_storage_keys: ModifiedKeysMap( + self.state + .decommittment_processor + .get_storage() + .borrow() + .modified_storage_keys() + .clone(), + ), + known_bytecodes: self.state.decommittment_processor.known_bytecodes.clone(), + decommitted_code_hashes: self + .state + .decommittment_processor + .get_decommitted_code_hashes_with_history() + .clone(), + }; + let storage_oracle_state = StorageOracleInnerState { + modified_storage_keys: ModifiedKeysMap( + self.state + .storage + .storage + .get_ptr() + .borrow() + .modified_storage_keys() + .clone(), + ), + frames_stack: self.state.storage.frames_stack.clone(), + }; + let local_state = self.state.local_state.clone(); + + VmInstanceInnerState { + event_sink, + precompile_processor_state, + memory, + decommitter_state, + storage_oracle_state, + local_state, + } + } +} diff --git a/core/multivm_deps/vm_virtual_blocks/src/tests/tester/mod.rs b/core/multivm_deps/vm_virtual_blocks/src/tests/tester/mod.rs new file mode 100644 index 000000000000..dfe8905a7e08 --- /dev/null +++ b/core/multivm_deps/vm_virtual_blocks/src/tests/tester/mod.rs @@ -0,0 +1,7 @@ +pub(crate) use transaction_test_info::{ExpectedError, TransactionTestInfo, TxModifier}; +pub(crate) use vm_tester::{default_l1_batch, InMemoryStorageView, VmTester, VmTesterBuilder}; +pub(crate) use zksync_test_account::{Account, DeployContractsTx, TxType}; + +mod inner_state; +mod transaction_test_info; +mod vm_tester; diff --git a/core/multivm_deps/vm_virtual_blocks/src/tests/tester/transaction_test_info.rs b/core/multivm_deps/vm_virtual_blocks/src/tests/tester/transaction_test_info.rs new file mode 100644 index 000000000000..65ceb3c5cf3c --- /dev/null +++ b/core/multivm_deps/vm_virtual_blocks/src/tests/tester/transaction_test_info.rs @@ -0,0 +1,216 @@ +use zksync_types::{ExecuteTransactionCommon, Transaction}; + +use crate::errors::VmRevertReason; +use crate::tests::tester::vm_tester::VmTester; +use crate::{ + CurrentExecutionState, ExecutionResult, Halt, HistoryEnabled, TxRevertReason, VmExecutionMode, + 
VmExecutionResultAndLogs, +}; + +#[derive(Debug, Clone)] +pub(crate) enum TxModifier { + WrongSignatureLength, + WrongSignature, + WrongMagicValue, + WrongNonce, + NonceReused, +} + +#[derive(Debug, Clone)] +pub(crate) enum TxExpectedResult { + Rejected { error: ExpectedError }, + Processed { rollback: bool }, +} + +#[derive(Debug, Clone)] +pub(crate) struct TransactionTestInfo { + tx: Transaction, + result: TxExpectedResult, +} + +#[derive(Debug, Clone)] +pub(crate) struct ExpectedError { + pub(crate) revert_reason: TxRevertReason, + pub(crate) modifier: Option, +} + +impl From for ExpectedError { + fn from(value: TxModifier) -> Self { + let revert_reason = match value { + TxModifier::WrongSignatureLength => { + Halt::ValidationFailed(VmRevertReason::General { + msg: "Signature length is incorrect".to_string(), + data: vec![ + 8, 195, 121, 160, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 32, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 29, 83, 105, 103, 110, 97, 116, 117, 114, 101, 32, + 108, 101, 110, 103, 116, 104, 32, 105, 115, 32, 105, 110, 99, 111, 114, 114, 101, 99, + 116, 0, 0, 0, + ], + }) + } + TxModifier::WrongSignature => { + Halt::ValidationFailed(VmRevertReason::General { + msg: "Account validation returned invalid magic value. Most often this means that the signature is incorrect".to_string(), + data: vec![], + }) + } + TxModifier::WrongMagicValue => { + Halt::ValidationFailed(VmRevertReason::General { + msg: "v is neither 27 nor 28".to_string(), + data: vec![ + 8, 195, 121, 160, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 32, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 22, 118, 32, 105, 115, 32, 110, 101, 105, 116, 104, + 101, 114, 32, 50, 55, 32, 110, 111, 114, 32, 50, 56, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + ], + }) + + } + TxModifier::WrongNonce => { + Halt::ValidationFailed(VmRevertReason::General { + msg: "Incorrect nonce".to_string(), + data: vec![ + 8, 195, 121, 160, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 32, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 15, 73, 110, 99, 111, 114, 114, 101, 99, 116, 32, 110, + 111, 110, 99, 101, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + ], + }) + } + TxModifier::NonceReused => { + Halt::ValidationFailed(VmRevertReason::General { + msg: "Reusing the same nonce twice".to_string(), + data: vec![ + 8, 195, 121, 160, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 32, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 28, 82, 101, 117, 115, 105, 110, 103, 32, 116, 104, + 101, 32, 115, 97, 109, 101, 32, 110, 111, 110, 99, 101, 32, 116, 119, 105, 99, 101, 0, + 0, 0, 0, + ], + }) + } + }; + + ExpectedError { + revert_reason: TxRevertReason::Halt(revert_reason), + modifier: Some(value), + } + } +} + +impl TransactionTestInfo { + pub(crate) fn new_rejected( + mut transaction: Transaction, + expected_error: ExpectedError, + ) -> Self { + transaction.common_data = match transaction.common_data { + ExecuteTransactionCommon::L2(mut data) => { + if let Some(modifier) = &expected_error.modifier { + match modifier { + TxModifier::WrongSignatureLength => { + data.signature = data.signature[..data.signature.len() - 20].to_vec() + } + 
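+                    // (The raw `data` arrays above are ABI-encoded `Error(string)` payloads:
+                    // selector 0x08c379a0, i.e. [8, 195, 121, 160], then the offset word,
+                    // the string length, and the padded UTF-8 message.)
+                    // (A 65-byte r||s||v signature is assumed in the arms below: all 27s
+                    // keeps v = 27 so the signature parses but recovers the wrong signer,
+                    // while all 1s makes v invalid, matching "v is neither 27 nor 28".)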
TxModifier::WrongSignature => data.signature = vec![27u8; 65], + TxModifier::WrongMagicValue => data.signature = vec![1u8; 65], + TxModifier::WrongNonce => { + // Do not need to modify signature for nonce error + } + TxModifier::NonceReused => { + // Do not need to modify signature for nonce error + } + } + } + ExecuteTransactionCommon::L2(data) + } + _ => panic!("L1 transactions are not supported"), + }; + + Self { + tx: transaction, + result: TxExpectedResult::Rejected { + error: expected_error, + }, + } + } + + pub(crate) fn new_processed(transaction: Transaction, should_be_rollbacked: bool) -> Self { + Self { + tx: transaction, + result: TxExpectedResult::Processed { + rollback: should_be_rollbacked, + }, + } + } + + fn verify_result(&self, result: &VmExecutionResultAndLogs) { + match &self.result { + TxExpectedResult::Rejected { error } => match &result.result { + ExecutionResult::Success { .. } => { + panic!("Transaction should be reverted {:?}", self.tx.nonce()) + } + ExecutionResult::Revert { output } => match &error.revert_reason { + TxRevertReason::TxReverted(expected) => { + assert_eq!(output, expected) + } + _ => { + panic!("Error types mismatch"); + } + }, + ExecutionResult::Halt { reason } => match &error.revert_reason { + TxRevertReason::Halt(expected) => { + assert_eq!(reason, expected) + } + _ => { + panic!("Error types mismatch"); + } + }, + }, + TxExpectedResult::Processed { .. } => { + assert!(!result.result.is_failed()); + } + } + } + + fn should_rollback(&self) -> bool { + match &self.result { + TxExpectedResult::Rejected { .. } => true, + TxExpectedResult::Processed { rollback } => *rollback, + } + } +} + +impl VmTester { + pub(crate) fn execute_and_verify_txs( + &mut self, + txs: &[TransactionTestInfo], + ) -> CurrentExecutionState { + for tx_test_info in txs { + self.execute_tx_and_verify(tx_test_info.clone()); + } + self.vm.execute(VmExecutionMode::Batch); + let mut state = self.vm.get_current_execution_state(); + state.used_contract_hashes.sort(); + state + } + + pub(crate) fn execute_tx_and_verify( + &mut self, + tx_test_info: TransactionTestInfo, + ) -> VmExecutionResultAndLogs { + let inner_state_before = self.vm.dump_inner_state(); + self.vm.make_snapshot(); + self.vm.push_transaction(tx_test_info.tx.clone()); + let result = self.vm.execute(VmExecutionMode::OneTx); + tx_test_info.verify_result(&result); + if tx_test_info.should_rollback() { + self.vm.rollback_to_the_latest_snapshot(); + let inner_state_after = self.vm.dump_inner_state(); + assert_eq!( + inner_state_before, inner_state_after, + "Inner state before and after rollback should be equal" + ); + } + result + } +} diff --git a/core/multivm_deps/vm_virtual_blocks/src/tests/tester/vm_tester.rs b/core/multivm_deps/vm_virtual_blocks/src/tests/tester/vm_tester.rs new file mode 100644 index 000000000000..3e69d2f01a51 --- /dev/null +++ b/core/multivm_deps/vm_virtual_blocks/src/tests/tester/vm_tester.rs @@ -0,0 +1,298 @@ +use zksync_contracts::BaseSystemContracts; +use zksync_state::{InMemoryStorage, StoragePtr, StorageView, WriteStorage}; + +use zksync_types::block::legacy_miniblock_hash; +use zksync_types::helpers::unix_timestamp_ms; +use zksync_types::utils::{deployed_address_create, storage_key_for_eth_balance}; +use zksync_types::{ + get_code_key, get_is_account_key, Address, L1BatchNumber, MiniblockNumber, Nonce, + ProtocolVersionId, U256, +}; +use zksync_utils::bytecode::hash_bytecode; +use zksync_utils::u256_to_h256; + +use crate::constants::BLOCK_GAS_LIMIT; + +use crate::tests::tester::Account; +use 
crate::tests::tester::TxType; +use crate::tests::utils::read_test_contract; +use crate::types::inputs::system_env::TxExecutionMode; +use crate::utils::l2_blocks::load_last_l2_block; +use crate::{HistoryMode, L1BatchEnv, L2Block, L2BlockEnv, SystemEnv, Vm, VmExecutionMode}; + +pub(crate) type InMemoryStorageView = StorageView; + +pub(crate) struct VmTester { + pub(crate) vm: Vm, + pub(crate) storage: StoragePtr, + pub(crate) fee_account: Address, + pub(crate) deployer: Option, + pub(crate) test_contract: Option
<Address>
, + pub(crate) rich_accounts: Vec, + pub(crate) custom_contracts: Vec, + history_mode: H, +} + +impl VmTester { + pub(crate) fn deploy_test_contract(&mut self) { + let contract = read_test_contract(); + let tx = self + .deployer + .as_mut() + .expect("You have to initialize builder with deployer") + .get_deploy_tx(&contract, None, TxType::L2) + .tx; + let nonce = tx.nonce().unwrap().0.into(); + self.vm.push_transaction(tx); + self.vm.execute(VmExecutionMode::OneTx); + let deployed_address = + deployed_address_create(self.deployer.as_ref().unwrap().address, nonce); + self.test_contract = Some(deployed_address); + } + + pub(crate) fn reset_with_empty_storage(&mut self) { + self.storage = StorageView::new(get_empty_storage()).to_rc_ptr(); + self.reset_state(false); + } + + /// Reset the state of the VM to the initial state. + /// If `use_latest_l2_block` is true, then the VM will use the latest L2 block from storage, + /// otherwise it will use the first L2 block of l1 batch env + pub(crate) fn reset_state(&mut self, use_latest_l2_block: bool) { + for account in self.rich_accounts.iter_mut() { + account.nonce = Nonce(0); + make_account_rich(self.storage.clone(), account); + } + if let Some(deployer) = &self.deployer { + make_account_rich(self.storage.clone(), deployer); + } + + if !self.custom_contracts.is_empty() { + println!("Inserting custom contracts is not yet supported") + // insert_contracts(&mut self.storage, &self.custom_contracts); + } + + let mut l1_batch = self.vm.batch_env.clone(); + if use_latest_l2_block { + let last_l2_block = load_last_l2_block(self.storage.clone()).unwrap_or(L2Block { + number: 0, + timestamp: 0, + hash: legacy_miniblock_hash(MiniblockNumber(0)), + }); + l1_batch.first_l2_block = L2BlockEnv { + number: last_l2_block.number + 1, + timestamp: std::cmp::max(last_l2_block.timestamp + 1, l1_batch.timestamp), + prev_block_hash: last_l2_block.hash, + max_virtual_blocks_to_create: 1, + }; + } + + let vm = Vm::new( + l1_batch, + self.vm.system_env.clone(), + self.storage.clone(), + self.history_mode.clone(), + ); + + if self.test_contract.is_some() { + self.deploy_test_contract(); + } + + self.vm = vm; + } +} + +pub(crate) type ContractsToDeploy = (Vec, Address, bool); + +pub(crate) struct VmTesterBuilder { + history_mode: H, + storage: Option, + l1_batch_env: Option, + system_env: SystemEnv, + deployer: Option, + rich_accounts: Vec, + custom_contracts: Vec, +} + +impl Clone for VmTesterBuilder { + fn clone(&self) -> Self { + Self { + history_mode: self.history_mode.clone(), + storage: None, + l1_batch_env: self.l1_batch_env.clone(), + system_env: self.system_env.clone(), + deployer: self.deployer.clone(), + rich_accounts: self.rich_accounts.clone(), + custom_contracts: self.custom_contracts.clone(), + } + } +} + +#[allow(dead_code)] +impl VmTesterBuilder { + pub(crate) fn new(history_mode: H) -> Self { + Self { + history_mode, + storage: None, + l1_batch_env: None, + system_env: SystemEnv { + zk_porter_available: false, + version: ProtocolVersionId::latest(), + base_system_smart_contracts: BaseSystemContracts::playground(), + gas_limit: BLOCK_GAS_LIMIT, + execution_mode: TxExecutionMode::VerifyExecute, + default_validation_computational_gas_limit: BLOCK_GAS_LIMIT, + chain_id: 270.into(), + }, + deployer: None, + rich_accounts: vec![], + custom_contracts: vec![], + } + } + + pub(crate) fn with_l1_batch_env(mut self, l1_batch_env: L1BatchEnv) -> Self { + self.l1_batch_env = Some(l1_batch_env); + self + } + + pub(crate) fn with_system_env(mut self, system_env: 
SystemEnv) -> Self { + self.system_env = system_env; + self + } + + pub(crate) fn with_storage(mut self, storage: InMemoryStorage) -> Self { + self.storage = Some(storage); + self + } + + pub(crate) fn with_base_system_smart_contracts( + mut self, + base_system_smart_contracts: BaseSystemContracts, + ) -> Self { + self.system_env.base_system_smart_contracts = base_system_smart_contracts; + self + } + + pub(crate) fn with_gas_limit(mut self, gas_limit: u32) -> Self { + self.system_env.gas_limit = gas_limit; + self + } + + pub(crate) fn with_execution_mode(mut self, execution_mode: TxExecutionMode) -> Self { + self.system_env.execution_mode = execution_mode; + self + } + + pub(crate) fn with_empty_in_memory_storage(mut self) -> Self { + self.storage = Some(get_empty_storage()); + self + } + + pub(crate) fn with_random_rich_accounts(mut self, number: u32) -> Self { + for _ in 0..number { + let account = Account::random(); + self.rich_accounts.push(account); + } + self + } + + pub(crate) fn with_rich_accounts(mut self, accounts: Vec) -> Self { + self.rich_accounts.extend(accounts); + self + } + + pub(crate) fn with_deployer(mut self) -> Self { + let deployer = Account::random(); + self.deployer = Some(deployer); + self + } + + pub(crate) fn with_custom_contracts(mut self, contracts: Vec) -> Self { + self.custom_contracts = contracts; + self + } + + pub(crate) fn build(self) -> VmTester { + let l1_batch_env = self + .l1_batch_env + .unwrap_or_else(|| default_l1_batch(L1BatchNumber(1))); + + let mut raw_storage = self.storage.unwrap_or_else(get_empty_storage); + insert_contracts(&mut raw_storage, &self.custom_contracts); + let storage_ptr = StorageView::new(raw_storage).to_rc_ptr(); + for account in self.rich_accounts.iter() { + make_account_rich(storage_ptr.clone(), account); + } + if let Some(deployer) = &self.deployer { + make_account_rich(storage_ptr.clone(), deployer); + } + let fee_account = l1_batch_env.fee_account; + + let vm = Vm::new( + l1_batch_env, + self.system_env, + storage_ptr.clone(), + self.history_mode.clone(), + ); + + VmTester { + vm, + storage: storage_ptr, + fee_account, + deployer: self.deployer, + test_contract: None, + rich_accounts: self.rich_accounts.clone(), + custom_contracts: self.custom_contracts.clone(), + history_mode: self.history_mode, + } + } +} + +pub(crate) fn default_l1_batch(number: L1BatchNumber) -> L1BatchEnv { + let timestamp = unix_timestamp_ms(); + L1BatchEnv { + previous_batch_hash: None, + number, + timestamp, + l1_gas_price: 50_000_000_000, // 50 gwei + fair_l2_gas_price: 250_000_000, // 0.25 gwei + fee_account: Address::random(), + enforced_base_fee: None, + first_l2_block: L2BlockEnv { + number: 1, + timestamp, + prev_block_hash: legacy_miniblock_hash(MiniblockNumber(0)), + max_virtual_blocks_to_create: 100, + }, + } +} + +pub(crate) fn make_account_rich(storage: StoragePtr, account: &Account) { + let key = storage_key_for_eth_balance(&account.address); + storage + .as_ref() + .borrow_mut() + .set_value(key, u256_to_h256(U256::from(10u64.pow(19)))); +} + +pub(crate) fn get_empty_storage() -> InMemoryStorage { + InMemoryStorage::with_system_contracts(hash_bytecode) +} + +// Inserts the contracts into the test environment, bypassing the +// deployer system contract. Besides the reference to storage +// it accepts a `contracts` tuple of information about the contract +// and whether or not it is an account. 
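+// (A hypothetical call, given some `bytecode: Vec<u8>` and `addr: Address`:
+// `insert_contracts(&mut raw_storage, &[(bytecode, addr, false)]);` where the
+// final bool marks whether the address should be flagged as an account.)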
+fn insert_contracts(raw_storage: &mut InMemoryStorage, contracts: &[ContractsToDeploy]) { + for (contract, address, is_account) in contracts { + let deployer_code_key = get_code_key(address); + raw_storage.set_value(deployer_code_key, hash_bytecode(contract)); + + if *is_account { + let is_account_key = get_is_account_key(address); + raw_storage.set_value(is_account_key, u256_to_h256(1_u32.into())); + } + + raw_storage.store_factory_dep(hash_bytecode(contract), contract.clone()); + } +} diff --git a/core/multivm_deps/vm_virtual_blocks/src/tests/tracing_execution_error.rs b/core/multivm_deps/vm_virtual_blocks/src/tests/tracing_execution_error.rs new file mode 100644 index 000000000000..dbe9f74a85bd --- /dev/null +++ b/core/multivm_deps/vm_virtual_blocks/src/tests/tracing_execution_error.rs @@ -0,0 +1,49 @@ +use zksync_types::{Execute, H160}; + +use crate::errors::VmRevertReason; +use crate::tests::tester::{ExpectedError, TransactionTestInfo, VmTesterBuilder}; +use crate::tests::utils::{get_execute_error_calldata, read_error_contract, BASE_SYSTEM_CONTRACTS}; +use crate::types::inputs::system_env::TxExecutionMode; +use crate::{HistoryEnabled, TxRevertReason}; + +#[test] +fn test_tracing_of_execution_errors() { + let contract_address = H160::random(); + let mut vm = VmTesterBuilder::new(HistoryEnabled) + .with_empty_in_memory_storage() + .with_base_system_smart_contracts(BASE_SYSTEM_CONTRACTS.clone()) + .with_custom_contracts(vec![(read_error_contract(), contract_address, false)]) + .with_execution_mode(TxExecutionMode::VerifyExecute) + .with_deployer() + .with_random_rich_accounts(1) + .build(); + + let account = &mut vm.rich_accounts[0]; + + let tx = account.get_l2_tx_for_execute( + Execute { + contract_address, + calldata: get_execute_error_calldata(), + value: Default::default(), + factory_deps: Some(vec![]), + }, + None, + ); + + vm.execute_tx_and_verify(TransactionTestInfo::new_rejected( + tx, + ExpectedError { + revert_reason: TxRevertReason::TxReverted(VmRevertReason::General { + msg: "short".to_string(), + data: vec![ + 8, 195, 121, 160, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 32, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 5, 115, 104, 111, 114, 116, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, + ], + }), + modifier: None, + }, + )); +} diff --git a/core/multivm_deps/vm_virtual_blocks/src/tests/upgrade.rs b/core/multivm_deps/vm_virtual_blocks/src/tests/upgrade.rs new file mode 100644 index 000000000000..05646326ffd1 --- /dev/null +++ b/core/multivm_deps/vm_virtual_blocks/src/tests/upgrade.rs @@ -0,0 +1,341 @@ +use zk_evm::aux_structures::Timestamp; + +use zksync_types::{ + ethabi::Contract, + Execute, COMPLEX_UPGRADER_ADDRESS, CONTRACT_DEPLOYER_ADDRESS, CONTRACT_FORCE_DEPLOYER_ADDRESS, + REQUIRED_L1_TO_L2_GAS_PER_PUBDATA_BYTE, + {ethabi::Token, Address, ExecuteTransactionCommon, Transaction, H256, U256}, + {get_code_key, get_known_code_key, H160}, +}; + +use zksync_utils::{bytecode::hash_bytecode, bytes_to_be_words, h256_to_u256, u256_to_h256}; + +use zksync_contracts::{deployer_contract, load_contract, load_sys_contract, read_bytecode}; +use zksync_state::WriteStorage; +use zksync_test_account::TxType; + +use crate::tests::tester::VmTesterBuilder; +use crate::tests::utils::verify_required_storage; +use crate::{ExecutionResult, Halt, HistoryEnabled, TxExecutionMode, VmExecutionMode}; +use 
zksync_types::protocol_version::ProtocolUpgradeTxCommonData; + +use super::utils::read_test_contract; + +/// In this test we ensure that the requirements for protocol upgrade transactions are enforced by the bootloader: +/// - This transaction must be the only one in block +/// - If present, this transaction must be the first one in block +#[test] +fn test_protocol_upgrade_is_first() { + let mut vm = VmTesterBuilder::new(HistoryEnabled) + .with_empty_in_memory_storage() + .with_execution_mode(TxExecutionMode::VerifyExecute) + .with_random_rich_accounts(1) + .build(); + + let bytecode_hash = hash_bytecode(&read_test_contract()); + + // Here we just use some random transaction of protocol upgrade type: + let protocol_upgrade_transaction = get_forced_deploy_tx(&[ForceDeployment { + // The bytecode hash to put on an address + bytecode_hash, + // The address on which to deploy the bytecodehash to + address: H160::random(), + // Whether to run the constructor on the force deployment + call_constructor: false, + // The value with which to initialize a contract + value: U256::zero(), + // The constructor calldata + input: vec![], + }]); + + let normal_l1_transaction = vm.rich_accounts[0] + .get_deploy_tx(&read_test_contract(), None, TxType::L1 { serial_id: 0 }) + .tx; + + let expected_error = + Halt::UnexpectedVMBehavior("Assertion error: Protocol upgrade tx not first".to_string()); + + vm.vm.make_snapshot(); + // Test 1: there must be only one system transaction in block + vm.vm.push_transaction(protocol_upgrade_transaction.clone()); + vm.vm.push_transaction(normal_l1_transaction.clone()); + vm.vm.push_transaction(protocol_upgrade_transaction.clone()); + + vm.vm.execute(VmExecutionMode::OneTx); + vm.vm.execute(VmExecutionMode::OneTx); + let result = vm.vm.execute(VmExecutionMode::OneTx); + assert_eq!( + result.result, + ExecutionResult::Halt { + reason: expected_error.clone() + } + ); + + // Test 2: the protocol upgrade tx must be the first one in block + vm.vm.rollback_to_the_latest_snapshot(); + vm.vm.make_snapshot(); + vm.vm.push_transaction(normal_l1_transaction.clone()); + vm.vm.push_transaction(protocol_upgrade_transaction.clone()); + + vm.vm.execute(VmExecutionMode::OneTx); + let result = vm.vm.execute(VmExecutionMode::OneTx); + assert_eq!( + result.result, + ExecutionResult::Halt { + reason: expected_error + } + ); + + vm.vm.rollback_to_the_latest_snapshot(); + vm.vm.make_snapshot(); + vm.vm.push_transaction(protocol_upgrade_transaction); + vm.vm.push_transaction(normal_l1_transaction); + + vm.vm.execute(VmExecutionMode::OneTx); + let result = vm.vm.execute(VmExecutionMode::OneTx); + assert!(!result.result.is_failed()); +} + +/// In this test we try to test how force deployments could be done via protocol upgrade transactions. +#[test] +fn test_force_deploy_upgrade() { + let mut vm = VmTesterBuilder::new(HistoryEnabled) + .with_empty_in_memory_storage() + .with_execution_mode(TxExecutionMode::VerifyExecute) + .with_random_rich_accounts(1) + .build(); + + let storage_view = vm.storage.clone(); + let bytecode_hash = hash_bytecode(&read_test_contract()); + + let known_code_key = get_known_code_key(&bytecode_hash); + // It is generally expected that all the keys will be set as known prior to the protocol upgrade. 
+ storage_view + .borrow_mut() + .set_value(known_code_key, u256_to_h256(1.into())); + drop(storage_view); + + let address_to_deploy = H160::random(); + // Here we just use some random transaction of protocol upgrade type: + let transaction = get_forced_deploy_tx(&[ForceDeployment { + // The bytecode hash to put on an address + bytecode_hash, + // The address on which to deploy the bytecodehash to + address: address_to_deploy, + // Whether to run the constructor on the force deployment + call_constructor: false, + // The value with which to initialize a contract + value: U256::zero(), + // The constructor calldata + input: vec![], + }]); + + vm.vm.push_transaction(transaction); + + let result = vm.vm.execute(VmExecutionMode::OneTx); + assert!( + !result.result.is_failed(), + "The force upgrade was not successful" + ); + + let expected_slots = vec![(bytecode_hash, get_code_key(&address_to_deploy))]; + + // Verify that the bytecode has been set correctly + verify_required_storage(&vm.vm.state, expected_slots); +} + +/// Here we show how the work with the complex upgrader could be done +#[test] +fn test_complex_upgrader() { + let mut vm = VmTesterBuilder::new(HistoryEnabled) + .with_empty_in_memory_storage() + .with_execution_mode(TxExecutionMode::VerifyExecute) + .with_random_rich_accounts(1) + .build(); + + let storage_view = vm.storage.clone(); + + let bytecode_hash = hash_bytecode(&read_complex_upgrade()); + let msg_sender_test_hash = hash_bytecode(&read_msg_sender_test()); + + // Let's assume that the bytecode for the implementation of the complex upgrade + // is already deployed in some address in userspace + let upgrade_impl = H160::random(); + let account_code_key = get_code_key(&upgrade_impl); + + storage_view + .borrow_mut() + .set_value(get_known_code_key(&bytecode_hash), u256_to_h256(1.into())); + storage_view.borrow_mut().set_value( + get_known_code_key(&msg_sender_test_hash), + u256_to_h256(1.into()), + ); + storage_view + .borrow_mut() + .set_value(account_code_key, bytecode_hash); + drop(storage_view); + + vm.vm.state.decommittment_processor.populate( + vec![ + ( + h256_to_u256(bytecode_hash), + bytes_to_be_words(read_complex_upgrade()), + ), + ( + h256_to_u256(msg_sender_test_hash), + bytes_to_be_words(read_msg_sender_test()), + ), + ], + Timestamp(0), + ); + + let address_to_deploy1 = H160::random(); + let address_to_deploy2 = H160::random(); + + let transaction = get_complex_upgrade_tx( + upgrade_impl, + address_to_deploy1, + address_to_deploy2, + bytecode_hash, + ); + + vm.vm.push_transaction(transaction); + let result = vm.vm.execute(VmExecutionMode::OneTx); + assert!( + !result.result.is_failed(), + "The force upgrade was not successful" + ); + + let expected_slots = vec![ + (bytecode_hash, get_code_key(&address_to_deploy1)), + (bytecode_hash, get_code_key(&address_to_deploy2)), + ]; + + // Verify that the bytecode has been set correctly + verify_required_storage(&vm.vm.state, expected_slots); +} + +#[derive(Debug, Clone)] +struct ForceDeployment { + // The bytecode hash to put on an address + bytecode_hash: H256, + // The address on which to deploy the bytecodehash to + address: Address, + // Whether to run the constructor on the force deployment + call_constructor: bool, + // The value with which to initialize a contract + value: U256, + // The constructor calldata + input: Vec, +} + +fn get_forced_deploy_tx(deployment: &[ForceDeployment]) -> Transaction { + let deployer = deployer_contract(); + let contract_function = 
deployer.function("forceDeployOnAddresses").unwrap(); + + let encoded_deployments: Vec<_> = deployment + .iter() + .map(|deployment| { + Token::Tuple(vec![ + Token::FixedBytes(deployment.bytecode_hash.as_bytes().to_vec()), + Token::Address(deployment.address), + Token::Bool(deployment.call_constructor), + Token::Uint(deployment.value), + Token::Bytes(deployment.input.clone()), + ]) + }) + .collect(); + + let params = [Token::Array(encoded_deployments)]; + + let calldata = contract_function + .encode_input(&params) + .expect("failed to encode parameters"); + + let execute = Execute { + contract_address: CONTRACT_DEPLOYER_ADDRESS, + calldata, + factory_deps: None, + value: U256::zero(), + }; + + Transaction { + common_data: ExecuteTransactionCommon::ProtocolUpgrade(ProtocolUpgradeTxCommonData { + sender: CONTRACT_FORCE_DEPLOYER_ADDRESS, + gas_limit: U256::from(200_000_000u32), + gas_per_pubdata_limit: REQUIRED_L1_TO_L2_GAS_PER_PUBDATA_BYTE.into(), + ..Default::default() + }), + execute, + received_timestamp_ms: 0, + raw_bytes: None, + } +} + +// Returns the transaction that performs a complex protocol upgrade. +// The first param is the address of the implementation of the complex upgrade +// in user-space, while the next 3 params are params of the implementation itself. +// For an explanation of the parameters, please refer to: +// etc/contracts-test-data/complex-upgrade/complex-upgrade.sol +fn get_complex_upgrade_tx( + implementation_address: Address, + address1: Address, + address2: Address, + bytecode_hash: H256, +) -> Transaction { + let impl_contract = get_complex_upgrade_abi(); + let impl_function = impl_contract.function("someComplexUpgrade").unwrap(); + let impl_calldata = impl_function + .encode_input(&[ + Token::Address(address1), + Token::Address(address2), + Token::FixedBytes(bytecode_hash.as_bytes().to_vec()), + ]) + .unwrap(); + + let complex_upgrader = get_complex_upgrader_abi(); + let upgrade_function = complex_upgrader.function("upgrade").unwrap(); + let complex_upgrader_calldata = upgrade_function + .encode_input(&[ + Token::Address(implementation_address), + Token::Bytes(impl_calldata), + ]) + .unwrap(); + + let execute = Execute { + contract_address: COMPLEX_UPGRADER_ADDRESS, + calldata: complex_upgrader_calldata, + factory_deps: None, + value: U256::zero(), + }; + + Transaction { + common_data: ExecuteTransactionCommon::ProtocolUpgrade(ProtocolUpgradeTxCommonData { + sender: CONTRACT_FORCE_DEPLOYER_ADDRESS, + gas_limit: U256::from(200_000_000u32), + gas_per_pubdata_limit: REQUIRED_L1_TO_L2_GAS_PER_PUBDATA_BYTE.into(), + ..Default::default() + }), + execute, + received_timestamp_ms: 0, + raw_bytes: None, + } +} + +fn read_complex_upgrade() -> Vec<u8> { + read_bytecode("etc/contracts-test-data/artifacts-zk/contracts/complex-upgrade/complex-upgrade.sol/ComplexUpgrade.json") +} + +fn read_msg_sender_test() -> Vec<u8> { + read_bytecode("etc/contracts-test-data/artifacts-zk/contracts/complex-upgrade/msg-sender.sol/MsgSenderTest.json") +} + +fn get_complex_upgrade_abi() -> Contract { + load_contract( + "etc/contracts-test-data/artifacts-zk/contracts/complex-upgrade/complex-upgrade.sol/ComplexUpgrade.json" + ) +} + +fn get_complex_upgrader_abi() -> Contract { + load_sys_contract("ComplexUpgrader") +} diff --git a/core/multivm_deps/vm_virtual_blocks/src/tests/utils.rs b/core/multivm_deps/vm_virtual_blocks/src/tests/utils.rs new file mode 100644 index 000000000000..f709ebdd8ede --- /dev/null +++ b/core/multivm_deps/vm_virtual_blocks/src/tests/utils.rs @@ -0,0 +1,106 @@ +use 
ethabi::Contract; +use once_cell::sync::Lazy; + +use crate::tests::tester::InMemoryStorageView; +use zksync_contracts::{ + load_contract, read_bytecode, read_zbin_bytecode, BaseSystemContracts, SystemContractCode, +}; +use zksync_state::{StoragePtr, WriteStorage}; +use zksync_types::utils::storage_key_for_standard_token_balance; +use zksync_types::{AccountTreeId, Address, StorageKey, H256, U256}; +use zksync_utils::bytecode::hash_bytecode; +use zksync_utils::{bytes_to_be_words, h256_to_u256, u256_to_h256}; + +use crate::types::internals::ZkSyncVmState; +use crate::HistoryMode; + +pub(crate) static BASE_SYSTEM_CONTRACTS: Lazy<BaseSystemContracts> = + Lazy::new(BaseSystemContracts::load_from_disk); + +// Probably make it a part of vm tester +pub(crate) fn verify_required_storage<H: HistoryMode>( + state: &ZkSyncVmState<InMemoryStorageView, H>, + required_values: Vec<(H256, StorageKey)>, +) { + for (required_value, key) in required_values { + let current_value = state.storage.storage.read_from_storage(&key); + + assert_eq!( + u256_to_h256(current_value), + required_value, + "Invalid value at key {key:?}" + ); + } +} + +pub(crate) fn verify_required_memory<H: HistoryMode>( + state: &ZkSyncVmState<InMemoryStorageView, H>, + required_values: Vec<(U256, u32, u32)>, +) { + for (required_value, memory_page, cell) in required_values { + let current_value = state + .memory + .read_slot(memory_page as usize, cell as usize) + .value; + assert_eq!(current_value, required_value); + } +} + +pub(crate) fn get_balance<S: WriteStorage>( + token_id: AccountTreeId, + account: &Address, + main_storage: StoragePtr<S>, +) -> U256 { + let key = storage_key_for_standard_token_balance(token_id, account); + h256_to_u256(main_storage.borrow_mut().read_value(&key)) +} + +pub(crate) fn read_test_contract() -> Vec<u8> { + read_bytecode("etc/contracts-test-data/artifacts-zk/contracts/counter/counter.sol/Counter.json") +} + +pub(crate) fn get_bootloader(test: &str) -> SystemContractCode { + let bootloader_code = read_zbin_bytecode(format!( + "etc/system-contracts/bootloader/tests/artifacts/{}.yul/{}.yul.zbin", + test, test + )); + + let bootloader_hash = hash_bytecode(&bootloader_code); + SystemContractCode { + code: bytes_to_be_words(bootloader_code), + hash: bootloader_hash, + } +} + +pub(crate) fn read_nonce_holder_tester() -> Vec<u8> { + read_bytecode("etc/contracts-test-data/artifacts-zk/contracts/custom-account/nonce-holder-test.sol/NonceHolderTest.json") +} + +pub(crate) fn read_error_contract() -> Vec<u8> { + read_bytecode( + "etc/contracts-test-data/artifacts-zk/contracts/error/error.sol/SimpleRequire.json", + ) +} + +pub(crate) fn get_execute_error_calldata() -> Vec<u8> { + let test_contract = load_contract( + "etc/contracts-test-data/artifacts-zk/contracts/error/error.sol/SimpleRequire.json", + ); + + let function = test_contract.function("require_short").unwrap(); + + function + .encode_input(&[]) + .expect("failed to encode parameters") +} + +pub(crate) fn read_many_owners_custom_account_contract() -> (Vec<u8>, Contract) { + let path = "etc/contracts-test-data/artifacts-zk/contracts/custom-account/many-owners-custom-account.sol/ManyOwnersCustomAccount.json"; + (read_bytecode(path), load_contract(path)) +} + +pub(crate) fn read_max_depth_contract() -> Vec<u8> { + read_zbin_bytecode( + "core/tests/ts-integration/contracts/zkasm/artifacts/deep_stak.zkasm/deep_stak.zkasm.zbin", + ) +} diff --git a/core/multivm_deps/vm_virtual_blocks/src/tracers/call.rs b/core/multivm_deps/vm_virtual_blocks/src/tracers/call.rs new file mode 100644 index 000000000000..127502476040 --- /dev/null +++ b/core/multivm_deps/vm_virtual_blocks/src/tracers/call.rs @@ -0,0 +1,241 @@ 
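+//! Tracer that reconstructs the call tree of a transaction from `zk_evm` execution hooks:
+//! far calls push a frame, near calls bump a per-frame counter, and rets pop frames and
+//! attach them to their parent (see `handle_ret_op_code` below).
+//!
+//! Rough usage sketch; how the tracer is attached to the VM is elided here, and the
+//! history-mode value is a placeholder:
+//! ```ignore
+//! let result = Arc::new(OnceCell::default());
+//! let tracer = CallTracer::new(result.clone(), HistoryEnabled);
+//! // run the VM with this tracer attached, then read the collected call tree:
+//! let calls: Option<&Vec<Call>> = result.get();
+//! ```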
+use once_cell::sync::OnceCell; +use std::marker::PhantomData; +use std::sync::Arc; + +use zk_evm::tracing::{AfterExecutionData, VmLocalStateData}; +use zk_evm::zkevm_opcode_defs::{ + FarCallABI, FarCallOpcode, FatPointer, Opcode, RetOpcode, + CALL_IMPLICIT_CALLDATA_FAT_PTR_REGISTER, RET_IMPLICIT_RETURNDATA_PARAMS_REGISTER, +}; + +use zksync_config::constants::CONTRACT_DEPLOYER_ADDRESS; +use zksync_state::{StoragePtr, WriteStorage}; +use zksync_types::vm_trace::{Call, CallType}; +use zksync_types::U256; + +use crate::errors::VmRevertReason; +use crate::old_vm::history_recorder::HistoryMode; +use crate::old_vm::memory::SimpleMemory; +use crate::tracers::traits::{DynTracer, ExecutionEndTracer, ExecutionProcessing, VmTracer}; +use crate::types::outputs::VmExecutionResultAndLogs; + +#[derive(Debug, Clone)] +pub struct CallTracer { + stack: Vec, + result: Arc>>, + _phantom: PhantomData H>, +} + +#[derive(Debug, Clone)] +struct FarcallAndNearCallCount { + farcall: Call, + near_calls_after: usize, +} + +impl CallTracer { + pub fn new(resulted_stack: Arc>>, _history: H) -> Self { + Self { + stack: vec![], + result: resulted_stack, + _phantom: PhantomData, + } + } +} + +impl DynTracer for CallTracer { + fn after_execution( + &mut self, + state: VmLocalStateData<'_>, + data: AfterExecutionData, + memory: &SimpleMemory, + _storage: StoragePtr, + ) { + match data.opcode.variant.opcode { + Opcode::NearCall(_) => { + if let Some(last) = self.stack.last_mut() { + last.near_calls_after += 1; + } + } + Opcode::FarCall(far_call) => { + // We use parent gas for properly calculating gas used in the trace. + let current_ergs = state.vm_local_state.callstack.current.ergs_remaining; + let parent_gas = state + .vm_local_state + .callstack + .inner + .last() + .map(|call| call.ergs_remaining + current_ergs) + .unwrap_or(current_ergs); + + let mut current_call = Call { + r#type: CallType::Call(far_call), + gas: 0, + parent_gas, + ..Default::default() + }; + + self.handle_far_call_op_code(state, data, memory, &mut current_call); + self.stack.push(FarcallAndNearCallCount { + farcall: current_call, + near_calls_after: 0, + }); + } + Opcode::Ret(ret_code) => { + self.handle_ret_op_code(state, data, memory, ret_code); + } + _ => {} + }; + } +} + +impl ExecutionEndTracer for CallTracer {} + +impl ExecutionProcessing for CallTracer {} + +impl VmTracer for CallTracer { + fn save_results(&mut self, _result: &mut VmExecutionResultAndLogs) { + self.result + .set( + std::mem::take(&mut self.stack) + .into_iter() + .map(|x| x.farcall) + .collect(), + ) + .expect("Result is already set"); + } +} + +impl CallTracer { + fn handle_far_call_op_code( + &mut self, + state: VmLocalStateData<'_>, + _data: AfterExecutionData, + memory: &SimpleMemory, + current_call: &mut Call, + ) { + let current = state.vm_local_state.callstack.current; + // All calls from the actual users are mimic calls, + // so we need to check that the previous call was to the deployer. + // Actually it's a call of the constructor. + // And at this stage caller is user and callee is deployed contract. 
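+        // Put differently: a mimic far call whose parent frame belongs to the contract
+        // deployer is a constructor invocation and is reported as `CallType::Create`;
+        // any other far call stays a plain `CallType::Call`.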
+ let call_type = if let CallType::Call(far_call) = current_call.r#type { + if matches!(far_call, FarCallOpcode::Mimic) { + let previous_caller = state + .vm_local_state + .callstack + .inner + .last() + .map(|call| call.this_address) + // Actually it's safe to just unwrap here, because we have at least one call in the stack + // But i want to be sure that we will not have any problems in the future + .unwrap_or(current.this_address); + if previous_caller == CONTRACT_DEPLOYER_ADDRESS { + CallType::Create + } else { + CallType::Call(far_call) + } + } else { + CallType::Call(far_call) + } + } else { + unreachable!() + }; + let calldata = if current.code_page.0 == 0 || current.ergs_remaining == 0 { + vec![] + } else { + let packed_abi = + state.vm_local_state.registers[CALL_IMPLICIT_CALLDATA_FAT_PTR_REGISTER as usize]; + assert!(packed_abi.is_pointer); + let far_call_abi = FarCallABI::from_u256(packed_abi.value); + memory.read_unaligned_bytes( + far_call_abi.memory_quasi_fat_pointer.memory_page as usize, + far_call_abi.memory_quasi_fat_pointer.start as usize, + far_call_abi.memory_quasi_fat_pointer.length as usize, + ) + }; + + current_call.input = calldata; + current_call.r#type = call_type; + current_call.from = current.msg_sender; + current_call.to = current.this_address; + current_call.value = U256::from(current.context_u128_value); + current_call.gas = current.ergs_remaining; + } + + fn save_output( + &mut self, + state: VmLocalStateData<'_>, + memory: &SimpleMemory, + ret_opcode: RetOpcode, + current_call: &mut Call, + ) { + let fat_data_pointer = + state.vm_local_state.registers[RET_IMPLICIT_RETURNDATA_PARAMS_REGISTER as usize]; + + // if fat_data_pointer is not a pointer then there is no output + let output = if fat_data_pointer.is_pointer { + let fat_data_pointer = FatPointer::from_u256(fat_data_pointer.value); + if !fat_data_pointer.is_trivial() { + Some(memory.read_unaligned_bytes( + fat_data_pointer.memory_page as usize, + fat_data_pointer.start as usize, + fat_data_pointer.length as usize, + )) + } else { + None + } + } else { + None + }; + + match ret_opcode { + RetOpcode::Ok => { + current_call.output = output.unwrap_or_default(); + } + RetOpcode::Revert => { + if let Some(output) = output { + current_call.revert_reason = + Some(VmRevertReason::from(output.as_slice()).to_string()); + } else { + current_call.revert_reason = Some("Unknown revert reason".to_string()); + } + } + RetOpcode::Panic => { + current_call.error = Some("Panic".to_string()); + } + } + } + + fn handle_ret_op_code( + &mut self, + state: VmLocalStateData<'_>, + _data: AfterExecutionData, + memory: &SimpleMemory, + ret_opcode: RetOpcode, + ) { + let Some(mut current_call) = self.stack.pop() else { + return; + }; + + if current_call.near_calls_after > 0 { + current_call.near_calls_after -= 1; + self.stack.push(current_call); + return; + } + + current_call.farcall.gas_used = current_call + .farcall + .parent_gas + .saturating_sub(state.vm_local_state.callstack.current.ergs_remaining); + + self.save_output(state, memory, ret_opcode, &mut current_call.farcall); + + // If there is a parent call, push the current call to it + // Otherwise, push the current call to the stack, because it's the top level call + if let Some(parent_call) = self.stack.last_mut() { + parent_call.farcall.calls.push(current_call.farcall); + } else { + self.stack.push(current_call); + } + } +} diff --git a/core/multivm_deps/vm_virtual_blocks/src/tracers/default_tracers.rs b/core/multivm_deps/vm_virtual_blocks/src/tracers/default_tracers.rs new 
file mode 100644 index 000000000000..7cc1e19869cf --- /dev/null +++ b/core/multivm_deps/vm_virtual_blocks/src/tracers/default_tracers.rs @@ -0,0 +1,259 @@ +use std::fmt::{Debug, Formatter}; + +use zk_evm::witness_trace::DummyTracer; +use zk_evm::zkevm_opcode_defs::{Opcode, RetOpcode}; +use zk_evm::{ + tracing::{ + AfterDecodingData, AfterExecutionData, BeforeExecutionData, Tracer, VmLocalStateData, + }, + vm_state::VmLocalState, +}; +use zksync_state::{StoragePtr, WriteStorage}; +use zksync_types::Timestamp; + +use crate::bootloader_state::utils::apply_l2_block; +use crate::bootloader_state::BootloaderState; +use crate::constants::BOOTLOADER_HEAP_PAGE; +use crate::old_vm::history_recorder::HistoryMode; +use crate::old_vm::memory::SimpleMemory; +use crate::tracers::traits::{DynTracer, ExecutionEndTracer, ExecutionProcessing, VmTracer}; +use crate::tracers::utils::{ + computational_gas_price, gas_spent_on_bytecodes_and_long_messages_this_opcode, + print_debug_if_needed, VmHook, +}; +use crate::tracers::ResultTracer; +use crate::types::internals::ZkSyncVmState; +use crate::{VmExecutionMode, VmExecutionStopReason}; + +/// Default tracer for the VM. It manages the other tracers execution and stop the vm when needed. +pub(crate) struct DefaultExecutionTracer { + tx_has_been_processed: bool, + execution_mode: VmExecutionMode, + + pub(crate) gas_spent_on_bytecodes_and_long_messages: u32, + // Amount of gas used during account validation. + pub(crate) computational_gas_used: u32, + // Maximum number of gas that we're allowed to use during account validation. + tx_validation_gas_limit: u32, + in_account_validation: bool, + final_batch_info_requested: bool, + pub(crate) result_tracer: ResultTracer, + pub(crate) custom_tracers: Vec>>, + ret_from_the_bootloader: Option, + storage: StoragePtr, +} + +impl Debug for DefaultExecutionTracer { + fn fmt(&self, f: &mut Formatter<'_>) -> std::fmt::Result { + f.debug_struct("DefaultExecutionTracer").finish() + } +} + +impl Tracer for DefaultExecutionTracer { + const CALL_BEFORE_DECODING: bool = true; + const CALL_AFTER_DECODING: bool = true; + const CALL_BEFORE_EXECUTION: bool = true; + const CALL_AFTER_EXECUTION: bool = true; + type SupportedMemory = SimpleMemory; + + fn before_decoding(&mut self, state: VmLocalStateData<'_>, memory: &Self::SupportedMemory) { + >::before_decoding(&mut self.result_tracer, state, memory); + for tracer in self.custom_tracers.iter_mut() { + tracer.before_decoding(state, memory) + } + } + + fn after_decoding( + &mut self, + state: VmLocalStateData<'_>, + data: AfterDecodingData, + memory: &Self::SupportedMemory, + ) { + >::after_decoding( + &mut self.result_tracer, + state, + data, + memory, + ); + for tracer in self.custom_tracers.iter_mut() { + tracer.after_decoding(state, data, memory) + } + } + + fn before_execution( + &mut self, + state: VmLocalStateData<'_>, + data: BeforeExecutionData, + memory: &Self::SupportedMemory, + ) { + if self.in_account_validation { + self.computational_gas_used = self + .computational_gas_used + .saturating_add(computational_gas_price(state, &data)); + } + + let hook = VmHook::from_opcode_memory(&state, &data); + print_debug_if_needed(&hook, &state, memory); + + match hook { + VmHook::TxHasEnded => self.tx_has_been_processed = true, + VmHook::NoValidationEntered => self.in_account_validation = false, + VmHook::AccountValidationEntered => self.in_account_validation = true, + VmHook::FinalBatchInfo => self.final_batch_info_requested = true, + _ => {} + } + + 
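// Accumulate the gas burnt on publishing bytecodes and long L2->L1 messages; `gas_spent_on_pubdata()` below adds this total to the VM's `spent_pubdata_counter`. +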
self.gas_spent_on_bytecodes_and_long_messages += + gas_spent_on_bytecodes_and_long_messages_this_opcode(&state, &data); + self.result_tracer + .before_execution(state, data, memory, self.storage.clone()); + for tracer in self.custom_tracers.iter_mut() { + tracer.before_execution(state, data, memory, self.storage.clone()); + } + } + + fn after_execution( + &mut self, + state: VmLocalStateData<'_>, + data: AfterExecutionData, + memory: &Self::SupportedMemory, + ) { + if let VmExecutionMode::Bootloader = self.execution_mode { + let (next_opcode, _, _) = zk_evm::vm_state::read_and_decode( + state.vm_local_state, + memory, + &mut DummyTracer, + self, + ); + if current_frame_is_bootloader(state.vm_local_state) { + if let Opcode::Ret(ret) = next_opcode.inner.variant.opcode { + self.ret_from_the_bootloader = Some(ret); + } + } + } + + self.result_tracer + .after_execution(state, data, memory, self.storage.clone()); + for tracer in self.custom_tracers.iter_mut() { + tracer.after_execution(state, data, memory, self.storage.clone()); + } + } +} + +impl ExecutionEndTracer for DefaultExecutionTracer { + fn should_stop_execution(&self) -> bool { + let mut should_stop = match self.execution_mode { + VmExecutionMode::OneTx => self.tx_has_been_processed(), + VmExecutionMode::Batch => false, + VmExecutionMode::Bootloader => self.ret_from_the_bootloader == Some(RetOpcode::Ok), + }; + should_stop = should_stop || self.validation_run_out_of_gas(); + for tracer in self.custom_tracers.iter() { + should_stop = should_stop || tracer.should_stop_execution(); + } + should_stop + } +} + +impl DefaultExecutionTracer { + pub(crate) fn new( + computational_gas_limit: u32, + execution_mode: VmExecutionMode, + custom_tracers: Vec>>, + storage: StoragePtr, + ) -> Self { + Self { + tx_has_been_processed: false, + execution_mode, + gas_spent_on_bytecodes_and_long_messages: 0, + computational_gas_used: 0, + tx_validation_gas_limit: computational_gas_limit, + in_account_validation: false, + final_batch_info_requested: false, + result_tracer: ResultTracer::new(execution_mode), + custom_tracers, + ret_from_the_bootloader: None, + storage, + } + } + + pub(crate) fn tx_has_been_processed(&self) -> bool { + self.tx_has_been_processed + } + + pub(crate) fn validation_run_out_of_gas(&self) -> bool { + self.computational_gas_used > self.tx_validation_gas_limit + } + + pub(crate) fn gas_spent_on_pubdata(&self, vm_local_state: &VmLocalState) -> u32 { + self.gas_spent_on_bytecodes_and_long_messages + vm_local_state.spent_pubdata_counter + } + + fn set_fictive_l2_block( + &mut self, + state: &mut ZkSyncVmState, + bootloader_state: &mut BootloaderState, + ) { + let current_timestamp = Timestamp(state.local_state.timestamp); + let txs_index = bootloader_state.free_tx_index(); + let l2_block = bootloader_state.insert_fictive_l2_block(); + let mut memory = vec![]; + apply_l2_block(&mut memory, l2_block, txs_index); + state + .memory + .populate_page(BOOTLOADER_HEAP_PAGE as usize, memory, current_timestamp); + self.final_batch_info_requested = false; + } +} + +impl DynTracer for DefaultExecutionTracer {} + +impl ExecutionProcessing for DefaultExecutionTracer { + fn initialize_tracer(&mut self, state: &mut ZkSyncVmState) { + self.result_tracer.initialize_tracer(state); + for processor in self.custom_tracers.iter_mut() { + processor.initialize_tracer(state); + } + } + + fn before_cycle(&mut self, state: &mut ZkSyncVmState) { + self.result_tracer.before_cycle(state); + for processor in self.custom_tracers.iter_mut() { + 
processor.before_cycle(state); + } + } + + fn after_cycle( + &mut self, + state: &mut ZkSyncVmState, + bootloader_state: &mut BootloaderState, + ) { + self.result_tracer.after_cycle(state, bootloader_state); + for processor in self.custom_tracers.iter_mut() { + processor.after_cycle(state, bootloader_state); + } + if self.final_batch_info_requested { + self.set_fictive_l2_block(state, bootloader_state) + } + } + + fn after_vm_execution( + &mut self, + state: &mut ZkSyncVmState, + bootloader_state: &BootloaderState, + stop_reason: VmExecutionStopReason, + ) { + self.result_tracer + .after_vm_execution(state, bootloader_state, stop_reason); + for processor in self.custom_tracers.iter_mut() { + processor.after_vm_execution(state, bootloader_state, stop_reason); + } + } +} + +fn current_frame_is_bootloader(local_state: &VmLocalState) -> bool { + // The current frame is bootloader if the callstack depth is 1. + // Some of the near calls inside the bootloader can be out of gas, which is totally normal behavior + // and it shouldn't result in `is_bootloader_out_of_gas` becoming true. + local_state.callstack.inner.len() == 1 +} diff --git a/core/multivm_deps/vm_virtual_blocks/src/tracers/mod.rs b/core/multivm_deps/vm_virtual_blocks/src/tracers/mod.rs new file mode 100644 index 000000000000..11fefedc85a5 --- /dev/null +++ b/core/multivm_deps/vm_virtual_blocks/src/tracers/mod.rs @@ -0,0 +1,15 @@ +pub(crate) use default_tracers::DefaultExecutionTracer; +pub(crate) use refunds::RefundsTracer; +pub(crate) use result_tracer::ResultTracer; +pub use storage_invocations::StorageInvocations; +pub use validation::{ValidationError, ValidationTracer, ValidationTracerParams}; + +pub(crate) mod default_tracers; +pub(crate) mod refunds; +pub(crate) mod result_tracer; + +pub(crate) mod call; +pub(crate) mod storage_invocations; +pub(crate) mod traits; +pub(crate) mod utils; +pub(crate) mod validation; diff --git a/core/multivm_deps/vm_virtual_blocks/src/tracers/refunds.rs b/core/multivm_deps/vm_virtual_blocks/src/tracers/refunds.rs new file mode 100644 index 000000000000..14dc16f7c8ed --- /dev/null +++ b/core/multivm_deps/vm_virtual_blocks/src/tracers/refunds.rs @@ -0,0 +1,394 @@ +use vise::{Buckets, EncodeLabelSet, EncodeLabelValue, Family, Histogram, Metrics}; + +use std::collections::HashMap; + +use zk_evm::{ + aux_structures::Timestamp, + tracing::{BeforeExecutionData, VmLocalStateData}, + vm_state::VmLocalState, +}; +use zksync_config::constants::{PUBLISH_BYTECODE_OVERHEAD, SYSTEM_CONTEXT_ADDRESS}; +use zksync_state::{StoragePtr, WriteStorage}; +use zksync_types::{ + event::{extract_long_l2_to_l1_messages, extract_published_bytecodes}, + l2_to_l1_log::L2ToL1Log, + zkevm_test_harness::witness::sort_storage_access::sort_storage_access_queries, + L1BatchNumber, StorageKey, U256, +}; +use zksync_utils::bytecode::bytecode_len_in_bytes; +use zksync_utils::{ceil_div_u256, u256_to_h256}; + +use crate::bootloader_state::BootloaderState; +use crate::constants::{BOOTLOADER_HEAP_PAGE, OPERATOR_REFUNDS_OFFSET, TX_GAS_LIMIT_OFFSET}; +use crate::old_vm::{ + events::merge_events, history_recorder::HistoryMode, memory::SimpleMemory, + oracles::storage::storage_key_of_log, utils::eth_price_per_pubdata_byte, +}; +use crate::tracers::utils::gas_spent_on_bytecodes_and_long_messages_this_opcode; +use crate::tracers::{ + traits::{DynTracer, ExecutionEndTracer, ExecutionProcessing, VmTracer}, + utils::{get_vm_hook_params, VmHook}, +}; +use crate::types::{ + inputs::L1BatchEnv, + internals::ZkSyncVmState, + outputs::{Refunds, 
VmExecutionResultAndLogs}, +}; + +/// Tracer responsible for collecting information about refunds. +#[derive(Debug, Clone)] +pub(crate) struct RefundsTracer { + // Some(x) means that the bootloader has asked the operator + // to provide the refund to the user, where `x` is the refund proposed + // by the bootloader itself. + pending_operator_refund: Option<u32>, + refund_gas: u32, + operator_refund: Option<u32>, + timestamp_initial: Timestamp, + timestamp_before_cycle: Timestamp, + gas_remaining_before: u32, + spent_pubdata_counter_before: u32, + gas_spent_on_bytecodes_and_long_messages: u32, + l1_batch: L1BatchEnv, +} + +impl RefundsTracer { + pub(crate) fn new(l1_batch: L1BatchEnv) -> Self { + Self { + pending_operator_refund: None, + refund_gas: 0, + operator_refund: None, + timestamp_initial: Timestamp(0), + timestamp_before_cycle: Timestamp(0), + gas_remaining_before: 0, + spent_pubdata_counter_before: 0, + gas_spent_on_bytecodes_and_long_messages: 0, + l1_batch, + } + } +} + +impl RefundsTracer { + fn requested_refund(&self) -> Option<u32> { + self.pending_operator_refund + } + + fn set_refund_as_done(&mut self) { + self.pending_operator_refund = None; + } + + fn block_overhead_refund(&mut self) -> u32 { + 0 + } + + pub(crate) fn tx_body_refund( + &self, + bootloader_refund: u32, + gas_spent_on_pubdata: u32, + tx_gas_limit: u32, + current_ergs_per_pubdata_byte: u32, + pubdata_published: u32, + ) -> u32 { + let total_gas_spent = tx_gas_limit - bootloader_refund; + + let gas_spent_on_computation = total_gas_spent + .checked_sub(gas_spent_on_pubdata) + .unwrap_or_else(|| { + tracing::error!( + "Gas spent on pubdata is greater than total gas spent. On pubdata: {}, total: {}", + gas_spent_on_pubdata, + total_gas_spent + ); + 0 + }); + + // For now, bootloader charges only for base fee. + let effective_gas_price = self.l1_batch.base_fee(); + + let bootloader_eth_price_per_pubdata_byte = + U256::from(effective_gas_price) * U256::from(current_ergs_per_pubdata_byte); + + let fair_eth_price_per_pubdata_byte = + U256::from(eth_price_per_pubdata_byte(self.l1_batch.l1_gas_price)); + + // For now, L1 originated transactions are allowed to pay less than fair fee per pubdata, + // so we should take it into account. + let eth_price_per_pubdata_byte_for_calculation = std::cmp::min( + bootloader_eth_price_per_pubdata_byte, + fair_eth_price_per_pubdata_byte, + ); + + let fair_fee_eth = U256::from(gas_spent_on_computation) + * U256::from(self.l1_batch.fair_l2_gas_price) + + U256::from(pubdata_published) * eth_price_per_pubdata_byte_for_calculation; + let pre_paid_eth = U256::from(tx_gas_limit) * U256::from(effective_gas_price); + let refund_eth = pre_paid_eth.checked_sub(fair_fee_eth).unwrap_or_else(|| { + tracing::error!( + "Fair fee is greater than pre paid. 
Fair fee: {} wei, pre paid: {} wei", + fair_fee_eth, + pre_paid_eth + ); + U256::zero() + }); + + ceil_div_u256(refund_eth, effective_gas_price.into()).as_u32() + } + + pub(crate) fn gas_spent_on_pubdata(&self, vm_local_state: &VmLocalState) -> u32 { + self.gas_spent_on_bytecodes_and_long_messages + vm_local_state.spent_pubdata_counter + } +} + +impl DynTracer for RefundsTracer { + fn before_execution( + &mut self, + state: VmLocalStateData<'_>, + data: BeforeExecutionData, + memory: &SimpleMemory, + _storage: StoragePtr, + ) { + let hook = VmHook::from_opcode_memory(&state, &data); + match hook { + VmHook::NotifyAboutRefund => self.refund_gas = get_vm_hook_params(memory)[0].as_u32(), + VmHook::AskOperatorForRefund => { + self.pending_operator_refund = Some(get_vm_hook_params(memory)[0].as_u32()) + } + _ => {} + } + + self.gas_spent_on_bytecodes_and_long_messages += + gas_spent_on_bytecodes_and_long_messages_this_opcode(&state, &data); + } +} + +impl ExecutionEndTracer for RefundsTracer {} + +impl ExecutionProcessing for RefundsTracer { + fn initialize_tracer(&mut self, state: &mut ZkSyncVmState) { + self.timestamp_initial = Timestamp(state.local_state.timestamp); + self.gas_remaining_before = state.local_state.callstack.current.ergs_remaining; + self.spent_pubdata_counter_before = state.local_state.spent_pubdata_counter; + } + + fn before_cycle(&mut self, state: &mut ZkSyncVmState) { + self.timestamp_before_cycle = Timestamp(state.local_state.timestamp); + } + + fn after_cycle( + &mut self, + state: &mut ZkSyncVmState, + bootloader_state: &mut BootloaderState, + ) { + #[derive(Debug, Clone, Copy, PartialEq, Eq, Hash, EncodeLabelValue, EncodeLabelSet)] + #[metrics(label = "type", rename_all = "snake_case")] + enum RefundType { + Bootloader, + Operator, + } + + const PERCENT_BUCKETS: Buckets = Buckets::values(&[ + 5.0, 10.0, 20.0, 30.0, 40.0, 50.0, 60.0, 70.0, 80.0, 90.0, 100.0, 120.0, + ]); + + #[derive(Debug, Metrics)] + #[metrics(prefix = "vm_virtual_blocks")] + struct RefundMetrics { + #[metrics(buckets = PERCENT_BUCKETS)] + refund: Family>, + #[metrics(buckets = PERCENT_BUCKETS)] + refund_diff: Histogram, + } + + #[vise::register] + static METRICS: vise::Global = vise::Global::new(); + + // This means that the bootloader has informed the system (usually via VMHooks) - that some gas + // should be refunded back (see askOperatorForRefund in bootloader.yul for details). + if let Some(bootloader_refund) = self.requested_refund() { + assert!( + self.operator_refund.is_none(), + "Operator was asked for refund two times" + ); + let gas_spent_on_pubdata = + self.gas_spent_on_pubdata(&state.local_state) - self.spent_pubdata_counter_before; + + let current_tx_index = bootloader_state.current_tx(); + let tx_description_offset = + bootloader_state.get_tx_description_offset(current_tx_index); + let tx_gas_limit = state + .memory + .read_slot( + BOOTLOADER_HEAP_PAGE as usize, + tx_description_offset + TX_GAS_LIMIT_OFFSET, + ) + .value + .as_u32(); + + let pubdata_published = + pubdata_published(state, self.timestamp_initial, self.l1_batch.number); + + let current_ergs_per_pubdata_byte = state.local_state.current_ergs_per_pubdata_byte; + let tx_body_refund = self.tx_body_refund( + bootloader_refund, + gas_spent_on_pubdata, + tx_gas_limit, + current_ergs_per_pubdata_byte, + pubdata_published, + ); + + if tx_body_refund < bootloader_refund { + tracing::error!( + "Suggested tx body refund is less than bootloader refund. 
Tx body refund: {tx_body_refund}, \ + bootloader refund: {bootloader_refund}" + ); + } + + let refund_to_propose = tx_body_refund + self.block_overhead_refund(); + + let refund_slot = OPERATOR_REFUNDS_OFFSET + current_tx_index; + + // Writing the refund into memory + state.memory.populate_page( + BOOTLOADER_HEAP_PAGE as usize, + vec![(refund_slot, refund_to_propose.into())], + self.timestamp_before_cycle, + ); + + bootloader_state.set_refund_for_current_tx(refund_to_propose); + self.operator_refund = Some(refund_to_propose); + self.set_refund_as_done(); + + if tx_gas_limit < bootloader_refund { + tracing::error!( + "Tx gas limit is less than bootloader refund. Tx gas limit: {tx_gas_limit}, \ + bootloader refund: {bootloader_refund}" + ); + } + if tx_gas_limit < refund_to_propose { + tracing::error!( + "Tx gas limit is less than operator refund. Tx gas limit: {tx_gas_limit}, \ + operator refund: {refund_to_propose}" + ); + } + + METRICS.refund[&RefundType::Bootloader] + .observe(bootloader_refund as f64 / tx_gas_limit as f64 * 100.0); + METRICS.refund[&RefundType::Operator] + .observe(refund_to_propose as f64 / tx_gas_limit as f64 * 100.0); + let refund_diff = + (refund_to_propose as f64 - bootloader_refund as f64) / tx_gas_limit as f64 * 100.0; + METRICS.refund_diff.observe(refund_diff); + } + } +} + +/// Returns the number of bytes of pubdata published after the given timestamp: +/// storage writes, L2->L1 logs, long messages and published bytecodes. +pub(crate) fn pubdata_published<S: WriteStorage, H: HistoryMode>( + state: &ZkSyncVmState<S, H>, + from_timestamp: Timestamp, + batch_number: L1BatchNumber, +) -> u32 { + let storage_writes_pubdata_published = pubdata_published_for_writes(state, from_timestamp); + + let (raw_events, l1_messages) = state + .event_sink + .get_events_and_l2_l1_logs_after_timestamp(from_timestamp); + let events: Vec<_> = merge_events(raw_events) + .into_iter() + .map(|e| e.into_vm_event(batch_number)) + .collect(); + // For the first transaction in L1 batch there may be (it depends on the execution mode) an L2->L1 log + // that is sent by `SystemContext` in `setNewBlock`. It's a part of the L1 batch pubdata overhead and not the transaction itself. + let l2_l1_logs_bytes = (l1_messages + .into_iter() + .map(|log| L2ToL1Log { + shard_id: log.shard_id, + is_service: log.is_first, + tx_number_in_block: log.tx_number_in_block, + sender: log.address, + key: u256_to_h256(log.key), + value: u256_to_h256(log.value), + }) + .filter(|log| log.sender != SYSTEM_CONTEXT_ADDRESS) + .count() as u32) + * zk_evm::zkevm_opcode_defs::system_params::L1_MESSAGE_PUBDATA_BYTES; + let l2_l1_long_messages_bytes: u32 = extract_long_l2_to_l1_messages(&events) + .iter() + .map(|event| event.len() as u32) + .sum(); + + let published_bytecode_bytes: u32 = extract_published_bytecodes(&events) + .iter() + .map(|bytecodehash| bytecode_len_in_bytes(*bytecodehash) as u32 + PUBLISH_BYTECODE_OVERHEAD) + .sum(); + + storage_writes_pubdata_published + + l2_l1_logs_bytes + + l2_l1_long_messages_bytes + + published_bytecode_bytes +} + +fn pubdata_published_for_writes<S: WriteStorage, H: HistoryMode>( + state: &ZkSyncVmState<S, H>, + from_timestamp: Timestamp, +) -> u32 { + // This `HashMap` contains how much was already paid for every slot that was paid during the last tx execution. + // For the slots that weren't paid during the last tx execution we can just use + // `self.state.storage.paid_changes.inner().get(&key)` to get how much it was paid before. 
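+    // The paid-changes history is walked newest-first and cut off at `from_timestamp`, so for
+    // every slot touched by this tx we recover the price that had been paid before the tx started.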
+ let pre_paid_before_tx_map: HashMap = state + .storage + .paid_changes + .history() + .iter() + .rev() + .take_while(|history_elem| history_elem.0 >= from_timestamp) + .map(|history_elem| (history_elem.1.key, history_elem.1.value.unwrap_or(0))) + .collect(); + let pre_paid_before_tx = |key: &StorageKey| -> u32 { + if let Some(pre_paid) = pre_paid_before_tx_map.get(key) { + *pre_paid + } else { + state + .storage + .paid_changes + .inner() + .get(key) + .copied() + .unwrap_or(0) + } + }; + + let storage_logs = state + .storage + .storage_log_queries_after_timestamp(from_timestamp); + let (_, deduplicated_logs) = + sort_storage_access_queries(storage_logs.iter().map(|log| &log.log_query)); + + deduplicated_logs + .into_iter() + .filter_map(|log| { + if log.rw_flag { + let key = storage_key_of_log(&log); + let pre_paid = pre_paid_before_tx(&key); + let to_pay_by_user = state.storage.base_price_for_write(&log); + + if to_pay_by_user > pre_paid { + Some(to_pay_by_user - pre_paid) + } else { + None + } + } else { + None + } + }) + .sum() +} + +impl VmTracer for RefundsTracer { + fn save_results(&mut self, result: &mut VmExecutionResultAndLogs) { + result.refunds = Refunds { + gas_refunded: self.refund_gas, + operator_suggested_refund: self.operator_refund.unwrap_or_default(), + } + } +} diff --git a/core/multivm_deps/vm_virtual_blocks/src/tracers/result_tracer.rs b/core/multivm_deps/vm_virtual_blocks/src/tracers/result_tracer.rs new file mode 100644 index 000000000000..b8e089493565 --- /dev/null +++ b/core/multivm_deps/vm_virtual_blocks/src/tracers/result_tracer.rs @@ -0,0 +1,246 @@ +use zk_evm::{ + tracing::{AfterDecodingData, BeforeExecutionData, VmLocalStateData}, + vm_state::{ErrorFlags, VmLocalState}, + zkevm_opcode_defs::FatPointer, +}; +use zksync_state::{StoragePtr, WriteStorage}; + +use zksync_types::U256; + +use crate::bootloader_state::BootloaderState; +use crate::errors::VmRevertReason; +use crate::old_vm::{ + history_recorder::HistoryMode, + memory::SimpleMemory, + utils::{vm_may_have_ended_inner, VmExecutionResult}, +}; +use crate::tracers::{ + traits::{DynTracer, ExecutionEndTracer, ExecutionProcessing, VmTracer}, + utils::{get_vm_hook_params, read_pointer, VmHook}, +}; +use crate::types::{ + internals::ZkSyncVmState, + outputs::{ExecutionResult, VmExecutionResultAndLogs}, +}; + +use crate::constants::{BOOTLOADER_HEAP_PAGE, RESULT_SUCCESS_FIRST_SLOT}; +use crate::{Halt, TxRevertReason}; +use crate::{VmExecutionMode, VmExecutionStopReason}; + +#[derive(Debug, Clone)] +enum Result { + Error { error_reason: VmRevertReason }, + Success { return_data: Vec }, + Halt { reason: Halt }, +} + +/// Tracer responsible for handling the VM execution result. +#[derive(Debug, Clone)] +pub(crate) struct ResultTracer { + result: Option, + bootloader_out_of_gas: bool, + execution_mode: VmExecutionMode, +} + +impl ResultTracer { + pub(crate) fn new(execution_mode: VmExecutionMode) -> Self { + Self { + result: None, + bootloader_out_of_gas: false, + execution_mode, + } + } +} + +fn current_frame_is_bootloader(local_state: &VmLocalState) -> bool { + // The current frame is bootloader if the callstack depth is 1. + // Some of the near calls inside the bootloader can be out of gas, which is totally normal behavior + // and it shouldn't result in `is_bootloader_out_of_gas` becoming true. 
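+    // `callstack.inner` holds the saved parent frames, so exactly one saved frame means the code
+    // currently executing is the bootloader itself rather than something it has called.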
+ local_state.callstack.inner.len() == 1 +} + +impl DynTracer for ResultTracer { + fn after_decoding( + &mut self, + state: VmLocalStateData<'_>, + data: AfterDecodingData, + _memory: &SimpleMemory, + ) { + // We should check not only for the `NOT_ENOUGH_ERGS` flag but if the current frame is bootloader too. + if current_frame_is_bootloader(state.vm_local_state) + && data + .error_flags_accumulated + .contains(ErrorFlags::NOT_ENOUGH_ERGS) + { + self.bootloader_out_of_gas = true; + } + } + + fn before_execution( + &mut self, + state: VmLocalStateData<'_>, + data: BeforeExecutionData, + memory: &SimpleMemory, + _storage: StoragePtr, + ) { + let hook = VmHook::from_opcode_memory(&state, &data); + if let VmHook::ExecutionResult = hook { + let vm_hook_params = get_vm_hook_params(memory); + let success = vm_hook_params[0]; + let returndata_ptr = FatPointer::from_u256(vm_hook_params[1]); + let returndata = read_pointer(memory, returndata_ptr); + if success == U256::zero() { + self.result = Some(Result::Error { + // Tx has reverted, without bootloader error, we can simply parse the revert reason + error_reason: (VmRevertReason::from(returndata.as_slice())), + }); + } else { + self.result = Some(Result::Success { + return_data: returndata, + }); + } + } + } +} + +impl ExecutionEndTracer for ResultTracer {} + +impl ExecutionProcessing for ResultTracer { + fn after_vm_execution( + &mut self, + state: &mut ZkSyncVmState, + bootloader_state: &BootloaderState, + stop_reason: VmExecutionStopReason, + ) { + match stop_reason { + // Vm has finished execution, we need to check the result of it + VmExecutionStopReason::VmFinished => { + self.vm_finished_execution(state); + } + // One of the tracers above has requested to stop the execution. + // If it was the correct stop we already have the result, + // otherwise it can be out of gas error + VmExecutionStopReason::TracerRequestedStop => { + match self.execution_mode { + VmExecutionMode::OneTx => self.vm_stopped_execution(state, bootloader_state), + VmExecutionMode::Batch => self.vm_finished_execution(state), + VmExecutionMode::Bootloader => self.vm_finished_execution(state), + }; + } + } + } +} + +impl ResultTracer { + fn vm_finished_execution( + &mut self, + state: &ZkSyncVmState, + ) { + let Some(result) = vm_may_have_ended_inner(state) else { + // The VM has finished execution, but the result is not yet available. 
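+            // Fall back to a success with empty return data: the VM ran to completion without
+            // reporting an explicit execution result.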
+ self.result = Some(Result::Success { + return_data: vec![], + }); + return; + }; + + // Check it's not inside tx + match result { + VmExecutionResult::Ok(output) => { + self.result = Some(Result::Success { + return_data: output, + }); + } + VmExecutionResult::Revert(output) => { + // Unlike VmHook::ExecutionResult, vm has completely finished and returned not only the revert reason, + // but with bytecode, which represents the type of error from the bootloader side + let revert_reason = TxRevertReason::parse_error(&output); + + match revert_reason { + TxRevertReason::TxReverted(reason) => { + self.result = Some(Result::Error { + error_reason: reason, + }); + } + TxRevertReason::Halt(halt) => { + self.result = Some(Result::Halt { reason: halt }); + } + }; + } + VmExecutionResult::Panic => { + if self.bootloader_out_of_gas { + self.result = Some(Result::Halt { + reason: Halt::BootloaderOutOfGas, + }); + } else { + self.result = Some(Result::Halt { + reason: Halt::VMPanic, + }); + } + } + VmExecutionResult::MostLikelyDidNotFinish(_, _) => { + unreachable!() + } + } + } + + fn vm_stopped_execution( + &mut self, + state: &ZkSyncVmState, + bootloader_state: &BootloaderState, + ) { + if self.bootloader_out_of_gas { + self.result = Some(Result::Halt { + reason: Halt::BootloaderOutOfGas, + }); + } else { + if self.result.is_some() { + return; + } + + let has_failed = tx_has_failed(state, bootloader_state.current_tx() as u32); + if has_failed { + self.result = Some(Result::Error { + error_reason: VmRevertReason::General { + msg: "Transaction reverted with empty reason. Possibly out of gas" + .to_string(), + data: vec![], + }, + }); + } else { + self.result = Some(self.result.clone().unwrap_or(Result::Success { + return_data: vec![], + })); + } + } + } + + pub(crate) fn into_result(self) -> ExecutionResult { + match self.result.unwrap() { + Result::Error { error_reason } => ExecutionResult::Revert { + output: error_reason, + }, + Result::Success { return_data } => ExecutionResult::Success { + output: return_data, + }, + Result::Halt { reason } => ExecutionResult::Halt { reason }, + } + } +} + +impl VmTracer for ResultTracer { + fn save_results(&mut self, _result: &mut VmExecutionResultAndLogs) {} +} + +pub(crate) fn tx_has_failed( + state: &ZkSyncVmState, + tx_id: u32, +) -> bool { + let mem_slot = RESULT_SUCCESS_FIRST_SLOT + tx_id; + let mem_value = state + .memory + .read_slot(BOOTLOADER_HEAP_PAGE as usize, mem_slot as usize) + .value; + + mem_value == U256::zero() +} diff --git a/core/multivm_deps/vm_virtual_blocks/src/tracers/storage_invocations.rs b/core/multivm_deps/vm_virtual_blocks/src/tracers/storage_invocations.rs new file mode 100644 index 000000000000..ef4b59c60a88 --- /dev/null +++ b/core/multivm_deps/vm_virtual_blocks/src/tracers/storage_invocations.rs @@ -0,0 +1,44 @@ +use crate::bootloader_state::BootloaderState; +use crate::old_vm::history_recorder::HistoryMode; +use crate::tracers::traits::{DynTracer, ExecutionEndTracer, ExecutionProcessing, VmTracer}; +use crate::types::internals::ZkSyncVmState; +use zksync_state::WriteStorage; + +#[derive(Debug, Default, Clone)] +pub struct StorageInvocations { + limit: usize, + current: usize, +} + +impl StorageInvocations { + pub fn new(limit: usize) -> Self { + Self { limit, current: 0 } + } +} + +/// Tracer responsible for calculating the number of storage invocations and +/// stopping the VM execution if the limit is reached. 
+impl DynTracer for StorageInvocations {} + +impl ExecutionEndTracer for StorageInvocations { + fn should_stop_execution(&self) -> bool { + self.current >= self.limit + } +} + +impl ExecutionProcessing for StorageInvocations { + fn after_cycle( + &mut self, + state: &mut ZkSyncVmState, + _bootloader_state: &mut BootloaderState, + ) { + self.current = state + .storage + .storage + .get_ptr() + .borrow() + .missed_storage_invocations(); + } +} + +impl VmTracer for StorageInvocations {} diff --git a/core/multivm_deps/vm_virtual_blocks/src/tracers/traits.rs b/core/multivm_deps/vm_virtual_blocks/src/tracers/traits.rs new file mode 100644 index 000000000000..6e76a041fabc --- /dev/null +++ b/core/multivm_deps/vm_virtual_blocks/src/tracers/traits.rs @@ -0,0 +1,85 @@ +use zk_evm::tracing::{ + AfterDecodingData, AfterExecutionData, BeforeExecutionData, VmLocalStateData, +}; +use zksync_state::{StoragePtr, WriteStorage}; + +use crate::bootloader_state::BootloaderState; +use crate::old_vm::history_recorder::HistoryMode; +use crate::old_vm::memory::SimpleMemory; +use crate::types::internals::ZkSyncVmState; +use crate::types::outputs::VmExecutionResultAndLogs; +use crate::VmExecutionStopReason; + +/// Run tracer for collecting data during the vm execution cycles +pub trait ExecutionProcessing: + DynTracer + ExecutionEndTracer +{ + fn initialize_tracer(&mut self, _state: &mut ZkSyncVmState) {} + fn before_cycle(&mut self, _state: &mut ZkSyncVmState) {} + fn after_cycle( + &mut self, + _state: &mut ZkSyncVmState, + _bootloader_state: &mut BootloaderState, + ) { + } + fn after_vm_execution( + &mut self, + _state: &mut ZkSyncVmState, + _bootloader_state: &BootloaderState, + _stop_reason: VmExecutionStopReason, + ) { + } +} + +/// Stop the vm execution if the tracer conditions are met +pub trait ExecutionEndTracer { + // Returns whether the vm execution should stop. + fn should_stop_execution(&self) -> bool { + false + } +} + +/// Version of zk_evm::Tracer suitable for dynamic dispatch. +pub trait DynTracer { + fn before_decoding(&mut self, _state: VmLocalStateData<'_>, _memory: &SimpleMemory) {} + fn after_decoding( + &mut self, + _state: VmLocalStateData<'_>, + _data: AfterDecodingData, + _memory: &SimpleMemory, + ) { + } + fn before_execution( + &mut self, + _state: VmLocalStateData<'_>, + _data: BeforeExecutionData, + _memory: &SimpleMemory, + _storage: StoragePtr, + ) { + } + fn after_execution( + &mut self, + _state: VmLocalStateData<'_>, + _data: AfterExecutionData, + _memory: &SimpleMemory, + _storage: StoragePtr, + ) { + } +} + +/// Save the results of the vm execution. 
+pub trait VmTracer<S: WriteStorage, H: HistoryMode>: + DynTracer<S, H> + ExecutionEndTracer<H> + ExecutionProcessing<S, H> + Send +{ + fn save_results(&mut self, _result: &mut VmExecutionResultAndLogs) {} +} + +pub trait BoxedTracer<S, H> { + fn into_boxed(self) -> Box<dyn VmTracer<S, H>>; +} + +impl<S: WriteStorage, H: HistoryMode, T: VmTracer<S, H> + 'static> BoxedTracer<S, H> for T { + fn into_boxed(self) -> Box<dyn VmTracer<S, H>> { + Box::new(self) + } +} diff --git a/core/multivm_deps/vm_virtual_blocks/src/tracers/utils.rs b/core/multivm_deps/vm_virtual_blocks/src/tracers/utils.rs new file mode 100644 index 000000000000..f86b496b0787 --- /dev/null +++ b/core/multivm_deps/vm_virtual_blocks/src/tracers/utils.rs @@ -0,0 +1,224 @@ +use zk_evm::aux_structures::MemoryPage; +use zk_evm::zkevm_opcode_defs::{FarCallABI, FarCallForwardPageType}; +use zk_evm::{ + tracing::{BeforeExecutionData, VmLocalStateData}, + zkevm_opcode_defs::{FatPointer, LogOpcode, Opcode, UMAOpcode}, +}; + +use zksync_config::constants::{ + ECRECOVER_PRECOMPILE_ADDRESS, KECCAK256_PRECOMPILE_ADDRESS, KNOWN_CODES_STORAGE_ADDRESS, + L1_MESSENGER_ADDRESS, SHA256_PRECOMPILE_ADDRESS, +}; +use zksync_types::U256; +use zksync_utils::u256_to_h256; + +use crate::constants::{ + BOOTLOADER_HEAP_PAGE, VM_HOOK_PARAMS_COUNT, VM_HOOK_PARAMS_START_POSITION, VM_HOOK_POSITION, +}; +use crate::old_vm::history_recorder::HistoryMode; +use crate::old_vm::memory::SimpleMemory; +use crate::old_vm::utils::{aux_heap_page_from_base, heap_page_from_base}; + +#[derive(Clone, Debug, Copy)] +pub(crate) enum VmHook { + AccountValidationEntered, + PaymasterValidationEntered, + NoValidationEntered, + ValidationStepEndeded, + TxHasEnded, + DebugLog, + DebugReturnData, + NoHook, + NearCallCatch, + AskOperatorForRefund, + NotifyAboutRefund, + ExecutionResult, + FinalBatchInfo, +} + +impl VmHook { + pub(crate) fn from_opcode_memory( + state: &VmLocalStateData<'_>, + data: &BeforeExecutionData, + ) -> Self { + let opcode_variant = data.opcode.variant; + let heap_page = + heap_page_from_base(state.vm_local_state.callstack.current.base_memory_page).0; + + let src0_value = data.src0_value.value; + + let fat_ptr = FatPointer::from_u256(src0_value); + + let value = data.src1_value.value; + + // Only UMA opcodes in the bootloader serve for vm hooks + if !matches!(opcode_variant.opcode, Opcode::UMA(UMAOpcode::HeapWrite)) + || heap_page != BOOTLOADER_HEAP_PAGE + || fat_ptr.offset != VM_HOOK_POSITION * 32 + { + return Self::NoHook; + } + + match value.as_u32() { + 0 => Self::AccountValidationEntered, + 1 => Self::PaymasterValidationEntered, + 2 => Self::NoValidationEntered, + 3 => Self::ValidationStepEndeded, + 4 => Self::TxHasEnded, + 5 => Self::DebugLog, + 6 => Self::DebugReturnData, + 7 => Self::NearCallCatch, + 8 => Self::AskOperatorForRefund, + 9 => Self::NotifyAboutRefund, + 10 => Self::ExecutionResult, + 11 => Self::FinalBatchInfo, + _ => panic!("Unknown hook"), + } + } +} + +pub(crate) fn get_debug_log<H: HistoryMode>( + state: &VmLocalStateData<'_>, + memory: &SimpleMemory<H>, +) -> String { + let vm_hook_params: Vec<_> = get_vm_hook_params(memory) + .into_iter() + .map(u256_to_h256) + .collect(); + let msg = vm_hook_params[0].as_bytes().to_vec(); + let data = vm_hook_params[1].as_bytes().to_vec(); + + let msg = String::from_utf8(msg).expect("Invalid debug message"); + let data = U256::from_big_endian(&data); + + // For long data, it is better to use hex-encoding for greater readability + let data_str = if data > U256::from(u64::max_value()) { + let mut bytes = [0u8; 32]; + data.to_big_endian(&mut bytes); + format!("0x{}", hex::encode(bytes)) + } else { + data.to_string() + }; + + let tx_id = 
state.vm_local_state.tx_number_in_block; + + format!("Bootloader transaction {}: {} {}", tx_id, msg, data_str) +} + +/// Reads the memory slice represented by the fat pointer. +/// Note, that the fat pointer must point to the accesible memory (i.e. not cleared up yet). +pub(crate) fn read_pointer( + memory: &SimpleMemory, + pointer: FatPointer, +) -> Vec { + let FatPointer { + offset, + length, + start, + memory_page, + } = pointer; + + // The actual bounds of the returndata ptr is [start+offset..start+length] + let mem_region_start = start + offset; + let mem_region_length = length - offset; + + memory.read_unaligned_bytes( + memory_page as usize, + mem_region_start as usize, + mem_region_length as usize, + ) +} + +/// Outputs the returndata for the latest call. +/// This is usually used to output the revert reason. +pub(crate) fn get_debug_returndata(memory: &SimpleMemory) -> String { + let vm_hook_params: Vec<_> = get_vm_hook_params(memory); + let returndata_ptr = FatPointer::from_u256(vm_hook_params[0]); + let returndata = read_pointer(memory, returndata_ptr); + + format!("0x{}", hex::encode(returndata)) +} + +/// Accepts a vm hook and, if it requires to output some debug log, outputs it. +pub(crate) fn print_debug_if_needed( + hook: &VmHook, + state: &VmLocalStateData<'_>, + memory: &SimpleMemory, +) { + let log = match hook { + VmHook::DebugLog => get_debug_log(state, memory), + VmHook::DebugReturnData => get_debug_returndata(memory), + _ => return, + }; + + tracing::trace!("{}", log); +} + +pub(crate) fn computational_gas_price( + state: VmLocalStateData<'_>, + data: &BeforeExecutionData, +) -> u32 { + // We calculate computational gas used as a raw price for opcode plus cost for precompiles. + // This calculation is incomplete as it misses decommitment and memory growth costs. + // To calculate decommitment cost we need an access to decommitter oracle which is missing in tracer now. + // Memory growth calculation is complex and it will require different logic for different opcodes (`FarCall`, `Ret`, `UMA`). + let base_price = data.opcode.inner.variant.ergs_price(); + let precompile_price = match data.opcode.variant.opcode { + Opcode::Log(LogOpcode::PrecompileCall) => { + let address = state.vm_local_state.callstack.current.this_address; + + if address == KECCAK256_PRECOMPILE_ADDRESS + || address == SHA256_PRECOMPILE_ADDRESS + || address == ECRECOVER_PRECOMPILE_ADDRESS + { + data.src1_value.value.low_u32() + } else { + 0 + } + } + _ => 0, + }; + base_price + precompile_price +} + +pub(crate) fn gas_spent_on_bytecodes_and_long_messages_this_opcode( + state: &VmLocalStateData<'_>, + data: &BeforeExecutionData, +) -> u32 { + if data.opcode.variant.opcode == Opcode::Log(LogOpcode::PrecompileCall) { + let current_stack = state.vm_local_state.callstack.get_current_stack(); + // Trace for precompile calls from `KNOWN_CODES_STORAGE_ADDRESS` and `L1_MESSENGER_ADDRESS` that burn some gas. + // Note, that if there is less gas left than requested to burn it will be burnt anyway. 
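+        // Hence the `min` with `ergs_remaining` below: the amount actually burnt can never
+        // exceed the gas left in the current frame.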
+ if current_stack.this_address == KNOWN_CODES_STORAGE_ADDRESS + || current_stack.this_address == L1_MESSENGER_ADDRESS + { + std::cmp::min(data.src1_value.value.as_u32(), current_stack.ergs_remaining) + } else { + 0 + } + } else { + 0 + } +} + +pub(crate) fn get_calldata_page_via_abi(far_call_abi: &FarCallABI, base_page: MemoryPage) -> u32 { + match far_call_abi.forwarding_mode { + FarCallForwardPageType::ForwardFatPointer => { + far_call_abi.memory_quasi_fat_pointer.memory_page + } + FarCallForwardPageType::UseAuxHeap => aux_heap_page_from_base(base_page).0, + FarCallForwardPageType::UseHeap => heap_page_from_base(base_page).0, + } +} +pub(crate) fn get_vm_hook_params(memory: &SimpleMemory) -> Vec { + memory.dump_page_content_as_u256_words( + BOOTLOADER_HEAP_PAGE, + VM_HOOK_PARAMS_START_POSITION..VM_HOOK_PARAMS_START_POSITION + VM_HOOK_PARAMS_COUNT, + ) +} + +#[derive(Debug, Clone, Copy, PartialEq)] +pub enum VmExecutionStopReason { + VmFinished, + TracerRequestedStop, +} diff --git a/core/multivm_deps/vm_virtual_blocks/src/tracers/validation/error.rs b/core/multivm_deps/vm_virtual_blocks/src/tracers/validation/error.rs new file mode 100644 index 000000000000..8fb104cb67a3 --- /dev/null +++ b/core/multivm_deps/vm_virtual_blocks/src/tracers/validation/error.rs @@ -0,0 +1,22 @@ +use crate::Halt; +use std::fmt::Display; +use zksync_types::vm_trace::ViolatedValidationRule; + +#[derive(Debug, Clone)] +pub enum ValidationError { + FailedTx(Halt), + ViolatedRule(ViolatedValidationRule), +} + +impl Display for ValidationError { + fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { + match self { + Self::FailedTx(revert_reason) => { + write!(f, "Validation revert: {}", revert_reason) + } + Self::ViolatedRule(rule) => { + write!(f, "Violated validation rules: {}", rule) + } + } + } +} diff --git a/core/multivm_deps/vm_virtual_blocks/src/tracers/validation/mod.rs b/core/multivm_deps/vm_virtual_blocks/src/tracers/validation/mod.rs new file mode 100644 index 000000000000..ca66aac9e730 --- /dev/null +++ b/core/multivm_deps/vm_virtual_blocks/src/tracers/validation/mod.rs @@ -0,0 +1,388 @@ +mod error; +mod params; +mod types; + +use std::sync::Arc; +use std::{collections::HashSet, marker::PhantomData}; + +use once_cell::sync::OnceCell; +use zk_evm::{ + tracing::{BeforeExecutionData, VmLocalStateData}, + zkevm_opcode_defs::{ContextOpcode, FarCallABI, LogOpcode, Opcode}, +}; + +use zksync_config::constants::{ + ACCOUNT_CODE_STORAGE_ADDRESS, BOOTLOADER_ADDRESS, CONTRACT_DEPLOYER_ADDRESS, + KECCAK256_PRECOMPILE_ADDRESS, L2_ETH_TOKEN_ADDRESS, MSG_VALUE_SIMULATOR_ADDRESS, + SYSTEM_CONTEXT_ADDRESS, +}; +use zksync_state::{StoragePtr, WriteStorage}; + +use zksync_types::{ + get_code_key, web3::signing::keccak256, AccountTreeId, Address, StorageKey, H256, U256, +}; +use zksync_utils::{ + be_bytes_to_safe_address, h256_to_account_address, u256_to_account_address, u256_to_h256, +}; + +use crate::old_vm::history_recorder::HistoryMode; +use crate::old_vm::memory::SimpleMemory; +use crate::tracers::traits::{DynTracer, ExecutionEndTracer, ExecutionProcessing, VmTracer}; +use crate::tracers::utils::{ + computational_gas_price, get_calldata_page_via_abi, print_debug_if_needed, VmHook, +}; + +pub use error::ValidationError; +pub use params::ValidationTracerParams; + +use types::NewTrustedValidationItems; +use types::ValidationTracerMode; +use zksync_types::vm_trace::ViolatedValidationRule; + +use crate::VmExecutionResultAndLogs; + +/// Tracer that is used to ensure that the validation adheres to all the 
rules
+/// to prevent DDoS attacks on the server.
+#[derive(Debug, Clone)]
+pub struct ValidationTracer<H> {
+    validation_mode: ValidationTracerMode,
+    auxilary_allowed_slots: HashSet<H256>,
+
+    user_address: Address,
+    #[allow(dead_code)]
+    paymaster_address: Address,
+    should_stop_execution: bool,
+    trusted_slots: HashSet<(Address, U256)>,
+    trusted_addresses: HashSet<Address>,
+    trusted_address_slots: HashSet<(Address, U256)>,
+    computational_gas_used: u32,
+    computational_gas_limit: u32,
+    result: Arc<OnceCell<ViolatedValidationRule>>,
+    _marker: PhantomData<fn() -> H>,
+}
+
+type ValidationRoundResult = Result<NewTrustedValidationItems, ViolatedValidationRule>;
+
+impl<H: HistoryMode> ValidationTracer<H> {
+    pub fn new(
+        params: ValidationTracerParams,
+        result: Arc<OnceCell<ViolatedValidationRule>>,
+    ) -> Self {
+        Self {
+            validation_mode: ValidationTracerMode::NoValidation,
+            auxilary_allowed_slots: Default::default(),
+
+            should_stop_execution: false,
+            user_address: params.user_address,
+            paymaster_address: params.paymaster_address,
+            trusted_slots: params.trusted_slots,
+            trusted_addresses: params.trusted_addresses,
+            trusted_address_slots: params.trusted_address_slots,
+            computational_gas_used: 0,
+            computational_gas_limit: params.computational_gas_limit,
+            result,
+            _marker: Default::default(),
+        }
+    }
+
+    fn process_validation_round_result(&mut self, result: ValidationRoundResult) {
+        match result {
+            Ok(NewTrustedValidationItems {
+                new_allowed_slots,
+                new_trusted_addresses,
+            }) => {
+                self.auxilary_allowed_slots.extend(new_allowed_slots);
+                self.trusted_addresses.extend(new_trusted_addresses);
+            }
+            Err(err) => {
+                if self.result.get().is_some() {
+                    tracing::trace!("Validation error is already set, skipping");
+                    return;
+                }
+                self.result.set(err).expect("Result should be empty");
+            }
+        }
+    }
+
+    // Checks whether such a storage access is acceptable.
+    fn is_allowed_storage_read<S: WriteStorage>(
+        &self,
+        storage: StoragePtr<S>,
+        address: Address,
+        key: U256,
+        msg_sender: Address,
+    ) -> bool {
+        // If there are no restrictions, all storage reads are valid.
+        // We also don't support the paymaster validation for now.
+        if matches!(
+            self.validation_mode,
+            ValidationTracerMode::NoValidation | ValidationTracerMode::PaymasterTxValidation
+        ) {
+            return true;
+        }
+
+        // The pair of MSG_VALUE_SIMULATOR_ADDRESS & L2_ETH_TOKEN_ADDRESS simulates the behavior of transferring ETH,
+        // which is safe under the DDoS protection rules.
+        if valid_eth_token_call(address, msg_sender) {
+            return true;
+        }
+
+        if self.trusted_slots.contains(&(address, key))
+            || self.trusted_addresses.contains(&address)
+            || self.trusted_address_slots.contains(&(address, key))
+        {
+            return true;
+        }
+
+        if touches_allowed_context(address, key) {
+            return true;
+        }
+
+        // The user is allowed to touch their own slots or slots semantically related to them.
+        let valid_users_slot = address == self.user_address
+            || u256_to_account_address(&key) == self.user_address
+            || self.auxilary_allowed_slots.contains(&u256_to_h256(key));
+        if valid_users_slot {
+            return true;
+        }
+
+        if is_constant_code_hash(address, key, storage) {
+            return true;
+        }
+
+        false
+    }
+
+    // Used to remember user-related fields (their balance/allowance/etc.).
+    // Note that it assumes that the length of the calldata is 64 bytes.
+    fn slot_to_add_from_keccak_call(
+        &self,
+        calldata: &[u8],
+        validated_address: Address,
+    ) -> Option<H256> {
+        assert_eq!(calldata.len(), 64);
+
+        let (potential_address_bytes, potential_position_bytes) = calldata.split_at(32);
+        let potential_address = be_bytes_to_safe_address(potential_address_bytes);
+
+        // If the validated_address is equal to the potential_address,
+        // then this is a request that could be used for a mapping of the kind mapping(address => ...).
+ // + // If the potential_position_bytes were already allowed before, then this keccak might be used + // for ERC-20 allowance or any other of mapping(address => mapping(...)) + if potential_address == Some(validated_address) + || self + .auxilary_allowed_slots + .contains(&H256::from_slice(potential_position_bytes)) + { + // This is request that could be used for mapping of kind mapping(address => ...) + + // We could theoretically wait for the slot number to be returned by the + // keccak256 precompile itself, but this would complicate the code even further + // so let's calculate it here. + let slot = keccak256(calldata); + + // Adding this slot to the allowed ones + Some(H256(slot)) + } else { + None + } + } + + fn check_user_restrictions( + &mut self, + state: VmLocalStateData<'_>, + data: BeforeExecutionData, + memory: &SimpleMemory, + storage: StoragePtr, + ) -> ValidationRoundResult { + if self.computational_gas_used > self.computational_gas_limit { + return Err(ViolatedValidationRule::TookTooManyComputationalGas( + self.computational_gas_limit, + )); + } + + let opcode_variant = data.opcode.variant; + match opcode_variant.opcode { + Opcode::FarCall(_) => { + let packed_abi = data.src0_value.value; + let call_destination_value = data.src1_value.value; + + let called_address = u256_to_account_address(&call_destination_value); + let far_call_abi = FarCallABI::from_u256(packed_abi); + + if called_address == KECCAK256_PRECOMPILE_ADDRESS + && far_call_abi.memory_quasi_fat_pointer.length == 64 + { + let calldata_page = get_calldata_page_via_abi( + &far_call_abi, + state.vm_local_state.callstack.current.base_memory_page, + ); + let calldata = memory.read_unaligned_bytes( + calldata_page as usize, + far_call_abi.memory_quasi_fat_pointer.start as usize, + 64, + ); + + let slot_to_add = + self.slot_to_add_from_keccak_call(&calldata, self.user_address); + + if let Some(slot) = slot_to_add { + return Ok(NewTrustedValidationItems { + new_allowed_slots: vec![slot], + ..Default::default() + }); + } + } else if called_address != self.user_address { + let code_key = get_code_key(&called_address); + let code = storage.borrow_mut().read_value(&code_key); + + if code == H256::zero() { + // The users are not allowed to call contracts with no code + return Err(ViolatedValidationRule::CalledContractWithNoCode( + called_address, + )); + } + } + } + Opcode::Context(context) => { + match context { + ContextOpcode::Meta => { + return Err(ViolatedValidationRule::TouchedUnallowedContext); + } + ContextOpcode::ErgsLeft => { + // TODO (SMA-1168): implement the correct restrictions for the gas left opcode. 
+ } + _ => {} + } + } + Opcode::Log(LogOpcode::StorageRead) => { + let key = data.src0_value.value; + let this_address = state.vm_local_state.callstack.current.this_address; + let msg_sender = state.vm_local_state.callstack.current.msg_sender; + + if !self.is_allowed_storage_read(storage.clone(), this_address, key, msg_sender) { + return Err(ViolatedValidationRule::TouchedUnallowedStorageSlots( + this_address, + key, + )); + } + + if self.trusted_address_slots.contains(&(this_address, key)) { + let storage_key = + StorageKey::new(AccountTreeId::new(this_address), u256_to_h256(key)); + + let value = storage.borrow_mut().read_value(&storage_key); + + return Ok(NewTrustedValidationItems { + new_trusted_addresses: vec![h256_to_account_address(&value)], + ..Default::default() + }); + } + } + _ => {} + } + + Ok(Default::default()) + } +} + +impl DynTracer for ValidationTracer { + fn before_execution( + &mut self, + state: VmLocalStateData<'_>, + data: BeforeExecutionData, + memory: &SimpleMemory, + storage: StoragePtr, + ) { + // For now, we support only validations for users. + if let ValidationTracerMode::UserTxValidation = self.validation_mode { + self.computational_gas_used = self + .computational_gas_used + .saturating_add(computational_gas_price(state, &data)); + + let validation_round_result = + self.check_user_restrictions(state, data, memory, storage); + self.process_validation_round_result(validation_round_result); + } + + let hook = VmHook::from_opcode_memory(&state, &data); + print_debug_if_needed(&hook, &state, memory); + + let current_mode = self.validation_mode; + match (current_mode, hook) { + (ValidationTracerMode::NoValidation, VmHook::AccountValidationEntered) => { + // Account validation can be entered when there is no prior validation (i.e. "nested" validations are not allowed) + self.validation_mode = ValidationTracerMode::UserTxValidation; + } + (ValidationTracerMode::NoValidation, VmHook::PaymasterValidationEntered) => { + // Paymaster validation can be entered when there is no prior validation (i.e. "nested" validations are not allowed) + self.validation_mode = ValidationTracerMode::PaymasterTxValidation; + } + (_, VmHook::AccountValidationEntered | VmHook::PaymasterValidationEntered) => { + panic!( + "Unallowed transition inside the validation tracer. Mode: {:#?}, hook: {:#?}", + self.validation_mode, hook + ); + } + (_, VmHook::NoValidationEntered) => { + // Validation can be always turned off + self.validation_mode = ValidationTracerMode::NoValidation; + } + (_, VmHook::ValidationStepEndeded) => { + // The validation step has ended. + self.should_stop_execution = true; + } + (_, _) => { + // The hook is not relevant to the validation tracer. Ignore. + } + } + } +} + +impl ExecutionEndTracer for ValidationTracer { + fn should_stop_execution(&self) -> bool { + self.should_stop_execution || self.result.get().is_some() + } +} + +impl ExecutionProcessing for ValidationTracer {} + +impl VmTracer for ValidationTracer { + fn save_results(&mut self, _result: &mut VmExecutionResultAndLogs) {} +} + +fn touches_allowed_context(address: Address, key: U256) -> bool { + // Context is not touched at all + if address != SYSTEM_CONTEXT_ADDRESS { + return false; + } + + // Only chain_id is allowed to be touched. 
+    key == U256::from(0u32)
+}
+
+fn is_constant_code_hash<S: WriteStorage>(
+    address: Address,
+    key: U256,
+    storage: StoragePtr<S>,
+) -> bool {
+    if address != ACCOUNT_CODE_STORAGE_ADDRESS {
+        // Not a code hash
+        return false;
+    }
+
+    let value = storage.borrow_mut().read_value(&StorageKey::new(
+        AccountTreeId::new(address),
+        u256_to_h256(key),
+    ));
+
+    value != H256::zero()
+}
+
+fn valid_eth_token_call(address: Address, msg_sender: Address) -> bool {
+    let is_valid_caller = msg_sender == MSG_VALUE_SIMULATOR_ADDRESS
+        || msg_sender == CONTRACT_DEPLOYER_ADDRESS
+        || msg_sender == BOOTLOADER_ADDRESS;
+    address == L2_ETH_TOKEN_ADDRESS && is_valid_caller
+}
diff --git a/core/multivm_deps/vm_virtual_blocks/src/tracers/validation/params.rs b/core/multivm_deps/vm_virtual_blocks/src/tracers/validation/params.rs
new file mode 100644
index 000000000000..1a4ced478b67
--- /dev/null
+++ b/core/multivm_deps/vm_virtual_blocks/src/tracers/validation/params.rs
@@ -0,0 +1,18 @@
+use std::collections::HashSet;
+use zksync_types::{Address, U256};
+
+#[derive(Debug, Clone)]
+pub struct ValidationTracerParams {
+    pub user_address: Address,
+    pub paymaster_address: Address,
+    /// Slots that are trusted (i.e. the user can access them).
+    pub trusted_slots: HashSet<(Address, U256)>,
+    /// Trusted addresses (the user can access any slots on these addresses).
+    pub trusted_addresses: HashSet<Address>,
+    /// Slots that are trusted; the value stored in such a slot is treated as a new trusted address.
+    /// They are needed to work correctly with beacon proxies, where the address of the implementation is
+    /// stored in the beacon.
+    pub trusted_address_slots: HashSet<(Address, U256)>,
+    /// Amount of computational gas that the validation step is allowed to use.
+    pub computational_gas_limit: u32,
+}
diff --git a/core/multivm_deps/vm_virtual_blocks/src/tracers/validation/types.rs b/core/multivm_deps/vm_virtual_blocks/src/tracers/validation/types.rs
new file mode 100644
index 000000000000..b9d442279927
--- /dev/null
+++ b/core/multivm_deps/vm_virtual_blocks/src/tracers/validation/types.rs
@@ -0,0 +1,18 @@
+use zksync_types::{Address, H256};
+
+#[derive(Debug, Clone, Eq, PartialEq, Copy)]
+#[allow(clippy::enum_variant_names)]
+pub(super) enum ValidationTracerMode {
+    /// Should be activated when the transaction is being validated by the user.
+    UserTxValidation,
+    /// Should be activated when the transaction is being validated by the paymaster.
+    PaymasterTxValidation,
+    /// A state in which there are no restrictions on the execution.
+    NoValidation,
+}
+
+#[derive(Debug, Clone, Default)]
+pub(super) struct NewTrustedValidationItems {
+    pub(super) new_allowed_slots: Vec<H256>,
+    pub(super) new_trusted_addresses: Vec<Address>
, +} diff --git a/core/multivm_deps/vm_virtual_blocks/src/types/inputs/execution_mode.rs b/core/multivm_deps/vm_virtual_blocks/src/types/inputs/execution_mode.rs new file mode 100644 index 000000000000..41492af6edc5 --- /dev/null +++ b/core/multivm_deps/vm_virtual_blocks/src/types/inputs/execution_mode.rs @@ -0,0 +1,15 @@ +/// Execution mode determines when the virtual machine execution should stop. +/// We are also using a different set of tracers, depending on the selected mode - for example for OneTx, +/// we use Refund Tracer, and for Bootloader we use 'DefaultTracer` in a special mode to track the Bootloader return code +/// Flow of execution: +/// VmStarted -> Enter the bootloader -> Tx1 -> Tx2 -> ... -> TxN -> +/// -> Terminate bootloader execution -> Exit bootloader -> VmStopped +#[derive(Debug, Copy, Clone)] +pub enum VmExecutionMode { + /// Stop after executing the next transaction. + OneTx, + /// Stop after executing the entire batch. + Batch, + /// Stop after executing the entire bootloader. But before you exit the bootloader. + Bootloader, +} diff --git a/core/multivm_deps/vm_virtual_blocks/src/types/inputs/l1_batch_env.rs b/core/multivm_deps/vm_virtual_blocks/src/types/inputs/l1_batch_env.rs new file mode 100644 index 000000000000..ff8433257698 --- /dev/null +++ b/core/multivm_deps/vm_virtual_blocks/src/types/inputs/l1_batch_env.rs @@ -0,0 +1,76 @@ +use std::collections::HashMap; + +use crate::L2BlockEnv; +use zk_evm::address_to_u256; +use zksync_types::{Address, L1BatchNumber, H256, U256}; +use zksync_utils::h256_to_u256; + +use crate::utils::fee::derive_base_fee_and_gas_per_pubdata; + +/// Unique params for each block +#[derive(Debug, Clone)] +pub struct L1BatchEnv { + // If previous batch hash is None, then this is the first batch + pub previous_batch_hash: Option, + pub number: L1BatchNumber, + pub timestamp: u64, + pub l1_gas_price: u64, + pub fair_l2_gas_price: u64, + pub fee_account: Address, + pub enforced_base_fee: Option, + pub first_l2_block: L2BlockEnv, +} + +impl L1BatchEnv { + pub fn base_fee(&self) -> u64 { + if let Some(base_fee) = self.enforced_base_fee { + return base_fee; + } + let (base_fee, _) = + derive_base_fee_and_gas_per_pubdata(self.l1_gas_price, self.fair_l2_gas_price); + base_fee + } +} + +impl L1BatchEnv { + const OPERATOR_ADDRESS_SLOT: usize = 0; + const PREV_BLOCK_HASH_SLOT: usize = 1; + const NEW_BLOCK_TIMESTAMP_SLOT: usize = 2; + const NEW_BLOCK_NUMBER_SLOT: usize = 3; + const L1_GAS_PRICE_SLOT: usize = 4; + const FAIR_L2_GAS_PRICE_SLOT: usize = 5; + const EXPECTED_BASE_FEE_SLOT: usize = 6; + const SHOULD_SET_NEW_BLOCK_SLOT: usize = 7; + + /// Returns the initial memory for the bootloader based on the current batch environment. 
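+    ///
+    /// Sketch of the resulting pairs, keyed by the slot constants above (values are
+    /// illustrative): slot 0 holds the fee account address, slot 1 the previous batch
+    /// hash (or zero), slot 2 the timestamp, slot 3 the batch number, slots 4-6 the
+    /// L1 gas price, the fair L2 gas price and the expected base fee, and slot 7 a
+    /// 0/1 flag that is set to 1 only when the previous batch hash is known.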
+ pub(crate) fn bootloader_initial_memory(&self) -> Vec<(usize, U256)> { + let mut base_params: HashMap = vec![ + ( + Self::OPERATOR_ADDRESS_SLOT, + address_to_u256(&self.fee_account), + ), + (Self::PREV_BLOCK_HASH_SLOT, Default::default()), + (Self::NEW_BLOCK_TIMESTAMP_SLOT, U256::from(self.timestamp)), + (Self::NEW_BLOCK_NUMBER_SLOT, U256::from(self.number.0)), + (Self::L1_GAS_PRICE_SLOT, U256::from(self.l1_gas_price)), + ( + Self::FAIR_L2_GAS_PRICE_SLOT, + U256::from(self.fair_l2_gas_price), + ), + (Self::EXPECTED_BASE_FEE_SLOT, U256::from(self.base_fee())), + (Self::SHOULD_SET_NEW_BLOCK_SLOT, U256::from(0u32)), + ] + .into_iter() + .collect(); + + if let Some(prev_block_hash) = self.previous_batch_hash { + base_params.insert(Self::PREV_BLOCK_HASH_SLOT, h256_to_u256(prev_block_hash)); + base_params.insert(Self::SHOULD_SET_NEW_BLOCK_SLOT, U256::from(1u32)); + } + base_params.into_iter().collect() + } + + pub(crate) fn block_gas_price_per_pubdata(&self) -> u64 { + derive_base_fee_and_gas_per_pubdata(self.l1_gas_price, self.fair_l2_gas_price).1 + } +} diff --git a/core/multivm_deps/vm_virtual_blocks/src/types/inputs/l2_block.rs b/core/multivm_deps/vm_virtual_blocks/src/types/inputs/l2_block.rs new file mode 100644 index 000000000000..42d0709f5ddc --- /dev/null +++ b/core/multivm_deps/vm_virtual_blocks/src/types/inputs/l2_block.rs @@ -0,0 +1,9 @@ +use zksync_types::H256; + +#[derive(Debug, Clone, Copy)] +pub struct L2BlockEnv { + pub number: u32, + pub timestamp: u64, + pub prev_block_hash: H256, + pub max_virtual_blocks_to_create: u32, +} diff --git a/core/multivm_deps/vm_virtual_blocks/src/types/inputs/mod.rs b/core/multivm_deps/vm_virtual_blocks/src/types/inputs/mod.rs new file mode 100644 index 000000000000..f88d40def4bf --- /dev/null +++ b/core/multivm_deps/vm_virtual_blocks/src/types/inputs/mod.rs @@ -0,0 +1,9 @@ +pub use execution_mode::VmExecutionMode; +pub use l1_batch_env::L1BatchEnv; +pub use l2_block::L2BlockEnv; +pub use system_env::{SystemEnv, TxExecutionMode}; + +pub(crate) mod execution_mode; +pub(crate) mod l1_batch_env; +pub(crate) mod l2_block; +pub(crate) mod system_env; diff --git a/core/multivm_deps/vm_virtual_blocks/src/types/inputs/system_env.rs b/core/multivm_deps/vm_virtual_blocks/src/types/inputs/system_env.rs new file mode 100644 index 000000000000..3f861bddce09 --- /dev/null +++ b/core/multivm_deps/vm_virtual_blocks/src/types/inputs/system_env.rs @@ -0,0 +1,52 @@ +use std::fmt::Debug; + +use zksync_contracts::BaseSystemContracts; +use zksync_types::{L2ChainId, ProtocolVersionId}; + +/// Params related to the execution process, not batch it self +#[derive(Clone)] +pub struct SystemEnv { + // Always false for VM + pub zk_porter_available: bool, + pub version: ProtocolVersionId, + pub base_system_smart_contracts: BaseSystemContracts, + pub gas_limit: u32, + pub execution_mode: TxExecutionMode, + pub default_validation_computational_gas_limit: u32, + pub chain_id: L2ChainId, +} + +impl Debug for SystemEnv { + fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { + f.debug_struct("SystemEnv") + .field("zk_porter_available", &self.zk_porter_available) + .field("version", &self.version) + .field( + "base_system_smart_contracts", + &self.base_system_smart_contracts.hashes(), + ) + .field("gas_limit", &self.gas_limit) + .field( + "default_validation_computational_gas_limit", + &self.default_validation_computational_gas_limit, + ) + .field("execution_mode", &self.execution_mode) + .field("chain_id", &self.chain_id) + .finish() + } +} + +/// Enum denoting 
the *in-server* execution mode for the bootloader transactions. +/// +/// If `EthCall` mode is chosen, the bootloader will use `mimicCall` opcode +/// to simulate the call instead of using the standard `execute` method of account. +/// This is needed to be able to behave equivalently to Ethereum without much overhead for custom account builders. +/// With `VerifyExecute` mode, transaction will be executed normally. +/// With `EstimateFee`, the bootloader will be used that has the same behavior +/// as the full `VerifyExecute` block, but errors in the account validation will be ignored. +#[derive(Debug, Clone, Copy, PartialEq, Eq)] +pub enum TxExecutionMode { + VerifyExecute, + EstimateFee, + EthCall, +} diff --git a/core/multivm_deps/vm_virtual_blocks/src/types/internals/mod.rs b/core/multivm_deps/vm_virtual_blocks/src/types/internals/mod.rs new file mode 100644 index 000000000000..601b7b8bd014 --- /dev/null +++ b/core/multivm_deps/vm_virtual_blocks/src/types/internals/mod.rs @@ -0,0 +1,7 @@ +pub(crate) use snapshot::VmSnapshot; +pub(crate) use transaction_data::TransactionData; +pub(crate) use vm_state::new_vm_state; +pub use vm_state::ZkSyncVmState; +mod snapshot; +mod transaction_data; +mod vm_state; diff --git a/core/multivm_deps/vm_virtual_blocks/src/types/internals/snapshot.rs b/core/multivm_deps/vm_virtual_blocks/src/types/internals/snapshot.rs new file mode 100644 index 000000000000..3b336d5e3541 --- /dev/null +++ b/core/multivm_deps/vm_virtual_blocks/src/types/internals/snapshot.rs @@ -0,0 +1,11 @@ +use zk_evm::vm_state::VmLocalState; + +use crate::bootloader_state::BootloaderStateSnapshot; + +/// A snapshot of the VM that holds enough information to +/// rollback the VM to some historical state. +#[derive(Debug, Clone)] +pub(crate) struct VmSnapshot { + pub(crate) local_state: VmLocalState, + pub(crate) bootloader_state: BootloaderStateSnapshot, +} diff --git a/core/multivm_deps/vm_virtual_blocks/src/types/internals/transaction_data.rs b/core/multivm_deps/vm_virtual_blocks/src/types/internals/transaction_data.rs new file mode 100644 index 000000000000..7d8598842d83 --- /dev/null +++ b/core/multivm_deps/vm_virtual_blocks/src/types/internals/transaction_data.rs @@ -0,0 +1,344 @@ +use std::convert::TryInto; +use zksync_types::ethabi::{encode, Address, Token}; +use zksync_types::fee::{encoding_len, Fee}; +use zksync_types::l1::is_l1_tx_type; +use zksync_types::l2::L2Tx; +use zksync_types::transaction_request::{PaymasterParams, TransactionRequest}; +use zksync_types::{ + l2::TransactionType, Bytes, Execute, ExecuteTransactionCommon, L2ChainId, L2TxCommonData, + Nonce, Transaction, H256, U256, +}; +use zksync_utils::address_to_h256; +use zksync_utils::{bytecode::hash_bytecode, bytes_to_be_words, h256_to_u256}; + +use crate::utils::overhead::{get_amortized_overhead, OverheadCoeficients}; + +/// This structure represents the data that is used by +/// the Bootloader to describe the transaction. +#[derive(Debug, Default, Clone)] +pub(crate) struct TransactionData { + pub(crate) tx_type: u8, + pub(crate) from: Address, + pub(crate) to: Address, + pub(crate) gas_limit: U256, + pub(crate) pubdata_price_limit: U256, + pub(crate) max_fee_per_gas: U256, + pub(crate) max_priority_fee_per_gas: U256, + pub(crate) paymaster: Address, + pub(crate) nonce: U256, + pub(crate) value: U256, + // The reserved fields that are unique for different types of transactions. + // E.g. nonce is currently used in all transaction, but it should not be mandatory + // in the long run. 
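+    // For instance (per the `From<Transaction>` conversions below): for legacy L2
+    // transactions reserved[0] carries the "check chain id" flag, while for L1 and
+    // protocol upgrade transactions reserved[0] holds `to_mint` and reserved[1] the
+    // refund recipient.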
+ pub(crate) reserved: [U256; 4], + pub(crate) data: Vec, + pub(crate) signature: Vec, + // The factory deps provided with the transaction. + // Note that *only hashes* of these bytecodes are signed by the user + // and they are used in the ABI encoding of the struct. + // TODO: include this into the tx signature as part of SMA-1010 + pub(crate) factory_deps: Vec>, + pub(crate) paymaster_input: Vec, + pub(crate) reserved_dynamic: Vec, + pub(crate) raw_bytes: Option>, +} + +impl From for TransactionData { + fn from(execute_tx: Transaction) -> Self { + match execute_tx.common_data { + ExecuteTransactionCommon::L2(common_data) => { + let nonce = U256::from_big_endian(&common_data.nonce.to_be_bytes()); + + let should_check_chain_id = if matches!( + common_data.transaction_type, + TransactionType::LegacyTransaction + ) && common_data.extract_chain_id().is_some() + { + U256([1, 0, 0, 0]) + } else { + U256::zero() + }; + + TransactionData { + tx_type: (common_data.transaction_type as u32) as u8, + from: common_data.initiator_address, + to: execute_tx.execute.contract_address, + gas_limit: common_data.fee.gas_limit, + pubdata_price_limit: common_data.fee.gas_per_pubdata_limit, + max_fee_per_gas: common_data.fee.max_fee_per_gas, + max_priority_fee_per_gas: common_data.fee.max_priority_fee_per_gas, + paymaster: common_data.paymaster_params.paymaster, + nonce, + value: execute_tx.execute.value, + reserved: [ + should_check_chain_id, + U256::zero(), + U256::zero(), + U256::zero(), + ], + data: execute_tx.execute.calldata, + signature: common_data.signature, + factory_deps: execute_tx.execute.factory_deps.unwrap_or_default(), + paymaster_input: common_data.paymaster_params.paymaster_input, + reserved_dynamic: vec![], + raw_bytes: execute_tx.raw_bytes.map(|a| a.0), + } + } + ExecuteTransactionCommon::L1(common_data) => { + let refund_recipient = h256_to_u256(address_to_h256(&common_data.refund_recipient)); + TransactionData { + tx_type: common_data.tx_format() as u8, + from: common_data.sender, + to: execute_tx.execute.contract_address, + gas_limit: common_data.gas_limit, + pubdata_price_limit: common_data.gas_per_pubdata_limit, + // It doesn't matter what we put here, since + // the bootloader does not charge anything + max_fee_per_gas: common_data.max_fee_per_gas, + max_priority_fee_per_gas: U256::zero(), + paymaster: Address::default(), + nonce: U256::from(common_data.serial_id.0), // priority op ID + value: execute_tx.execute.value, + reserved: [ + common_data.to_mint, + refund_recipient, + U256::zero(), + U256::zero(), + ], + data: execute_tx.execute.calldata, + // The signature isn't checked for L1 transactions so we don't care + signature: vec![], + factory_deps: execute_tx.execute.factory_deps.unwrap_or_default(), + paymaster_input: vec![], + reserved_dynamic: vec![], + raw_bytes: None, + } + } + ExecuteTransactionCommon::ProtocolUpgrade(common_data) => { + let refund_recipient = h256_to_u256(address_to_h256(&common_data.refund_recipient)); + TransactionData { + tx_type: common_data.tx_format() as u8, + from: common_data.sender, + to: execute_tx.execute.contract_address, + gas_limit: common_data.gas_limit, + pubdata_price_limit: common_data.gas_per_pubdata_limit, + // It doesn't matter what we put here, since + // the bootloader does not charge anything + max_fee_per_gas: common_data.max_fee_per_gas, + max_priority_fee_per_gas: U256::zero(), + paymaster: Address::default(), + nonce: U256::from(common_data.upgrade_id as u16), + value: execute_tx.execute.value, + reserved: [ + common_data.to_mint, 
+ refund_recipient, + U256::zero(), + U256::zero(), + ], + data: execute_tx.execute.calldata, + // The signature isn't checked for L1 transactions so we don't care + signature: vec![], + factory_deps: execute_tx.execute.factory_deps.unwrap_or_default(), + paymaster_input: vec![], + reserved_dynamic: vec![], + raw_bytes: None, + } + } + } + } +} + +impl TransactionData { + pub(crate) fn abi_encode_with_custom_factory_deps( + self, + factory_deps_hashes: Vec, + ) -> Vec { + encode(&[Token::Tuple(vec![ + Token::Uint(U256::from_big_endian(&self.tx_type.to_be_bytes())), + Token::Address(self.from), + Token::Address(self.to), + Token::Uint(self.gas_limit), + Token::Uint(self.pubdata_price_limit), + Token::Uint(self.max_fee_per_gas), + Token::Uint(self.max_priority_fee_per_gas), + Token::Address(self.paymaster), + Token::Uint(self.nonce), + Token::Uint(self.value), + Token::FixedArray(self.reserved.iter().copied().map(Token::Uint).collect()), + Token::Bytes(self.data), + Token::Bytes(self.signature), + Token::Array(factory_deps_hashes.into_iter().map(Token::Uint).collect()), + Token::Bytes(self.paymaster_input), + Token::Bytes(self.reserved_dynamic), + ])]) + } + + pub(crate) fn abi_encode(self) -> Vec { + let factory_deps_hashes = self + .factory_deps + .iter() + .map(|dep| h256_to_u256(hash_bytecode(dep))) + .collect(); + self.abi_encode_with_custom_factory_deps(factory_deps_hashes) + } + + pub(crate) fn into_tokens(self) -> Vec { + let bytes = self.abi_encode(); + assert!(bytes.len() % 32 == 0); + + bytes_to_be_words(bytes) + } + + pub(crate) fn effective_gas_price_per_pubdata(&self, block_gas_price_per_pubdata: u32) -> u32 { + // It is enforced by the protocol that the L1 transactions always pay the exact amount of gas per pubdata + // as was supplied in the transaction. + if is_l1_tx_type(self.tx_type) { + self.pubdata_price_limit.as_u32() + } else { + block_gas_price_per_pubdata + } + } + + pub(crate) fn overhead_gas(&self, block_gas_price_per_pubdata: u32) -> u32 { + let total_gas_limit = self.gas_limit.as_u32(); + let gas_price_per_pubdata = + self.effective_gas_price_per_pubdata(block_gas_price_per_pubdata); + + let encoded_len = encoding_len( + self.data.len() as u64, + self.signature.len() as u64, + self.factory_deps.len() as u64, + self.paymaster_input.len() as u64, + self.reserved_dynamic.len() as u64, + ); + + let coeficients = OverheadCoeficients::from_tx_type(self.tx_type); + get_amortized_overhead( + total_gas_limit, + gas_price_per_pubdata, + encoded_len, + coeficients, + ) + } + + pub(crate) fn trusted_ergs_limit(&self, _block_gas_price_per_pubdata: u64) -> U256 { + // TODO (EVM-66): correctly calculate the trusted gas limit for a transaction + self.gas_limit + } + + pub(crate) fn tx_hash(&self, chain_id: L2ChainId) -> H256 { + if is_l1_tx_type(self.tx_type) { + return self.canonical_l1_tx_hash().unwrap(); + } + + let l2_tx: L2Tx = self.clone().try_into().unwrap(); + let transaction_request: TransactionRequest = l2_tx.into(); + + // It is assumed that the TransactionData always has all the necessary components to recover the hash. 
+ transaction_request + .get_tx_hash(chain_id) + .expect("Could not recover L2 transaction hash") + } + + fn canonical_l1_tx_hash(&self) -> Result { + use zksync_types::web3::signing::keccak256; + + if !is_l1_tx_type(self.tx_type) { + return Err(TxHashCalculationError::CannotCalculateL1HashForL2Tx); + } + + let encoded_bytes = self.clone().abi_encode(); + + Ok(H256(keccak256(&encoded_bytes))) + } +} + +#[derive(Debug, Clone, Copy)] +pub(crate) enum TxHashCalculationError { + CannotCalculateL1HashForL2Tx, + CannotCalculateL2HashForL1Tx, +} + +impl TryInto for TransactionData { + type Error = TxHashCalculationError; + + fn try_into(self) -> Result { + if is_l1_tx_type(self.tx_type) { + return Err(TxHashCalculationError::CannotCalculateL2HashForL1Tx); + } + + let common_data = L2TxCommonData { + transaction_type: (self.tx_type as u32).try_into().unwrap(), + nonce: Nonce(self.nonce.as_u32()), + fee: Fee { + max_fee_per_gas: self.max_fee_per_gas, + max_priority_fee_per_gas: self.max_priority_fee_per_gas, + gas_limit: self.gas_limit, + gas_per_pubdata_limit: self.pubdata_price_limit, + }, + signature: self.signature, + input: None, + initiator_address: self.from, + paymaster_params: PaymasterParams { + paymaster: self.paymaster, + paymaster_input: self.paymaster_input, + }, + }; + let factory_deps = (!self.factory_deps.is_empty()).then_some(self.factory_deps); + let execute = Execute { + contract_address: self.to, + value: self.value, + calldata: self.data, + factory_deps, + }; + + Ok(L2Tx { + execute, + common_data, + received_timestamp_ms: 0, + raw_bytes: self.raw_bytes.map(Bytes::from), + }) + } +} + +#[cfg(test)] +mod tests { + use super::*; + use zksync_types::fee::encoding_len; + + #[test] + fn test_consistency_with_encoding_length() { + let transaction = TransactionData { + tx_type: 113, + from: Address::random(), + to: Address::random(), + gas_limit: U256::from(1u32), + pubdata_price_limit: U256::from(1u32), + max_fee_per_gas: U256::from(1u32), + max_priority_fee_per_gas: U256::from(1u32), + paymaster: Address::random(), + nonce: U256::zero(), + value: U256::zero(), + // The reserved fields that are unique for different types of transactions. + // E.g. nonce is currently used in all transaction, but it should not be mandatory + // in the long run. + reserved: [U256::zero(); 4], + data: vec![0u8; 65], + signature: vec![0u8; 75], + // The factory deps provided with the transaction. + // Note that *only hashes* of these bytecodes are signed by the user + // and they are used in the ABI encoding of the struct. 
+ // TODO: include this into the tx signature as part of SMA-1010 + factory_deps: vec![vec![0u8; 32], vec![1u8; 32]], + paymaster_input: vec![0u8; 85], + reserved_dynamic: vec![0u8; 32], + raw_bytes: None, + }; + + let assumed_encoded_len = encoding_len(65, 75, 2, 85, 32); + + let true_encoding_len = transaction.into_tokens().len(); + + assert_eq!(assumed_encoded_len, true_encoding_len); + } +} diff --git a/core/multivm_deps/vm_virtual_blocks/src/types/internals/vm_state.rs b/core/multivm_deps/vm_virtual_blocks/src/types/internals/vm_state.rs new file mode 100644 index 000000000000..60969241295c --- /dev/null +++ b/core/multivm_deps/vm_virtual_blocks/src/types/internals/vm_state.rs @@ -0,0 +1,175 @@ +use zk_evm::{ + aux_structures::MemoryPage, + aux_structures::Timestamp, + block_properties::BlockProperties, + vm_state::{CallStackEntry, PrimitiveValue, VmState}, + witness_trace::DummyTracer, + zkevm_opcode_defs::{ + system_params::{BOOTLOADER_MAX_MEMORY, INITIAL_FRAME_FORMAL_EH_LOCATION}, + FatPointer, BOOTLOADER_CALLDATA_PAGE, + }, +}; + +use zk_evm::zkevm_opcode_defs::{ + BOOTLOADER_BASE_PAGE, BOOTLOADER_CODE_PAGE, STARTING_BASE_PAGE, STARTING_TIMESTAMP, +}; +use zksync_config::constants::BOOTLOADER_ADDRESS; +use zksync_state::{StoragePtr, WriteStorage}; +use zksync_types::block::legacy_miniblock_hash; +use zksync_types::{zkevm_test_harness::INITIAL_MONOTONIC_CYCLE_COUNTER, Address, MiniblockNumber}; +use zksync_utils::h256_to_u256; + +use crate::bootloader_state::BootloaderState; +use crate::constants::BOOTLOADER_HEAP_PAGE; +use crate::old_vm::{ + event_sink::InMemoryEventSink, history_recorder::HistoryMode, memory::SimpleMemory, + oracles::decommitter::DecommitterOracle, oracles::precompile::PrecompilesProcessorWithHistory, + oracles::storage::StorageOracle, +}; +use crate::types::inputs::{L1BatchEnv, SystemEnv}; +use crate::utils::l2_blocks::{assert_next_block, load_last_l2_block}; +use crate::L2Block; + +pub type ZkSyncVmState = VmState< + StorageOracle, + SimpleMemory, + InMemoryEventSink, + PrecompilesProcessorWithHistory, + DecommitterOracle, + DummyTracer, +>; + +fn formal_calldata_abi() -> PrimitiveValue { + let fat_pointer = FatPointer { + offset: 0, + memory_page: BOOTLOADER_CALLDATA_PAGE, + start: 0, + length: 0, + }; + + PrimitiveValue { + value: fat_pointer.to_u256(), + is_pointer: true, + } +} + +/// Initialize the vm state and all necessary oracles +pub(crate) fn new_vm_state( + storage: StoragePtr, + system_env: &SystemEnv, + l1_batch_env: &L1BatchEnv, +) -> (ZkSyncVmState, BootloaderState) { + let last_l2_block = if let Some(last_l2_block) = load_last_l2_block(storage.clone()) { + last_l2_block + } else { + // This is the scenario of either the first L2 block ever or + // the first block after the upgrade for support of L2 blocks. 
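+        // In that case, a stand-in for the "previous" block is synthesized below: its
+        // number is first_l2_block.number - 1, its timestamp is 0, and its hash is the
+        // legacy miniblock hash of that number.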
+ L2Block { + number: l1_batch_env.first_l2_block.number.saturating_sub(1), + timestamp: 0, + hash: legacy_miniblock_hash(MiniblockNumber(l1_batch_env.first_l2_block.number) - 1), + } + }; + + assert_next_block(&last_l2_block, &l1_batch_env.first_l2_block); + let first_l2_block = l1_batch_env.first_l2_block; + let storage_oracle: StorageOracle = StorageOracle::new(storage.clone()); + let mut memory = SimpleMemory::default(); + let event_sink = InMemoryEventSink::default(); + let precompiles_processor = PrecompilesProcessorWithHistory::::default(); + let mut decommittment_processor: DecommitterOracle = + DecommitterOracle::new(storage); + + decommittment_processor.populate( + vec![( + h256_to_u256(system_env.base_system_smart_contracts.default_aa.hash), + system_env + .base_system_smart_contracts + .default_aa + .code + .clone(), + )], + Timestamp(0), + ); + + memory.populate( + vec![( + BOOTLOADER_CODE_PAGE, + system_env + .base_system_smart_contracts + .bootloader + .code + .clone(), + )], + Timestamp(0), + ); + + let bootloader_initial_memory = l1_batch_env.bootloader_initial_memory(); + memory.populate_page( + BOOTLOADER_HEAP_PAGE as usize, + bootloader_initial_memory.clone(), + Timestamp(0), + ); + + let mut vm = VmState::empty_state( + storage_oracle, + memory, + event_sink, + precompiles_processor, + decommittment_processor, + DummyTracer, + BlockProperties { + default_aa_code_hash: h256_to_u256( + system_env.base_system_smart_contracts.default_aa.hash, + ), + zkporter_is_available: system_env.zk_porter_available, + }, + ); + + vm.local_state.callstack.current.ergs_remaining = system_env.gas_limit; + + let initial_context = CallStackEntry { + this_address: BOOTLOADER_ADDRESS, + msg_sender: Address::zero(), + code_address: BOOTLOADER_ADDRESS, + base_memory_page: MemoryPage(BOOTLOADER_BASE_PAGE), + code_page: MemoryPage(BOOTLOADER_CODE_PAGE), + sp: 0, + pc: 0, + // Note, that since the results are written at the end of the memory + // it is needed to have the entire heap available from the beginning + heap_bound: BOOTLOADER_MAX_MEMORY, + aux_heap_bound: BOOTLOADER_MAX_MEMORY, + exception_handler_location: INITIAL_FRAME_FORMAL_EH_LOCATION, + ergs_remaining: system_env.gas_limit, + this_shard_id: 0, + caller_shard_id: 0, + code_shard_id: 0, + is_static: false, + is_local_frame: false, + context_u128_value: 0, + }; + + // We consider the contract that is being run as a bootloader + vm.push_bootloader_context(INITIAL_MONOTONIC_CYCLE_COUNTER - 1, initial_context); + vm.local_state.timestamp = STARTING_TIMESTAMP; + vm.local_state.memory_page_counter = STARTING_BASE_PAGE; + vm.local_state.monotonic_cycle_counter = INITIAL_MONOTONIC_CYCLE_COUNTER; + vm.local_state.current_ergs_per_pubdata_byte = 0; + vm.local_state.registers[0] = formal_calldata_abi(); + + // Deleting all the historical records brought by the initial + // initialization of the VM to make them permanent. 
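+    // That is, the rollback history accumulated during bootstrap is dropped, so that
+    // later snapshots/rollbacks can never revert this initial setup.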
+ vm.decommittment_processor.delete_history(); + vm.event_sink.delete_history(); + vm.storage.delete_history(); + vm.memory.delete_history(); + vm.precompiles_processor.delete_history(); + let bootloader_state = BootloaderState::new( + system_env.execution_mode, + bootloader_initial_memory, + first_l2_block, + ); + + (vm, bootloader_state) +} diff --git a/core/multivm_deps/vm_virtual_blocks/src/types/mod.rs b/core/multivm_deps/vm_virtual_blocks/src/types/mod.rs new file mode 100644 index 000000000000..cd31e7dc5c59 --- /dev/null +++ b/core/multivm_deps/vm_virtual_blocks/src/types/mod.rs @@ -0,0 +1,3 @@ +pub(crate) mod inputs; +pub(crate) mod internals; +pub(crate) mod outputs; diff --git a/core/multivm_deps/vm_virtual_blocks/src/types/outputs/execution_result.rs b/core/multivm_deps/vm_virtual_blocks/src/types/outputs/execution_result.rs new file mode 100644 index 000000000000..bb46cb41ec81 --- /dev/null +++ b/core/multivm_deps/vm_virtual_blocks/src/types/outputs/execution_result.rs @@ -0,0 +1,83 @@ +use crate::{Halt, VmExecutionStatistics, VmRevertReason}; +use zksync_config::constants::PUBLISH_BYTECODE_OVERHEAD; +use zksync_types::event::{extract_long_l2_to_l1_messages, extract_published_bytecodes}; +use zksync_types::tx::tx_execution_info::VmExecutionLogs; +use zksync_types::tx::ExecutionMetrics; +use zksync_types::Transaction; +use zksync_utils::bytecode::bytecode_len_in_bytes; + +/// Refunds produced for the user. +#[derive(Debug, Clone, Default)] +pub struct Refunds { + pub gas_refunded: u32, + pub operator_suggested_refund: u32, +} + +/// Result and logs of the VM execution. +#[derive(Debug, Clone)] +pub struct VmExecutionResultAndLogs { + pub result: ExecutionResult, + pub logs: VmExecutionLogs, + pub statistics: VmExecutionStatistics, + pub refunds: Refunds, +} + +#[derive(Debug, Clone, PartialEq)] +pub enum ExecutionResult { + /// Returned successfully + Success { output: Vec }, + /// Reverted by contract + Revert { output: VmRevertReason }, + /// Reverted for various reasons + Halt { reason: Halt }, +} + +impl ExecutionResult { + /// Returns `true` if the execution was failed. + pub fn is_failed(&self) -> bool { + matches!(self, Self::Revert { .. } | Self::Halt { .. 
}) + } +} + +impl VmExecutionResultAndLogs { + pub fn get_execution_metrics(&self, tx: Option<&Transaction>) -> ExecutionMetrics { + let contracts_deployed = tx + .map(|tx| { + tx.execute + .factory_deps + .as_ref() + .map_or(0, |deps| deps.len() as u16) + }) + .unwrap_or(0); + + // We published the data as ABI-encoded `bytes`, so the total length is: + // - message length in bytes, rounded up to a multiple of 32 + // - 32 bytes of encoded offset + // - 32 bytes of encoded length + let l2_l1_long_messages = extract_long_l2_to_l1_messages(&self.logs.events) + .iter() + .map(|event| (event.len() + 31) / 32 * 32 + 64) + .sum(); + + let published_bytecode_bytes = extract_published_bytecodes(&self.logs.events) + .iter() + .map(|bytecodehash| { + bytecode_len_in_bytes(*bytecodehash) + PUBLISH_BYTECODE_OVERHEAD as usize + }) + .sum(); + + ExecutionMetrics { + gas_used: self.statistics.gas_used as usize, + published_bytecode_bytes, + l2_l1_long_messages, + l2_l1_logs: self.logs.l2_to_l1_logs.len(), + contracts_used: self.statistics.contracts_used, + contracts_deployed, + vm_events: self.logs.events.len(), + storage_logs: self.logs.storage_logs.len(), + total_log_queries: self.statistics.total_log_queries, + cycles_used: self.statistics.cycles_used, + computational_gas_used: self.statistics.computational_gas_used, + } + } +} diff --git a/core/multivm_deps/vm_virtual_blocks/src/types/outputs/execution_state.rs b/core/multivm_deps/vm_virtual_blocks/src/types/outputs/execution_state.rs new file mode 100644 index 000000000000..3ae36a179672 --- /dev/null +++ b/core/multivm_deps/vm_virtual_blocks/src/types/outputs/execution_state.rs @@ -0,0 +1,22 @@ +use zksync_types::l2_to_l1_log::L2ToL1Log; +use zksync_types::{StorageLogQuery, VmEvent, U256}; + +/// State of the VM since the start of the batch execution. +#[derive(Debug, Clone, PartialEq)] +pub struct CurrentExecutionState { + /// Events produced by the VM. + pub events: Vec, + /// Storage logs produced by the VM. + pub storage_log_queries: Vec, + /// Hashes of the contracts used by the VM. + pub used_contract_hashes: Vec, + /// L2 to L1 logs produced by the VM. + pub l2_to_l1_logs: Vec, + /// Number of log queries produced by the VM. Including l2_to_l1 logs, storage logs and events. + pub total_log_queries: usize, + /// Number of cycles used by the VM. + pub cycles_used: u32, +} + +/// Bootloader Memory of the VM. +pub type BootloaderMemory = Vec<(usize, U256)>; diff --git a/core/multivm_deps/vm_virtual_blocks/src/types/outputs/finished_l1batch.rs b/core/multivm_deps/vm_virtual_blocks/src/types/outputs/finished_l1batch.rs new file mode 100644 index 000000000000..064d4c2d6586 --- /dev/null +++ b/core/multivm_deps/vm_virtual_blocks/src/types/outputs/finished_l1batch.rs @@ -0,0 +1,12 @@ +use crate::{BootloaderMemory, CurrentExecutionState, VmExecutionResultAndLogs}; + +/// State of the VM after the batch execution. +#[derive(Debug, Clone)] +pub struct FinishedL1Batch { + /// Result of the execution of the block tip part of the batch. + pub block_tip_execution_result: VmExecutionResultAndLogs, + /// State of the VM after the execution of the last transaction. + pub final_execution_state: CurrentExecutionState, + /// Memory of the bootloader with all executed transactions. Could be optional for old versions of the VM. 
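+    /// (`BootloaderMemory` is the `Vec<(usize, U256)>` alias defined in the outputs module.)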
+    pub final_bootloader_memory: Option<BootloaderMemory>,
+}
diff --git a/core/multivm_deps/vm_virtual_blocks/src/types/outputs/l2_block.rs b/core/multivm_deps/vm_virtual_blocks/src/types/outputs/l2_block.rs
new file mode 100644
index 000000000000..ccbcba15f654
--- /dev/null
+++ b/core/multivm_deps/vm_virtual_blocks/src/types/outputs/l2_block.rs
@@ -0,0 +1,7 @@
+use zksync_types::H256;
+
+pub struct L2Block {
+    pub number: u32,
+    pub timestamp: u64,
+    pub hash: H256,
+}
diff --git a/core/multivm_deps/vm_virtual_blocks/src/types/outputs/mod.rs b/core/multivm_deps/vm_virtual_blocks/src/types/outputs/mod.rs
new file mode 100644
index 000000000000..8aa029cb53fa
--- /dev/null
+++ b/core/multivm_deps/vm_virtual_blocks/src/types/outputs/mod.rs
@@ -0,0 +1,11 @@
+mod execution_result;
+mod execution_state;
+mod finished_l1batch;
+mod l2_block;
+mod statistic;
+
+pub use execution_result::{ExecutionResult, Refunds, VmExecutionResultAndLogs};
+pub use execution_state::{BootloaderMemory, CurrentExecutionState};
+pub use finished_l1batch::FinishedL1Batch;
+pub use l2_block::L2Block;
+pub use statistic::{VmExecutionStatistics, VmMemoryMetrics};
diff --git a/core/multivm_deps/vm_virtual_blocks/src/types/outputs/statistic.rs b/core/multivm_deps/vm_virtual_blocks/src/types/outputs/statistic.rs
new file mode 100644
index 000000000000..8f03678315f9
--- /dev/null
+++ b/core/multivm_deps/vm_virtual_blocks/src/types/outputs/statistic.rs
@@ -0,0 +1,26 @@
+/// Statistics of the tx execution.
+#[derive(Debug, Default, Clone)]
+pub struct VmExecutionStatistics {
+    /// Number of contracts used by the VM during the tx execution.
+    pub contracts_used: usize,
+    /// Cycles used by the VM during the tx execution.
+    pub cycles_used: u32,
+    /// Gas used by the VM during the tx execution.
+    pub gas_used: u32,
+    /// Computational gas used by the VM during the tx execution.
+    pub computational_gas_used: u32,
+    /// Number of log queries produced by the VM during the tx execution.
+    pub total_log_queries: usize,
+}
+
+/// Oracle metrics of the VM.
+pub struct VmMemoryMetrics {
+    pub event_sink_inner: usize,
+    pub event_sink_history: usize,
+    pub memory_inner: usize,
+    pub memory_history: usize,
+    pub decommittment_processor_inner: usize,
+    pub decommittment_processor_history: usize,
+    pub storage_inner: usize,
+    pub storage_history: usize,
+}
diff --git a/core/multivm_deps/vm_virtual_blocks/src/utils/fee.rs b/core/multivm_deps/vm_virtual_blocks/src/utils/fee.rs
new file mode 100644
index 000000000000..e2e0bdbe599b
--- /dev/null
+++ b/core/multivm_deps/vm_virtual_blocks/src/utils/fee.rs
@@ -0,0 +1,29 @@
+//! Utility functions for the VM.
+use zksync_config::constants::MAX_GAS_PER_PUBDATA_BYTE;
+use zksync_utils::ceil_div;
+
+use crate::old_vm::utils::eth_price_per_pubdata_byte;
+
+/// Calculates the amount of gas required to publish one byte of pubdata.
+pub fn base_fee_to_gas_per_pubdata(l1_gas_price: u64, base_fee: u64) -> u64 {
+    let eth_price_per_pubdata_byte = eth_price_per_pubdata_byte(l1_gas_price);
+
+    ceil_div(eth_price_per_pubdata_byte, base_fee)
+}
+
+/// Calculates the base fee and gas per pubdata for the given L1 gas price.
+pub fn derive_base_fee_and_gas_per_pubdata(l1_gas_price: u64, fair_gas_price: u64) -> (u64, u64) {
+    let eth_price_per_pubdata_byte = eth_price_per_pubdata_byte(l1_gas_price);
+
+    // The baseFee is set in such a way that it is always possible for a transaction to
+    // publish enough public data while compensating us for it.
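+    // Consequence of the `max` below: base_fee >= ceil(eth_price_per_pubdata_byte / MAX_GAS_PER_PUBDATA_BYTE),
+    // hence ceil(eth_price_per_pubdata_byte / base_fee) <= MAX_GAS_PER_PUBDATA_BYTE, i.e. the
+    // derived gas per pubdata byte can never exceed the protocol maximum.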
+ let base_fee = std::cmp::max( + fair_gas_price, + ceil_div(eth_price_per_pubdata_byte, MAX_GAS_PER_PUBDATA_BYTE), + ); + + ( + base_fee, + base_fee_to_gas_per_pubdata(l1_gas_price, base_fee), + ) +} diff --git a/core/multivm_deps/vm_virtual_blocks/src/utils/l2_blocks.rs b/core/multivm_deps/vm_virtual_blocks/src/utils/l2_blocks.rs new file mode 100644 index 000000000000..ca33bce0dfd0 --- /dev/null +++ b/core/multivm_deps/vm_virtual_blocks/src/utils/l2_blocks.rs @@ -0,0 +1,93 @@ +use crate::{L2Block, L2BlockEnv}; +use zksync_config::constants::{ + SYSTEM_CONTEXT_ADDRESS, SYSTEM_CONTEXT_CURRENT_L2_BLOCK_HASHES_POSITION, + SYSTEM_CONTEXT_CURRENT_L2_BLOCK_INFO_POSITION, SYSTEM_CONTEXT_CURRENT_TX_ROLLING_HASH_POSITION, + SYSTEM_CONTEXT_STORED_L2_BLOCK_HASHES, +}; +use zksync_state::{ReadStorage, StoragePtr}; +use zksync_types::block::unpack_block_info; +use zksync_types::web3::signing::keccak256; +use zksync_types::{AccountTreeId, MiniblockNumber, StorageKey, H256, U256}; +use zksync_utils::{h256_to_u256, u256_to_h256}; + +pub(crate) fn get_l2_block_hash_key(block_number: u32) -> StorageKey { + let position = h256_to_u256(SYSTEM_CONTEXT_CURRENT_L2_BLOCK_HASHES_POSITION) + + U256::from(block_number % SYSTEM_CONTEXT_STORED_L2_BLOCK_HASHES); + StorageKey::new( + AccountTreeId::new(SYSTEM_CONTEXT_ADDRESS), + u256_to_h256(position), + ) +} + +pub(crate) fn assert_next_block(prev_block: &L2Block, next_block: &L2BlockEnv) { + if prev_block.number == 0 { + // Special case for the first block it can have the same timestamp as the previous block. + assert!(prev_block.timestamp <= next_block.timestamp); + } else { + assert_eq!(prev_block.number + 1, next_block.number); + assert!(prev_block.timestamp < next_block.timestamp); + } + assert_eq!(prev_block.hash, next_block.prev_block_hash); +} + +/// Returns the hash of the l2_block. +/// `txs_rolling_hash` of the l2_block is calculated the following way: +/// If the l2_block has 0 transactions, then `txs_rolling_hash` is equal to `H256::zero()`. +/// If the l2_block has i transactions, then `txs_rolling_hash` is equal to `H(H_{i-1}, H(tx_i))`, where +/// `H_{i-1}` is the `txs_rolling_hash` of the first i-1 transactions. 
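+///
+/// Concretely, the digest below is `keccak256(be(number) ++ be(timestamp) ++ prev_hash ++ txs_rolling_hash)`,
+/// with each component padded to 32 bytes (128 bytes in total).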
+pub(crate) fn l2_block_hash(
+    l2_block_number: MiniblockNumber,
+    l2_block_timestamp: u64,
+    prev_l2_block_hash: H256,
+    txs_rolling_hash: H256,
+) -> H256 {
+    let mut digest: [u8; 128] = [0u8; 128];
+    U256::from(l2_block_number.0).to_big_endian(&mut digest[0..32]);
+    U256::from(l2_block_timestamp).to_big_endian(&mut digest[32..64]);
+    digest[64..96].copy_from_slice(prev_l2_block_hash.as_bytes());
+    digest[96..128].copy_from_slice(txs_rolling_hash.as_bytes());
+
+    H256(keccak256(&digest))
+}
+
+/// Gets the last saved block from storage.
+pub fn load_last_l2_block<S: ReadStorage>(storage: StoragePtr<S>) -> Option<L2Block> {
+    // Get the block number and timestamp.
+    let current_l2_block_info_key = StorageKey::new(
+        AccountTreeId::new(SYSTEM_CONTEXT_ADDRESS),
+        SYSTEM_CONTEXT_CURRENT_L2_BLOCK_INFO_POSITION,
+    );
+    let mut storage_ptr = storage.borrow_mut();
+    let current_l2_block_info = storage_ptr.read_value(&current_l2_block_info_key);
+    let (block_number, block_timestamp) = unpack_block_info(h256_to_u256(current_l2_block_info));
+    let block_number = block_number as u32;
+    if block_number == 0 {
+        // The block does not exist yet.
+        return None;
+    }
+
+    // Get the previous block hash.
+    let position = get_l2_block_hash_key(block_number - 1);
+    let prev_block_hash = storage_ptr.read_value(&position);
+
+    // Get the current tx rolling hash.
+    let position = StorageKey::new(
+        AccountTreeId::new(SYSTEM_CONTEXT_ADDRESS),
+        SYSTEM_CONTEXT_CURRENT_TX_ROLLING_HASH_POSITION,
+    );
+    let current_tx_rolling_hash = storage_ptr.read_value(&position);
+
+    // Calculate the current hash.
+    let current_block_hash = l2_block_hash(
+        MiniblockNumber(block_number),
+        block_timestamp,
+        prev_block_hash,
+        current_tx_rolling_hash,
+    );
+
+    Some(L2Block {
+        number: block_number,
+        timestamp: block_timestamp,
+        hash: current_block_hash,
+    })
+}
diff --git a/core/multivm_deps/vm_virtual_blocks/src/utils/mod.rs b/core/multivm_deps/vm_virtual_blocks/src/utils/mod.rs
new file mode 100644
index 000000000000..15ffa92b5490
--- /dev/null
+++ b/core/multivm_deps/vm_virtual_blocks/src/utils/mod.rs
@@ -0,0 +1,5 @@
+/// Utility functions for the VM.
+pub mod fee;
+pub mod l2_blocks;
+pub mod overhead;
+pub mod transaction_encoding;
diff --git a/core/multivm_deps/vm_virtual_blocks/src/utils/overhead.rs b/core/multivm_deps/vm_virtual_blocks/src/utils/overhead.rs
new file mode 100644
index 000000000000..6fdce9c724eb
--- /dev/null
+++ b/core/multivm_deps/vm_virtual_blocks/src/utils/overhead.rs
@@ -0,0 +1,347 @@
+use crate::constants::{BLOCK_OVERHEAD_GAS, BLOCK_OVERHEAD_PUBDATA, BOOTLOADER_TX_ENCODING_SPACE};
+use zk_evm::zkevm_opcode_defs::system_params::MAX_TX_ERGS_LIMIT;
+use zksync_config::constants::{MAX_L2_TX_GAS_LIMIT, MAX_TXS_IN_BLOCK};
+use zksync_types::l1::is_l1_tx_type;
+use zksync_types::U256;
+use zksync_utils::ceil_div_u256;
+
+/// Derives the overhead for processing transactions in a block.
+pub fn derive_overhead(
+    gas_limit: u32,
+    gas_price_per_pubdata: u32,
+    encoded_len: usize,
+    coeficients: OverheadCoeficients,
+) -> u32 {
+    // Even if the gas limit is greater than MAX_TX_ERGS_LIMIT, we assume that everything beyond MAX_TX_ERGS_LIMIT
+    // will be spent entirely on publishing bytecodes, so we derive the overhead solely based on the capped value.
+    let gas_limit = std::cmp::min(MAX_TX_ERGS_LIMIT, gas_limit);
+
+    // Using the large U256 type to avoid overflow.
+    let max_block_overhead = U256::from(block_overhead_gas(gas_price_per_pubdata));
+    let gas_limit = U256::from(gas_limit);
+    let encoded_len = U256::from(encoded_len);
+
+    // The MAX_TX_ERGS_LIMIT is chosen so that it can fully saturate the single-instance circuits:
+    // within MAX_TX_ERGS_LIMIT it is possible to fully saturate all the single-instance circuits.
+    let overhead_for_single_instance_circuits =
+        ceil_div_u256(gas_limit * max_block_overhead, MAX_TX_ERGS_LIMIT.into());
+
+    // The overhead for occupying the bootloader memory
+    let overhead_for_length = ceil_div_u256(
+        encoded_len * max_block_overhead,
+        BOOTLOADER_TX_ENCODING_SPACE.into(),
+    );
+
+    // The overhead for occupying a single tx slot
+    let tx_slot_overhead = ceil_div_u256(max_block_overhead, MAX_TXS_IN_BLOCK.into());
+
+    // We use "ceil" here for formal reasons, to allow an easier approach to calculating the overhead in O(1).
+    // let max_pubdata_in_tx = ceil_div_u256(gas_limit, gas_price_per_pubdata);
+
+    // The maximal potential overhead from pubdata
+    // TODO (EVM-67): possibly use overhead for pubdata
+    // let pubdata_overhead = ceil_div_u256(
+    //     max_pubdata_in_tx * max_block_overhead,
+    //     MAX_PUBDATA_PER_BLOCK.into(),
+    // );
+
+    vec![
+        (coeficients.ergs_limit_overhead_coeficient
+            * overhead_for_single_instance_circuits.as_u32() as f64)
+            .floor() as u32,
+        (coeficients.bootloader_memory_overhead_coeficient * overhead_for_length.as_u32() as f64)
+            .floor() as u32,
+        (coeficients.slot_overhead_coeficient * tx_slot_overhead.as_u32() as f64) as u32,
+    ]
+    .into_iter()
+    .max()
+    .unwrap()
+}
+
+/// Contains the coefficients with which the overhead for transactions will be calculated.
+/// All of the coefficients should be <= 1. They are here to provide a certain "discount" for normal transactions,
+/// at the risk of malicious transactions closing the block prematurely.
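+/// For reference, the constructors below use (slot, memory, ergs) coefficients of
+/// (1.0, 1.0, 1.0) for L1->L2 transactions and (1.0, 1.0, 0.1) for L2 transactions.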
+/// IMPORTANT: to perform correct computations, `MAX_TX_ERGS_LIMIT / coeficients.ergs_limit_overhead_coeficient` MUST +/// result in an integer number +#[derive(Debug, Clone, Copy)] +pub struct OverheadCoeficients { + slot_overhead_coeficient: f64, + bootloader_memory_overhead_coeficient: f64, + ergs_limit_overhead_coeficient: f64, +} + +impl OverheadCoeficients { + // This method ensures that the parameters keep the required invariants + fn new_checked( + slot_overhead_coeficient: f64, + bootloader_memory_overhead_coeficient: f64, + ergs_limit_overhead_coeficient: f64, + ) -> Self { + assert!( + (MAX_TX_ERGS_LIMIT as f64 / ergs_limit_overhead_coeficient).round() + == MAX_TX_ERGS_LIMIT as f64 / ergs_limit_overhead_coeficient, + "MAX_TX_ERGS_LIMIT / ergs_limit_overhead_coeficient must be an integer" + ); + + Self { + slot_overhead_coeficient, + bootloader_memory_overhead_coeficient, + ergs_limit_overhead_coeficient, + } + } + + // L1->L2 do not receive any discounts + fn new_l1() -> Self { + OverheadCoeficients::new_checked(1.0, 1.0, 1.0) + } + + fn new_l2() -> Self { + OverheadCoeficients::new_checked( + 1.0, 1.0, + // For L2 transactions we allow a certain default discount with regard to the number of ergs. + // Multiinstance circuits can in theory be spawned infinite times, while projected future limitations + // on gas per pubdata allow for roughly 800kk gas per L1 batch, so the rough trust "discount" on the proof's part + // to be paid by the users is 0.1. + 0.1, + ) + } + + /// Return the coeficients for the given transaction type + pub fn from_tx_type(tx_type: u8) -> Self { + if is_l1_tx_type(tx_type) { + Self::new_l1() + } else { + Self::new_l2() + } + } +} + +/// This method returns the overhead for processing the block +pub(crate) fn get_amortized_overhead( + total_gas_limit: u32, + gas_per_pubdata_byte_limit: u32, + encoded_len: usize, + coeficients: OverheadCoeficients, +) -> u32 { + // Using large U256 type to prevent overflows. + let overhead_for_block_gas = U256::from(block_overhead_gas(gas_per_pubdata_byte_limit)); + let total_gas_limit = U256::from(total_gas_limit); + let encoded_len = U256::from(encoded_len); + + // Derivation of overhead consists of 4 parts: + // 1. The overhead for taking up a transaction's slot. (O1): O1 = 1 / MAX_TXS_IN_BLOCK + // 2. The overhead for taking up the bootloader's memory (O2): O2 = encoded_len / BOOTLOADER_TX_ENCODING_SPACE + // 3. The overhead for possible usage of pubdata. (O3): O3 = max_pubdata_in_tx / MAX_PUBDATA_PER_BLOCK + // 4. The overhead for possible usage of all the single-instance circuits. (O4): O4 = gas_limit / MAX_TX_ERGS_LIMIT + // + // The maximum of these is taken to derive the part of the block's overhead to be paid by the users: + // + // max_overhead = max(O1, O2, O3, O4) + // overhead_gas = ceil(max_overhead * overhead_for_block_gas). Thus, overhead_gas is a function of + // tx_gas_limit, gas_per_pubdata_byte_limit and encoded_len. + // + // While it is possible to derive the overhead with binary search in O(log n), it is too expensive to be done + // on L1, so here is a reference implementation of finding the overhead for transaction in O(1): + // + // Given total_gas_limit = tx_gas_limit + overhead_gas, we need to find overhead_gas and tx_gas_limit, such that: + // 1. overhead_gas is maximal possible (the operator is paid fairly) + // 2. 
overhead_gas(tx_gas_limit, gas_per_pubdata_byte_limit, encoded_len) >= overhead_gas (the user does not overpay)
+    // The third part boils down to the following 4 inequalities (at least one of these must hold):
+    // ceil(O1 * overhead_for_block_gas) >= overhead_gas
+    // ceil(O2 * overhead_for_block_gas) >= overhead_gas
+    // ceil(O3 * overhead_for_block_gas) >= overhead_gas
+    // ceil(O4 * overhead_for_block_gas) >= overhead_gas
+    //
+    // Now, we need to solve each of these separately:
+
+    // 1. The overhead for occupying a single tx slot is a constant:
+    let tx_slot_overhead = {
+        let tx_slot_overhead =
+            ceil_div_u256(overhead_for_block_gas, MAX_TXS_IN_BLOCK.into()).as_u32();
+        (coeficients.slot_overhead_coeficient * tx_slot_overhead as f64).floor() as u32
+    };
+
+    // 2. The overhead for occupying the bootloader memory can be derived from encoded_len
+    let overhead_for_length = {
+        let overhead_for_length = ceil_div_u256(
+            encoded_len * overhead_for_block_gas,
+            BOOTLOADER_TX_ENCODING_SPACE.into(),
+        )
+        .as_u32();
+
+        (coeficients.bootloader_memory_overhead_coeficient * overhead_for_length as f64).floor()
+            as u32
+    };
+
+    // TODO (EVM-67): possibly include the overhead for pubdata. The formula below has not been properly maintained,
+    // since the pubdata is not published. If we decide to use the pubdata overhead, it needs to be updated.
+    // 3. ceil(O3 * overhead_for_block_gas) >= overhead_gas
+    // O3 = max_pubdata_in_tx / MAX_PUBDATA_PER_BLOCK = ceil(gas_limit / gas_per_pubdata_byte_limit) / MAX_PUBDATA_PER_BLOCK
+    // >= (gas_limit / (gas_per_pubdata_byte_limit * MAX_PUBDATA_PER_BLOCK). Dropping the `ceil`, while it may provide a marginally lower
+    // overhead to the operator, gives a substantially easier formula to work with.
+    //
+    // For better clarity, let's denote gas_limit = GL, MAX_PUBDATA_PER_BLOCK = MP, gas_per_pubdata_byte_limit = EP, overhead_for_block_gas = OB, total_gas_limit = TL, overhead_gas = OE
+    // ceil(OB * (TL - OE) / (EP * MP)) >= OE
+    //
+    // OB * (TL - OE) / (MP * EP) > OE - 1
+    // OB * (TL - OE) > (OE - 1) * EP * MP
+    // OB * TL + EP * MP > OE * EP * MP + OE * OB
+    // (OB * TL + EP * MP) / (EP * MP + OB) > OE
+    // OE = floor((OB * TL + EP * MP) / (EP * MP + OB)) with a possible -1 if the division is without remainder
+    // let overhead_for_pubdata = {
+    //     let numerator: U256 = overhead_for_block_gas * total_gas_limit
+    //         + gas_per_pubdata_byte_limit * U256::from(MAX_PUBDATA_PER_BLOCK);
+    //     let denominator =
+    //         gas_per_pubdata_byte_limit * U256::from(MAX_PUBDATA_PER_BLOCK) + overhead_for_block_gas;
+
+    //     // Corner case: if `total_gas_limit` = `gas_per_pubdata_byte_limit` = 0
+    //     // then the numerator will be 0 and subtracting 1 will cause a panic, so we just return a zero.
+    //     if numerator.is_zero() {
+    //         0.into()
+    //     } else {
+    //         (numerator - 1) / denominator
+    //     }
+    // };
+
+    // 4. K * ceil(O4 * overhead_for_block_gas) >= overhead_gas, where K is the discount
+    // O4 = gas_limit / MAX_TX_ERGS_LIMIT.
+    // Using the notation from the previous derivation:
+    // ceil(OB * GL / MAX_TX_ERGS_LIMIT) >= (OE / K)
+    // ceil(OB * (TL - OE) / MAX_TX_ERGS_LIMIT) >= (OE / K)
+    // OB * (TL - OE) / MAX_TX_ERGS_LIMIT > (OE / K) - 1
+    // OB * (TL - OE) > (OE / K) * MAX_TX_ERGS_LIMIT - MAX_TX_ERGS_LIMIT
+    // OB * TL + MAX_TX_ERGS_LIMIT > OE * (MAX_TX_ERGS_LIMIT / K + OB)
+    // OE = floor((OB * TL + MAX_TX_ERGS_LIMIT) / (MAX_TX_ERGS_LIMIT / K + OB)), with a possible -1 if the division has no remainder.
+    let overhead_for_gas = {
+        let numerator = overhead_for_block_gas * total_gas_limit + U256::from(MAX_TX_ERGS_LIMIT);
+        let denominator: U256 = U256::from(
+            (MAX_TX_ERGS_LIMIT as f64 / coeficients.ergs_limit_overhead_coeficient) as u64,
+        ) + overhead_for_block_gas;
+
+        let overhead_for_gas = (numerator - 1) / denominator;
+
+        overhead_for_gas.as_u32()
+    };
+
+    let overhead = vec![tx_slot_overhead, overhead_for_length, overhead_for_gas]
+        .into_iter()
+        .max()
+        // For the sake of consistency, make sure that total_gas_limit >= max_overhead.
+        .map(|max_overhead| std::cmp::min(max_overhead, total_gas_limit.as_u32()))
+        .unwrap();
+
+    let limit_after_deducting_overhead = total_gas_limit - overhead;
+
+    // When double-checking the overhead, the bootloader will assume that the
+    // body of the transaction does not have any more than MAX_L2_TX_GAS_LIMIT ergs available to it.
+    if limit_after_deducting_overhead.as_u64() > MAX_L2_TX_GAS_LIMIT {
+        // We derive the same overhead that would exist for MAX_L2_TX_GAS_LIMIT ergs.
+        derive_overhead(
+            MAX_L2_TX_GAS_LIMIT as u32,
+            gas_per_pubdata_byte_limit,
+            encoded_len.as_usize(),
+            coeficients,
+        )
+    } else {
+        overhead
+    }
+}
+
+pub(crate) fn block_overhead_gas(gas_per_pubdata_byte: u32) -> u32 {
+    BLOCK_OVERHEAD_GAS + BLOCK_OVERHEAD_PUBDATA * gas_per_pubdata_byte
+}
+
+#[cfg(test)]
+mod tests {
+
+    use super::*;
+
+    // This method returns the maximum block overhead that can be charged to the user, based on the binary search approach.
+    pub(crate) fn get_maximal_allowed_overhead_bin_search(
+        total_gas_limit: u32,
+        gas_per_pubdata_byte_limit: u32,
+        encoded_len: usize,
+        coeficients: OverheadCoeficients,
+    ) -> u32 {
+        let mut left_bound = if MAX_TX_ERGS_LIMIT < total_gas_limit {
+            total_gas_limit - MAX_TX_ERGS_LIMIT
+        } else {
+            0u32
+        };
+        // Safe cast: the gas_limit for a transaction cannot be larger than 2^32.
+        let mut right_bound = total_gas_limit;
+
+        // The closure returns whether a certain overhead would be accepted by the bootloader.
+        // It is accepted if the derived overhead (i.e. the actual overhead that the user has to pay)
+        // is >= the overhead proposed by the operator.
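+        // Illustrative numbers (hypothetical, not taken from the tests below): with
+        // total_gas_limit = 100_000, a suggested overhead of 10_000 is accepted only if
+        // derive_overhead(90_000, ...) returns at least 10_000; otherwise the binary
+        // search below has to try a smaller suggested overhead.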
+ let is_overhead_accepted = |suggested_overhead: u32| { + let derived_overhead = derive_overhead( + total_gas_limit - suggested_overhead, + gas_per_pubdata_byte_limit, + encoded_len, + coeficients, + ); + + derived_overhead >= suggested_overhead + }; + + // In order to find the maximal allowed overhead we are doing binary search + while left_bound + 1 < right_bound { + let mid = (left_bound + right_bound) / 2; + + if is_overhead_accepted(mid) { + left_bound = mid; + } else { + right_bound = mid; + } + } + + if is_overhead_accepted(right_bound) { + right_bound + } else { + left_bound + } + } + + #[test] + fn test_correctness_for_efficient_overhead() { + let test_params = |total_gas_limit: u32, + gas_per_pubdata: u32, + encoded_len: usize, + coeficients: OverheadCoeficients| { + let result_by_efficient_search = + get_amortized_overhead(total_gas_limit, gas_per_pubdata, encoded_len, coeficients); + + let result_by_binary_search = get_maximal_allowed_overhead_bin_search( + total_gas_limit, + gas_per_pubdata, + encoded_len, + coeficients, + ); + + assert_eq!(result_by_efficient_search, result_by_binary_search); + }; + + // Some arbitrary test + test_params(60_000_000, 800, 2900, OverheadCoeficients::new_l2()); + + // Very small parameters + test_params(0, 1, 12, OverheadCoeficients::new_l2()); + + // Relatively big parameters + let max_tx_overhead = derive_overhead( + MAX_TX_ERGS_LIMIT, + 5000, + 10000, + OverheadCoeficients::new_l2(), + ); + test_params( + MAX_TX_ERGS_LIMIT + max_tx_overhead, + 5000, + 10000, + OverheadCoeficients::new_l2(), + ); + + test_params(115432560, 800, 2900, OverheadCoeficients::new_l1()); + } +} diff --git a/core/multivm_deps/vm_virtual_blocks/src/utils/transaction_encoding.rs b/core/multivm_deps/vm_virtual_blocks/src/utils/transaction_encoding.rs new file mode 100644 index 000000000000..e911a2805d82 --- /dev/null +++ b/core/multivm_deps/vm_virtual_blocks/src/utils/transaction_encoding.rs @@ -0,0 +1,15 @@ +use crate::types::internals::TransactionData; +use zksync_types::Transaction; + +/// Extension for transactions, specific for VM. Required for bypassing the orphan rule +pub trait TransactionVmExt { + /// Get the size of the transaction in tokens. + fn bootloader_encoding_size(&self) -> usize; +} + +impl TransactionVmExt for Transaction { + fn bootloader_encoding_size(&self) -> usize { + let transaction_data: TransactionData = self.clone().into(); + transaction_data.into_tokens().len() + } +} diff --git a/core/multivm_deps/vm_virtual_blocks/src/vm.rs b/core/multivm_deps/vm_virtual_blocks/src/vm.rs new file mode 100644 index 000000000000..ee196683db37 --- /dev/null +++ b/core/multivm_deps/vm_virtual_blocks/src/vm.rs @@ -0,0 +1,158 @@ +use zksync_state::{StoragePtr, WriteStorage}; +use zksync_types::Transaction; +use zksync_utils::bytecode::CompressedBytecodeInfo; + +use crate::old_vm::events::merge_events; +use crate::old_vm::history_recorder::{HistoryEnabled, HistoryMode}; + +use crate::bootloader_state::BootloaderState; +use crate::errors::BytecodeCompressionError; +use crate::tracers::traits::VmTracer; +use crate::types::{ + inputs::{L1BatchEnv, SystemEnv, VmExecutionMode}, + internals::{new_vm_state, VmSnapshot, ZkSyncVmState}, + outputs::{BootloaderMemory, CurrentExecutionState, VmExecutionResultAndLogs}, +}; +use crate::L2BlockEnv; + +/// Main entry point for Virtual Machine integration. 
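+/// Typical flow (a sketch based on the methods below): construct the VM with [`Vm::new`],
+/// queue transactions with [`Vm::push_transaction`], run them with [`Vm::execute`], and
+/// collect the batch results with [`Vm::get_current_execution_state`].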
+/// The instance should process only one L1 batch.
+#[derive(Debug)]
+pub struct Vm<S: WriteStorage, H: HistoryMode> {
+    pub(crate) bootloader_state: BootloaderState,
+    // Current state and oracles of the virtual machine
+    pub(crate) state: ZkSyncVmState<S, H>,
+    pub(crate) storage: StoragePtr<S>,
+    pub(crate) system_env: SystemEnv,
+    pub(crate) batch_env: L1BatchEnv,
+    // Snapshots for the current run
+    pub(crate) snapshots: Vec<VmSnapshot>,
+    _phantom: std::marker::PhantomData<H>,
+}
+
+/// Public interface for the VM
+impl<S: WriteStorage, H: HistoryMode> Vm<S, H> {
+    pub fn new(batch_env: L1BatchEnv, system_env: SystemEnv, storage: StoragePtr<S>, _: H) -> Self {
+        let (state, bootloader_state) = new_vm_state(storage.clone(), &system_env, &batch_env);
+        Self {
+            bootloader_state,
+            state,
+            storage,
+            system_env,
+            batch_env,
+            snapshots: vec![],
+            _phantom: Default::default(),
+        }
+    }
+
+    /// Push a transaction into memory for future execution.
+    pub fn push_transaction(&mut self, tx: Transaction) {
+        self.push_transaction_with_compression(tx, true)
+    }
+
+    /// Execute the VM with default tracers. The execution mode determines at which point the VM
+    /// will stop and how the result will be processed.
+    pub fn execute(&mut self, execution_mode: VmExecutionMode) -> VmExecutionResultAndLogs {
+        self.inspect(vec![], execution_mode)
+    }
+
+    /// Execute the VM with custom tracers.
+    pub fn inspect(
+        &mut self,
+        tracers: Vec<Box<dyn VmTracer<S, H>>>,
+        execution_mode: VmExecutionMode,
+    ) -> VmExecutionResultAndLogs {
+        self.inspect_inner(tracers, execution_mode)
+    }
+
+    /// Get the current state of the bootloader memory.
+    pub fn get_bootloader_memory(&self) -> BootloaderMemory {
+        self.bootloader_state.bootloader_memory()
+    }
+
+    /// Get the compressed bytecodes of the last executed transaction.
+    pub fn get_last_tx_compressed_bytecodes(&self) -> Vec<CompressedBytecodeInfo> {
+        self.bootloader_state.get_last_tx_compressed_bytecodes()
+    }
+
+    pub fn start_new_l2_block(&mut self, l2_block_env: L2BlockEnv) {
+        self.bootloader_state.start_new_l2_block(l2_block_env);
+    }
+
+    /// Get the current state of the virtual machine.
+    /// This method should only be used after the batch execution;
+    /// otherwise, it can panic.
+    pub fn get_current_execution_state(&self) -> CurrentExecutionState {
+        let (_full_history, raw_events, l1_messages) = self.state.event_sink.flatten();
+        let events = merge_events(raw_events)
+            .into_iter()
+            .map(|e| e.into_vm_event(self.batch_env.number))
+            .collect();
+        let l2_to_l1_logs = l1_messages.into_iter().map(|log| log.into()).collect();
+        let total_log_queries = self.state.event_sink.get_log_queries()
+            + self
+                .state
+                .precompiles_processor
+                .get_timestamp_history()
+                .len()
+            + self.state.storage.get_final_log_queries().len();
+
+        CurrentExecutionState {
+            events,
+            storage_log_queries: self.state.storage.get_final_log_queries(),
+            used_contract_hashes: self.get_used_contracts(),
+            l2_to_l1_logs,
+            total_log_queries,
+            cycles_used: self.state.local_state.monotonic_cycle_counter,
+        }
+    }
+
+    /// Execute a transaction with optional bytecode compression.
+    pub fn execute_transaction_with_bytecode_compression(
+        &mut self,
+        tx: Transaction,
+        with_compression: bool,
+    ) -> Result<VmExecutionResultAndLogs, BytecodeCompressionError> {
+        self.inspect_transaction_with_bytecode_compression(vec![], tx, with_compression)
+    }
+
+    /// Inspect a transaction with optional bytecode compression.
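+    /// If any of the transaction's bytecodes cannot be published as compressed, the execution
+    /// result is discarded and `BytecodeCompressionError::BytecodeCompressionFailed` is returned
+    /// (see the `has_unpublished_bytecodes` check in the body).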
+ pub fn inspect_transaction_with_bytecode_compression( + &mut self, + tracers: Vec>>, + tx: Transaction, + with_compression: bool, + ) -> Result { + self.push_transaction_with_compression(tx, with_compression); + let result = self.inspect(tracers, VmExecutionMode::OneTx); + if self.has_unpublished_bytecodes() { + Err(BytecodeCompressionError::BytecodeCompressionFailed) + } else { + Ok(result) + } + } +} + +/// Methods of vm, which required some history manipullations +impl Vm { + /// Create snapshot of current vm state and push it into the memory + pub fn make_snapshot(&mut self) { + self.make_snapshot_inner() + } + + /// Rollback vm state to the latest snapshot and destroy the snapshot + pub fn rollback_to_the_latest_snapshot(&mut self) { + let snapshot = self + .snapshots + .pop() + .expect("Snapshot should be created before rolling it back"); + self.rollback_to_snapshot(snapshot); + } + + /// Pop the latest snapshot from the memory and destroy it + pub fn pop_snapshot_no_rollback(&mut self) { + self.snapshots + .pop() + .expect("Snapshot should be created before rolling it back"); + } +} From 5352b498fd32ca3d44e62c9c88aa1b5f41cf044d Mon Sep 17 00:00:00 2001 From: pompon0 Date: Tue, 10 Oct 2023 12:45:06 +0200 Subject: [PATCH 27/29] refactor: Removed unused fields from SyncBlock (#185) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit # What ❔ Removed unused fields from SyncBlock. It is a backward compatible change, because the removed fields were optional. ## Why ❔ We decrease the public api surface this way. --- core/lib/dal/sqlx-data.json | 220 ++++++++++-------------- core/lib/dal/src/models/storage_sync.rs | 31 +--- core/lib/dal/src/sync_dal.rs | 9 - core/lib/types/src/api/en.rs | 16 -- 4 files changed, 93 insertions(+), 183 deletions(-) diff --git a/core/lib/dal/sqlx-data.json b/core/lib/dal/sqlx-data.json index 84799dd2180f..ba14fa8b6968 100644 --- a/core/lib/dal/sqlx-data.json +++ b/core/lib/dal/sqlx-data.json @@ -2542,134 +2542,6 @@ }, "query": "SELECT MAX(number) as \"number\" FROM miniblocks" }, - "3365f652e8e0070672ab522bd60f92d002dac7bb782763575a0337a8b5502994": { - "describe": { - "columns": [ - { - "name": "number", - "ordinal": 0, - "type_info": "Int8" - }, - { - "name": "l1_batch_number!", - "ordinal": 1, - "type_info": "Int8" - }, - { - "name": "last_batch_miniblock?", - "ordinal": 2, - "type_info": "Int8" - }, - { - "name": "timestamp", - "ordinal": 3, - "type_info": "Int8" - }, - { - "name": "root_hash?", - "ordinal": 4, - "type_info": "Bytea" - }, - { - "name": "commit_tx_hash?", - "ordinal": 5, - "type_info": "Text" - }, - { - "name": "committed_at?", - "ordinal": 6, - "type_info": "Timestamp" - }, - { - "name": "prove_tx_hash?", - "ordinal": 7, - "type_info": "Text" - }, - { - "name": "proven_at?", - "ordinal": 8, - "type_info": "Timestamp" - }, - { - "name": "execute_tx_hash?", - "ordinal": 9, - "type_info": "Text" - }, - { - "name": "executed_at?", - "ordinal": 10, - "type_info": "Timestamp" - }, - { - "name": "l1_gas_price", - "ordinal": 11, - "type_info": "Int8" - }, - { - "name": "l2_fair_gas_price", - "ordinal": 12, - "type_info": "Int8" - }, - { - "name": "bootloader_code_hash", - "ordinal": 13, - "type_info": "Bytea" - }, - { - "name": "default_aa_code_hash", - "ordinal": 14, - "type_info": "Bytea" - }, - { - "name": "virtual_blocks", - "ordinal": 15, - "type_info": "Int8" - }, - { - "name": "hash", - "ordinal": 16, - "type_info": "Bytea" - }, - { - "name": "protocol_version!", - "ordinal": 17, - "type_info": "Int4" - 
}, - { - "name": "fee_account_address?", - "ordinal": 18, - "type_info": "Bytea" - } - ], - "nullable": [ - false, - null, - null, - false, - false, - false, - true, - false, - true, - false, - true, - false, - false, - true, - true, - false, - false, - true, - false - ], - "parameters": { - "Left": [ - "Int8" - ] - } - }, - "query": "\n SELECT miniblocks.number,\n COALESCE(miniblocks.l1_batch_number, (SELECT (max(number) + 1) FROM l1_batches)) as \"l1_batch_number!\",\n (SELECT max(m2.number) FROM miniblocks m2 WHERE miniblocks.l1_batch_number = m2.l1_batch_number) as \"last_batch_miniblock?\",\n miniblocks.timestamp,\n miniblocks.hash as \"root_hash?\",\n commit_tx.tx_hash as \"commit_tx_hash?\",\n commit_tx.confirmed_at as \"committed_at?\",\n prove_tx.tx_hash as \"prove_tx_hash?\",\n prove_tx.confirmed_at as \"proven_at?\",\n execute_tx.tx_hash as \"execute_tx_hash?\",\n execute_tx.confirmed_at as \"executed_at?\",\n miniblocks.l1_gas_price,\n miniblocks.l2_fair_gas_price,\n miniblocks.bootloader_code_hash,\n miniblocks.default_aa_code_hash,\n miniblocks.virtual_blocks,\n miniblocks.hash,\n miniblocks.protocol_version as \"protocol_version!\",\n l1_batches.fee_account_address as \"fee_account_address?\"\n FROM miniblocks\n LEFT JOIN l1_batches ON miniblocks.l1_batch_number = l1_batches.number\n LEFT JOIN eth_txs_history as commit_tx ON (l1_batches.eth_commit_tx_id = commit_tx.eth_tx_id AND commit_tx.confirmed_at IS NOT NULL)\n LEFT JOIN eth_txs_history as prove_tx ON (l1_batches.eth_prove_tx_id = prove_tx.eth_tx_id AND prove_tx.confirmed_at IS NOT NULL)\n LEFT JOIN eth_txs_history as execute_tx ON (l1_batches.eth_execute_tx_id = execute_tx.eth_tx_id AND execute_tx.confirmed_at IS NOT NULL)\n WHERE miniblocks.number = $1\n " - }, "357347157ed8ff19d223c54533c3a85bd7e64a37514d657f8d49bd6eb5be1806": { "describe": { "columns": [ @@ -3861,6 +3733,98 @@ }, "query": "UPDATE eth_txs SET has_failed = TRUE WHERE id = $1" }, + "5190fad25f0c476380af4013761d42ae97dbd55f87e38ceec33f8e148c5cbb14": { + "describe": { + "columns": [ + { + "name": "number", + "ordinal": 0, + "type_info": "Int8" + }, + { + "name": "l1_batch_number!", + "ordinal": 1, + "type_info": "Int8" + }, + { + "name": "last_batch_miniblock?", + "ordinal": 2, + "type_info": "Int8" + }, + { + "name": "timestamp", + "ordinal": 3, + "type_info": "Int8" + }, + { + "name": "root_hash?", + "ordinal": 4, + "type_info": "Bytea" + }, + { + "name": "l1_gas_price", + "ordinal": 5, + "type_info": "Int8" + }, + { + "name": "l2_fair_gas_price", + "ordinal": 6, + "type_info": "Int8" + }, + { + "name": "bootloader_code_hash", + "ordinal": 7, + "type_info": "Bytea" + }, + { + "name": "default_aa_code_hash", + "ordinal": 8, + "type_info": "Bytea" + }, + { + "name": "virtual_blocks", + "ordinal": 9, + "type_info": "Int8" + }, + { + "name": "hash", + "ordinal": 10, + "type_info": "Bytea" + }, + { + "name": "protocol_version!", + "ordinal": 11, + "type_info": "Int4" + }, + { + "name": "fee_account_address?", + "ordinal": 12, + "type_info": "Bytea" + } + ], + "nullable": [ + false, + null, + null, + false, + false, + false, + false, + true, + true, + false, + false, + true, + false + ], + "parameters": { + "Left": [ + "Int8" + ] + } + }, + "query": "\n SELECT miniblocks.number,\n COALESCE(miniblocks.l1_batch_number, (SELECT (max(number) + 1) FROM l1_batches)) as \"l1_batch_number!\",\n (SELECT max(m2.number) FROM miniblocks m2 WHERE miniblocks.l1_batch_number = m2.l1_batch_number) as \"last_batch_miniblock?\",\n miniblocks.timestamp,\n miniblocks.hash as 
\"root_hash?\",\n miniblocks.l1_gas_price,\n miniblocks.l2_fair_gas_price,\n miniblocks.bootloader_code_hash,\n miniblocks.default_aa_code_hash,\n miniblocks.virtual_blocks,\n miniblocks.hash,\n miniblocks.protocol_version as \"protocol_version!\",\n l1_batches.fee_account_address as \"fee_account_address?\"\n FROM miniblocks\n LEFT JOIN l1_batches ON miniblocks.l1_batch_number = l1_batches.number\n WHERE miniblocks.number = $1\n " + }, "51cb712685991ffd600dce59f5ed8b5a1bfce8feed46ebd02471c43802e6e65a": { "describe": { "columns": [ diff --git a/core/lib/dal/src/models/storage_sync.rs b/core/lib/dal/src/models/storage_sync.rs index f68908dc1182..052fadcf60a3 100644 --- a/core/lib/dal/src/models/storage_sync.rs +++ b/core/lib/dal/src/models/storage_sync.rs @@ -1,6 +1,4 @@ -use std::{convert::TryInto, str::FromStr}; - -use sqlx::types::chrono::{DateTime, NaiveDateTime, Utc}; +use std::convert::TryInto; use zksync_contracts::BaseSystemContractsHashes; use zksync_types::api::en::SyncBlock; @@ -14,12 +12,6 @@ pub struct StorageSyncBlock { pub last_batch_miniblock: Option, pub timestamp: i64, pub root_hash: Option>, - pub commit_tx_hash: Option, - pub committed_at: Option, - pub prove_tx_hash: Option, - pub proven_at: Option, - pub execute_tx_hash: Option, - pub executed_at: Option, // L1 gas price assumed in the corresponding batch pub l1_gas_price: i64, // L2 gas price assumed in the corresponding batch @@ -47,27 +39,6 @@ impl StorageSyncBlock { .unwrap_or(false), timestamp: self.timestamp as u64, root_hash: self.root_hash.as_deref().map(H256::from_slice), - commit_tx_hash: self - .commit_tx_hash - .as_deref() - .map(|hash| H256::from_str(hash).expect("Incorrect commit_tx hash")), - committed_at: self - .committed_at - .map(|committed_at| DateTime::::from_naive_utc_and_offset(committed_at, Utc)), - prove_tx_hash: self - .prove_tx_hash - .as_deref() - .map(|hash| H256::from_str(hash).expect("Incorrect prove_tx hash")), - proven_at: self - .proven_at - .map(|proven_at| DateTime::::from_naive_utc_and_offset(proven_at, Utc)), - execute_tx_hash: self - .execute_tx_hash - .as_deref() - .map(|hash| H256::from_str(hash).expect("Incorrect execute_tx hash")), - executed_at: self - .executed_at - .map(|executed_at| DateTime::::from_naive_utc_and_offset(executed_at, Utc)), l1_gas_price: self.l1_gas_price as u64, l2_fair_gas_price: self.l2_fair_gas_price as u64, // TODO (SMA-1635): Make these filed non optional in database diff --git a/core/lib/dal/src/sync_dal.rs b/core/lib/dal/src/sync_dal.rs index d75ebc740f7b..ab905dd5cb22 100644 --- a/core/lib/dal/src/sync_dal.rs +++ b/core/lib/dal/src/sync_dal.rs @@ -29,12 +29,6 @@ impl SyncDal<'_, '_> { (SELECT max(m2.number) FROM miniblocks m2 WHERE miniblocks.l1_batch_number = m2.l1_batch_number) as "last_batch_miniblock?", miniblocks.timestamp, miniblocks.hash as "root_hash?", - commit_tx.tx_hash as "commit_tx_hash?", - commit_tx.confirmed_at as "committed_at?", - prove_tx.tx_hash as "prove_tx_hash?", - prove_tx.confirmed_at as "proven_at?", - execute_tx.tx_hash as "execute_tx_hash?", - execute_tx.confirmed_at as "executed_at?", miniblocks.l1_gas_price, miniblocks.l2_fair_gas_price, miniblocks.bootloader_code_hash, @@ -45,9 +39,6 @@ impl SyncDal<'_, '_> { l1_batches.fee_account_address as "fee_account_address?" 
FROM miniblocks LEFT JOIN l1_batches ON miniblocks.l1_batch_number = l1_batches.number - LEFT JOIN eth_txs_history as commit_tx ON (l1_batches.eth_commit_tx_id = commit_tx.eth_tx_id AND commit_tx.confirmed_at IS NOT NULL) - LEFT JOIN eth_txs_history as prove_tx ON (l1_batches.eth_prove_tx_id = prove_tx.eth_tx_id AND prove_tx.confirmed_at IS NOT NULL) - LEFT JOIN eth_txs_history as execute_tx ON (l1_batches.eth_execute_tx_id = execute_tx.eth_tx_id AND execute_tx.confirmed_at IS NOT NULL) WHERE miniblocks.number = $1 "#, block_number.0 as i64 diff --git a/core/lib/types/src/api/en.rs b/core/lib/types/src/api/en.rs index 74c48df8349b..18a83f9f821b 100644 --- a/core/lib/types/src/api/en.rs +++ b/core/lib/types/src/api/en.rs @@ -1,6 +1,5 @@ //! API types related to the External Node specific methods. -use chrono::{DateTime, Utc}; use serde::{Deserialize, Serialize}; use zk_evm::ethereum_types::Address; use zksync_basic_types::{L1BatchNumber, MiniblockNumber, H256}; @@ -27,21 +26,6 @@ pub struct SyncBlock { pub timestamp: u64, /// Hash of the L2 block (not the Merkle root hash). pub root_hash: Option, - /// Hash of the block's commit transaction on L1. - /// May be `None` if the corresponsing L1 batch is not committed yet. - pub commit_tx_hash: Option, - /// Timestamp of the commit transaction, as provided by the main node. - pub committed_at: Option>, - /// Hash of the block's prove transaction on L1. - /// May be `None` if the corresponsing L1 batch is not proven yet. - pub prove_tx_hash: Option, - /// Timestamp of the prove transaction, as provided by the main node. - pub proven_at: Option>, - /// Hash of the block's execute transaction on L1. - /// May be `None` if the corresponsing L1 batch is not executed yet. - pub execute_tx_hash: Option, - /// Timestamp of the execute transaction, as provided by the main node. - pub executed_at: Option>, /// L1 gas price used as VM parameter for the L1 batch corresponding to this L2 block. pub l1_gas_price: u64, /// L2 gas price used as VM parameter for the L1 batch corresponding to this L2 block. From bab099d83d9640c965bc02b32d90cce86a3f53cb Mon Sep 17 00:00:00 2001 From: perekopskiy <53865202+perekopskiy@users.noreply.github.com> Date: Tue, 10 Oct 2023 14:14:47 +0300 Subject: [PATCH 28/29] feat(storage): save enum indices in RocksDB (#162) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit # What ❔ Enumeration indices now are saved along with values in the same column family. Indices are added gradually for old DB entries. The number of keys processed each L1 batch is configurable. ## Why ❔ Enumeration indices in storage are necessary for boojum upgrade. ## Checklist - [x] PR title corresponds to the body of PR (we generate changelog entries from PRs). - [x] Tests for the changes have been added / updated. - [x] Documentation comments have been added / updated. - [x] Code has been formatted via `zk fmt` and `zk lint`. 
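For reviewers, a minimal sketch of the new on-disk encoding (an illustration based on the `StateValue` helper this PR adds in `core/lib/state/src/rocksdb/mod.rs`; the function name here is hypothetical):

```rust
/// A state value is stored as the 32-byte value, optionally followed by the
/// 8-byte big-endian enum index (40 bytes in total when the index is present).
fn serialize_state_value(value: [u8; 32], enum_index: Option<u64>) -> Vec<u8> {
    let mut buf = Vec::with_capacity(40);
    buf.extend_from_slice(&value);
    if let Some(index) = enum_index {
        buf.extend_from_slice(&index.to_be_bytes());
    }
    buf
}
```

Old entries without an index stay readable because the deserializer distinguishes the two layouts by length (32 vs. 40 bytes).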
---
 Cargo.lock                                    |   1 +
 core/bin/external_node/src/config/mod.rs      |   7 +
 core/bin/external_node/src/main.rs            |   1 +
 core/lib/config/src/configs/chain.rs          |   9 +
 core/lib/dal/sqlx-data.json                   |  58 +--
 core/lib/dal/src/storage_logs_dal.rs          |  36 +-
 core/lib/state/Cargo.toml                     |   1 +
 core/lib/state/src/in_memory.rs               |  31 +-
 core/lib/state/src/rocksdb/mod.rs             | 338 ++++++++++++++++--
 core/lib/storage/src/db.rs                    |  18 +-
 .../src/metadata_calculator/helpers.rs        |   4 +-
 .../src/metadata_calculator/tests.rs          |  18 +-
 .../src/state_keeper/batch_executor/mod.rs    |   4 +
 .../batch_executor/tests/tester.rs            |  17 +-
 core/lib/zksync_core/src/state_keeper/mod.rs  |   1 +
 15 files changed, 461 insertions(+), 83 deletions(-)

diff --git a/Cargo.lock b/Cargo.lock
index e5c438cb51ec..6fa750f187e6 100644
--- a/Cargo.lock
+++ b/Cargo.lock
@@ -8055,6 +8055,7 @@ version = "0.1.0"
 dependencies = [
  "anyhow",
  "db_test_macro",
+ "itertools",
  "mini-moka",
  "rand 0.8.5",
  "tempfile",
diff --git a/core/bin/external_node/src/config/mod.rs b/core/bin/external_node/src/config/mod.rs
index 66f4e54ff571..5835e516d07d 100644
--- a/core/bin/external_node/src/config/mod.rs
+++ b/core/bin/external_node/src/config/mod.rs
@@ -197,6 +197,9 @@ pub struct OptionalENConfig {
     /// Whether to try running EN with MultiVM.
     #[serde(default)]
     pub experimental_multivm_support: bool,
+    /// Number of keys that are processed by the enum_index migration in the State Keeper for each L1 batch.
+    #[serde(default = "OptionalENConfig::default_enum_index_migration_chunk_size")]
+    pub enum_index_migration_chunk_size: usize,
 }
 
 impl OptionalENConfig {
@@ -283,6 +286,10 @@ impl OptionalENConfig {
         10
     }
 
+    const fn default_enum_index_migration_chunk_size() -> usize {
+        1000
+    }
+
     pub fn polling_interval(&self) -> Duration {
         Duration::from_millis(self.polling_interval)
     }
diff --git a/core/bin/external_node/src/main.rs b/core/bin/external_node/src/main.rs
index a65f3178b2ce..7f59f856ae9d 100644
--- a/core/bin/external_node/src/main.rs
+++ b/core/bin/external_node/src/main.rs
@@ -76,6 +76,7 @@ async fn build_state_keeper(
         max_allowed_l2_tx_gas_limit,
         save_call_traces,
         false,
+        config.optional.enum_index_migration_chunk_size,
     ));
 
     let io = Box::new(
diff --git a/core/lib/config/src/configs/chain.rs b/core/lib/config/src/configs/chain.rs
index afb928716946..ddf0c85e63e5 100644
--- a/core/lib/config/src/configs/chain.rs
+++ b/core/lib/config/src/configs/chain.rs
@@ -109,6 +109,9 @@ pub struct StateKeeperConfig {
     /// Flag which will enable storage to cache witness_inputs during State Keeper's run.
     /// NOTE: This will slow down StateKeeper, to be used in non-production environments!
     pub upload_witness_inputs_to_gcs: bool,
+
+    /// Number of keys that are processed by the enum_index migration in the State Keeper for each L1 batch.
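+    /// If the value is not set, `enum_index_migration_chunk_size()` below falls back to 1000.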
+ pub enum_index_migration_chunk_size: Option, } impl StateKeeperConfig { @@ -122,6 +125,10 @@ impl StateKeeperConfig { default_aa: self.default_aa_hash, } } + + pub fn enum_index_migration_chunk_size(&self) -> usize { + self.enum_index_migration_chunk_size.unwrap_or(1_000) + } } #[derive(Debug, Deserialize, Clone, PartialEq)] @@ -226,6 +233,7 @@ mod tests { virtual_blocks_interval: 1, virtual_blocks_per_miniblock: 1, upload_witness_inputs_to_gcs: false, + enum_index_migration_chunk_size: Some(2_000), }, operations_manager: OperationsManagerConfig { delay_interval: 100, @@ -273,6 +281,7 @@ mod tests { CHAIN_STATE_KEEPER_VALIDATION_COMPUTATIONAL_GAS_LIMIT="10000000" CHAIN_STATE_KEEPER_SAVE_CALL_TRACES="false" CHAIN_STATE_KEEPER_UPLOAD_WITNESS_INPUTS_TO_GCS="false" + CHAIN_STATE_KEEPER_ENUM_INDEX_MIGRATION_CHUNK_SIZE="2000" CHAIN_OPERATIONS_MANAGER_DELAY_INTERVAL="100" CHAIN_MEMPOOL_SYNC_INTERVAL_MS="10" CHAIN_MEMPOOL_SYNC_BATCH_SIZE="1000" diff --git a/core/lib/dal/sqlx-data.json b/core/lib/dal/sqlx-data.json index ba14fa8b6968..e6dba044f12b 100644 --- a/core/lib/dal/sqlx-data.json +++ b/core/lib/dal/sqlx-data.json @@ -9099,6 +9099,38 @@ }, "query": "\n UPDATE prover_jobs_fri\n SET status = 'in_progress', attempts = attempts + 1,\n updated_at = now(), processing_started_at = now(),\n picked_by = $2\n WHERE id = (\n SELECT id\n FROM prover_jobs_fri\n WHERE status = 'queued'\n AND protocol_version = ANY($1)\n ORDER BY aggregation_round DESC, l1_batch_number ASC, id ASC\n LIMIT 1\n FOR UPDATE\n SKIP LOCKED\n )\n RETURNING prover_jobs_fri.id, prover_jobs_fri.l1_batch_number, prover_jobs_fri.circuit_id,\n prover_jobs_fri.aggregation_round, prover_jobs_fri.sequence_number, prover_jobs_fri.depth,\n prover_jobs_fri.is_node_final_proof\n " }, + "d1c82bd0b3c010569937ad7600760fa0c3aca7c9585bbf9598a5c0515b431b26": { + "describe": { + "columns": [ + { + "name": "hashed_key", + "ordinal": 0, + "type_info": "Bytea" + }, + { + "name": "l1_batch_number", + "ordinal": 1, + "type_info": "Int8" + }, + { + "name": "index", + "ordinal": 2, + "type_info": "Int8" + } + ], + "nullable": [ + false, + false, + false + ], + "parameters": { + "Left": [ + "ByteaArray" + ] + } + }, + "query": "SELECT hashed_key, l1_batch_number, index FROM initial_writes WHERE hashed_key = ANY($1::bytea[])" + }, "d5dea31f2a325bb44e8ef2cbbabbeb73fd6996a3e6cb99d62c6b97a4aa49c1ca": { "describe": { "columns": [ @@ -9394,32 +9426,6 @@ }, "query": "UPDATE l1_batches SET skip_proof = TRUE WHERE number = $1" }, - "da01d59119023c822cffa5dc226e82b2abd4cbd46d3856d7db16289868a27fa1": { - "describe": { - "columns": [ - { - "name": "hashed_key", - "ordinal": 0, - "type_info": "Bytea" - }, - { - "name": "l1_batch_number", - "ordinal": 1, - "type_info": "Int8" - } - ], - "nullable": [ - false, - false - ], - "parameters": { - "Left": [ - "ByteaArray" - ] - } - }, - "query": "SELECT hashed_key, l1_batch_number FROM initial_writes WHERE hashed_key = ANY($1::bytea[])" - }, "dc16d0fac093a52480b66dfcb5976fb01e6629e8c982c265f2af1d5000090572": { "describe": { "columns": [ diff --git a/core/lib/dal/src/storage_logs_dal.rs b/core/lib/dal/src/storage_logs_dal.rs index 9633dd856e2d..b0424c534fe9 100644 --- a/core/lib/dal/src/storage_logs_dal.rs +++ b/core/lib/dal/src/storage_logs_dal.rs @@ -3,7 +3,7 @@ use sqlx::Row; use std::{collections::HashMap, time::Instant}; -use crate::StorageProcessor; +use crate::{instrument::InstrumentExt, StorageProcessor}; use zksync_types::{ get_code_key, AccountTreeId, Address, L1BatchNumber, MiniblockNumber, StorageKey, StorageLog, 
FAILED_CONTRACT_DEPLOYMENT_BYTECODE_HASH, H256, @@ -244,7 +244,7 @@ impl StorageLogsDal<'_, '_> { pub async fn get_storage_logs_for_revert( &mut self, l1_batch_number: L1BatchNumber, - ) -> HashMap> { + ) -> HashMap> { let miniblock_range = self .storage .blocks_dal() @@ -268,7 +268,9 @@ impl StorageLogsDal<'_, '_> { // as per `initial_writes`, so if we return such keys from this method, it will lead to // the incorrect state after revert. let stage_start = Instant::now(); - let l1_batch_by_key = self.get_l1_batches_for_initial_writes(&modified_keys).await; + let l1_batch_and_index_by_key = self + .get_l1_batches_and_indices_for_initial_writes(&modified_keys) + .await; tracing::info!( "Loaded initial write info for modified keys in {:?}", stage_start.elapsed() @@ -277,12 +279,12 @@ impl StorageLogsDal<'_, '_> { let stage_start = Instant::now(); let mut output = HashMap::with_capacity(modified_keys.len()); modified_keys.retain(|key| { - match l1_batch_by_key.get(key) { + match l1_batch_and_index_by_key.get(key) { None => { // Key is completely deduped. It should not be present in the output map. false } - Some(write_batch) if *write_batch > l1_batch_number => { + Some((write_batch, _)) if *write_batch > l1_batch_number => { // Key was initially written to after the specified L1 batch. output.insert(*key, None); false @@ -295,18 +297,24 @@ impl StorageLogsDal<'_, '_> { stage_start.elapsed() ); - let deduped_count = modified_keys_count - l1_batch_by_key.len(); + let deduped_count = modified_keys_count - l1_batch_and_index_by_key.len(); tracing::info!( "Keys to update: {update_count}, to delete: {delete_count}; {deduped_count} modified keys \ are deduped and will be ignored", update_count = modified_keys.len(), - delete_count = l1_batch_by_key.len() - modified_keys.len() + delete_count = l1_batch_and_index_by_key.len() - modified_keys.len() ); let stage_start = Instant::now(); let prev_values_for_updated_keys = self .get_storage_values(&modified_keys, last_miniblock) - .await; + .await + .into_iter() + .map(|(key, value)| { + let value = value.unwrap(); // We already filtered out keys that weren't touched. + let index = l1_batch_and_index_by_key[&key].1; + (key, Some((value, index))) + }); tracing::info!( "Loaded previous values for {} keys in {:?}", prev_values_for_updated_keys.len(), @@ -316,20 +324,22 @@ impl StorageLogsDal<'_, '_> { output } - pub async fn get_l1_batches_for_initial_writes( + pub async fn get_l1_batches_and_indices_for_initial_writes( &mut self, hashed_keys: &[H256], - ) -> HashMap { + ) -> HashMap { if hashed_keys.is_empty() { return HashMap::new(); // Shortcut to save time on communication with DB in the common case } let hashed_keys: Vec<_> = hashed_keys.iter().map(H256::as_bytes).collect(); let rows = sqlx::query!( - "SELECT hashed_key, l1_batch_number FROM initial_writes \ + "SELECT hashed_key, l1_batch_number, index FROM initial_writes \ WHERE hashed_key = ANY($1::bytea[])", &hashed_keys as &[&[u8]], ) + .instrument("get_l1_batches_and_indices_for_initial_writes") + .report_latency() .fetch_all(self.storage.conn()) .await .unwrap(); @@ -338,7 +348,7 @@ impl StorageLogsDal<'_, '_> { .map(|row| { ( H256::from_slice(&row.hashed_key), - L1BatchNumber(row.l1_batch_number as u32), + (L1BatchNumber(row.l1_batch_number as u32), row.index as u64), ) }) .collect() @@ -696,7 +706,7 @@ mod tests { .await; assert_eq!(logs_for_revert.len(), 15); // 5 updated + 10 new keys for log in &logs[5..] 
{ - let prev_value = logs_for_revert[&log.key.hashed_key()].unwrap(); + let prev_value = logs_for_revert[&log.key.hashed_key()].unwrap().0; assert_eq!(prev_value, log.value); } for log in &new_logs[5..] { diff --git a/core/lib/state/Cargo.toml b/core/lib/state/Cargo.toml index cf8a09d91590..f89a1707e0e2 100644 --- a/core/lib/state/Cargo.toml +++ b/core/lib/state/Cargo.toml @@ -20,6 +20,7 @@ anyhow = "1.0" mini-moka = "0.10.0" tokio = { version = "1", features = ["rt"] } tracing = "0.1" +itertools = "0.10.3" [dev-dependencies] db_test_macro = { path = "../db_test_macro" } diff --git a/core/lib/state/src/in_memory.rs b/core/lib/state/src/in_memory.rs index e44187e34d95..9fadc8813718 100644 --- a/core/lib/state/src/in_memory.rs +++ b/core/lib/state/src/in_memory.rs @@ -1,4 +1,4 @@ -use std::collections::HashMap; +use std::collections::{hash_map::Entry, BTreeMap, HashMap}; use crate::ReadStorage; use zksync_types::{ @@ -14,8 +14,9 @@ pub const IN_MEMORY_STORAGE_DEFAULT_NETWORK_ID: u32 = 270; /// In-memory storage. #[derive(Debug, Default)] pub struct InMemoryStorage { - pub(crate) state: HashMap, + pub(crate) state: HashMap, pub(crate) factory_deps: HashMap>, + last_enum_index_set: u64, } impl InMemoryStorage { @@ -47,7 +48,7 @@ impl InMemoryStorage { ) -> Self { let system_context_init_log = get_system_context_init_logs(chain_id); - let state = contracts + let state_without_indices: BTreeMap<_, _> = contracts .iter() .flat_map(|contract| { let bytecode_hash = bytecode_hasher(&contract.bytecode); @@ -63,20 +64,36 @@ impl InMemoryStorage { .chain(system_context_init_log) .filter_map(|log| (log.kind == StorageLogKind::Write).then_some((log.key, log.value))) .collect(); + let state: HashMap<_, _> = state_without_indices + .into_iter() + .enumerate() + .map(|(idx, (key, value))| (key, (value, idx as u64 + 1))) + .collect(); let factory_deps = contracts .into_iter() .map(|contract| (bytecode_hasher(&contract.bytecode), contract.bytecode)) .collect(); + + let last_enum_index_set = state.len() as u64; Self { state, factory_deps, + last_enum_index_set, } } /// Sets the storage `value` at the specified `key`. pub fn set_value(&mut self, key: StorageKey, value: StorageValue) { - self.state.insert(key, value); + match self.state.entry(key) { + Entry::Occupied(mut entry) => { + entry.get_mut().0 = value; + } + Entry::Vacant(entry) => { + self.last_enum_index_set += 1; + entry.insert((value, self.last_enum_index_set)); + } + } } /// Stores a factory dependency with the specified `hash` and `bytecode`. @@ -87,7 +104,11 @@ impl InMemoryStorage { impl ReadStorage for &InMemoryStorage { fn read_value(&mut self, key: &StorageKey) -> StorageValue { - self.state.get(key).copied().unwrap_or_default() + self.state + .get(key) + .map(|(value, _)| value) + .copied() + .unwrap_or_default() } fn is_write_initial(&mut self, key: &StorageKey) -> bool { diff --git a/core/lib/state/src/rocksdb/mod.rs b/core/lib/state/src/rocksdb/mod.rs index ab6db6bbc891..8723efd31d52 100644 --- a/core/lib/state/src/rocksdb/mod.rs +++ b/core/lib/state/src/rocksdb/mod.rs @@ -8,18 +8,24 @@ //! - Contracts //! - Factory dependencies //! -//! | Column | Key | Value | Description | -//! | ------------ | ---------------------- | ----------------------- | ------------------------------------ | -//! | State | 'block_number' | serialized block number | Last processed L1 batch number (u32) | -//! | State | hashed `StorageKey` | 32 bytes value | State for the given key | -//! | Contracts | address (20 bytes) | `Vec` | Contract contents | -//! 
| Factory deps | hash (32 bytes) | `Vec` | Bytecodes for new contracts that a certain contract may deploy. | - -use std::{collections::HashMap, mem, path::Path, time::Instant}; +//! | Column | Key | Value | Description | +//! | ------------ | ------------------------------- | ------------------------------- | ----------------------------------------- | +//! | State | 'block_number' | serialized block number | Last processed L1 batch number (u32) | +//! | State | 'enum_index_migration_cursor' | serialized hashed key or empty | If key is not present it means that the migration hasn't started. | +//! | | | bytes | If value is of length 32 then it represents hashed_key migration should start from. | +//! | | | | If value is empty then it means the migration has finished | +//! | State | hashed `StorageKey` | 32 bytes value ++ 8 bytes index | State value for the given key | +//! | | | (big-endian) | | +//! | Contracts | address (20 bytes) | `Vec` | Contract contents | +//! | Factory deps | hash (32 bytes) | `Vec` | Bytecodes for new contracts that a certain contract may deploy. | + +use itertools::{Either, Itertools}; +use std::{collections::HashMap, convert::TryInto, mem, path::Path, time::Instant}; use zksync_dal::StorageProcessor; use zksync_storage::{db::NamedColumnFamily, RocksDB}; -use zksync_types::{L1BatchNumber, StorageKey, StorageValue, H256}; +use zksync_types::{L1BatchNumber, StorageKey, StorageValue, H256, U256}; +use zksync_utils::{h256_to_u256, u256_to_h256}; mod metrics; @@ -55,15 +61,56 @@ impl NamedColumnFamily for StateKeeperColumnFamily { } } +#[derive(Debug, Clone, Copy)] +struct StateValue { + pub value: H256, + pub enum_index: Option, +} + +impl StateValue { + pub fn new(value: H256, enum_index: Option) -> Self { + Self { value, enum_index } + } + + pub fn deserialize(bytes: &[u8]) -> Self { + if bytes.len() == 32 { + Self { + value: H256::from_slice(bytes), + enum_index: None, + } + } else { + Self { + value: H256::from_slice(&bytes[..32]), + enum_index: Some(u64::from_be_bytes(bytes[32..40].try_into().unwrap())), + } + } + } + + pub fn serialize(&self) -> Vec { + let mut buffer = Vec::with_capacity(40); + buffer.extend_from_slice(self.value.as_bytes()); + if let Some(index) = self.enum_index { + buffer.extend_from_slice(&index.to_be_bytes()); + } + buffer + } +} + /// [`ReadStorage`] implementation backed by RocksDB. #[derive(Debug)] pub struct RocksdbStorage { db: RocksDB, pending_patch: InMemoryStorage, + enum_index_migration_chunk_size: usize, } impl RocksdbStorage { const BLOCK_NUMBER_KEY: &'static [u8] = b"block_number"; + const ENUM_INDEX_MIGRATION_CURSOR: &'static [u8] = b"enum_index_migration_cursor"; + + fn is_special_key(key: &[u8]) -> bool { + key == Self::BLOCK_NUMBER_KEY || key == Self::ENUM_INDEX_MIGRATION_CURSOR + } /// Creates a new storage with the provided RocksDB `path`. pub fn new(path: &Path) -> Self { @@ -71,9 +118,15 @@ impl RocksdbStorage { Self { db, pending_patch: InMemoryStorage::default(), + enum_index_migration_chunk_size: 0, } } + /// Enables enum indices migration. + pub fn enable_enum_index_migration(&mut self, chunk_size: usize) { + self.enum_index_migration_chunk_size = chunk_size; + } + /// Synchronizes this storage with Postgres using the provided connection. 
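    /// Storage logs for each new L1 batch are applied first; afterwards, any enum indices still
    /// missing in RocksDB are backfilled in chunks via `save_missing_enum_indices` (a clarifying
    /// note: the chunk size is set with `enable_enum_index_migration`).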
/// /// # Panics @@ -108,7 +161,7 @@ impl RocksdbStorage { .storage_logs_dal() .get_touched_slots_for_l1_batch(L1BatchNumber(current_l1_batch_number)) .await; - self.process_transaction_logs(&storage_logs); + self.apply_storage_logs(storage_logs, conn).await; tracing::debug!("loading factory deps for l1 batch {current_l1_batch_number}"); let factory_deps = conn @@ -131,23 +184,141 @@ impl RocksdbStorage { tracing::info!( "Secondary storage for L1 batch #{latest_l1_batch_number} initialized, size is {estimated_size}" ); + + self.save_missing_enum_indices(conn).await; + } + + async fn apply_storage_logs( + &mut self, + storage_logs: HashMap, + conn: &mut StorageProcessor<'_>, + ) { + let (logs_with_known_indices, logs_with_unknown_indices): (Vec<_>, Vec<_>) = self + .process_transaction_logs(storage_logs) + .partition_map(|(key, StateValue { value, enum_index })| match enum_index { + Some(index) => Either::Left((key, (value, index))), + None => Either::Right((key, value)), + }); + let keys_with_unknown_indices: Vec<_> = logs_with_unknown_indices + .iter() + .map(|(key, _)| key.hashed_key()) + .collect(); + + let enum_indices_and_batches = conn + .storage_logs_dal() + .get_l1_batches_and_indices_for_initial_writes(&keys_with_unknown_indices) + .await; + assert_eq!( + keys_with_unknown_indices.len(), + enum_indices_and_batches.len() + ); + self.pending_patch.state = + logs_with_known_indices + .into_iter() + .chain(logs_with_unknown_indices.into_iter().map(|(key, value)| { + (key, (value, enum_indices_and_batches[&key.hashed_key()].1)) + })) + .collect(); + } + + async fn save_missing_enum_indices(&self, conn: &mut StorageProcessor<'_>) { + let (Some(start_from), true) = ( + self.enum_migration_start_from(), + self.enum_index_migration_chunk_size > 0, + ) else { + return; + }; + + let started_at = Instant::now(); + tracing::info!( + "RocksDB enum index migration is not finished, starting from key {start_from:0>64x}" + ); + + let mut write_batch = self.db.new_write_batch(); + let (keys, values): (Vec<_>, Vec<_>) = self + .db + .from_iterator_cf(StateKeeperColumnFamily::State, start_from.as_bytes()) + .filter_map(|(key, value)| { + if Self::is_special_key(&key) { + return None; + } + let state_value = StateValue::deserialize(&value); + (state_value.enum_index.is_none()) + .then(|| (H256::from_slice(&key), state_value.value)) + }) + .take(self.enum_index_migration_chunk_size) + .unzip(); + let enum_indices_and_batches = conn + .storage_logs_dal() + .get_l1_batches_and_indices_for_initial_writes(&keys) + .await; + assert_eq!(keys.len(), enum_indices_and_batches.len()); + + for (key, value) in keys.iter().zip(values) { + let index = enum_indices_and_batches[key].1; + write_batch.put_cf( + StateKeeperColumnFamily::State, + key.as_bytes(), + &StateValue::new(value, Some(index)).serialize(), + ); + } + + let next_key = keys + .last() + .and_then(|last_key| h256_to_u256(*last_key).checked_add(U256::one())) + .map(u256_to_h256); + match (next_key, keys.len()) { + (Some(next_key), keys_len) if keys_len == self.enum_index_migration_chunk_size => { + write_batch.put_cf( + StateKeeperColumnFamily::State, + Self::ENUM_INDEX_MIGRATION_CURSOR, + next_key.as_bytes(), + ); + } + _ => { + write_batch.put_cf( + StateKeeperColumnFamily::State, + Self::ENUM_INDEX_MIGRATION_CURSOR, + &[], + ); + tracing::info!("RocksDB enum index migration finished"); + } + } + self.db + .write(write_batch) + .expect("failed to save state data into rocksdb"); + tracing::info!( + "RocksDB enum index migration chunk took {:?}, migrated 
{} keys", + started_at.elapsed(), + keys.len() + ); } fn read_value_inner(&self, key: &StorageKey) -> Option { + self.read_state_value(key) + .map(|state_value| state_value.value) + } + + fn read_state_value(&self, key: &StorageKey) -> Option { let cf = StateKeeperColumnFamily::State; self.db .get_cf(cf, &Self::serialize_state_key(key)) .expect("failed to read rocksdb state value") - .map(|value| H256::from_slice(&value)) + .map(|value| StateValue::deserialize(&value)) } - /// Processes storage `logs` produced by transactions. - fn process_transaction_logs(&mut self, updates: &HashMap) { - for (&key, &value) in updates { - if !value.is_zero() || self.read_value_inner(&key).is_some() { - self.pending_patch.state.insert(key, value); + /// Returns storage logs to apply. + fn process_transaction_logs( + &self, + updates: HashMap, + ) -> impl Iterator + '_ { + updates.into_iter().filter_map(|(key, new_value)| { + if let Some(state_value) = self.read_state_value(&key) { + Some((key, StateValue::new(new_value, state_value.enum_index))) + } else { + (!new_value.is_zero()).then_some((key, StateValue::new(new_value, None))) } - } + }) } /// Stores a factory dependency with the specified `hash` and `bytecode`. @@ -206,8 +377,12 @@ impl RocksdbStorage { let cf = StateKeeperColumnFamily::State; for (key, maybe_value) in logs { - if let Some(prev_value) = maybe_value { - batch.put_cf(cf, key.as_bytes(), prev_value.as_bytes()); + if let Some((prev_value, prev_index)) = maybe_value { + batch.put_cf( + cf, + key.as_bytes(), + &StateValue::new(prev_value, Some(prev_index)).serialize(), + ); } else { batch.delete_cf(cf, key.as_bytes()); } @@ -243,8 +418,12 @@ impl RocksdbStorage { Self::BLOCK_NUMBER_KEY, &serialize_block_number(l1_batch_number.0), ); - for (key, value) in pending_patch.state { - batch.put_cf(cf, &Self::serialize_state_key(&key), value.as_ref()); + for (key, (value, enum_index)) in pending_patch.state { + batch.put_cf( + cf, + &Self::serialize_state_key(&key), + &StateValue::new(value, Some(enum_index)).serialize(), + ); } let cf = StateKeeperColumnFamily::FactoryDeps; @@ -279,6 +458,21 @@ impl RocksdbStorage { self.db .estimated_number_of_entries(StateKeeperColumnFamily::State) } + + fn enum_migration_start_from(&self) -> Option { + let value = self + .db + .get_cf( + StateKeeperColumnFamily::State, + Self::ENUM_INDEX_MIGRATION_CURSOR, + ) + .expect("failed to read `ENUM_INDEX_MIGRATION_CURSOR`"); + match value { + Some(v) if v.is_empty() => None, + Some(cursor) => Some(H256::from_slice(&cursor)), + None => Some(H256::zero()), + } + } } impl ReadStorage for RocksdbStorage { @@ -314,11 +508,14 @@ mod tests { async fn rocksdb_storage_basics() { let dir = TempDir::new().expect("cannot create temporary dir for state keeper"); let mut storage = RocksdbStorage::new(dir.path()); - let mut storage_logs = gen_storage_logs(0..20) + let mut storage_logs: HashMap<_, _> = gen_storage_logs(0..20) .into_iter() .map(|log| (log.key, log.value)) .collect(); - storage.process_transaction_logs(&storage_logs); + let changed_keys = storage.process_transaction_logs(storage_logs.clone()); + storage.pending_patch.state = changed_keys + .map(|(key, state_value)| (key, (state_value.value, 1))) // enum index doesn't matter in the test + .collect(); storage.save(L1BatchNumber(0)).await; { for (key, value) in &storage_logs { @@ -331,7 +528,10 @@ mod tests { for log in storage_logs.values_mut().step_by(2) { *log = StorageValue::zero(); } - storage.process_transaction_logs(&storage_logs); + let changed_keys = 
storage.process_transaction_logs(storage_logs.clone()); + storage.pending_patch.state = changed_keys + .map(|(key, state_value)| (key, (state_value.value, 1))) // enum index doesn't matter in the test + .collect(); storage.save(L1BatchNumber(1)).await; for (key, value) in &storage_logs { @@ -441,4 +641,94 @@ mod tests { } } } + + #[db_test] + async fn rocksdb_enum_index_migration(pool: ConnectionPool) { + let mut conn = pool.access_storage().await.unwrap(); + prepare_postgres(&mut conn).await; + let storage_logs = gen_storage_logs(20..40); + create_miniblock(&mut conn, MiniblockNumber(1), storage_logs.clone()).await; + create_l1_batch(&mut conn, L1BatchNumber(1), &storage_logs).await; + + let enum_indices: HashMap<_, _> = conn + .storage_logs_dedup_dal() + .initial_writes_for_batch(L1BatchNumber(1)) + .await + .into_iter() + .collect(); + + let dir = TempDir::new().expect("cannot create temporary dir for state keeper"); + let mut storage = RocksdbStorage::new(dir.path()); + storage.update_from_postgres(&mut conn).await; + + assert_eq!(storage.l1_batch_number(), L1BatchNumber(2)); + // Check that enum indices are correct after syncing with postgres. + for log in &storage_logs { + let expected_index = enum_indices[&log.key.hashed_key()]; + assert_eq!( + storage.read_state_value(&log.key).unwrap().enum_index, + Some(expected_index) + ); + } + + // Remove enum indices for some keys. + let mut write_batch = storage.db.new_write_batch(); + for log in &storage_logs { + write_batch.put_cf( + StateKeeperColumnFamily::State, + log.key.hashed_key().as_bytes(), + log.value.as_bytes(), + ); + write_batch.delete_cf( + StateKeeperColumnFamily::State, + RocksdbStorage::ENUM_INDEX_MIGRATION_CURSOR, + ); + } + storage.db.write(write_batch).unwrap(); + + // Check that migration works as expected. + let ordered_keys_to_migrate: Vec = storage_logs + .iter() + .map(|log| log.key) + .sorted_by_key(StorageKey::hashed_key) + .collect(); + + storage.enable_enum_index_migration(10); + let start_from = storage.enum_migration_start_from(); + assert_eq!(start_from, Some(H256::zero())); + + // Migrate the first half. + storage.save_missing_enum_indices(&mut conn).await; + for key in ordered_keys_to_migrate.iter().take(10) { + let expected_index = enum_indices[&key.hashed_key()]; + assert_eq!( + storage.read_state_value(key).unwrap().enum_index, + Some(expected_index) + ); + } + assert!(storage + .read_state_value(&ordered_keys_to_migrate[10]) + .unwrap() + .enum_index + .is_none()); + + // Migrate the second half. + storage.save_missing_enum_indices(&mut conn).await; + for key in ordered_keys_to_migrate.iter().skip(10) { + let expected_index = enum_indices[&key.hashed_key()]; + assert_eq!( + storage.read_state_value(key).unwrap().enum_index, + Some(expected_index) + ); + } + + // 20 keys were processed but we haven't checked that no keys to migrate are left. + let start_from = storage.enum_migration_start_from(); + assert!(start_from.is_some()); + + // Check that migration will be marked as completed after the next iteration. 
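+        // (Note: the migration cursor is only cleared, i.e. set to an empty value, once a chunk
+        // smaller than the configured size is processed, which is why one more iteration is
+        // needed here after all 20 keys have been migrated.)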
+ storage.save_missing_enum_indices(&mut conn).await; + let start_from = storage.enum_migration_start_from(); + assert!(start_from.is_none()); + } } diff --git a/core/lib/storage/src/db.rs b/core/lib/storage/src/db.rs index c1cad72226db..44808af94581 100644 --- a/core/lib/storage/src/db.rs +++ b/core/lib/storage/src/db.rs @@ -1,6 +1,6 @@ use rocksdb::{ properties, BlockBasedOptions, Cache, ColumnFamily, ColumnFamilyDescriptor, DBPinnableSlice, - IteratorMode, Options, PrefixRange, ReadOptions, WriteOptions, DB, + Direction, IteratorMode, Options, PrefixRange, ReadOptions, WriteOptions, DB, }; use std::ffi::CStr; @@ -329,6 +329,22 @@ impl RocksDB { // We panic on RocksDB errors elsewhere and fuse it to prevent polling after the end of the range. // Thus, `unwrap()` should be safe. } + + /// Iterates over key-value pairs in the specified column family `cf` in the lexical + /// key order starting from the given `key_from`. + pub fn from_iterator_cf( + &self, + cf: CF, + key_from: &[u8], + ) -> impl Iterator, Box<[u8]>)> + '_ { + let cf = self.column_family(cf); + self.inner + .db + .iterator_cf(cf, IteratorMode::From(key_from, Direction::Forward)) + .map(Result::unwrap) + .fuse() + // ^ unwrap() is safe for the same reasons as in `prefix_iterator_cf()`. + } } impl RocksDB<()> { diff --git a/core/lib/zksync_core/src/metadata_calculator/helpers.rs b/core/lib/zksync_core/src/metadata_calculator/helpers.rs index 0abcc30c6444..26707c731178 100644 --- a/core/lib/zksync_core/src/metadata_calculator/helpers.rs +++ b/core/lib/zksync_core/src/metadata_calculator/helpers.rs @@ -244,7 +244,7 @@ impl L1BatchWithLogs { let latency = LoadChangesStage::InitialWritesForZeroValues.start(); let l1_batches_for_initial_writes = storage .storage_logs_dal() - .get_l1_batches_for_initial_writes(&hashed_keys_for_zero_values) + .get_l1_batches_and_indices_for_initial_writes(&hashed_keys_for_zero_values) .await; latency.report_with_count(hashed_keys_for_zero_values.len()); @@ -252,7 +252,7 @@ impl L1BatchWithLogs { let write_matters = if value.is_zero() { let initial_write_batch_for_key = l1_batches_for_initial_writes.get(&storage_key.hashed_key()); - initial_write_batch_for_key.map_or(false, |&number| number <= l1_batch_number) + initial_write_batch_for_key.map_or(false, |&(number, _)| number <= l1_batch_number) } else { true }; diff --git a/core/lib/zksync_core/src/metadata_calculator/tests.rs b/core/lib/zksync_core/src/metadata_calculator/tests.rs index e5e6e1f43ba5..269b48f28208 100644 --- a/core/lib/zksync_core/src/metadata_calculator/tests.rs +++ b/core/lib/zksync_core/src/metadata_calculator/tests.rs @@ -669,12 +669,12 @@ async fn deduplication_works_as_expected(pool: ConnectionPool) { let initial_writes = storage .storage_logs_dal() - .get_l1_batches_for_initial_writes(&hashed_keys) + .get_l1_batches_and_indices_for_initial_writes(&hashed_keys) .await; assert_eq!(initial_writes.len(), hashed_keys.len()); assert!(initial_writes .values() - .all(|&batch| batch == L1BatchNumber(1))); + .all(|&(batch, _)| batch == L1BatchNumber(1))); let mut new_logs = gen_storage_logs(120..140, 1).pop().unwrap(); let new_hashed_keys: Vec<_> = new_logs.iter().map(|log| log.key.hashed_key()).collect(); @@ -688,21 +688,21 @@ async fn deduplication_works_as_expected(pool: ConnectionPool) { // Initial writes for previously inserted keys should not change. 
let initial_writes = storage .storage_logs_dal() - .get_l1_batches_for_initial_writes(&hashed_keys) + .get_l1_batches_and_indices_for_initial_writes(&hashed_keys) .await; assert_eq!(initial_writes.len(), hashed_keys.len()); assert!(initial_writes .values() - .all(|&batch| batch == L1BatchNumber(1))); + .all(|&(batch, _)| batch == L1BatchNumber(1))); let initial_writes = storage .storage_logs_dal() - .get_l1_batches_for_initial_writes(&new_hashed_keys) + .get_l1_batches_and_indices_for_initial_writes(&new_hashed_keys) .await; assert_eq!(initial_writes.len(), new_hashed_keys.len()); assert!(initial_writes .values() - .all(|&batch| batch == L1BatchNumber(2))); + .all(|&(batch, _)| batch == L1BatchNumber(2))); let mut no_op_logs = gen_storage_logs(140..160, 1).pop().unwrap(); let no_op_hashed_keys: Vec<_> = no_op_logs.iter().map(|log| log.key.hashed_key()).collect(); @@ -713,7 +713,7 @@ async fn deduplication_works_as_expected(pool: ConnectionPool) { let initial_writes = storage .storage_logs_dal() - .get_l1_batches_for_initial_writes(&no_op_hashed_keys) + .get_l1_batches_and_indices_for_initial_writes(&no_op_hashed_keys) .await; assert!(initial_writes.is_empty()); @@ -730,10 +730,10 @@ async fn deduplication_works_as_expected(pool: ConnectionPool) { let initial_writes = storage .storage_logs_dal() - .get_l1_batches_for_initial_writes(&no_op_hashed_keys) + .get_l1_batches_and_indices_for_initial_writes(&no_op_hashed_keys) .await; assert_eq!(initial_writes.len(), no_op_hashed_keys.len() / 2); for key in no_op_hashed_keys.iter().step_by(2) { - assert_eq!(initial_writes[key], L1BatchNumber(4)); + assert_eq!(initial_writes[key].0, L1BatchNumber(4)); } } diff --git a/core/lib/zksync_core/src/state_keeper/batch_executor/mod.rs b/core/lib/zksync_core/src/state_keeper/batch_executor/mod.rs index de4b0289cea0..5ebb14f99b78 100644 --- a/core/lib/zksync_core/src/state_keeper/batch_executor/mod.rs +++ b/core/lib/zksync_core/src/state_keeper/batch_executor/mod.rs @@ -84,6 +84,7 @@ pub struct MainBatchExecutorBuilder { save_call_traces: bool, max_allowed_tx_gas_limit: U256, upload_witness_inputs_to_gcs: bool, + enum_index_migration_chunk_size: usize, } impl MainBatchExecutorBuilder { @@ -93,6 +94,7 @@ impl MainBatchExecutorBuilder { max_allowed_tx_gas_limit: U256, save_call_traces: bool, upload_witness_inputs_to_gcs: bool, + enum_index_migration_chunk_size: usize, ) -> Self { Self { state_keeper_db_path, @@ -100,6 +102,7 @@ impl MainBatchExecutorBuilder { save_call_traces, max_allowed_tx_gas_limit, upload_witness_inputs_to_gcs, + enum_index_migration_chunk_size, } } } @@ -112,6 +115,7 @@ impl L1BatchExecutorBuilder for MainBatchExecutorBuilder { system_env: SystemEnv, ) -> BatchExecutorHandle { let mut secondary_storage = RocksdbStorage::new(self.state_keeper_db_path.as_ref()); + secondary_storage.enable_enum_index_migration(self.enum_index_migration_chunk_size); let mut conn = self .pool .access_storage_tagged("state_keeper") diff --git a/core/lib/zksync_core/src/state_keeper/batch_executor/tests/tester.rs b/core/lib/zksync_core/src/state_keeper/batch_executor/tests/tester.rs index d41b0c98a82a..27e59c0110e5 100644 --- a/core/lib/zksync_core/src/state_keeper/batch_executor/tests/tester.rs +++ b/core/lib/zksync_core/src/state_keeper/batch_executor/tests/tester.rs @@ -172,16 +172,27 @@ impl Tester { address, ); let value = u256_to_h256(eth_amount); - let storage_logs = vec![StorageLog::new_write_log(key, value)]; + let storage_log = StorageLog::new_write_log(key, value); storage .storage_logs_dal() - 
.append_storage_logs(MiniblockNumber(0), &[(H256::zero(), storage_logs.clone())]) + .append_storage_logs(MiniblockNumber(0), &[(H256::zero(), vec![storage_log])]) .await; storage .storage_dal() - .apply_storage_logs(&[(H256::zero(), storage_logs)]) + .apply_storage_logs(&[(H256::zero(), vec![storage_log])]) .await; + if storage + .storage_logs_dedup_dal() + .filter_written_slots(&[storage_log.key.hashed_key()]) + .await + .is_empty() + { + storage + .storage_logs_dedup_dal() + .insert_initial_writes(L1BatchNumber(0), &[storage_log.key]) + .await + } } } } diff --git a/core/lib/zksync_core/src/state_keeper/mod.rs b/core/lib/zksync_core/src/state_keeper/mod.rs index 8eef5d6adbc3..5d5b2dac7a35 100644 --- a/core/lib/zksync_core/src/state_keeper/mod.rs +++ b/core/lib/zksync_core/src/state_keeper/mod.rs @@ -59,6 +59,7 @@ where state_keeper_config.max_allowed_l2_tx_gas_limit.into(), state_keeper_config.save_call_traces, state_keeper_config.upload_witness_inputs_to_gcs, + state_keeper_config.enum_index_migration_chunk_size(), ); let io = MempoolIO::new( From 7e231887d3f0585dcccae730bc6b223dd96bd668 Mon Sep 17 00:00:00 2001 From: AnastasiiaVashchuk <72273339+AnastasiiaVashchuk@users.noreply.github.com> Date: Tue, 10 Oct 2023 15:03:44 +0300 Subject: [PATCH 29/29] chore: Add stage2 upgrade data (#186) --- .../common.json | 5 ++ .../stage2/transactions.json | 59 +++++++++++++++++++ 2 files changed, 64 insertions(+) create mode 100644 etc/upgrades/1696936385-refunds-enhancement/common.json create mode 100644 etc/upgrades/1696936385-refunds-enhancement/stage2/transactions.json diff --git a/etc/upgrades/1696936385-refunds-enhancement/common.json b/etc/upgrades/1696936385-refunds-enhancement/common.json new file mode 100644 index 000000000000..a7f26f0c70d8 --- /dev/null +++ b/etc/upgrades/1696936385-refunds-enhancement/common.json @@ -0,0 +1,5 @@ +{ + "name": "refunds-enhancement", + "creationTimestamp": 1696936385, + "protocolVersion": "16" +} diff --git a/etc/upgrades/1696936385-refunds-enhancement/stage2/transactions.json b/etc/upgrades/1696936385-refunds-enhancement/stage2/transactions.json new file mode 100644 index 000000000000..4f982017d535 --- /dev/null +++ b/etc/upgrades/1696936385-refunds-enhancement/stage2/transactions.json @@ -0,0 +1,59 @@ +{ + "proposeUpgradeTx": { + "l2ProtocolUpgradeTx": { + "txType": 0, + "from": "0x0000000000000000000000000000000000000000", + "to": "0x0000000000000000000000000000000000000000", + "gasLimit": 0, + "gasPerPubdataByteLimit": 0, + "maxFeePerGas": 0, + "maxPriorityFeePerGas": 0, + "paymaster": 0, + "nonce": 0, + "value": 0, + "reserved": [ + 0, + 0, + 0, + 0 + ], + "data": "0x", + "signature": "0x", + "factoryDeps": [], + "paymasterInput": "0x", + "reservedDynamic": "0x" + }, + "bootloaderHash": "0x0000000000000000000000000000000000000000000000000000000000000000", + "defaultAccountHash": "0x0000000000000000000000000000000000000000000000000000000000000000", + "verifier": "0x0000000000000000000000000000000000000000", + "verifierParams": { + "recursionNodeLevelVkHash": "0x0000000000000000000000000000000000000000000000000000000000000000", + "recursionLeafLevelVkHash": "0x0000000000000000000000000000000000000000000000000000000000000000", + "recursionCircuitsSetVksHash": "0x0000000000000000000000000000000000000000000000000000000000000000" + }, + "l1ContractsUpgradeCalldata": "0x", + "postUpgradeCalldata": "0x", + "upgradeTimestamp": { + "type": "BigNumber", + "hex": "0x65251ac4" + }, + "factoryDeps": [], + "newProtocolVersion": "16", + "newAllowList": 
"0x0000000000000000000000000000000000000000" + }, + "l1upgradeCalldata": "0x1ed824a0000000000000000000000000000000000000000000000000000000000000002000000000000000000000000000000000000000000000000000000000000001a000000000000000000000000000000000000000000000000000000000000004a000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000004c000000000000000000000000000000000000000000000000000000000000004e00000000000000000000000000000000000000000000000000000000065251ac400000000000000000000000000000000000000000000000000000000000000100000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000260000000000000000000000000000000000000000000000000000000000000028000000000000000000000000000000000000000000000000000000000000002a000000000000000000000000000000000000000000000000000000000000002c000000000000000000000000000000000000000000000000000000000000002e000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000", + "upgradeAddress": "0xdb876240F01A6dd38F5EFC4EcEFe52e5C13dB3C7", + "protocolVersion": "16", + "diamondUpgradeProposalId": { + "type": "BigNumber", + "hex": "0x08" + }, + "upgradeTimestamp": "1696930500", + "proposeTransparentUpgradeCalldata": 
"0x8043760a000000000000000000000000000000000000000000000000000000000000004000000000000000000000000000000000000000000000000000000000000000080000000000000000000000000000000000000000000000000000000000000060000000000000000000000000db876240f01a6dd38f5efc4ecefe52e5c13db3c70000000000000000000000000000000000000000000000000000000000000080000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000005241ed824a0000000000000000000000000000000000000000000000000000000000000002000000000000000000000000000000000000000000000000000000000000001a000000000000000000000000000000000000000000000000000000000000004a000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000004c000000000000000000000000000000000000000000000000000000000000004e00000000000000000000000000000000000000000000000000000000065251ac400000000000000000000000000000000000000000000000000000000000000100000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000260000000000000000000000000000000000000000000000000000000000000028000000000000000000000000000000000000000000000000000000000000002a000000000000000000000000000000000000000000000000000000000000002c000000000000000000000000000000000000000000000000000000000000002e00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000", + "transparentUpgrade": { + "facetCuts": [], + "initAddress": "0xdb876240F01A6dd38F5EFC4EcEFe52e5C13dB3C7", + "initCalldata": 
"0x1ed824a0000000000000000000000000000000000000000000000000000000000000002000000000000000000000000000000000000000000000000000000000000001a000000000000000000000000000000000000000000000000000000000000004a000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000004c000000000000000000000000000000000000000000000000000000000000004e00000000000000000000000000000000000000000000000000000000065251ac400000000000000000000000000000000000000000000000000000000000000100000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000260000000000000000000000000000000000000000000000000000000000000028000000000000000000000000000000000000000000000000000000000000002a000000000000000000000000000000000000000000000000000000000000002c000000000000000000000000000000000000000000000000000000000000002e000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000" + }, + "executeUpgradeCalldata": 
"0x36d4eb84000000000000000000000000000000000000000000000000000000000000004000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000060000000000000000000000000db876240f01a6dd38f5efc4ecefe52e5c13db3c70000000000000000000000000000000000000000000000000000000000000080000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000005241ed824a0000000000000000000000000000000000000000000000000000000000000002000000000000000000000000000000000000000000000000000000000000001a000000000000000000000000000000000000000000000000000000000000004a000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000004c000000000000000000000000000000000000000000000000000000000000004e00000000000000000000000000000000000000000000000000000000065251ac400000000000000000000000000000000000000000000000000000000000000100000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000260000000000000000000000000000000000000000000000000000000000000028000000000000000000000000000000000000000000000000000000000000002a000000000000000000000000000000000000000000000000000000000000002c000000000000000000000000000000000000000000000000000000000000002e00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000" +} \ No newline at end of file