diff --git a/.github/release-please/manifest.json b/.github/release-please/manifest.json index 86dfd338a8cd..1daa63226906 100644 --- a/.github/release-please/manifest.json +++ b/.github/release-please/manifest.json @@ -1,5 +1,5 @@ { - "core": "24.15.0", + "core": "24.16.0", "prover": "16.3.0", "zk_toolbox": "0.1.1" } diff --git a/.github/workflows/ci-core-reusable.yml b/.github/workflows/ci-core-reusable.yml index 93aa1bb1658b..36564600d832 100644 --- a/.github/workflows/ci-core-reusable.yml +++ b/.github/workflows/ci-core-reusable.yml @@ -67,6 +67,10 @@ jobs: loadtest: runs-on: [matterlabs-ci-runner] + strategy: + fail-fast: false + matrix: + vm_mode: ["old", "new"] steps: - uses: actions/checkout@a5ac7e51b41094c92402da3b24376905380afc29 # v4 @@ -82,7 +86,8 @@ jobs: - name: Loadtest configuration run: | - echo EXPECTED_TX_COUNT="16000" >> .env + echo EXPECTED_TX_COUNT=${{ matrix.vm_mode == 'new' && 24000 || 18000 }} >> .env + echo ACCOUNTS_AMOUNT="150" >> .env echo FAIL_FAST=true >> .env echo IN_DOCKER=1 >> .env echo DATABASE_MERKLE_TREE_MODE=lightweight >> .env @@ -105,7 +110,9 @@ jobs: # `sleep 60` because we need to wait until server added all the tokens - name: Run server run: | - ci_run zk server --uring --components api,tree,eth,state_keeper,housekeeper,commitment_generator,vm_runner_protective_reads &>server.log & + EXPERIMENTAL_VM_STATE_KEEPER_FAST_VM_MODE=${{ matrix.vm_mode }} \ + PASSED_ENV_VARS="EXPERIMENTAL_VM_STATE_KEEPER_FAST_VM_MODE" \ + ci_run zk server --uring --components api,tree,eth,state_keeper,housekeeper,commitment_generator,vm_runner_protective_reads &>server.log & ci_run sleep 60 - name: Deploy legacy era contracts @@ -135,7 +142,7 @@ jobs: base_token: ["Eth", "Custom"] deployment_mode: ["Rollup", "Validium"] env: - SERVER_COMPONENTS: "api,tree,eth,state_keeper,housekeeper,commitment_generator,vm_runner_protective_reads,vm_runner_bwip,da_dispatcher${{ matrix.consensus && ',consensus' || '' }}${{ matrix.base_token == 'Custom' && ',base_token_ratio_persister' || '' }}" + SERVER_COMPONENTS: "api,tree,eth,state_keeper,housekeeper,commitment_generator,vm_runner_protective_reads,vm_runner_bwip,vm_playground,da_dispatcher${{ matrix.consensus && ',consensus' || '' }}${{ matrix.base_token == 'Custom' && ',base_token_ratio_persister' || '' }}" runs-on: [matterlabs-ci-runner] steps: diff --git a/Cargo.lock b/Cargo.lock index 446bf4a8e956..ce20580b3856 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -722,9 +722,9 @@ dependencies = [ [[package]] name = "boojum" -version = "0.2.1" +version = "0.2.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0cf10f4b3980dc82dc31709dfa8193b7d6106a3a7ce9f9a9f8872bfb8719aa2d" +checksum = "df88daa33db46d683967ca09a4f04817c38950483f2501a771d497669a8a4bb1" dependencies = [ "arrayvec 0.7.4", "bincode", @@ -1045,14 +1045,14 @@ dependencies = [ [[package]] name = "circuit_encodings" -version = "0.150.2-rc.2" +version = "0.150.2-rc.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e59747066b9a0d1a15d45f5837658aec5d53744fb643954f9dcc412f76c0d346" +checksum = "c928cad0aeeb35e86f8605376fdbb27b506cfcec14af1f532459a47e34d8b6f9" dependencies = [ "derivative", "serde", "zk_evm 0.150.0", - "zkevm_circuits 0.150.1", + "zkevm_circuits 0.150.2", ] [[package]] @@ -1112,12 +1112,12 @@ dependencies = [ [[package]] name = "circuit_sequencer_api" -version = "0.150.2-rc.2" +version = "0.150.2-rc.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"dbfeb50910b20c4f05cc51700b2396a655cef9e6f0c84debd71cb02ce4853902" +checksum = "18d8ca58b9bb7c63a75813c96a5a80314fd70013d7929f61fc0e9e69b0e440a7" dependencies = [ "bellman_ce", - "circuit_encodings 0.150.2-rc.2", + "circuit_encodings 0.150.2-rc.3", "derivative", "rayon", "serde", @@ -1578,9 +1578,9 @@ dependencies = [ [[package]] name = "cs_derive" -version = "0.2.1" +version = "0.2.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ab1f510bfddd1fc643a1d1bf8a405e279ffc818ee7ac86ed658e667a44958178" +checksum = "24cf603ca4299c6e20e644da88897f7b81d688510f4887e818b0bfe0b792081b" dependencies = [ "proc-macro-error", "proc-macro2 1.0.86", @@ -1588,6 +1588,16 @@ dependencies = [ "syn 1.0.109", ] +[[package]] +name = "ctor" +version = "0.2.8" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "edb49164822f3ee45b17acd4a208cfc1251410cf0cad9a833234c9890774dd9f" +dependencies = [ + "quote 1.0.36", + "syn 2.0.72", +] + [[package]] name = "ctr" version = "0.9.2" @@ -1923,6 +1933,18 @@ dependencies = [ "cfg-if 1.0.0", ] +[[package]] +name = "enum_dispatch" +version = "0.3.13" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "aa18ce2bc66555b3218614519ac839ddb759a7d6720732f979ef8d13be147ecd" +dependencies = [ + "once_cell", + "proc-macro2 1.0.86", + "quote 1.0.36", + "syn 2.0.72", +] + [[package]] name = "env_filter" version = "0.1.0" @@ -3499,26 +3521,6 @@ version = "0.5.6" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "0717cef1bc8b636c6e1c1bbdefc09e6322da8a9321966e8928ef80d20f7f770f" -[[package]] -name = "linkme" -version = "0.3.17" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "91ed2ee9464ff9707af8e9ad834cffa4802f072caad90639c583dd3c62e6e608" -dependencies = [ - "linkme-impl", -] - -[[package]] -name = "linkme-impl" -version = "0.3.17" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ba125974b109d512fccbc6c0244e7580143e460895dfd6ea7f8bbb692fd94396" -dependencies = [ - "proc-macro2 1.0.86", - "quote 1.0.36", - "syn 2.0.72", -] - [[package]] name = "linux-raw-sys" version = "0.4.14" @@ -7238,13 +7240,13 @@ checksum = "49874b5167b65d7193b8aba1567f5c7d93d001cafc34600cee003eda787e483f" [[package]] name = "vise" -version = "0.1.0" +version = "0.2.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "229baafe01d5177b63c6ee1def80d8e39a2365e64caf69ddb05a57594b15647c" +checksum = "90ade36f3548b1524396f4de7b36f4f210c8a01dfab568eb2bff466af64eb6e5" dependencies = [ "compile-fmt", + "ctor", "elsa", - "linkme", "once_cell", "prometheus-client", "vise-macros", @@ -7252,9 +7254,9 @@ dependencies = [ [[package]] name = "vise-exporter" -version = "0.1.0" +version = "0.2.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "23981b18d697026f5430249ab01ba739ef2edc463e400042394331cb2bb63494" +checksum = "671d3b894d5d0849f0a597f56bf071f42d4f2a1cbcf2f78ca21f870ab7c0cc2b" dependencies = [ "hyper 0.14.29", "once_cell", @@ -7265,9 +7267,9 @@ dependencies = [ [[package]] name = "vise-macros" -version = "0.1.0" +version = "0.2.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8bb19c33cd5f04dcf4e767635e058a998edbc2b7fca32ade0a4a1cea0f8e9b34" +checksum = "6a511871dc5de990a3b2a0e715facfbc5da848c0c0395597a1415029fb7c250a" dependencies = [ "proc-macro2 1.0.86", "quote 1.0.36", @@ -7286,6 +7288,17 @@ dependencies = [ "zksync_vm_benchmark_harness", ] +[[package]] +name = "vm2" 
+version = "0.1.0" +source = "git+https://github.com/matter-labs/vm2.git?rev=9a38900d7af9b1d72b47ce3be980e77c1239a61d#9a38900d7af9b1d72b47ce3be980e77c1239a61d" +dependencies = [ + "enum_dispatch", + "primitive-types", + "zk_evm_abstractions 0.150.0", + "zkevm_opcode_defs 0.150.0", +] + [[package]] name = "walkdir" version = "2.4.0" @@ -7957,9 +7970,9 @@ dependencies = [ [[package]] name = "zkevm_circuits" -version = "0.150.1" +version = "0.150.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e8a85c1987a1e7e89f1f8c39ca19bffb61521e719050086372aaea8817f403fc" +checksum = "94d97632ba26e4e6a77a680d6b2bfbcc6f7b9b722976ee31afb922d16a675d45" dependencies = [ "arrayvec 0.7.4", "boojum", @@ -8115,7 +8128,7 @@ dependencies = [ "anyhow", "circuit_sequencer_api 0.140.0", "circuit_sequencer_api 0.141.1", - "circuit_sequencer_api 0.150.2-rc.2", + "circuit_sequencer_api 0.150.2-rc.3", "futures 0.3.28", "itertools 0.10.5", "num_cpus", @@ -8142,9 +8155,9 @@ dependencies = [ [[package]] name = "zksync_concurrency" -version = "0.1.0-rc.8" +version = "0.1.0-rc.9" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "209b83578357184ab72af4d4cb2eca76f85e5f2f35d739a47e3fd5931eb9252d" +checksum = "a9f9a4352244ccd5e5fd34fb0d029861a5f57b05c80fe7944a7b532f54c58f89" dependencies = [ "anyhow", "once_cell", @@ -8167,6 +8180,7 @@ dependencies = [ "rand 0.8.5", "secrecy", "serde", + "tracing", "url", "zksync_basic_types", "zksync_concurrency", @@ -8177,9 +8191,9 @@ dependencies = [ [[package]] name = "zksync_consensus_bft" -version = "0.1.0-rc.8" +version = "0.1.0-rc.9" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a7c7f1613bdb9d02b21d66ab60bdf6523456dcc5006290cd67702d3f729f549e" +checksum = "f69309c1c9e2c730b8858af2301cc8762280dab8b838b571524e4d43107aa7ff" dependencies = [ "anyhow", "async-trait", @@ -8199,9 +8213,9 @@ dependencies = [ [[package]] name = "zksync_consensus_crypto" -version = "0.1.0-rc.8" +version = "0.1.0-rc.9" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9e1abf1f3d9c8109da32a6d5e61a2a64a61b0bff90fdd355992522a4e8a57e69" +checksum = "f8c91270540e8db9479e1eaedaf0e600de468f71ccd5dc7c0258072e743830e6" dependencies = [ "anyhow", "blst", @@ -8223,9 +8237,9 @@ dependencies = [ [[package]] name = "zksync_consensus_executor" -version = "0.1.0-rc.8" +version = "0.1.0-rc.9" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8495b9056a895ee4e720b803d3e26ffad18776ae374805bab34a5ff5b648be6e" +checksum = "a05b45ae9c0bf45f4acc6833dca34907404d1ddd9041a5cd554751c2c5710764" dependencies = [ "anyhow", "async-trait", @@ -8244,9 +8258,9 @@ dependencies = [ [[package]] name = "zksync_consensus_network" -version = "0.1.0-rc.8" +version = "0.1.0-rc.9" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "42ec4a076c63c76599711a7dc28cdf3a7923b6bc7720bc572ea11e92fb2b526f" +checksum = "477abd01af60faa5afffbff651cbdf9d108bcae4f1326b508bc84063126d34f9" dependencies = [ "anyhow", "async-trait", @@ -8279,9 +8293,9 @@ dependencies = [ [[package]] name = "zksync_consensus_roles" -version = "0.1.0-rc.8" +version = "0.1.0-rc.9" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "32e0b04d43a542a3bb1af0ac4c0a17acf6b743607c3cb9028192df0c7d2f5b24" +checksum = "87e79025fd678ec2733add1697645827e9daed3f120c8cebf43513ac17e65b63" dependencies = [ "anyhow", "bit-vec", @@ -8301,9 +8315,9 @@ dependencies = [ [[package]] name = "zksync_consensus_storage" 
-version = "0.1.0-rc.8" +version = "0.1.0-rc.9" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0c39f79628bd6685f7ec5561874c007f5d26693d6ba7e5595dfa260981e8f006" +checksum = "470991a42d5f9a3f2385ebe52889e63742d95d141b80b95a1eabe9f51e18cb7e" dependencies = [ "anyhow", "async-trait", @@ -8321,9 +8335,9 @@ dependencies = [ [[package]] name = "zksync_consensus_utils" -version = "0.1.0-rc.8" +version = "0.1.0-rc.9" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c4514629a34abdf943ef911c16228dfec656edb02d8412db4febd4df5ccf3f91" +checksum = "6c43283b5813fd887e0e7ccaee73c6e41907b1de311a3a01b2fa5f2e3f2ba503" dependencies = [ "anyhow", "rand 0.8.5", @@ -8661,7 +8675,7 @@ dependencies = [ [[package]] name = "zksync_external_node" -version = "24.15.0" +version = "24.16.0" dependencies = [ "anyhow", "assert_matches", @@ -8734,6 +8748,22 @@ dependencies = [ "zksync_types", ] +[[package]] +name = "zksync_external_proof_integration_api" +version = "0.1.0" +dependencies = [ + "anyhow", + "axum", + "bincode", + "tokio", + "tracing", + "zksync_basic_types", + "zksync_config", + "zksync_dal", + "zksync_object_store", + "zksync_prover_interface", +] + [[package]] name = "zksync_health_check" version = "0.1.0" @@ -8767,9 +8797,9 @@ dependencies = [ [[package]] name = "zksync_kzg" -version = "0.150.2-rc.2" +version = "0.150.2-rc.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c44edd3a3316dcab45aab7e190c96150f2586d4a92fa21f93dcc20178308313a" +checksum = "9235fbdaa98f27b9aacaa861bcb850b6b0dbf37e59477ce3f08c64555a25d00d" dependencies = [ "boojum", "derivative", @@ -8779,7 +8809,7 @@ dependencies = [ "serde", "serde_json", "serde_with", - "zkevm_circuits 0.150.1", + "zkevm_circuits 0.150.2", ] [[package]] @@ -8891,16 +8921,18 @@ dependencies = [ "circuit_sequencer_api 0.140.0", "circuit_sequencer_api 0.141.1", "circuit_sequencer_api 0.142.0", - "circuit_sequencer_api 0.150.2-rc.2", + "circuit_sequencer_api 0.150.2-rc.3", "ethabi", "hex", "itertools 0.10.5", "once_cell", + "pretty_assertions", "serde", "thiserror", "tokio", "tracing", "vise", + "vm2", "zk_evm 0.131.0-rc.2", "zk_evm 0.133.0", "zk_evm 0.140.0", @@ -9083,6 +9115,7 @@ dependencies = [ "zksync_eth_sender", "zksync_eth_watch", "zksync_external_price_api", + "zksync_external_proof_integration_api", "zksync_health_check", "zksync_house_keeper", "zksync_metadata_calculator", @@ -9261,9 +9294,9 @@ dependencies = [ [[package]] name = "zksync_protobuf" -version = "0.1.0-rc.8" +version = "0.1.0-rc.9" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "53128384270314bfbd4e044c15138af63cb3a505ab95bb3339f3b866ccbe211c" +checksum = "b5db598a518958b244aed5e3f925c763808429a5ea022bb50957b98e68540495" dependencies = [ "anyhow", "bit-vec", @@ -9282,9 +9315,9 @@ dependencies = [ [[package]] name = "zksync_protobuf_build" -version = "0.1.0-rc.8" +version = "0.1.0-rc.9" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0d7dfb4dcdd48ab5fa1ccff25f585d73b58cf95e0fb74e96618dd666f198a005" +checksum = "4047ed624c7a19e206125f8259f7e175ad70020beeb66e1975e068af060d2fb5" dependencies = [ "anyhow", "heck 0.5.0", @@ -9332,7 +9365,7 @@ version = "0.1.0" dependencies = [ "bincode", "chrono", - "circuit_sequencer_api 0.150.2-rc.2", + "circuit_sequencer_api 0.150.2-rc.3", "serde", "serde_json", "serde_with", @@ -9734,6 +9767,7 @@ dependencies = [ "futures 0.3.28", "once_cell", "rand 0.8.5", + "serde", "tempfile", "test-casing", "tokio", @@ 
-9741,6 +9775,7 @@ dependencies = [ "vise", "zksync_contracts", "zksync_dal", + "zksync_health_check", "zksync_multivm", "zksync_node_genesis", "zksync_node_test_utils", diff --git a/Cargo.toml b/Cargo.toml index 4210911a2596..06bd6669b679 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -36,6 +36,7 @@ members = [ "core/node/api_server", "core/node/tee_verifier_input_producer", "core/node/base_token_adjuster", + "core/node/external_proof_integration_api", # Libraries "core/lib/db_connection", "core/lib/zksync_core_leftovers", @@ -77,7 +78,6 @@ members = [ "core/tests/loadnext", "core/tests/vm-benchmark", "core/tests/vm-benchmark/harness", - # Parts of prover workspace that are needed for Core workspace "prover/crates/lib/prover_dal", ] @@ -194,8 +194,8 @@ proc-macro2 = "1.0" trybuild = "1.0" # "Internal" dependencies -vise = "0.1.0" -vise-exporter = "0.1.0" +vise = "0.2.0" +vise-exporter = "0.2.0" # Here and below: # We *always* pin the latest version of protocol to disallow accidental changes in the execution logic. @@ -205,9 +205,9 @@ circuit_sequencer_api_1_3_3 = { package = "circuit_sequencer_api", version = "0. circuit_sequencer_api_1_4_0 = { package = "circuit_sequencer_api", version = "0.140" } circuit_sequencer_api_1_4_1 = { package = "circuit_sequencer_api", version = "0.141" } circuit_sequencer_api_1_4_2 = { package = "circuit_sequencer_api", version = "0.142" } -circuit_sequencer_api_1_5_0 = { package = "circuit_sequencer_api", version = "=0.150.2-rc.2" } +circuit_sequencer_api_1_5_0 = { package = "circuit_sequencer_api", version = "=0.150.2-rc.3" } crypto_codegen = { package = "zksync_solidity_vk_codegen", version = "=0.1.0" } -kzg = { package = "zksync_kzg", version = "=0.150.2-rc.2" } +kzg = { package = "zksync_kzg", version = "=0.150.2-rc.3" } zk_evm = { version = "=0.133.0" } zk_evm_1_3_1 = { package = "zk_evm", version = "0.131.0-rc.2" } zk_evm_1_3_3 = { package = "zk_evm", version = "0.133.0" } @@ -215,17 +215,20 @@ zk_evm_1_4_0 = { package = "zk_evm", version = "0.140.0" } zk_evm_1_4_1 = { package = "zk_evm", version = "0.141.0" } zk_evm_1_5_0 = { package = "zk_evm", version = "=0.150.0" } +# New VM; pinned to a specific commit because of instability +vm2 = { git = "https://github.com/matter-labs/vm2.git", rev = "9a38900d7af9b1d72b47ce3be980e77c1239a61d" } + # Consensus dependencies. 
-zksync_concurrency = "=0.1.0-rc.8" -zksync_consensus_bft = "=0.1.0-rc.8" -zksync_consensus_crypto = "=0.1.0-rc.8" -zksync_consensus_executor = "=0.1.0-rc.8" -zksync_consensus_network = "=0.1.0-rc.8" -zksync_consensus_roles = "=0.1.0-rc.8" -zksync_consensus_storage = "=0.1.0-rc.8" -zksync_consensus_utils = "=0.1.0-rc.8" -zksync_protobuf = "=0.1.0-rc.8" -zksync_protobuf_build = "=0.1.0-rc.8" +zksync_concurrency = "=0.1.0-rc.9" +zksync_consensus_bft = "=0.1.0-rc.9" +zksync_consensus_crypto = "=0.1.0-rc.9" +zksync_consensus_executor = "=0.1.0-rc.9" +zksync_consensus_network = "=0.1.0-rc.9" +zksync_consensus_roles = "=0.1.0-rc.9" +zksync_consensus_storage = "=0.1.0-rc.9" +zksync_consensus_utils = "=0.1.0-rc.9" +zksync_protobuf = "=0.1.0-rc.9" +zksync_protobuf_build = "=0.1.0-rc.9" # "Local" dependencies zksync_multivm = { version = "0.1.0", path = "core/lib/multivm" } @@ -282,6 +285,7 @@ zksync_eth_sender = { version = "0.1.0", path = "core/node/eth_sender" } zksync_node_db_pruner = { version = "0.1.0", path = "core/node/db_pruner" } zksync_node_fee_model = { version = "0.1.0", path = "core/node/fee_model" } zksync_vm_runner = { version = "0.1.0", path = "core/node/vm_runner" } +zksync_external_proof_integration_api = { version = "0.1.0", path = "core/node/external_proof_integration_api" } zksync_node_test_utils = { version = "0.1.0", path = "core/node/test_utils" } zksync_state_keeper = { version = "0.1.0", path = "core/node/state_keeper" } zksync_reorg_detector = { version = "0.1.0", path = "core/node/reorg_detector" } diff --git a/core/CHANGELOG.md b/core/CHANGELOG.md index 8488606a4058..2632d997c21d 100644 --- a/core/CHANGELOG.md +++ b/core/CHANGELOG.md @@ -1,5 +1,22 @@ # Changelog +## [24.16.0](https://github.com/matter-labs/zksync-era/compare/core-v24.15.0...core-v24.16.0) (2024-08-08) + + +### Features + +* External prover API ([#2538](https://github.com/matter-labs/zksync-era/issues/2538)) ([129a181](https://github.com/matter-labs/zksync-era/commit/129a1819262d64a36d651af01fdab93c5ff91712)) +* **node-framework:** Add API fee params resource ([#2621](https://github.com/matter-labs/zksync-era/issues/2621)) ([aff7b65](https://github.com/matter-labs/zksync-era/commit/aff7b6535ef92aaced0dd7fa1cc08d656cba027e)) +* **vlog:** Expose more resource values via opentelemetry ([#2620](https://github.com/matter-labs/zksync-era/issues/2620)) ([7ae07e4](https://github.com/matter-labs/zksync-era/commit/7ae07e446c9732a896ca8246d324e82c6e6d5a46)) +* **vlog:** Report observability config, flush, and shutdown ([#2622](https://github.com/matter-labs/zksync-era/issues/2622)) ([e23e661](https://github.com/matter-labs/zksync-era/commit/e23e6611731835ef3abd34f3f9867f9dc533eb21)) + + +### Bug Fixes + +* Bump prover dependencies & rust toolchain ([#2600](https://github.com/matter-labs/zksync-era/issues/2600)) ([849c6a5](https://github.com/matter-labs/zksync-era/commit/849c6a5dcd095e8fead0630a2a403f282c26a2aa)) +* **en:** Initialize SyncState in OutputHandler ([#2618](https://github.com/matter-labs/zksync-era/issues/2618)) ([f0c8506](https://github.com/matter-labs/zksync-era/commit/f0c85062fac96180c2e0dec52086714ee3783fcf)) +* restrictive genesis parsing ([#2605](https://github.com/matter-labs/zksync-era/issues/2605)) ([d5f8f38](https://github.com/matter-labs/zksync-era/commit/d5f8f3892a14180f590cabab921d3a68dec903e3)) + ## [24.15.0](https://github.com/matter-labs/zksync-era/compare/core-v24.14.0...core-v24.15.0) (2024-08-07) diff --git a/core/bin/block_reverter/src/main.rs b/core/bin/block_reverter/src/main.rs 
index f7a9ca9f9def..65810a6e9b67 100644 --- a/core/bin/block_reverter/src/main.rs +++ b/core/bin/block_reverter/src/main.rs @@ -8,7 +8,7 @@ use tokio::{ }; use zksync_block_reverter::{ eth_client::{ - clients::{Client, PKSigningClient}, + clients::{Client, PKSigningClient, L1}, EthInterface, }, BlockReverter, BlockReverterEthConfig, NodeRole, @@ -251,7 +251,7 @@ async fn main() -> anyhow::Result<()> { json, operator_address, } => { - let eth_client = Client::http(l1_secrets.l1_rpc_url.clone()) + let eth_client = Client::<L1>::http(l1_secrets.l1_rpc_url.clone()) .context("Ethereum client")? .build(); diff --git a/core/bin/external_node/Cargo.toml b/core/bin/external_node/Cargo.toml index 96a09a2ba2a9..4a3a4f14a556 100644 --- a/core/bin/external_node/Cargo.toml +++ b/core/bin/external_node/Cargo.toml @@ -1,7 +1,7 @@ [package] name = "zksync_external_node" description = "Non-validator ZKsync node" -version = "24.15.0" # x-release-please-version +version = "24.16.0" # x-release-please-version edition.workspace = true authors.workspace = true homepage.workspace = true diff --git a/core/bin/external_node/src/config/mod.rs b/core/bin/external_node/src/config/mod.rs index b0f0fa794fa1..80cfde02e5c6 100644 --- a/core/bin/external_node/src/config/mod.rs +++ b/core/bin/external_node/src/config/mod.rs @@ -223,12 +223,6 @@ impl RemoteENConfig { } } -#[derive(Debug, Deserialize)] -pub(crate) enum BlockFetcher { - ServerAPI, - Consensus, -} - /// This part of the external node config is completely optional to provide. /// It can tweak limits of the API, delay intervals of certain components, etc. /// If any of the fields are not provided, the default values will be used. diff --git a/core/bin/external_node/src/main.rs b/core/bin/external_node/src/main.rs index 2a219222d1b1..d24757829fa8 100644 --- a/core/bin/external_node/src/main.rs +++ b/core/bin/external_node/src/main.rs @@ -94,8 +94,8 @@ async fn build_state_keeper( stop_receiver_clone.changed().await?; result })); - let batch_executor_base: Box<dyn BatchExecutor> = - Box::new(MainBatchExecutor::new(save_call_traces, true)); + let batch_executor = MainBatchExecutor::new(save_call_traces, true); + let batch_executor: Box<dyn BatchExecutor> = Box::new(batch_executor); let io = ExternalIO::new( connection_pool, @@ -108,7 +108,7 @@ async fn build_state_keeper( Ok(ZkSyncStateKeeper::new( stop_receiver, Box::new(io), - batch_executor_base, + batch_executor, output_handler, Arc::new(NoopSealer), Arc::new(storage_factory), diff --git a/core/bin/external_node/src/node_builder.rs b/core/bin/external_node/src/node_builder.rs index d71a9f0cdf57..1a7991b48a71 100644 --- a/core/bin/external_node/src/node_builder.rs +++ b/core/bin/external_node/src/node_builder.rs @@ -169,6 +169,8 @@ impl ExternalNodeBuilder { let query_eth_client_layer = QueryEthClientLayer::new( self.config.required.settlement_layer_id(), self.config.required.eth_client_url.clone(), + // TODO(EVM-676): add this config for external node + Default::default(), ); self.node.add_layer(query_eth_client_layer); Ok(self) @@ -216,8 +218,8 @@ impl ExternalNodeBuilder { rocksdb_options, ); self.node - .add_layer(persistence_layer) .add_layer(io_layer) + .add_layer(persistence_layer) .add_layer(main_node_batch_executor_builder_layer) .add_layer(state_keeper_layer); Ok(self) diff --git a/core/bin/external_node/src/tests/framework.rs b/core/bin/external_node/src/tests/framework.rs index 71a6afe503a7..e9667f2c05db 100644 --- a/core/bin/external_node/src/tests/framework.rs +++ b/core/bin/external_node/src/tests/framework.rs @@ -127,7 +127,13 @@ impl 
WiringLayer for MockL1ClientLayer { fn layer_name(&self) -> &'static str { // We don't care about values, we just want to hijack the layer name. - QueryEthClientLayer::new(SLChainId(1), "https://example.com".parse().unwrap()).layer_name() + // TODO(EVM-676): configure the `settlement_mode` here + QueryEthClientLayer::new( + SLChainId(1), + "https://example.com".parse().unwrap(), + Default::default(), + ) + .layer_name() } async fn wire(self, _: Self::Input) -> Result<Self::Output, WiringLayerError> { diff --git a/core/bin/external_node/src/tests/utils.rs b/core/bin/external_node/src/tests/utils.rs index 3784fea4763b..ee92a4b802a6 100644 --- a/core/bin/external_node/src/tests/utils.rs +++ b/core/bin/external_node/src/tests/utils.rs @@ -1,7 +1,7 @@ use tempfile::TempDir; use zksync_dal::CoreDal; use zksync_db_connection::connection_pool::TestTemplate; -use zksync_eth_client::clients::MockEthereum; +use zksync_eth_client::clients::MockSettlementLayer; use zksync_node_genesis::{insert_genesis_batch, GenesisBatchParams, GenesisParams}; use zksync_types::{ api, block::L2BlockHeader, ethabi, Address, L2BlockNumber, ProtocolVersionId, H256, @@ -119,7 +119,7 @@ pub(super) fn expected_health_components(components: &ComponentsToRun) -> Vec<&'static str> { } pub(super) fn mock_eth_client(diamond_proxy_addr: Address) -> MockClient<L1> { - let mock = MockEthereum::builder().with_call_handler(move |call, _| { + let mock = MockSettlementLayer::builder().with_call_handler(move |call, _| { tracing::info!("L1 call: {call:?}"); if call.to == Some(diamond_proxy_addr) { let packed_semver = ProtocolVersionId::latest().into_packed_semver_with_patch(0); diff --git a/core/bin/system-constants-generator/src/utils.rs b/core/bin/system-constants-generator/src/utils.rs index 926db5124a51..3f1ee892c890 100644 --- a/core/bin/system-constants-generator/src/utils.rs +++ b/core/bin/system-constants-generator/src/utils.rs @@ -8,7 +8,7 @@ use zksync_contracts::{ use zksync_multivm::{ interface::{ dyn_tracers::vm_1_5_0::DynTracer, tracer::VmExecutionStopReason, L1BatchEnv, L2BlockEnv, - SystemEnv, TxExecutionMode, VmExecutionMode, VmInterface, + SystemEnv, TxExecutionMode, VmExecutionMode, VmFactory, VmInterface, }, vm_latest::{ constants::{BATCH_COMPUTATIONAL_GAS_LIMIT, BOOTLOADER_HEAP_PAGE}, @@ -276,8 +276,7 @@ pub(super) fn execute_internal_transfer_test() -> u32 { output: tracer_result.clone(), } .into_tracer_pointer(); - let mut vm: Vm<_, HistoryEnabled> = - Vm::new(l1_batch, system_env, Rc::new(RefCell::new(storage_view))); + let mut vm: Vm<_, HistoryEnabled> = Vm::new(l1_batch, system_env, storage_view.to_rc_ptr()); let result = vm.inspect(tracer.into(), VmExecutionMode::Bootloader); assert!(!result.result.is_failed(), "The internal call has reverted"); diff --git a/core/bin/verified_sources_fetcher/src/main.rs b/core/bin/verified_sources_fetcher/src/main.rs index 51ec8e9de7d6..981eebf4a706 100644 --- a/core/bin/verified_sources_fetcher/src/main.rs +++ b/core/bin/verified_sources_fetcher/src/main.rs @@ -52,14 +52,14 @@ async fn main() { file.write_all(content.as_bytes()).unwrap(); } SourceCodeData::StandardJsonInput(input) => { - let sources = input.get(&"sources".to_string()).unwrap().clone(); + let sources = input.get("sources").unwrap().clone(); for (key, val) in sources.as_object().unwrap() { let p = format!("{}/{}", &dir, key); let path = std::path::Path::new(p.as_str()); let prefix = path.parent().unwrap(); std::fs::create_dir_all(prefix).unwrap(); let mut file = std::fs::File::create(path).unwrap(); - let content = 
val.get(&"content".to_string()).unwrap().as_str().unwrap(); + let content = val.get("content").unwrap().as_str().unwrap(); file.write_all(content.as_bytes()).unwrap(); } } diff --git a/core/bin/zksync_server/src/main.rs b/core/bin/zksync_server/src/main.rs index a7fee61c8a01..1c22ce5c41a2 100644 --- a/core/bin/zksync_server/src/main.rs +++ b/core/bin/zksync_server/src/main.rs @@ -11,15 +11,15 @@ use zksync_config::{ }, fri_prover_group::FriProverGroupConfig, house_keeper::HouseKeeperConfig, - BasicWitnessInputProducerConfig, ContractsConfig, DatabaseSecrets, + BasicWitnessInputProducerConfig, ContractsConfig, DatabaseSecrets, ExperimentalVmConfig, ExternalPriceApiClientConfig, FriProofCompressorConfig, FriProverConfig, FriProverGatewayConfig, FriWitnessGeneratorConfig, FriWitnessVectorGeneratorConfig, L1Secrets, ObservabilityConfig, PrometheusConfig, ProofDataHandlerConfig, ProtectiveReadsWriterConfig, Secrets, }, ApiConfig, BaseTokenAdjusterConfig, ContractVerifierConfig, DADispatcherConfig, DBConfig, - EthConfig, EthWatchConfig, GasAdjusterConfig, GenesisConfig, ObjectStoreConfig, PostgresConfig, - SnapshotsCreatorConfig, + EthConfig, EthWatchConfig, ExternalProofIntegrationApiConfig, GasAdjusterConfig, GenesisConfig, + ObjectStoreConfig, PostgresConfig, SnapshotsCreatorConfig, }; use zksync_core_leftovers::{ temp_config_store::{decode_yaml_repr, TempConfigStore}, @@ -208,5 +208,7 @@ fn load_env_config() -> anyhow::Result<TempConfigStore> { pruning: None, snapshot_recovery: None, external_price_api_client_config: ExternalPriceApiClientConfig::from_env().ok(), + external_proof_integration_api_config: ExternalProofIntegrationApiConfig::from_env().ok(), + experimental_vm_config: ExperimentalVmConfig::from_env().ok(), }) } diff --git a/core/bin/zksync_server/src/node_builder.rs b/core/bin/zksync_server/src/node_builder.rs index f504fa0eaebb..1998d2dae911 100644 --- a/core/bin/zksync_server/src/node_builder.rs +++ b/core/bin/zksync_server/src/node_builder.rs @@ -31,10 +31,12 @@ use zksync_node_framework::{ da_dispatcher::DataAvailabilityDispatcherLayer, eth_sender::{EthTxAggregatorLayer, EthTxManagerLayer}, eth_watch::EthWatchLayer, + external_proof_integration_api::ExternalProofIntegrationApiLayer, + gas_adjuster::GasAdjusterLayer, healtcheck_server::HealthCheckLayer, house_keeper::HouseKeeperLayer, l1_batch_commitment_mode_validation::L1BatchCommitmentModeValidationLayer, - l1_gas::SequencerL1GasLayer, + l1_gas::L1GasLayer, metadata_calculator::MetadataCalculatorLayer, node_storage_init::{ main_node_strategy::MainNodeInitStrategyLayer, NodeStorageInitializerLayer, @@ -53,7 +55,8 @@ use zksync_node_framework::{ }, tee_verifier_input_producer::TeeVerifierInputProducerLayer, vm_runner::{ - bwip::BasicWitnessInputProducerLayer, protective_reads::ProtectiveReadsWriterLayer, + bwip::BasicWitnessInputProducerLayer, playground::VmPlaygroundLayer, + protective_reads::ProtectiveReadsWriterLayer, }, web3_api::{ caches::MempoolCacheLayer, @@ -65,7 +68,7 @@ use zksync_node_framework::{ }, service::{ZkStackService, ZkStackServiceBuilder}, }; -use zksync_types::SHARED_BRIDGE_ETHER_TOKEN_ADDRESS; +use zksync_types::{settlement::SettlementMode, SHARED_BRIDGE_ETHER_TOKEN_ADDRESS}; use zksync_vlog::prometheus::PrometheusExporterConfig; /// Macro that looks into a path to fetch an optional config, @@ -151,32 +154,43 @@ impl MainNodeBuilder { fn add_query_eth_client_layer(mut self) -> anyhow::Result<Self> { let genesis = self.genesis_config.clone(); let eth_config = try_load_config!(self.secrets.l1); - let query_eth_client_layer =
QueryEthClientLayer::new(genesis.settlement_layer_id(), eth_config.l1_rpc_url); + let query_eth_client_layer = QueryEthClientLayer::new( + genesis.settlement_layer_id(), + eth_config.l1_rpc_url, + self.configs + .eth + .as_ref() + .and_then(|x| Some(x.gas_adjuster?.settlement_mode)) + .unwrap_or(SettlementMode::SettlesToL1), + ); self.node.add_layer(query_eth_client_layer); Ok(self) } - fn add_sequencer_l1_gas_layer(mut self) -> anyhow::Result<Self> { - // Ensure the BaseTokenRatioProviderResource is inserted if the base token is not ETH. - if self.contracts_config.base_token_addr != Some(SHARED_BRIDGE_ETHER_TOKEN_ADDRESS) { - let base_token_adjuster_config = try_load_config!(self.configs.base_token_adjuster); - self.node - .add_layer(BaseTokenRatioProviderLayer::new(base_token_adjuster_config)); - } - + fn add_gas_adjuster_layer(mut self) -> anyhow::Result<Self> { let gas_adjuster_config = try_load_config!(self.configs.eth) .gas_adjuster .context("Gas adjuster")?; - let state_keeper_config = try_load_config!(self.configs.state_keeper_config); let eth_sender_config = try_load_config!(self.configs.eth); - let sequencer_l1_gas_layer = SequencerL1GasLayer::new( + let gas_adjuster_layer = GasAdjusterLayer::new( gas_adjuster_config, self.genesis_config.clone(), - state_keeper_config, try_load_config!(eth_sender_config.sender).pubdata_sending_mode, ); - self.node.add_layer(sequencer_l1_gas_layer); + self.node.add_layer(gas_adjuster_layer); + Ok(self) + } + + fn add_l1_gas_layer(mut self) -> anyhow::Result<Self> { + // Ensure the BaseTokenRatioProviderResource is inserted if the base token is not ETH. + if self.contracts_config.base_token_addr != Some(SHARED_BRIDGE_ETHER_TOKEN_ADDRESS) { + let base_token_adjuster_config = try_load_config!(self.configs.base_token_adjuster); + self.node + .add_layer(BaseTokenRatioProviderLayer::new(base_token_adjuster_config)); + } + let state_keeper_config = try_load_config!(self.configs.state_keeper_config); + let l1_gas_layer = L1GasLayer::new(state_keeper_config); + self.node.add_layer(l1_gas_layer); Ok(self) } @@ -235,8 +249,10 @@ impl MainNodeBuilder { try_load_config!(wallets.state_keeper), ); let db_config = try_load_config!(self.configs.db_config); + let experimental_vm_config = try_load_config!(self.configs.experimental_vm_config); let main_node_batch_executor_builder_layer = - MainBatchExecutorLayer::new(sk_config.save_call_traces, OPTIONAL_BYTECODE_COMPRESSION); + MainBatchExecutorLayer::new(sk_config.save_call_traces, OPTIONAL_BYTECODE_COMPRESSION) + .with_fast_vm_mode(experimental_vm_config.state_keeper_fast_vm_mode); let rocksdb_options = RocksdbStorageOptions { block_cache_capacity: db_config @@ -560,6 +576,16 @@ impl MainNodeBuilder { Ok(self) } + fn add_vm_playground_layer(mut self) -> anyhow::Result<Self> { + let vm_config = try_load_config!(self.configs.experimental_vm_config); + self.node.add_layer(VmPlaygroundLayer::new( + vm_config.playground, + self.genesis_config.l2_chain_id, + )); + + Ok(self) + } + fn add_base_token_ratio_persister_layer(mut self) -> anyhow::Result<Self> { let config = try_load_config!(self.configs.base_token_adjuster); let contracts_config = self.contracts_config.clone(); @@ -569,6 +595,16 @@ impl MainNodeBuilder { Ok(self) } + fn add_external_proof_integration_api_layer(mut self) -> anyhow::Result<Self> { + let config = try_load_config!(self.configs.external_proof_integration_api_config); + self.node.add_layer(ExternalProofIntegrationApiLayer::new( + config, + self.genesis_config.l1_batch_commit_data_generator_mode, + )); + + Ok(self) + } + /// This layer 
will make sure that the database is initialized correctly, /// e.g. genesis will be performed if it's required. /// @@ -614,7 +650,7 @@ impl MainNodeBuilder { .add_healthcheck_layer()? .add_prometheus_exporter_layer()? .add_query_eth_client_layer()? - .add_sequencer_l1_gas_layer()?; + .add_gas_adjuster_layer()?; // Add preconditions for all the components. self = self @@ -637,11 +673,13 @@ impl MainNodeBuilder { // State keeper is the core component of the sequencer, // which is why we consider it to be responsible for the storage initialization. self = self + .add_l1_gas_layer()? .add_storage_initialization_layer(LayerKind::Task)? .add_state_keeper_layer()?; } Component::HttpApi => { self = self + .add_l1_gas_layer()? .add_tx_sender_layer()? .add_tree_api_client_layer()? .add_api_caches_layer()? @@ -649,6 +687,7 @@ impl MainNodeBuilder { } Component::WsApi => { self = self + .add_l1_gas_layer()? .add_tx_sender_layer()? .add_tree_api_client_layer()? .add_api_caches_layer()? @@ -710,6 +749,12 @@ impl MainNodeBuilder { Component::VmRunnerBwip => { self = self.add_vm_runner_bwip_layer()?; } + Component::VmPlayground => { + self = self.add_vm_playground_layer()?; + } + Component::ExternalProofIntegrationApi => { + self = self.add_external_proof_integration_api_layer()?; + } } } Ok(self.node.build()) diff --git a/core/bin/zksync_tee_prover/src/api_client.rs b/core/bin/zksync_tee_prover/src/api_client.rs index 1530da971157..13fbc1ba8868 100644 --- a/core/bin/zksync_tee_prover/src/api_client.rs +++ b/core/bin/zksync_tee_prover/src/api_client.rs @@ -74,8 +74,11 @@ impl TeeApiClient { /// Fetches the next job for the TEE prover to process, verifying and signing it if the /// verification is successful. - pub async fn get_job(&self) -> Result<Option<Box<TeeVerifierInput>>, TeeProverError> { - let request = TeeProofGenerationDataRequest {}; + pub async fn get_job( + &self, + tee_type: TeeType, + ) -> Result<Option<Box<TeeVerifierInput>>, TeeProverError> { + let request = TeeProofGenerationDataRequest { tee_type }; let response = self .post::<_, TeeProofGenerationDataResponse, _>("/tee/proof_inputs", request) .await?; diff --git a/core/bin/zksync_tee_prover/src/tee_prover.rs b/core/bin/zksync_tee_prover/src/tee_prover.rs index bcd1e4a1b6b4..64a3a9c5749d 100644 --- a/core/bin/zksync_tee_prover/src/tee_prover.rs +++ b/core/bin/zksync_tee_prover/src/tee_prover.rs @@ -112,7 +112,7 @@ impl TeeProver { } async fn step(&self) -> Result<Option<L1BatchNumber>, TeeProverError> { - match self.api_client.get_job().await? { + match self.api_client.get_job(self.tee_type).await? { Some(job) => { let (signature, batch_number, root_hash) = self.verify(*job)?; self.api_client diff --git a/core/lib/basic_types/src/basic_fri_types.rs b/core/lib/basic_types/src/basic_fri_types.rs index a1563ff7e590..9765435f0973 100644 --- a/core/lib/basic_types/src/basic_fri_types.rs +++ b/core/lib/basic_types/src/basic_fri_types.rs @@ -27,6 +27,7 @@ type Eip4844BlobsInner = [Option<Blob>; MAX_4844_BLOBS_PER_BLOCK]; /// - there are between [1, 16] blobs /// - all blobs are of the same size [`EIP_4844_BLOB_SIZE`] /// - there may be no blobs in case of Validium +/// /// Creating a structure violating these constraints will panic. /// /// Note: blobs are padded to fit the correct size. 
diff --git a/core/lib/basic_types/src/lib.rs b/core/lib/basic_types/src/lib.rs index a9522407222c..5633fa3e10df 100644 --- a/core/lib/basic_types/src/lib.rs +++ b/core/lib/basic_types/src/lib.rs @@ -26,9 +26,10 @@ pub mod commitment; pub mod network; pub mod protocol_version; pub mod prover_dal; +pub mod settlement; pub mod tee_types; pub mod url; -pub mod vm_version; +pub mod vm; pub mod web3; /// Account place in the global state tree is uniquely identified by its address. diff --git a/core/lib/basic_types/src/protocol_version.rs b/core/lib/basic_types/src/protocol_version.rs index d4300fba3f80..265c06987afd 100644 --- a/core/lib/basic_types/src/protocol_version.rs +++ b/core/lib/basic_types/src/protocol_version.rs @@ -12,7 +12,7 @@ use serde_with::{DeserializeFromStr, SerializeDisplay}; use crate::{ ethabi::Token, - vm_version::VmVersion, + vm::VmVersion, web3::contract::{Detokenize, Error}, H256, U256, }; diff --git a/core/lib/basic_types/src/settlement.rs b/core/lib/basic_types/src/settlement.rs new file mode 100644 index 000000000000..4fd921957a23 --- /dev/null +++ b/core/lib/basic_types/src/settlement.rs @@ -0,0 +1,16 @@ +use serde::{Deserialize, Serialize}; + +/// An enum which is used to describe whether a zkSync network settles to L1 or to the gateway. +/// Gateway is an Ethereum-compatible L2 and so it requires different treatment with regard to DA handling. +#[derive(Default, Debug, Copy, Clone, Serialize, Deserialize, PartialEq, Eq)] +pub enum SettlementMode { + #[default] + SettlesToL1, + Gateway, +} + +impl SettlementMode { + pub fn is_gateway(self) -> bool { + matches!(self, Self::Gateway) + } +} diff --git a/core/lib/basic_types/src/vm.rs b/core/lib/basic_types/src/vm.rs new file mode 100644 index 000000000000..c178c853b2dc --- /dev/null +++ b/core/lib/basic_types/src/vm.rs @@ -0,0 +1,39 @@ +//! Basic VM types that are shared widely enough to not put them in the `multivm` crate. + +use serde::{Deserialize, Serialize}; + +#[derive(Debug, Clone, Copy)] +pub enum VmVersion { + M5WithoutRefunds, + M5WithRefunds, + M6Initial, + M6BugWithCompressionFixed, + Vm1_3_2, + VmVirtualBlocks, + VmVirtualBlocksRefundsEnhancement, + VmBoojumIntegration, + Vm1_4_1, + Vm1_4_2, + Vm1_5_0SmallBootloaderMemory, + Vm1_5_0IncreasedBootloaderMemory, +} + +impl VmVersion { + /// Returns the latest supported VM version. + pub const fn latest() -> VmVersion { + Self::Vm1_5_0IncreasedBootloaderMemory + } +} + +/// Mode in which to run the new fast VM implementation. +#[derive(Debug, Clone, Copy, Default, PartialEq, Serialize, Deserialize)] +#[serde(rename_all = "snake_case")] +pub enum FastVmMode { + /// Run only the old VM. + #[default] + Old, + /// Run only the new VM. + New, + /// Run both the new and old VM and compare their outputs for each transaction execution. + Shadow, +} diff --git a/core/lib/basic_types/src/vm_version.rs b/core/lib/basic_types/src/vm_version.rs deleted file mode 100644 index 49fec39fc9cb..000000000000 --- a/core/lib/basic_types/src/vm_version.rs +++ /dev/null @@ -1,22 +0,0 @@ -#[derive(Debug, Clone, Copy)] -pub enum VmVersion { - M5WithoutRefunds, - M5WithRefunds, - M6Initial, - M6BugWithCompressionFixed, - Vm1_3_2, - VmVirtualBlocks, - VmVirtualBlocksRefundsEnhancement, - VmBoojumIntegration, - Vm1_4_1, - Vm1_4_2, - Vm1_5_0SmallBootloaderMemory, - Vm1_5_0IncreasedBootloaderMemory, -} - -impl VmVersion { - /// Returns the latest supported VM version. 
- pub const fn latest() -> VmVersion { - Self::Vm1_5_0IncreasedBootloaderMemory - } -} diff --git a/core/lib/config/Cargo.toml b/core/lib/config/Cargo.toml index 784bdebfef07..b13948448cdd 100644 --- a/core/lib/config/Cargo.toml +++ b/core/lib/config/Cargo.toml @@ -16,6 +16,7 @@ zksync_crypto_primitives.workspace = true zksync_consensus_utils.workspace = true zksync_concurrency.workspace = true zksync_vlog = { workspace = true, optional = true } +tracing = { workspace = true, optional = true } url.workspace = true anyhow.workspace = true @@ -25,4 +26,4 @@ serde = { workspace = true, features = ["derive"] } [features] default = [] -observability_ext = ["zksync_vlog"] +observability_ext = ["zksync_vlog", "tracing"] diff --git a/core/lib/config/src/configs/chain.rs b/core/lib/config/src/configs/chain.rs index 31091e9ce1db..de7fddd96f94 100644 --- a/core/lib/config/src/configs/chain.rs +++ b/core/lib/config/src/configs/chain.rs @@ -30,11 +30,11 @@ impl NetworkConfig { /// An enum that represents the version of the fee model to use. /// - `V1`, the first model that was used in ZKsync Era. In this fee model, the pubdata price must be pegged to the L1 gas price. -/// Also, the fair L2 gas price is expected to only include the proving/computation price for the operator and not the costs that come from -/// processing the batch on L1. +/// Also, the fair L2 gas price is expected to only include the proving/computation price for the operator and not the costs that come from +/// processing the batch on L1. /// - `V2`, the second model that was used in ZKsync Era. There the pubdata price might be independent from the L1 gas price. Also, -/// The fair L2 gas price is expected to both the proving/computation price for the operator and the costs that come from -/// processing the batch on L1. +/// the fair L2 gas price is expected to include both the proving/computation price for the operator and the costs that come from +/// processing the batch on L1. #[derive(Debug, Clone, Copy, Deserialize, PartialEq, Eq)] pub enum FeeModelVersion { V1, diff --git a/core/lib/config/src/configs/eth_sender.rs b/core/lib/config/src/configs/eth_sender.rs index c0e14dd68a87..e932cd9819b9 100644 --- a/core/lib/config/src/configs/eth_sender.rs +++ b/core/lib/config/src/configs/eth_sender.rs @@ -2,7 +2,7 @@ use std::time::Duration; use anyhow::Context as _; use serde::Deserialize; -use zksync_basic_types::H256; +use zksync_basic_types::{settlement::SettlementMode, H256}; use zksync_crypto_primitives::K256PrivateKey; use crate::EthWatchConfig; @@ -54,6 +54,7 @@ impl EthConfig { num_samples_for_blob_base_fee_estimate: 10, internal_pubdata_pricing_multiplier: 1.0, max_blob_base_fee: None, + settlement_mode: Default::default(), }), watcher: Some(EthWatchConfig { confirmations_for_eth_event: None, @@ -82,6 +83,7 @@ pub enum PubdataSendingMode { Calldata, Blobs, Custom, + RelayedL2Calldata, } #[derive(Debug, Deserialize, Clone, PartialEq)] @@ -181,6 +183,10 @@ pub struct GasAdjusterConfig { pub internal_pubdata_pricing_multiplier: f64, /// Max blob base fee that is allowed to be used. pub max_blob_base_fee: Option<u64>, /// Whether the gas adjuster should require that the L2 node is used as a settlement layer. /// It offers a runtime check for correctly provided values. 
+ #[serde(default)] + pub settlement_mode: SettlementMode, } impl GasAdjusterConfig { diff --git a/core/lib/config/src/configs/experimental.rs b/core/lib/config/src/configs/experimental.rs index e362715d3d4a..bb00554ead1c 100644 --- a/core/lib/config/src/configs/experimental.rs +++ b/core/lib/config/src/configs/experimental.rs @@ -3,6 +3,7 @@ use std::num::NonZeroU32; use serde::Deserialize; +use zksync_basic_types::{vm::FastVmMode, L1BatchNumber}; #[derive(Debug, Clone, PartialEq, Deserialize)] pub struct ExperimentalDBConfig { @@ -60,3 +61,50 @@ impl ExperimentalDBConfig { 100 } } + +/// Configuration for the VM playground (an experimental component that's unlikely to ever be stabilized). +#[derive(Debug, Clone, PartialEq, Deserialize)] +pub struct ExperimentalVmPlaygroundConfig { + /// Mode in which to run the fast VM implementation. Note that for it to actually be used, L1 batches should have a recent version. + #[serde(default)] + pub fast_vm_mode: FastVmMode, + /// Path to the RocksDB cache directory. + #[serde(default = "ExperimentalVmPlaygroundConfig::default_db_path")] + pub db_path: String, + /// First L1 batch to consider processed. Will not be used if the processing cursor is persisted, unless the `reset` flag is set. + #[serde(default)] + pub first_processed_batch: L1BatchNumber, + /// If set to true, processing cursor will reset `first_processed_batch` regardless of the current progress. Beware that this will likely + /// require dropping the RocksDB cache. + #[serde(default)] + pub reset: bool, +} + +impl Default for ExperimentalVmPlaygroundConfig { + fn default() -> Self { + Self { + fast_vm_mode: FastVmMode::default(), + db_path: Self::default_db_path(), + first_processed_batch: L1BatchNumber(0), + reset: false, + } + } +} + +impl ExperimentalVmPlaygroundConfig { + pub fn default_db_path() -> String { + "./db/vm_playground".to_owned() + } +} + +/// Experimental VM configuration options. +#[derive(Debug, Clone, Default, PartialEq, Deserialize)] +pub struct ExperimentalVmConfig { + #[serde(skip)] // Isn't properly deserialized by `envy` + pub playground: ExperimentalVmPlaygroundConfig, + + /// Mode in which to run the fast VM implementation in the state keeper. Should not be set in production; + /// the new VM doesn't produce call traces and can diverge from the old VM! 
+ #[serde(default)] + pub state_keeper_fast_vm_mode: FastVmMode, +} diff --git a/core/lib/config/src/configs/external_proof_integration_api.rs b/core/lib/config/src/configs/external_proof_integration_api.rs new file mode 100644 index 000000000000..f9a43995ad17 --- /dev/null +++ b/core/lib/config/src/configs/external_proof_integration_api.rs @@ -0,0 +1,6 @@ +use serde::Deserialize; + +#[derive(Debug, Deserialize, Clone, PartialEq)] +pub struct ExternalProofIntegrationApiConfig { + pub http_port: u16, +} diff --git a/core/lib/config/src/configs/general.rs b/core/lib/config/src/configs/general.rs index 5707b5c70492..3e6b05d8003e 100644 --- a/core/lib/config/src/configs/general.rs +++ b/core/lib/config/src/configs/general.rs @@ -9,13 +9,13 @@ use crate::{ pruning::PruningConfig, snapshot_recovery::SnapshotRecoveryConfig, vm_runner::{BasicWitnessInputProducerConfig, ProtectiveReadsWriterConfig}, - CommitmentGeneratorConfig, ExternalPriceApiClientConfig, FriProofCompressorConfig, - FriProverConfig, FriProverGatewayConfig, FriWitnessGeneratorConfig, - FriWitnessVectorGeneratorConfig, ObservabilityConfig, PrometheusConfig, - ProofDataHandlerConfig, + CommitmentGeneratorConfig, ExperimentalVmConfig, ExternalPriceApiClientConfig, + FriProofCompressorConfig, FriProverConfig, FriProverGatewayConfig, + FriWitnessGeneratorConfig, FriWitnessVectorGeneratorConfig, ObservabilityConfig, + PrometheusConfig, ProofDataHandlerConfig, }, - ApiConfig, ContractVerifierConfig, DBConfig, EthConfig, ObjectStoreConfig, PostgresConfig, - SnapshotsCreatorConfig, + ApiConfig, ContractVerifierConfig, DBConfig, EthConfig, ExternalProofIntegrationApiConfig, + ObjectStoreConfig, PostgresConfig, SnapshotsCreatorConfig, }; #[derive(Debug, Clone, PartialEq)] @@ -50,4 +50,6 @@ pub struct GeneralConfig { pub base_token_adjuster: Option<BaseTokenAdjusterConfig>, pub external_price_api_client_config: Option<ExternalPriceApiClientConfig>, pub consensus_config: Option<ConsensusConfig>, + pub external_proof_integration_api_config: Option<ExternalProofIntegrationApiConfig>, + pub experimental_vm_config: Option<ExperimentalVmConfig>, } diff --git a/core/lib/config/src/configs/mod.rs b/core/lib/config/src/configs/mod.rs index 0da6f986f353..0ecd8ee0df98 100644 --- a/core/lib/config/src/configs/mod.rs +++ b/core/lib/config/src/configs/mod.rs @@ -9,8 +9,9 @@ pub use self::{ database::{DBConfig, PostgresConfig}, eth_sender::{EthConfig, GasAdjusterConfig}, eth_watch::EthWatchConfig, - experimental::ExperimentalDBConfig, + experimental::{ExperimentalDBConfig, ExperimentalVmConfig, ExperimentalVmPlaygroundConfig}, external_price_api_client::ExternalPriceApiClientConfig, + external_proof_integration_api::ExternalProofIntegrationApiConfig, fri_proof_compressor::FriProofCompressorConfig, fri_prover::FriProverConfig, fri_prover_gateway::FriProverGatewayConfig, @@ -43,6 +44,7 @@ pub mod eth_sender; pub mod eth_watch; mod experimental; pub mod external_price_api_client; +pub mod external_proof_integration_api; pub mod fri_proof_compressor; pub mod fri_prover; pub mod fri_prover_gateway; diff --git a/core/lib/config/src/configs/vm_runner.rs b/core/lib/config/src/configs/vm_runner.rs index fa7c7c1a90a3..1fecc12668c1 100644 --- a/core/lib/config/src/configs/vm_runner.rs +++ b/core/lib/config/src/configs/vm_runner.rs @@ -1,7 +1,7 @@ use serde::Deserialize; use zksync_basic_types::L1BatchNumber; -#[derive(Debug, Deserialize, Clone, PartialEq, Default)] +#[derive(Debug, Deserialize, Clone, PartialEq)] pub struct ProtectiveReadsWriterConfig { /// Path to the RocksDB data directory that serves state cache. 
#[serde(default = "ProtectiveReadsWriterConfig::default_db_path")] @@ -18,7 +18,7 @@ impl ProtectiveReadsWriterConfig { } } -#[derive(Debug, Deserialize, Clone, PartialEq, Default)] +#[derive(Debug, Deserialize, Clone, PartialEq)] pub struct BasicWitnessInputProducerConfig { /// Path to the RocksDB data directory that serves state cache. #[serde(default = "BasicWitnessInputProducerConfig::default_db_path")] diff --git a/core/lib/config/src/lib.rs b/core/lib/config/src/lib.rs index c5944e581a97..ae8288fa72ea 100644 --- a/core/lib/config/src/lib.rs +++ b/core/lib/config/src/lib.rs @@ -2,8 +2,8 @@ pub use crate::configs::{ ApiConfig, BaseTokenAdjusterConfig, ContractVerifierConfig, ContractsConfig, - DADispatcherConfig, DBConfig, EthConfig, EthWatchConfig, GasAdjusterConfig, GenesisConfig, - ObjectStoreConfig, PostgresConfig, SnapshotsCreatorConfig, + DADispatcherConfig, DBConfig, EthConfig, EthWatchConfig, ExternalProofIntegrationApiConfig, + GasAdjusterConfig, GenesisConfig, ObjectStoreConfig, PostgresConfig, SnapshotsCreatorConfig, }; pub mod configs; diff --git a/core/lib/config/src/observability_ext.rs b/core/lib/config/src/observability_ext.rs index 641b095eb3b9..1370978b5807 100644 --- a/core/lib/config/src/observability_ext.rs +++ b/core/lib/config/src/observability_ext.rs @@ -16,6 +16,9 @@ impl ObservabilityConfig { .with_sentry(sentry) .with_opentelemetry(opentelemetry) .build(); + + tracing::info!("Installed observability stack with the following configuration: {self:?}"); + Ok(guard) } } diff --git a/core/lib/config/src/testonly.rs b/core/lib/config/src/testonly.rs index 6a75e9f6bf1a..b015d9d264ed 100644 --- a/core/lib/config/src/testonly.rs +++ b/core/lib/config/src/testonly.rs @@ -6,6 +6,7 @@ use zksync_basic_types::{ commitment::L1BatchCommitmentMode, network::Network, protocol_version::{ProtocolSemanticVersion, ProtocolVersionId, VersionPatch}, + vm::FastVmMode, L1BatchNumber, L1ChainId, L2ChainId, }; use zksync_consensus_utils::EncodeDist; @@ -292,6 +293,34 @@ impl Distribution for EncodeDist { } } +impl Distribution<configs::ExperimentalVmPlaygroundConfig> for EncodeDist { + fn sample<R: Rng + ?Sized>(&self, rng: &mut R) -> configs::ExperimentalVmPlaygroundConfig { + configs::ExperimentalVmPlaygroundConfig { + fast_vm_mode: gen_fast_vm_mode(rng), + db_path: self.sample(rng), + first_processed_batch: L1BatchNumber(rng.gen()), + reset: self.sample(rng), + } + } +} + +fn gen_fast_vm_mode<R: Rng + ?Sized>(rng: &mut R) -> FastVmMode { + match rng.gen_range(0..3) { + 0 => FastVmMode::Old, + 1 => FastVmMode::New, + _ => FastVmMode::Shadow, + } +} + +impl Distribution<configs::ExperimentalVmConfig> for EncodeDist { + fn sample<R: Rng + ?Sized>(&self, rng: &mut R) -> configs::ExperimentalVmConfig { + configs::ExperimentalVmConfig { + playground: self.sample(rng), + state_keeper_fast_vm_mode: gen_fast_vm_mode(rng), + } + } +} + impl Distribution<configs::database::DBConfig> for EncodeDist { fn sample<R: Rng + ?Sized>(&self, rng: &mut R) -> configs::database::DBConfig { configs::database::DBConfig { @@ -398,6 +427,8 @@ impl Distribution for EncodeDist { num_samples_for_blob_base_fee_estimate: self.sample(rng), internal_pubdata_pricing_multiplier: self.sample(rng), max_blob_base_fee: self.sample(rng), + // TODO(EVM-676): generate it randomly once this value is used + settlement_mode: Default::default(), } } } @@ -997,6 +1028,19 @@ impl Distribution for Enc } } +impl Distribution<configs::external_proof_integration_api::ExternalProofIntegrationApiConfig> + for EncodeDist +{ + fn sample<R: Rng + ?Sized>( + &self, + rng: &mut R, + ) -> configs::external_proof_integration_api::ExternalProofIntegrationApiConfig { + configs::external_proof_integration_api::ExternalProofIntegrationApiConfig { + http_port: self.sample(rng), + } + } +} + +impl 
Distribution for EncodeDist { fn sample( &self, @@ -1046,6 +1090,8 @@ impl Distribution for EncodeDist { base_token_adjuster: self.sample(rng), external_price_api_client_config: self.sample(rng), consensus_config: self.sample(rng), + external_proof_integration_api_config: self.sample(rng), + experimental_vm_config: self.sample(rng), } } } diff --git a/core/lib/crypto_primitives/src/packed_eth_signature.rs b/core/lib/crypto_primitives/src/packed_eth_signature.rs index 7e5efc07bb92..3d76de73560e 100644 --- a/core/lib/crypto_primitives/src/packed_eth_signature.rs +++ b/core/lib/crypto_primitives/src/packed_eth_signature.rs @@ -22,7 +22,7 @@ use crate::{ /// /// That is why: /// 1) when we create this structure by deserialization of message produced by user -/// we subtract 27 from v in `ETHSignature` if necessary and store it in the `ETHSignature` structure this way. +/// we subtract 27 from v in `ETHSignature` if necessary and store it in the `ETHSignature` structure this way. /// 2) When we serialize/create this structure we add 27 to v in `ETHSignature`. /// /// This way when we have methods that consumes &self we can be sure that ETHSignature::recover_signer works diff --git a/core/lib/dal/.sqlx/query-1bfcc02ac79958dcbd20f3680df7517f5d61f7f1472e7d152f575ffd56ad8633.json b/core/lib/dal/.sqlx/query-1bfcc02ac79958dcbd20f3680df7517f5d61f7f1472e7d152f575ffd56ad8633.json new file mode 100644 index 000000000000..8ae53dc7e6f4 --- /dev/null +++ b/core/lib/dal/.sqlx/query-1bfcc02ac79958dcbd20f3680df7517f5d61f7f1472e7d152f575ffd56ad8633.json @@ -0,0 +1,47 @@ +{ + "db_name": "PostgreSQL", + "query": "\n SELECT\n base_fee_per_gas,\n l2_fair_gas_price,\n fair_pubdata_price,\n protocol_version,\n l1_gas_price\n FROM\n miniblocks\n WHERE\n number <= $1\n ORDER BY\n number DESC\n LIMIT\n $2\n ", + "describe": { + "columns": [ + { + "ordinal": 0, + "name": "base_fee_per_gas", + "type_info": "Numeric" + }, + { + "ordinal": 1, + "name": "l2_fair_gas_price", + "type_info": "Int8" + }, + { + "ordinal": 2, + "name": "fair_pubdata_price", + "type_info": "Int8" + }, + { + "ordinal": 3, + "name": "protocol_version", + "type_info": "Int4" + }, + { + "ordinal": 4, + "name": "l1_gas_price", + "type_info": "Int8" + } + ], + "parameters": { + "Left": [ + "Int8", + "Int8" + ] + }, + "nullable": [ + false, + false, + true, + true, + false + ] + }, + "hash": "1bfcc02ac79958dcbd20f3680df7517f5d61f7f1472e7d152f575ffd56ad8633" +} diff --git a/core/lib/dal/.sqlx/query-286f27e32a152c293d07e7c22e893c6f5a43386d4183745a4668507cf672b3f6.json b/core/lib/dal/.sqlx/query-286f27e32a152c293d07e7c22e893c6f5a43386d4183745a4668507cf672b3f6.json new file mode 100644 index 000000000000..540660bddf34 --- /dev/null +++ b/core/lib/dal/.sqlx/query-286f27e32a152c293d07e7c22e893c6f5a43386d4183745a4668507cf672b3f6.json @@ -0,0 +1,37 @@ +{ + "db_name": "PostgreSQL", + "query": "\n UPDATE tee_proof_generation_details\n SET\n status = 'picked_by_prover',\n updated_at = NOW(),\n prover_taken_at = NOW()\n WHERE\n tee_type = $1\n AND l1_batch_number = (\n SELECT\n proofs.l1_batch_number\n FROM\n tee_proof_generation_details AS proofs\n JOIN tee_verifier_input_producer_jobs AS inputs ON proofs.l1_batch_number = inputs.l1_batch_number\n WHERE\n inputs.status = $2\n AND (\n proofs.status = 'ready_to_be_proven'\n OR (\n proofs.status = 'picked_by_prover'\n AND proofs.prover_taken_at < NOW() - $3::INTERVAL\n )\n )\n ORDER BY\n l1_batch_number ASC\n LIMIT\n 1\n FOR UPDATE\n SKIP LOCKED\n )\n RETURNING\n tee_proof_generation_details.l1_batch_number\n ", + 
"describe": { + "columns": [ + { + "ordinal": 0, + "name": "l1_batch_number", + "type_info": "Int8" + } + ], + "parameters": { + "Left": [ + "Text", + { + "Custom": { + "name": "tee_verifier_input_producer_job_status", + "kind": { + "Enum": [ + "Queued", + "ManuallySkipped", + "InProgress", + "Successful", + "Failed" + ] + } + } + }, + "Interval" + ] + }, + "nullable": [ + false + ] + }, + "hash": "286f27e32a152c293d07e7c22e893c6f5a43386d4183745a4668507cf672b3f6" +} diff --git a/core/lib/dal/.sqlx/query-640d37aa1d6dc722b1651c74b7ea2fca14965083b0589c3b3efad02e37d55f0c.json b/core/lib/dal/.sqlx/query-640d37aa1d6dc722b1651c74b7ea2fca14965083b0589c3b3efad02e37d55f0c.json deleted file mode 100644 index f0603488f1e8..000000000000 --- a/core/lib/dal/.sqlx/query-640d37aa1d6dc722b1651c74b7ea2fca14965083b0589c3b3efad02e37d55f0c.json +++ /dev/null @@ -1,20 +0,0 @@ -{ - "db_name": "PostgreSQL", - "query": "\n SELECT\n proofs.l1_batch_number\n FROM\n tee_proof_generation_details AS proofs\n JOIN tee_verifier_input_producer_jobs AS inputs ON proofs.l1_batch_number = inputs.l1_batch_number\n WHERE\n inputs.status = 'Successful'\n AND proofs.status = 'ready_to_be_proven'\n ORDER BY\n proofs.l1_batch_number ASC\n LIMIT\n 1\n ", - "describe": { - "columns": [ - { - "ordinal": 0, - "name": "l1_batch_number", - "type_info": "Int8" - } - ], - "parameters": { - "Left": [] - }, - "nullable": [ - false - ] - }, - "hash": "640d37aa1d6dc722b1651c74b7ea2fca14965083b0589c3b3efad02e37d55f0c" -} diff --git a/core/lib/dal/.sqlx/query-7460c6101417d1a2d0f068816ca897f3bba0dd8d611a487659b472ca1f0284d5.json b/core/lib/dal/.sqlx/query-7460c6101417d1a2d0f068816ca897f3bba0dd8d611a487659b472ca1f0284d5.json new file mode 100644 index 000000000000..bcb4d32a8f17 --- /dev/null +++ b/core/lib/dal/.sqlx/query-7460c6101417d1a2d0f068816ca897f3bba0dd8d611a487659b472ca1f0284d5.json @@ -0,0 +1,20 @@ +{ + "db_name": "PostgreSQL", + "query": "\n SELECT\n l1_batch_number\n FROM\n proof_generation_details\n WHERE\n proof_blob_url IS NOT NULL\n ORDER BY\n l1_batch_number DESC\n LIMIT\n 1\n ", + "describe": { + "columns": [ + { + "ordinal": 0, + "name": "l1_batch_number", + "type_info": "Int8" + } + ], + "parameters": { + "Left": [] + }, + "nullable": [ + false + ] + }, + "hash": "7460c6101417d1a2d0f068816ca897f3bba0dd8d611a487659b472ca1f0284d5" +} diff --git a/core/lib/dal/.sqlx/query-83a931ceddf34e1c760649d613f534014b9ab9ca7725e14fb17aa050d9f35eb8.json b/core/lib/dal/.sqlx/query-83a931ceddf34e1c760649d613f534014b9ab9ca7725e14fb17aa050d9f35eb8.json deleted file mode 100644 index 8d9458dce0a4..000000000000 --- a/core/lib/dal/.sqlx/query-83a931ceddf34e1c760649d613f534014b9ab9ca7725e14fb17aa050d9f35eb8.json +++ /dev/null @@ -1,23 +0,0 @@ -{ - "db_name": "PostgreSQL", - "query": "\n SELECT\n base_fee_per_gas\n FROM\n miniblocks\n WHERE\n number <= $1\n ORDER BY\n number DESC\n LIMIT\n $2\n ", - "describe": { - "columns": [ - { - "ordinal": 0, - "name": "base_fee_per_gas", - "type_info": "Numeric" - } - ], - "parameters": { - "Left": [ - "Int8", - "Int8" - ] - }, - "nullable": [ - false - ] - }, - "hash": "83a931ceddf34e1c760649d613f534014b9ab9ca7725e14fb17aa050d9f35eb8" -} diff --git a/core/lib/dal/.sqlx/query-9533a672ae82db344ae1070ae11b608d21dc70397b64ce500881a8b55953c59c.json b/core/lib/dal/.sqlx/query-9533a672ae82db344ae1070ae11b608d21dc70397b64ce500881a8b55953c59c.json deleted file mode 100644 index 994bfcfbb5a2..000000000000 --- a/core/lib/dal/.sqlx/query-9533a672ae82db344ae1070ae11b608d21dc70397b64ce500881a8b55953c59c.json +++ /dev/null @@ 
-1,14 +0,0 @@ -{ - "db_name": "PostgreSQL", - "query": "\n INSERT INTO\n tee_proof_generation_details (l1_batch_number, status, created_at, updated_at)\n VALUES\n ($1, 'ready_to_be_proven', NOW(), NOW())\n ON CONFLICT (l1_batch_number) DO NOTHING\n ", - "describe": { - "columns": [], - "parameters": { - "Left": [ - "Int8" - ] - }, - "nullable": [] - }, - "hash": "9533a672ae82db344ae1070ae11b608d21dc70397b64ce500881a8b55953c59c" -} diff --git a/core/lib/dal/.sqlx/query-727d4dc6a8fdb39a6c54d4395124f8d103f12e51252c46a210a007e5e600d711.json b/core/lib/dal/.sqlx/query-a8fdcb5180fc5fd125a003e9675f213a0b02b3ff96398920bc0250397bb2a95f.json similarity index 58% rename from core/lib/dal/.sqlx/query-727d4dc6a8fdb39a6c54d4395124f8d103f12e51252c46a210a007e5e600d711.json rename to core/lib/dal/.sqlx/query-a8fdcb5180fc5fd125a003e9675f213a0b02b3ff96398920bc0250397bb2a95f.json index 8e210aade885..8b67041427d3 100644 --- a/core/lib/dal/.sqlx/query-727d4dc6a8fdb39a6c54d4395124f8d103f12e51252c46a210a007e5e600d711.json +++ b/core/lib/dal/.sqlx/query-a8fdcb5180fc5fd125a003e9675f213a0b02b3ff96398920bc0250397bb2a95f.json @@ -1,18 +1,18 @@ { "db_name": "PostgreSQL", - "query": "\n UPDATE tee_proof_generation_details\n SET\n status = 'generated',\n signature = $1,\n pubkey = $2,\n proof = $3,\n tee_type = $4,\n updated_at = NOW()\n WHERE\n l1_batch_number = $5\n ", + "query": "\n UPDATE tee_proof_generation_details\n SET\n tee_type = $1,\n status = 'generated',\n pubkey = $2,\n signature = $3,\n proof = $4,\n updated_at = NOW()\n WHERE\n l1_batch_number = $5\n ", "describe": { "columns": [], "parameters": { "Left": [ + "Text", "Bytea", "Bytea", "Bytea", - "Text", "Int8" ] }, "nullable": [] }, - "hash": "727d4dc6a8fdb39a6c54d4395124f8d103f12e51252c46a210a007e5e600d711" + "hash": "a8fdcb5180fc5fd125a003e9675f213a0b02b3ff96398920bc0250397bb2a95f" } diff --git a/core/lib/dal/.sqlx/query-d8bc4af72e3d94df53967c83d577a1e1abf3d268b16498cc65758af66781cbb6.json b/core/lib/dal/.sqlx/query-d8bc4af72e3d94df53967c83d577a1e1abf3d268b16498cc65758af66781cbb6.json new file mode 100644 index 000000000000..0ed8005289f7 --- /dev/null +++ b/core/lib/dal/.sqlx/query-d8bc4af72e3d94df53967c83d577a1e1abf3d268b16498cc65758af66781cbb6.json @@ -0,0 +1,15 @@ +{ + "db_name": "PostgreSQL", + "query": "\n INSERT INTO\n tee_proof_generation_details (l1_batch_number, tee_type, status, created_at, updated_at)\n VALUES\n ($1, $2, 'ready_to_be_proven', NOW(), NOW())\n ON CONFLICT (l1_batch_number, tee_type) DO NOTHING\n ", + "describe": { + "columns": [], + "parameters": { + "Left": [ + "Int8", + "Text" + ] + }, + "nullable": [] + }, + "hash": "d8bc4af72e3d94df53967c83d577a1e1abf3d268b16498cc65758af66781cbb6" +} diff --git a/core/lib/dal/.sqlx/query-e048951ded9e4a4a28238334bc4dc118360ab83bae3196ec941216901be629da.json b/core/lib/dal/.sqlx/query-e048951ded9e4a4a28238334bc4dc118360ab83bae3196ec941216901be629da.json new file mode 100644 index 000000000000..70f7f9d12fa4 --- /dev/null +++ b/core/lib/dal/.sqlx/query-e048951ded9e4a4a28238334bc4dc118360ab83bae3196ec941216901be629da.json @@ -0,0 +1,35 @@ +{ + "db_name": "PostgreSQL", + "query": "\n SELECT\n proofs.l1_batch_number\n FROM\n tee_proof_generation_details AS proofs\n JOIN tee_verifier_input_producer_jobs AS inputs ON proofs.l1_batch_number = inputs.l1_batch_number\n WHERE\n inputs.status = $1\n AND proofs.status = 'ready_to_be_proven'\n ORDER BY\n proofs.l1_batch_number ASC\n LIMIT\n 1\n ", + "describe": { + "columns": [ + { + "ordinal": 0, + "name": "l1_batch_number", + "type_info": "Int8" + } + ], 
+ "parameters": { + "Left": [ + { + "Custom": { + "name": "tee_verifier_input_producer_job_status", + "kind": { + "Enum": [ + "Queued", + "ManuallySkipped", + "InProgress", + "Successful", + "Failed" + ] + } + } + } + ] + }, + "nullable": [ + false + ] + }, + "hash": "e048951ded9e4a4a28238334bc4dc118360ab83bae3196ec941216901be629da" +} diff --git a/core/lib/dal/.sqlx/query-e2ff392b3aa7a22fc39d150d08b148b0f1c7e512dd43434062341eb263fe434f.json b/core/lib/dal/.sqlx/query-e2ff392b3aa7a22fc39d150d08b148b0f1c7e512dd43434062341eb263fe434f.json deleted file mode 100644 index 4236e72fccad..000000000000 --- a/core/lib/dal/.sqlx/query-e2ff392b3aa7a22fc39d150d08b148b0f1c7e512dd43434062341eb263fe434f.json +++ /dev/null @@ -1,22 +0,0 @@ -{ - "db_name": "PostgreSQL", - "query": "\n UPDATE tee_proof_generation_details\n SET\n status = 'picked_by_prover',\n updated_at = NOW(),\n prover_taken_at = NOW()\n WHERE\n l1_batch_number = (\n SELECT\n proofs.l1_batch_number\n FROM\n tee_proof_generation_details AS proofs\n JOIN tee_verifier_input_producer_jobs AS inputs ON proofs.l1_batch_number = inputs.l1_batch_number\n WHERE\n inputs.status = 'Successful'\n AND (\n proofs.status = 'ready_to_be_proven'\n OR (\n proofs.status = 'picked_by_prover'\n AND proofs.prover_taken_at < NOW() - $1::INTERVAL\n )\n )\n ORDER BY\n l1_batch_number ASC\n LIMIT\n 1\n FOR UPDATE\n SKIP LOCKED\n )\n RETURNING\n tee_proof_generation_details.l1_batch_number\n ", - "describe": { - "columns": [ - { - "ordinal": 0, - "name": "l1_batch_number", - "type_info": "Int8" - } - ], - "parameters": { - "Left": [ - "Interval" - ] - }, - "nullable": [ - false - ] - }, - "hash": "e2ff392b3aa7a22fc39d150d08b148b0f1c7e512dd43434062341eb263fe434f" -} diff --git a/core/lib/dal/doc/TeeProofGenerationDal.md b/core/lib/dal/doc/TeeProofGenerationDal.md new file mode 100644 index 000000000000..23474d5cb5c5 --- /dev/null +++ b/core/lib/dal/doc/TeeProofGenerationDal.md @@ -0,0 +1,19 @@ +# TeeProofGenerationDal + +## Table Name + +`tee_proofs` + +## `status` Diagram + +```mermaid +--- +title: Status Diagram +--- +stateDiagram-v2 +[*] --> ready_to_be_proven : insert_tee_proof_generation_job +ready_to_be_proven --> picked_by_prover : get_next_batch_to_be_proven +picked_by_prover --> generated : save_proof_artifacts_metadata +generated --> [*] + +``` diff --git a/core/lib/dal/migrations/20240805144000_tee_proofs_reorg.down.sql b/core/lib/dal/migrations/20240805144000_tee_proofs_reorg.down.sql new file mode 100644 index 000000000000..09a162f31fbf --- /dev/null +++ b/core/lib/dal/migrations/20240805144000_tee_proofs_reorg.down.sql @@ -0,0 +1,5 @@ +ALTER TABLE tee_verifier_input_producer_jobs ADD COLUMN picked_by TEXT; + +ALTER TABLE tee_proof_generation_details DROP CONSTRAINT tee_proof_generation_details_pkey; +ALTER TABLE tee_proof_generation_details ALTER COLUMN tee_type DROP NOT NULL; +ALTER TABLE tee_proof_generation_details ADD PRIMARY KEY (l1_batch_number); diff --git a/core/lib/dal/migrations/20240805144000_tee_proofs_reorg.up.sql b/core/lib/dal/migrations/20240805144000_tee_proofs_reorg.up.sql new file mode 100644 index 000000000000..160af44c221c --- /dev/null +++ b/core/lib/dal/migrations/20240805144000_tee_proofs_reorg.up.sql @@ -0,0 +1,7 @@ +ALTER TABLE tee_verifier_input_producer_jobs DROP COLUMN picked_by; + +ALTER TABLE tee_proof_generation_details DROP CONSTRAINT tee_proof_generation_details_pkey; +UPDATE tee_proof_generation_details SET tee_type = 'sgx' WHERE tee_type IS NULL; +ALTER TABLE tee_proof_generation_details ALTER COLUMN tee_type SET NOT 
NULL; +ALTER TABLE tee_proof_generation_details ALTER COLUMN l1_batch_number SET NOT NULL; +ALTER TABLE tee_proof_generation_details ADD PRIMARY KEY (l1_batch_number, tee_type); diff --git a/core/lib/dal/src/blocks_web3_dal.rs b/core/lib/dal/src/blocks_web3_dal.rs index 8e455961cdd6..7ef6c6ebd34a 100644 --- a/core/lib/dal/src/blocks_web3_dal.rs +++ b/core/lib/dal/src/blocks_web3_dal.rs @@ -5,6 +5,7 @@ use zksync_db_connection::{ use zksync_system_constants::EMPTY_UNCLES_HASH; use zksync_types::{ api, + fee_model::BatchFeeInput, l2_to_l1_log::L2ToL1Log, vm_trace::Call, web3::{BlockHeader, Bytes}, @@ -564,17 +565,21 @@ impl BlocksWeb3Dal<'_, '_> { .collect()) } - /// Returns `base_fee_per_gas` for L2 block range [max(newest_block - block_count + 1, 0), newest_block] + /// Returns `base_fee_per_gas` and `fair_pubdata_price` for L2 block range [max(newest_block - block_count + 1, 0), newest_block] /// in descending order of L2 block numbers. pub async fn get_fee_history( &mut self, newest_block: L2BlockNumber, block_count: u64, - ) -> DalResult<Vec<U256>> { + ) -> DalResult<(Vec<U256>, Vec<U256>)> { let result: Vec<_> = sqlx::query!( r#" SELECT - base_fee_per_gas + base_fee_per_gas, + l2_fair_gas_price, + fair_pubdata_price, + protocol_version, + l1_gas_price FROM miniblocks WHERE @@ -593,10 +598,27 @@ impl BlocksWeb3Dal<'_, '_> { .fetch_all(self.storage) .await? .into_iter() - .map(|row| bigdecimal_to_u256(row.base_fee_per_gas)) + .map(|row| { + let fee_input = BatchFeeInput::for_protocol_version( + row.protocol_version + .map(|x| (x as u16).try_into().unwrap()) + .unwrap_or_else(ProtocolVersionId::last_potentially_undefined), + row.l2_fair_gas_price as u64, + row.fair_pubdata_price.map(|x| x as u64), + row.l1_gas_price as u64, + ); + + ( + bigdecimal_to_u256(row.base_fee_per_gas), + U256::from(fee_input.fair_pubdata_price()), + ) + }) .collect(); - Ok(result) + let (base_fee_per_gas, effective_pubdata_price): (Vec<U256>, Vec<U256>) = + result.into_iter().unzip(); + + Ok((base_fee_per_gas, effective_pubdata_price)) } pub async fn get_block_details( diff --git a/core/lib/dal/src/models/mod.rs b/core/lib/dal/src/models/mod.rs index 1e852e3f6364..d22541620f2a 100644 --- a/core/lib/dal/src/models/mod.rs +++ b/core/lib/dal/src/models/mod.rs @@ -11,6 +11,7 @@ pub mod storage_log; pub mod storage_oracle_info; pub mod storage_protocol_version; pub mod storage_sync; +pub mod storage_tee_proof; pub mod storage_transaction; pub mod storage_verification_request; pub mod storage_witness_job_info; diff --git a/core/lib/dal/src/models/storage_tee_proof.rs b/core/lib/dal/src/models/storage_tee_proof.rs new file mode 100644 index 000000000000..5c93361e7df1 --- /dev/null +++ b/core/lib/dal/src/models/storage_tee_proof.rs @@ -0,0 +1,10 @@ +use chrono::NaiveDateTime; + +#[derive(Debug, Clone, sqlx::FromRow)] +pub struct StorageTeeProof { + pub pubkey: Option<Vec<u8>>, + pub signature: Option<Vec<u8>>, + pub proof: Option<Vec<u8>>, + pub updated_at: NaiveDateTime, + pub attestation: Option<Vec<u8>>, +} diff --git a/core/lib/dal/src/proof_generation_dal.rs b/core/lib/dal/src/proof_generation_dal.rs index 4e37cc644f8e..f83f026073e6 100644 --- a/core/lib/dal/src/proof_generation_dal.rs +++ b/core/lib/dal/src/proof_generation_dal.rs @@ -88,6 +88,29 @@ impl ProofGenerationDal<'_, '_> { Ok(result) } + pub async fn get_available_batch(&mut self) -> DalResult<L1BatchNumber> { + let result = sqlx::query!( + r#" + SELECT + l1_batch_number + FROM + proof_generation_details + WHERE + proof_blob_url IS NOT NULL + ORDER BY + l1_batch_number DESC + LIMIT + 1 + "#, + ) + .instrument("get_available_batch")
.fetch_one(self.storage) + .await? + .l1_batch_number as u32; + + Ok(L1BatchNumber(result)) + } + /// Marks a previously locked batch as 'unpicked', allowing it to be picked without having /// to wait for the processing timeout. pub async fn unlock_batch(&mut self, l1_batch_number: L1BatchNumber) -> DalResult<()> { diff --git a/core/lib/dal/src/tee_proof_generation_dal.rs b/core/lib/dal/src/tee_proof_generation_dal.rs index 0ddf36abdbed..2bd73323eb10 100644 --- a/core/lib/dal/src/tee_proof_generation_dal.rs +++ b/core/lib/dal/src/tee_proof_generation_dal.rs @@ -1,40 +1,30 @@ +#![doc = include_str!("../doc/TeeProofGenerationDal.md")] use std::time::Duration; -use strum::{Display, EnumString}; use zksync_db_connection::{ - connection::Connection, - error::DalResult, - instrument::{InstrumentExt, Instrumented}, + connection::Connection, error::DalResult, instrument::Instrumented, utils::pg_interval_from_duration, }; use zksync_types::{tee_types::TeeType, L1BatchNumber}; -use crate::Core; +use crate::{ + models::storage_tee_proof::StorageTeeProof, + tee_verifier_input_producer_dal::TeeVerifierInputProducerJobStatus, Core, +}; #[derive(Debug)] pub struct TeeProofGenerationDal<'a, 'c> { pub(crate) storage: &'a mut Connection<'c, Core>, } -#[derive(Debug, EnumString, Display)] -enum TeeProofGenerationJobStatus { - #[strum(serialize = "ready_to_be_proven")] - ReadyToBeProven, - #[strum(serialize = "picked_by_prover")] - PickedByProver, - #[strum(serialize = "generated")] - Generated, - #[strum(serialize = "skipped")] - Skipped, -} - impl TeeProofGenerationDal<'_, '_> { - pub async fn get_next_block_to_be_proven( + pub async fn get_next_batch_to_be_proven( &mut self, + tee_type: TeeType, processing_timeout: Duration, ) -> DalResult> { let processing_timeout = pg_interval_from_duration(processing_timeout); - let result: Option = sqlx::query!( + let query = sqlx::query!( r#" UPDATE tee_proof_generation_details SET @@ -42,19 +32,20 @@ impl TeeProofGenerationDal<'_, '_> { updated_at = NOW(), prover_taken_at = NOW() WHERE - l1_batch_number = ( + tee_type = $1 + AND l1_batch_number = ( SELECT proofs.l1_batch_number FROM tee_proof_generation_details AS proofs JOIN tee_verifier_input_producer_jobs AS inputs ON proofs.l1_batch_number = inputs.l1_batch_number WHERE - inputs.status = 'Successful' + inputs.status = $2 AND ( proofs.status = 'ready_to_be_proven' OR ( proofs.status = 'picked_by_prover' - AND proofs.prover_taken_at < NOW() - $1::INTERVAL + AND proofs.prover_taken_at < NOW() - $3::INTERVAL ) ) ORDER BY @@ -67,48 +58,53 @@ impl TeeProofGenerationDal<'_, '_> { RETURNING tee_proof_generation_details.l1_batch_number "#, + &tee_type.to_string(), + TeeVerifierInputProducerJobStatus::Successful as TeeVerifierInputProducerJobStatus, &processing_timeout, - ) - .fetch_optional(self.storage.conn()) - .await - .unwrap() - .map(|row| L1BatchNumber(row.l1_batch_number as u32)); + ); + let batch_number = Instrumented::new("get_next_batch_to_be_proven") + .with_arg("tee_type", &tee_type) + .with_arg("processing_timeout", &processing_timeout) + .with(query) + .fetch_optional(self.storage) + .await? 
+ .map(|row| L1BatchNumber(row.l1_batch_number as u32)); - Ok(result) + Ok(batch_number) } pub async fn save_proof_artifacts_metadata( &mut self, - block_number: L1BatchNumber, - signature: &[u8], + batch_number: L1BatchNumber, + tee_type: TeeType, pubkey: &[u8], + signature: &[u8], proof: &[u8], - tee_type: TeeType, ) -> DalResult<()> { let query = sqlx::query!( r#" UPDATE tee_proof_generation_details SET + tee_type = $1, status = 'generated', - signature = $1, pubkey = $2, - proof = $3, - tee_type = $4, + signature = $3, + proof = $4, updated_at = NOW() WHERE l1_batch_number = $5 "#, - signature, + tee_type.to_string(), pubkey, + signature, proof, - tee_type.to_string(), - i64::from(block_number.0) + i64::from(batch_number.0) ); let instrumentation = Instrumented::new("save_proof_artifacts_metadata") - .with_arg("signature", &signature) + .with_arg("tee_type", &tee_type) .with_arg("pubkey", &pubkey) - .with_arg("proof", &proof) - .with_arg("tee_type", &tee_type); + .with_arg("signature", &signature) + .with_arg("proof", &proof); let result = instrumentation .clone() .with(query) @@ -116,7 +112,8 @@ impl TeeProofGenerationDal<'_, '_> { .await?; if result.rows_affected() == 0 { let err = instrumentation.constraint_error(anyhow::anyhow!( - "Updating TEE proof for a non-existent batch number is not allowed" + "Updating TEE proof for a non-existent batch number {} is not allowed", + batch_number )); return Err(err); } @@ -126,53 +123,33 @@ impl TeeProofGenerationDal<'_, '_> { pub async fn insert_tee_proof_generation_job( &mut self, - block_number: L1BatchNumber, + batch_number: L1BatchNumber, + tee_type: TeeType, ) -> DalResult<()> { - let block_number = i64::from(block_number.0); - sqlx::query!( + let batch_number = i64::from(batch_number.0); + let query = sqlx::query!( r#" INSERT INTO - tee_proof_generation_details (l1_batch_number, status, created_at, updated_at) + tee_proof_generation_details (l1_batch_number, tee_type, status, created_at, updated_at) VALUES - ($1, 'ready_to_be_proven', NOW(), NOW()) - ON CONFLICT (l1_batch_number) DO NOTHING + ($1, $2, 'ready_to_be_proven', NOW(), NOW()) + ON CONFLICT (l1_batch_number, tee_type) DO NOTHING "#, - block_number, - ) - .instrument("create_tee_proof_generation_details") - .with_arg("l1_batch_number", &block_number) - .report_latency() - .execute(self.storage) - .await?; + batch_number, + tee_type.to_string(), + ); + let instrumentation = Instrumented::new("insert_tee_proof_generation_job") + .with_arg("l1_batch_number", &batch_number) + .with_arg("tee_type", &tee_type); + instrumentation + .clone() + .with(query) + .execute(self.storage) + .await?; Ok(()) } - pub async fn get_oldest_unpicked_batch(&mut self) -> DalResult> { - let result: Option = sqlx::query!( - r#" - SELECT - proofs.l1_batch_number - FROM - tee_proof_generation_details AS proofs - JOIN tee_verifier_input_producer_jobs AS inputs ON proofs.l1_batch_number = inputs.l1_batch_number - WHERE - inputs.status = 'Successful' - AND proofs.status = 'ready_to_be_proven' - ORDER BY - proofs.l1_batch_number ASC - LIMIT - 1 - "#, - ) - .fetch_optional(self.storage.conn()) - .await - .unwrap() - .map(|row| L1BatchNumber(row.l1_batch_number as u32)); - - Ok(result) - } - pub async fn save_attestation(&mut self, pubkey: &[u8], attestation: &[u8]) -> DalResult<()> { let query = sqlx::query!( r#" @@ -188,18 +165,76 @@ impl TeeProofGenerationDal<'_, '_> { let instrumentation = Instrumented::new("save_attestation") .with_arg("pubkey", &pubkey) .with_arg("attestation", &attestation); - let result 
= instrumentation + instrumentation .clone() .with(query) .execute(self.storage) .await?; - if result.rows_affected() == 0 { - let err = instrumentation.constraint_error(anyhow::anyhow!( - "Unable to insert TEE attestation: given pubkey already has an attestation assigned" - )); - return Err(err); - } Ok(()) } + + pub async fn get_tee_proofs( + &mut self, + batch_number: L1BatchNumber, + tee_type: Option, + ) -> DalResult> { + let query = format!( + r#" + SELECT + tp.pubkey, + tp.signature, + tp.proof, + tp.updated_at, + ta.attestation + FROM + tee_proof_generation_details tp + LEFT JOIN + tee_attestations ta ON tp.pubkey = ta.pubkey + WHERE + tp.l1_batch_number = $1 + AND tp.status = 'generated' + {} + ORDER BY tp.l1_batch_number ASC, tp.tee_type ASC + "#, + tee_type.map_or_else(String::new, |_| "AND tp.tee_type = $2".to_string()) + ); + + let mut query = sqlx::query_as(&query).bind(i64::from(batch_number.0)); + + if let Some(tee_type) = tee_type { + query = query.bind(tee_type.to_string()); + } + + let proofs: Vec = query.fetch_all(self.storage.conn()).await.unwrap(); + + Ok(proofs) + } + + pub async fn get_oldest_unpicked_batch(&mut self) -> DalResult> { + let query = sqlx::query!( + r#" + SELECT + proofs.l1_batch_number + FROM + tee_proof_generation_details AS proofs + JOIN tee_verifier_input_producer_jobs AS inputs ON proofs.l1_batch_number = inputs.l1_batch_number + WHERE + inputs.status = $1 + AND proofs.status = 'ready_to_be_proven' + ORDER BY + proofs.l1_batch_number ASC + LIMIT + 1 + "#, + TeeVerifierInputProducerJobStatus::Successful as TeeVerifierInputProducerJobStatus + ); + let batch_number = Instrumented::new("get_oldest_unpicked_batch") + .with(query) + .fetch_optional(self.storage) + .await? + .map(|row| L1BatchNumber(row.l1_batch_number as u32)); + + Ok(batch_number) + } } diff --git a/core/lib/dal/src/tee_verifier_input_producer_dal.rs b/core/lib/dal/src/tee_verifier_input_producer_dal.rs index bdf899fa36f8..4adee62e7aa6 100644 --- a/core/lib/dal/src/tee_verifier_input_producer_dal.rs +++ b/core/lib/dal/src/tee_verifier_input_producer_dal.rs @@ -33,7 +33,7 @@ pub enum TeeVerifierInputProducerJobStatus { /// It is expected to be used if some jobs should be skipped like: /// - testing purposes (want to check a specific L1 Batch, I can mark everything before it skipped) /// - trim down costs on some environments (if I've done breaking changes, - /// makes no sense to wait for everything to be processed, I can just skip them and save resources) + /// makes no sense to wait for everything to be processed, I can just skip them and save resources) ManuallySkipped, /// Currently being processed by one of the jobs. Transitory state, will transition to either /// [`TeeVerifierInputProducerStatus::Successful`] or [`TeeVerifierInputProducerStatus::Failed`]. 
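Taken together, the TEE changes above move the schema to a composite `(l1_batch_number, tee_type)` primary key and scope every `TeeProofGenerationDal` call to a `TeeType`. Below is a minimal sketch of driving the new lifecycle end to end, with statuses as in the `TeeProofGenerationDal` state diagram. It assumes the crate's usual `ConnectionPool`/`CoreDal` access pattern; the `TeeType::Sgx` value, the 10-minute repick timeout, and the placeholder artifact bytes are illustrative, not part of this diff.

```rust
use std::time::Duration;

use zksync_dal::{ConnectionPool, Core, CoreDal, DalResult};
use zksync_types::{tee_types::TeeType, L1BatchNumber};

/// Sketch of the reworked TEE proof lifecycle; wiring is assumed as noted above.
async fn tee_proof_roundtrip(pool: &ConnectionPool<Core>) -> DalResult<()> {
    let mut conn = pool.connection().await?;
    let mut dal = conn.tee_proof_generation_dal();

    // Inserts the (batch, tee_type) pair in the `ready_to_be_proven` status;
    // the composite primary key makes this idempotent per TEE type.
    dal.insert_tee_proof_generation_job(L1BatchNumber(1), TeeType::Sgx)
        .await?;

    // Atomically flips the oldest eligible row to `picked_by_prover`, or
    // re-picks a row whose prover has been silent longer than the timeout.
    if let Some(batch) = dal
        .get_next_batch_to_be_proven(TeeType::Sgx, Duration::from_secs(600))
        .await?
    {
        // Real artifacts come from the TEE prover; placeholder bytes here.
        let (pubkey, signature, proof) = ([0_u8; 33], [0_u8; 65], [0_u8; 32]);
        // Note the new argument order: `tee_type` right after the batch
        // number, then pubkey, signature, proof (matching the reordered
        // SQL placeholders).
        dal.save_proof_artifacts_metadata(batch, TeeType::Sgx, &pubkey, &signature, &proof)
            .await?;
    }
    Ok(())
}
```

The `FOR UPDATE SKIP LOCKED` clause in the `get_next_batch_to_be_proven` query is what lets several provers of the same `tee_type` poll concurrently without double-picking a batch.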
diff --git a/core/lib/env_config/src/eth_sender.rs b/core/lib/env_config/src/eth_sender.rs index bd48f80609e8..18a661099b61 100644 --- a/core/lib/env_config/src/eth_sender.rs +++ b/core/lib/env_config/src/eth_sender.rs @@ -84,6 +84,7 @@ mod tests { num_samples_for_blob_base_fee_estimate: 10, internal_pubdata_pricing_multiplier: 1.0, max_blob_base_fee: None, + settlement_mode: Default::default(), }), watcher: Some(EthWatchConfig { confirmations_for_eth_event: Some(0), diff --git a/core/lib/env_config/src/external_proof_integration_api.rs b/core/lib/env_config/src/external_proof_integration_api.rs new file mode 100644 index 000000000000..dddca93eb0ec --- /dev/null +++ b/core/lib/env_config/src/external_proof_integration_api.rs @@ -0,0 +1,35 @@ +use zksync_config::configs::ExternalProofIntegrationApiConfig; + +use crate::{envy_load, FromEnv}; + +impl FromEnv for ExternalProofIntegrationApiConfig { + fn from_env() -> anyhow::Result { + envy_load( + "external_proof_integration_api", + "EXTERNAL_PROOF_INTEGRATION_API_", + ) + } +} + +#[cfg(test)] +mod tests { + use super::*; + use crate::test_utils::EnvMutex; + + static MUTEX: EnvMutex = EnvMutex::new(); + + fn expected_config() -> ExternalProofIntegrationApiConfig { + ExternalProofIntegrationApiConfig { http_port: 3320 } + } + + #[test] + fn from_env() { + let config = r#" + EXTERNAL_PROOF_INTEGRATION_API_HTTP_PORT="3320" + "#; + let mut lock = MUTEX.lock(); + lock.set_env(config); + let actual = ExternalProofIntegrationApiConfig::from_env().unwrap(); + assert_eq!(actual, expected_config()); + } +} diff --git a/core/lib/env_config/src/lib.rs b/core/lib/env_config/src/lib.rs index 789f6f8be2fd..fcb0f3625ea1 100644 --- a/core/lib/env_config/src/lib.rs +++ b/core/lib/env_config/src/lib.rs @@ -24,6 +24,7 @@ mod utils; mod base_token_adjuster; mod da_dispatcher; mod external_price_api_client; +mod external_proof_integration_api; mod genesis; #[cfg(test)] mod test_utils; diff --git a/core/lib/env_config/src/vm_runner.rs b/core/lib/env_config/src/vm_runner.rs index 9973d760a236..efaf5d1666c3 100644 --- a/core/lib/env_config/src/vm_runner.rs +++ b/core/lib/env_config/src/vm_runner.rs @@ -1,4 +1,6 @@ -use zksync_config::configs::{BasicWitnessInputProducerConfig, ProtectiveReadsWriterConfig}; +use zksync_config::configs::{ + BasicWitnessInputProducerConfig, ExperimentalVmConfig, ProtectiveReadsWriterConfig, +}; use crate::{envy_load, FromEnv}; @@ -13,3 +15,74 @@ impl FromEnv for BasicWitnessInputProducerConfig { envy_load("vm_runner.bwip", "VM_RUNNER_BWIP_") } } + +impl FromEnv for ExperimentalVmConfig { + fn from_env() -> anyhow::Result { + Ok(Self { + playground: envy_load("experimental_vm.playground", "EXPERIMENTAL_VM_PLAYGROUND_")?, + ..envy_load("experimental_vm", "EXPERIMENTAL_VM_")? 
+ }) + } +} + +#[cfg(test)] +mod tests { + use zksync_basic_types::{vm::FastVmMode, L1BatchNumber}; + + use super::*; + use crate::test_utils::EnvMutex; + + static MUTEX: EnvMutex = EnvMutex::new(); + + #[test] + fn bwip_config_from_env() { + let mut lock = MUTEX.lock(); + let config = r#" + VM_RUNNER_BWIP_DB_PATH=/db/bwip + VM_RUNNER_BWIP_WINDOW_SIZE=50 + VM_RUNNER_BWIP_FIRST_PROCESSED_BATCH=123 + "#; + lock.set_env(config); + + let config = BasicWitnessInputProducerConfig::from_env().unwrap(); + assert_eq!(config.db_path, "/db/bwip"); + assert_eq!(config.window_size, 50); + assert_eq!(config.first_processed_batch, L1BatchNumber(123)); + } + + #[test] + fn experimental_vm_config_from_env() { + let mut lock = MUTEX.lock(); + let config = r#" + EXPERIMENTAL_VM_STATE_KEEPER_FAST_VM_MODE=new + EXPERIMENTAL_VM_PLAYGROUND_FAST_VM_MODE=shadow + EXPERIMENTAL_VM_PLAYGROUND_DB_PATH=/db/vm_playground + EXPERIMENTAL_VM_PLAYGROUND_FIRST_PROCESSED_BATCH=123 + EXPERIMENTAL_VM_PLAYGROUND_RESET=true + "#; + lock.set_env(config); + + let config = ExperimentalVmConfig::from_env().unwrap(); + assert_eq!(config.state_keeper_fast_vm_mode, FastVmMode::New); + assert_eq!(config.playground.fast_vm_mode, FastVmMode::Shadow); + assert_eq!(config.playground.db_path, "/db/vm_playground"); + assert_eq!(config.playground.first_processed_batch, L1BatchNumber(123)); + assert!(config.playground.reset); + + lock.remove_env(&["EXPERIMENTAL_VM_PLAYGROUND_RESET"]); + let config = ExperimentalVmConfig::from_env().unwrap(); + assert!(!config.playground.reset); + + lock.remove_env(&["EXPERIMENTAL_VM_PLAYGROUND_FIRST_PROCESSED_BATCH"]); + let config = ExperimentalVmConfig::from_env().unwrap(); + assert_eq!(config.playground.first_processed_batch, L1BatchNumber(0)); + + lock.remove_env(&["EXPERIMENTAL_VM_PLAYGROUND_FAST_VM_MODE"]); + let config = ExperimentalVmConfig::from_env().unwrap(); + assert_eq!(config.playground.fast_vm_mode, FastVmMode::Old); + + lock.remove_env(&["EXPERIMENTAL_VM_PLAYGROUND_DB_PATH"]); + let config = ExperimentalVmConfig::from_env().unwrap(); + assert!(!config.playground.db_path.is_empty()); + } +} diff --git a/core/lib/eth_client/src/clients/http/decl.rs b/core/lib/eth_client/src/clients/http/decl.rs index 77cc1c841e1b..3805bc5e7df5 100644 --- a/core/lib/eth_client/src/clients/http/decl.rs +++ b/core/lib/eth_client/src/clients/http/decl.rs @@ -1,9 +1,9 @@ use jsonrpsee::proc_macros::rpc; use zksync_types::{web3, Address, H256, U256, U64}; -use zksync_web3_decl::client::{ForNetwork, L1}; +use zksync_web3_decl::client::ForWeb3Network; /// Subset of the L1 `eth` namespace used by the L1 client. 
-#[rpc(client, namespace = "eth", client_bounds(Self: ForNetwork))] +#[rpc(client, namespace = "eth", client_bounds(Self: ForWeb3Network))] pub(super) trait L1EthNamespace { #[method(name = "chainId")] async fn chain_id(&self) -> RpcResult; diff --git a/core/lib/eth_client/src/clients/http/mod.rs b/core/lib/eth_client/src/clients/http/mod.rs index 2d1ed244afd4..111507c65f05 100644 --- a/core/lib/eth_client/src/clients/http/mod.rs +++ b/core/lib/eth_client/src/clients/http/mod.rs @@ -32,6 +32,7 @@ enum Method { #[metrics(name = "sign_prepared_tx_for_addr")] SignPreparedTx, Allowance, + L2FeeHistory, } #[derive(Debug, Metrics)] diff --git a/core/lib/eth_client/src/clients/http/query.rs b/core/lib/eth_client/src/clients/http/query.rs index 65387ff00779..54419f3b5626 100644 --- a/core/lib/eth_client/src/clients/http/query.rs +++ b/core/lib/eth_client/src/clients/http/query.rs @@ -3,14 +3,20 @@ use std::fmt; use async_trait::async_trait; use jsonrpsee::core::ClientError; use zksync_types::{web3, Address, SLChainId, H256, U256, U64}; -use zksync_web3_decl::error::{ClientRpcContext, EnrichedClientError, EnrichedClientResult}; +use zksync_web3_decl::{ + client::{DynClient, ForWeb3Network, MockClient, L1, L2}, + error::{ClientRpcContext, EnrichedClientError, EnrichedClientResult}, + namespaces::EthNamespaceClient, +}; use super::{decl::L1EthNamespaceClient, Method, COUNTERS, LATENCIES}; use crate::{ types::{ExecutedTxStatus, FailureInfo}, - BaseFees, EthInterface, RawTransactionBytes, + BaseFees, EthFeeInterface, EthInterface, RawTransactionBytes, }; +const FEE_HISTORY_MAX_REQUEST_CHUNK: usize = 1024; + #[async_trait] impl EthInterface for T where @@ -74,73 +80,6 @@ where Ok(tx) } - async fn base_fee_history( - &self, - upto_block: usize, - block_count: usize, - ) -> EnrichedClientResult> { - // Non-panicking conversion to u64. - fn cast_to_u64(value: U256, tag: &str) -> EnrichedClientResult { - u64::try_from(value).map_err(|_| { - let err = ClientError::Custom(format!("{tag} value does not fit in u64")); - EnrichedClientError::new(err, "cast_to_u64").with_arg("value", &value) - }) - } - - const MAX_REQUEST_CHUNK: usize = 1024; - - COUNTERS.call[&(Method::BaseFeeHistory, self.component())].inc(); - let latency = LATENCIES.direct[&Method::BaseFeeHistory].start(); - let mut history = Vec::with_capacity(block_count); - let from_block = upto_block.saturating_sub(block_count); - - // Here we are requesting `fee_history` from blocks - // `(from_block; upto_block)` in chunks of size `MAX_REQUEST_CHUNK` - // starting from the oldest block. - for chunk_start in (from_block..=upto_block).step_by(MAX_REQUEST_CHUNK) { - let chunk_end = (chunk_start + MAX_REQUEST_CHUNK).min(upto_block); - let chunk_size = chunk_end - chunk_start; - - let fee_history = self - .fee_history( - U64::from(chunk_size), - web3::BlockNumber::from(chunk_end), - None, - ) - .rpc_context("fee_history") - .with_arg("chunk_size", &chunk_size) - .with_arg("block", &chunk_end) - .await?; - - // Check that the lengths are the same. - // Per specification, the values should always be provided, and must be 0 for blocks - // prior to EIP-4844. 
- // https://ethereum.github.io/execution-apis/api-documentation/ - if fee_history.base_fee_per_gas.len() != fee_history.base_fee_per_blob_gas.len() { - tracing::error!( - "base_fee_per_gas and base_fee_per_blob_gas have different lengths: {} and {}", - fee_history.base_fee_per_gas.len(), - fee_history.base_fee_per_blob_gas.len() - ); - } - - for (base, blob) in fee_history - .base_fee_per_gas - .into_iter() - .zip(fee_history.base_fee_per_blob_gas) - { - let fees = BaseFees { - base_fee_per_gas: cast_to_u64(base, "base_fee_per_gas")?, - base_fee_per_blob_gas: blob, - }; - history.push(fees) - } - } - - latency.observe(); - Ok(history) - } - async fn get_pending_block_base_fee_per_gas(&self) -> EnrichedClientResult { COUNTERS.call[&(Method::PendingBlockBaseFee, self.component())].inc(); let latency = LATENCIES.direct[&Method::PendingBlockBaseFee].start(); @@ -354,6 +293,178 @@ where } } +async fn l1_base_fee_history( + client: &T, + upto_block: usize, + block_count: usize, +) -> EnrichedClientResult> +where + T: ForWeb3Network + L1EthNamespaceClient + Send + Sync, +{ + COUNTERS.call[&(Method::BaseFeeHistory, client.component())].inc(); + let latency = LATENCIES.direct[&Method::BaseFeeHistory].start(); + let mut history = Vec::with_capacity(block_count); + let from_block = upto_block.saturating_sub(block_count); + + // Here we are requesting `fee_history` from blocks + // `(from_block; upto_block)` in chunks of size `MAX_REQUEST_CHUNK` + // starting from the oldest block. + for chunk_start in (from_block..=upto_block).step_by(FEE_HISTORY_MAX_REQUEST_CHUNK) { + let chunk_end = (chunk_start + FEE_HISTORY_MAX_REQUEST_CHUNK).min(upto_block); + let chunk_size = chunk_end - chunk_start; + + let fee_history = client + .fee_history( + U64::from(chunk_size), + web3::BlockNumber::from(chunk_end), + None, + ) + .rpc_context("fee_history") + .with_arg("chunk_size", &chunk_size) + .with_arg("block", &chunk_end) + .await?; + + // Check that the lengths are the same. + // Per specification, the values should always be provided, and must be 0 for blocks + // prior to EIP-4844. 
+ // https://ethereum.github.io/execution-apis/api-documentation/ + if fee_history.base_fee_per_gas.len() != fee_history.base_fee_per_blob_gas.len() { + tracing::error!( + "base_fee_per_gas and base_fee_per_blob_gas have different lengths: {} and {}", + fee_history.base_fee_per_gas.len(), + fee_history.base_fee_per_blob_gas.len() + ); + } + + for (base, blob) in fee_history + .base_fee_per_gas + .into_iter() + .zip(fee_history.base_fee_per_blob_gas) + { + let fees = BaseFees { + base_fee_per_gas: cast_to_u64(base, "base_fee_per_gas")?, + base_fee_per_blob_gas: blob, + l2_pubdata_price: 0.into(), + }; + history.push(fees) + } + } + + latency.observe(); + Ok(history) +} + +#[async_trait::async_trait] +impl EthFeeInterface for Box> { + async fn base_fee_history( + &self, + upto_block: usize, + block_count: usize, + ) -> EnrichedClientResult> { + l1_base_fee_history(self, upto_block, block_count).await + } +} + +#[async_trait::async_trait] +impl EthFeeInterface for MockClient { + async fn base_fee_history( + &self, + upto_block: usize, + block_count: usize, + ) -> EnrichedClientResult> { + l1_base_fee_history(self, upto_block, block_count).await + } +} + +async fn l2_base_fee_history( + client: &T, + upto_block: usize, + block_count: usize, +) -> EnrichedClientResult> +where + T: ForWeb3Network + EthNamespaceClient + Send + Sync, +{ + COUNTERS.call[&(Method::L2FeeHistory, client.component())].inc(); + let latency = LATENCIES.direct[&Method::BaseFeeHistory].start(); + let mut history = Vec::with_capacity(block_count); + let from_block = upto_block.saturating_sub(block_count); + + // Here we are requesting `fee_history` from blocks + // `(from_block; upto_block)` in chunks of size `FEE_HISTORY_MAX_REQUEST_CHUNK` + // starting from the oldest block. + for chunk_start in (from_block..=upto_block).step_by(FEE_HISTORY_MAX_REQUEST_CHUNK) { + let chunk_end = (chunk_start + FEE_HISTORY_MAX_REQUEST_CHUNK).min(upto_block); + let chunk_size = chunk_end - chunk_start; + + let fee_history = EthNamespaceClient::fee_history( + client, + U64::from(chunk_size), + zksync_types::api::BlockNumber::from(chunk_end), + vec![], + ) + .rpc_context("fee_history") + .with_arg("chunk_size", &chunk_size) + .with_arg("block", &chunk_end) + .await?; + + // Check that the lengths are the same. + if fee_history.inner.base_fee_per_gas.len() != fee_history.l2_pubdata_price.len() { + tracing::error!( + "base_fee_per_gas and pubdata_price have different lengths: {} and {}", + fee_history.inner.base_fee_per_gas.len(), + fee_history.l2_pubdata_price.len() + ); + } + + for (base, l2_pubdata_price) in fee_history + .inner + .base_fee_per_gas + .into_iter() + .zip(fee_history.l2_pubdata_price) + { + let fees = BaseFees { + base_fee_per_gas: cast_to_u64(base, "base_fee_per_gas")?, + base_fee_per_blob_gas: 0.into(), + l2_pubdata_price, + }; + history.push(fees) + } + } + + latency.observe(); + Ok(history) +} + +#[async_trait::async_trait] +impl EthFeeInterface for Box> { + async fn base_fee_history( + &self, + upto_block: usize, + block_count: usize, + ) -> EnrichedClientResult> { + l2_base_fee_history(self, upto_block, block_count).await + } +} + +#[async_trait::async_trait] +impl EthFeeInterface for MockClient { + async fn base_fee_history( + &self, + upto_block: usize, + block_count: usize, + ) -> EnrichedClientResult> { + l2_base_fee_history(self, upto_block, block_count).await + } +} + +/// Non-panicking conversion to u64. 
+fn cast_to_u64(value: U256, tag: &str) -> EnrichedClientResult { + u64::try_from(value).map_err(|_| { + let err = ClientError::Custom(format!("{tag} value does not fit in u64")); + EnrichedClientError::new(err, "cast_to_u64").with_arg("value", &value) + }) +} + #[cfg(test)] mod tests { use zksync_web3_decl::client::{Client, L1}; diff --git a/core/lib/eth_client/src/clients/http/signing.rs b/core/lib/eth_client/src/clients/http/signing.rs index 542b42420ae3..e602f98a35e9 100644 --- a/core/lib/eth_client/src/clients/http/signing.rs +++ b/core/lib/eth_client/src/clients/http/signing.rs @@ -74,9 +74,9 @@ impl fmt::Debug for SigningClient { } } -impl AsRef> for SigningClient { - fn as_ref(&self) -> &DynClient { - self.query_client.as_ref() +impl AsRef for SigningClient { + fn as_ref(&self) -> &(dyn EthInterface + 'static) { + &self.query_client } } diff --git a/core/lib/eth_client/src/clients/mock.rs b/core/lib/eth_client/src/clients/mock.rs index d2d6b2108f53..46ad5dc5310e 100644 --- a/core/lib/eth_client/src/clients/mock.rs +++ b/core/lib/eth_client/src/clients/mock.rs @@ -1,20 +1,22 @@ use std::{ collections::{BTreeMap, HashMap}, fmt, + marker::PhantomData, sync::{Arc, RwLock, RwLockWriteGuard}, }; use jsonrpsee::{core::ClientError, types::ErrorObject}; use zksync_types::{ + api::FeeHistory, ethabi, web3::{self, contract::Tokenize, BlockId}, - Address, SLChainId, EIP_4844_TX_TYPE, H160, H256, U256, U64, + Address, L1ChainId, L2ChainId, SLChainId, EIP_4844_TX_TYPE, H160, H256, U256, U64, }; -use zksync_web3_decl::client::{DynClient, MockClient, L1}; +use zksync_web3_decl::client::{MockClient, MockClientBuilder, Network, L1, L2}; use crate::{ types::{ContractCallError, SignedCallResult, SigningError}, - BaseFees, BoundEthInterface, Options, RawTransactionBytes, + BaseFees, BoundEthInterface, EthInterface, Options, RawTransactionBytes, }; #[derive(Debug, Clone)] @@ -82,9 +84,9 @@ struct MockExecutedTx { success: bool, } -/// Mutable part of [`MockEthereum`] that needs to be synchronized via an `RwLock`. +/// Mutable part of [`MockSettlementLayer`] that needs to be synchronized via an `RwLock`. #[derive(Debug, Default)] -struct MockEthereumInner { +struct MockSettlementLayerInner { block_number: u64, executed_txs: HashMap, sent_txs: HashMap, @@ -93,7 +95,7 @@ struct MockEthereumInner { nonces: BTreeMap, } -impl MockEthereumInner { +impl MockSettlementLayerInner { fn execute_tx( &mut self, tx_hash: H256, @@ -131,7 +133,7 @@ impl MockEthereumInner { } fn get_transaction_count(&self, address: Address, block: web3::BlockNumber) -> U256 { - if address != MockEthereum::SENDER_ACCOUNT { + if address != MOCK_SENDER_ACCOUNT { unimplemented!("Getting nonce for custom account is not supported"); } @@ -206,7 +208,7 @@ impl MockEthereumInner { #[derive(Debug)] pub struct MockExecutedTxHandle<'a> { - inner: RwLockWriteGuard<'a, MockEthereumInner>, + inner: RwLockWriteGuard<'a, MockSettlementLayerInner>, tx_hash: H256, } @@ -221,22 +223,27 @@ impl MockExecutedTxHandle<'_> { type CallHandler = dyn Fn(&web3::CallRequest, BlockId) -> Result + Send + Sync; -/// Builder for [`MockEthereum`] client. -pub struct MockEthereumBuilder { +pub trait SupportedMockSLNetwork: Network { + fn build_client(builder: MockSettlementLayerBuilder) -> MockClient; +} + +/// Builder for [`MockSettlementLayer`] client. +pub struct MockSettlementLayerBuilder { max_fee_per_gas: U256, max_priority_fee_per_gas: U256, base_fee_history: Vec, /// If true, the mock will not check the ordering nonces of the transactions. 
/// This is useful for testing the cases when the transactions are executed out of order. non_ordering_confirmations: bool, - inner: Arc>, + inner: Arc>, call_handler: Box, + _network: PhantomData, } -impl fmt::Debug for MockEthereumBuilder { +impl fmt::Debug for MockSettlementLayerBuilder { fn fmt(&self, formatter: &mut fmt::Formatter<'_>) -> fmt::Result { formatter - .debug_struct("MockEthereumBuilder") + .debug_struct("MockSettlementLayerBuilder") .field("max_fee_per_gas", &self.max_fee_per_gas) .field("max_priority_fee_per_gas", &self.max_priority_fee_per_gas) .field("base_fee_history", &self.base_fee_history) @@ -249,7 +256,7 @@ impl fmt::Debug for MockEthereumBuilder { } } -impl Default for MockEthereumBuilder { +impl Default for MockSettlementLayerBuilder { fn default() -> Self { Self { max_fee_per_gas: 100.into(), @@ -260,11 +267,12 @@ impl Default for MockEthereumBuilder { call_handler: Box::new(|call, block_id| { panic!("Unexpected eth_call: {call:?}, {block_id:?}"); }), + _network: PhantomData, } } } -impl MockEthereumBuilder { +impl MockSettlementLayerBuilder { /// Sets fee history for each block in the mocked Ethereum network, starting from the 0th block. pub fn with_fee_history(self, history: Vec) -> Self { Self { @@ -327,14 +335,11 @@ impl MockEthereumBuilder { }) } - fn build_client(self) -> MockClient { - const CHAIN_ID: SLChainId = SLChainId(9); - - let base_fee_history = self.base_fee_history.clone(); + fn build_client_inner(self, chaind_id: u64, network: Net) -> MockClientBuilder { let call_handler = self.call_handler; - MockClient::builder(CHAIN_ID.into()) - .method("eth_chainId", || Ok(U64::from(CHAIN_ID.0))) + MockClient::builder(network) + .method("eth_chainId", move || Ok(U64::from(chaind_id))) .method("eth_blockNumber", { let inner = self.inner.clone(); move || Ok(U64::from(inner.read().unwrap().block_number)) @@ -355,30 +360,6 @@ impl MockEthereumBuilder { } }) .method("eth_gasPrice", move || Ok(self.max_fee_per_gas)) - .method( - "eth_feeHistory", - move |block_count: U64, newest_block: web3::BlockNumber, _: Option>| { - let web3::BlockNumber::Number(from_block) = newest_block else { - panic!("Non-numeric newest block in `eth_feeHistory`"); - }; - let from_block = from_block.as_usize(); - let start_block = from_block.saturating_sub(block_count.as_usize() - 1); - Ok(web3::FeeHistory { - oldest_block: start_block.into(), - base_fee_per_gas: base_fee_history[start_block..=from_block] - .iter() - .map(|fee| U256::from(fee.base_fee_per_gas)) - .collect(), - base_fee_per_blob_gas: base_fee_history[start_block..=from_block] - .iter() - .map(|fee| fee.base_fee_per_blob_gas) - .collect(), - gas_used_ratio: vec![], // not used - blob_gas_used_ratio: vec![], // not used - reward: None, - }) - }, - ) .method("eth_call", { let inner = self.inner.clone(); move |req, block| { @@ -410,43 +391,120 @@ impl MockEthereumBuilder { Ok(status.map(|status| status.receipt.clone())) } }) - .build() } - /// Builds a mock Ethereum client. - pub fn build(self) -> MockEthereum { - MockEthereum { + pub fn build(self) -> MockSettlementLayer { + MockSettlementLayer { max_fee_per_gas: self.max_fee_per_gas, max_priority_fee_per_gas: self.max_priority_fee_per_gas, non_ordering_confirmations: self.non_ordering_confirmations, inner: self.inner.clone(), - client: self.build_client(), + client: Net::build_client(self), } } } -/// Mock Ethereum client. 
+fn l2_eth_fee_history( + base_fee_history: &[BaseFees], + block_count: U64, + newest_block: web3::BlockNumber, +) -> FeeHistory { + let web3::BlockNumber::Number(from_block) = newest_block else { + panic!("Non-numeric newest block in `eth_feeHistory`"); + }; + let from_block = from_block.as_usize(); + let start_block = from_block.saturating_sub(block_count.as_usize() - 1); + + FeeHistory { + inner: web3::FeeHistory { + oldest_block: start_block.into(), + base_fee_per_gas: base_fee_history[start_block..=from_block] + .iter() + .map(|fee| U256::from(fee.base_fee_per_gas)) + .collect(), + base_fee_per_blob_gas: base_fee_history[start_block..=from_block] + .iter() + .map(|fee| fee.base_fee_per_blob_gas) + .collect(), + gas_used_ratio: vec![], // not used + blob_gas_used_ratio: vec![], // not used + reward: None, + }, + l2_pubdata_price: base_fee_history[start_block..=from_block] + .iter() + .map(|fee| fee.l2_pubdata_price) + .collect(), + } +} + +impl SupportedMockSLNetwork for L1 { + fn build_client(builder: MockSettlementLayerBuilder) -> MockClient { + const CHAIN_ID: L1ChainId = L1ChainId(9); + + let base_fee_history = builder.base_fee_history.clone(); + + builder + .build_client_inner(CHAIN_ID.0, CHAIN_ID.into()) + .method( + "eth_feeHistory", + move |block_count: U64, newest_block: web3::BlockNumber, _: Option>| { + Ok(l2_eth_fee_history(&base_fee_history, block_count, newest_block).inner) + }, + ) + .build() + } +} + +impl SupportedMockSLNetwork for L2 { + fn build_client(builder: MockSettlementLayerBuilder) -> MockClient { + let chain_id: L2ChainId = 9u64.try_into().unwrap(); + + let base_fee_history = builder.base_fee_history.clone(); + + builder + .build_client_inner(chain_id.as_u64(), chain_id.into()) + .method( + "eth_feeHistory", + move |block_count: U64, newest_block: web3::BlockNumber, _: Option>| { + Ok(l2_eth_fee_history( + &base_fee_history, + block_count, + newest_block, + )) + }, + ) + .build() + } +} + +/// Mock settlement layer client. #[derive(Debug, Clone)] -pub struct MockEthereum { +pub struct MockSettlementLayer { max_fee_per_gas: U256, max_priority_fee_per_gas: U256, non_ordering_confirmations: bool, - inner: Arc>, - client: MockClient, + inner: Arc>, + client: MockClient, +} + +impl Default for MockSettlementLayer { + fn default() -> Self { + Self::builder().build() + } } -impl Default for MockEthereum { +impl Default for MockSettlementLayer { fn default() -> Self { Self::builder().build() } } -impl MockEthereum { - const SENDER_ACCOUNT: Address = Address::repeat_byte(0x11); +const MOCK_SENDER_ACCOUNT: Address = Address::repeat_byte(0x11); - /// Initializes a builder for a [`MockEthereum`] instance. - pub fn builder() -> MockEthereumBuilder { - MockEthereumBuilder::default() +impl MockSettlementLayer { + /// Initializes a builder for a [`MockSettlementLayer`] instance. + pub fn builder() -> MockSettlementLayerBuilder { + MockSettlementLayerBuilder::default() } /// A fake `sha256` hasher, which calculates an `std::hash` instead. @@ -524,19 +582,21 @@ impl MockEthereum { } /// Converts this client into an immutable / contract-agnostic client. 
- pub fn into_client(self) -> MockClient { + pub fn into_client(self) -> MockClient { self.client } } -impl AsRef> for MockEthereum { - fn as_ref(&self) -> &DynClient { +impl AsRef for MockSettlementLayer { + fn as_ref(&self) -> &(dyn EthInterface + 'static) { &self.client } } #[async_trait::async_trait] -impl BoundEthInterface for MockEthereum { +impl BoundEthInterface + for MockSettlementLayer +{ fn clone_boxed(&self) -> Box { Box::new(self.clone()) } @@ -558,7 +618,7 @@ impl BoundEthInterface for MockEthereum { } fn sender_account(&self) -> Address { - Self::SENDER_ACCOUNT + MOCK_SENDER_ACCOUNT } async fn sign_prepared_tx_for_addr( @@ -586,24 +646,25 @@ mod tests { use zksync_types::{commitment::L1BatchCommitmentMode, ProtocolVersionId}; use super::*; - use crate::{CallFunctionArgs, EthInterface}; + use crate::{CallFunctionArgs, EthFeeInterface, EthInterface}; - fn base_fees(block: u64, blob: u64) -> BaseFees { + fn base_fees(block: u64, blob: u64, pubdata_price: u64) -> BaseFees { BaseFees { base_fee_per_gas: block, base_fee_per_blob_gas: U256::from(blob), + l2_pubdata_price: U256::from(pubdata_price), } } #[tokio::test] async fn managing_block_number() { - let mock = MockEthereum::builder() + let mock = MockSettlementLayer::::builder() .with_fee_history(vec![ - base_fees(0, 4), - base_fees(1, 3), - base_fees(2, 2), - base_fees(3, 1), - base_fees(4, 0), + base_fees(0, 4, 0), + base_fees(1, 3, 0), + base_fees(2, 2, 0), + base_fees(3, 1, 0), + base_fees(4, 0, 0), ]) .build(); let block_number = mock.client.block_number().await.unwrap(); @@ -628,7 +689,7 @@ mod tests { #[tokio::test] async fn getting_chain_id() { - let mock = MockEthereum::builder().build(); + let mock = MockSettlementLayer::::builder().build(); let chain_id = mock.client.fetch_chain_id().await.unwrap(); assert_eq!(chain_id, SLChainId(9)); } @@ -636,28 +697,50 @@ mod tests { #[tokio::test] async fn managing_fee_history() { let initial_fee_history = vec![ - base_fees(1, 4), - base_fees(2, 3), - base_fees(3, 2), - base_fees(4, 1), - base_fees(5, 0), + base_fees(1, 4, 0), + base_fees(2, 3, 0), + base_fees(3, 2, 0), + base_fees(4, 1, 0), + base_fees(5, 0, 0), + ]; + let client = MockSettlementLayer::::builder() + .with_fee_history(initial_fee_history.clone()) + .build(); + client.advance_block_number(4); + + let fee_history = client.client.base_fee_history(4, 4).await.unwrap(); + assert_eq!(fee_history, initial_fee_history[1..=4]); + let fee_history = client.client.base_fee_history(2, 2).await.unwrap(); + assert_eq!(fee_history, initial_fee_history[1..=2]); + let fee_history = client.client.base_fee_history(3, 2).await.unwrap(); + assert_eq!(fee_history, initial_fee_history[2..=3]); + } + + #[tokio::test] + async fn managing_fee_history_l2() { + let initial_fee_history = vec![ + base_fees(1, 0, 11), + base_fees(2, 0, 12), + base_fees(3, 0, 13), + base_fees(4, 0, 14), + base_fees(5, 0, 15), ]; - let client = MockEthereum::builder() + let client = MockSettlementLayer::::builder() .with_fee_history(initial_fee_history.clone()) .build(); client.advance_block_number(4); - let fee_history = client.as_ref().base_fee_history(4, 4).await.unwrap(); - assert_eq!(fee_history, &initial_fee_history[1..=4]); - let fee_history = client.as_ref().base_fee_history(2, 2).await.unwrap(); - assert_eq!(fee_history, &initial_fee_history[1..=2]); - let fee_history = client.as_ref().base_fee_history(3, 2).await.unwrap(); - assert_eq!(fee_history, &initial_fee_history[2..=3]); + let fee_history = client.client.base_fee_history(4, 4).await.unwrap(); + 
assert_eq!(fee_history, initial_fee_history[1..=4]); + let fee_history = client.client.base_fee_history(2, 2).await.unwrap(); + assert_eq!(fee_history, initial_fee_history[1..=2]); + let fee_history = client.client.base_fee_history(3, 2).await.unwrap(); + assert_eq!(fee_history, initial_fee_history[2..=3]); } #[tokio::test] async fn managing_transactions() { - let client = MockEthereum::builder() + let client = MockSettlementLayer::::builder() .with_non_ordering_confirmation(true) .build(); client.advance_block_number(2); @@ -710,7 +793,7 @@ mod tests { #[tokio::test] async fn calling_contracts() { - let client = MockEthereum::builder() + let client = MockSettlementLayer::::builder() .with_call_handler(|req, _block_id| { let packed_semver = ProtocolVersionId::latest().into_packed_semver_with_patch(0); let call_signature = &req.data.as_ref().unwrap().0[..4]; @@ -759,7 +842,7 @@ mod tests { #[tokio::test] async fn getting_transaction_failure_reason() { - let client = MockEthereum::default(); + let client = MockSettlementLayer::::default(); let signed_tx = client .sign_prepared_tx( vec![1, 2, 3], diff --git a/core/lib/eth_client/src/clients/mod.rs b/core/lib/eth_client/src/clients/mod.rs index 05b7f852f391..b08f92115492 100644 --- a/core/lib/eth_client/src/clients/mod.rs +++ b/core/lib/eth_client/src/clients/mod.rs @@ -7,5 +7,5 @@ pub use zksync_web3_decl::client::{Client, DynClient, L1}; pub use self::{ http::{PKSigningClient, SigningClient}, - mock::{MockEthereum, MockEthereumBuilder}, + mock::{MockSettlementLayer, MockSettlementLayerBuilder}, }; diff --git a/core/lib/eth_client/src/lib.rs b/core/lib/eth_client/src/lib.rs index 3e8641845c61..64fa30481118 100644 --- a/core/lib/eth_client/src/lib.rs +++ b/core/lib/eth_client/src/lib.rs @@ -10,7 +10,6 @@ use zksync_types::{ }, Address, SLChainId, H160, H256, U256, U64, }; -use zksync_web3_decl::client::{DynClient, L1}; pub use zksync_web3_decl::{ error::{EnrichedClientError, EnrichedClientResult}, jsonrpsee::core::ClientError, @@ -69,7 +68,10 @@ impl Options { #[derive(Debug, Clone, PartialEq, Eq, Default)] pub struct BaseFees { pub base_fee_per_gas: u64, + // Base fee per blob gas. It is zero on networks that do not support blob transactions (e.g. L2s). pub base_fee_per_blob_gas: U256, + // The price (in wei) for relaying the pubdata to L1. It is non-zero only for L2 settlement layers. + pub l2_pubdata_price: U256, } /// Common Web3 interface, as seen by the core applications. @@ -83,7 +85,7 @@ pub struct BaseFees { /// If you want to add a method to this trait, make sure that it doesn't depend on any particular /// contract or account address. For that, you can use the `BoundEthInterface` trait. #[async_trait] -pub trait EthInterface: Sync + Send { +pub trait EthInterface: Sync + Send + fmt::Debug { /// Fetches the L1 chain ID (in contrast to [`BoundEthInterface::chain_id()`] which returns /// the *expected* L1 chain ID). async fn fetch_chain_id(&self) -> EnrichedClientResult; @@ -95,16 +97,6 @@ pub trait EthInterface: Sync + Send { block: BlockNumber, ) -> EnrichedClientResult; - /// Collects the base fee history for the specified block range. - /// - /// Returns 1 value for each block in range, assuming that these blocks exist. - /// Will return an error if the `from_block + block_count` is beyond the head block. - async fn base_fee_history( - &self, - from_block: usize, - block_count: usize, - ) -> EnrichedClientResult>; - /// Returns the `base_fee_per_gas` value for the currently pending L1 block. 
async fn get_pending_block_base_fee_per_gas(&self) -> EnrichedClientResult; @@ -154,6 +146,19 @@ pub trait EthInterface: Sync + Send { async fn block(&self, block_id: BlockId) -> EnrichedClientResult>>; } +#[async_trait::async_trait] +pub trait EthFeeInterface: EthInterface { + /// Collects the base fee history for the specified block range. + /// + /// Returns 1 value for each block in range, assuming that these blocks exist. + /// Will return an error if the `from_block + block_count` is beyond the head block. + async fn base_fee_history( + &self, + from_block: usize, + block_count: usize, + ) -> EnrichedClientResult>; +} + /// An extension of `EthInterface` trait, which is used to perform queries that are bound to /// a certain contract and account. /// @@ -168,7 +173,7 @@ pub trait EthInterface: Sync + Send { /// 2. Consider adding the "unbound" version to the `EthInterface` trait and create a default method /// implementation that invokes `contract` / `contract_addr` / `sender_account` methods. #[async_trait] -pub trait BoundEthInterface: AsRef> + 'static + Sync + Send + fmt::Debug { +pub trait BoundEthInterface: 'static + Sync + Send + fmt::Debug + AsRef { /// Clones this client. fn clone_boxed(&self) -> Box; diff --git a/core/lib/eth_client/src/types.rs b/core/lib/eth_client/src/types.rs index 8ac5ff427fb8..59fb1cdeddcc 100644 --- a/core/lib/eth_client/src/types.rs +++ b/core/lib/eth_client/src/types.rs @@ -8,10 +8,7 @@ use zksync_types::{ }, Address, EIP_4844_TX_TYPE, H256, U256, }; -use zksync_web3_decl::{ - client::{DynClient, L1}, - error::EnrichedClientError, -}; +use zksync_web3_decl::error::EnrichedClientError; use crate::EthInterface; @@ -81,7 +78,7 @@ impl ContractCall<'_> { pub async fn call( &self, - client: &DynClient, + client: &dyn EthInterface, ) -> Result { let func = self .contract_abi diff --git a/core/lib/l1_contract_interface/src/i_executor/structures/commit_batch_info.rs b/core/lib/l1_contract_interface/src/i_executor/structures/commit_batch_info.rs index b5d77ff60c16..179c04748d3b 100644 --- a/core/lib/l1_contract_interface/src/i_executor/structures/commit_batch_info.rs +++ b/core/lib/l1_contract_interface/src/i_executor/structures/commit_batch_info.rs @@ -202,7 +202,10 @@ impl Tokenizable for CommitBatchInfo<'_> { } else { tokens.push(Token::Bytes(match (self.mode, self.pubdata_da) { // Here we're not pushing any pubdata on purpose; no pubdata is sent in Validium mode. - (L1BatchCommitmentMode::Validium, PubdataDA::Calldata) => { + ( + L1BatchCommitmentMode::Validium, + PubdataDA::Calldata | PubdataDA::RelayedL2Calldata, + ) => { vec![PUBDATA_SOURCE_CALLDATA] } (L1BatchCommitmentMode::Validium, PubdataDA::Blobs) => { @@ -216,7 +219,10 @@ impl Tokenizable for CommitBatchInfo<'_> { vec![PUBDATA_SOURCE_CUSTOM] } - (L1BatchCommitmentMode::Rollup, PubdataDA::Calldata) => { + ( + L1BatchCommitmentMode::Rollup, + PubdataDA::Calldata | PubdataDA::RelayedL2Calldata, + ) => { // We compute and add the blob commitment to the pubdata payload so that we can verify the proof // even if we are not using blobs. let pubdata = self.pubdata_input(); diff --git a/core/lib/merkle_tree/src/recovery/mod.rs b/core/lib/merkle_tree/src/recovery/mod.rs index c208c12795a2..51dc87e16d3e 100644 --- a/core/lib/merkle_tree/src/recovery/mod.rs +++ b/core/lib/merkle_tree/src/recovery/mod.rs @@ -15,11 +15,11 @@ //! Recovery process proceeds as follows: //! //! 1. Initialize a tree in the recovery mode. Until recovery is finished, the tree cannot be accessed -//! 
using ordinary [`MerkleTree`] APIs. +//! using ordinary [`MerkleTree`] APIs. //! 2. Update the tree from a snapshot, which [is fed to the tree](MerkleTreeRecovery::extend()) -//! as [`RecoveryEntry`] chunks. Recovery entries must be ordered by increasing key. +//! as [`RecoveryEntry`] chunks. Recovery entries must be ordered by increasing key. //! 3. Finalize recovery using [`MerkleTreeRecovery::finalize()`]. To check integrity, you may compare -//! [`MerkleTreeRecovery::root_hash()`] to the reference value. +//! [`MerkleTreeRecovery::root_hash()`] to the reference value. //! //! The recovery process is tolerant to crashes and may be resumed from the middle. To find the latest //! recovered key, you may use [`MerkleTreeRecovery::last_processed_key()`]. diff --git a/core/lib/merkle_tree/src/storage/mod.rs b/core/lib/merkle_tree/src/storage/mod.rs index b70485b93188..dfc99f9be7cb 100644 --- a/core/lib/merkle_tree/src/storage/mod.rs +++ b/core/lib/merkle_tree/src/storage/mod.rs @@ -118,13 +118,13 @@ impl TreeUpdater { /// /// 1. Walk from the root of the tree along the inserted `key` while we can. /// 2. If the node we've stopped at is an internal node, it means it doesn't have - /// a child at the corresponding nibble from `key`. Create a new leaf node with `key` and - /// `value_hash` and insert it as a new child of the found internal node. + /// a child at the corresponding nibble from `key`. Create a new leaf node with `key` and + /// `value_hash` and insert it as a new child of the found internal node. /// 3. Else the node we've stopped is a leaf. If the full key stored in this leaf is `key`, - /// we just need to update `value_hash` stored in the leaf. + /// we just need to update `value_hash` stored in the leaf. /// 4. Else (the node we've stopped is a leaf with `other_key != key`) we need to create - /// one or more internal nodes that would contain the common prefix between `key` - /// and `other_key` and a "fork" where these keys differ. + /// one or more internal nodes that would contain the common prefix between `key` + /// and `other_key` and a "fork" where these keys differ. /// /// We change step 1 by starting not from the root, but rather from the node ancestor /// we've found in [`Self::load_ancestors()`] for a (moderate) performance boost. Note that diff --git a/core/lib/multivm/Cargo.toml b/core/lib/multivm/Cargo.toml index 5e5440ff9407..fc35f152ae19 100644 --- a/core/lib/multivm/Cargo.toml +++ b/core/lib/multivm/Cargo.toml @@ -16,6 +16,7 @@ zk_evm_1_4_1.workspace = true zk_evm_1_4_0.workspace = true zk_evm_1_3_3.workspace = true zk_evm_1_3_1.workspace = true +vm2.workspace = true circuit_sequencer_api_1_3_3.workspace = true circuit_sequencer_api_1_4_0.workspace = true @@ -34,6 +35,7 @@ anyhow.workspace = true hex.workspace = true itertools.workspace = true once_cell.workspace = true +pretty_assertions.workspace = true serde.workspace = true thiserror.workspace = true tracing.workspace = true diff --git a/core/lib/multivm/src/glue/tracers/mod.rs b/core/lib/multivm/src/glue/tracers/mod.rs index 8a138d9461fd..7aa792ef1f71 100644 --- a/core/lib/multivm/src/glue/tracers/mod.rs +++ b/core/lib/multivm/src/glue/tracers/mod.rs @@ -8,16 +8,16 @@ //! this module defines one primary trait: //! //! - `MultiVMTracer`: This trait represents a tracer that can be converted into a tracer for -//! a specific VM version. +//! a specific VM version. //! //! Specific traits for each VM version, which support Custom Tracers: //! 
- `IntoLatestTracer`: This trait is responsible for converting a tracer -//! into a form compatible with the latest VM version. -//! It defines a method `latest` for obtaining a boxed tracer. +//! into a form compatible with the latest VM version. +//! It defines a method `latest` for obtaining a boxed tracer. //! //! - `IntoVmVirtualBlocksTracer`: This trait is responsible for converting a tracer -//! into a form compatible with the vm_virtual_blocks version. -//! It defines a method `vm_virtual_blocks` for obtaining a boxed tracer. +//! into a form compatible with the vm_virtual_blocks version. +//! It defines a method `vm_virtual_blocks` for obtaining a boxed tracer. //! //! For `MultiVMTracer` to be implemented, the Tracer must implement all N currently //! existing sub-traits. diff --git a/core/lib/multivm/src/glue/types/vm/vm_block_result.rs b/core/lib/multivm/src/glue/types/vm/vm_block_result.rs index 1ee9f5ea90f4..2bf320aeb140 100644 --- a/core/lib/multivm/src/glue/types/vm/vm_block_result.rs +++ b/core/lib/multivm/src/glue/types/vm/vm_block_result.rs @@ -62,9 +62,6 @@ impl GlueFrom for crate::interface::Fi .map(UserL2ToL1Log) .collect(), system_logs: vec![], - total_log_queries: value.full_result.total_log_queries, - cycles_used: value.full_result.cycles_used, - deduplicated_events_logs: vec![], storage_refunds: Vec::new(), pubdata_costs: Vec::new(), }, @@ -121,9 +118,6 @@ impl GlueFrom for crate::interface::Fi .map(UserL2ToL1Log) .collect(), system_logs: vec![], - total_log_queries: value.full_result.total_log_queries, - cycles_used: value.full_result.cycles_used, - deduplicated_events_logs: vec![], storage_refunds: Vec::new(), pubdata_costs: Vec::new(), }, @@ -179,9 +173,6 @@ impl GlueFrom for crate::interface: .map(UserL2ToL1Log) .collect(), system_logs: vec![], - total_log_queries: value.full_result.total_log_queries, - cycles_used: value.full_result.cycles_used, - deduplicated_events_logs: vec![], storage_refunds: Vec::new(), pubdata_costs: Vec::new(), }, diff --git a/core/lib/multivm/src/interface/mod.rs b/core/lib/multivm/src/interface/mod.rs index 5cb1f5cd1d15..360d53df52a7 100644 --- a/core/lib/multivm/src/interface/mod.rs +++ b/core/lib/multivm/src/interface/mod.rs @@ -1,19 +1,21 @@ pub(crate) mod traits; - -pub use traits::{ - tracers::dyn_tracers, - vm::{VmInterface, VmInterfaceHistoryEnabled}, -}; pub mod types; -pub use types::{ - errors::{ - BytecodeCompressionError, Halt, TxRevertReason, VmRevertReason, VmRevertReasonParsingError, +pub use self::{ + traits::{ + tracers::dyn_tracers, + vm::{VmFactory, VmInterface, VmInterfaceHistoryEnabled}, }, - inputs::{L1BatchEnv, L2BlockEnv, SystemEnv, TxExecutionMode, VmExecutionMode}, - outputs::{ - BootloaderMemory, CurrentExecutionState, ExecutionResult, FinishedL1Batch, L2Block, - Refunds, VmExecutionResultAndLogs, VmExecutionStatistics, VmMemoryMetrics, + types::{ + errors::{ + BytecodeCompressionError, Halt, TxRevertReason, VmRevertReason, + VmRevertReasonParsingError, + }, + inputs::{L1BatchEnv, L2BlockEnv, SystemEnv, TxExecutionMode, VmExecutionMode}, + outputs::{ + BootloaderMemory, CurrentExecutionState, ExecutionResult, FinishedL1Batch, L2Block, + Refunds, VmExecutionResultAndLogs, VmExecutionStatistics, VmMemoryMetrics, + }, + tracer, }, - tracer, }; diff --git a/core/lib/multivm/src/interface/traits/vm.rs b/core/lib/multivm/src/interface/traits/vm.rs index 499c46a7b52d..0fd41934cc61 100644 --- a/core/lib/multivm/src/interface/traits/vm.rs +++ b/core/lib/multivm/src/interface/traits/vm.rs @@ -51,25 +51,17 @@ use 
zksync_state::StoragePtr; use zksync_types::Transaction; use zksync_utils::bytecode::CompressedBytecodeInfo; -use crate::{ - interface::{ - types::{ - errors::BytecodeCompressionError, - inputs::{L1BatchEnv, L2BlockEnv, SystemEnv, VmExecutionMode}, - outputs::{BootloaderMemory, CurrentExecutionState, VmExecutionResultAndLogs}, - }, - FinishedL1Batch, VmMemoryMetrics, +use crate::interface::{ + types::{ + errors::BytecodeCompressionError, + inputs::{L1BatchEnv, L2BlockEnv, SystemEnv, VmExecutionMode}, + outputs::{BootloaderMemory, CurrentExecutionState, VmExecutionResultAndLogs}, }, - tracers::TracerDispatcher, - vm_latest::HistoryEnabled, - HistoryMode, + FinishedL1Batch, VmMemoryMetrics, }; -pub trait VmInterface { - type TracerDispatcher: Default + From>; - - /// Initialize VM. - fn new(batch_env: L1BatchEnv, system_env: SystemEnv, storage: StoragePtr) -> Self; +pub trait VmInterface { + type TracerDispatcher: Default; /// Push transaction to bootloader memory. fn push_transaction(&mut self, tx: Transaction); @@ -148,14 +140,32 @@ pub trait VmInterface { } } +/// Encapsulates creating VM instance based on the provided environment. +pub trait VmFactory: VmInterface { + /// Creates a new VM instance. + fn new(batch_env: L1BatchEnv, system_env: SystemEnv, storage: StoragePtr) -> Self; +} + /// Methods of VM requiring history manipulations. -pub trait VmInterfaceHistoryEnabled: VmInterface { +/// +/// # Snapshot workflow +/// +/// External callers must follow the following snapshot workflow: +/// +/// - Each new snapshot created using `make_snapshot()` must be either popped or rolled back before creating the following snapshot. +/// OTOH, it's not required to call either of these methods by the end of VM execution. +/// - `pop_snapshot_no_rollback()` may be called spuriously, when no snapshot was created. It is a no-op in this case. +/// +/// These rules guarantee that at each given moment, a VM instance has at most one snapshot (unless the VM makes snapshots internally), +/// which may allow additional VM optimizations. +pub trait VmInterfaceHistoryEnabled: VmInterface { /// Create a snapshot of the current VM state and push it into memory. fn make_snapshot(&mut self); /// Roll back VM state to the latest snapshot and destroy the snapshot. fn rollback_to_the_latest_snapshot(&mut self); - /// Pop the latest snapshot from memory and destroy it. + /// Pop the latest snapshot from memory and destroy it. If there are no snapshots, this should be a no-op + /// (i.e., the VM must not panic in this case). fn pop_snapshot_no_rollback(&mut self); } diff --git a/core/lib/multivm/src/interface/types/outputs/execution_result.rs b/core/lib/multivm/src/interface/types/outputs/execution_result.rs index 19ce9b599c80..35d14524e0a8 100644 --- a/core/lib/multivm/src/interface/types/outputs/execution_result.rs +++ b/core/lib/multivm/src/interface/types/outputs/execution_result.rs @@ -10,7 +10,7 @@ use zksync_utils::bytecode::bytecode_len_in_bytes; use crate::interface::{Halt, VmExecutionStatistics, VmRevertReason}; /// Refunds produced for the user. 
-#[derive(Debug, Clone, Default)] +#[derive(Debug, Clone, Default, PartialEq)] pub struct Refunds { pub gas_refunded: u64, pub operator_suggested_refund: u64, diff --git a/core/lib/multivm/src/interface/types/outputs/execution_state.rs b/core/lib/multivm/src/interface/types/outputs/execution_state.rs index cc7bb64d4030..05eab795c873 100644 --- a/core/lib/multivm/src/interface/types/outputs/execution_state.rs +++ b/core/lib/multivm/src/interface/types/outputs/execution_state.rs @@ -1,6 +1,5 @@ use zksync_types::{ l2_to_l1_log::{SystemL2ToL1Log, UserL2ToL1Log}, - zk_evm_types::LogQuery, StorageLog, VmEvent, U256, }; @@ -15,17 +14,10 @@ pub struct CurrentExecutionState { pub used_contract_hashes: Vec, /// L2 to L1 logs produced by the VM. pub system_logs: Vec, - /// L2 to L1 logs produced by the L1Messeger. + /// L2 to L1 logs produced by the `L1Messenger`. /// For pre-boojum VMs, there was no distinction between user logs and system /// logs and so all the outputted logs were treated as user_l2_to_l1_logs. pub user_l2_to_l1_logs: Vec, - /// Number of log queries produced by the VM. Including l2_to_l1 logs, storage logs and events. - pub total_log_queries: usize, - /// Number of cycles used by the VM. - pub cycles_used: u32, - /// Sorted & deduplicated events logs for batch. Note, that this is a more "low-level" representation of - /// the `events` field of this struct TODO(PLA-649): refactor to remove duplication of data. - pub deduplicated_events_logs: Vec, /// Refunds returned by `StorageOracle`. pub storage_refunds: Vec, /// Pubdata costs returned by `StorageOracle`. diff --git a/core/lib/multivm/src/interface/types/outputs/l2_block.rs b/core/lib/multivm/src/interface/types/outputs/l2_block.rs index ccbcba15f654..6125b2742d15 100644 --- a/core/lib/multivm/src/interface/types/outputs/l2_block.rs +++ b/core/lib/multivm/src/interface/types/outputs/l2_block.rs @@ -1,5 +1,6 @@ use zksync_types::H256; +#[derive(Debug)] pub struct L2Block { pub number: u32, pub timestamp: u64, diff --git a/core/lib/multivm/src/lib.rs b/core/lib/multivm/src/lib.rs index 007c69fdf7fd..08b077ce3eab 100644 --- a/core/lib/multivm/src/lib.rs +++ b/core/lib/multivm/src/lib.rs @@ -4,17 +4,17 @@ pub use circuit_sequencer_api_1_5_0 as circuit_sequencer_api_latest; pub use zk_evm_1_5_0 as zk_evm_latest; -pub use zksync_types::vm_version::VmVersion; +pub use zksync_types::vm::VmVersion; -pub use self::versions::{ - vm_1_3_2, vm_1_4_1, vm_1_4_2, vm_boojum_integration, vm_latest, vm_m5, vm_m6, - vm_refunds_enhancement, vm_virtual_blocks, -}; pub use crate::{ glue::{ history_mode::HistoryMode, tracers::{MultiVMTracer, MultiVmTracerPointer}, }, + versions::{ + vm_1_3_2, vm_1_4_1, vm_1_4_2, vm_boojum_integration, vm_fast, vm_latest, vm_m5, vm_m6, + vm_refunds_enhancement, vm_virtual_blocks, + }, vm_instance::VmInstance, }; diff --git a/core/lib/multivm/src/tracers/validator/mod.rs b/core/lib/multivm/src/tracers/validator/mod.rs index b56d92015a33..635915f95278 100644 --- a/core/lib/multivm/src/tracers/validator/mod.rs +++ b/core/lib/multivm/src/tracers/validator/mod.rs @@ -7,8 +7,8 @@ use zksync_system_constants::{ L2_BASE_TOKEN_ADDRESS, MSG_VALUE_SIMULATOR_ADDRESS, SYSTEM_CONTEXT_ADDRESS, }; use zksync_types::{ - vm_trace::ViolatedValidationRule, web3::keccak256, AccountTreeId, Address, StorageKey, - VmVersion, H256, U256, + vm::VmVersion, vm_trace::ViolatedValidationRule, web3::keccak256, AccountTreeId, Address, + StorageKey, H256, U256, }; use zksync_utils::{be_bytes_to_safe_address, u256_to_account_address, u256_to_h256}; 
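The snapshot rules spelled out in the new `VmInterfaceHistoryEnabled` doc comment above are easy to get wrong, so here is a toy model of the contract. This is not the real VM (a single `u64` stands in for the whole VM state); it only encodes the documented rules: at most one snapshot is alive at a time, and a spurious `pop_snapshot_no_rollback()` is a no-op:

```rust
/// Toy model of the snapshot contract on `VmInterfaceHistoryEnabled`.
#[derive(Default)]
struct ToyVm {
    state: u64,
    snapshot: Option<u64>, // the contract allows at most one live snapshot
}

impl ToyVm {
    fn make_snapshot(&mut self) {
        // The previous snapshot must have been popped or rolled back by now.
        assert!(self.snapshot.is_none(), "snapshot already exists");
        self.snapshot = Some(self.state);
    }

    fn rollback_to_the_latest_snapshot(&mut self) {
        self.state = self.snapshot.take().expect("no snapshot to roll back to");
    }

    fn pop_snapshot_no_rollback(&mut self) {
        self.snapshot = None; // spurious calls are a documented no-op
    }
}

fn main() {
    let mut vm = ToyVm::default();
    vm.pop_snapshot_no_rollback(); // no snapshot exists; must not panic
    vm.make_snapshot();
    vm.state = 42;
    vm.rollback_to_the_latest_snapshot();
    assert_eq!(vm.state, 0);
    vm.make_snapshot(); // legal again: the previous snapshot was consumed
    vm.pop_snapshot_no_rollback();
}
```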
diff --git a/core/lib/multivm/src/utils.rs b/core/lib/multivm/src/utils.rs index a15fdba6b703..96ae580a5f73 100644 --- a/core/lib/multivm/src/utils.rs +++ b/core/lib/multivm/src/utils.rs @@ -1,6 +1,7 @@ use zksync_types::{ fee_model::{BatchFeeInput, L1PeggedBatchFeeModelInput, PubdataIndependentBatchFeeModelInput}, - VmVersion, U256, + vm::VmVersion, + U256, }; use crate::vm_latest::L1BatchEnv; diff --git a/core/lib/multivm/src/versions/mod.rs b/core/lib/multivm/src/versions/mod.rs index c594d50af0e7..e9e34c1cda16 100644 --- a/core/lib/multivm/src/versions/mod.rs +++ b/core/lib/multivm/src/versions/mod.rs @@ -1,7 +1,9 @@ +pub mod shadow; pub mod vm_1_3_2; pub mod vm_1_4_1; pub mod vm_1_4_2; pub mod vm_boojum_integration; +pub mod vm_fast; pub mod vm_latest; pub mod vm_m5; pub mod vm_m6; diff --git a/core/lib/multivm/src/versions/shadow.rs b/core/lib/multivm/src/versions/shadow.rs new file mode 100644 index 000000000000..675a95c5ba73 --- /dev/null +++ b/core/lib/multivm/src/versions/shadow.rs @@ -0,0 +1,381 @@ +use std::{ + collections::{BTreeMap, HashSet}, + fmt, +}; + +use anyhow::Context as _; +use zksync_state::{ImmutableStorageView, ReadStorage, StoragePtr, StorageView}; +use zksync_types::{StorageKey, StorageLog, StorageLogWithPreviousValue, Transaction}; +use zksync_utils::bytecode::CompressedBytecodeInfo; + +use crate::{ + interface::{ + BootloaderMemory, BytecodeCompressionError, CurrentExecutionState, FinishedL1Batch, + L1BatchEnv, L2BlockEnv, SystemEnv, VmExecutionMode, VmExecutionResultAndLogs, VmFactory, + VmInterface, VmInterfaceHistoryEnabled, VmMemoryMetrics, + }, + vm_fast, +}; + +#[derive(Debug)] +pub struct ShadowVm { + main: T, + shadow: vm_fast::Vm>, +} + +impl VmFactory> for ShadowVm +where + S: ReadStorage, + T: VmFactory>, +{ + fn new( + batch_env: L1BatchEnv, + system_env: SystemEnv, + storage: StoragePtr>, + ) -> Self { + Self { + main: T::new(batch_env.clone(), system_env.clone(), storage.clone()), + shadow: vm_fast::Vm::new(batch_env, system_env, ImmutableStorageView::new(storage)), + } + } +} + +impl VmInterface for ShadowVm +where + S: ReadStorage, + T: VmInterface, +{ + type TracerDispatcher = T::TracerDispatcher; + + fn push_transaction(&mut self, tx: Transaction) { + self.shadow.push_transaction(tx.clone()); + self.main.push_transaction(tx); + } + + fn execute(&mut self, execution_mode: VmExecutionMode) -> VmExecutionResultAndLogs { + let main_result = self.main.execute(execution_mode); + let shadow_result = self.shadow.execute(execution_mode); + let mut errors = DivergenceErrors::default(); + errors.check_results_match(&main_result, &shadow_result); + errors + .into_result() + .with_context(|| format!("executing VM with mode {execution_mode:?}")) + .unwrap(); + main_result + } + + fn inspect( + &mut self, + dispatcher: Self::TracerDispatcher, + execution_mode: VmExecutionMode, + ) -> VmExecutionResultAndLogs { + let shadow_result = self.shadow.inspect((), execution_mode); + let main_result = self.main.inspect(dispatcher, execution_mode); + let mut errors = DivergenceErrors::default(); + errors.check_results_match(&main_result, &shadow_result); + errors + .into_result() + .with_context(|| format!("executing VM with mode {execution_mode:?}")) + .unwrap(); + main_result + } + + fn get_bootloader_memory(&self) -> BootloaderMemory { + let main_memory = self.main.get_bootloader_memory(); + let shadow_memory = self.shadow.get_bootloader_memory(); + DivergenceErrors::single("get_bootloader_memory", &main_memory, &shadow_memory).unwrap(); + main_memory + } + + fn 
get_last_tx_compressed_bytecodes(&self) -> Vec { + let main_bytecodes = self.main.get_last_tx_compressed_bytecodes(); + let shadow_bytecodes = self.shadow.get_last_tx_compressed_bytecodes(); + DivergenceErrors::single( + "get_last_tx_compressed_bytecodes", + &main_bytecodes, + &shadow_bytecodes, + ) + .unwrap(); + main_bytecodes + } + + fn start_new_l2_block(&mut self, l2_block_env: L2BlockEnv) { + self.shadow.start_new_l2_block(l2_block_env); + self.main.start_new_l2_block(l2_block_env); + } + + fn get_current_execution_state(&self) -> CurrentExecutionState { + let main_state = self.main.get_current_execution_state(); + let shadow_state = self.shadow.get_current_execution_state(); + DivergenceErrors::single("get_current_execution_state", &main_state, &shadow_state) + .unwrap(); + main_state + } + + fn execute_transaction_with_bytecode_compression( + &mut self, + tx: Transaction, + with_compression: bool, + ) -> ( + Result<(), BytecodeCompressionError>, + VmExecutionResultAndLogs, + ) { + let tx_hash = tx.hash(); + let main_result = self + .main + .execute_transaction_with_bytecode_compression(tx.clone(), with_compression); + let shadow_result = self + .shadow + .execute_transaction_with_bytecode_compression(tx, with_compression); + let mut errors = DivergenceErrors::default(); + errors.check_results_match(&main_result.1, &shadow_result.1); + errors + .into_result() + .with_context(|| { + format!("executing transaction {tx_hash:?}, with_compression={with_compression:?}") + }) + .unwrap(); + main_result + } + + fn inspect_transaction_with_bytecode_compression( + &mut self, + tracer: Self::TracerDispatcher, + tx: Transaction, + with_compression: bool, + ) -> ( + Result<(), BytecodeCompressionError>, + VmExecutionResultAndLogs, + ) { + let tx_hash = tx.hash(); + let main_result = self.main.inspect_transaction_with_bytecode_compression( + tracer, + tx.clone(), + with_compression, + ); + let shadow_result = + self.shadow + .inspect_transaction_with_bytecode_compression((), tx, with_compression); + let mut errors = DivergenceErrors::default(); + errors.check_results_match(&main_result.1, &shadow_result.1); + errors + .into_result() + .with_context(|| { + format!("inspecting transaction {tx_hash:?}, with_compression={with_compression:?}") + }) + .unwrap(); + main_result + } + + fn record_vm_memory_metrics(&self) -> VmMemoryMetrics { + self.main.record_vm_memory_metrics() + } + + fn gas_remaining(&self) -> u32 { + let main_gas = self.main.gas_remaining(); + let shadow_gas = self.shadow.gas_remaining(); + DivergenceErrors::single("gas_remaining", &main_gas, &shadow_gas).unwrap(); + main_gas + } + + fn finish_batch(&mut self) -> FinishedL1Batch { + let main_batch = self.main.finish_batch(); + let shadow_batch = self.shadow.finish_batch(); + + let mut errors = DivergenceErrors::default(); + errors.check_results_match( + &main_batch.block_tip_execution_result, + &shadow_batch.block_tip_execution_result, + ); + errors.check_final_states_match( + &main_batch.final_execution_state, + &shadow_batch.final_execution_state, + ); + errors.check_match( + "final_bootloader_memory", + &main_batch.final_bootloader_memory, + &shadow_batch.final_bootloader_memory, + ); + errors.check_match( + "pubdata_input", + &main_batch.pubdata_input, + &shadow_batch.pubdata_input, + ); + errors.check_match( + "state_diffs", + &main_batch.state_diffs, + &shadow_batch.state_diffs, + ); + errors.into_result().unwrap(); + main_batch + } +} + +#[must_use = "Should be converted to a `Result`"] +#[derive(Debug, Default)] +pub struct 
DivergenceErrors(Vec<anyhow::Error>); + +impl DivergenceErrors { + fn single<T: fmt::Debug + PartialEq>( + context: &str, + main: &T, + shadow: &T, + ) -> anyhow::Result<()> { + let mut this = Self::default(); + this.check_match(context, main, shadow); + this.into_result() + } + + fn check_results_match( + &mut self, + main_result: &VmExecutionResultAndLogs, + shadow_result: &VmExecutionResultAndLogs, + ) { + self.check_match("result", &main_result.result, &shadow_result.result); + self.check_match( + "logs.events", + &main_result.logs.events, + &shadow_result.logs.events, + ); + self.check_match( + "logs.system_l2_to_l1_logs", + &main_result.logs.system_l2_to_l1_logs, + &shadow_result.logs.system_l2_to_l1_logs, + ); + self.check_match( + "logs.user_l2_to_l1_logs", + &main_result.logs.user_l2_to_l1_logs, + &shadow_result.logs.user_l2_to_l1_logs, + ); + let main_logs = UniqueStorageLogs::new(&main_result.logs.storage_logs); + let shadow_logs = UniqueStorageLogs::new(&shadow_result.logs.storage_logs); + self.check_match("logs.storage_logs", &main_logs, &shadow_logs); + self.check_match("refunds", &main_result.refunds, &shadow_result.refunds); + } + + fn check_match<T: fmt::Debug + PartialEq>(&mut self, context: &str, main: &T, shadow: &T) { + if main != shadow { + let comparison = pretty_assertions::Comparison::new(main, shadow); + let err = anyhow::anyhow!("`{context}` mismatch: {comparison}"); + self.0.push(err); + } + } + + fn check_final_states_match( + &mut self, + main: &CurrentExecutionState, + shadow: &CurrentExecutionState, + ) { + self.check_match("final_state.events", &main.events, &shadow.events); + self.check_match( + "final_state.user_l2_to_l1_logs", + &main.user_l2_to_l1_logs, + &shadow.user_l2_to_l1_logs, + ); + self.check_match( + "final_state.system_logs", + &main.system_logs, + &shadow.system_logs, + ); + self.check_match( + "final_state.storage_refunds", + &main.storage_refunds, + &shadow.storage_refunds, + ); + self.check_match( + "final_state.pubdata_costs", + &main.pubdata_costs, + &shadow.pubdata_costs, + ); + self.check_match( + "final_state.used_contract_hashes", + &main.used_contract_hashes.iter().collect::<HashSet<_>>(), + &shadow.used_contract_hashes.iter().collect::<HashSet<_>>(), + ); + + let main_deduplicated_logs = Self::gather_logs(&main.deduplicated_storage_logs); + let shadow_deduplicated_logs = Self::gather_logs(&shadow.deduplicated_storage_logs); + self.check_match( + "deduplicated_storage_logs", + &main_deduplicated_logs, + &shadow_deduplicated_logs, + ); + } + + fn gather_logs(logs: &[StorageLog]) -> BTreeMap<StorageKey, &StorageLog> { + logs.iter() + .filter(|log| log.is_write()) + .map(|log| (log.key, log)) + .collect() + } + + fn into_result(self) -> anyhow::Result<()> { + if self.0.is_empty() { + Ok(()) + } else { + Err(anyhow::anyhow!( + "divergence between old VM and new VM execution: [{:?}]", + self.0 + )) + } + } +} + +// The new VM doesn't support read logs yet, doesn't order logs by access and deduplicates them +// inside the VM, hence this auxiliary struct.
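`UniqueStorageLogs`, defined next, normalizes storage logs before comparison. Its rule can be shown in isolation: let the last write per key win while preserving the first previous value, then drop net no-op writes. A self-contained sketch with simplified stand-in types (a tuple instead of `StorageLogWithPreviousValue`; the real code also filters out read logs first, so inputs here are writes only):

```rust
use std::collections::BTreeMap;

/// Simplified stand-in for `StorageLogWithPreviousValue`: (key, previous value, new value).
type Log = (u32, u64, u64);

/// Mirrors the logic of `UniqueStorageLogs::new`: the last write per key wins,
/// and no-op writes (X -> X) are dropped entirely.
fn dedup_writes(logs: &[Log]) -> BTreeMap<u32, (u64, u64)> {
    let mut unique = BTreeMap::new();
    for &(key, prev, value) in logs {
        unique
            .entry(key)
            .and_modify(|(_, v)| *v = value) // a later write overwrites the value...
            .or_insert((prev, value)); // ...but the first previous value is kept
    }
    unique.retain(|_, (prev, value)| prev != value); // drop X -> X writes
    unique
}

fn main() {
    // Key 1 is written twice (2 -> 3, then 3 -> 2): the net effect is a no-op, so it is dropped.
    let logs = [(1, 2, 3), (1, 3, 2), (7, 0, 5)];
    let deduped = dedup_writes(&logs);
    assert_eq!(deduped.len(), 1);
    assert_eq!(deduped[&7], (0, 5));
}
```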
+#[derive(PartialEq)] +struct UniqueStorageLogs(BTreeMap); + +impl fmt::Debug for UniqueStorageLogs { + fn fmt(&self, formatter: &mut fmt::Formatter<'_>) -> fmt::Result { + let mut map = formatter.debug_map(); + for log in self.0.values() { + map.entry( + &format!("{:?}:{:?}", log.log.key.address(), log.log.key.key()), + &format!("{:?} -> {:?}", log.previous_value, log.log.value), + ); + } + map.finish() + } +} + +impl UniqueStorageLogs { + fn new(logs: &[StorageLogWithPreviousValue]) -> Self { + let mut unique_logs = BTreeMap::::new(); + for log in logs { + if !log.log.is_write() { + continue; + } + if let Some(existing_log) = unique_logs.get_mut(&log.log.key) { + existing_log.log.value = log.log.value; + } else { + unique_logs.insert(log.log.key, *log); + } + } + + // Remove no-op write logs (i.e., X -> X writes) produced by the old VM. + unique_logs.retain(|_, log| log.previous_value != log.log.value); + Self(unique_logs) + } +} + +impl VmInterfaceHistoryEnabled for ShadowVm +where + S: ReadStorage, + T: VmInterfaceHistoryEnabled, +{ + fn make_snapshot(&mut self) { + self.shadow.make_snapshot(); + self.main.make_snapshot(); + } + + fn rollback_to_the_latest_snapshot(&mut self) { + self.shadow.rollback_to_the_latest_snapshot(); + self.main.rollback_to_the_latest_snapshot(); + } + + fn pop_snapshot_no_rollback(&mut self) { + self.shadow.pop_snapshot_no_rollback(); + self.main.pop_snapshot_no_rollback(); + } +} diff --git a/core/lib/multivm/src/versions/vm_1_3_2/event_sink.rs b/core/lib/multivm/src/versions/vm_1_3_2/event_sink.rs index 7f7b44071a1a..d73ebb1648b6 100644 --- a/core/lib/multivm/src/versions/vm_1_3_2/event_sink.rs +++ b/core/lib/multivm/src/versions/vm_1_3_2/event_sink.rs @@ -29,7 +29,7 @@ impl OracleWithHistory for InMemoryEventSink { // otherwise we carry rollbacks to the parent's frames impl InMemoryEventSink { - pub fn flatten(&self) -> (Vec, Vec, Vec) { + pub fn flatten(&self) -> (Vec, Vec) { assert_eq!( self.frames_stack.len(), 1, @@ -38,8 +38,7 @@ impl InMemoryEventSink { // we forget rollbacks as we have finished the execution and can just apply them let history = self.frames_stack.forward().current_frame(); - let (events, l1_messages) = Self::events_and_l1_messages_from_history(history); - (history.iter().map(|x| **x).collect(), events, l1_messages) + Self::events_and_l1_messages_from_history(history) } pub fn get_log_queries(&self) -> usize { diff --git a/core/lib/multivm/src/versions/vm_1_3_2/vm.rs b/core/lib/multivm/src/versions/vm_1_3_2/vm.rs index 5721f4e2185e..ff6c7f2f3d08 100644 --- a/core/lib/multivm/src/versions/vm_1_3_2/vm.rs +++ b/core/lib/multivm/src/versions/vm_1_3_2/vm.rs @@ -16,7 +16,8 @@ use crate::{ interface::{ BootloaderMemory, BytecodeCompressionError, CurrentExecutionState, FinishedL1Batch, L1BatchEnv, L2BlockEnv, SystemEnv, TxExecutionMode, VmExecutionMode, - VmExecutionResultAndLogs, VmInterface, VmInterfaceHistoryEnabled, VmMemoryMetrics, + VmExecutionResultAndLogs, VmFactory, VmInterface, VmInterfaceHistoryEnabled, + VmMemoryMetrics, }, tracers::old_tracers::TracerDispatcher, vm_1_3_2::{events::merge_events, VmInstance}, @@ -30,34 +31,9 @@ pub struct Vm { pub(crate) last_tx_compressed_bytecodes: Vec, } -impl VmInterface for Vm { +impl VmInterface for Vm { type TracerDispatcher = TracerDispatcher; - fn new(batch_env: L1BatchEnv, system_env: SystemEnv, storage: StoragePtr) -> Self { - let oracle_tools = crate::vm_1_3_2::OracleTools::new(storage.clone()); - let block_properties = crate::vm_1_3_2::BlockProperties { - default_aa_code_hash: 
h256_to_u256( - system_env.base_system_smart_contracts.default_aa.hash, - ), - zkporter_is_available: false, - }; - let inner_vm: VmInstance = - crate::vm_1_3_2::vm_with_bootloader::init_vm_with_gas_limit( - oracle_tools, - batch_env.clone().glue_into(), - block_properties, - system_env.execution_mode.glue_into(), - &system_env.base_system_smart_contracts.clone().glue_into(), - system_env.bootloader_gas_limit, - ); - Self { - vm: inner_vm, - system_env, - batch_env, - last_tx_compressed_bytecodes: vec![], - } - } - fn push_transaction(&mut self, tx: Transaction) { crate::vm_1_3_2::vm_with_bootloader::push_transaction_to_bootloader_memory( &mut self.vm, @@ -120,7 +96,7 @@ impl VmInterface for Vm { } fn get_current_execution_state(&self) -> CurrentExecutionState { - let (_full_history, raw_events, l1_messages) = self.vm.state.event_sink.flatten(); + let (raw_events, l1_messages) = self.vm.state.event_sink.flatten(); let events = merge_events(raw_events) .into_iter() .map(|e| e.into_vm_event(self.batch_env.number)) @@ -138,14 +114,6 @@ impl VmInterface for Vm { }) }) .collect(); - let total_log_queries = self.vm.state.event_sink.get_log_queries() - + self - .vm - .state - .precompiles_processor - .get_timestamp_history() - .len() - + self.vm.state.storage.get_final_log_queries().len(); let used_contract_hashes = self .vm @@ -171,10 +139,7 @@ impl VmInterface for Vm { used_contract_hashes, user_l2_to_l1_logs: l2_to_l1_logs, system_logs: vec![], - total_log_queries, - cycles_used: self.vm.state.local_state.monotonic_cycle_counter, - // It's not applicable for vm 1.3.2 - deduplicated_events_logs: vec![], + // Fields below are not produced by VM 1.3.2 storage_refunds: vec![], pubdata_costs: Vec::new(), } @@ -295,7 +260,34 @@ impl VmInterface for Vm { } } -impl VmInterfaceHistoryEnabled for Vm { +impl VmFactory for Vm { + fn new(batch_env: L1BatchEnv, system_env: SystemEnv, storage: StoragePtr) -> Self { + let oracle_tools = crate::vm_1_3_2::OracleTools::new(storage.clone()); + let block_properties = crate::vm_1_3_2::BlockProperties { + default_aa_code_hash: h256_to_u256( + system_env.base_system_smart_contracts.default_aa.hash, + ), + zkporter_is_available: false, + }; + let inner_vm: VmInstance = + crate::vm_1_3_2::vm_with_bootloader::init_vm_with_gas_limit( + oracle_tools, + batch_env.clone().glue_into(), + block_properties, + system_env.execution_mode.glue_into(), + &system_env.base_system_smart_contracts.clone().glue_into(), + system_env.bootloader_gas_limit, + ); + Self { + vm: inner_vm, + system_env, + batch_env, + last_tx_compressed_bytecodes: vec![], + } + } +} + +impl VmInterfaceHistoryEnabled for Vm { fn make_snapshot(&mut self) { self.vm.save_current_vm_as_snapshot() } @@ -305,6 +297,6 @@ impl VmInterfaceHistoryEnabled for Vm VmInstance { /// Removes the latest snapshot without rolling it back. /// This function expects that there is at least one snapshot present. pub fn pop_snapshot_no_rollback(&mut self) { - self.snapshots.pop().unwrap(); + self.snapshots.pop(); } /// Returns the amount of gas remaining to the VM. @@ -768,7 +768,7 @@ impl VmInstance { // Collecting `block_tip_result` needs logs with timestamp, so we drain events for the `full_result` // after because draining will drop timestamps. 
- let (_full_history, raw_events, l1_messages) = self.state.event_sink.flatten(); + let (raw_events, l1_messages) = self.state.event_sink.flatten(); full_result.events = merge_events(raw_events) .into_iter() .map(|e| { diff --git a/core/lib/multivm/src/versions/vm_1_4_1/old_vm/event_sink.rs b/core/lib/multivm/src/versions/vm_1_4_1/old_vm/event_sink.rs index 0c9d1bb01cb2..bd57239d8578 100644 --- a/core/lib/multivm/src/versions/vm_1_4_1/old_vm/event_sink.rs +++ b/core/lib/multivm/src/versions/vm_1_4_1/old_vm/event_sink.rs @@ -1,6 +1,5 @@ use std::collections::HashMap; -use itertools::Itertools; use zk_evm_1_4_1::{ abstractions::EventSink, aux_structures::{LogQuery, Timestamp}, @@ -9,7 +8,6 @@ use zk_evm_1_4_1::{ BOOTLOADER_FORMAL_ADDRESS, EVENT_AUX_BYTE, L1_MESSAGE_AUX_BYTE, }, }; -use zksync_types::U256; use crate::vm_1_4_1::old_vm::{ history_recorder::{AppDataFrameManagerWithHistory, HistoryEnabled, HistoryMode}, @@ -31,7 +29,7 @@ impl OracleWithHistory for InMemoryEventSink { // otherwise we carry rollbacks to the parent's frames impl InMemoryEventSink { - pub fn flatten(&self) -> (Vec, Vec, Vec) { + pub fn flatten(&self) -> (Vec, Vec) { assert_eq!( self.frames_stack.len(), 1, @@ -40,10 +38,7 @@ impl InMemoryEventSink { // we forget rollbacks as we have finished the execution and can just apply them let history = self.frames_stack.forward().current_frame(); - let (events, l1_messages) = Self::events_and_l1_messages_from_history(history); - let events_logs = Self::events_logs_from_history(history); - - (events_logs, events, l1_messages) + Self::events_and_l1_messages_from_history(history) } pub fn get_log_queries(&self) -> usize { @@ -69,92 +64,6 @@ impl InMemoryEventSink { Self::events_and_l1_messages_from_history(self.log_queries_after_timestamp(from_timestamp)) } - fn events_logs_from_history(history: &[Box]) -> Vec { - // Filter out all the L2->L1 logs and leave only events - let mut events = history - .iter() - .filter_map(|log_query| (log_query.aux_byte == EVENT_AUX_BYTE).then_some(**log_query)) - .collect_vec(); - - // Sort the events by timestamp and rollback flag, basically ensuring that - // if an event has been rolled back, the original event and its rollback will be put together - events.sort_by_key(|log| (log.timestamp, log.rollback)); - - let mut stack = Vec::::new(); - let mut net_history = vec![]; - for el in events.iter() { - assert_eq!(el.shard_id, 0, "only rollup shard is supported"); - if stack.is_empty() { - assert!(!el.rollback); - stack.push(*el); - } else { - // we can always pop as it's either one to add to queue, or discard - let previous = stack.pop().unwrap(); - if previous.timestamp == el.timestamp { - // Only rollback can have the same timestamp, so here we do nothing and simply - // double check the invariants - assert!(!previous.rollback); - assert!(el.rollback); - assert!(previous.rw_flag); - assert!(el.rw_flag); - assert_eq!(previous.tx_number_in_block, el.tx_number_in_block); - assert_eq!(previous.shard_id, el.shard_id); - assert_eq!(previous.address, el.address); - assert_eq!(previous.key, el.key); - assert_eq!(previous.written_value, el.written_value); - assert_eq!(previous.is_service, el.is_service); - continue; - } else { - // The event on the stack has not been rolled back. It must be a different event, - // with a different timestamp. 
- assert!(!el.rollback); - stack.push(*el); - - // cleanup some fields - // flags are conventions - let sorted_log_query = LogQuery { - timestamp: Timestamp(0), - tx_number_in_block: previous.tx_number_in_block, - aux_byte: 0, - shard_id: previous.shard_id, - address: previous.address, - key: previous.key, - read_value: U256::zero(), - written_value: previous.written_value, - rw_flag: false, - rollback: false, - is_service: previous.is_service, - }; - - net_history.push(sorted_log_query); - } - } - } - - // In case the stack is non-empty, then the last element of it has not been rolled back. - if let Some(previous) = stack.pop() { - // cleanup some fields - // flags are conventions - let sorted_log_query = LogQuery { - timestamp: Timestamp(0), - tx_number_in_block: previous.tx_number_in_block, - aux_byte: 0, - shard_id: previous.shard_id, - address: previous.address, - key: previous.key, - read_value: U256::zero(), - written_value: previous.written_value, - rw_flag: false, - rollback: false, - is_service: previous.is_service, - }; - - net_history.push(sorted_log_query); - } - - net_history - } - fn events_and_l1_messages_from_history( history: &[Box], ) -> (Vec, Vec) { diff --git a/core/lib/multivm/src/versions/vm_1_4_1/vm.rs b/core/lib/multivm/src/versions/vm_1_4_1/vm.rs index 993cc7950554..345948bfdfbb 100644 --- a/core/lib/multivm/src/versions/vm_1_4_1/vm.rs +++ b/core/lib/multivm/src/versions/vm_1_4_1/vm.rs @@ -11,8 +11,8 @@ use crate::{ glue::GlueInto, interface::{ BootloaderMemory, BytecodeCompressionError, CurrentExecutionState, FinishedL1Batch, - L1BatchEnv, L2BlockEnv, SystemEnv, VmExecutionMode, VmExecutionResultAndLogs, VmInterface, - VmInterfaceHistoryEnabled, VmMemoryMetrics, + L1BatchEnv, L2BlockEnv, SystemEnv, VmExecutionMode, VmExecutionResultAndLogs, VmFactory, + VmInterface, VmInterfaceHistoryEnabled, VmMemoryMetrics, }, vm_1_4_1::{ bootloader_state::BootloaderState, @@ -38,22 +38,9 @@ pub struct Vm { _phantom: std::marker::PhantomData, } -impl VmInterface for Vm { +impl VmInterface for Vm { type TracerDispatcher = TracerDispatcher; - fn new(batch_env: L1BatchEnv, system_env: SystemEnv, storage: StoragePtr) -> Self { - let (state, bootloader_state) = new_vm_state(storage.clone(), &system_env, &batch_env); - Self { - bootloader_state, - state, - storage, - system_env, - batch_env, - snapshots: vec![], - _phantom: Default::default(), - } - } - /// Push tx into memory for the future execution fn push_transaction(&mut self, tx: Transaction) { self.push_transaction_with_compression(tx, true); @@ -86,7 +73,7 @@ impl VmInterface for Vm { /// This method should be used only after the batch execution. /// Otherwise it can panic. 
fn get_current_execution_state(&self) -> CurrentExecutionState { - let (deduplicated_events_logs, raw_events, l1_messages) = self.state.event_sink.flatten(); + let (raw_events, l1_messages) = self.state.event_sink.flatten(); let events: Vec<_> = merge_events(raw_events) .into_iter() .map(|e| e.into_vm_event(self.batch_env.number)) @@ -97,13 +84,6 @@ impl VmInterface for Vm { .into_iter() .map(|log| SystemL2ToL1Log(log.glue_into())) .collect(); - let total_log_queries = self.state.event_sink.get_log_queries() - + self - .state - .precompiles_processor - .get_timestamp_history() - .len() - + self.state.storage.get_final_log_queries().len(); let storage_log_queries = self.state.storage.get_final_log_queries(); @@ -122,12 +102,6 @@ impl VmInterface for Vm { .map(|log| UserL2ToL1Log(log.into())) .collect(), system_logs, - total_log_queries, - cycles_used: self.state.local_state.monotonic_cycle_counter, - deduplicated_events_logs: deduplicated_events_logs - .into_iter() - .map(GlueInto::glue_into) - .collect(), storage_refunds: self.state.storage.returned_refunds.inner().clone(), pubdata_costs: Vec::new(), } @@ -184,14 +158,28 @@ impl VmInterface for Vm { } } +impl VmFactory for Vm { + fn new(batch_env: L1BatchEnv, system_env: SystemEnv, storage: StoragePtr) -> Self { + let (state, bootloader_state) = new_vm_state(storage.clone(), &system_env, &batch_env); + Self { + bootloader_state, + state, + storage, + system_env, + batch_env, + snapshots: vec![], + _phantom: Default::default(), + } + } +} + /// Methods of vm, which required some history manipulations -impl VmInterfaceHistoryEnabled for Vm { +impl VmInterfaceHistoryEnabled for Vm { /// Create snapshot of current vm state and push it into the memory fn make_snapshot(&mut self) { self.make_snapshot_inner() } - /// Rollback vm state to the latest snapshot and destroy the snapshot fn rollback_to_the_latest_snapshot(&mut self) { let snapshot = self .snapshots @@ -200,10 +188,7 @@ impl VmInterfaceHistoryEnabled for Vm { // otherwise we carry rollbacks to the parent's frames impl InMemoryEventSink { - pub fn flatten(&self) -> (Vec, Vec, Vec) { + pub fn flatten(&self) -> (Vec, Vec) { assert_eq!( self.frames_stack.len(), 1, @@ -40,10 +38,7 @@ impl InMemoryEventSink { // we forget rollbacks as we have finished the execution and can just apply them let history = self.frames_stack.forward().current_frame(); - let (events, l1_messages) = Self::events_and_l1_messages_from_history(history); - let events_logs = Self::events_logs_from_history(history); - - (events_logs, events, l1_messages) + Self::events_and_l1_messages_from_history(history) } pub fn get_log_queries(&self) -> usize { @@ -69,92 +64,6 @@ impl InMemoryEventSink { Self::events_and_l1_messages_from_history(self.log_queries_after_timestamp(from_timestamp)) } - fn events_logs_from_history(history: &[Box]) -> Vec { - // Filter out all the L2->L1 logs and leave only events - let mut events = history - .iter() - .filter_map(|log_query| (log_query.aux_byte == EVENT_AUX_BYTE).then_some(**log_query)) - .collect_vec(); - - // Sort the events by timestamp and rollback flag, basically ensuring that - // if an event has been rolled back, the original event and its rollback will be put together - events.sort_by_key(|log| (log.timestamp, log.rollback)); - - let mut stack = Vec::::new(); - let mut net_history = vec![]; - for el in events.iter() { - assert_eq!(el.shard_id, 0, "only rollup shard is supported"); - if stack.is_empty() { - assert!(!el.rollback); - stack.push(*el); - } else { - // we can always pop 
as it's either one to add to queue, or discard - let previous = stack.pop().unwrap(); - if previous.timestamp == el.timestamp { - // Only rollback can have the same timestamp, so here we do nothing and simply - // double check the invariants - assert!(!previous.rollback); - assert!(el.rollback); - assert!(previous.rw_flag); - assert!(el.rw_flag); - assert_eq!(previous.tx_number_in_block, el.tx_number_in_block); - assert_eq!(previous.shard_id, el.shard_id); - assert_eq!(previous.address, el.address); - assert_eq!(previous.key, el.key); - assert_eq!(previous.written_value, el.written_value); - assert_eq!(previous.is_service, el.is_service); - continue; - } else { - // The event on the stack has not been rolled back. It must be a different event, - // with a different timestamp. - assert!(!el.rollback); - stack.push(*el); - - // cleanup some fields - // flags are conventions - let sorted_log_query = LogQuery { - timestamp: Timestamp(0), - tx_number_in_block: previous.tx_number_in_block, - aux_byte: 0, - shard_id: previous.shard_id, - address: previous.address, - key: previous.key, - read_value: U256::zero(), - written_value: previous.written_value, - rw_flag: false, - rollback: false, - is_service: previous.is_service, - }; - - net_history.push(sorted_log_query); - } - } - } - - // In case the stack is non-empty, then the last element of it has not been rolled back. - if let Some(previous) = stack.pop() { - // cleanup some fields - // flags are conventions - let sorted_log_query = LogQuery { - timestamp: Timestamp(0), - tx_number_in_block: previous.tx_number_in_block, - aux_byte: 0, - shard_id: previous.shard_id, - address: previous.address, - key: previous.key, - read_value: U256::zero(), - written_value: previous.written_value, - rw_flag: false, - rollback: false, - is_service: previous.is_service, - }; - - net_history.push(sorted_log_query); - } - - net_history - } - fn events_and_l1_messages_from_history( history: &[Box], ) -> (Vec, Vec) { diff --git a/core/lib/multivm/src/versions/vm_1_4_2/vm.rs b/core/lib/multivm/src/versions/vm_1_4_2/vm.rs index 6a02b162733d..264ebde5611d 100644 --- a/core/lib/multivm/src/versions/vm_1_4_2/vm.rs +++ b/core/lib/multivm/src/versions/vm_1_4_2/vm.rs @@ -11,8 +11,8 @@ use crate::{ glue::GlueInto, interface::{ BootloaderMemory, BytecodeCompressionError, CurrentExecutionState, FinishedL1Batch, - L1BatchEnv, L2BlockEnv, SystemEnv, VmExecutionMode, VmExecutionResultAndLogs, VmInterface, - VmInterfaceHistoryEnabled, VmMemoryMetrics, + L1BatchEnv, L2BlockEnv, SystemEnv, VmExecutionMode, VmExecutionResultAndLogs, VmFactory, + VmInterface, VmInterfaceHistoryEnabled, VmMemoryMetrics, }, vm_1_4_2::{ bootloader_state::BootloaderState, @@ -38,22 +38,9 @@ pub struct Vm { _phantom: std::marker::PhantomData, } -impl VmInterface for Vm { +impl VmInterface for Vm { type TracerDispatcher = TracerDispatcher; - fn new(batch_env: L1BatchEnv, system_env: SystemEnv, storage: StoragePtr) -> Self { - let (state, bootloader_state) = new_vm_state(storage.clone(), &system_env, &batch_env); - Self { - bootloader_state, - state, - storage, - system_env, - batch_env, - snapshots: vec![], - _phantom: Default::default(), - } - } - /// Push tx into memory for the future execution fn push_transaction(&mut self, tx: Transaction) { self.push_transaction_with_compression(tx, true); @@ -86,7 +73,7 @@ impl VmInterface for Vm { /// This method should be used only after the batch execution. /// Otherwise it can panic. 
fn get_current_execution_state(&self) -> CurrentExecutionState { - let (deduplicated_events_logs, raw_events, l1_messages) = self.state.event_sink.flatten(); + let (raw_events, l1_messages) = self.state.event_sink.flatten(); let events: Vec<_> = merge_events(raw_events) .into_iter() .map(|e| e.into_vm_event(self.batch_env.number)) @@ -97,13 +84,6 @@ impl VmInterface for Vm { .into_iter() .map(|log| SystemL2ToL1Log(log.glue_into())) .collect(); - let total_log_queries = self.state.event_sink.get_log_queries() - + self - .state - .precompiles_processor - .get_timestamp_history() - .len() - + self.state.storage.get_final_log_queries().len(); let storage_log_queries = self.state.storage.get_final_log_queries(); @@ -122,12 +102,6 @@ impl VmInterface for Vm { .map(|log| UserL2ToL1Log(log.into())) .collect(), system_logs, - total_log_queries, - cycles_used: self.state.local_state.monotonic_cycle_counter, - deduplicated_events_logs: deduplicated_events_logs - .into_iter() - .map(GlueInto::glue_into) - .collect(), storage_refunds: self.state.storage.returned_refunds.inner().clone(), pubdata_costs: Vec::new(), } @@ -189,14 +163,26 @@ impl VmInterface for Vm { } } -/// Methods of vm, which required some history manipulations -impl VmInterfaceHistoryEnabled for Vm { - /// Create snapshot of current vm state and push it into the memory +impl VmFactory for Vm { + fn new(batch_env: L1BatchEnv, system_env: SystemEnv, storage: StoragePtr) -> Self { + let (state, bootloader_state) = new_vm_state(storage.clone(), &system_env, &batch_env); + Self { + bootloader_state, + state, + storage, + system_env, + batch_env, + snapshots: vec![], + _phantom: Default::default(), + } + } +} + +impl VmInterfaceHistoryEnabled for Vm { fn make_snapshot(&mut self) { - self.make_snapshot_inner() + self.make_snapshot_inner(); } - /// Rollback vm state to the latest snapshot and destroy the snapshot fn rollback_to_the_latest_snapshot(&mut self) { let snapshot = self .snapshots @@ -205,10 +191,7 @@ impl VmInterfaceHistoryEnabled for Vm { // otherwise we carry rollbacks to the parent's frames impl InMemoryEventSink { - pub fn flatten(&self) -> (Vec, Vec, Vec) { + pub fn flatten(&self) -> (Vec, Vec) { assert_eq!( self.frames_stack.len(), 1, @@ -40,10 +38,7 @@ impl InMemoryEventSink { // we forget rollbacks as we have finished the execution and can just apply them let history = self.frames_stack.forward().current_frame(); - let (events, l1_messages) = Self::events_and_l1_messages_from_history(history); - let events_logs = Self::events_logs_from_history(history); - - (events_logs, events, l1_messages) + Self::events_and_l1_messages_from_history(history) } pub fn get_log_queries(&self) -> usize { @@ -69,92 +64,6 @@ impl InMemoryEventSink { Self::events_and_l1_messages_from_history(self.log_queries_after_timestamp(from_timestamp)) } - fn events_logs_from_history(history: &[Box]) -> Vec { - // Filter out all the L2->L1 logs and leave only events - let mut events = history - .iter() - .filter_map(|log_query| (log_query.aux_byte == EVENT_AUX_BYTE).then_some(**log_query)) - .collect_vec(); - - // Sort the events by timestamp and rollback flag, basically ensuring that - // if an event has been rolled back, the original event and its rollback will be put together - events.sort_by_key(|log| (log.timestamp, log.rollback)); - - let mut stack = Vec::::new(); - let mut net_history = vec![]; - for el in events.iter() { - assert_eq!(el.shard_id, 0, "only rollup shard is supported"); - if stack.is_empty() { - assert!(!el.rollback); - stack.push(*el); 
- } else { - // we can always pop as it's either one to add to queue, or discard - let previous = stack.pop().unwrap(); - if previous.timestamp == el.timestamp { - // Only rollback can have the same timestamp, so here we do nothing and simply - // double check the invariants - assert!(!previous.rollback); - assert!(el.rollback); - assert!(previous.rw_flag); - assert!(el.rw_flag); - assert_eq!(previous.tx_number_in_block, el.tx_number_in_block); - assert_eq!(previous.shard_id, el.shard_id); - assert_eq!(previous.address, el.address); - assert_eq!(previous.key, el.key); - assert_eq!(previous.written_value, el.written_value); - assert_eq!(previous.is_service, el.is_service); - continue; - } else { - // The event on the stack has not been rolled back. It must be a different event, - // with a different timestamp. - assert!(!el.rollback); - stack.push(*el); - - // cleanup some fields - // flags are conventions - let sorted_log_query = LogQuery { - timestamp: Timestamp(0), - tx_number_in_block: previous.tx_number_in_block, - aux_byte: 0, - shard_id: previous.shard_id, - address: previous.address, - key: previous.key, - read_value: U256::zero(), - written_value: previous.written_value, - rw_flag: false, - rollback: false, - is_service: previous.is_service, - }; - - net_history.push(sorted_log_query); - } - } - } - - // In case the stack is non-empty, then the last element of it has not been rolled back. - if let Some(previous) = stack.pop() { - // cleanup some fields - // flags are conventions - let sorted_log_query = LogQuery { - timestamp: Timestamp(0), - tx_number_in_block: previous.tx_number_in_block, - aux_byte: 0, - shard_id: previous.shard_id, - address: previous.address, - key: previous.key, - read_value: U256::zero(), - written_value: previous.written_value, - rw_flag: false, - rollback: false, - is_service: previous.is_service, - }; - - net_history.push(sorted_log_query); - } - - net_history - } - fn events_and_l1_messages_from_history( history: &[Box], ) -> (Vec, Vec) { diff --git a/core/lib/multivm/src/versions/vm_boojum_integration/vm.rs b/core/lib/multivm/src/versions/vm_boojum_integration/vm.rs index c893046c8542..90cea403084c 100644 --- a/core/lib/multivm/src/versions/vm_boojum_integration/vm.rs +++ b/core/lib/multivm/src/versions/vm_boojum_integration/vm.rs @@ -11,8 +11,8 @@ use crate::{ glue::GlueInto, interface::{ BootloaderMemory, BytecodeCompressionError, CurrentExecutionState, FinishedL1Batch, - L1BatchEnv, L2BlockEnv, SystemEnv, VmExecutionMode, VmExecutionResultAndLogs, VmInterface, - VmInterfaceHistoryEnabled, VmMemoryMetrics, + L1BatchEnv, L2BlockEnv, SystemEnv, VmExecutionMode, VmExecutionResultAndLogs, VmFactory, + VmInterface, VmInterfaceHistoryEnabled, VmMemoryMetrics, }, vm_boojum_integration::{ bootloader_state::BootloaderState, @@ -38,22 +38,9 @@ pub struct Vm { _phantom: std::marker::PhantomData, } -impl VmInterface for Vm { +impl VmInterface for Vm { type TracerDispatcher = TracerDispatcher; - fn new(batch_env: L1BatchEnv, system_env: SystemEnv, storage: StoragePtr) -> Self { - let (state, bootloader_state) = new_vm_state(storage.clone(), &system_env, &batch_env); - Self { - bootloader_state, - state, - storage, - system_env, - batch_env, - snapshots: vec![], - _phantom: Default::default(), - } - } - /// Push tx into memory for the future execution fn push_transaction(&mut self, tx: Transaction) { self.push_transaction_with_compression(tx, true); @@ -86,7 +73,7 @@ impl VmInterface for Vm { /// This method should be used only after the batch execution. 
/// Otherwise it can panic. fn get_current_execution_state(&self) -> CurrentExecutionState { - let (deduplicated_events_logs, raw_events, l1_messages) = self.state.event_sink.flatten(); + let (raw_events, l1_messages) = self.state.event_sink.flatten(); let events: Vec<_> = merge_events(raw_events) .into_iter() .map(|e| e.into_vm_event(self.batch_env.number)) @@ -97,13 +84,6 @@ impl VmInterface for Vm { .into_iter() .map(|log| SystemL2ToL1Log(log.glue_into())) .collect(); - let total_log_queries = self.state.event_sink.get_log_queries() - + self - .state - .precompiles_processor - .get_timestamp_history() - .len() - + self.state.storage.get_final_log_queries().len(); let storage_log_queries = self.state.storage.get_final_log_queries(); @@ -122,12 +102,6 @@ impl VmInterface for Vm { .map(|log| UserL2ToL1Log(log.into())) .collect(), system_logs, - total_log_queries, - cycles_used: self.state.local_state.monotonic_cycle_counter, - deduplicated_events_logs: deduplicated_events_logs - .into_iter() - .map(GlueInto::glue_into) - .collect(), storage_refunds: self.state.storage.returned_refunds.inner().clone(), pubdata_costs: Vec::new(), } @@ -184,14 +158,26 @@ impl VmInterface for Vm { } } -/// Methods of vm, which required some history manipulations -impl VmInterfaceHistoryEnabled for Vm { - /// Create snapshot of current vm state and push it into the memory +impl VmFactory for Vm { + fn new(batch_env: L1BatchEnv, system_env: SystemEnv, storage: StoragePtr) -> Self { + let (state, bootloader_state) = new_vm_state(storage.clone(), &system_env, &batch_env); + Self { + bootloader_state, + state, + storage, + system_env, + batch_env, + snapshots: vec![], + _phantom: Default::default(), + } + } +} + +impl VmInterfaceHistoryEnabled for Vm { fn make_snapshot(&mut self) { - self.make_snapshot_inner() + self.make_snapshot_inner(); } - /// Rollback vm state to the latest snapshot and destroy the snapshot fn rollback_to_the_latest_snapshot(&mut self) { let snapshot = self .snapshots @@ -200,10 +186,7 @@ impl VmInterfaceHistoryEnabled for Vm, +} + +impl BootloaderL2Block { + pub(crate) fn new(l2_block: L2BlockEnv, first_tx_place: usize) -> Self { + Self { + number: l2_block.number, + timestamp: l2_block.timestamp, + txs_rolling_hash: EMPTY_TXS_ROLLING_HASH, + prev_block_hash: l2_block.prev_block_hash, + first_tx_index: first_tx_place, + max_virtual_blocks_to_create: l2_block.max_virtual_blocks_to_create, + txs: vec![], + } + } + + pub(super) fn push_tx(&mut self, tx: BootloaderTx) { + self.update_rolling_hash(tx.hash); + self.txs.push(tx) + } + + pub(crate) fn get_hash(&self) -> H256 { + l2_block_hash( + L2BlockNumber(self.number), + self.timestamp, + self.prev_block_hash, + self.txs_rolling_hash, + ) + } + + fn update_rolling_hash(&mut self, tx_hash: H256) { + self.txs_rolling_hash = concat_and_hash(self.txs_rolling_hash, tx_hash) + } + + pub(crate) fn make_snapshot(&self) -> L2BlockSnapshot { + L2BlockSnapshot { + txs_rolling_hash: self.txs_rolling_hash, + txs_len: self.txs.len(), + } + } + + pub(crate) fn apply_snapshot(&mut self, snapshot: L2BlockSnapshot) { + self.txs_rolling_hash = snapshot.txs_rolling_hash; + match self.txs.len().cmp(&snapshot.txs_len) { + Ordering::Greater => self.txs.truncate(snapshot.txs_len), + Ordering::Less => panic!("Applying snapshot from future is not supported"), + Ordering::Equal => {} + } + } + pub(crate) fn l2_block(&self) -> L2Block { + L2Block { + number: self.number, + timestamp: self.timestamp, + hash: self.get_hash(), + } + } +} diff --git 
a/core/lib/multivm/src/versions/vm_fast/bootloader_state/mod.rs b/core/lib/multivm/src/versions/vm_fast/bootloader_state/mod.rs new file mode 100644 index 000000000000..73830de2759b --- /dev/null +++ b/core/lib/multivm/src/versions/vm_fast/bootloader_state/mod.rs @@ -0,0 +1,8 @@ +mod l2_block; +mod snapshot; +mod state; +mod tx; + +pub(crate) mod utils; +pub(crate) use snapshot::BootloaderStateSnapshot; +pub use state::BootloaderState; diff --git a/core/lib/multivm/src/versions/vm_fast/bootloader_state/snapshot.rs b/core/lib/multivm/src/versions/vm_fast/bootloader_state/snapshot.rs new file mode 100644 index 000000000000..8f1cec3cb7f1 --- /dev/null +++ b/core/lib/multivm/src/versions/vm_fast/bootloader_state/snapshot.rs @@ -0,0 +1,25 @@ +use zksync_types::H256; + +#[derive(Debug, Clone)] +pub(crate) struct BootloaderStateSnapshot { + /// ID of the next transaction to be executed. + pub(crate) tx_to_execute: usize, + /// Number of L2 blocks stored in bootloader memory. + pub(crate) l2_blocks_len: usize, + /// Snapshot of the last L2 block. Only this block can be changed during a rollback. + pub(crate) last_l2_block: L2BlockSnapshot, + /// The number of 32-byte words spent on the already included compressed bytecodes. + pub(crate) compressed_bytecodes_encoding: usize, + /// Current offset of the free space in the bootloader memory. + pub(crate) free_tx_offset: usize, + /// Whether the pubdata information has already been provided. + pub(crate) is_pubdata_information_provided: bool, +} + +#[derive(Debug, Clone)] +pub(crate) struct L2BlockSnapshot { + /// The rolling hash of all the transactions in the miniblock. + pub(crate) txs_rolling_hash: H256, + /// The number of transactions in the last L2 block. + pub(crate) txs_len: usize, +} diff --git a/core/lib/multivm/src/versions/vm_fast/bootloader_state/state.rs b/core/lib/multivm/src/versions/vm_fast/bootloader_state/state.rs new file mode 100644 index 000000000000..ae1c70db5862 --- /dev/null +++ b/core/lib/multivm/src/versions/vm_fast/bootloader_state/state.rs @@ -0,0 +1,293 @@ +use std::cmp::Ordering; + +use once_cell::sync::OnceCell; +use zksync_types::{L2ChainId, U256}; +use zksync_utils::bytecode::CompressedBytecodeInfo; + +use super::{ + l2_block::BootloaderL2Block, + tx::BootloaderTx, + utils::{apply_l2_block, apply_pubdata_to_memory, apply_tx_to_memory}, + BootloaderStateSnapshot, +}; +use crate::{ + interface::{BootloaderMemory, L2BlockEnv, TxExecutionMode}, + versions::vm_fast::{pubdata::PubdataInput, transaction_data::TransactionData}, + vm_latest::{constants::TX_DESCRIPTION_OFFSET, utils::l2_blocks::assert_next_block}, +}; + +/// Intermediate bootloader-related VM state. +/// +/// Required to process transactions one by one (since we intercept the VM execution to execute +/// transactions and add new ones to the memory on the fly). +/// Keeps track of everything related to the bootloader memory and can restore the whole memory. +/// +/// Serves two purposes: +/// - Tracks where the next tx should be pushed to in the bootloader memory. +/// - Tracks which transaction should be executed next. +#[derive(Debug)] +pub struct BootloaderState { + /// ID of the next transaction to be executed. + /// See the structure doc-comment for a better explanation of purpose. + tx_to_execute: usize, + /// Stored L2 blocks in bootloader memory. + l2_blocks: Vec<BootloaderL2Block>, + /// The number of 32-byte words spent on the already included compressed bytecodes.
+ compressed_bytecodes_encoding: usize, + /// Initial memory of bootloader + initial_memory: BootloaderMemory, + /// Mode of txs for execution; it can be changed once per VM launch + execution_mode: TxExecutionMode, + /// Current offset of the free space in the bootloader memory. + free_tx_offset: usize, + /// Information about the pubdata that will be needed to supply to the L1Messenger + pubdata_information: OnceCell<PubdataInput>, +} + +impl BootloaderState { + pub(crate) fn new( + execution_mode: TxExecutionMode, + initial_memory: BootloaderMemory, + first_l2_block: L2BlockEnv, + ) -> Self { + let l2_block = BootloaderL2Block::new(first_l2_block, 0); + Self { + tx_to_execute: 0, + compressed_bytecodes_encoding: 0, + l2_blocks: vec![l2_block], + initial_memory, + execution_mode, + free_tx_offset: 0, + pubdata_information: Default::default(), + } + } + + pub(crate) fn set_refund_for_current_tx(&mut self, refund: u64) { + let current_tx = self.current_tx(); + // We can't set the refund for the latest tx or use the latest l2_block for finding the tx, + // because we can fill the whole batch first and then execute txs one by one + let tx = self.find_tx_mut(current_tx); + tx.refund = refund; + } + + pub(crate) fn set_pubdata_input(&mut self, info: PubdataInput) { + self.pubdata_information + .set(info) + .expect("Pubdata information is already set"); + } + + pub(crate) fn start_new_l2_block(&mut self, l2_block: L2BlockEnv) { + let last_block = self.last_l2_block(); + assert!( + !last_block.txs.is_empty(), + "Cannot create new miniblocks on top of empty ones" + ); + assert_next_block(&last_block.l2_block(), &l2_block); + self.push_l2_block(l2_block); + } + + /// This method bypasses sanity checks and should be used carefully. + pub(crate) fn push_l2_block(&mut self, l2_block: L2BlockEnv) { + self.l2_blocks + .push(BootloaderL2Block::new(l2_block, self.free_tx_index())) + } + + pub(crate) fn push_tx( + &mut self, + tx: TransactionData, + predefined_overhead: u32, + predefined_refund: u64, + compressed_bytecodes: Vec<CompressedBytecodeInfo>, + trusted_ergs_limit: U256, + chain_id: L2ChainId, + ) -> BootloaderMemory { + let tx_offset = self.free_tx_offset(); + let bootloader_tx = BootloaderTx::new( + tx, + predefined_refund, + predefined_overhead, + trusted_ergs_limit, + compressed_bytecodes, + tx_offset, + chain_id, + ); + + let mut memory = vec![]; + let compressed_bytecode_size = apply_tx_to_memory( + &mut memory, + &bootloader_tx, + self.last_l2_block(), + self.free_tx_index(), + self.free_tx_offset(), + self.compressed_bytecodes_encoding, + self.execution_mode, + self.last_l2_block().txs.is_empty(), + ); + self.compressed_bytecodes_encoding += compressed_bytecode_size; + self.free_tx_offset = tx_offset + bootloader_tx.encoded_len(); + self.last_mut_l2_block().push_tx(bootloader_tx); + memory + } + + pub(crate) fn last_l2_block(&self) -> &BootloaderL2Block { + self.l2_blocks.last().unwrap() + } + + pub(crate) fn get_pubdata_information(&self) -> &PubdataInput { + self.pubdata_information + .get() + .expect("Pubdata information is not set") + } + + fn last_mut_l2_block(&mut self) -> &mut BootloaderL2Block { + self.l2_blocks.last_mut().unwrap() + } + + /// Applies all bootloader transactions to the initial memory + pub(crate) fn bootloader_memory(&self) -> BootloaderMemory { + let mut initial_memory = self.initial_memory.clone(); + let mut offset = 0; + let mut compressed_bytecodes_offset = 0; + let mut tx_index = 0; + for l2_block in &self.l2_blocks { + for (num, tx) in l2_block.txs.iter().enumerate() { + let compressed_bytecodes_size =
apply_tx_to_memory( + &mut initial_memory, + tx, + l2_block, + tx_index, + offset, + compressed_bytecodes_offset, + self.execution_mode, + num == 0, + ); + offset += tx.encoded_len(); + compressed_bytecodes_offset += compressed_bytecodes_size; + tx_index += 1; + } + if l2_block.txs.is_empty() { + apply_l2_block(&mut initial_memory, l2_block, tx_index) + } + } + + let pubdata_information = self + .pubdata_information + .clone() + .into_inner() + .expect("Empty pubdata information"); + + apply_pubdata_to_memory(&mut initial_memory, pubdata_information); + initial_memory + } + + fn free_tx_offset(&self) -> usize { + self.free_tx_offset + } + + pub(crate) fn free_tx_index(&self) -> usize { + let l2_block = self.last_l2_block(); + l2_block.first_tx_index + l2_block.txs.len() + } + + pub(crate) fn get_last_tx_compressed_bytecodes(&self) -> Vec<CompressedBytecodeInfo> { + if let Some(tx) = self.last_l2_block().txs.last() { + tx.compressed_bytecodes.clone() + } else { + vec![] + } + } + + /// Returns the ID of the current tx + pub(crate) fn current_tx(&self) -> usize { + self.tx_to_execute + .checked_sub(1) + .expect("There is no current tx to execute") + } + + /// Returns the ID of the next transaction to be executed and increments the local transaction counter. + pub(crate) fn move_tx_to_execute_pointer(&mut self) -> usize { + assert!( + self.tx_to_execute < self.free_tx_index(), + "Attempt to execute tx that was not pushed to memory. Tx ID: {}, txs in bootloader: {}", + self.tx_to_execute, + self.free_tx_index() + ); + + let old = self.tx_to_execute; + self.tx_to_execute += 1; + old + } + + /// Gets the offset of the tx description + pub(crate) fn get_tx_description_offset(&self, tx_index: usize) -> usize { + TX_DESCRIPTION_OFFSET + self.find_tx(tx_index).offset + } + + pub(crate) fn insert_fictive_l2_block(&mut self) -> &BootloaderL2Block { + let block = self.last_l2_block(); + if !block.txs.is_empty() { + self.start_new_l2_block(L2BlockEnv { + timestamp: block.timestamp + 1, + number: block.number + 1, + prev_block_hash: block.get_hash(), + max_virtual_blocks_to_create: 1, + }); + } + self.last_l2_block() + } + + fn find_tx(&self, tx_index: usize) -> &BootloaderTx { + for block in self.l2_blocks.iter().rev() { + if tx_index >= block.first_tx_index { + return &block.txs[tx_index - block.first_tx_index]; + } + } + panic!("The tx with index {} must exist", tx_index) + } + + fn find_tx_mut(&mut self, tx_index: usize) -> &mut BootloaderTx { + for block in self.l2_blocks.iter_mut().rev() { + if tx_index >= block.first_tx_index { + return &mut block.txs[tx_index - block.first_tx_index]; + } + } + panic!("The tx with index {} must exist", tx_index) + } + + pub(crate) fn get_snapshot(&self) -> BootloaderStateSnapshot { + BootloaderStateSnapshot { + tx_to_execute: self.tx_to_execute, + l2_blocks_len: self.l2_blocks.len(), + last_l2_block: self.last_l2_block().make_snapshot(), + compressed_bytecodes_encoding: self.compressed_bytecodes_encoding, + free_tx_offset: self.free_tx_offset, + is_pubdata_information_provided: self.pubdata_information.get().is_some(), + } + } + + pub(crate) fn apply_snapshot(&mut self, snapshot: BootloaderStateSnapshot) { + self.tx_to_execute = snapshot.tx_to_execute; + self.compressed_bytecodes_encoding = snapshot.compressed_bytecodes_encoding; + self.free_tx_offset = snapshot.free_tx_offset; + match self.l2_blocks.len().cmp(&snapshot.l2_blocks_len) { + Ordering::Greater => self.l2_blocks.truncate(snapshot.l2_blocks_len), + Ordering::Less => panic!("Applying snapshot from future is not supported"),
Ordering::Equal => {} + } + self.last_mut_l2_block() + .apply_snapshot(snapshot.last_l2_block); + + if !snapshot.is_pubdata_information_provided { + self.pubdata_information = Default::default(); + } else { + // Under the correct usage of the snapshots of the bootloader state, + // this assertion should never fail, since the pubdata information + // can be set only once. However, we have this assertion just in case. + assert!( + self.pubdata_information.get().is_some(), + "Snapshot with no pubdata cannot roll back to snapshot with one" + ); + } + } +} diff --git a/core/lib/multivm/src/versions/vm_fast/bootloader_state/tx.rs b/core/lib/multivm/src/versions/vm_fast/bootloader_state/tx.rs new file mode 100644 index 000000000000..36c1d65ddd35 --- /dev/null +++ b/core/lib/multivm/src/versions/vm_fast/bootloader_state/tx.rs @@ -0,0 +1,49 @@ +use zksync_types::{L2ChainId, H256, U256}; +use zksync_utils::bytecode::CompressedBytecodeInfo; + +use crate::versions::vm_fast::transaction_data::TransactionData; + +/// Information about tx necessary for execution in bootloader. +#[derive(Debug, Clone)] +pub(crate) struct BootloaderTx { + pub(crate) hash: H256, + /// Encoded transaction + pub(crate) encoded: Vec<U256>, + /// Compressed bytecodes, which have been published during this transaction + pub(crate) compressed_bytecodes: Vec<CompressedBytecodeInfo>, + /// Refund for this transaction + pub(crate) refund: u64, + /// Gas overhead + pub(crate) gas_overhead: u32, + /// Gas limit for this transaction. It can be different from the gas limit inside the transaction + pub(crate) trusted_gas_limit: U256, + /// Offset of the tx in bootloader memory + pub(crate) offset: usize, +} + +impl BootloaderTx { + pub(super) fn new( + tx: TransactionData, + predefined_refund: u64, + predefined_overhead: u32, + trusted_gas_limit: U256, + compressed_bytecodes: Vec<CompressedBytecodeInfo>, + offset: usize, + chain_id: L2ChainId, + ) -> Self { + let hash = tx.tx_hash(chain_id); + Self { + hash, + encoded: tx.into_tokens(), + compressed_bytecodes, + refund: predefined_refund, + gas_overhead: predefined_overhead, + trusted_gas_limit, + offset, + } + } + + pub(super) fn encoded_len(&self) -> usize { + self.encoded.len() + } +} diff --git a/core/lib/multivm/src/versions/vm_fast/bootloader_state/utils.rs b/core/lib/multivm/src/versions/vm_fast/bootloader_state/utils.rs new file mode 100644 index 000000000000..21259e366d1b --- /dev/null +++ b/core/lib/multivm/src/versions/vm_fast/bootloader_state/utils.rs @@ -0,0 +1,181 @@ +use zksync_types::{ethabi, U256}; +use zksync_utils::{bytecode::CompressedBytecodeInfo, bytes_to_be_words, h256_to_u256}; + +use super::{l2_block::BootloaderL2Block, tx::BootloaderTx}; +use crate::{ + interface::{BootloaderMemory, TxExecutionMode}, + versions::vm_fast::pubdata::PubdataInput, + vm_latest::constants::{ + BOOTLOADER_TX_DESCRIPTION_OFFSET, BOOTLOADER_TX_DESCRIPTION_SIZE, + COMPRESSED_BYTECODES_OFFSET, OPERATOR_PROVIDED_L1_MESSENGER_PUBDATA_OFFSET, + OPERATOR_PROVIDED_L1_MESSENGER_PUBDATA_SLOTS, OPERATOR_REFUNDS_OFFSET, + TX_DESCRIPTION_OFFSET, TX_OPERATOR_L2_BLOCK_INFO_OFFSET, + TX_OPERATOR_SLOTS_PER_L2_BLOCK_INFO, TX_OVERHEAD_OFFSET, TX_TRUSTED_GAS_LIMIT_OFFSET, + }, +}; + +pub(super) fn get_memory_for_compressed_bytecodes( + compressed_bytecodes: &[CompressedBytecodeInfo], +) -> Vec<U256> { + let memory_addition: Vec<_> = compressed_bytecodes + .iter() + .flat_map(|x| x.encode_call()) + .collect(); + + bytes_to_be_words(memory_addition) +} + +#[allow(clippy::too_many_arguments)] +pub(super) fn apply_tx_to_memory( + memory: &mut BootloaderMemory,
bootloader_tx: &BootloaderTx, + bootloader_l2_block: &BootloaderL2Block, + tx_index: usize, + tx_offset: usize, + compressed_bytecodes_size: usize, + execution_mode: TxExecutionMode, + start_new_l2_block: bool, +) -> usize { + let bootloader_description_offset = + BOOTLOADER_TX_DESCRIPTION_OFFSET + BOOTLOADER_TX_DESCRIPTION_SIZE * tx_index; + let tx_description_offset = TX_DESCRIPTION_OFFSET + tx_offset; + + memory.push(( + bootloader_description_offset, + assemble_tx_meta(execution_mode, true), + )); + + memory.push(( + bootloader_description_offset + 1, + U256::from_big_endian(&(32 * tx_description_offset).to_be_bytes()), + )); + + let refund_offset = OPERATOR_REFUNDS_OFFSET + tx_index; + memory.push((refund_offset, bootloader_tx.refund.into())); + + let overhead_offset = TX_OVERHEAD_OFFSET + tx_index; + memory.push((overhead_offset, bootloader_tx.gas_overhead.into())); + + let trusted_gas_limit_offset = TX_TRUSTED_GAS_LIMIT_OFFSET + tx_index; + memory.push((trusted_gas_limit_offset, bootloader_tx.trusted_gas_limit)); + + memory.extend( + (tx_description_offset..tx_description_offset + bootloader_tx.encoded_len()) + .zip(bootloader_tx.encoded.clone()), + ); + apply_l2_block_inner(memory, bootloader_l2_block, tx_index, start_new_l2_block); + + // Note, +1 is moving for pointer + let compressed_bytecodes_offset = COMPRESSED_BYTECODES_OFFSET + 1 + compressed_bytecodes_size; + + let encoded_compressed_bytecodes = + get_memory_for_compressed_bytecodes(&bootloader_tx.compressed_bytecodes); + let compressed_bytecodes_encoding = encoded_compressed_bytecodes.len(); + + memory.extend( + (compressed_bytecodes_offset + ..compressed_bytecodes_offset + encoded_compressed_bytecodes.len()) + .zip(encoded_compressed_bytecodes), + ); + compressed_bytecodes_encoding +} + +pub(crate) fn apply_l2_block( + memory: &mut BootloaderMemory, + bootloader_l2_block: &BootloaderL2Block, + txs_index: usize, +) { + apply_l2_block_inner(memory, bootloader_l2_block, txs_index, true) +} + +fn apply_l2_block_inner( + memory: &mut BootloaderMemory, + bootloader_l2_block: &BootloaderL2Block, + txs_index: usize, + start_new_l2_block: bool, +) { + // Since L2 block information start from the `TX_OPERATOR_L2_BLOCK_INFO_OFFSET` and each + // L2 block info takes `TX_OPERATOR_SLOTS_PER_L2_BLOCK_INFO` slots, the position where the L2 block info + // for this transaction needs to be written is: + + let block_position = + TX_OPERATOR_L2_BLOCK_INFO_OFFSET + txs_index * TX_OPERATOR_SLOTS_PER_L2_BLOCK_INFO; + + memory.extend(vec![ + (block_position, bootloader_l2_block.number.into()), + (block_position + 1, bootloader_l2_block.timestamp.into()), + ( + block_position + 2, + h256_to_u256(bootloader_l2_block.prev_block_hash), + ), + ( + block_position + 3, + if start_new_l2_block { + bootloader_l2_block.max_virtual_blocks_to_create.into() + } else { + U256::zero() + }, + ), + ]) +} + +pub(crate) fn apply_pubdata_to_memory( + memory: &mut BootloaderMemory, + pubdata_information: PubdataInput, +) { + // Skipping two slots as they will be filled by the bootloader itself: + // - One slot is for the selector of the call to the L1Messenger. + // - The other slot is for the 0x20 offset for the calldata. + let l1_messenger_pubdata_start_slot = OPERATOR_PROVIDED_L1_MESSENGER_PUBDATA_OFFSET + 2; + + // Need to skip first word as it represents array offset + // while bootloader expects only [len || data] + let pubdata = ethabi::encode(&[ethabi::Token::Bytes( + pubdata_information.build_pubdata(true), + )])[32..] 
+ .to_vec(); + + assert!( + pubdata.len() / 32 <= OPERATOR_PROVIDED_L1_MESSENGER_PUBDATA_SLOTS - 2, + "The encoded pubdata is too big" + ); + + pubdata + .chunks(32) + .enumerate() + .for_each(|(slot_offset, value)| { + memory.push(( + l1_messenger_pubdata_start_slot + slot_offset, + U256::from(value), + )) + }); +} + +/// Forms a word that contains meta information for the transaction execution. +/// +/// # Current layout +/// +/// - Byte 0 (MSB): server-side tx execution mode. +/// In the server, we may want to execute different parts of the transaction in different contexts. +/// For example, when checking validity, we don't want to actually execute the transaction and have side effects. +/// +/// Possible values: +/// - 0x00: validate & execute (normal mode) +/// - 0x02: execute but DO NOT validate +/// +/// - Byte 31 (LSB): whether to execute the transaction at all. +pub(super) fn assemble_tx_meta(execution_mode: TxExecutionMode, execute_tx: bool) -> U256 { + let mut output = [0u8; 32]; + + // Set byte 0 (execution mode) + output[0] = match execution_mode { + TxExecutionMode::VerifyExecute => 0x00, + TxExecutionMode::EstimateFee { .. } => 0x00, + TxExecutionMode::EthCall { .. } => 0x02, + }; + + // Set byte 31 (marker for tx execution) + output[31] = u8::from(execute_tx); + + U256::from_big_endian(&output) +} diff --git a/core/lib/multivm/src/versions/vm_fast/bytecode.rs b/core/lib/multivm/src/versions/vm_fast/bytecode.rs new file mode 100644 index 000000000000..7a16b5940df6 --- /dev/null +++ b/core/lib/multivm/src/versions/vm_fast/bytecode.rs @@ -0,0 +1,52 @@ +use itertools::Itertools; +use zksync_state::ReadStorage; +use zksync_types::H256; +use zksync_utils::{ + bytecode::{compress_bytecode, hash_bytecode, CompressedBytecodeInfo}, + h256_to_u256, +}; + +use super::Vm; + +impl<S: ReadStorage> Vm<S> { + /// Checks whether the last transaction has successfully published compressed bytecodes and returns `true` if at least one of them is still unknown.
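As context for the publication check below: `compress_bytecodes` (defined later in this file) deduplicates the candidate bytecodes, drops those the storage already knows, and restores the original submission order. A minimal std-only sketch of that sort-dedup-filter-restore pipeline, with the hashing and compression stubbed out (all names here are illustrative, not from this PR):

```rust
// Std-only sketch of the `compress_bytecodes` pipeline: sort by content so
// duplicates become adjacent, dedup, drop already-known bytecodes, then
// restore the original order. The real code keys the "known" check on
// `hash_bytecode` and also compresses each survivor.
fn select_unknown(bytecodes: &[Vec<u8>], is_known: impl Fn(&[u8]) -> bool) -> Vec<Vec<u8>> {
    let mut indexed: Vec<(usize, &Vec<u8>)> = bytecodes.iter().enumerate().collect();
    indexed.sort_by(|a, b| a.1.cmp(b.1)); // sort by content...
    indexed.dedup_by(|a, b| a.1 == b.1); // ...so duplicates collapse to one entry
    indexed.retain(|(_, code)| !is_known(code));
    indexed.sort_by_key(|&(idx, _)| idx); // restore submission order
    indexed.into_iter().map(|(_, code)| code.clone()).collect()
}

fn main() {
    let codes = vec![vec![1u8], vec![2], vec![1]];
    let known: Vec<u8> = vec![2]; // pretend bytecode [2] is already known on-chain
    let unknown = select_unknown(&codes, |code| code == known.as_slice());
    assert_eq!(unknown, vec![vec![1u8]]); // the duplicate [1] is published once
}
```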
+ pub(crate) fn has_unpublished_bytecodes(&mut self) -> bool { + self.bootloader_state + .get_last_tx_compressed_bytecodes() + .iter() + .any(|info| { + let hash_bytecode = hash_bytecode(&info.original); + let is_bytecode_known = self.world.storage.is_bytecode_known(&hash_bytecode); + + let is_bytecode_known_cache = self + .world + .bytecode_cache + .contains_key(&h256_to_u256(hash_bytecode)); + !(is_bytecode_known || is_bytecode_known_cache) + }) + } +} + +pub(crate) fn compress_bytecodes( + bytecodes: &[Vec], + mut is_bytecode_known: impl FnMut(H256) -> bool, +) -> Vec { + bytecodes + .iter() + .enumerate() + .sorted_by_key(|(_idx, dep)| *dep) + .dedup_by(|x, y| x.1 == y.1) + .filter(|(_idx, dep)| !is_bytecode_known(hash_bytecode(dep))) + .sorted_by_key(|(idx, _dep)| *idx) + .filter_map(|(_idx, dep)| { + let compressed_bytecode = compress_bytecode(dep); + + compressed_bytecode + .ok() + .map(|compressed| CompressedBytecodeInfo { + original: dep.clone(), + compressed, + }) + }) + .collect() +} diff --git a/core/lib/multivm/src/versions/vm_fast/events.rs b/core/lib/multivm/src/versions/vm_fast/events.rs new file mode 100644 index 000000000000..b39d501b655b --- /dev/null +++ b/core/lib/multivm/src/versions/vm_fast/events.rs @@ -0,0 +1,112 @@ +use vm2::Event; +use zksync_types::{L1BatchNumber, VmEvent, H256}; +use zksync_utils::h256_to_account_address; + +#[derive(Clone)] +struct EventAccumulator { + pub(crate) shard_id: u8, + pub(crate) tx_number_in_block: u16, + pub(crate) topics: Vec<[u8; 32]>, + pub(crate) data: Vec, +} + +impl EventAccumulator { + fn into_vm_event(self, block_number: L1BatchNumber) -> VmEvent { + VmEvent { + location: (block_number, self.tx_number_in_block as u32), + address: h256_to_account_address(&H256(self.topics[0])), + indexed_topics: self.topics[1..].iter().map(H256::from).collect(), + value: self.data, + } + } +} + +pub(crate) fn merge_events(events: &[Event], block_number: L1BatchNumber) -> Vec { + let mut result = vec![]; + let mut current: Option<(usize, u32, EventAccumulator)> = None; + + for message in events.iter() { + let Event { + shard_id, + is_first, + tx_number, + key, + value, + } = message.clone(); + + if !is_first { + if let Some((mut remaining_data_length, mut remaining_topics, mut event)) = + current.take() + { + if event.shard_id != shard_id || event.tx_number_in_block != tx_number { + continue; + } + let mut data_0 = [0u8; 32]; + let mut data_1 = [0u8; 32]; + key.to_big_endian(&mut data_0); + value.to_big_endian(&mut data_1); + for el in [data_0, data_1].iter() { + if remaining_topics != 0 { + event.topics.push(*el); + remaining_topics -= 1; + } else if remaining_data_length != 0 { + if remaining_data_length >= 32 { + event.data.extend_from_slice(el); + remaining_data_length -= 32; + } else { + event.data.extend_from_slice(&el[..remaining_data_length]); + remaining_data_length = 0; + } + } + } + + if remaining_data_length != 0 || remaining_topics != 0 { + current = Some((remaining_data_length, remaining_topics, event)) + } else { + result.push(event.into_vm_event(block_number)); + } + } + } else { + // start new one. First take the old one only if it's well formed + if let Some((remaining_data_length, remaining_topics, event)) = current.take() { + if remaining_data_length == 0 && remaining_topics == 0 { + result.push(event.into_vm_event(block_number)); + } + } + + // split key as our internal marker. 
Ignore higher bits + let mut num_topics = key.0[0] as u32; + let mut data_length = (key.0[0] >> 32) as usize; + let mut buffer = [0u8; 32]; + value.to_big_endian(&mut buffer); + + let (topics, data) = if num_topics == 0 && data_length == 0 { + (vec![], vec![]) + } else if num_topics == 0 { + data_length -= 32; + (vec![], buffer.to_vec()) + } else { + num_topics -= 1; + (vec![buffer], vec![]) + }; + + let new_event = EventAccumulator { + shard_id, + tx_number_in_block: tx_number, + topics, + data, + }; + + current = Some((data_length, num_topics, new_event)) + } + } + + // add the last one + if let Some((remaining_data_length, remaining_topics, event)) = current.take() { + if remaining_data_length == 0 && remaining_topics == 0 { + result.push(event.into_vm_event(block_number)); + } + } + + result +} diff --git a/core/lib/multivm/src/versions/vm_fast/glue.rs b/core/lib/multivm/src/versions/vm_fast/glue.rs new file mode 100644 index 000000000000..cbf22f9122b0 --- /dev/null +++ b/core/lib/multivm/src/versions/vm_fast/glue.rs @@ -0,0 +1,26 @@ +use zksync_types::l2_to_l1_log::{L2ToL1Log, SystemL2ToL1Log}; +use zksync_utils::u256_to_h256; + +use crate::glue::GlueFrom; + +impl GlueFrom<&vm2::L2ToL1Log> for SystemL2ToL1Log { + fn glue_from(value: &vm2::L2ToL1Log) -> Self { + let vm2::L2ToL1Log { + key, + value, + is_service, + address, + shard_id, + tx_number, + } = *value; + + Self(L2ToL1Log { + shard_id, + is_service, + tx_number_in_block: tx_number, + sender: address, + key: u256_to_h256(key), + value: u256_to_h256(value), + }) + } +} diff --git a/core/lib/multivm/src/versions/vm_fast/hook.rs b/core/lib/multivm/src/versions/vm_fast/hook.rs new file mode 100644 index 000000000000..8d385f94f3e1 --- /dev/null +++ b/core/lib/multivm/src/versions/vm_fast/hook.rs @@ -0,0 +1,39 @@ +#[derive(Debug)] +pub(crate) enum Hook { + AccountValidationEntered, + PaymasterValidationEntered, + AccountValidationExited, + ValidationStepEnded, + TxHasEnded, + DebugLog, + DebugReturnData, + NearCallCatch, + AskOperatorForRefund, + NotifyAboutRefund, + PostResult, + FinalBatchInfo, + PubdataRequested, +} + +impl Hook { + /// # Panics + /// Panics if the number does not correspond to any hook. 
+ pub fn from_u32(hook: u32) -> Self { + match hook { + 0 => Hook::AccountValidationEntered, + 1 => Hook::PaymasterValidationEntered, + 2 => Hook::AccountValidationExited, + 3 => Hook::ValidationStepEnded, + 4 => Hook::TxHasEnded, + 5 => Hook::DebugLog, + 6 => Hook::DebugReturnData, + 7 => Hook::NearCallCatch, + 8 => Hook::AskOperatorForRefund, + 9 => Hook::NotifyAboutRefund, + 10 => Hook::PostResult, + 11 => Hook::FinalBatchInfo, + 12 => Hook::PubdataRequested, + _ => panic!("Unknown hook {}", hook), + } + } +} diff --git a/core/lib/multivm/src/versions/vm_fast/initial_bootloader_memory.rs b/core/lib/multivm/src/versions/vm_fast/initial_bootloader_memory.rs new file mode 100644 index 000000000000..b3bf15cb1be5 --- /dev/null +++ b/core/lib/multivm/src/versions/vm_fast/initial_bootloader_memory.rs @@ -0,0 +1,44 @@ +use zksync_types::U256; +use zksync_utils::{address_to_u256, h256_to_u256}; + +use crate::{interface::L1BatchEnv, vm_latest::utils::fee::get_batch_base_fee}; + +const OPERATOR_ADDRESS_SLOT: usize = 0; +const PREV_BLOCK_HASH_SLOT: usize = 1; +const NEW_BLOCK_TIMESTAMP_SLOT: usize = 2; +const NEW_BLOCK_NUMBER_SLOT: usize = 3; +const FAIR_PUBDATA_PRICE_SLOT: usize = 4; +const FAIR_L2_GAS_PRICE_SLOT: usize = 5; +const EXPECTED_BASE_FEE_SLOT: usize = 6; +const SHOULD_SET_NEW_BLOCK_SLOT: usize = 7; + +/// Returns the initial memory for the bootloader based on the current batch environment. +pub(crate) fn bootloader_initial_memory(l1_batch: &L1BatchEnv) -> Vec<(usize, U256)> { + let (prev_block_hash, should_set_new_block) = l1_batch + .previous_batch_hash + .map(|prev_block_hash| (h256_to_u256(prev_block_hash), U256::one())) + .unwrap_or_default(); + + vec![ + ( + OPERATOR_ADDRESS_SLOT, + address_to_u256(&l1_batch.fee_account), + ), + (PREV_BLOCK_HASH_SLOT, prev_block_hash), + (NEW_BLOCK_TIMESTAMP_SLOT, U256::from(l1_batch.timestamp)), + (NEW_BLOCK_NUMBER_SLOT, U256::from(l1_batch.number.0)), + ( + FAIR_PUBDATA_PRICE_SLOT, + U256::from(l1_batch.fee_input.fair_pubdata_price()), + ), + ( + FAIR_L2_GAS_PRICE_SLOT, + U256::from(l1_batch.fee_input.fair_l2_gas_price()), + ), + ( + EXPECTED_BASE_FEE_SLOT, + U256::from(get_batch_base_fee(l1_batch)), + ), + (SHOULD_SET_NEW_BLOCK_SLOT, should_set_new_block), + ] +} diff --git a/core/lib/multivm/src/versions/vm_fast/mod.rs b/core/lib/multivm/src/versions/vm_fast/mod.rs new file mode 100644 index 000000000000..4deb6b9dbf74 --- /dev/null +++ b/core/lib/multivm/src/versions/vm_fast/mod.rs @@ -0,0 +1,14 @@ +pub use self::vm::Vm; + +mod bootloader_state; +mod bytecode; +mod events; +mod glue; +mod hook; +mod initial_bootloader_memory; +mod pubdata; +mod refund; +#[cfg(test)] +mod tests; +mod transaction_data; +mod vm; diff --git a/core/lib/multivm/src/versions/vm_fast/pubdata.rs b/core/lib/multivm/src/versions/vm_fast/pubdata.rs new file mode 100644 index 000000000000..38489a6c8e92 --- /dev/null +++ b/core/lib/multivm/src/versions/vm_fast/pubdata.rs @@ -0,0 +1,124 @@ +use zksync_types::{ + event::L1MessengerL2ToL1Log, + writes::{compress_state_diffs, StateDiffRecord}, +}; + +/// Struct based on which the pubdata blob is formed +#[derive(Debug, Clone, Default)] +pub(crate) struct PubdataInput { + pub(crate) user_logs: Vec, + pub(crate) l2_to_l1_messages: Vec>, + pub(crate) published_bytecodes: Vec>, + pub(crate) state_diffs: Vec, +} + +impl PubdataInput { + pub(crate) fn build_pubdata(self, with_uncompressed_state_diffs: bool) -> Vec { + let mut l1_messenger_pubdata = vec![]; + + let PubdataInput { + user_logs, + l2_to_l1_messages, + 
published_bytecodes, + state_diffs, + } = self; + + // Encoding user L2->L1 logs. + // Format: `[(numberOfL2ToL1Logs as u32) || l2tol1logs[1] || ... || l2tol1logs[n]]` + l1_messenger_pubdata.extend((user_logs.len() as u32).to_be_bytes()); + for l2tol1log in user_logs { + l1_messenger_pubdata.extend(l2tol1log.packed_encoding()); + } + + // Encoding L2->L1 messages + // Format: `[(numberOfMessages as u32) || (messages[1].len() as u32) || messages[1] || ... || (messages[n].len() as u32) || messages[n]]` + l1_messenger_pubdata.extend((l2_to_l1_messages.len() as u32).to_be_bytes()); + for message in l2_to_l1_messages { + l1_messenger_pubdata.extend((message.len() as u32).to_be_bytes()); + l1_messenger_pubdata.extend(message); + } + + // Encoding bytecodes + // Format: `[(numberOfBytecodes as u32) || (bytecodes[1].len() as u32) || bytecodes[1] || ... || (bytecodes[n].len() as u32) || bytecodes[n]]` + l1_messenger_pubdata.extend((published_bytecodes.len() as u32).to_be_bytes()); + for bytecode in published_bytecodes { + l1_messenger_pubdata.extend((bytecode.len() as u32).to_be_bytes()); + l1_messenger_pubdata.extend(bytecode); + } + + // Encoding state diffs + // Format: `[size of compressed state diffs u32 || compressed state diffs || (# state diffs: initial + repeated) as u32 || sorted state diffs by <derived key>]` + let state_diffs_compressed = compress_state_diffs(state_diffs.clone()); + l1_messenger_pubdata.extend(state_diffs_compressed); + + if with_uncompressed_state_diffs { + l1_messenger_pubdata.extend((state_diffs.len() as u32).to_be_bytes()); + for state_diff in state_diffs { + l1_messenger_pubdata.extend(state_diff.encode_padded()); + } + } + + l1_messenger_pubdata + } +} + +#[cfg(test)] +mod tests { + use zksync_system_constants::{ACCOUNT_CODE_STORAGE_ADDRESS, BOOTLOADER_ADDRESS}; + use zksync_utils::u256_to_h256; + + use super::*; + + #[test] + fn test_basic_pubdata_building() { + // Just using some constant addresses for tests + let addr1 = BOOTLOADER_ADDRESS; + let addr2 = ACCOUNT_CODE_STORAGE_ADDRESS; + + let user_logs = vec![L1MessengerL2ToL1Log { + l2_shard_id: 0, + is_service: false, + tx_number_in_block: 0, + sender: addr1, + key: 1.into(), + value: 128.into(), + }]; + + let l2_to_l1_messages = vec![hex::decode("deadbeef").unwrap()]; + + let published_bytecodes = vec![hex::decode("aaaabbbb").unwrap()]; + + // To cover more cases, we have two state diffs: + // One with enumeration index present (and so it is a repeated write) and the one without it.
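For context on the two records constructed right below: a nonzero `enumeration_index` marks a repeated write, which the compressor can reference by a compact index, while an initial write must publish its full 32-byte derived key. A rough sketch of that size difference (figures approximate, not taken from this diff; the real compression also packs the values):

```rust
// Rough sketch: approximate pubdata footprint of one state diff's key part.
fn approx_key_bytes(enumeration_index: u64) -> usize {
    if enumeration_index == 0 {
        32 // initial write: the full 32-byte derived key is published
    } else {
        4 // repeated write: only a compact enumeration index (~4 bytes)
    }
}

fn main() {
    assert_eq!(approx_key_bytes(12), 4); // first record below: repeated write
    assert_eq!(approx_key_bytes(0), 32); // second record below: initial write
}
```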
+ let state_diffs = vec![ + StateDiffRecord { + address: addr2, + key: 155.into(), + derived_key: u256_to_h256(125.into()).0, + enumeration_index: 12, + initial_value: 11.into(), + final_value: 12.into(), + }, + StateDiffRecord { + address: addr2, + key: 156.into(), + derived_key: u256_to_h256(126.into()).0, + enumeration_index: 0, + initial_value: 0.into(), + final_value: 14.into(), + }, + ]; + + let input = PubdataInput { + user_logs, + l2_to_l1_messages, + published_bytecodes, + state_diffs, + }; + + let pubdata = + ethabi::encode(&[ethabi::Token::Bytes(input.build_pubdata(true))])[32..].to_vec(); + + assert_eq!(hex::encode(pubdata), "00000000000000000000000000000000000000000000000000000000000002c700000001000000000000000000000000000000000000000000008001000000000000000000000000000000000000000000000000000000000000000100000000000000000000000000000000000000000000000000000000000000800000000100000004deadbeef0000000100000004aaaabbbb0100002a040001000000000000000000000000000000000000000000000000000000000000007e090e0000000c0901000000020000000000000000000000000000000000008002000000000000000000000000000000000000000000000000000000000000009b000000000000000000000000000000000000000000000000000000000000007d000000000000000c000000000000000000000000000000000000000000000000000000000000000b000000000000000000000000000000000000000000000000000000000000000c00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000008002000000000000000000000000000000000000000000000000000000000000009c000000000000000000000000000000000000000000000000000000000000007e00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000e000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000"); + } +} diff --git a/core/lib/multivm/src/versions/vm_fast/refund.rs b/core/lib/multivm/src/versions/vm_fast/refund.rs new file mode 100644 index 000000000000..524a6ca4c3bc --- /dev/null +++ b/core/lib/multivm/src/versions/vm_fast/refund.rs @@ -0,0 +1,66 @@ +use zksync_types::{H256, U256}; +use zksync_utils::ceil_div_u256; + +use crate::vm_latest::{utils::fee::get_batch_base_fee, L1BatchEnv}; + +pub(crate) fn compute_refund( + l1_batch: &L1BatchEnv, + bootloader_refund: u64, + gas_spent_on_pubdata: u64, + tx_gas_limit: u64, + current_ergs_per_pubdata_byte: u32, + pubdata_published: u32, + tx_hash: H256, +) -> u64 { + let total_gas_spent = tx_gas_limit - bootloader_refund; + + let gas_spent_on_computation = total_gas_spent + .checked_sub(gas_spent_on_pubdata) + .unwrap_or_else(|| { + tracing::error!( + "Gas spent on pubdata is greater than total gas spent. On pubdata: {}, total: {}", + gas_spent_on_pubdata, + total_gas_spent + ); + 0 + }); + + // For now, bootloader charges only for base fee. 
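Before the function body continues, a worked sketch of the arithmetic `compute_refund` performs end to end (plain integers, all numbers invented for illustration; the real code uses `U256` and the batch fee input):

```rust
// Worked sketch of the refund arithmetic (made-up numbers).
fn main() {
    let tx_gas_limit: u128 = 1_000_000;
    let bootloader_refund: u128 = 300_000;
    let gas_spent_on_pubdata: u128 = 200_000;
    let pubdata_published: u128 = 100; // bytes
    let base_fee: u128 = 250_000_000; // effective gas price, wei
    let fair_l2_gas_price: u128 = 100_000_000;
    let fair_pubdata_price: u128 = 500_000_000; // wei per pubdata byte
    let gas_per_pubdata_byte: u128 = 4;

    let total_gas_spent = tx_gas_limit - bootloader_refund;
    let gas_spent_on_computation = total_gas_spent - gas_spent_on_pubdata;

    // Pubdata is charged at the cheaper of the bootloader's byte price
    // (base fee times gas per pubdata byte) and the fair pubdata price.
    let pubdata_byte_price = (base_fee * gas_per_pubdata_byte).min(fair_pubdata_price);

    let fair_fee = gas_spent_on_computation * fair_l2_gas_price
        + pubdata_published * pubdata_byte_price;
    let pre_paid = tx_gas_limit * base_fee;

    // The refund is the overpayment converted back into gas, rounded up.
    let refund_gas = (pre_paid - fair_fee).div_ceil(base_fee);
    assert_eq!(refund_gas, 799_800);
}
```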
+ let effective_gas_price = get_batch_base_fee(l1_batch); + + let bootloader_eth_price_per_pubdata_byte = + U256::from(effective_gas_price) * U256::from(current_ergs_per_pubdata_byte); + + let fair_eth_price_per_pubdata_byte = U256::from(l1_batch.fee_input.fair_pubdata_price()); + + // For now, L1 originated transactions are allowed to pay less than fair fee per pubdata, + // so we should take it into account. + let eth_price_per_pubdata_byte_for_calculation = std::cmp::min( + bootloader_eth_price_per_pubdata_byte, + fair_eth_price_per_pubdata_byte, + ); + + let fair_fee_eth = U256::from(gas_spent_on_computation) + * U256::from(l1_batch.fee_input.fair_l2_gas_price()) + + U256::from(pubdata_published) * eth_price_per_pubdata_byte_for_calculation; + let pre_paid_eth = U256::from(tx_gas_limit) * U256::from(effective_gas_price); + let refund_eth = pre_paid_eth.checked_sub(fair_fee_eth).unwrap_or_else(|| { + tracing::error!( + "Fair fee is greater than pre paid. Fair fee: {} wei, pre paid: {} wei", + fair_fee_eth, + pre_paid_eth + ); + U256::zero() + }); + + tracing::trace!( + "Fee benchmark for transaction with hash {}", + hex::encode(tx_hash.as_bytes()) + ); + tracing::trace!("Gas Limit: {}", tx_gas_limit); + tracing::trace!("Gas spent on computation: {}", gas_spent_on_computation); + tracing::trace!("Gas spent on pubdata: {}", gas_spent_on_pubdata); + tracing::trace!("Pubdata published: {}", pubdata_published); + + ceil_div_u256(refund_eth, effective_gas_price.into()).as_u64() +} diff --git a/core/lib/multivm/src/versions/vm_fast/tests/block_tip.rs b/core/lib/multivm/src/versions/vm_fast/tests/block_tip.rs new file mode 100644 index 000000000000..239d40947a67 --- /dev/null +++ b/core/lib/multivm/src/versions/vm_fast/tests/block_tip.rs @@ -0,0 +1,423 @@ +use std::borrow::BorrowMut; + +use ethabi::Token; +use itertools::Itertools; +use zksync_contracts::load_sys_contract; +use zksync_system_constants::{ + CONTRACT_FORCE_DEPLOYER_ADDRESS, KNOWN_CODES_STORAGE_ADDRESS, L1_MESSENGER_ADDRESS, +}; +use zksync_types::{ + commitment::SerializeCommitment, fee_model::BatchFeeInput, get_code_key, + l2_to_l1_log::L2ToL1Log, writes::StateDiffRecord, Address, Execute, H256, U256, +}; +use zksync_utils::{bytecode::hash_bytecode, bytes_to_be_words, h256_to_u256, u256_to_h256}; + +use super::utils::{get_complex_upgrade_abi, read_complex_upgrade}; +use crate::versions::vm_fast::tests::tester::{ + default_l1_batch, get_empty_storage, InMemoryStorageView, VmTesterBuilder, +}; +use crate::{ + interface::{TxExecutionMode, VmExecutionMode, VmInterface}, + vm_latest::{ + constants::{ + BOOTLOADER_BATCH_TIP_CIRCUIT_STATISTICS_OVERHEAD, + BOOTLOADER_BATCH_TIP_METRICS_SIZE_OVERHEAD, BOOTLOADER_BATCH_TIP_OVERHEAD, + MAX_VM_PUBDATA_PER_BATCH, + }, + tracers::PubdataTracer, + L1BatchEnv, TracerDispatcher, + }, +}; + +#[derive(Debug, Clone, Default)] +struct L1MessengerTestData { + l2_to_l1_logs: usize, + messages: Vec>, + bytecodes: Vec>, + state_diffs: Vec, +} + +struct MimicCallInfo { + to: Address, + who_to_mimic: Address, + data: Vec, +} + +const CALLS_PER_TX: usize = 1_000; +fn populate_mimic_calls(data: L1MessengerTestData) -> Vec> { + let complex_upgrade = get_complex_upgrade_abi(); + let l1_messenger = load_sys_contract("L1Messenger"); + + let logs_mimic_calls = (0..data.l2_to_l1_logs).map(|_| MimicCallInfo { + to: L1_MESSENGER_ADDRESS, + who_to_mimic: KNOWN_CODES_STORAGE_ADDRESS, + data: l1_messenger + .function("sendL2ToL1Log") + .unwrap() + .encode_input(&[ + Token::Bool(false), + 
Token::FixedBytes(H256::random().0.to_vec()), + Token::FixedBytes(H256::random().0.to_vec()), + ]) + .unwrap(), + }); + let messages_mimic_calls = data.messages.iter().map(|message| MimicCallInfo { + to: L1_MESSENGER_ADDRESS, + who_to_mimic: KNOWN_CODES_STORAGE_ADDRESS, + data: l1_messenger + .function("sendToL1") + .unwrap() + .encode_input(&[Token::Bytes(message.clone())]) + .unwrap(), + }); + let bytecodes_mimic_calls = data.bytecodes.iter().map(|bytecode| MimicCallInfo { + to: L1_MESSENGER_ADDRESS, + who_to_mimic: KNOWN_CODES_STORAGE_ADDRESS, + data: l1_messenger + .function("requestBytecodeL1Publication") + .unwrap() + .encode_input(&[Token::FixedBytes(hash_bytecode(bytecode).0.to_vec())]) + .unwrap(), + }); + + let encoded_calls = logs_mimic_calls + .chain(messages_mimic_calls) + .chain(bytecodes_mimic_calls) + .map(|call| { + Token::Tuple(vec![ + Token::Address(call.to), + Token::Address(call.who_to_mimic), + Token::Bytes(call.data), + ]) + }) + .chunks(CALLS_PER_TX) + .into_iter() + .map(|chunk| { + complex_upgrade + .function("mimicCalls") + .unwrap() + .encode_input(&[Token::Array(chunk.collect_vec())]) + .unwrap() + }) + .collect_vec(); + + encoded_calls +} + +struct TestStatistics { + pub max_used_gas: u32, + pub circuit_statistics: u64, + pub execution_metrics_size: u64, +} + +struct StatisticsTagged { + pub statistics: TestStatistics, + pub tag: String, +} + +fn execute_test(test_data: L1MessengerTestData) -> TestStatistics { + let mut storage = get_empty_storage(); + let complex_upgrade_code = read_complex_upgrade(); + + // For this test we'll just put the bytecode onto the force deployer address + storage.borrow_mut().set_value( + get_code_key(&CONTRACT_FORCE_DEPLOYER_ADDRESS), + hash_bytecode(&complex_upgrade_code), + ); + storage + .borrow_mut() + .store_factory_dep(hash_bytecode(&complex_upgrade_code), complex_upgrade_code); + + // We are measuring computational cost, so prices for pubdata don't matter, while they artificially dilute + // the gas limit + + let batch_env = L1BatchEnv { + fee_input: BatchFeeInput::pubdata_independent(100_000, 100_000, 100_000), + ..default_l1_batch(zksync_types::L1BatchNumber(1)) + }; + + let mut vm = VmTesterBuilder::new() + .with_storage(storage) + .with_execution_mode(TxExecutionMode::VerifyExecute) + .with_random_rich_accounts(1) + .with_l1_batch_env(batch_env) + .build(); + + let bytecodes = test_data + .bytecodes + .iter() + .map(|bytecode| { + let hash = hash_bytecode(bytecode); + let words = bytes_to_be_words(bytecode.clone()); + (h256_to_u256(hash), words) + }) + .collect(); + vm.vm.insert_bytecodes(bytecodes); + + let txs_data = populate_mimic_calls(test_data.clone()); + let account = &mut vm.rich_accounts[0]; + + for (i, data) in txs_data.into_iter().enumerate() { + let tx = account.get_l2_tx_for_execute( + Execute { + contract_address: CONTRACT_FORCE_DEPLOYER_ADDRESS, + calldata: data, + value: U256::zero(), + factory_deps: None, + }, + None, + ); + + vm.vm.push_transaction(tx); + + let result = vm.vm.execute(VmExecutionMode::OneTx); + assert!( + !result.result.is_failed(), + "Transaction {i} wasn't successful for input: {:#?}", + test_data + ); + } + + // Now we count how much ergs were spent at the end of the batch + // It is assumed that the top level frame is the bootloader + + let ergs_before = vm.vm.gas_remaining(); + + // We ensure that indeed the provided state diffs are used + let pubdata_tracer = PubdataTracer::::new_with_forced_state_diffs( + vm.vm.batch_env.clone(), + VmExecutionMode::Batch, + 
test_data.state_diffs.clone(), + ); + + let result = vm.vm.inspect_inner( + TracerDispatcher::default(), + VmExecutionMode::Batch, + Some(pubdata_tracer), + ); + + assert!( + !result.result.is_failed(), + "Batch wasn't successful for input: {:?}", + test_data + ); + + let ergs_after = vm.vm.gas_remaining(); + + assert_eq!( + (ergs_before - ergs_after) as u64, + result.statistics.gas_used + ); + + TestStatistics { + max_used_gas: ergs_before - ergs_after, + circuit_statistics: result.statistics.circuit_statistic.total() as u64, + execution_metrics_size: result.get_execution_metrics(None).size() as u64, + } +} + +fn generate_state_diffs( + repeated_writes: bool, + small_diff: bool, + number_of_state_diffs: usize, +) -> Vec { + (0..number_of_state_diffs) + .map(|i| { + let address = Address::from_low_u64_be(i as u64); + let key = U256::from(i); + let enumeration_index = if repeated_writes { i + 1 } else { 0 }; + + let (initial_value, final_value) = if small_diff { + // As small as it gets, one byte to denote zeroing out the value + (U256::from(1), U256::from(0)) + } else { + // As large as it gets + (U256::from(0), U256::from(2).pow(255.into())) + }; + + StateDiffRecord { + address, + key, + derived_key: u256_to_h256(i.into()).0, + enumeration_index: enumeration_index as u64, + initial_value, + final_value, + } + }) + .collect() +} + +// A valid zkEVM bytecode has odd number of 32 byte words +fn get_valid_bytecode_length(length: usize) -> usize { + // Firstly ensure that the length is divisible by 32 + let length_padded_to_32 = if length % 32 == 0 { + length + } else { + length + 32 - (length % 32) + }; + + // Then we ensure that the number returned by division by 32 is odd + if length_padded_to_32 % 64 == 0 { + length_padded_to_32 + 32 + } else { + length_padded_to_32 + } +} + +#[test] +fn test_dry_run_upper_bound() { + // Some of the pubdata is consumed by constant fields (such as length of messages, number of logs, etc.). + // While this leaves some room for error, at the end of the test we require that the `BOOTLOADER_BATCH_TIP_OVERHEAD` + // is sufficient with a very large margin, so it is okay to ignore 1% of possible pubdata. + const MAX_EFFECTIVE_PUBDATA_PER_BATCH: usize = + (MAX_VM_PUBDATA_PER_BATCH as f64 * 0.99) as usize; + + // We are re-using the `ComplexUpgrade` contract as it already has the `mimicCall` functionality. + // To get the upper bound, we'll try to do the following: + // 1. Max number of logs. + // 2. Lots of small L2->L1 messages / one large L2->L1 message. + // 3. Lots of small bytecodes / one large bytecode. + // 4. Lots of storage slot updates. 
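As a companion to the strategy list above (the scenarios follow right below), a sketch of how each scenario's item count is derived from the effective pubdata budget. The batch limit and the 88-byte serialized log size are assumptions for illustration, not values from this diff:

```rust
// Sketch: deriving per-scenario item counts from the pubdata budget.
fn main() {
    let max_vm_pubdata_per_batch: usize = 120_000; // assumed
    let max_effective = (max_vm_pubdata_per_batch as f64 * 0.99) as usize;
    let log_serialized_size = 88usize; // assumed serialized L2->L1 log size

    // Max logs: the budget divided by one serialized log.
    let max_logs = max_effective / log_serialized_size;
    // Max empty messages: each costs a log plus a 4-byte length prefix.
    let max_messages = max_effective / (log_serialized_size + 4);
    // Max minimal bytecodes: 32 bytes of code plus a 4-byte length prefix.
    let max_bytecodes = max_effective / (32 + 4);

    println!("{max_logs} logs, {max_messages} messages, {max_bytecodes} bytecodes");
}
```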
+ + let statistics = vec![ + // max logs + StatisticsTagged { + statistics: execute_test(L1MessengerTestData { + l2_to_l1_logs: MAX_EFFECTIVE_PUBDATA_PER_BATCH / L2ToL1Log::SERIALIZED_SIZE, + ..Default::default() + }), + tag: "max_logs".to_string(), + }, + // max messages + StatisticsTagged { + statistics: execute_test(L1MessengerTestData { + // Each L2->L1 message is accompanied by a Log + its length, which is a 4 byte number, + // so the max amount of pubdata is bound by it + messages: vec![ + vec![0; 0]; + MAX_EFFECTIVE_PUBDATA_PER_BATCH / (L2ToL1Log::SERIALIZED_SIZE + 4) + ], + ..Default::default() + }), + tag: "max_messages".to_string(), + }, + // long message + StatisticsTagged { + statistics: execute_test(L1MessengerTestData { + // Each L2->L1 message is accompanied by a Log, so the max amount of pubdata is bound by it + messages: vec![vec![0; MAX_EFFECTIVE_PUBDATA_PER_BATCH]; 1], + ..Default::default() + }), + tag: "long_message".to_string(), + }, + // max bytecodes + StatisticsTagged { + statistics: execute_test(L1MessengerTestData { + // Each bytecode must be at least 32 bytes long. + // Each uncompressed bytecode is accompanied by its length, which is a 4 byte number + bytecodes: vec![vec![0; 32]; MAX_EFFECTIVE_PUBDATA_PER_BATCH / (32 + 4)], + ..Default::default() + }), + tag: "max_bytecodes".to_string(), + }, + // long bytecode + StatisticsTagged { + statistics: execute_test(L1MessengerTestData { + bytecodes: vec![ + vec![0; get_valid_bytecode_length(MAX_EFFECTIVE_PUBDATA_PER_BATCH)]; + 1 + ], + ..Default::default() + }), + tag: "long_bytecode".to_string(), + }, + // lots of small repeated writes + StatisticsTagged { + statistics: execute_test(L1MessengerTestData { + // In theory each state diff can require only 5 bytes to be published (enum index + 4 bytes for the key) + state_diffs: generate_state_diffs(true, true, MAX_EFFECTIVE_PUBDATA_PER_BATCH / 5), + ..Default::default() + }), + tag: "small_repeated_writes".to_string(), + }, + // lots of big repeated writes + StatisticsTagged { + statistics: execute_test(L1MessengerTestData { + // Each big repeated write will approximately require 4 bytes for key + 1 byte for encoding type + 32 bytes for value + state_diffs: generate_state_diffs( + true, + false, + MAX_EFFECTIVE_PUBDATA_PER_BATCH / 37, + ), + ..Default::default() + }), + tag: "big_repeated_writes".to_string(), + }, + // lots of small initial writes + StatisticsTagged { + statistics: execute_test(L1MessengerTestData { + // Each small initial write will take at least 32 bytes for derived key + 1 byte for encoding zeroing out + state_diffs: generate_state_diffs( + false, + true, + MAX_EFFECTIVE_PUBDATA_PER_BATCH / 33, + ), + ..Default::default() + }), + tag: "small_initial_writes".to_string(), + }, + // lots of large initial writes + StatisticsTagged { + statistics: execute_test(L1MessengerTestData { + // Each big write will take at least 32 bytes for derived key + 1 byte for encoding type + 32 bytes for value + state_diffs: generate_state_diffs( + false, + false, + MAX_EFFECTIVE_PUBDATA_PER_BATCH / 65, + ), + ..Default::default() + }), + tag: "big_initial_writes".to_string(), + }, + ]; + + // We require `BOOTLOADER_BATCH_TIP_OVERHEAD` to cover the worst estimated scenario with at least a 1.5x margin (the `* 3 / 2` in the asserts below).
+ let max_used_gas = statistics + .iter() + .map(|s| (s.statistics.max_used_gas, s.tag.clone())) + .max() + .unwrap(); + assert!( + max_used_gas.0 * 3 / 2 <= BOOTLOADER_BATCH_TIP_OVERHEAD, + "BOOTLOADER_BATCH_TIP_OVERHEAD is too low for {} with result {}, BOOTLOADER_BATCH_TIP_OVERHEAD = {}", + max_used_gas.1, + max_used_gas.0, + BOOTLOADER_BATCH_TIP_OVERHEAD + ); + + let circuit_statistics = statistics + .iter() + .map(|s| (s.statistics.circuit_statistics, s.tag.clone())) + .max() + .unwrap(); + assert!( + circuit_statistics.0 * 3 / 2 <= BOOTLOADER_BATCH_TIP_CIRCUIT_STATISTICS_OVERHEAD as u64, + "BOOTLOADER_BATCH_TIP_CIRCUIT_STATISTICS_OVERHEAD is too low for {} with result {}, BOOTLOADER_BATCH_TIP_CIRCUIT_STATISTICS_OVERHEAD = {}", + circuit_statistics.1, + circuit_statistics.0, + BOOTLOADER_BATCH_TIP_CIRCUIT_STATISTICS_OVERHEAD + ); + + let execution_metrics_size = statistics + .iter() + .map(|s| (s.statistics.execution_metrics_size, s.tag.clone())) + .max() + .unwrap(); + assert!( + execution_metrics_size.0 * 3 / 2 <= BOOTLOADER_BATCH_TIP_METRICS_SIZE_OVERHEAD as u64, + "BOOTLOADER_BATCH_TIP_METRICS_SIZE_OVERHEAD is too low for {} with result {}, BOOTLOADER_BATCH_TIP_METRICS_SIZE_OVERHEAD = {}", + execution_metrics_size.1, + execution_metrics_size.0, + BOOTLOADER_BATCH_TIP_METRICS_SIZE_OVERHEAD + ); +} diff --git a/core/lib/multivm/src/versions/vm_fast/tests/bootloader.rs b/core/lib/multivm/src/versions/vm_fast/tests/bootloader.rs new file mode 100644 index 000000000000..c698d36683ef --- /dev/null +++ b/core/lib/multivm/src/versions/vm_fast/tests/bootloader.rs @@ -0,0 +1,53 @@ +use zksync_types::U256; + +use crate::{ + interface::{ExecutionResult, Halt, TxExecutionMode, VmExecutionMode, VmInterface}, + versions::vm_fast::tests::{ + tester::VmTesterBuilder, + utils::{get_bootloader, verify_required_memory, BASE_SYSTEM_CONTRACTS}, + }, +}; + +#[test] +fn test_dummy_bootloader() { + let mut base_system_contracts = BASE_SYSTEM_CONTRACTS.clone(); + base_system_contracts.bootloader = get_bootloader("dummy"); + + let mut vm = VmTesterBuilder::new() + .with_empty_in_memory_storage() + .with_base_system_smart_contracts(base_system_contracts) + .with_execution_mode(TxExecutionMode::VerifyExecute) + .build(); + + let result = vm.vm.execute(VmExecutionMode::Batch); + assert!(!result.result.is_failed()); + + let correct_first_cell = U256::from_str_radix("123123123", 16).unwrap(); + + verify_required_memory( + &vm.vm.inner.state, + vec![(correct_first_cell, vm2::FIRST_HEAP, 0)], + ); +} + +#[test] +fn test_bootloader_out_of_gas() { + let mut base_system_contracts = BASE_SYSTEM_CONTRACTS.clone(); + base_system_contracts.bootloader = get_bootloader("dummy"); + + let mut vm = VmTesterBuilder::new() + .with_empty_in_memory_storage() + .with_base_system_smart_contracts(base_system_contracts) + .with_bootloader_gas_limit(10) + .with_execution_mode(TxExecutionMode::VerifyExecute) + .build(); + + let res = vm.vm.execute(VmExecutionMode::Batch); + + assert!(matches!( + res.result, + ExecutionResult::Halt { + reason: Halt::BootloaderOutOfGas + } + )); +} diff --git a/core/lib/multivm/src/versions/vm_fast/tests/bytecode_publishing.rs b/core/lib/multivm/src/versions/vm_fast/tests/bytecode_publishing.rs new file mode 100644 index 000000000000..01fc8dc07d0b --- /dev/null +++ b/core/lib/multivm/src/versions/vm_fast/tests/bytecode_publishing.rs @@ -0,0 +1,40 @@ +use zksync_types::event::extract_long_l2_to_l1_messages; +use zksync_utils::bytecode::compress_bytecode; + +use crate::{ + interface::{TxExecutionMode, 
VmExecutionMode, VmInterface}, + vm_fast::tests::{ + tester::{DeployContractsTx, TxType, VmTesterBuilder}, + utils::read_test_contract, + }, +}; + +#[test] +fn test_bytecode_publishing() { + // In this test, we aim to ensure that the contents of the compressed bytecodes + // are included as part of the L2->L1 long messages + let mut vm = VmTesterBuilder::new() + .with_empty_in_memory_storage() + .with_execution_mode(TxExecutionMode::VerifyExecute) + .with_random_rich_accounts(1) + .build(); + + let counter = read_test_contract(); + let account = &mut vm.rich_accounts[0]; + + let compressed_bytecode = compress_bytecode(&counter).unwrap(); + + let DeployContractsTx { tx, .. } = account.get_deploy_tx(&counter, None, TxType::L2); + vm.vm.push_transaction(tx); + let result = vm.vm.execute(VmExecutionMode::OneTx); + assert!(!result.result.is_failed(), "Transaction wasn't successful"); + + vm.vm.execute(VmExecutionMode::Batch); + + let state = vm.vm.get_current_execution_state(); + let long_messages = extract_long_l2_to_l1_messages(&state.events); + assert!( + long_messages.contains(&compressed_bytecode), + "Bytecode not published" + ); +} diff --git a/core/lib/multivm/src/versions/vm_fast/tests/call_tracer.rs b/core/lib/multivm/src/versions/vm_fast/tests/call_tracer.rs new file mode 100644 index 000000000000..c97b38b6afc4 --- /dev/null +++ b/core/lib/multivm/src/versions/vm_fast/tests/call_tracer.rs @@ -0,0 +1,92 @@ +use std::sync::Arc; + +use once_cell::sync::OnceCell; +use zksync_types::{Address, Execute}; + +use crate::{ + interface::{TxExecutionMode, VmExecutionMode, VmInterface}, + tracers::CallTracer, + vm_latest::{ + constants::BATCH_COMPUTATIONAL_GAS_LIMIT, + tests::{ + tester::VmTesterBuilder, + utils::{read_max_depth_contract, read_test_contract}, + }, + HistoryEnabled, ToTracerPointer, + }, +}; + +// This test is ultra slow, so it's ignored by default. 
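// (Such ignored tests can still be run on demand with `cargo test -- --ignored`.)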
+#[test] +#[ignore] +fn test_max_depth() { + let contract = read_max_depth_contract(); + let address = Address::random(); + let mut vm = VmTesterBuilder::new(HistoryEnabled) + .with_empty_in_memory_storage() + .with_random_rich_accounts(1) + .with_deployer() + .with_bootloader_gas_limit(BATCH_COMPUTATIONAL_GAS_LIMIT) + .with_execution_mode(TxExecutionMode::VerifyExecute) + .with_custom_contracts(vec![(contract, address, true)]) + .build(); + + let account = &mut vm.rich_accounts[0]; + let tx = account.get_l2_tx_for_execute( + Execute { + contract_address: address, + calldata: vec![], + value: Default::default(), + factory_deps: None, + }, + None, + ); + + let result = Arc::new(OnceCell::new()); + let call_tracer = CallTracer::new(result.clone()).into_tracer_pointer(); + vm.vm.push_transaction(tx); + let res = vm.vm.inspect(call_tracer.into(), VmExecutionMode::OneTx); + assert!(result.get().is_some()); + assert!(res.result.is_failed()); +} + +#[test] +fn test_basic_behavior() { + let contract = read_test_contract(); + let address = Address::random(); + let mut vm = VmTesterBuilder::new(HistoryEnabled) + .with_empty_in_memory_storage() + .with_random_rich_accounts(1) + .with_deployer() + .with_bootloader_gas_limit(BATCH_COMPUTATIONAL_GAS_LIMIT) + .with_execution_mode(TxExecutionMode::VerifyExecute) + .with_custom_contracts(vec![(contract, address, true)]) + .build(); + + let increment_by_6_calldata = + "7cf5dab00000000000000000000000000000000000000000000000000000000000000006"; + + let account = &mut vm.rich_accounts[0]; + let tx = account.get_l2_tx_for_execute( + Execute { + contract_address: address, + calldata: hex::decode(increment_by_6_calldata).unwrap(), + value: Default::default(), + factory_deps: None, + }, + None, + ); + + let result = Arc::new(OnceCell::new()); + let call_tracer = CallTracer::new(result.clone()).into_tracer_pointer(); + vm.vm.push_transaction(tx); + let res = vm.vm.inspect(call_tracer.into(), VmExecutionMode::OneTx); + + let call_tracer_result = result.get().unwrap(); + + assert_eq!(call_tracer_result.len(), 1); + // Expect that there are plenty of subcalls underneath. + let subcall = &call_tracer_result[0].calls; + assert!(subcall.len() > 10); + assert!(!res.result.is_failed()); +} diff --git a/core/lib/multivm/src/versions/vm_fast/tests/circuits.rs b/core/lib/multivm/src/versions/vm_fast/tests/circuits.rs new file mode 100644 index 000000000000..c582bd28c882 --- /dev/null +++ b/core/lib/multivm/src/versions/vm_fast/tests/circuits.rs @@ -0,0 +1,74 @@ +use zksync_types::{Address, Execute, U256}; + +use crate::{ + interface::{TxExecutionMode, VmExecutionMode, VmInterface}, + vm_latest::{ + constants::BATCH_COMPUTATIONAL_GAS_LIMIT, tests::tester::VmTesterBuilder, HistoryEnabled, + }, +}; + +// Checks that the estimated number of circuits for a simple transfer doesn't differ much +// from the hardcoded expected value.
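The test below compares each measured `circuit_statistic` field with a hardcoded expectation: zero entries must match exactly, nonzero ones may drift by at most 10% relative. A minimal sketch of that acceptance rule:

```rust
// Sketch of the tolerance rule used in `test_circuits` (illustrative values).
fn within_tolerance(actual: f32, expected: f32) -> bool {
    if expected == 0.0 {
        actual == expected // zero entries must match exactly
    } else {
        ((actual - expected) / expected).abs() < 0.1 // under 10% relative drift
    }
}

fn main() {
    assert!(within_tolerance(1.40, 1.34935)); // ~3.8% off: accepted
    assert!(!within_tolerance(1.60, 1.34935)); // ~18.6% off: rejected
}
```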
+#[test] +fn test_circuits() { + let mut vm = VmTesterBuilder::new(HistoryEnabled) + .with_empty_in_memory_storage() + .with_random_rich_accounts(1) + .with_deployer() + .with_bootloader_gas_limit(BATCH_COMPUTATIONAL_GAS_LIMIT) + .with_execution_mode(TxExecutionMode::VerifyExecute) + .build(); + + let account = &mut vm.rich_accounts[0]; + let tx = account.get_l2_tx_for_execute( + Execute { + contract_address: Address::random(), + calldata: Vec::new(), + value: U256::from(1u8), + factory_deps: None, + }, + None, + ); + vm.vm.push_transaction(tx); + let res = vm.vm.inspect(Default::default(), VmExecutionMode::OneTx); + + let s = res.statistics.circuit_statistic; + // Check `circuit_statistic`. + const EXPECTED: [f32; 13] = [ + 1.34935, 0.15026, 1.66666, 0.00315, 1.0594, 0.00058, 0.00348, 0.00076, 0.11945, 0.14285, + 0.0, 0.0, 0.0, + ]; + let actual = [ + (s.main_vm, "main_vm"), + (s.ram_permutation, "ram_permutation"), + (s.storage_application, "storage_application"), + (s.storage_sorter, "storage_sorter"), + (s.code_decommitter, "code_decommitter"), + (s.code_decommitter_sorter, "code_decommitter_sorter"), + (s.log_demuxer, "log_demuxer"), + (s.events_sorter, "events_sorter"), + (s.keccak256, "keccak256"), + (s.ecrecover, "ecrecover"), + (s.sha256, "sha256"), + (s.secp256k1_verify, "secp256k1_verify"), + (s.transient_storage_checker, "transient_storage_checker"), + ]; + for ((actual, name), expected) in actual.iter().zip(EXPECTED) { + if expected == 0.0 { + assert_eq!( + *actual, expected, + "Check failed for {}, expected {}, actual {}", + name, expected, actual + ); + } else { + let diff = (actual - expected) / expected; + assert!( + diff.abs() < 0.1, + "Check failed for {}, expected {}, actual {}", + name, + expected, + actual + ); + } + } +} diff --git a/core/lib/multivm/src/versions/vm_fast/tests/code_oracle.rs b/core/lib/multivm/src/versions/vm_fast/tests/code_oracle.rs new file mode 100644 index 000000000000..24fda3beed4b --- /dev/null +++ b/core/lib/multivm/src/versions/vm_fast/tests/code_oracle.rs @@ -0,0 +1,251 @@ +use ethabi::Token; +use zksync_types::{ + get_known_code_key, web3::keccak256, Address, Execute, StorageLogWithPreviousValue, U256, +}; +use zksync_utils::{bytecode::hash_bytecode, h256_to_u256, u256_to_h256}; + +use crate::{ + interface::{TxExecutionMode, VmExecutionMode, VmInterface}, + vm_fast::tests::{ + tester::{get_empty_storage, VmTesterBuilder}, + utils::{load_precompiles_contract, read_precompiles_contract, read_test_contract}, + }, +}; + +fn generate_large_bytecode() -> Vec { + // This is the maximal possible size of a zkEVM bytecode + vec![2u8; ((1 << 16) - 1) * 32] +} + +#[test] +fn test_code_oracle() { + let precompiles_contract_address = Address::random(); + let precompile_contract_bytecode = read_precompiles_contract(); + + // Filling the zkevm bytecode + let normal_zkevm_bytecode = read_test_contract(); + let normal_zkevm_bytecode_hash = hash_bytecode(&normal_zkevm_bytecode); + let normal_zkevm_bytecode_keccak_hash = keccak256(&normal_zkevm_bytecode); + let mut storage = get_empty_storage(); + storage.set_value( + get_known_code_key(&normal_zkevm_bytecode_hash), + u256_to_h256(U256::one()), + ); + + // In this test, we aim to test whether a simple account interaction (without any fee logic) + // will work. The account will try to deploy a simple contract from integration tests. 
+ let mut vm = VmTesterBuilder::new() + .with_empty_in_memory_storage() + .with_execution_mode(TxExecutionMode::VerifyExecute) + .with_random_rich_accounts(1) + .with_custom_contracts(vec![( + precompile_contract_bytecode, + precompiles_contract_address, + false, + )]) + .with_storage(storage) + .build(); + + let precompile_contract = load_precompiles_contract(); + let call_code_oracle_function = precompile_contract.function("callCodeOracle").unwrap(); + + vm.vm.insert_bytecodes([normal_zkevm_bytecode.as_slice()]); + let account = &mut vm.rich_accounts[0]; + + // Firstly, let's ensure that the contract works. + let tx1 = account.get_l2_tx_for_execute( + Execute { + contract_address: precompiles_contract_address, + calldata: call_code_oracle_function + .encode_input(&[ + Token::FixedBytes(normal_zkevm_bytecode_hash.0.to_vec()), + Token::FixedBytes(normal_zkevm_bytecode_keccak_hash.to_vec()), + ]) + .unwrap(), + value: U256::zero(), + factory_deps: vec![], + }, + None, + ); + + vm.vm.push_transaction(tx1); + let result = vm.vm.execute(VmExecutionMode::OneTx); + assert!( + !result.result.is_failed(), + "Transaction wasn't successful: {result:#?}" + ); + + // Now, we ask for the same bytecode. We use to partially check whether the memory page with + // the decommitted bytecode gets erased (it shouldn't). + let tx2 = account.get_l2_tx_for_execute( + Execute { + contract_address: precompiles_contract_address, + calldata: call_code_oracle_function + .encode_input(&[ + Token::FixedBytes(normal_zkevm_bytecode_hash.0.to_vec()), + Token::FixedBytes(normal_zkevm_bytecode_keccak_hash.to_vec()), + ]) + .unwrap(), + value: U256::zero(), + factory_deps: vec![], + }, + None, + ); + vm.vm.push_transaction(tx2); + let result = vm.vm.execute(VmExecutionMode::OneTx); + assert!( + !result.result.is_failed(), + "Transaction wasn't successful: {result:#?}" + ); +} + +fn find_code_oracle_cost_log( + precompiles_contract_address: Address, + logs: &[StorageLogWithPreviousValue], +) -> &StorageLogWithPreviousValue { + logs.iter() + .find(|log| { + *log.log.key.address() == precompiles_contract_address && log.log.key.key().is_zero() + }) + .expect("no code oracle cost log") +} + +#[test] +fn test_code_oracle_big_bytecode() { + let precompiles_contract_address = Address::random(); + let precompile_contract_bytecode = read_precompiles_contract(); + + let big_zkevm_bytecode = generate_large_bytecode(); + let big_zkevm_bytecode_hash = hash_bytecode(&big_zkevm_bytecode); + let big_zkevm_bytecode_keccak_hash = keccak256(&big_zkevm_bytecode); + + let mut storage = get_empty_storage(); + storage.set_value( + get_known_code_key(&big_zkevm_bytecode_hash), + u256_to_h256(U256::one()), + ); + + // In this test, we aim to test whether a simple account interaction (without any fee logic) + // will work. The account will try to deploy a simple contract from integration tests. + let mut vm = VmTesterBuilder::new() + .with_empty_in_memory_storage() + .with_execution_mode(TxExecutionMode::VerifyExecute) + .with_random_rich_accounts(1) + .with_custom_contracts(vec![( + precompile_contract_bytecode, + precompiles_contract_address, + false, + )]) + .with_storage(storage) + .build(); + + let precompile_contract = load_precompiles_contract(); + let call_code_oracle_function = precompile_contract.function("callCodeOracle").unwrap(); + + vm.vm.insert_bytecodes([big_zkevm_bytecode.as_slice()]); + + let account = &mut vm.rich_accounts[0]; + + // Firstly, let's ensure that the contract works. 
+ let tx1 = account.get_l2_tx_for_execute( + Execute { + contract_address: precompiles_contract_address, + calldata: call_code_oracle_function + .encode_input(&[ + Token::FixedBytes(big_zkevm_bytecode_hash.0.to_vec()), + Token::FixedBytes(big_zkevm_bytecode_keccak_hash.to_vec()), + ]) + .unwrap(), + value: U256::zero(), + factory_deps: vec![], + }, + None, + ); + + vm.vm.push_transaction(tx1); + let result = vm.vm.execute(VmExecutionMode::OneTx); + assert!( + !result.result.is_failed(), + "Transaction wasn't successful: {result:#?}" + ); +} + +#[test] +fn refunds_in_code_oracle() { + let precompiles_contract_address = Address::random(); + let precompile_contract_bytecode = read_precompiles_contract(); + + let normal_zkevm_bytecode = read_test_contract(); + let normal_zkevm_bytecode_hash = hash_bytecode(&normal_zkevm_bytecode); + let normal_zkevm_bytecode_keccak_hash = keccak256(&normal_zkevm_bytecode); + let mut storage = get_empty_storage(); + storage.set_value( + get_known_code_key(&normal_zkevm_bytecode_hash), + u256_to_h256(U256::one()), + ); + + let precompile_contract = load_precompiles_contract(); + let call_code_oracle_function = precompile_contract.function("callCodeOracle").unwrap(); + + // Execute code oracle twice with identical VM state that only differs in that the queried bytecode + // is already decommitted the second time. The second call must consume less gas (`decommit` doesn't charge additional gas + // for already decommitted codes). + let mut oracle_costs = vec![]; + for decommit in [false, true] { + let mut vm = VmTesterBuilder::new() + .with_execution_mode(TxExecutionMode::VerifyExecute) + .with_random_rich_accounts(1) + .with_custom_contracts(vec![( + precompile_contract_bytecode.clone(), + precompiles_contract_address, + false, + )]) + .with_storage(storage.clone()) + .build(); + + vm.vm.insert_bytecodes([normal_zkevm_bytecode.as_slice()]); + + let account = &mut vm.rich_accounts[0]; + if decommit { + let (_, is_fresh) = vm + .vm + .inner + .world_diff + .decommit_opcode(&mut vm.vm.world, h256_to_u256(normal_zkevm_bytecode_hash)); + assert!(is_fresh); + } + + let tx = account.get_l2_tx_for_execute( + Execute { + contract_address: precompiles_contract_address, + calldata: call_code_oracle_function + .encode_input(&[ + Token::FixedBytes(normal_zkevm_bytecode_hash.0.to_vec()), + Token::FixedBytes(normal_zkevm_bytecode_keccak_hash.to_vec()), + ]) + .unwrap(), + value: U256::zero(), + factory_deps: vec![], + }, + None, + ); + + vm.vm.push_transaction(tx); + let result = vm.vm.execute(VmExecutionMode::OneTx); + assert!( + !result.result.is_failed(), + "Transaction wasn't successful: {result:#?}" + ); + let log = + find_code_oracle_cost_log(precompiles_contract_address, &result.logs.storage_logs); + oracle_costs.push(log.log.value); + } + + // The refund is equal to `gasCost` parameter passed to the `decommit` opcode, which is defined as `4 * contract_length_in_words` + // in `CodeOracle.yul`. 
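+ // Worked example with illustrative numbers: a 6_400-byte bytecode spans 6_400 / 32 = 200 words, + // so the repeated decommit should be cheaper by 4 * 200 = 800 gas; the assertion below checks + // exactly this difference in the recorded oracle costs.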
+ let code_oracle_refund = h256_to_u256(oracle_costs[0]) - h256_to_u256(oracle_costs[1]); + assert_eq!( + code_oracle_refund, + (4 * (normal_zkevm_bytecode.len() / 32)).into() + ); +} diff --git a/core/lib/multivm/src/versions/vm_fast/tests/default_aa.rs b/core/lib/multivm/src/versions/vm_fast/tests/default_aa.rs new file mode 100644 index 000000000000..460c8251652b --- /dev/null +++ b/core/lib/multivm/src/versions/vm_fast/tests/default_aa.rs @@ -0,0 +1,81 @@ +use zksync_system_constants::L2_BASE_TOKEN_ADDRESS; +use zksync_types::{ + get_code_key, get_known_code_key, get_nonce_key, + system_contracts::{DEPLOYMENT_NONCE_INCREMENT, TX_NONCE_INCREMENT}, + AccountTreeId, U256, +}; +use zksync_utils::u256_to_h256; + +use crate::{ + interface::{TxExecutionMode, VmExecutionMode, VmInterface}, + vm_fast::tests::{ + tester::{DeployContractsTx, TxType, VmTesterBuilder}, + utils::{get_balance, read_test_contract, verify_required_storage}, + }, + vm_latest::utils::fee::get_batch_base_fee, +}; + +#[test] +fn test_default_aa_interaction() { + // In this test, we aim to test whether a simple account interaction (without any fee logic) + // will work. The account will try to deploy a simple contract from integration tests. + let mut vm = VmTesterBuilder::new() + .with_empty_in_memory_storage() + .with_execution_mode(TxExecutionMode::VerifyExecute) + .with_random_rich_accounts(1) + .build(); + + let counter = read_test_contract(); + let account = &mut vm.rich_accounts[0]; + let DeployContractsTx { + tx, + bytecode_hash, + address, + } = account.get_deploy_tx(&counter, None, TxType::L2); + let maximal_fee = tx.gas_limit() * get_batch_base_fee(&vm.vm.batch_env); + + vm.vm.push_transaction(tx); + let result = vm.vm.execute(VmExecutionMode::OneTx); + assert!(!result.result.is_failed(), "Transaction wasn't successful"); + + vm.vm.execute(VmExecutionMode::Batch); + + vm.vm.get_current_execution_state(); + + // Both deployment and ordinary nonce should be incremented by one. + let account_nonce_key = get_nonce_key(&account.address); + let expected_nonce = TX_NONCE_INCREMENT + DEPLOYMENT_NONCE_INCREMENT; + + // The code hash of the deployed contract should be marked as republished. + let known_codes_key = get_known_code_key(&bytecode_hash); + + // The contract should be deployed successfully. 
+ let account_code_key = get_code_key(&address); + + let expected_slots = [ + (u256_to_h256(expected_nonce), account_nonce_key), + (u256_to_h256(U256::from(1u32)), known_codes_key), + (bytecode_hash, account_code_key), + ]; + + verify_required_storage( + &expected_slots, + &mut vm.vm.world.storage, + vm.vm.inner.world_diff.get_storage_state(), + ); + + let expected_fee = maximal_fee + - U256::from(result.refunds.gas_refunded) + * U256::from(get_batch_base_fee(&vm.vm.batch_env)); + let operator_balance = get_balance( + AccountTreeId::new(L2_BASE_TOKEN_ADDRESS), + &vm.fee_account, + &mut vm.vm.world.storage, + vm.vm.inner.world_diff.get_storage_state(), + ); + + assert_eq!( + operator_balance, expected_fee, + "Operator did not receive its fee" + ); +} diff --git a/core/lib/multivm/src/versions/vm_fast/tests/gas_limit.rs b/core/lib/multivm/src/versions/vm_fast/tests/gas_limit.rs new file mode 100644 index 000000000000..e0c55c5a685a --- /dev/null +++ b/core/lib/multivm/src/versions/vm_fast/tests/gas_limit.rs @@ -0,0 +1,36 @@ +use zksync_test_account::Account; +use zksync_types::{fee::Fee, Execute}; + +use crate::{ + interface::{TxExecutionMode, VmInterface}, + vm_fast::tests::tester::VmTesterBuilder, + vm_latest::constants::{TX_DESCRIPTION_OFFSET, TX_GAS_LIMIT_OFFSET}, +}; + +/// Checks that the `TX_GAS_LIMIT_OFFSET` constant is correct. +#[test] +fn test_tx_gas_limit_offset() { + let mut vm = VmTesterBuilder::new() + .with_empty_in_memory_storage() + .with_execution_mode(TxExecutionMode::VerifyExecute) + .with_random_rich_accounts(1) + .build(); + + let gas_limit = 9999.into(); + let tx = vm.rich_accounts[0].get_l2_tx_for_execute( + Execute::default(), + Some(Fee { + gas_limit, + ..Account::default_fee() + }), + ); + + vm.vm.push_transaction(tx); + + assert!(vm.vm.inner.state.previous_frames.is_empty()); + let gas_limit_from_memory = vm + .vm + .read_word_from_bootloader_heap(TX_DESCRIPTION_OFFSET + TX_GAS_LIMIT_OFFSET); + + assert_eq!(gas_limit_from_memory, gas_limit); +} diff --git a/core/lib/multivm/src/versions/vm_fast/tests/get_used_contracts.rs b/core/lib/multivm/src/versions/vm_fast/tests/get_used_contracts.rs new file mode 100644 index 000000000000..af90566671ee --- /dev/null +++ b/core/lib/multivm/src/versions/vm_fast/tests/get_used_contracts.rs @@ -0,0 +1,96 @@ +use std::collections::HashSet; + +use itertools::Itertools; +use zksync_state::ReadStorage; +use zksync_system_constants::CONTRACT_DEPLOYER_ADDRESS; +use zksync_test_account::Account; +use zksync_types::{Execute, U256}; +use zksync_utils::{bytecode::hash_bytecode, h256_to_u256}; + +use crate::{ + interface::{TxExecutionMode, VmExecutionMode, VmInterface}, + vm_fast::{ + tests::{ + tester::{TxType, VmTesterBuilder}, + utils::{read_test_contract, BASE_SYSTEM_CONTRACTS}, + }, + vm::Vm, + }, +}; + +#[test] +fn test_get_used_contracts() { + let mut vm = VmTesterBuilder::new() + .with_empty_in_memory_storage() + .with_execution_mode(TxExecutionMode::VerifyExecute) + .build(); + + assert!(known_bytecodes_without_aa_code(&vm.vm).is_empty()); + + // Create, push, and execute a transaction with non-empty factory deps and a success status + // to check that `decommitted_hashes()` is updated. + let contract_code = read_test_contract(); + let mut account = Account::random(); + let tx = account.get_deploy_tx(&contract_code, None, TxType::L1 { serial_id: 0 }); + vm.vm.push_transaction(tx.tx.clone()); + let result = vm.vm.execute(VmExecutionMode::OneTx); + assert!(!result.result.is_failed()); + + assert!(vm + .vm + .decommitted_hashes()
.contains(&h256_to_u256(tx.bytecode_hash))); + + // Note: `Default_AA` will be in the list of used contracts if an L2 tx is used + assert_eq!( + vm.vm.decommitted_hashes().collect::<HashSet<_>>(), + known_bytecodes_without_aa_code(&vm.vm) + ); + + // Create, push, and execute a transaction with non-empty factory deps that fails + // (`known_bytecodes` will be updated, but we expect `decommitted_hashes()` not to be) + + let calldata = [1, 2, 3]; + let big_calldata: Vec<u8> = calldata + .iter() + .cycle() + .take(calldata.len() * 1024) + .cloned() + .collect(); + let account2 = Account::random(); + let tx2 = account2.get_l1_tx( + Execute { + contract_address: CONTRACT_DEPLOYER_ADDRESS, + calldata: big_calldata, + value: Default::default(), + factory_deps: vec![vec![1; 32]], + }, + 1, + ); + + vm.vm.push_transaction(tx2.clone()); + + let res2 = vm.vm.execute(VmExecutionMode::OneTx); + + assert!(res2.result.is_failed()); + + for factory_dep in tx2.execute.factory_deps { + let hash = hash_bytecode(&factory_dep); + let hash_to_u256 = h256_to_u256(hash); + assert!(known_bytecodes_without_aa_code(&vm.vm).contains(&hash_to_u256)); + assert!(!vm.vm.decommitted_hashes().contains(&hash_to_u256)); + } +} + +fn known_bytecodes_without_aa_code<S: ReadStorage>(vm: &Vm<S>) -> HashSet<U256> { + let mut known_bytecodes_without_aa_code = vm + .world + .bytecode_cache + .keys() + .cloned() + .collect::<HashSet<_>>(); + + known_bytecodes_without_aa_code.remove(&h256_to_u256(BASE_SYSTEM_CONTRACTS.default_aa.hash)); + + known_bytecodes_without_aa_code +} diff --git a/core/lib/multivm/src/versions/vm_fast/tests/invalid_bytecode.rs b/core/lib/multivm/src/versions/vm_fast/tests/invalid_bytecode.rs new file mode 100644 index 000000000000..dde83d8a9f36 --- /dev/null +++ b/core/lib/multivm/src/versions/vm_fast/tests/invalid_bytecode.rs @@ -0,0 +1,120 @@ +use zksync_types::H256; +use zksync_utils::h256_to_u256; + +use crate::vm_latest::tests::tester::VmTesterBuilder; +use crate::vm_latest::types::inputs::system_env::TxExecutionMode; +use crate::vm_latest::{HistoryEnabled, TxRevertReason}; + +// TODO this test requires a lot of hacks for bypassing the bytecode checks in the VM. +// Port it later; it's not significant for now. + +#[test] +fn test_invalid_bytecode() { + let mut vm_builder = VmTesterBuilder::new(HistoryEnabled) + .with_in_memory_storage() + .with_execution_mode(TxExecutionMode::VerifyExecute) + .with_random_rich_accounts(1); + let mut storage = vm_builder.take_storage(); + let mut vm = vm_builder.build(&mut storage); + + let block_gas_per_pubdata = vm_test_env + .block_context + .context + .block_gas_price_per_pubdata(); + + let mut test_vm_with_custom_bytecode_hash = + |bytecode_hash: H256, expected_revert_reason: Option<TxRevertReason>| { + let mut oracle_tools = + OracleTools::new(vm_test_env.storage_ptr.as_mut(), HistoryEnabled); + + let (encoded_tx, predefined_overhead) = get_l1_tx_with_custom_bytecode_hash( + h256_to_u256(bytecode_hash), + block_gas_per_pubdata as u32, + ); + + run_vm_with_custom_factory_deps( + &mut oracle_tools, + vm_test_env.block_context.context, + &vm_test_env.block_properties, + encoded_tx, + predefined_overhead, + expected_revert_reason, + ); + }; + + let failed_to_mark_factory_deps = |msg: &str, data: Vec<u8>| { + TxRevertReason::FailedToMarkFactoryDependencies(VmRevertReason::General { + msg: msg.to_string(), + data, + }) + }; + + // Here we provide the correctly-formatted bytecode hash of + // odd length, so it should work.
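+ // Layout of the versioned hashes below (inferred from the cases themselves): byte 0 is the + // version (must be 1), byte 1 must be 0, and bytes 2..4 encode the bytecode length in 32-byte + // words big-endian (must be odd). So `[1, 0, 0, 1, ..]` is a valid 1-word hash, + // `[1, 0, 2, 2, ..]` has an even length of 0x0202 words, and `[1, 1, ..]` / `[2, 0, ..]` have + // invalid marker/version bytes.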
+ test_vm_with_custom_bytecode_hash( + H256([ + 1, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, + ]), + None, + ); + + // Here we provide a correctly formatted bytecode hash of even length, so + // it should fail. + test_vm_with_custom_bytecode_hash( + H256([ + 1, 0, 2, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, + ]), + Some(failed_to_mark_factory_deps( + "Code length in words must be odd", + vec![ + 8, 195, 121, 160, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 32, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 32, 67, 111, 100, 101, 32, 108, 101, 110, + 103, 116, 104, 32, 105, 110, 32, 119, 111, 114, 100, 115, 32, 109, 117, 115, 116, + 32, 98, 101, 32, 111, 100, 100, + ], + )), + ); + + // Here we provide an incorrectly formatted bytecode hash of odd length, so + // it should fail. + test_vm_with_custom_bytecode_hash( + H256([ + 1, 1, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, + ]), + Some(failed_to_mark_factory_deps( + "Incorrectly formatted bytecodeHash", + vec![ + 8, 195, 121, 160, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 32, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 34, 73, 110, 99, 111, 114, 114, 101, 99, + 116, 108, 121, 32, 102, 111, 114, 109, 97, 116, 116, 101, 100, 32, 98, 121, 116, + 101, 99, 111, 100, 101, 72, 97, 115, 104, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + ], + )), + ); + + // Here we provide an incorrectly formatted bytecode hash of odd length, so + // it should fail. + test_vm_with_custom_bytecode_hash( + H256([ + 2, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, + ]), + Some(failed_to_mark_factory_deps( + "Incorrectly formatted bytecodeHash", + vec![ + 8, 195, 121, 160, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 32, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 34, 73, 110, 99, 111, 114, 114, 101, 99, + 116, 108, 121, 32, 102, 111, 114, 109, 97, 116, 116, 101, 100, 32, 98, 121, 116, + 101, 99, 111, 100, 101, 72, 97, 115, 104, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + ], + )), + ); +} diff --git a/core/lib/multivm/src/versions/vm_fast/tests/is_write_initial.rs b/core/lib/multivm/src/versions/vm_fast/tests/is_write_initial.rs new file mode 100644 index 000000000000..0bbf633254eb --- /dev/null +++ b/core/lib/multivm/src/versions/vm_fast/tests/is_write_initial.rs @@ -0,0 +1,45 @@ +use zksync_state::ReadStorage; +use zksync_types::get_nonce_key; + +use crate::{ + interface::{TxExecutionMode, VmExecutionMode, VmInterface}, + vm_fast::tests::{ + tester::{Account, TxType, VmTesterBuilder}, + utils::read_test_contract, + }, +}; + +#[test] +fn test_is_write_initial_behaviour() { + // In this test, we check the result of `is_write_initial` at different stages. + // The main idea is to check that `is_write_initial` uses the correct cache for initial writes and doesn't + // mix it up with the repeated writes during a single batch execution.
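+ // For example, the first write to a nonce key in a batch is an *initial* write (the key is + // absent from the underlying storage), while a second write to the same key within the batch is + // a *repeated* write; `is_write_initial` must still answer based on the underlying storage + // rather than the in-batch write cache.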
+ + let mut account = Account::random(); + let mut vm = VmTesterBuilder::new() + .with_empty_in_memory_storage() + .with_execution_mode(TxExecutionMode::VerifyExecute) + .with_rich_accounts(vec![account.clone()]) + .build(); + + let nonce_key = get_nonce_key(&account.address); + // Check that the next write to the nonce key will be initial. + assert!(vm + .storage + .as_ref() + .borrow_mut() + .is_write_initial(&nonce_key)); + + let contract_code = read_test_contract(); + let tx = account.get_deploy_tx(&contract_code, None, TxType::L2).tx; + + vm.vm.push_transaction(tx); + vm.vm.execute(VmExecutionMode::OneTx); + + // Check that `is_write_initial` still returns true for the nonce key. + assert!(vm + .storage + .as_ref() + .borrow_mut() + .is_write_initial(&nonce_key)); +} diff --git a/core/lib/multivm/src/versions/vm_fast/tests/l1_tx_execution.rs b/core/lib/multivm/src/versions/vm_fast/tests/l1_tx_execution.rs new file mode 100644 index 000000000000..033a7b2658fa --- /dev/null +++ b/core/lib/multivm/src/versions/vm_fast/tests/l1_tx_execution.rs @@ -0,0 +1,199 @@ +use ethabi::Token; +use zksync_contracts::l1_messenger_contract; +use zksync_system_constants::{BOOTLOADER_ADDRESS, L1_MESSENGER_ADDRESS}; +use zksync_types::{ + get_code_key, get_known_code_key, + l2_to_l1_log::{L2ToL1Log, UserL2ToL1Log}, + storage_writes_deduplicator::StorageWritesDeduplicator, + Execute, ExecuteTransactionCommon, U256, +}; +use zksync_utils::{h256_to_u256, u256_to_h256}; + +use crate::{ + interface::{TxExecutionMode, VmExecutionMode, VmInterface}, + vm_fast::{ + tests::{ + tester::{TxType, VmTesterBuilder}, + utils::{read_test_contract, BASE_SYSTEM_CONTRACTS}, + }, + transaction_data::TransactionData, + }, +}; + +#[test] +fn test_l1_tx_execution() { + // In this test, we try to execute a contract deployment from L1. + // Instead of marking the code hash as known via the bootloader, we use + // L1->L2 communication, the same way it would likely be done in priority mode. + + // There are always at least 9 initial writes here, because we pay fees from L1: + // - `totalSupply` of ETH token + // - balance of the refund recipient + // - balance of the bootloader + // - `tx_rolling` hash + // - `gasPerPubdataByte` + // - `basePubdataSpent` + // - rolling hash of L2->L1 logs + // - transaction number in block counter + // - L2->L1 log counter in `L1Messenger` + + // TODO(PLA-537): right now we are using 5 slots instead of 9 due to the 0 fee for the transaction. + let basic_initial_writes = 5; + + let mut vm = VmTesterBuilder::new() + .with_empty_in_memory_storage() + .with_base_system_smart_contracts(BASE_SYSTEM_CONTRACTS.clone()) + .with_execution_mode(TxExecutionMode::VerifyExecute) + .with_random_rich_accounts(1) + .build(); + + let contract_code = read_test_contract(); + let account = &mut vm.rich_accounts[0]; + let deploy_tx = account.get_deploy_tx(&contract_code, None, TxType::L1 { serial_id: 1 }); + let tx_data: TransactionData = deploy_tx.tx.clone().into(); + + let required_l2_to_l1_logs: Vec<_> = vec![L2ToL1Log { + shard_id: 0, + is_service: true, + tx_number_in_block: 0, + sender: BOOTLOADER_ADDRESS, + key: tx_data.tx_hash(0.into()), + value: u256_to_h256(U256::from(1u32)), + }] + .into_iter() + .map(UserL2ToL1Log) + .collect(); + + vm.vm.push_transaction(deploy_tx.tx.clone()); + + let res = vm.vm.execute(VmExecutionMode::OneTx); + + // The code hash of the deployed contract should be marked as republished.
+ let known_codes_key = get_known_code_key(&deploy_tx.bytecode_hash); + + // The contract should be deployed successfully. + let account_code_key = get_code_key(&deploy_tx.address); + + assert!(!res.result.is_failed()); + + for (expected_value, storage_location) in [ + (U256::from(1u32), known_codes_key), + (h256_to_u256(deploy_tx.bytecode_hash), account_code_key), + ] { + assert_eq!( + expected_value, + vm.vm.inner.world_diff.get_storage_state()[&( + *storage_location.address(), + h256_to_u256(*storage_location.key()) + )] + ); + } + + assert_eq!(res.logs.user_l2_to_l1_logs, required_l2_to_l1_logs); + + let tx = account.get_test_contract_transaction( + deploy_tx.address, + true, + None, + false, + TxType::L1 { serial_id: 0 }, + ); + vm.vm.push_transaction(tx); + let res = vm.vm.execute(VmExecutionMode::OneTx); + let storage_logs = res.logs.storage_logs; + let res = StorageWritesDeduplicator::apply_on_empty_state(&storage_logs); + + // Tx panicked + assert_eq!(res.initial_storage_writes, basic_initial_writes); + + let tx = account.get_test_contract_transaction( + deploy_tx.address, + false, + None, + false, + TxType::L1 { serial_id: 0 }, + ); + vm.vm.push_transaction(tx.clone()); + let res = vm.vm.execute(VmExecutionMode::OneTx); + let storage_logs = res.logs.storage_logs; + let res = StorageWritesDeduplicator::apply_on_empty_state(&storage_logs); + // We changed one slot inside the contract. However, the rewrite of `basePubdataSpent` didn't happen, since it was the same + // as at the start of the previous tx. Thus we have `+1` slot for the changed counter and `-1` slot for base pubdata spent + assert_eq!(res.initial_storage_writes, basic_initial_writes); + + // No repeated writes + let repeated_writes = res.repeated_storage_writes; + assert_eq!(res.repeated_storage_writes, 0); + + vm.vm.push_transaction(tx); + let storage_logs = vm.vm.execute(VmExecutionMode::OneTx).logs.storage_logs; + let res = StorageWritesDeduplicator::apply_on_empty_state(&storage_logs); + // We do the same storage write; it will be deduplicated, so the counter adds no new initial writes and there are 0 repeated ones. + // But now the base pubdata spent has changed too, giving one extra initial write. + assert_eq!(res.initial_storage_writes, basic_initial_writes + 1); + assert_eq!(res.repeated_storage_writes, repeated_writes); + + let tx = account.get_test_contract_transaction( + deploy_tx.address, + false, + Some(10.into()), + false, + TxType::L1 { serial_id: 1 }, + ); + vm.vm.push_transaction(tx); + let result = vm.vm.execute(VmExecutionMode::OneTx); + // The method is not payable, so the tx should fail + assert!(result.result.is_failed(), "The transaction should fail"); + + let res = StorageWritesDeduplicator::apply_on_empty_state(&result.logs.storage_logs); + assert_eq!(res.initial_storage_writes, basic_initial_writes); + assert_eq!(res.repeated_storage_writes, 1); +} + +#[test] +fn test_l1_tx_execution_high_gas_limit() { + // In this test, we try to execute an L1->L2 transaction with a high gas limit. + // Usually priority transactions with a dangerously high gas limit shouldn't even pass the checks on L1; + // however, they might slip through during the transition period to the new fee model, so we check that we can safely process those.
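+ // In other words, the point is graceful failure: a 300M gas limit should never make it past the + // L1-side checks, but if it does, the bootloader must reject the transaction instead of + // mis-processing it.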
+ + let mut vm = VmTesterBuilder::new() + .with_empty_in_memory_storage() + .with_base_system_smart_contracts(BASE_SYSTEM_CONTRACTS.clone()) + .with_execution_mode(TxExecutionMode::VerifyExecute) + .with_random_rich_accounts(1) + .build(); + + let account = &mut vm.rich_accounts[0]; + + let l1_messenger = l1_messenger_contract(); + + let contract_function = l1_messenger.function("sendToL1").unwrap(); + let params = [ + // Even a message of size 100k should not be able to be sent by a priority transaction + Token::Bytes(vec![0u8; 100_000]), + ]; + let calldata = contract_function.encode_input(&params).unwrap(); + + let mut tx = account.get_l1_tx( + Execute { + contract_address: L1_MESSENGER_ADDRESS, + value: 0.into(), + factory_deps: vec![], + calldata, + }, + 0, + ); + + if let ExecuteTransactionCommon::L1(data) = &mut tx.common_data { + // Using some large gas limit + data.gas_limit = 300_000_000.into(); + } else { + unreachable!() + }; + + vm.vm.push_transaction(tx); + + let res = vm.vm.execute(VmExecutionMode::OneTx); + + assert!(res.result.is_failed(), "The transaction should've failed"); +} diff --git a/core/lib/multivm/src/versions/vm_fast/tests/l2_blocks.rs b/core/lib/multivm/src/versions/vm_fast/tests/l2_blocks.rs new file mode 100644 index 000000000000..1f9d0aaff091 --- /dev/null +++ b/core/lib/multivm/src/versions/vm_fast/tests/l2_blocks.rs @@ -0,0 +1,424 @@ +//! +//! Tests for the bootloader +//! The description for each of the tests can be found in the corresponding `.yul` file. +//! + +use zksync_state::ReadStorage; +use zksync_system_constants::REQUIRED_L1_TO_L2_GAS_PER_PUBDATA_BYTE; +use zksync_types::{ + block::{pack_block_info, L2BlockHasher}, + AccountTreeId, Execute, ExecuteTransactionCommon, L1BatchNumber, L1TxCommonData, L2BlockNumber, + ProtocolVersionId, StorageKey, Transaction, H160, H256, SYSTEM_CONTEXT_ADDRESS, + SYSTEM_CONTEXT_BLOCK_INFO_POSITION, SYSTEM_CONTEXT_CURRENT_L2_BLOCK_INFO_POSITION, + SYSTEM_CONTEXT_CURRENT_TX_ROLLING_HASH_POSITION, U256, +}; +use zksync_utils::{h256_to_u256, u256_to_h256}; + +use crate::{ + interface::{ExecutionResult, Halt, L2BlockEnv, TxExecutionMode, VmExecutionMode, VmInterface}, + vm_fast::{ + tests::tester::{default_l1_batch, VmTesterBuilder}, + vm::Vm, + }, + vm_latest::{ + constants::{TX_OPERATOR_L2_BLOCK_INFO_OFFSET, TX_OPERATOR_SLOTS_PER_L2_BLOCK_INFO}, + utils::l2_blocks::get_l2_block_hash_key, + }, +}; + +fn get_l1_noop() -> Transaction { + Transaction { + common_data: ExecuteTransactionCommon::L1(L1TxCommonData { + sender: H160::random(), + gas_limit: U256::from(2000000u32), + gas_per_pubdata_limit: REQUIRED_L1_TO_L2_GAS_PER_PUBDATA_BYTE.into(), + ..Default::default() + }), + execute: Execute { + contract_address: H160::zero(), + calldata: vec![], + value: U256::zero(), + factory_deps: vec![], + }, + received_timestamp_ms: 0, + raw_bytes: None, + } +} + +#[test] +fn test_l2_block_initialization_timestamp() { + // This test checks that the L2 block initialization works correctly. + // Here we check that the first block must have a timestamp greater than or equal to the timestamp + // of the current batch. + + let mut vm = VmTesterBuilder::new() + .with_empty_in_memory_storage() + .with_execution_mode(TxExecutionMode::VerifyExecute) + .with_random_rich_accounts(1) + .build(); + + // Override the timestamp of the current L2 block to be 0.
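+ // The batch environment created by the tester has a positive timestamp, so a first L2 block + // with timestamp 0 must violate the "block timestamp >= batch timestamp" rule and be rejected, + // as asserted below.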
+ vm.vm.bootloader_state.push_l2_block(L2BlockEnv { + number: 1, + timestamp: 0, + prev_block_hash: L2BlockHasher::legacy_hash(L2BlockNumber(0)), + max_virtual_blocks_to_create: 1, + }); + let l1_tx = get_l1_noop(); + + vm.vm.push_transaction(l1_tx); + let res = vm.vm.execute(VmExecutionMode::OneTx); + + assert_eq!( + res.result, + ExecutionResult::Halt {reason: Halt::FailedToSetL2Block("The timestamp of the L2 block must be greater than or equal to the timestamp of the current batch".to_string())} + ); +} + +#[test] +fn test_l2_block_initialization_number_non_zero() { + // This test checks that the L2 block initialization works correctly. + // Here we check that the first L2 block number cannot be zero. + + let l1_batch = default_l1_batch(L1BatchNumber(1)); + let first_l2_block = L2BlockEnv { + number: 0, + timestamp: l1_batch.timestamp, + prev_block_hash: L2BlockHasher::legacy_hash(L2BlockNumber(0)), + max_virtual_blocks_to_create: 1, + }; + + let mut vm = VmTesterBuilder::new() + .with_empty_in_memory_storage() + .with_execution_mode(TxExecutionMode::VerifyExecute) + .with_l1_batch_env(l1_batch) + .with_random_rich_accounts(1) + .build(); + + let l1_tx = get_l1_noop(); + + vm.vm.push_transaction(l1_tx); + + set_manual_l2_block_info(&mut vm.vm, 0, first_l2_block); + + let res = vm.vm.execute(VmExecutionMode::OneTx); + + assert_eq!( + res.result, + ExecutionResult::Halt { + reason: Halt::FailedToSetL2Block( + "L2 block number is never expected to be zero".to_string() + ) + } + ); +} + +fn test_same_l2_block( + expected_error: Option<Halt>, + override_timestamp: Option<u64>, + override_prev_block_hash: Option<H256>, +) { + let mut l1_batch = default_l1_batch(L1BatchNumber(1)); + l1_batch.timestamp = 1; + let mut vm = VmTesterBuilder::new() + .with_empty_in_memory_storage() + .with_execution_mode(TxExecutionMode::VerifyExecute) + .with_l1_batch_env(l1_batch) + .with_random_rich_accounts(1) + .build(); + + let l1_tx = get_l1_noop(); + vm.vm.push_transaction(l1_tx.clone()); + let res = vm.vm.execute(VmExecutionMode::OneTx); + assert!(!res.result.is_failed()); + + let mut current_l2_block = vm.vm.batch_env.first_l2_block; + + if let Some(timestamp) = override_timestamp { + current_l2_block.timestamp = timestamp; + } + if let Some(prev_block_hash) = override_prev_block_hash { + current_l2_block.prev_block_hash = prev_block_hash; + } + + if (None, None) == (override_timestamp, override_prev_block_hash) { + current_l2_block.max_virtual_blocks_to_create = 0; + } + + vm.vm.push_transaction(l1_tx); + set_manual_l2_block_info(&mut vm.vm, 1, current_l2_block); + + let result = vm.vm.execute(VmExecutionMode::OneTx); + + if let Some(err) = expected_error { + assert_eq!(result.result, ExecutionResult::Halt { reason: err }); + } else { + assert_eq!(result.result, ExecutionResult::Success { output: vec![] }); + } +} + +#[test] +fn test_l2_block_same_l2_block() { + // This test aims to test the case when there are multiple transactions inside the same L2 block.
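+ // All three cases below share one flow: execute a first transaction normally, then tamper with + // the L2 block info for the second transaction and check whether the bootloader accepts it.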
+ + // Case 1: Incorrect timestamp + test_same_l2_block( + Some(Halt::FailedToSetL2Block( + "The timestamp of the same L2 block must be same".to_string(), + )), + Some(0), + None, + ); + + // Case 2: Incorrect previous block hash + test_same_l2_block( + Some(Halt::FailedToSetL2Block( + "The previous hash of the same L2 block must be same".to_string(), + )), + None, + Some(H256::zero()), + ); + + // Case 3: Correct continuation of the same L2 block + test_same_l2_block(None, None, None); +} + +fn test_new_l2_block( + first_l2_block: L2BlockEnv, + overriden_second_block_number: Option<u32>, + overriden_second_block_timestamp: Option<u64>, + overriden_second_block_prev_block_hash: Option<H256>, + expected_error: Option<Halt>, +) { + let mut l1_batch = default_l1_batch(L1BatchNumber(1)); + l1_batch.timestamp = 1; + l1_batch.first_l2_block = first_l2_block; + + let mut vm = VmTesterBuilder::new() + .with_empty_in_memory_storage() + .with_l1_batch_env(l1_batch) + .with_execution_mode(TxExecutionMode::VerifyExecute) + .with_random_rich_accounts(1) + .build(); + + let l1_tx = get_l1_noop(); + + // First, we execute the first transaction + vm.vm.push_transaction(l1_tx.clone()); + vm.vm.execute(VmExecutionMode::OneTx); + + let mut second_l2_block = vm.vm.batch_env.first_l2_block; + second_l2_block.number += 1; + second_l2_block.timestamp += 1; + second_l2_block.prev_block_hash = vm.vm.bootloader_state.last_l2_block().get_hash(); + + if let Some(block_number) = overriden_second_block_number { + second_l2_block.number = block_number; + } + if let Some(timestamp) = overriden_second_block_timestamp { + second_l2_block.timestamp = timestamp; + } + if let Some(prev_block_hash) = overriden_second_block_prev_block_hash { + second_l2_block.prev_block_hash = prev_block_hash; + } + + vm.vm.bootloader_state.push_l2_block(second_l2_block); + + vm.vm.push_transaction(l1_tx); + + let result = vm.vm.execute(VmExecutionMode::OneTx); + if let Some(err) = expected_error { + assert_eq!(result.result, ExecutionResult::Halt { reason: err }); + } else { + assert_eq!(result.result, ExecutionResult::Success { output: vec![] }); + } +} + +#[test] +fn test_l2_block_new_l2_block() { + // This test aims to cover potential issues with starting a new L2 block: an invalid number, timestamp, or previous block hash. + + let correct_first_block = L2BlockEnv { + number: 1, + timestamp: 1, + prev_block_hash: L2BlockHasher::legacy_hash(L2BlockNumber(0)), + max_virtual_blocks_to_create: 1, + }; + + // Case 1: Block number increasing by more than 1 + test_new_l2_block( + correct_first_block, + Some(3), + None, + None, + Some(Halt::FailedToSetL2Block( + "Invalid new L2 block number".to_string(), + )), + ); + + // Case 2: Timestamp not increasing + test_new_l2_block( + correct_first_block, + None, + Some(1), + None, + Some(Halt::FailedToSetL2Block("The timestamp of the new L2 block must be greater than the timestamp of the previous L2 block".to_string())), + ); + + // Case 3: Incorrect previous block hash + test_new_l2_block( + correct_first_block, + None, + None, + Some(H256::zero()), + Some(Halt::FailedToSetL2Block( + "The current L2 block hash is incorrect".to_string(), + )), + ); + + // Case 4: Correct new block + test_new_l2_block(correct_first_block, None, None, None, None); +} + +#[allow(clippy::too_many_arguments)] +fn test_first_in_batch( + miniblock_timestamp: u64, + miniblock_number: u32, + pending_txs_hash: H256, + batch_timestamp: u64, + new_batch_timestamp: u64, + batch_number: u32, + proposed_block: L2BlockEnv, + expected_error: Option<Halt>, +) { + let mut l1_batch = default_l1_batch(L1BatchNumber(1)); + l1_batch.number += 1; +
l1_batch.timestamp = new_batch_timestamp; + + let mut vm = VmTesterBuilder::new() + .with_empty_in_memory_storage() + .with_l1_batch_env(l1_batch) + .with_execution_mode(TxExecutionMode::VerifyExecute) + .with_random_rich_accounts(1) + .build(); + let l1_tx = get_l1_noop(); + + // Setting the values provided. + let mut storage_ptr = vm.vm.world.storage.borrow_mut(); + let miniblock_info_slot = StorageKey::new( + AccountTreeId::new(SYSTEM_CONTEXT_ADDRESS), + SYSTEM_CONTEXT_CURRENT_L2_BLOCK_INFO_POSITION, + ); + let pending_txs_hash_slot = StorageKey::new( + AccountTreeId::new(SYSTEM_CONTEXT_ADDRESS), + SYSTEM_CONTEXT_CURRENT_TX_ROLLING_HASH_POSITION, + ); + let batch_info_slot = StorageKey::new( + AccountTreeId::new(SYSTEM_CONTEXT_ADDRESS), + SYSTEM_CONTEXT_BLOCK_INFO_POSITION, + ); + let prev_block_hash_position = get_l2_block_hash_key(miniblock_number - 1); + + storage_ptr.set_value( + miniblock_info_slot, + u256_to_h256(pack_block_info( + miniblock_number as u64, + miniblock_timestamp, + )), + ); + storage_ptr.set_value(pending_txs_hash_slot, pending_txs_hash); + storage_ptr.set_value( + batch_info_slot, + u256_to_h256(pack_block_info(batch_number as u64, batch_timestamp)), + ); + storage_ptr.set_value( + prev_block_hash_position, + L2BlockHasher::legacy_hash(L2BlockNumber(miniblock_number - 1)), + ); + drop(storage_ptr); + + // In order to skip the checks from the Rust side of the VM, we first use some definitely correct L2 block info + // and then override it with the user-provided value. + + let last_l2_block = vm.vm.bootloader_state.last_l2_block(); + let new_l2_block = L2BlockEnv { + number: last_l2_block.number + 1, + timestamp: last_l2_block.timestamp + 1, + prev_block_hash: last_l2_block.get_hash(), + max_virtual_blocks_to_create: last_l2_block.max_virtual_blocks_to_create, + }; + + vm.vm.bootloader_state.push_l2_block(new_l2_block); + vm.vm.push_transaction(l1_tx); + set_manual_l2_block_info(&mut vm.vm, 0, proposed_block); + + let result = vm.vm.execute(VmExecutionMode::OneTx); + if let Some(err) = expected_error { + assert_eq!(result.result, ExecutionResult::Halt { reason: err }); + } else { + assert_eq!(result.result, ExecutionResult::Success { output: vec![] }); + } +} + +#[test] +fn test_l2_block_first_in_batch() { + let prev_block_hash = L2BlockHasher::legacy_hash(L2BlockNumber(0)); + let prev_block_hash = L2BlockHasher::new(L2BlockNumber(1), 1, prev_block_hash) + .finalize(ProtocolVersionId::latest()); + test_first_in_batch( + 1, + 1, + H256::zero(), + 1, + 2, + 1, + L2BlockEnv { + number: 2, + timestamp: 2, + prev_block_hash, + max_virtual_blocks_to_create: 1, + }, + None, + ); + + let prev_block_hash = L2BlockHasher::legacy_hash(L2BlockNumber(0)); + let prev_block_hash = L2BlockHasher::new(L2BlockNumber(1), 8, prev_block_hash) + .finalize(ProtocolVersionId::latest()); + test_first_in_batch( + 8, + 1, + H256::zero(), + 5, + 12, + 1, + L2BlockEnv { + number: 2, + timestamp: 9, + prev_block_hash, + max_virtual_blocks_to_create: 1, + }, + Some(Halt::FailedToSetL2Block("The timestamp of the L2 block must be greater than or equal to the timestamp of the current batch".to_string())), + ); +} + +fn set_manual_l2_block_info( + vm: &mut Vm<impl ReadStorage>, + tx_number: usize, + block_info: L2BlockEnv, +) { + let fictive_miniblock_position = + TX_OPERATOR_L2_BLOCK_INFO_OFFSET + TX_OPERATOR_SLOTS_PER_L2_BLOCK_INFO * tx_number; + + vm.write_to_bootloader_heap([ + (fictive_miniblock_position, block_info.number.into()), + (fictive_miniblock_position + 1, block_info.timestamp.into()), + ( +
fictive_miniblock_position + 2, + h256_to_u256(block_info.prev_block_hash), + ), + ( + fictive_miniblock_position + 3, + block_info.max_virtual_blocks_to_create.into(), + ), + ]) +} diff --git a/core/lib/multivm/src/versions/vm_fast/tests/mod.rs b/core/lib/multivm/src/versions/vm_fast/tests/mod.rs new file mode 100644 index 000000000000..9d5b229f23a9 --- /dev/null +++ b/core/lib/multivm/src/versions/vm_fast/tests/mod.rs @@ -0,0 +1,26 @@ +mod bootloader; +mod default_aa; +//mod block_tip; FIXME: requires vm metrics +mod bytecode_publishing; +// mod call_tracer; FIXME: requires tracers +// mod circuits; FIXME: requires tracers / circuit stats +mod code_oracle; +mod gas_limit; +mod get_used_contracts; +mod is_write_initial; +mod l1_tx_execution; +mod l2_blocks; +mod nonce_holder; +// mod precompiles; FIXME: requires tracers / circuit stats +// mod prestate_tracer; FIXME: is pre-state tracer still relevant? +mod refunds; +mod require_eip712; +mod rollbacks; +mod sekp256r1; +mod simple_execution; +mod storage; +mod tester; +mod tracing_execution_error; +mod transfer; +mod upgrade; +mod utils; diff --git a/core/lib/multivm/src/versions/vm_fast/tests/nonce_holder.rs b/core/lib/multivm/src/versions/vm_fast/tests/nonce_holder.rs new file mode 100644 index 000000000000..b18676cf2ba6 --- /dev/null +++ b/core/lib/multivm/src/versions/vm_fast/tests/nonce_holder.rs @@ -0,0 +1,179 @@ +use zksync_types::{Execute, ExecuteTransactionCommon, Nonce}; + +use crate::{ + interface::{ + ExecutionResult, Halt, TxExecutionMode, TxRevertReason, VmExecutionMode, VmInterface, + VmRevertReason, + }, + vm_fast::tests::{ + tester::{Account, VmTesterBuilder}, + utils::read_nonce_holder_tester, + }, +}; + +pub enum NonceHolderTestMode { + SetValueUnderNonce, + IncreaseMinNonceBy5, + IncreaseMinNonceTooMuch, + LeaveNonceUnused, + IncreaseMinNonceBy1, + SwitchToArbitraryOrdering, +} + +impl From<NonceHolderTestMode> for u8 { + fn from(mode: NonceHolderTestMode) -> u8 { + match mode { + NonceHolderTestMode::SetValueUnderNonce => 0, + NonceHolderTestMode::IncreaseMinNonceBy5 => 1, + NonceHolderTestMode::IncreaseMinNonceTooMuch => 2, + NonceHolderTestMode::LeaveNonceUnused => 3, + NonceHolderTestMode::IncreaseMinNonceBy1 => 4, + NonceHolderTestMode::SwitchToArbitraryOrdering => 5, + } + } +} + +#[test] +fn test_nonce_holder() { + let mut account = Account::random(); + + let mut vm = VmTesterBuilder::new() + .with_empty_in_memory_storage() + .with_execution_mode(TxExecutionMode::VerifyExecute) + .with_deployer() + .with_custom_contracts(vec![( + read_nonce_holder_tester().to_vec(), + account.address, + true, + )]) + .with_rich_accounts(vec![account.clone()]) + .build(); + + let mut run_nonce_test = |nonce: u32, + test_mode: NonceHolderTestMode, + error_message: Option<String>, + comment: &'static str| { + // In this test we have to reset the VM state after each test case, because once the bootloader fails during transaction validation, + // it will fail again and again. At the same time we have to keep the same storage, because we want to keep the nonce holder contract state. + // The easiest way in terms of lifetimes is to reuse `vm_builder` to achieve this.
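+ // The tester contract picks its behavior from the first signature byte; the mapping is given + // by the `From<NonceHolderTestMode> for u8` impl above (e.g. `IncreaseMinNonceBy5` is encoded + // as 1), and the byte is set via `tx_data.signature` below.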
+ vm.reset_state(true); + let mut transaction = account.get_l2_tx_for_execute_with_nonce( + Execute { + contract_address: account.address, + calldata: vec![12], + value: Default::default(), + factory_deps: vec![], + }, + None, + Nonce(nonce), + ); + let ExecuteTransactionCommon::L2(tx_data) = &mut transaction.common_data else { + unreachable!(); + }; + tx_data.signature = vec![test_mode.into()]; + vm.vm.push_transaction_inner(transaction, 0, true); + let result = vm.vm.execute(VmExecutionMode::OneTx); + + if let Some(msg) = error_message { + let expected_error = + TxRevertReason::Halt(Halt::ValidationFailed(VmRevertReason::General { + msg, + data: vec![], + })); + let ExecutionResult::Halt { reason } = result.result else { + panic!("Expected revert, got {:?}", result.result); + }; + assert_eq!(reason.to_string(), expected_error.to_string(), "{comment}"); + } else { + assert!(!result.result.is_failed(), "{comment}: {result:?}"); + } + }; + // Test 1: trying to set value under non sequential nonce value. + run_nonce_test( + 1u32, + NonceHolderTestMode::SetValueUnderNonce, + Some("Previous nonce has not been used".to_string()), + "Allowed to set value under non sequential value", + ); + + // Test 2: increase min nonce by 1 with sequential nonce ordering: + run_nonce_test( + 0u32, + NonceHolderTestMode::IncreaseMinNonceBy1, + None, + "Failed to increment nonce by 1 for sequential account", + ); + + // Test 3: correctly set value under nonce with sequential nonce ordering: + run_nonce_test( + 1u32, + NonceHolderTestMode::SetValueUnderNonce, + None, + "Failed to set value under nonce sequential value", + ); + + // Test 5: migrate to the arbitrary nonce ordering: + run_nonce_test( + 2u32, + NonceHolderTestMode::SwitchToArbitraryOrdering, + None, + "Failed to switch to arbitrary ordering", + ); + + // Test 6: increase min nonce by 5 + run_nonce_test( + 6u32, + NonceHolderTestMode::IncreaseMinNonceBy5, + None, + "Failed to increase min nonce by 5", + ); + + // Test 7: since the nonces in range [6,10] are no longer allowed, the + // tx with nonce 10 should not be allowed + run_nonce_test( + 10u32, + NonceHolderTestMode::IncreaseMinNonceBy5, + Some("Reusing the same nonce twice".to_string()), + "Allowed to reuse nonce below the minimal one", + ); + + // Test 8: we should be able to use nonce 13 + run_nonce_test( + 13u32, + NonceHolderTestMode::SetValueUnderNonce, + None, + "Did not allow to use unused nonce 10", + ); + + // Test 9: we should not be able to reuse nonce 13 + run_nonce_test( + 13u32, + NonceHolderTestMode::IncreaseMinNonceBy5, + Some("Reusing the same nonce twice".to_string()), + "Allowed to reuse the same nonce twice", + ); + + // Test 10: we should be able to simply use nonce 14, while bumping the minimal nonce by 5 + run_nonce_test( + 14u32, + NonceHolderTestMode::IncreaseMinNonceBy5, + None, + "Did not allow to use a bumped nonce", + ); + + // Test 11: Do not allow bumping nonce by too much + run_nonce_test( + 16u32, + NonceHolderTestMode::IncreaseMinNonceTooMuch, + Some("The value for incrementing the nonce is too high".to_string()), + "Allowed for incrementing min nonce too much", + ); + + // Test 12: Do not allow not setting a nonce as used + run_nonce_test( + 16u32, + NonceHolderTestMode::LeaveNonceUnused, + Some("The nonce was not set as used".to_string()), + "Allowed to leave nonce as unused", + ); +} diff --git a/core/lib/multivm/src/versions/vm_fast/tests/precompiles.rs b/core/lib/multivm/src/versions/vm_fast/tests/precompiles.rs new file mode 100644 index 
000000000000..5bdf0930d558 --- /dev/null +++ b/core/lib/multivm/src/versions/vm_fast/tests/precompiles.rs @@ -0,0 +1,133 @@ +use zk_evm_1_5_0::zk_evm_abstractions::precompiles::PrecompileAddress; +use zksync_types::{Address, Execute}; + +use crate::{ + interface::{TxExecutionMode, VmExecutionMode, VmInterface}, + vm_fast::tests::{tester::VmTesterBuilder, utils::read_precompiles_contract}, + vm_latest::constants::BATCH_COMPUTATIONAL_GAS_LIMIT, +}; + +#[test] +fn test_keccak() { + // Execute special transaction and check that at least 1000 keccak calls were made. + let contract = read_precompiles_contract(); + let address = Address::random(); + let mut vm = VmTesterBuilder::new() + .with_empty_in_memory_storage() + .with_random_rich_accounts(1) + .with_deployer() + .with_bootloader_gas_limit(BATCH_COMPUTATIONAL_GAS_LIMIT) + .with_execution_mode(TxExecutionMode::VerifyExecute) + .with_custom_contracts(vec![(contract, address, true)]) + .build(); + + // calldata for `doKeccak(1000)`. + let keccak1000_calldata = + "370f20ac00000000000000000000000000000000000000000000000000000000000003e8"; + + let account = &mut vm.rich_accounts[0]; + let tx = account.get_l2_tx_for_execute( + Execute { + contract_address: address, + calldata: hex::decode(keccak1000_calldata).unwrap(), + value: Default::default(), + factory_deps: None, + }, + None, + ); + vm.vm.push_transaction(tx); + let _ = vm.vm.inspect(Default::default(), VmExecutionMode::OneTx); + + let keccak_count = vm + .vm + .state + .precompiles_processor + .precompile_cycles_history + .inner() + .iter() + .filter(|(precompile, _)| precompile == &PrecompileAddress::Keccak256) + .count(); + + assert!(keccak_count >= 1000); +} + +#[test] +fn test_sha256() { + // Execute special transaction and check that at least 1000 `sha256` calls were made. + let contract = read_precompiles_contract(); + let address = Address::random(); + let mut vm = VmTesterBuilder::new() + .with_empty_in_memory_storage() + .with_random_rich_accounts(1) + .with_deployer() + .with_bootloader_gas_limit(BATCH_COMPUTATIONAL_GAS_LIMIT) + .with_execution_mode(TxExecutionMode::VerifyExecute) + .with_custom_contracts(vec![(contract, address, true)]) + .build(); + + // calldata for `doSha256(1000)`. + let sha1000_calldata = + "5d0b4fb500000000000000000000000000000000000000000000000000000000000003e8"; + + let account = &mut vm.rich_accounts[0]; + let tx = account.get_l2_tx_for_execute( + Execute { + contract_address: address, + calldata: hex::decode(sha1000_calldata).unwrap(), + value: Default::default(), + factory_deps: None, + }, + None, + ); + vm.vm.push_transaction(tx); + let _ = vm.vm.inspect(Default::default(), VmExecutionMode::OneTx); + + let sha_count = vm + .vm + .state + .precompiles_processor + .precompile_cycles_history + .inner() + .iter() + .filter(|(precompile, _)| precompile == &PrecompileAddress::SHA256) + .count(); + + assert!(sha_count >= 1000); +} + +#[test] +fn test_ecrecover() { + // Execute simple transfer and check that exactly 1 `ecrecover` call was made (it's done during tx validation). 
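+ // Exactly one call is expected because `DefaultAccount` validation recovers the signer from the + // ECDSA signature once via the `ecrecover` precompile, and the empty-calldata execution itself + // makes no further precompile calls.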
+ let mut vm = VmTesterBuilder::new() + .with_empty_in_memory_storage() + .with_random_rich_accounts(1) + .with_deployer() + .with_bootloader_gas_limit(BATCH_COMPUTATIONAL_GAS_LIMIT) + .with_execution_mode(TxExecutionMode::VerifyExecute) + .build(); + + let account = &mut vm.rich_accounts[0]; + let tx = account.get_l2_tx_for_execute( + Execute { + contract_address: account.address, + calldata: Vec::new(), + value: Default::default(), + factory_deps: None, + }, + None, + ); + vm.vm.push_transaction(tx); + let _ = vm.vm.inspect(Default::default(), VmExecutionMode::OneTx); + + let ecrecover_count = vm + .vm + .state + .precompiles_processor + .precompile_cycles_history + .inner() + .iter() + .filter(|(precompile, _)| precompile == &PrecompileAddress::Ecrecover) + .count(); + + assert_eq!(ecrecover_count, 1); +} diff --git a/core/lib/multivm/src/versions/vm_fast/tests/prestate_tracer.rs b/core/lib/multivm/src/versions/vm_fast/tests/prestate_tracer.rs new file mode 100644 index 000000000000..63620c7d9ff8 --- /dev/null +++ b/core/lib/multivm/src/versions/vm_fast/tests/prestate_tracer.rs @@ -0,0 +1,143 @@ +use std::sync::Arc; + +use once_cell::sync::OnceCell; +use zksync_test_account::TxType; +use zksync_types::{utils::deployed_address_create, Execute, U256}; + +use crate::{ + interface::{TxExecutionMode, VmExecutionMode, VmInterface}, + tracers::PrestateTracer, + vm_latest::{ + constants::BATCH_COMPUTATIONAL_GAS_LIMIT, + tests::{tester::VmTesterBuilder, utils::read_simple_transfer_contract}, + HistoryEnabled, ToTracerPointer, + }, +}; + +#[test] +fn test_prestate_tracer() { + let mut vm = VmTesterBuilder::new(HistoryEnabled) + .with_empty_in_memory_storage() + .with_random_rich_accounts(1) + .with_deployer() + .with_bootloader_gas_limit(BATCH_COMPUTATIONAL_GAS_LIMIT) + .with_execution_mode(TxExecutionMode::VerifyExecute) + .build(); + + vm.deploy_test_contract(); + let account = &mut vm.rich_accounts[0]; + + let tx1 = account.get_test_contract_transaction( + vm.test_contract.unwrap(), + false, + Default::default(), + true, + TxType::L2, + ); + vm.vm.push_transaction(tx1); + + let contract_address = vm.test_contract.unwrap(); + let prestate_tracer_result = Arc::new(OnceCell::default()); + let prestate_tracer = PrestateTracer::new(false, prestate_tracer_result.clone()); + let tracer_ptr = prestate_tracer.into_tracer_pointer(); + vm.vm.inspect(tracer_ptr.into(), VmExecutionMode::Batch); + + let prestate_result = Arc::try_unwrap(prestate_tracer_result) + .unwrap() + .take() + .unwrap_or_default(); + + assert!(prestate_result.1.contains_key(&contract_address)); +} + +#[test] +fn test_prestate_tracer_diff_mode() { + let mut vm = VmTesterBuilder::new(HistoryEnabled) + .with_empty_in_memory_storage() + .with_random_rich_accounts(1) + .with_deployer() + .with_bootloader_gas_limit(BATCH_COMPUTATIONAL_GAS_LIMIT) + .with_execution_mode(TxExecutionMode::VerifyExecute) + .build(); + let contract = read_simple_transfer_contract(); + let tx = vm + .deployer + .as_mut() + .expect("You have to initialize builder with deployer") + .get_deploy_tx(&contract, None, TxType::L2) + .tx; + let nonce = tx.nonce().unwrap().0.into(); + vm.vm.push_transaction(tx); + vm.vm.execute(VmExecutionMode::OneTx); + let deployed_address = deployed_address_create(vm.deployer.as_ref().unwrap().address, nonce); + vm.test_contract = Some(deployed_address); + + // Deploy a second copy of the contract to see its appearance in the pre-state + let tx2 = vm + .deployer + .as_mut() + .expect("You have to initialize builder with deployer") + 
.get_deploy_tx(&contract, None, TxType::L2) + .tx; + let nonce2 = tx2.nonce().unwrap().0.into(); + vm.vm.push_transaction(tx2); + vm.vm.execute(VmExecutionMode::OneTx); + let deployed_address2 = deployed_address_create(vm.deployer.as_ref().unwrap().address, nonce2); + + let account = &mut vm.rich_accounts[0]; + + //enter ether to contract to see difference in the balance post execution + let tx0 = Execute { + contract_address: vm.test_contract.unwrap(), + calldata: Default::default(), + value: U256::from(100000), + factory_deps: None, + }; + + vm.vm + .push_transaction(account.get_l2_tx_for_execute(tx0.clone(), None)); + + let tx1 = Execute { + contract_address: deployed_address2, + calldata: Default::default(), + value: U256::from(200000), + factory_deps: None, + }; + + vm.vm + .push_transaction(account.get_l2_tx_for_execute(tx1, None)); + let prestate_tracer_result = Arc::new(OnceCell::default()); + let prestate_tracer = PrestateTracer::new(true, prestate_tracer_result.clone()); + let tracer_ptr = prestate_tracer.into_tracer_pointer(); + vm.vm + .inspect(tracer_ptr.into(), VmExecutionMode::Bootloader); + + let prestate_result = Arc::try_unwrap(prestate_tracer_result) + .unwrap() + .take() + .unwrap_or_default(); + + //assert that the pre-state contains both deployed contracts with balance zero + assert!(prestate_result.0.contains_key(&deployed_address)); + assert!(prestate_result.0.contains_key(&deployed_address2)); + assert_eq!( + prestate_result.0[&deployed_address].balance, + Some(U256::zero()) + ); + assert_eq!( + prestate_result.0[&deployed_address2].balance, + Some(U256::zero()) + ); + + //assert that the post-state contains both deployed contracts with the correct balance + assert!(prestate_result.1.contains_key(&deployed_address)); + assert!(prestate_result.1.contains_key(&deployed_address2)); + assert_eq!( + prestate_result.1[&deployed_address].balance, + Some(U256::from(100000)) + ); + assert_eq!( + prestate_result.1[&deployed_address2].balance, + Some(U256::from(200000)) + ); +} diff --git a/core/lib/multivm/src/versions/vm_fast/tests/refunds.rs b/core/lib/multivm/src/versions/vm_fast/tests/refunds.rs new file mode 100644 index 000000000000..21a3129a3a61 --- /dev/null +++ b/core/lib/multivm/src/versions/vm_fast/tests/refunds.rs @@ -0,0 +1,221 @@ +use ethabi::Token; +use zksync_types::{Address, Execute, U256}; + +use crate::{ + interface::{TxExecutionMode, VmExecutionMode, VmInterface}, + vm_fast::tests::{ + tester::{DeployContractsTx, TxType, VmTesterBuilder}, + utils::{read_expensive_contract, read_test_contract}, + }, +}; + +#[test] +fn test_predetermined_refunded_gas() { + // In this test, we compare the execution of the bootloader with the predefined + // refunded gas and without them + + let mut vm = VmTesterBuilder::new() + .with_empty_in_memory_storage() + .with_execution_mode(TxExecutionMode::VerifyExecute) + .with_random_rich_accounts(1) + .build(); + let l1_batch = vm.vm.batch_env.clone(); + + let counter = read_test_contract(); + let account = &mut vm.rich_accounts[0]; + + let DeployContractsTx { + tx, + bytecode_hash: _, + address: _, + } = account.get_deploy_tx(&counter, None, TxType::L2); + vm.vm.push_transaction(tx.clone()); + let result = vm.vm.execute(VmExecutionMode::OneTx); + + assert!(!result.result.is_failed()); + + // If the refund provided by the operator or the final refund are the 0 + // there is no impact of the operator's refund at all and so this test does not + // make much sense. 
+ assert!( + result.refunds.operator_suggested_refund > 0, + "The operator's refund is 0" + ); + assert!(result.refunds.gas_refunded > 0, "The final refund is 0"); + + let result_without_predefined_refunds = vm.vm.execute(VmExecutionMode::Batch); + let mut current_state_without_predefined_refunds = vm.vm.get_current_execution_state(); + assert!(!result_without_predefined_refunds.result.is_failed(),); + + // Here we want to provide the same refund from the operator and check that it's the correct one. + // We execute the whole block without refund tracer, because refund tracer will eventually override the provided refund. + // But the overall result should be the same + + let mut vm = VmTesterBuilder::new() + .with_empty_in_memory_storage() + .with_l1_batch_env(l1_batch.clone()) + .with_execution_mode(TxExecutionMode::VerifyExecute) + .with_rich_accounts(vec![account.clone()]) + .build(); + + vm.vm + .push_transaction_inner(tx.clone(), result.refunds.gas_refunded, true); + + let result_with_predefined_refunds = vm.vm.execute(VmExecutionMode::Batch); + let mut current_state_with_predefined_refunds = vm.vm.get_current_execution_state(); + + assert!(!result_with_predefined_refunds.result.is_failed()); + + // We need to sort these lists as those are flattened from HashMaps + current_state_with_predefined_refunds + .used_contract_hashes + .sort(); + current_state_without_predefined_refunds + .used_contract_hashes + .sort(); + + assert_eq!( + current_state_with_predefined_refunds.events, + current_state_without_predefined_refunds.events + ); + + assert_eq!( + current_state_with_predefined_refunds.user_l2_to_l1_logs, + current_state_without_predefined_refunds.user_l2_to_l1_logs + ); + + assert_eq!( + current_state_with_predefined_refunds.system_logs, + current_state_without_predefined_refunds.system_logs + ); + + assert_eq!( + current_state_with_predefined_refunds.deduplicated_storage_logs, + current_state_without_predefined_refunds.deduplicated_storage_logs + ); + assert_eq!( + current_state_with_predefined_refunds.used_contract_hashes, + current_state_without_predefined_refunds.used_contract_hashes + ); + + // In this test we put the different refund from the operator. + // We still can't use the refund tracer, because it will override the refund. + // But we can check that the logs and events have changed. 
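+ // With a different refund, refund-dependent values leak into events, system logs and storage + // logs, so their contents are expected to differ, while the number of events and the user + // L2->L1 logs stay the same; the asserts below pin down exactly which parts may and may not + // change.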
+ let mut vm = VmTesterBuilder::new() + .with_empty_in_memory_storage() + .with_l1_batch_env(l1_batch) + .with_execution_mode(TxExecutionMode::VerifyExecute) + .with_rich_accounts(vec![account.clone()]) + .build(); + + let changed_operator_suggested_refund = result.refunds.gas_refunded + 1000; + vm.vm + .push_transaction_inner(tx, changed_operator_suggested_refund, true); + let result = vm.vm.execute(VmExecutionMode::Batch); + let mut current_state_with_changed_predefined_refunds = vm.vm.get_current_execution_state(); + + assert!(!result.result.is_failed()); + current_state_with_changed_predefined_refunds + .used_contract_hashes + .sort(); + current_state_without_predefined_refunds + .used_contract_hashes + .sort(); + + assert_eq!( + current_state_with_changed_predefined_refunds.events.len(), + current_state_without_predefined_refunds.events.len() + ); + + assert_ne!( + current_state_with_changed_predefined_refunds.events, + current_state_without_predefined_refunds.events + ); + + assert_eq!( + current_state_with_changed_predefined_refunds.user_l2_to_l1_logs, + current_state_without_predefined_refunds.user_l2_to_l1_logs + ); + + assert_ne!( + current_state_with_changed_predefined_refunds.system_logs, + current_state_without_predefined_refunds.system_logs + ); + + assert_eq!( + current_state_with_changed_predefined_refunds + .deduplicated_storage_logs + .len(), + current_state_without_predefined_refunds + .deduplicated_storage_logs + .len() + ); + + assert_ne!( + current_state_with_changed_predefined_refunds.deduplicated_storage_logs, + current_state_without_predefined_refunds.deduplicated_storage_logs + ); + assert_eq!( + current_state_with_changed_predefined_refunds.used_contract_hashes, + current_state_without_predefined_refunds.used_contract_hashes + ); +} + +#[test] +fn negative_pubdata_for_transaction() { + let expensive_contract_address = Address::random(); + let (expensive_contract_bytecode, expensive_contract) = read_expensive_contract(); + let expensive_function = expensive_contract.function("expensive").unwrap(); + let cleanup_function = expensive_contract.function("cleanUp").unwrap(); + + let mut vm = VmTesterBuilder::new() + .with_empty_in_memory_storage() + .with_execution_mode(TxExecutionMode::VerifyExecute) + .with_random_rich_accounts(1) + .with_custom_contracts(vec![( + expensive_contract_bytecode, + expensive_contract_address, + false, + )]) + .build(); + + let expensive_tx = vm.rich_accounts[0].get_l2_tx_for_execute( + Execute { + contract_address: expensive_contract_address, + calldata: expensive_function + .encode_input(&[Token::Uint(10.into())]) + .unwrap(), + value: U256::zero(), + factory_deps: vec![], + }, + None, + ); + vm.vm.push_transaction(expensive_tx); + let result = vm.vm.execute(VmExecutionMode::OneTx); + assert!( + !result.result.is_failed(), + "Transaction wasn't successful: {result:#?}" + ); + + // This transaction cleans all initial writes in the contract, thus having negative `pubdata` impact. 
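+ // When a slot is restored to its original value within the same batch, its pubdata cost is + // effectively returned, so the refund here is driven by pubdata rather than computation; the + // asserts below check that the operator-suggested refund is non-zero and granted in full.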
+ let clean_up_tx = vm.rich_accounts[0].get_l2_tx_for_execute( + Execute { + contract_address: expensive_contract_address, + calldata: cleanup_function.encode_input(&[]).unwrap(), + value: U256::zero(), + factory_deps: vec![], + }, + None, + ); + vm.vm.push_transaction(clean_up_tx); + let result = vm.vm.execute(VmExecutionMode::OneTx); + assert!( + !result.result.is_failed(), + "Transaction wasn't successful: {result:#?}" + ); + assert!(result.refunds.operator_suggested_refund > 0); + assert_eq!( + result.refunds.gas_refunded, + result.refunds.operator_suggested_refund + ); +} diff --git a/core/lib/multivm/src/versions/vm_fast/tests/require_eip712.rs b/core/lib/multivm/src/versions/vm_fast/tests/require_eip712.rs new file mode 100644 index 000000000000..7e378a2b62c4 --- /dev/null +++ b/core/lib/multivm/src/versions/vm_fast/tests/require_eip712.rs @@ -0,0 +1,171 @@ +use ethabi::Token; +use zksync_eth_signer::{EthereumSigner, TransactionParameters}; +use zksync_state::ReadStorage; +use zksync_system_constants::L2_BASE_TOKEN_ADDRESS; +use zksync_types::{ + fee::Fee, l2::L2Tx, transaction_request::TransactionRequest, + utils::storage_key_for_standard_token_balance, AccountTreeId, Address, Eip712Domain, Execute, + L2ChainId, Nonce, Transaction, U256, +}; +use zksync_utils::h256_to_u256; + +use crate::{ + interface::{TxExecutionMode, VmExecutionMode, VmInterface}, + vm_fast::tests::{ + tester::{Account, VmTester, VmTesterBuilder}, + utils::read_many_owners_custom_account_contract, + }, +}; + +impl VmTester { + pub(crate) fn get_eth_balance(&mut self, address: Address) -> U256 { + let key = storage_key_for_standard_token_balance( + AccountTreeId::new(L2_BASE_TOKEN_ADDRESS), + &address, + ); + self.vm + .inner + .world_diff + .get_storage_state() + .get(&(L2_BASE_TOKEN_ADDRESS, h256_to_u256(*key.key()))) + .copied() + .unwrap_or_else(|| h256_to_u256(self.vm.world.storage.read_value(&key))) + } +} + +/// This test deploys 'buggy' account abstraction code, and then tries accessing it both with legacy +/// and EIP712 transactions. +/// Currently we support both, but in the future, we should allow only EIP712 transactions to access the AA accounts. +#[tokio::test] +async fn test_require_eip712() { + // Use 3 accounts: + // - `private_address` - EOA account, where we have the key + // - `account_address` - AA account, where the contract is deployed + // - beneficiary - an EOA account, where we'll try to transfer the tokens. + let account_abstraction = Account::random(); + let mut private_account = Account::random(); + let beneficiary = Account::random(); + + let (bytecode, contract) = read_many_owners_custom_account_contract(); + let mut vm = VmTesterBuilder::new() + .with_empty_in_memory_storage() + .with_custom_contracts(vec![(bytecode, account_abstraction.address, true)]) + .with_execution_mode(TxExecutionMode::VerifyExecute) + .with_rich_accounts(vec![account_abstraction.clone(), private_account.clone()]) + .build(); + + assert_eq!(vm.get_eth_balance(beneficiary.address), U256::from(0)); + + let chain_id: u32 = 270; + + // First, let's set the owners of the AA account to the `private_address`. + // (so that messages signed by `private_address`, are authorized to act on behalf of the AA account). 
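The next statements encode a `setOwners(address[])` call through the ABI loaded from the contract artifact. For reference, roughly the same calldata can be built by hand; this sketch assumes the `ethabi` crate's `short_signature` and `encode` helpers, and the owner address is made up:

use ethabi::{short_signature, ParamType, Token};

fn main() {
    // Hypothetical owner; in the test it is the rich EOA's address.
    let owner = ethabi::Address::from([0x11; 20]);
    let params = [ParamType::Array(Box::new(ParamType::Address))];
    // Calldata is the 4-byte selector followed by the ABI-encoded arguments.
    let selector = short_signature("setOwners", &params);
    let args = ethabi::encode(&[Token::Array(vec![Token::Address(owner)])]);
    let calldata = [selector.to_vec(), args].concat();
    assert_eq!(&calldata[..4], &selector[..]);
}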
+ let set_owners_function = contract.function("setOwners").unwrap(); + let encoded_input = set_owners_function + .encode_input(&[Token::Array(vec![Token::Address(private_account.address)])]) + .unwrap(); + + let tx = private_account.get_l2_tx_for_execute( + Execute { + contract_address: account_abstraction.address, + calldata: encoded_input, + value: Default::default(), + factory_deps: vec![], + }, + None, + ); + + vm.vm.push_transaction(tx); + let result = vm.vm.execute(VmExecutionMode::OneTx); + assert!(!result.result.is_failed()); + + let private_account_balance = vm.get_eth_balance(private_account.address); + + // And now let's do the transfer from the 'account abstraction' to 'beneficiary' (using 'legacy' transaction). + // Normally this would not work - unless the operator is malicious. + let aa_raw_tx = TransactionParameters { + nonce: U256::from(0), + to: Some(beneficiary.address), + gas: U256::from(100000000), + gas_price: Some(U256::from(10000000)), + value: U256::from(888000088), + data: vec![], + chain_id: 270, + transaction_type: None, + access_list: None, + max_fee_per_gas: U256::from(1000000000), + max_priority_fee_per_gas: U256::from(1000000000), + max_fee_per_blob_gas: None, + blob_versioned_hashes: None, + }; + + let aa_tx = private_account.sign_legacy_tx(aa_raw_tx).await; + let (tx_request, hash) = TransactionRequest::from_bytes(&aa_tx, L2ChainId::from(270)).unwrap(); + + let mut l2_tx: L2Tx = L2Tx::from_request(tx_request, 10000).unwrap(); + l2_tx.set_input(aa_tx, hash); + // Pretend that operator is malicious and sets the initiator to the AA account. + l2_tx.common_data.initiator_address = account_abstraction.address; + let transaction: Transaction = l2_tx.into(); + + vm.vm.push_transaction(transaction); + let result = vm.vm.execute(VmExecutionMode::OneTx); + assert!(!result.result.is_failed()); + + assert_eq!( + vm.get_eth_balance(beneficiary.address), + U256::from(888000088) + ); + // Make sure that the tokens were transferred from the AA account. 
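The assertion just below states that the EOA's balance is unchanged, which together with the beneficiary check above pins the source of the funds to the AA account. The bookkeeping, as a toy model with a plain map in place of the base-token storage slots (account labels are made up):

use std::collections::HashMap;

fn transfer(balances: &mut HashMap<&'static str, u64>, from: &'static str, to: &'static str, amount: u64) {
    *balances.get_mut(from).unwrap() -= amount;
    *balances.entry(to).or_default() += amount;
}

fn main() {
    let mut balances = HashMap::from([("aa_account", 1_000_000_000u64), ("private_eoa", 500)]);
    transfer(&mut balances, "aa_account", "beneficiary", 888_000_088);
    assert_eq!(balances["beneficiary"], 888_000_088);
    assert_eq!(balances["private_eoa"], 500); // the EOA paid nothing
}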
+ assert_eq!( + private_account_balance, + vm.get_eth_balance(private_account.address) + ); + + // // Now send the 'classic' EIP712 transaction + let tx_712 = L2Tx::new( + beneficiary.address, + vec![], + Nonce(1), + Fee { + gas_limit: U256::from(1000000000), + max_fee_per_gas: U256::from(1000000000), + max_priority_fee_per_gas: U256::from(1000000000), + gas_per_pubdata_limit: U256::from(1000000000), + }, + account_abstraction.address, + U256::from(28374938), + vec![], + Default::default(), + ); + + let mut transaction_request: TransactionRequest = tx_712.into(); + transaction_request.chain_id = Some(chain_id.into()); + + let domain = Eip712Domain::new(L2ChainId::from(chain_id)); + let signature = private_account + .get_pk_signer() + .sign_typed_data(&domain, &transaction_request) + .await + .unwrap(); + let encoded_tx = transaction_request.get_signed_bytes(&signature).unwrap(); + + let (aa_txn_request, aa_hash) = + TransactionRequest::from_bytes(&encoded_tx, L2ChainId::from(chain_id)).unwrap(); + + let mut l2_tx = L2Tx::from_request(aa_txn_request, 100000).unwrap(); + l2_tx.set_input(encoded_tx, aa_hash); + + let transaction: Transaction = l2_tx.into(); + vm.vm.push_transaction(transaction); + vm.vm.execute(VmExecutionMode::OneTx); + + assert_eq!( + vm.get_eth_balance(beneficiary.address), + U256::from(916375026) + ); + assert_eq!( + private_account_balance, + vm.get_eth_balance(private_account.address) + ); +} diff --git a/core/lib/multivm/src/versions/vm_fast/tests/rollbacks.rs b/core/lib/multivm/src/versions/vm_fast/tests/rollbacks.rs new file mode 100644 index 000000000000..c530c5af18ea --- /dev/null +++ b/core/lib/multivm/src/versions/vm_fast/tests/rollbacks.rs @@ -0,0 +1,144 @@ +use ethabi::Token; +use zksync_contracts::{get_loadnext_contract, test_contracts::LoadnextContractExecutionParams}; +use zksync_types::{Execute, U256}; + +use crate::{ + interface::TxExecutionMode, + vm_fast::tests::{ + tester::{DeployContractsTx, TransactionTestInfo, TxModifier, TxType, VmTesterBuilder}, + utils::read_test_contract, + }, +}; + +#[test] +fn test_vm_rollbacks() { + let mut vm = VmTesterBuilder::new() + .with_empty_in_memory_storage() + .with_execution_mode(TxExecutionMode::VerifyExecute) + .with_random_rich_accounts(1) + .build(); + + let mut account = vm.rich_accounts[0].clone(); + let counter = read_test_contract(); + let tx_0 = account.get_deploy_tx(&counter, None, TxType::L2).tx; + let tx_1 = account.get_deploy_tx(&counter, None, TxType::L2).tx; + let tx_2 = account.get_deploy_tx(&counter, None, TxType::L2).tx; + + let result_without_rollbacks = vm.execute_and_verify_txs(&vec![ + TransactionTestInfo::new_processed(tx_0.clone(), false), + TransactionTestInfo::new_processed(tx_1.clone(), false), + TransactionTestInfo::new_processed(tx_2.clone(), false), + ]); + + // reset vm + vm.reset_with_empty_storage(); + + let result_with_rollbacks = vm.execute_and_verify_txs(&vec![ + TransactionTestInfo::new_rejected(tx_0.clone(), TxModifier::WrongSignatureLength.into()), + TransactionTestInfo::new_rejected(tx_0.clone(), TxModifier::WrongMagicValue.into()), + TransactionTestInfo::new_rejected(tx_0.clone(), TxModifier::WrongSignature.into()), + // The correct nonce is 0, this tx will fail + TransactionTestInfo::new_rejected(tx_2.clone(), TxModifier::WrongNonce.into()), + // This tx will succeed + TransactionTestInfo::new_processed(tx_0.clone(), false), + // The correct nonce is 1, this tx will fail + TransactionTestInfo::new_rejected(tx_0.clone(), TxModifier::NonceReused.into()), + // The correct nonce 
is 1, this tx will fail + TransactionTestInfo::new_rejected(tx_2.clone(), TxModifier::WrongNonce.into()), + // This tx will succeed + TransactionTestInfo::new_processed(tx_1, false), + // The correct nonce is 2, this tx will fail + TransactionTestInfo::new_rejected(tx_0.clone(), TxModifier::NonceReused.into()), + // This tx will succeed + TransactionTestInfo::new_processed(tx_2.clone(), false), + // This tx will fail + TransactionTestInfo::new_rejected(tx_2, TxModifier::NonceReused.into()), + TransactionTestInfo::new_rejected(tx_0, TxModifier::NonceReused.into()), + ]); + + assert_eq!(result_without_rollbacks, result_with_rollbacks); +} + +#[test] +fn test_vm_loadnext_rollbacks() { + let mut vm = VmTesterBuilder::new() + .with_empty_in_memory_storage() + .with_execution_mode(TxExecutionMode::VerifyExecute) + .with_random_rich_accounts(1) + .build(); + let mut account = vm.rich_accounts[0].clone(); + + let loadnext_contract = get_loadnext_contract(); + let loadnext_constructor_data = &[Token::Uint(U256::from(100))]; + let DeployContractsTx { + tx: loadnext_deploy_tx, + address, + .. + } = account.get_deploy_tx_with_factory_deps( + &loadnext_contract.bytecode, + Some(loadnext_constructor_data), + loadnext_contract.factory_deps.clone(), + TxType::L2, + ); + + let loadnext_tx_1 = account.get_l2_tx_for_execute( + Execute { + contract_address: address, + calldata: LoadnextContractExecutionParams { + reads: 100, + writes: 100, + events: 100, + hashes: 500, + recursive_calls: 10, + deploys: 60, + } + .to_bytes(), + value: Default::default(), + factory_deps: vec![], + }, + None, + ); + + let loadnext_tx_2 = account.get_l2_tx_for_execute( + Execute { + contract_address: address, + calldata: LoadnextContractExecutionParams { + reads: 100, + writes: 100, + events: 100, + hashes: 500, + recursive_calls: 10, + deploys: 60, + } + .to_bytes(), + value: Default::default(), + factory_deps: vec![], + }, + None, + ); + + let result_without_rollbacks = vm.execute_and_verify_txs(&vec![ + TransactionTestInfo::new_processed(loadnext_deploy_tx.clone(), false), + TransactionTestInfo::new_processed(loadnext_tx_1.clone(), false), + TransactionTestInfo::new_processed(loadnext_tx_2.clone(), false), + ]); + + // reset vm + vm.reset_with_empty_storage(); + + let result_with_rollbacks = vm.execute_and_verify_txs(&vec![ + TransactionTestInfo::new_processed(loadnext_deploy_tx.clone(), false), + TransactionTestInfo::new_processed(loadnext_tx_1.clone(), true), + TransactionTestInfo::new_rejected( + loadnext_deploy_tx.clone(), + TxModifier::NonceReused.into(), + ), + TransactionTestInfo::new_processed(loadnext_tx_1, false), + TransactionTestInfo::new_processed(loadnext_tx_2.clone(), true), + TransactionTestInfo::new_processed(loadnext_tx_2.clone(), true), + TransactionTestInfo::new_rejected(loadnext_deploy_tx, TxModifier::NonceReused.into()), + TransactionTestInfo::new_processed(loadnext_tx_2, false), + ]); + + assert_eq!(result_without_rollbacks, result_with_rollbacks); +} diff --git a/core/lib/multivm/src/versions/vm_fast/tests/sekp256r1.rs b/core/lib/multivm/src/versions/vm_fast/tests/sekp256r1.rs new file mode 100644 index 000000000000..1e761b30ca62 --- /dev/null +++ b/core/lib/multivm/src/versions/vm_fast/tests/sekp256r1.rs @@ -0,0 +1,75 @@ +use zk_evm_1_5_0::zkevm_opcode_defs::p256; +use zksync_system_constants::P256VERIFY_PRECOMPILE_ADDRESS; +use zksync_types::{web3::keccak256, Execute, H256, U256}; +use zksync_utils::h256_to_u256; + +use crate::{ + interface::{TxExecutionMode, VmExecutionMode, VmInterface}, + 
vm_fast::tests::tester::VmTesterBuilder,
+    vm_latest::ExecutionResult,
+};
+
+#[test]
+fn test_sekp256r1() {
+    // This test checks the P256Verify (secp256r1) precompile: we sign a digest with a P-256 key
+    // and expect the precompile to return 1 for the matching signature and public key.
+    let mut vm = VmTesterBuilder::new()
+        .with_empty_in_memory_storage()
+        .with_execution_mode(TxExecutionMode::EthCall)
+        .with_random_rich_accounts(1)
+        .build();
+
+    let account = &mut vm.rich_accounts[0];
+
+    // The digest, secret key and public key were copied from the following test suite: `https://github.com/hyperledger/besu/blob/b6a6402be90339367d5bcabcd1cfd60df4832465/crypto/algorithms/src/test/java/org/hyperledger/besu/crypto/SECP256R1Test.java#L36`
+    let sk = p256::SecretKey::from_slice(
+        &hex::decode("519b423d715f8b581f4fa8ee59f4771a5b44c8130b4e3eacca54a56dda72b464").unwrap(),
+    )
+    .unwrap();
+    let sk = p256::ecdsa::SigningKey::from(sk);
+
+    let digest = keccak256(&hex::decode("5905238877c77421f73e43ee3da6f2d9e2ccad5fc942dcec0cbd25482935faaf416983fe165b1a045ee2bcd2e6dca3bdf46c4310a7461f9a37960ca672d3feb5473e253605fb1ddfd28065b53cb5858a8ad28175bf9bd386a5e471ea7a65c17cc934a9d791e91491eb3754d03799790fe2d308d16146d5c9b0d0debd97d79ce8").unwrap());
+    let public_key_encoded = hex::decode("1ccbe91c075fc7f4f033bfa248db8fccd3565de94bbfb12f3c59ff46c271bf83ce4014c68811f9a21a1fdb2c0e6113e06db7ca93b7404e78dc7ccd5ca89a4ca9").unwrap();
+
+    let (sig, _) = sk.sign_prehash_recoverable(&digest).unwrap();
+    let (r, s) = sig.split_bytes();
+
+    let mut encoded_r = [0u8; 32];
+    encoded_r.copy_from_slice(&r);
+
+    let mut encoded_s = [0u8; 32];
+    encoded_s.copy_from_slice(&s);
+
+    let mut x = [0u8; 32];
+    x.copy_from_slice(&public_key_encoded[0..32]);
+
+    let mut y = [0u8; 32];
+    y.copy_from_slice(&public_key_encoded[32..64]);
+
+    let tx = account.get_l2_tx_for_execute(
+        Execute {
+            contract_address: P256VERIFY_PRECOMPILE_ADDRESS,
+            calldata: [digest, encoded_r, encoded_s, x, y].concat(),
+            value: U256::zero(),
+            factory_deps: vec![],
+        },
+        None,
+    );
+
+    vm.vm.push_transaction(tx);
+
+    let execution_result = vm.vm.execute(VmExecutionMode::Batch);
+
+    let ExecutionResult::Success { output } = execution_result.result else {
+        panic!("batch failed")
+    };
+
+    let output = H256::from_slice(&output);
+
+    assert_eq!(
+        h256_to_u256(output),
+        U256::from(1u32),
+        "verification was not successful"
+    );
+}
diff --git a/core/lib/multivm/src/versions/vm_fast/tests/simple_execution.rs b/core/lib/multivm/src/versions/vm_fast/tests/simple_execution.rs
new file mode 100644
index 000000000000..7d866e1539b0
--- /dev/null
+++ b/core/lib/multivm/src/versions/vm_fast/tests/simple_execution.rs
@@ -0,0 +1,78 @@
+use crate::{
+    interface::{ExecutionResult, VmExecutionMode, VmInterface},
+    vm_fast::tests::tester::{TxType, VmTesterBuilder},
+};
+
+#[test]
+fn estimate_fee() {
+    let mut vm_tester = VmTesterBuilder::new()
+        .with_empty_in_memory_storage()
+        .with_deployer()
+        .with_random_rich_accounts(1)
+        .build();
+
+    vm_tester.deploy_test_contract();
+    let account = &mut vm_tester.rich_accounts[0];
+
+    let tx = account.get_test_contract_transaction(
+        vm_tester.test_contract.unwrap(),
+        false,
+        Default::default(),
+        false,
+        TxType::L2,
+    );
+
+    vm_tester.vm.push_transaction(tx);
+
+    let result = vm_tester.vm.execute(VmExecutionMode::OneTx);
+    assert!(matches!(result.result, ExecutionResult::Success { ..
}));
+}
+
+#[test]
+fn simple_execute() {
+    let mut vm_tester = VmTesterBuilder::new()
+        .with_empty_in_memory_storage()
+        .with_deployer()
+        .with_random_rich_accounts(1)
+        .build();
+
+    vm_tester.deploy_test_contract();
+
+    let account = &mut vm_tester.rich_accounts[0];
+
+    let tx1 = account.get_test_contract_transaction(
+        vm_tester.test_contract.unwrap(),
+        false,
+        Default::default(),
+        false,
+        TxType::L1 { serial_id: 1 },
+    );
+
+    let tx2 = account.get_test_contract_transaction(
+        vm_tester.test_contract.unwrap(),
+        true,
+        Default::default(),
+        false,
+        TxType::L1 { serial_id: 1 },
+    );
+
+    let tx3 = account.get_test_contract_transaction(
+        vm_tester.test_contract.unwrap(),
+        false,
+        Default::default(),
+        false,
+        TxType::L1 { serial_id: 1 },
+    );
+    let vm = &mut vm_tester.vm;
+    vm.push_transaction(tx1);
+    vm.push_transaction(tx2);
+    vm.push_transaction(tx3);
+    let tx = vm.execute(VmExecutionMode::OneTx);
+    assert!(matches!(tx.result, ExecutionResult::Success { .. }));
+    let tx = vm.execute(VmExecutionMode::OneTx);
+    assert!(matches!(tx.result, ExecutionResult::Revert { .. }));
+    let tx = vm.execute(VmExecutionMode::OneTx);
+    assert!(matches!(tx.result, ExecutionResult::Success { .. }));
+    let block_tip = vm.execute(VmExecutionMode::Batch);
+    assert!(matches!(block_tip.result, ExecutionResult::Success { .. }));
+}
diff --git a/core/lib/multivm/src/versions/vm_fast/tests/storage.rs b/core/lib/multivm/src/versions/vm_fast/tests/storage.rs
new file mode 100644
index 000000000000..733ce1f0618c
--- /dev/null
+++ b/core/lib/multivm/src/versions/vm_fast/tests/storage.rs
@@ -0,0 +1,130 @@
+use ethabi::Token;
+use zksync_contracts::{load_contract, read_bytecode};
+use zksync_types::{Address, Execute, U256};
+
+use crate::{
+    interface::{TxExecutionMode, VmExecutionMode, VmInterface, VmInterfaceHistoryEnabled},
+    vm_fast::tests::tester::VmTesterBuilder,
+};
+
+fn test_storage(first_tx_calldata: Vec<u8>, second_tx_calldata: Vec<u8>) -> u32 {
+    let bytecode = read_bytecode(
+        "etc/contracts-test-data/artifacts-zk/contracts/storage/storage.sol/StorageTester.json",
+    );
+
+    let test_contract_address = Address::random();
+
+    // Set up a VM with the `StorageTester` contract predeployed; the helper runs both
+    // transactions and returns the pubdata published by the second one.
+    let mut vm = VmTesterBuilder::new()
+        .with_empty_in_memory_storage()
+        .with_execution_mode(TxExecutionMode::VerifyExecute)
+        .with_deployer()
+        .with_random_rich_accounts(1)
+        .with_custom_contracts(vec![(bytecode, test_contract_address, false)])
+        .build();
+
+    let account = &mut vm.rich_accounts[0];
+
+    let tx1 = account.get_l2_tx_for_execute(
+        Execute {
+            contract_address: test_contract_address,
+            calldata: first_tx_calldata,
+            value: 0.into(),
+            factory_deps: vec![],
+        },
+        None,
+    );
+
+    let tx2 = account.get_l2_tx_for_execute(
+        Execute {
+            contract_address: test_contract_address,
+            calldata: second_tx_calldata,
+            value: 0.into(),
+            factory_deps: vec![],
+        },
+        None,
+    );
+
+    vm.vm.make_snapshot();
+    vm.vm.push_transaction(tx1);
+    let result = vm.vm.execute(VmExecutionMode::OneTx);
+    assert!(!result.result.is_failed(), "First tx failed");
+    vm.vm.pop_snapshot_no_rollback();
+
+    // We roll back once because transient storage and rollbacks are a tricky combination;
+    // the snapshot discipline this relies on is sketched below.
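The tester relies on a simple snapshot contract: every `make_snapshot` is paired with exactly one of `rollback_to_the_latest_snapshot` (state restored) or `pop_snapshot_no_rollback` (changes kept), never both. A minimal sketch of that contract over a toy VM (`ToyVm` is illustrative; the method names mirror the `VmInterfaceHistoryEnabled` ones used here):

struct ToyVm { value: u32, snapshots: Vec<u32> }

impl ToyVm {
    fn make_snapshot(&mut self) { self.snapshots.push(self.value); }
    fn rollback_to_the_latest_snapshot(&mut self) { self.value = self.snapshots.pop().unwrap(); }
    fn pop_snapshot_no_rollback(&mut self) { self.snapshots.pop(); }
}

fn main() {
    let mut vm = ToyVm { value: 1, snapshots: vec![] };
    vm.make_snapshot();
    vm.value = 2;
    vm.rollback_to_the_latest_snapshot();
    assert_eq!(vm.value, 1); // rolled back
    vm.make_snapshot();
    vm.value = 3;
    vm.pop_snapshot_no_rollback();
    assert_eq!(vm.value, 3); // kept
}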
+    vm.vm.make_snapshot();
+    vm.vm.push_transaction(tx2.clone());
+    let result = vm.vm.execute(VmExecutionMode::OneTx);
+    assert!(!result.result.is_failed(), "Second tx failed");
+    vm.vm.rollback_to_the_latest_snapshot();
+
+    vm.vm.make_snapshot();
+    vm.vm.push_transaction(tx2);
+    let result = vm.vm.execute(VmExecutionMode::OneTx);
+    assert!(!result.result.is_failed(), "Second tx failed on second run");
+
+    result.statistics.pubdata_published
+}
+
+fn test_storage_one_tx(second_tx_calldata: Vec<u8>) -> u32 {
+    test_storage(vec![], second_tx_calldata)
+}
+
+#[test]
+fn test_storage_behavior() {
+    let contract = load_contract(
+        "etc/contracts-test-data/artifacts-zk/contracts/storage/storage.sol/StorageTester.json",
+    );
+
+    // In all of the tests below we provide the first tx to ensure that the tracers will not include
+    // the statistics from the start of the bootloader and will only include those for the transaction itself.
+
+    let base_pubdata = test_storage_one_tx(vec![]);
+    let simple_test_pubdata = test_storage_one_tx(
+        contract
+            .function("simpleWrite")
+            .unwrap()
+            .encode_input(&[])
+            .unwrap(),
+    );
+    let resetting_write_pubdata = test_storage_one_tx(
+        contract
+            .function("resettingWrite")
+            .unwrap()
+            .encode_input(&[])
+            .unwrap(),
+    );
+    let resetting_write_via_revert_pubdata = test_storage_one_tx(
+        contract
+            .function("resettingWriteViaRevert")
+            .unwrap()
+            .encode_input(&[])
+            .unwrap(),
+    );
+
+    assert_eq!(simple_test_pubdata - base_pubdata, 65);
+    assert_eq!(resetting_write_pubdata - base_pubdata, 34);
+    assert_eq!(resetting_write_via_revert_pubdata - base_pubdata, 34);
+}
+
+#[test]
+fn test_transient_storage_behavior() {
+    let contract = load_contract(
+        "etc/contracts-test-data/artifacts-zk/contracts/storage/storage.sol/StorageTester.json",
+    );
+
+    let first_tstore_test = contract
+        .function("testTransientStore")
+        .unwrap()
+        .encode_input(&[])
+        .unwrap();
+    // The second transaction checks that, as expected, the transient storage is cleared after
+    // the first transaction (a toy model of this follows below).
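A toy model of the property asserted here: ordinary storage survives the end of a transaction, while transient storage (EIP-1153 style `TSTORE`/`TLOAD`) is wiped when the transaction finishes. The names below are illustrative, not the VM's API:

use std::collections::HashMap;

#[derive(Default)]
struct State {
    storage: HashMap<u32, u32>,
    transient: HashMap<u32, u32>,
}

impl State {
    fn end_transaction(&mut self) {
        self.transient.clear();
    }
}

fn main() {
    let mut state = State::default();
    state.storage.insert(1, 7);
    state.transient.insert(1, 7);
    state.end_transaction();
    assert_eq!(state.storage.get(&1), Some(&7)); // persists
    assert_eq!(state.transient.get(&1), None); // cleared, so `assertTValue(0)` passes
}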
+ let second_tstore_test = contract + .function("assertTValue") + .unwrap() + .encode_input(&[Token::Uint(U256::zero())]) + .unwrap(); + + test_storage(first_tstore_test, second_tstore_test); +} diff --git a/core/lib/multivm/src/versions/vm_fast/tests/tester/mod.rs b/core/lib/multivm/src/versions/vm_fast/tests/tester/mod.rs new file mode 100644 index 000000000000..781069ddf499 --- /dev/null +++ b/core/lib/multivm/src/versions/vm_fast/tests/tester/mod.rs @@ -0,0 +1,6 @@ +pub(crate) use transaction_test_info::{ExpectedError, TransactionTestInfo, TxModifier}; +pub(crate) use vm_tester::{default_l1_batch, get_empty_storage, VmTester, VmTesterBuilder}; +pub(crate) use zksync_test_account::{Account, DeployContractsTx, TxType}; + +mod transaction_test_info; +mod vm_tester; diff --git a/core/lib/multivm/src/versions/vm_fast/tests/tester/transaction_test_info.rs b/core/lib/multivm/src/versions/vm_fast/tests/tester/transaction_test_info.rs new file mode 100644 index 000000000000..9bb013542c7d --- /dev/null +++ b/core/lib/multivm/src/versions/vm_fast/tests/tester/transaction_test_info.rs @@ -0,0 +1,246 @@ +use zksync_state::ReadStorage; +use zksync_types::{ExecuteTransactionCommon, Transaction, H160, U256}; + +use super::VmTester; +use crate::{ + interface::{ + CurrentExecutionState, ExecutionResult, Halt, TxRevertReason, VmExecutionMode, + VmExecutionResultAndLogs, VmInterface, VmInterfaceHistoryEnabled, VmRevertReason, + }, + vm_fast::Vm, +}; + +#[derive(Debug, Clone)] +pub(crate) enum TxModifier { + WrongSignatureLength, + WrongSignature, + WrongMagicValue, + WrongNonce, + NonceReused, +} + +#[derive(Debug, Clone)] +pub(crate) enum TxExpectedResult { + Rejected { error: ExpectedError }, + Processed { rollback: bool }, +} + +#[derive(Debug, Clone)] +pub(crate) struct TransactionTestInfo { + tx: Transaction, + result: TxExpectedResult, +} + +#[derive(Debug, Clone)] +pub(crate) struct ExpectedError { + pub(crate) revert_reason: TxRevertReason, + pub(crate) modifier: Option, +} + +impl From for ExpectedError { + fn from(value: TxModifier) -> Self { + let revert_reason = match value { + TxModifier::WrongSignatureLength => { + Halt::ValidationFailed(VmRevertReason::General { + msg: "Signature length is incorrect".to_string(), + data: vec![ + 8, 195, 121, 160, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 32, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 29, 83, 105, 103, 110, 97, 116, 117, 114, 101, 32, + 108, 101, 110, 103, 116, 104, 32, 105, 115, 32, 105, 110, 99, 111, 114, 114, 101, 99, + 116, 0, 0, 0, + ], + }) + } + TxModifier::WrongSignature => { + Halt::ValidationFailed(VmRevertReason::General { + msg: "Account validation returned invalid magic value. 
Most often this means that the signature is incorrect".to_string(), + data: vec![], + }) + } + TxModifier::WrongMagicValue => { + Halt::ValidationFailed(VmRevertReason::General { + msg: "v is neither 27 nor 28".to_string(), + data: vec![ + 8, 195, 121, 160, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 32, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 22, 118, 32, 105, 115, 32, 110, 101, 105, 116, 104, + 101, 114, 32, 50, 55, 32, 110, 111, 114, 32, 50, 56, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + ], + }) + + } + TxModifier::WrongNonce => { + Halt::ValidationFailed(VmRevertReason::General { + msg: "Incorrect nonce".to_string(), + data: vec![ + 8, 195, 121, 160, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 32, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 15, 73, 110, 99, 111, 114, 114, 101, 99, 116, 32, 110, + 111, 110, 99, 101, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + ], + }) + } + TxModifier::NonceReused => { + Halt::ValidationFailed(VmRevertReason::General { + msg: "Reusing the same nonce twice".to_string(), + data: vec![ + 8, 195, 121, 160, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 32, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 28, 82, 101, 117, 115, 105, 110, 103, 32, 116, 104, + 101, 32, 115, 97, 109, 101, 32, 110, 111, 110, 99, 101, 32, 116, 119, 105, 99, 101, 0, + 0, 0, 0, + ], + }) + } + }; + + ExpectedError { + revert_reason: TxRevertReason::Halt(revert_reason), + modifier: Some(value), + } + } +} + +impl TransactionTestInfo { + pub(crate) fn new_rejected( + mut transaction: Transaction, + expected_error: ExpectedError, + ) -> Self { + transaction.common_data = match transaction.common_data { + ExecuteTransactionCommon::L2(mut data) => { + if let Some(modifier) = &expected_error.modifier { + match modifier { + TxModifier::WrongSignatureLength => { + data.signature = data.signature[..data.signature.len() - 20].to_vec() + } + TxModifier::WrongSignature => data.signature = vec![27u8; 65], + TxModifier::WrongMagicValue => data.signature = vec![1u8; 65], + TxModifier::WrongNonce => { + // Do not need to modify signature for nonce error + } + TxModifier::NonceReused => { + // Do not need to modify signature for nonce error + } + } + } + ExecuteTransactionCommon::L2(data) + } + _ => panic!("L1 transactions are not supported"), + }; + + Self { + tx: transaction, + result: TxExpectedResult::Rejected { + error: expected_error, + }, + } + } + + pub(crate) fn new_processed(transaction: Transaction, should_be_rollbacked: bool) -> Self { + Self { + tx: transaction, + result: TxExpectedResult::Processed { + rollback: should_be_rollbacked, + }, + } + } + + fn verify_result(&self, result: &VmExecutionResultAndLogs) { + match &self.result { + TxExpectedResult::Rejected { error } => match &result.result { + ExecutionResult::Success { .. 
} => { + panic!("Transaction should be reverted {:?}", self.tx.nonce()) + } + ExecutionResult::Revert { output } => match &error.revert_reason { + TxRevertReason::TxReverted(expected) => { + assert_eq!(output, expected) + } + _ => { + panic!("Error types mismatch"); + } + }, + ExecutionResult::Halt { reason } => match &error.revert_reason { + TxRevertReason::Halt(expected) => { + assert_eq!(reason, expected) + } + _ => { + panic!("Error types mismatch"); + } + }, + }, + TxExpectedResult::Processed { .. } => { + assert!(!result.result.is_failed()); + } + } + } + + fn should_rollback(&self) -> bool { + match &self.result { + TxExpectedResult::Rejected { .. } => true, + TxExpectedResult::Processed { rollback } => *rollback, + } + } +} + +// TODO this doesn't include all the state of ModifiedWorld +#[derive(Debug, PartialEq)] +struct VmStateDump { + state: vm2::State, + storage_writes: Vec<((H160, U256), U256)>, + events: Box<[vm2::Event]>, +} + +impl Vm { + fn dump_state(&self) -> VmStateDump { + VmStateDump { + state: self.inner.state.clone(), + storage_writes: self + .inner + .world_diff + .get_storage_state() + .iter() + .map(|(k, v)| (*k, *v)) + .collect(), + events: self.inner.world_diff.events().into(), + } + } +} + +impl VmTester { + pub(crate) fn execute_and_verify_txs( + &mut self, + txs: &[TransactionTestInfo], + ) -> CurrentExecutionState { + for tx_test_info in txs { + self.execute_tx_and_verify(tx_test_info.clone()); + } + self.vm.execute(VmExecutionMode::Batch); + let mut state = self.vm.get_current_execution_state(); + state.used_contract_hashes.sort(); + state + } + + pub(crate) fn execute_tx_and_verify( + &mut self, + tx_test_info: TransactionTestInfo, + ) -> VmExecutionResultAndLogs { + self.vm.make_snapshot(); + let inner_state_before = self.vm.dump_state(); + self.vm.push_transaction(tx_test_info.tx.clone()); + let result = self.vm.execute(VmExecutionMode::OneTx); + tx_test_info.verify_result(&result); + if tx_test_info.should_rollback() { + self.vm.rollback_to_the_latest_snapshot(); + let inner_state_after = self.vm.dump_state(); + pretty_assertions::assert_eq!( + inner_state_before, + inner_state_after, + "Inner state before and after rollback should be equal" + ); + } else { + self.vm.pop_snapshot_no_rollback(); + } + result + } +} diff --git a/core/lib/multivm/src/versions/vm_fast/tests/tester/vm_tester.rs b/core/lib/multivm/src/versions/vm_fast/tests/tester/vm_tester.rs new file mode 100644 index 000000000000..7715dd0a6d49 --- /dev/null +++ b/core/lib/multivm/src/versions/vm_fast/tests/tester/vm_tester.rs @@ -0,0 +1,296 @@ +use std::{cell::RefCell, rc::Rc}; + +use vm2::WorldDiff; +use zksync_contracts::BaseSystemContracts; +use zksync_state::{InMemoryStorage, StoragePtr}; +use zksync_test_account::{Account, TxType}; +use zksync_types::{ + block::L2BlockHasher, + fee_model::BatchFeeInput, + get_code_key, get_is_account_key, + helpers::unix_timestamp_ms, + utils::{deployed_address_create, storage_key_for_eth_balance}, + AccountTreeId, Address, L1BatchNumber, L2BlockNumber, L2ChainId, Nonce, ProtocolVersionId, + StorageKey, U256, +}; +use zksync_utils::{bytecode::hash_bytecode, u256_to_h256}; + +use crate::{ + interface::{ + L1BatchEnv, L2Block, L2BlockEnv, SystemEnv, TxExecutionMode, VmExecutionMode, VmInterface, + }, + versions::vm_fast::{tests::utils::read_test_contract, vm::Vm}, + vm_latest::{constants::BATCH_COMPUTATIONAL_GAS_LIMIT, utils::l2_blocks::load_last_l2_block}, +}; + +pub(crate) struct VmTester { + pub(crate) vm: Vm>, + pub(crate) storage: StoragePtr, + 
pub(crate) deployer: Option<Account>,
+    pub(crate) test_contract: Option<Address>,
+    pub(crate) fee_account: Address,
+    pub(crate) rich_accounts: Vec<Account>,
+    pub(crate) custom_contracts: Vec<ContractsToDeploy>,
+}
+
+impl VmTester {
+    pub(crate) fn deploy_test_contract(&mut self) {
+        let contract = read_test_contract();
+        let tx = self
+            .deployer
+            .as_mut()
+            .expect("You have to initialize builder with deployer")
+            .get_deploy_tx(&contract, None, TxType::L2)
+            .tx;
+        let nonce = tx.nonce().unwrap().0.into();
+        self.vm.push_transaction(tx);
+        self.vm.execute(VmExecutionMode::OneTx);
+        let deployed_address =
+            deployed_address_create(self.deployer.as_ref().unwrap().address, nonce);
+        self.test_contract = Some(deployed_address);
+    }
+
+    pub(crate) fn reset_with_empty_storage(&mut self) {
+        self.storage = Rc::new(RefCell::new(get_empty_storage()));
+        self.vm.inner.world_diff = WorldDiff::default();
+        self.reset_state(false);
+    }
+
+    /// Resets the state of the VM to the initial state.
+    /// If `use_latest_l2_block` is true, the VM will use the latest L2 block from storage,
+    /// otherwise it will use the first L2 block of the L1 batch env.
+    pub(crate) fn reset_state(&mut self, use_latest_l2_block: bool) {
+        for account in self.rich_accounts.iter_mut() {
+            account.nonce = Nonce(0);
+            make_account_rich(self.storage.clone(), account);
+        }
+        if let Some(deployer) = &self.deployer {
+            make_account_rich(self.storage.clone(), deployer);
+        }
+
+        if !self.custom_contracts.is_empty() {
+            println!("Inserting custom contracts is not yet supported")
+            // `insert_contracts(&mut self.storage, &self.custom_contracts);`
+        }
+
+        let storage = self.storage.clone();
+        {
+            let mut storage = storage.borrow_mut();
+            // Commit pending storage changes (old VM versions commit them on successful execution)
+            for (&(address, slot), &value) in self.vm.inner.world_diff.get_storage_state() {
+                let key = StorageKey::new(AccountTreeId::new(address), u256_to_h256(slot));
+                storage.set_value(key, u256_to_h256(value));
+            }
+        }
+
+        let mut l1_batch = self.vm.batch_env.clone();
+        if use_latest_l2_block {
+            let last_l2_block = load_last_l2_block(&storage).unwrap_or(L2Block {
+                number: 0,
+                timestamp: 0,
+                hash: L2BlockHasher::legacy_hash(L2BlockNumber(0)),
+            });
+            l1_batch.first_l2_block = L2BlockEnv {
+                number: last_l2_block.number + 1,
+                timestamp: std::cmp::max(last_l2_block.timestamp + 1, l1_batch.timestamp),
+                prev_block_hash: last_l2_block.hash,
+                max_virtual_blocks_to_create: 1,
+            };
+        }
+
+        let vm = Vm::new(l1_batch, self.vm.system_env.clone(), storage);
+
+        if self.test_contract.is_some() {
+            self.deploy_test_contract();
+        }
+        self.vm = vm;
+    }
+}
+
+pub(crate) type ContractsToDeploy = (Vec<u8>, Address, bool);
+
+pub(crate) struct VmTesterBuilder {
+    storage: Option<InMemoryStorage>,
+    l1_batch_env: Option<L1BatchEnv>,
+    system_env: SystemEnv,
+    deployer: Option<Account>,
+    rich_accounts: Vec<Account>,
+    custom_contracts: Vec<ContractsToDeploy>,
+}
+
+impl Clone for VmTesterBuilder {
+    fn clone(&self) -> Self {
+        Self {
+            storage: None,
+            l1_batch_env: self.l1_batch_env.clone(),
+            system_env: self.system_env.clone(),
+            deployer: self.deployer.clone(),
+            rich_accounts: self.rich_accounts.clone(),
+            custom_contracts: self.custom_contracts.clone(),
+        }
+    }
+}
+
+#[allow(dead_code)]
+impl VmTesterBuilder {
+    pub(crate) fn new() -> Self {
+        Self {
+            storage: None,
+            l1_batch_env: None,
+            system_env: SystemEnv {
+                zk_porter_available: false,
+                version: ProtocolVersionId::latest(),
+                base_system_smart_contracts: BaseSystemContracts::playground(),
+                bootloader_gas_limit: BATCH_COMPUTATIONAL_GAS_LIMIT,
+                execution_mode: TxExecutionMode::VerifyExecute,
+                default_validation_computational_gas_limit:
BATCH_COMPUTATIONAL_GAS_LIMIT, + chain_id: L2ChainId::from(270), + }, + deployer: None, + rich_accounts: vec![], + custom_contracts: vec![], + } + } + + pub(crate) fn with_l1_batch_env(mut self, l1_batch_env: L1BatchEnv) -> Self { + self.l1_batch_env = Some(l1_batch_env); + self + } + + pub(crate) fn with_system_env(mut self, system_env: SystemEnv) -> Self { + self.system_env = system_env; + self + } + + pub(crate) fn with_storage(mut self, storage: InMemoryStorage) -> Self { + self.storage = Some(storage); + self + } + + pub(crate) fn with_base_system_smart_contracts( + mut self, + base_system_smart_contracts: BaseSystemContracts, + ) -> Self { + self.system_env.base_system_smart_contracts = base_system_smart_contracts; + self + } + + pub(crate) fn with_bootloader_gas_limit(mut self, gas_limit: u32) -> Self { + self.system_env.bootloader_gas_limit = gas_limit; + self + } + + pub(crate) fn with_execution_mode(mut self, execution_mode: TxExecutionMode) -> Self { + self.system_env.execution_mode = execution_mode; + self + } + + pub(crate) fn with_empty_in_memory_storage(mut self) -> Self { + self.storage = Some(get_empty_storage()); + self + } + + pub(crate) fn with_random_rich_accounts(mut self, number: u32) -> Self { + for _ in 0..number { + let account = Account::random(); + self.rich_accounts.push(account); + } + self + } + + pub(crate) fn with_rich_accounts(mut self, accounts: Vec) -> Self { + self.rich_accounts.extend(accounts); + self + } + + pub(crate) fn with_deployer(mut self) -> Self { + let deployer = Account::random(); + self.deployer = Some(deployer); + self + } + + pub(crate) fn with_custom_contracts(mut self, contracts: Vec) -> Self { + self.custom_contracts = contracts; + self + } + + pub(crate) fn build(self) -> VmTester { + let l1_batch_env = self + .l1_batch_env + .unwrap_or_else(|| default_l1_batch(L1BatchNumber(1))); + + let mut raw_storage = self.storage.unwrap_or_else(get_empty_storage); + insert_contracts(&mut raw_storage, &self.custom_contracts); + let storage_ptr = Rc::new(RefCell::new(raw_storage)); + for account in self.rich_accounts.iter() { + make_account_rich(storage_ptr.clone(), account); + } + if let Some(deployer) = &self.deployer { + make_account_rich(storage_ptr.clone(), deployer); + } + + let fee_account = l1_batch_env.fee_account; + let vm = Vm::new(l1_batch_env, self.system_env, storage_ptr.clone()); + + VmTester { + vm, + storage: storage_ptr, + deployer: self.deployer, + test_contract: None, + fee_account, + rich_accounts: self.rich_accounts.clone(), + custom_contracts: self.custom_contracts.clone(), + } + } +} + +pub(crate) fn default_l1_batch(number: L1BatchNumber) -> L1BatchEnv { + let timestamp = unix_timestamp_ms(); + L1BatchEnv { + previous_batch_hash: None, + number, + timestamp, + fee_input: BatchFeeInput::l1_pegged( + 50_000_000_000, // 50 gwei + 250_000_000, // 0.25 gwei + ), + fee_account: Address::random(), + enforced_base_fee: None, + first_l2_block: L2BlockEnv { + number: 1, + timestamp, + prev_block_hash: L2BlockHasher::legacy_hash(L2BlockNumber(0)), + max_virtual_blocks_to_create: 100, + }, + } +} + +pub(crate) fn make_account_rich(storage: StoragePtr, account: &Account) { + let key = storage_key_for_eth_balance(&account.address); + storage + .as_ref() + .borrow_mut() + .set_value(key, u256_to_h256(U256::from(10u64.pow(19)))); +} + +pub(crate) fn get_empty_storage() -> InMemoryStorage { + InMemoryStorage::with_system_contracts(hash_bytecode) +} + +// Inserts the contracts into the test environment, bypassing the +// deployer system 
contract. Besides the reference to storage +// it accepts a `contracts` tuple of information about the contract +// and whether or not it is an account. +fn insert_contracts(raw_storage: &mut InMemoryStorage, contracts: &[ContractsToDeploy]) { + for (contract, address, is_account) in contracts { + let deployer_code_key = get_code_key(address); + raw_storage.set_value(deployer_code_key, hash_bytecode(contract)); + + if *is_account { + let is_account_key = get_is_account_key(address); + raw_storage.set_value(is_account_key, u256_to_h256(1_u32.into())); + } + + raw_storage.store_factory_dep(hash_bytecode(contract), contract.clone()); + } +} diff --git a/core/lib/multivm/src/versions/vm_fast/tests/tracing_execution_error.rs b/core/lib/multivm/src/versions/vm_fast/tests/tracing_execution_error.rs new file mode 100644 index 000000000000..75144839006e --- /dev/null +++ b/core/lib/multivm/src/versions/vm_fast/tests/tracing_execution_error.rs @@ -0,0 +1,51 @@ +use zksync_types::{Execute, H160}; + +use crate::{ + interface::{TxExecutionMode, TxRevertReason, VmRevertReason}, + vm_fast::tests::{ + tester::{ExpectedError, TransactionTestInfo, VmTesterBuilder}, + utils::{get_execute_error_calldata, read_error_contract, BASE_SYSTEM_CONTRACTS}, + }, +}; + +#[test] +fn test_tracing_of_execution_errors() { + let contract_address = H160::random(); + let mut vm = VmTesterBuilder::new() + .with_empty_in_memory_storage() + .with_base_system_smart_contracts(BASE_SYSTEM_CONTRACTS.clone()) + .with_custom_contracts(vec![(read_error_contract(), contract_address, false)]) + .with_execution_mode(TxExecutionMode::VerifyExecute) + .with_deployer() + .with_random_rich_accounts(1) + .build(); + + let account = &mut vm.rich_accounts[0]; + + let tx = account.get_l2_tx_for_execute( + Execute { + contract_address, + calldata: get_execute_error_calldata(), + value: Default::default(), + factory_deps: vec![], + }, + None, + ); + + vm.execute_tx_and_verify(TransactionTestInfo::new_rejected( + tx, + ExpectedError { + revert_reason: TxRevertReason::TxReverted(VmRevertReason::General { + msg: "short".to_string(), + data: vec![ + 8, 195, 121, 160, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 32, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 5, 115, 104, 111, 114, 116, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, + ], + }), + modifier: None, + }, + )); +} diff --git a/core/lib/multivm/src/versions/vm_fast/tests/transfer.rs b/core/lib/multivm/src/versions/vm_fast/tests/transfer.rs new file mode 100644 index 000000000000..3b61b8ac7f1e --- /dev/null +++ b/core/lib/multivm/src/versions/vm_fast/tests/transfer.rs @@ -0,0 +1,218 @@ +use ethabi::Token; +use zksync_contracts::{load_contract, read_bytecode}; +use zksync_system_constants::L2_BASE_TOKEN_ADDRESS; +use zksync_types::{utils::storage_key_for_eth_balance, AccountTreeId, Address, Execute, U256}; +use zksync_utils::u256_to_h256; + +use crate::{ + interface::{TxExecutionMode, VmExecutionMode, VmInterface}, + vm_fast::tests::{ + tester::{get_empty_storage, VmTesterBuilder}, + utils::get_balance, + }, +}; + +enum TestOptions { + Send(U256), + Transfer(U256), +} + +fn test_send_or_transfer(test_option: TestOptions) { + let test_bytecode = read_bytecode( + "etc/contracts-test-data/artifacts-zk/contracts/transfer/transfer.sol/TransferTest.json", + ); + let recipeint_bytecode = read_bytecode( + 
"etc/contracts-test-data/artifacts-zk/contracts/transfer/transfer.sol/Recipient.json", + ); + let test_abi = load_contract( + "etc/contracts-test-data/artifacts-zk/contracts/transfer/transfer.sol/TransferTest.json", + ); + + let test_contract_address = Address::random(); + let recipient_address = Address::random(); + + let (value, calldata) = match test_option { + TestOptions::Send(value) => ( + value, + test_abi + .function("send") + .unwrap() + .encode_input(&[Token::Address(recipient_address), Token::Uint(value)]) + .unwrap(), + ), + TestOptions::Transfer(value) => ( + value, + test_abi + .function("transfer") + .unwrap() + .encode_input(&[Token::Address(recipient_address), Token::Uint(value)]) + .unwrap(), + ), + }; + + let mut storage = get_empty_storage(); + storage.set_value( + storage_key_for_eth_balance(&test_contract_address), + u256_to_h256(value), + ); + + let mut vm = VmTesterBuilder::new() + .with_storage(storage) + .with_execution_mode(TxExecutionMode::VerifyExecute) + .with_deployer() + .with_random_rich_accounts(1) + .with_custom_contracts(vec![ + (test_bytecode, test_contract_address, false), + (recipeint_bytecode, recipient_address, false), + ]) + .build(); + + let account = &mut vm.rich_accounts[0]; + let tx = account.get_l2_tx_for_execute( + Execute { + contract_address: test_contract_address, + calldata, + value: U256::zero(), + factory_deps: vec![], + }, + None, + ); + + vm.vm.push_transaction(tx); + let tx_result = vm.vm.execute(VmExecutionMode::OneTx); + assert!( + !tx_result.result.is_failed(), + "Transaction wasn't successful" + ); + + let batch_result = vm.vm.execute(VmExecutionMode::Batch); + assert!(!batch_result.result.is_failed(), "Batch wasn't successful"); + + let new_recipient_balance = get_balance( + AccountTreeId::new(L2_BASE_TOKEN_ADDRESS), + &recipient_address, + &mut vm.vm.world.storage, + vm.vm.inner.world_diff.get_storage_state(), + ); + + assert_eq!(new_recipient_balance, value); +} + +#[test] +fn test_send_and_transfer() { + test_send_or_transfer(TestOptions::Send(U256::zero())); + test_send_or_transfer(TestOptions::Send(U256::from(10).pow(18.into()))); + test_send_or_transfer(TestOptions::Transfer(U256::zero())); + test_send_or_transfer(TestOptions::Transfer(U256::from(10).pow(18.into()))); +} + +fn test_reentrancy_protection_send_or_transfer(test_option: TestOptions) { + let test_bytecode = read_bytecode( + "etc/contracts-test-data/artifacts-zk/contracts/transfer/transfer.sol/TransferTest.json", + ); + let reentrant_recipeint_bytecode = read_bytecode( + "etc/contracts-test-data/artifacts-zk/contracts/transfer/transfer.sol/ReentrantRecipient.json", + ); + let test_abi = load_contract( + "etc/contracts-test-data/artifacts-zk/contracts/transfer/transfer.sol/TransferTest.json", + ); + let reentrant_recipient_abi = load_contract( + "etc/contracts-test-data/artifacts-zk/contracts/transfer/transfer.sol/ReentrantRecipient.json", + ); + + let test_contract_address = Address::random(); + let reentrant_recipeint_address = Address::random(); + + let (value, calldata) = match test_option { + TestOptions::Send(value) => ( + value, + test_abi + .function("send") + .unwrap() + .encode_input(&[ + Token::Address(reentrant_recipeint_address), + Token::Uint(value), + ]) + .unwrap(), + ), + TestOptions::Transfer(value) => ( + value, + test_abi + .function("transfer") + .unwrap() + .encode_input(&[ + Token::Address(reentrant_recipeint_address), + Token::Uint(value), + ]) + .unwrap(), + ), + }; + + let mut vm = VmTesterBuilder::new() + .with_empty_in_memory_storage() 
+ .with_execution_mode(TxExecutionMode::VerifyExecute) + .with_deployer() + .with_random_rich_accounts(1) + .with_custom_contracts(vec![ + (test_bytecode, test_contract_address, false), + ( + reentrant_recipeint_bytecode, + reentrant_recipeint_address, + false, + ), + ]) + .build(); + + // First transaction, the job of which is to warm up the slots for balance of the recipient as well as its storage variable. + let account = &mut vm.rich_accounts[0]; + let tx1 = account.get_l2_tx_for_execute( + Execute { + contract_address: reentrant_recipeint_address, + calldata: reentrant_recipient_abi + .function("setX") + .unwrap() + .encode_input(&[]) + .unwrap(), + value: U256::from(1), + factory_deps: vec![], + }, + None, + ); + + vm.vm.push_transaction(tx1); + let tx1_result = vm.vm.execute(VmExecutionMode::OneTx); + assert!( + !tx1_result.result.is_failed(), + "Transaction 1 wasn't successful" + ); + + let tx2 = account.get_l2_tx_for_execute( + Execute { + contract_address: test_contract_address, + calldata, + value, + factory_deps: vec![], + }, + None, + ); + + vm.vm.push_transaction(tx2); + let tx2_result = vm.vm.execute(VmExecutionMode::OneTx); + assert!( + tx2_result.result.is_failed(), + "Transaction 2 should have failed, but it succeeded" + ); + + let batch_result = vm.vm.execute(VmExecutionMode::Batch); + assert!(!batch_result.result.is_failed(), "Batch wasn't successful"); +} + +#[test] +fn test_reentrancy_protection_send_and_transfer() { + test_reentrancy_protection_send_or_transfer(TestOptions::Send(U256::zero())); + test_reentrancy_protection_send_or_transfer(TestOptions::Send(U256::from(10).pow(18.into()))); + test_reentrancy_protection_send_or_transfer(TestOptions::Transfer(U256::zero())); + test_reentrancy_protection_send_or_transfer(TestOptions::Transfer( + U256::from(10).pow(18.into()), + )); +} diff --git a/core/lib/multivm/src/versions/vm_fast/tests/upgrade.rs b/core/lib/multivm/src/versions/vm_fast/tests/upgrade.rs new file mode 100644 index 000000000000..616436776090 --- /dev/null +++ b/core/lib/multivm/src/versions/vm_fast/tests/upgrade.rs @@ -0,0 +1,343 @@ +use zksync_contracts::{deployer_contract, load_sys_contract, read_bytecode}; +use zksync_test_account::TxType; +use zksync_types::{ + ethabi::{Contract, Token}, + get_code_key, get_known_code_key, + protocol_upgrade::ProtocolUpgradeTxCommonData, + Address, Execute, ExecuteTransactionCommon, Transaction, COMPLEX_UPGRADER_ADDRESS, + CONTRACT_DEPLOYER_ADDRESS, CONTRACT_FORCE_DEPLOYER_ADDRESS, H160, H256, + REQUIRED_L1_TO_L2_GAS_PER_PUBDATA_BYTE, U256, +}; +use zksync_utils::{bytecode::hash_bytecode, u256_to_h256}; + +use crate::{ + interface::{ + ExecutionResult, Halt, TxExecutionMode, VmExecutionMode, VmInterface, + VmInterfaceHistoryEnabled, + }, + vm_fast::tests::{ + tester::VmTesterBuilder, + utils::{ + get_complex_upgrade_abi, read_complex_upgrade, read_test_contract, + verify_required_storage, + }, + }, +}; + +/// In this test we ensure that the requirements for protocol upgrade transactions are enforced by the bootloader: +/// - This transaction must be the only one in block +/// - If present, this transaction must be the first one in block +#[test] +fn test_protocol_upgrade_is_first() { + let mut vm = VmTesterBuilder::new() + .with_empty_in_memory_storage() + .with_execution_mode(TxExecutionMode::VerifyExecute) + .with_random_rich_accounts(1) + .build(); + + let bytecode_hash = hash_bytecode(&read_test_contract()); + vm.storage + .borrow_mut() + .set_value(get_known_code_key(&bytecode_hash), u256_to_h256(1.into())); + 
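The rule the bootloader enforces below can be stated compactly: an upgrade transaction, if present, must occupy the first slot and be the only one of its kind in the batch. A toy encoding of that rule (the `Tx` enum is illustrative):

#[derive(PartialEq)]
enum Tx { Upgrade, Normal }

fn upgrade_position_ok(batch: &[Tx]) -> bool {
    batch.iter().enumerate().all(|(i, tx)| *tx != Tx::Upgrade || i == 0)
}

fn main() {
    assert!(upgrade_position_ok(&[Tx::Upgrade, Tx::Normal]));
    assert!(!upgrade_position_ok(&[Tx::Normal, Tx::Upgrade])); // not first: halt
    assert!(!upgrade_position_ok(&[Tx::Upgrade, Tx::Normal, Tx::Upgrade])); // not the only one
}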
+ // Here we just use some random transaction of protocol upgrade type: + let protocol_upgrade_transaction = get_forced_deploy_tx(&[ForceDeployment { + // The bytecode hash to put on an address + bytecode_hash, + // The address on which to deploy the bytecode hash to + address: H160::random(), + // Whether to run the constructor on the force deployment + call_constructor: false, + // The value with which to initialize a contract + value: U256::zero(), + // The constructor calldata + input: vec![], + }]); + + // Another random upgrade transaction + let another_protocol_upgrade_transaction = get_forced_deploy_tx(&[ForceDeployment { + // The bytecode hash to put on an address + bytecode_hash, + // The address on which to deploy the bytecode hash to + address: H160::random(), + // Whether to run the constructor on the force deployment + call_constructor: false, + // The value with which to initialize a contract + value: U256::zero(), + // The constructor calldata + input: vec![], + }]); + + let normal_l1_transaction = vm.rich_accounts[0] + .get_deploy_tx(&read_test_contract(), None, TxType::L1 { serial_id: 0 }) + .tx; + + let expected_error = + Halt::UnexpectedVMBehavior("Assertion error: Protocol upgrade tx not first".to_string()); + + vm.vm.make_snapshot(); + // Test 1: there must be only one system transaction in block + vm.vm.push_transaction(protocol_upgrade_transaction.clone()); + vm.vm.push_transaction(normal_l1_transaction.clone()); + vm.vm.push_transaction(another_protocol_upgrade_transaction); + + vm.vm.execute(VmExecutionMode::OneTx); + vm.vm.execute(VmExecutionMode::OneTx); + let result = vm.vm.execute(VmExecutionMode::OneTx); + assert_eq!( + result.result, + ExecutionResult::Halt { + reason: expected_error.clone() + } + ); + + // Test 2: the protocol upgrade tx must be the first one in block + vm.vm.rollback_to_the_latest_snapshot(); + vm.vm.make_snapshot(); + vm.vm.push_transaction(normal_l1_transaction.clone()); + vm.vm.push_transaction(protocol_upgrade_transaction.clone()); + + vm.vm.execute(VmExecutionMode::OneTx); + let result = vm.vm.execute(VmExecutionMode::OneTx); + assert_eq!( + result.result, + ExecutionResult::Halt { + reason: expected_error + } + ); + + vm.vm.rollback_to_the_latest_snapshot(); + vm.vm.make_snapshot(); + vm.vm.push_transaction(protocol_upgrade_transaction); + vm.vm.push_transaction(normal_l1_transaction); + + vm.vm.execute(VmExecutionMode::OneTx); + let result = vm.vm.execute(VmExecutionMode::OneTx); + assert!(!result.result.is_failed()); +} + +/// In this test we try to test how force deployments could be done via protocol upgrade transactions. +#[test] +fn test_force_deploy_upgrade() { + let mut vm = VmTesterBuilder::new() + .with_empty_in_memory_storage() + .with_execution_mode(TxExecutionMode::VerifyExecute) + .with_random_rich_accounts(1) + .build(); + + let storage_view = vm.storage.clone(); + let bytecode_hash = hash_bytecode(&read_test_contract()); + + let known_code_key = get_known_code_key(&bytecode_hash); + // It is generally expected that all the keys will be set as known prior to the protocol upgrade. 
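A toy model of that precondition: force-deploying a bytecode hash that was never marked as known should fail, which is why the test flips the known-code flag first. The maps below stand in for the real known-codes and account-code storage:

use std::collections::{HashMap, HashSet};

fn force_deploy(
    known: &HashSet<u64>,
    code: &mut HashMap<&'static str, u64>,
    addr: &'static str,
    hash: u64,
) -> Result<(), &'static str> {
    if !known.contains(&hash) {
        return Err("bytecode hash is not marked as known");
    }
    code.insert(addr, hash);
    Ok(())
}

fn main() {
    let mut known = HashSet::new();
    let mut code = HashMap::new();
    assert!(force_deploy(&known, &mut code, "0xdead", 42).is_err());
    known.insert(42); // mark the hash as known, as the test does up front
    assert!(force_deploy(&known, &mut code, "0xdead", 42).is_ok());
}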
+ storage_view + .borrow_mut() + .set_value(known_code_key, u256_to_h256(1.into())); + drop(storage_view); + + let address_to_deploy = H160::random(); + // Here we just use some random transaction of protocol upgrade type: + let transaction = get_forced_deploy_tx(&[ForceDeployment { + // The bytecode hash to put on an address + bytecode_hash, + // The address on which to deploy the bytecode hash to + address: address_to_deploy, + // Whether to run the constructor on the force deployment + call_constructor: false, + // The value with which to initialize a contract + value: U256::zero(), + // The constructor calldata + input: vec![], + }]); + + vm.vm.push_transaction(transaction); + + let result = vm.vm.execute(VmExecutionMode::OneTx); + assert!( + !result.result.is_failed(), + "The force upgrade was not successful" + ); + + let expected_slots = [(bytecode_hash, get_code_key(&address_to_deploy))]; + + // Verify that the bytecode has been set correctly + verify_required_storage( + &expected_slots, + &mut *vm.storage.borrow_mut(), + vm.vm.inner.world_diff.get_storage_state(), + ); +} + +/// Here we show how the work with the complex upgrader could be done +#[test] +fn test_complex_upgrader() { + let mut vm = VmTesterBuilder::new() + .with_empty_in_memory_storage() + .with_execution_mode(TxExecutionMode::VerifyExecute) + .with_random_rich_accounts(1) + .build(); + + let bytecode_hash = hash_bytecode(&read_complex_upgrade()); + let msg_sender_test_hash = hash_bytecode(&read_msg_sender_test()); + + // Let's assume that the bytecode for the implementation of the complex upgrade + // is already deployed in some address in user space + let upgrade_impl = H160::random(); + let account_code_key = get_code_key(&upgrade_impl); + + { + let mut storage = vm.storage.borrow_mut(); + storage.set_value(get_known_code_key(&bytecode_hash), u256_to_h256(1.into())); + storage.set_value( + get_known_code_key(&msg_sender_test_hash), + u256_to_h256(1.into()), + ); + storage.set_value(account_code_key, bytecode_hash); + storage.store_factory_dep(bytecode_hash, read_complex_upgrade()); + storage.store_factory_dep(msg_sender_test_hash, read_msg_sender_test()); + } + + let address_to_deploy1 = H160::random(); + let address_to_deploy2 = H160::random(); + + let transaction = get_complex_upgrade_tx( + upgrade_impl, + address_to_deploy1, + address_to_deploy2, + bytecode_hash, + ); + + vm.vm.push_transaction(transaction); + let result = vm.vm.execute(VmExecutionMode::OneTx); + assert!( + !result.result.is_failed(), + "The force upgrade was not successful" + ); + + let expected_slots = [ + (bytecode_hash, get_code_key(&address_to_deploy1)), + (bytecode_hash, get_code_key(&address_to_deploy2)), + ]; + + // Verify that the bytecode has been set correctly + verify_required_storage( + &expected_slots, + &mut *vm.storage.borrow_mut(), + vm.vm.inner.world_diff.get_storage_state(), + ); +} + +#[derive(Debug, Clone)] +struct ForceDeployment { + // The bytecode hash to put on an address + bytecode_hash: H256, + // The address on which to deploy the bytecode hash to + address: Address, + // Whether to run the constructor on the force deployment + call_constructor: bool, + // The value with which to initialize a contract + value: U256, + // The constructor calldata + input: Vec, +} + +fn get_forced_deploy_tx(deployment: &[ForceDeployment]) -> Transaction { + let deployer = deployer_contract(); + let contract_function = deployer.function("forceDeployOnAddresses").unwrap(); + + let encoded_deployments: Vec<_> = deployment + .iter() + 
.map(|deployment| {
+            Token::Tuple(vec![
+                Token::FixedBytes(deployment.bytecode_hash.as_bytes().to_vec()),
+                Token::Address(deployment.address),
+                Token::Bool(deployment.call_constructor),
+                Token::Uint(deployment.value),
+                Token::Bytes(deployment.input.clone()),
+            ])
+        })
+        .collect();
+
+    let params = [Token::Array(encoded_deployments)];
+
+    let calldata = contract_function
+        .encode_input(&params)
+        .expect("failed to encode parameters");
+
+    let execute = Execute {
+        contract_address: CONTRACT_DEPLOYER_ADDRESS,
+        calldata,
+        factory_deps: vec![],
+        value: U256::zero(),
+    };
+
+    Transaction {
+        common_data: ExecuteTransactionCommon::ProtocolUpgrade(ProtocolUpgradeTxCommonData {
+            sender: CONTRACT_FORCE_DEPLOYER_ADDRESS,
+            gas_limit: U256::from(200_000_000u32),
+            gas_per_pubdata_limit: REQUIRED_L1_TO_L2_GAS_PER_PUBDATA_BYTE.into(),
+            ..Default::default()
+        }),
+        execute,
+        received_timestamp_ms: 0,
+        raw_bytes: None,
+    }
+}
+
+// Returns the transaction that performs a complex protocol upgrade.
+// The first param is the address of the implementation of the complex upgrade
+// in user-space, while the next 3 params are params of the implementation itself.
+// For the explanation of the parameters, please refer to:
+// etc/contracts-test-data/complex-upgrade/complex-upgrade.sol
+fn get_complex_upgrade_tx(
+    implementation_address: Address,
+    address1: Address,
+    address2: Address,
+    bytecode_hash: H256,
+) -> Transaction {
+    let impl_contract = get_complex_upgrade_abi();
+    let impl_function = impl_contract.function("someComplexUpgrade").unwrap();
+    let impl_calldata = impl_function
+        .encode_input(&[
+            Token::Address(address1),
+            Token::Address(address2),
+            Token::FixedBytes(bytecode_hash.as_bytes().to_vec()),
+        ])
+        .unwrap();
+
+    let complex_upgrader = get_complex_upgrader_abi();
+    let upgrade_function = complex_upgrader.function("upgrade").unwrap();
+    let complex_upgrader_calldata = upgrade_function
+        .encode_input(&[
+            Token::Address(implementation_address),
+            Token::Bytes(impl_calldata),
+        ])
+        .unwrap();
+
+    let execute = Execute {
+        contract_address: COMPLEX_UPGRADER_ADDRESS,
+        calldata: complex_upgrader_calldata,
+        factory_deps: vec![],
+        value: U256::zero(),
+    };
+
+    Transaction {
+        common_data: ExecuteTransactionCommon::ProtocolUpgrade(ProtocolUpgradeTxCommonData {
+            sender: CONTRACT_FORCE_DEPLOYER_ADDRESS,
+            gas_limit: U256::from(200_000_000u32),
+            gas_per_pubdata_limit: REQUIRED_L1_TO_L2_GAS_PER_PUBDATA_BYTE.into(),
+            ..Default::default()
+        }),
+        execute,
+        received_timestamp_ms: 0,
+        raw_bytes: None,
+    }
+}
+
+fn read_msg_sender_test() -> Vec<u8> {
+    read_bytecode("etc/contracts-test-data/artifacts-zk/contracts/complex-upgrade/msg-sender.sol/MsgSenderTest.json")
+}
+
+fn get_complex_upgrader_abi() -> Contract {
+    load_sys_contract("ComplexUpgrader")
+}
diff --git a/core/lib/multivm/src/versions/vm_fast/tests/utils.rs b/core/lib/multivm/src/versions/vm_fast/tests/utils.rs
new file mode 100644
index 000000000000..0a72667bd80e
--- /dev/null
+++ b/core/lib/multivm/src/versions/vm_fast/tests/utils.rs
@@ -0,0 +1,128 @@
+use std::collections::BTreeMap;
+
+use ethabi::Contract;
+use once_cell::sync::Lazy;
+use vm2::{instruction_handlers::HeapInterface, HeapId, State};
+use zksync_contracts::{
+    load_contract, read_bytecode, read_zbin_bytecode, BaseSystemContracts, SystemContractCode,
+};
+use zksync_state::ReadStorage;
+use zksync_types::{
+    utils::storage_key_for_standard_token_balance, AccountTreeId, Address, StorageKey, H160, H256,
+    U256,
+};
+use zksync_utils::{bytecode::hash_bytecode,
diff --git a/core/lib/multivm/src/versions/vm_fast/tests/utils.rs b/core/lib/multivm/src/versions/vm_fast/tests/utils.rs
new file mode 100644
index 000000000000..0a72667bd80e
--- /dev/null
+++ b/core/lib/multivm/src/versions/vm_fast/tests/utils.rs
@@ -0,0 +1,128 @@
+use std::collections::BTreeMap;
+
+use ethabi::Contract;
+use once_cell::sync::Lazy;
+use vm2::{instruction_handlers::HeapInterface, HeapId, State};
+use zksync_contracts::{
+    load_contract, read_bytecode, read_zbin_bytecode, BaseSystemContracts, SystemContractCode,
+};
+use zksync_state::ReadStorage;
+use zksync_types::{
+    utils::storage_key_for_standard_token_balance, AccountTreeId, Address, StorageKey, H160, H256,
+    U256,
+};
+use zksync_utils::{bytecode::hash_bytecode, bytes_to_be_words, h256_to_u256, u256_to_h256};
+
+pub(crate) static BASE_SYSTEM_CONTRACTS: Lazy<BaseSystemContracts> =
+    Lazy::new(BaseSystemContracts::load_from_disk);
+
+pub(crate) fn verify_required_memory(state: &State, required_values: Vec<(U256, HeapId, u32)>) {
+    for (required_value, memory_page, cell) in required_values {
+        let current_value = state.heaps[memory_page].read_u256(cell * 32);
+        assert_eq!(current_value, required_value);
+    }
+}
+
+pub(crate) fn verify_required_storage(
+    required_values: &[(H256, StorageKey)],
+    main_storage: &mut impl ReadStorage,
+    storage_changes: &BTreeMap<(H160, U256), U256>,
+) {
+    for &(required_value, key) in required_values {
+        let current_value = storage_changes
+            .get(&(*key.account().address(), h256_to_u256(*key.key())))
+            .copied()
+            .unwrap_or_else(|| h256_to_u256(main_storage.read_value(&key)));
+
+        assert_eq!(
+            u256_to_h256(current_value),
+            required_value,
+            "Invalid value at key {key:?}"
+        );
+    }
+}
+
+pub(crate) fn get_balance(
+    token_id: AccountTreeId,
+    account: &Address,
+    main_storage: &mut impl ReadStorage,
+    storage_changes: &BTreeMap<(H160, U256), U256>,
+) -> U256 {
+    let key = storage_key_for_standard_token_balance(token_id, account);
+
+    storage_changes
+        .get(&(*key.account().address(), h256_to_u256(*key.key())))
+        .copied()
+        .unwrap_or_else(|| h256_to_u256(main_storage.read_value(&key)))
+}
+
+pub(crate) fn read_test_contract() -> Vec<u8> {
+    read_bytecode("etc/contracts-test-data/artifacts-zk/contracts/counter/counter.sol/Counter.json")
+}
+
+pub(crate) fn get_bootloader(test: &str) -> SystemContractCode {
+    let bootloader_code = read_zbin_bytecode(format!(
+        "contracts/system-contracts/bootloader/tests/artifacts/{}.yul.zbin",
+        test
+    ));
+
+    let bootloader_hash = hash_bytecode(&bootloader_code);
+    SystemContractCode {
+        code: bytes_to_be_words(bootloader_code),
+        hash: bootloader_hash,
+    }
+}
+
+pub(crate) fn read_error_contract() -> Vec<u8> {
+    read_bytecode(
+        "etc/contracts-test-data/artifacts-zk/contracts/error/error.sol/SimpleRequire.json",
+    )
+}
+
+pub(crate) fn get_execute_error_calldata() -> Vec<u8> {
+    let test_contract = load_contract(
+        "etc/contracts-test-data/artifacts-zk/contracts/error/error.sol/SimpleRequire.json",
+    );
+
+    let function = test_contract.function("require_short").unwrap();
+
+    function
+        .encode_input(&[])
+        .expect("failed to encode parameters")
+}
+
+pub(crate) fn read_many_owners_custom_account_contract() -> (Vec<u8>, Contract) {
+    let path = "etc/contracts-test-data/artifacts-zk/contracts/custom-account/many-owners-custom-account.sol/ManyOwnersCustomAccount.json";
+    (read_bytecode(path), load_contract(path))
+}
+
+pub(crate) fn read_precompiles_contract() -> Vec<u8> {
+    read_bytecode(
+        "etc/contracts-test-data/artifacts-zk/contracts/precompiles/precompiles.sol/Precompiles.json",
+    )
+}
+
+pub(crate) fn load_precompiles_contract() -> Contract {
+    load_contract(
+        "etc/contracts-test-data/artifacts-zk/contracts/precompiles/precompiles.sol/Precompiles.json",
+    )
+}
+
+pub(crate) fn read_nonce_holder_tester() -> Vec<u8> {
+    read_bytecode("etc/contracts-test-data/artifacts-zk/contracts/custom-account/nonce-holder-test.sol/NonceHolderTest.json")
+}
+
+pub(crate) fn read_complex_upgrade() -> Vec<u8> {
+    read_bytecode("etc/contracts-test-data/artifacts-zk/contracts/complex-upgrade/complex-upgrade.sol/ComplexUpgrade.json")
+}
+
+pub(crate) fn get_complex_upgrade_abi() -> Contract {
+    load_contract(
+        "etc/contracts-test-data/artifacts-zk/contracts/complex-upgrade/complex-upgrade.sol/ComplexUpgrade.json"
+    )
+}
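The lookup pattern shared by `verify_required_storage` and `get_balance` above is an overlay: pending VM writes shadow committed storage, and slots absent from both fall back to the default value. A self-contained sketch of that rule with simplified key and value types (not the real zksync types):

```rust
use std::collections::BTreeMap;

// Pending writes (the VM's `storage_changes`) take precedence over committed state.
fn overlay_get(
    changes: &BTreeMap<(u64, u64), u128>, // (account, slot) -> pending value
    committed: &BTreeMap<(u64, u64), u128>,
    key: (u64, u64),
) -> u128 {
    changes
        .get(&key)
        .copied()
        .unwrap_or_else(|| committed.get(&key).copied().unwrap_or_default())
}

fn main() {
    let committed = BTreeMap::from([((1, 7), 100)]);
    let changes = BTreeMap::from([((1, 7), 250)]);
    assert_eq!(overlay_get(&changes, &committed, (1, 7)), 250); // pending write wins
    assert_eq!(overlay_get(&changes, &committed, (2, 0)), 0); // untouched slot -> default
}
```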
+
+pub(crate) fn read_expensive_contract() -> (Vec<u8>, Contract) {
+    const PATH: &str =
+        "etc/contracts-test-data/artifacts-zk/contracts/expensive/expensive.sol/Expensive.json";
+    (read_bytecode(PATH), load_contract(PATH))
+}
diff --git a/core/lib/multivm/src/versions/vm_fast/transaction_data.rs b/core/lib/multivm/src/versions/vm_fast/transaction_data.rs
new file mode 100644
index 000000000000..502be0dc22cc
--- /dev/null
+++ b/core/lib/multivm/src/versions/vm_fast/transaction_data.rs
@@ -0,0 +1,338 @@
+use std::convert::TryInto;
+
+use zksync_types::{
+    ethabi::{encode, Address, Token},
+    fee::{encoding_len, Fee},
+    l1::is_l1_tx_type,
+    l2::{L2Tx, TransactionType},
+    transaction_request::{PaymasterParams, TransactionRequest},
+    web3::Bytes,
+    Execute, ExecuteTransactionCommon, L2ChainId, L2TxCommonData, Nonce, Transaction, H256, U256,
+};
+use zksync_utils::{address_to_h256, bytecode::hash_bytecode, bytes_to_be_words, h256_to_u256};
+
+use crate::vm_latest::{
+    constants::{MAX_GAS_PER_PUBDATA_BYTE, TX_MAX_COMPUTE_GAS_LIMIT},
+    utils::overhead::derive_overhead,
+};
+
+/// This structure represents the data that is used by
+/// the Bootloader to describe the transaction.
+#[derive(Debug, Default, Clone)]
+pub(crate) struct TransactionData {
+    pub(crate) tx_type: u8,
+    pub(crate) from: Address,
+    pub(crate) to: Address,
+    pub(crate) gas_limit: U256,
+    pub(crate) pubdata_price_limit: U256,
+    pub(crate) max_fee_per_gas: U256,
+    pub(crate) max_priority_fee_per_gas: U256,
+    pub(crate) paymaster: Address,
+    pub(crate) nonce: U256,
+    pub(crate) value: U256,
+    // The reserved fields that are unique for different types of transactions.
+    // E.g. nonce is currently used in all transactions, but it should not be mandatory
+    // in the long run.
+    pub(crate) reserved: [U256; 4],
+    pub(crate) data: Vec<u8>,
+    pub(crate) signature: Vec<u8>,
+    // The factory deps provided with the transaction.
+    // Note that *only hashes* of these bytecodes are signed by the user
+    // and they are used in the ABI encoding of the struct.
+    // TODO: include this into the tx signature as part of SMA-1010
+    pub(crate) factory_deps: Vec<Vec<u8>>,
+    pub(crate) paymaster_input: Vec<u8>,
+    pub(crate) reserved_dynamic: Vec<u8>,
+    pub(crate) raw_bytes: Option<Vec<u8>>,
+}
+
+impl From<Transaction> for TransactionData {
+    fn from(execute_tx: Transaction) -> Self {
+        match execute_tx.common_data {
+            ExecuteTransactionCommon::L2(common_data) => {
+                let nonce = U256::from_big_endian(&common_data.nonce.to_be_bytes());
+
+                let should_check_chain_id = if matches!(
+                    common_data.transaction_type,
+                    TransactionType::LegacyTransaction
+                ) && common_data.extract_chain_id().is_some()
+                {
+                    U256([1, 0, 0, 0])
+                } else {
+                    U256::zero()
+                };
+
+                // Ethereum transactions do not sign a gas per pubdata limit, so we need to use
+                // some default value for them. We use the maximum possible value that is allowed
+                // by the bootloader (i.e. we cannot use u64::MAX, because the bootloader requires
+                // gas per pubdata for such transactions to be no higher than `MAX_GAS_PER_PUBDATA_BYTE`).
+                let gas_per_pubdata_limit = if common_data.transaction_type.is_ethereum_type() {
+                    MAX_GAS_PER_PUBDATA_BYTE.into()
+                } else {
+                    common_data.fee.gas_per_pubdata_limit
+                };
+
+                TransactionData {
+                    tx_type: (common_data.transaction_type as u32) as u8,
+                    from: common_data.initiator_address,
+                    to: execute_tx.execute.contract_address,
+                    gas_limit: common_data.fee.gas_limit,
+                    pubdata_price_limit: gas_per_pubdata_limit,
+                    max_fee_per_gas: common_data.fee.max_fee_per_gas,
+                    max_priority_fee_per_gas: common_data.fee.max_priority_fee_per_gas,
+                    paymaster: common_data.paymaster_params.paymaster,
+                    nonce,
+                    value: execute_tx.execute.value,
+                    reserved: [
+                        should_check_chain_id,
+                        U256::zero(),
+                        U256::zero(),
+                        U256::zero(),
+                    ],
+                    data: execute_tx.execute.calldata,
+                    signature: common_data.signature,
+                    factory_deps: execute_tx.execute.factory_deps,
+                    paymaster_input: common_data.paymaster_params.paymaster_input,
+                    reserved_dynamic: vec![],
+                    raw_bytes: execute_tx.raw_bytes.map(|a| a.0),
+                }
+            }
+            ExecuteTransactionCommon::L1(common_data) => {
+                let refund_recipient = h256_to_u256(address_to_h256(&common_data.refund_recipient));
+                TransactionData {
+                    tx_type: common_data.tx_format() as u8,
+                    from: common_data.sender,
+                    to: execute_tx.execute.contract_address,
+                    gas_limit: common_data.gas_limit,
+                    pubdata_price_limit: common_data.gas_per_pubdata_limit,
+                    // It doesn't matter what we put here, since
+                    // the bootloader does not charge anything
+                    max_fee_per_gas: common_data.max_fee_per_gas,
+                    max_priority_fee_per_gas: U256::zero(),
+                    paymaster: Address::default(),
+                    nonce: U256::from(common_data.serial_id.0), // priority op ID
+                    value: execute_tx.execute.value,
+                    reserved: [
+                        common_data.to_mint,
+                        refund_recipient,
+                        U256::zero(),
+                        U256::zero(),
+                    ],
+                    data: execute_tx.execute.calldata,
+                    // The signature isn't checked for L1 transactions, so we don't care
+                    signature: vec![],
+                    factory_deps: execute_tx.execute.factory_deps,
+                    paymaster_input: vec![],
+                    reserved_dynamic: vec![],
+                    raw_bytes: None,
+                }
+            }
+            ExecuteTransactionCommon::ProtocolUpgrade(common_data) => {
+                let refund_recipient = h256_to_u256(address_to_h256(&common_data.refund_recipient));
+                TransactionData {
+                    tx_type: common_data.tx_format() as u8,
+                    from: common_data.sender,
+                    to: execute_tx.execute.contract_address,
+                    gas_limit: common_data.gas_limit,
+                    pubdata_price_limit: common_data.gas_per_pubdata_limit,
+                    // It doesn't matter what we put here, since
+                    // the bootloader does not charge anything
+                    max_fee_per_gas: common_data.max_fee_per_gas,
+                    max_priority_fee_per_gas: U256::zero(),
+                    paymaster: Address::default(),
+                    nonce: U256::from(common_data.upgrade_id as u16),
+                    value: execute_tx.execute.value,
+                    reserved: [
+                        common_data.to_mint,
+                        refund_recipient,
+                        U256::zero(),
+                        U256::zero(),
+                    ],
+                    data: execute_tx.execute.calldata,
+                    // The signature isn't checked for L1 transactions, so we don't care
+                    signature: vec![],
+                    factory_deps: execute_tx.execute.factory_deps,
+                    paymaster_input: vec![],
+                    reserved_dynamic: vec![],
+                    raw_bytes: None,
+                }
+            }
+        }
+    }
+}
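The L2 arm above substitutes a default `gas_per_pubdata_limit` for Ethereum-style transactions, which never sign one. A condensed sketch of that rule (the constant's value here is illustrative only, not the real `MAX_GAS_PER_PUBDATA_BYTE`):

```rust
// Illustrative stand-in; the real constant lives in `vm_latest::constants`.
const MAX_GAS_PER_PUBDATA_BYTE: u64 = 50_000;

fn effective_gas_per_pubdata(is_ethereum_type: bool, signed_limit: u64) -> u64 {
    if is_ethereum_type {
        // Legacy/EIP-1559-style txs don't sign this field; use the maximum the bootloader accepts.
        MAX_GAS_PER_PUBDATA_BYTE
    } else {
        // EIP-712 txs carry an explicit, signed limit.
        signed_limit
    }
}

fn main() {
    assert_eq!(effective_gas_per_pubdata(true, 0), MAX_GAS_PER_PUBDATA_BYTE);
    assert_eq!(effective_gas_per_pubdata(false, 800), 800);
}
```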
+
+impl TransactionData {
+    pub(crate) fn abi_encode_with_custom_factory_deps(
+        self,
+        factory_deps_hashes: Vec<U256>,
+    ) -> Vec<u8> {
+        encode(&[Token::Tuple(vec![
+            Token::Uint(U256::from_big_endian(&self.tx_type.to_be_bytes())),
+            Token::Address(self.from),
+            Token::Address(self.to),
+            Token::Uint(self.gas_limit),
+            Token::Uint(self.pubdata_price_limit),
+            Token::Uint(self.max_fee_per_gas),
+            Token::Uint(self.max_priority_fee_per_gas),
+            Token::Address(self.paymaster),
+            Token::Uint(self.nonce),
+            Token::Uint(self.value),
+            Token::FixedArray(self.reserved.iter().copied().map(Token::Uint).collect()),
+            Token::Bytes(self.data),
+            Token::Bytes(self.signature),
+            Token::Array(factory_deps_hashes.into_iter().map(Token::Uint).collect()),
+            Token::Bytes(self.paymaster_input),
+            Token::Bytes(self.reserved_dynamic),
+        ])])
+    }
+
+    pub(crate) fn abi_encode(self) -> Vec<u8> {
+        let factory_deps_hashes = self
+            .factory_deps
+            .iter()
+            .map(|dep| h256_to_u256(hash_bytecode(dep)))
+            .collect();
+        self.abi_encode_with_custom_factory_deps(factory_deps_hashes)
+    }
+
+    pub(crate) fn into_tokens(self) -> Vec<U256> {
+        let bytes = self.abi_encode();
+        assert!(bytes.len() % 32 == 0);
+
+        bytes_to_be_words(bytes)
+    }
+
+    pub(crate) fn overhead_gas(&self) -> u32 {
+        let encoded_len = encoding_len(
+            self.data.len() as u64,
+            self.signature.len() as u64,
+            self.factory_deps.len() as u64,
+            self.paymaster_input.len() as u64,
+            self.reserved_dynamic.len() as u64,
+        );
+
+        derive_overhead(encoded_len)
+    }
+
+    pub(crate) fn trusted_ergs_limit(&self) -> U256 {
+        // No transaction is allowed to spend more than `TX_MAX_COMPUTE_GAS_LIMIT` gas on compute.
+        U256::from(TX_MAX_COMPUTE_GAS_LIMIT).min(self.gas_limit)
+    }
+
+    pub(crate) fn tx_hash(&self, chain_id: L2ChainId) -> H256 {
+        if is_l1_tx_type(self.tx_type) {
+            return self.canonical_l1_tx_hash().unwrap();
+        }
+
+        let l2_tx: L2Tx = self.clone().try_into().unwrap();
+        let mut transaction_request: TransactionRequest = l2_tx.into();
+        transaction_request.chain_id = Some(chain_id.as_u64());
+
+        // It is assumed that the `TransactionData` always has all the necessary components to recover the hash.
+        transaction_request
+            .get_tx_hash()
+            .expect("Could not recover L2 transaction hash")
+    }
+
+    fn canonical_l1_tx_hash(&self) -> Result<H256, TxHashCalculationError> {
+        use zksync_types::web3::keccak256;
+
+        if !is_l1_tx_type(self.tx_type) {
+            return Err(TxHashCalculationError::CannotCalculateL1HashForL2Tx);
+        }
+
+        let encoded_bytes = self.clone().abi_encode();
+
+        Ok(H256(keccak256(&encoded_bytes)))
+    }
+}
+
+#[derive(Debug, Clone, Copy)]
+pub(crate) enum TxHashCalculationError {
+    CannotCalculateL1HashForL2Tx,
+    CannotCalculateL2HashForL1Tx,
+}
+
+impl TryInto<L2Tx> for TransactionData {
+    type Error = TxHashCalculationError;
+
+    fn try_into(self) -> Result<L2Tx, Self::Error> {
+        if is_l1_tx_type(self.tx_type) {
+            return Err(TxHashCalculationError::CannotCalculateL2HashForL1Tx);
+        }
+
+        let common_data = L2TxCommonData {
+            transaction_type: (self.tx_type as u32).try_into().unwrap(),
+            nonce: Nonce(self.nonce.as_u32()),
+            fee: Fee {
+                max_fee_per_gas: self.max_fee_per_gas,
+                max_priority_fee_per_gas: self.max_priority_fee_per_gas,
+                gas_limit: self.gas_limit,
+                gas_per_pubdata_limit: self.pubdata_price_limit,
+            },
+            signature: self.signature,
+            input: None,
+            initiator_address: self.from,
+            paymaster_params: PaymasterParams {
+                paymaster: self.paymaster,
+                paymaster_input: self.paymaster_input,
+            },
+        };
+        let execute = Execute {
+            contract_address: self.to,
+            value: self.value,
+            calldata: self.data,
+            factory_deps: self.factory_deps,
+        };
+
+        Ok(L2Tx {
+            execute,
+            common_data,
+            received_timestamp_ms: 0,
+            raw_bytes: self.raw_bytes.map(Bytes::from),
+        })
+    }
+}
+
+#[cfg(test)]
+mod tests {
+    use zksync_types::fee::encoding_len;
+
+    use super::*;
+
+    #[test]
+    fn test_consistency_with_encoding_length() {
+        let transaction = TransactionData {
+            tx_type: 113,
+            from: Address::random(),
+            to: Address::random(),
+            gas_limit: U256::from(1u32),
+            pubdata_price_limit: U256::from(1u32),
+            max_fee_per_gas: U256::from(1u32),
+            max_priority_fee_per_gas: U256::from(1u32),
+            paymaster: Address::random(),
+            nonce: U256::zero(),
+            value: U256::zero(),
+            // The reserved fields that are unique for different types of transactions.
+            // E.g. nonce is currently used in all transactions, but it should not be mandatory
+            // in the long run.
+            reserved: [U256::zero(); 4],
+            data: vec![0u8; 65],
+            signature: vec![0u8; 75],
+            // The factory deps provided with the transaction.
+            // Note that *only hashes* of these bytecodes are signed by the user
+            // and they are used in the ABI encoding of the struct.
+            // TODO: include this into the tx signature as part of SMA-1010
+            factory_deps: vec![vec![0u8; 32], vec![1u8; 32]],
+            paymaster_input: vec![0u8; 85],
+            reserved_dynamic: vec![0u8; 32],
+            raw_bytes: None,
+        };
+
+        let assumed_encoded_len = encoding_len(65, 75, 2, 85, 32);
+
+        let true_encoding_len = transaction.into_tokens().len();
+
+        assert_eq!(assumed_encoded_len, true_encoding_len);
+    }
+}
diff --git a/core/lib/multivm/src/versions/vm_fast/vm.rs b/core/lib/multivm/src/versions/vm_fast/vm.rs
new file mode 100644
index 000000000000..a4dad0b324de
--- /dev/null
+++ b/core/lib/multivm/src/versions/vm_fast/vm.rs
@@ -0,0 +1,794 @@
+use std::{collections::HashMap, fmt};
+
+use vm2::{
+    decode::decode_program, fat_pointer::FatPointer, instruction_handlers::HeapInterface,
+    ExecutionEnd, Program, Settings, VirtualMachine,
+};
+use zk_evm_1_5_0::zkevm_opcode_defs::system_params::INITIAL_FRAME_FORMAL_EH_LOCATION;
+use zksync_contracts::SystemContractCode;
+use zksync_state::ReadStorage;
+use zksync_types::{
+    event::{
+        extract_l2tol1logs_from_l1_messenger, extract_long_l2_to_l1_messages,
+        L1_MESSENGER_BYTECODE_PUBLICATION_EVENT_SIGNATURE,
+    },
+    l1::is_l1_tx_type,
+    l2_to_l1_log::UserL2ToL1Log,
+    utils::key_for_eth_balance,
+    writes::{
+        compression::compress_with_best_strategy, StateDiffRecord, BYTES_PER_DERIVED_KEY,
+        BYTES_PER_ENUMERATION_INDEX,
+    },
+    AccountTreeId, StorageKey, StorageLog, StorageLogKind, StorageLogWithPreviousValue,
+    BOOTLOADER_ADDRESS, H160, KNOWN_CODES_STORAGE_ADDRESS, L1_MESSENGER_ADDRESS,
+    L2_BASE_TOKEN_ADDRESS, U256,
+};
+use zksync_utils::{bytecode::hash_bytecode, h256_to_u256, u256_to_h256};
+
+use super::{
+    bootloader_state::{BootloaderState, BootloaderStateSnapshot},
+    bytecode::compress_bytecodes,
+    hook::Hook,
+    initial_bootloader_memory::bootloader_initial_memory,
+    transaction_data::TransactionData,
+};
+use crate::{
+    glue::GlueInto,
+    interface::{
+        BytecodeCompressionError, Halt, TxRevertReason, VmInterface, VmInterfaceHistoryEnabled,
+        VmRevertReason,
+    },
+    vm_fast::{
+        bootloader_state::utils::{apply_l2_block, apply_pubdata_to_memory},
+        events::merge_events,
+        pubdata::PubdataInput,
+        refund::compute_refund,
+    },
+    vm_latest::{
+        constants::{
+            get_vm_hook_params_start_position, get_vm_hook_position, OPERATOR_REFUNDS_OFFSET,
+            TX_GAS_LIMIT_OFFSET, VM_HOOK_PARAMS_COUNT,
+        },
+        BootloaderMemory, CurrentExecutionState, ExecutionResult, FinishedL1Batch, L1BatchEnv,
+        L2BlockEnv, MultiVMSubversion, Refunds, SystemEnv, VmExecutionLogs, VmExecutionMode,
+        VmExecutionResultAndLogs, VmExecutionStatistics,
+    },
+};
+
+const VM_VERSION: MultiVMSubversion = MultiVMSubversion::IncreasedBootloaderMemory;
+
+pub struct Vm<S> {
+    pub(crate) world: World<S>,
+    pub(crate) inner: VirtualMachine,
+    suspended_at: u16,
+    gas_for_account_validation: u32,
+    pub(crate) bootloader_state: BootloaderState,
+    pub(crate) batch_env: L1BatchEnv,
+    pub(crate) system_env: SystemEnv,
+    snapshot: Option<VmSnapshot>,
+}
+
+impl<S: ReadStorage> Vm<S> {
+    fn run(
+        &mut self,
+        execution_mode: VmExecutionMode,
+        track_refunds: bool,
+    ) -> (ExecutionResult, Refunds) {
+        let mut refunds = Refunds {
+            gas_refunded: 0,
+            operator_suggested_refund: 0,
+        };
+        let mut last_tx_result = None;
+        let mut pubdata_before = self.inner.world_diff.pubdata() as u32;
+
+        let result = loop {
+            let hook = match self.inner.resume_from(self.suspended_at, &mut self.world) {
+                ExecutionEnd::SuspendedOnHook {
+                    hook,
+                    pc_to_resume_from,
+                } => {
+                    self.suspended_at = pc_to_resume_from;
+                    hook
+                }
+                ExecutionEnd::ProgramFinished(output) => break ExecutionResult::Success { output },
+                ExecutionEnd::Reverted(output) => {
+                    break match TxRevertReason::parse_error(&output) {
+                        TxRevertReason::TxReverted(output) => ExecutionResult::Revert { output },
+                        TxRevertReason::Halt(reason) => ExecutionResult::Halt { reason },
+                    }
+                }
+                ExecutionEnd::Panicked => {
+                    break ExecutionResult::Halt {
+                        reason: if self.gas_remaining() == 0 {
+                            Halt::BootloaderOutOfGas
+                        } else {
+                            Halt::VMPanic
+                        },
+                    }
+                }
+            };
+
+            match Hook::from_u32(hook) {
+                Hook::AccountValidationEntered | Hook::AccountValidationExited => {
+                    // TODO (PLA-908): implement account validation
+                }
+                Hook::TxHasEnded => {
+                    if let VmExecutionMode::OneTx = execution_mode {
+                        break last_tx_result.take().unwrap();
+                    }
+                }
+                Hook::AskOperatorForRefund => {
+                    if track_refunds {
+                        let [bootloader_refund, gas_spent_on_pubdata, gas_per_pubdata_byte] =
+                            self.get_hook_params();
+                        let current_tx_index = self.bootloader_state.current_tx();
+                        let tx_description_offset = self
+                            .bootloader_state
+                            .get_tx_description_offset(current_tx_index);
+                        let tx_gas_limit = self
+                            .read_word_from_bootloader_heap(
+                                tx_description_offset + TX_GAS_LIMIT_OFFSET,
+                            )
+                            .as_u64();
+
+                        let pubdata_published = self.inner.world_diff.pubdata() as u32;
+
+                        refunds.operator_suggested_refund = compute_refund(
+                            &self.batch_env,
+                            bootloader_refund.as_u64(),
+                            gas_spent_on_pubdata.as_u64(),
+                            tx_gas_limit,
+                            gas_per_pubdata_byte.low_u32(),
+                            pubdata_published.saturating_sub(pubdata_before),
+                            self.bootloader_state
+                                .last_l2_block()
+                                .txs
+                                .last()
+                                .unwrap()
+                                .hash,
+                        );
+
+                        pubdata_before = pubdata_published;
+                        let refund_value = refunds.operator_suggested_refund;
+                        self.write_to_bootloader_heap([(
+                            OPERATOR_REFUNDS_OFFSET + current_tx_index,
+                            refund_value.into(),
+                        )]);
+                        self.bootloader_state
+                            .set_refund_for_current_tx(refund_value);
+                    }
+                }
+                Hook::NotifyAboutRefund => {
+                    if track_refunds {
+                        refunds.gas_refunded = self.get_hook_params()[0].low_u64()
+                    }
+                }
+                Hook::PostResult => {
+                    let result = self.get_hook_params()[0];
+                    let value = self.get_hook_params()[1];
+                    let fp = FatPointer::from(value);
+                    assert_eq!(fp.offset, 0);
+
+                    let return_data = self.inner.state.heaps[fp.memory_page]
+                        .read_range_big_endian(fp.start..fp.start + fp.length);
+
+                    last_tx_result = Some(if result.is_zero() {
+                        ExecutionResult::Revert {
+                            output: VmRevertReason::from(return_data.as_slice()),
+                        }
+                    } else {
+                        ExecutionResult::Success {
+                            output: return_data,
+                        }
+                    });
+                }
+                Hook::FinalBatchInfo => {
+                    // set fictive l2 block
+                    let txs_index = self.bootloader_state.free_tx_index();
+                    let l2_block = self.bootloader_state.insert_fictive_l2_block();
+                    let mut memory = vec![];
+                    apply_l2_block(&mut memory, l2_block, txs_index);
+                    self.write_to_bootloader_heap(memory);
+                }
transaction"); + } + + let events = + merge_events(self.inner.world_diff.events(), self.batch_env.number); + + let published_bytecodes = events + .iter() + .filter(|event| { + // Filter events from the l1 messenger contract that match the expected signature. + event.address == L1_MESSENGER_ADDRESS + && !event.indexed_topics.is_empty() + && event.indexed_topics[0] + == *L1_MESSENGER_BYTECODE_PUBLICATION_EVENT_SIGNATURE + }) + .map(|event| { + let hash = U256::from_big_endian(&event.value[..32]); + self.world + .bytecode_cache + .get(&hash) + .expect("published unknown bytecode") + .clone() + }) + .collect(); + + let pubdata_input = PubdataInput { + user_logs: extract_l2tol1logs_from_l1_messenger(&events), + l2_to_l1_messages: extract_long_l2_to_l1_messages(&events), + published_bytecodes, + state_diffs: self + .compute_state_diffs() + .filter(|diff| diff.address != L1_MESSENGER_ADDRESS) + .collect(), + }; + + // Save the pubdata for the future initial bootloader memory building + self.bootloader_state + .set_pubdata_input(pubdata_input.clone()); + + // Apply the pubdata to the current memory + let mut memory_to_apply = vec![]; + + apply_pubdata_to_memory(&mut memory_to_apply, pubdata_input); + self.write_to_bootloader_heap(memory_to_apply); + } + + Hook::PaymasterValidationEntered | Hook::ValidationStepEnded => { /* unused */ } + Hook::DebugLog | Hook::DebugReturnData | Hook::NearCallCatch => { + // These hooks are for debug purposes only + } + } + }; + + (result, refunds) + } + + fn get_hook_params(&self) -> [U256; 3] { + (get_vm_hook_params_start_position(VM_VERSION) + ..get_vm_hook_params_start_position(VM_VERSION) + VM_HOOK_PARAMS_COUNT) + .map(|word| self.read_word_from_bootloader_heap(word as usize)) + .collect::>() + .try_into() + .unwrap() + } + + /// Should only be used when the bootloader is executing (e.g., when handling hooks). + pub(crate) fn read_word_from_bootloader_heap(&self, word: usize) -> U256 { + self.inner.state.heaps[vm2::FIRST_HEAP].read_u256(word as u32 * 32) + } + + /// Should only be used when the bootloader is executing (e.g., when handling hooks). 
+    pub(crate) fn write_to_bootloader_heap(
+        &mut self,
+        memory: impl IntoIterator<Item = (usize, U256)>,
+    ) {
+        assert!(self.inner.state.previous_frames.is_empty());
+        for (slot, value) in memory {
+            self.inner
+                .state
+                .heaps
+                .write_u256(vm2::FIRST_HEAP, slot as u32 * 32, value);
+        }
+    }
+
+    pub(crate) fn insert_bytecodes<'a>(&mut self, bytecodes: impl IntoIterator<Item = &'a [u8]>) {
+        for code in bytecodes {
+            let hash = h256_to_u256(hash_bytecode(code));
+            self.world.bytecode_cache.insert(hash, code.into());
+        }
+    }
+
+    pub(crate) fn push_transaction_inner(
+        &mut self,
+        tx: zksync_types::Transaction,
+        refund: u64,
+        with_compression: bool,
+    ) {
+        let tx: TransactionData = tx.into();
+        let overhead = tx.overhead_gas();
+
+        self.insert_bytecodes(tx.factory_deps.iter().map(|dep| &dep[..]));
+
+        let compressed_bytecodes = if is_l1_tx_type(tx.tx_type) || !with_compression {
+            // L1 transactions do not need compression
+            vec![]
+        } else {
+            compress_bytecodes(&tx.factory_deps, |hash| {
+                self.inner
+                    .world_diff
+                    .get_storage_state()
+                    .get(&(KNOWN_CODES_STORAGE_ADDRESS, h256_to_u256(hash)))
+                    .map(|x| !x.is_zero())
+                    .unwrap_or_else(|| self.world.storage.is_bytecode_known(&hash))
+            })
+        };
+
+        let trusted_ergs_limit = tx.trusted_ergs_limit();
+
+        let memory = self.bootloader_state.push_tx(
+            tx,
+            overhead,
+            refund,
+            compressed_bytecodes,
+            trusted_ergs_limit,
+            self.system_env.chain_id,
+        );
+
+        self.write_to_bootloader_heap(memory);
+    }
+
+    fn compute_state_diffs(&mut self) -> impl Iterator<Item = StateDiffRecord> + '_ {
+        let storage = &mut self.world.storage;
+
+        self.inner.world_diff.get_storage_changes().map(
+            move |((address, key), (initial_value, final_value))| {
+                let storage_key = StorageKey::new(AccountTreeId::new(address), u256_to_h256(key));
+                StateDiffRecord {
+                    address,
+                    key,
+                    derived_key:
+                        zk_evm_1_5_0::aux_structures::LogQuery::derive_final_address_for_params(
+                            &address, &key,
+                        ),
+                    enumeration_index: storage
+                        .get_enumeration_index(&storage_key)
+                        .unwrap_or_default(),
+                    initial_value: initial_value.unwrap_or_default(),
+                    final_value,
+                }
+            },
+        )
+    }
+
+    pub(crate) fn decommitted_hashes(&self) -> impl Iterator<Item = U256> + '_ {
+        self.inner.world_diff.decommitted_hashes()
+    }
+}
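The bootloader-heap helpers above address memory in 32-byte words: callers pass a word index (`slot`), while the underlying heap API takes byte offsets, hence the `slot * 32` conversions. A tiny worked sketch of that mapping:

```rust
// Word index -> byte offset, as in `read_word_from_bootloader_heap`/`write_to_bootloader_heap`.
fn word_offset(slot: u32) -> u32 {
    slot * 32
}

fn main() {
    assert_eq!(word_offset(0), 0);
    assert_eq!(word_offset(1), 32);
    assert_eq!(word_offset(3), 96); // the fourth word starts at byte 96
}
```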
+
+// We don't implement the `VmFactory` trait because, unlike old VMs, the new VM doesn't require storage to be writable;
+// it maintains its own storage cache and a write buffer.
+impl<S: ReadStorage> Vm<S> {
+    pub fn new(batch_env: L1BatchEnv, system_env: SystemEnv, storage: S) -> Self {
+        let default_aa_code_hash = system_env
+            .base_system_smart_contracts
+            .default_aa
+            .hash
+            .into();
+
+        let program_cache = HashMap::from([convert_system_contract_code(
+            &system_env.base_system_smart_contracts.default_aa,
+            false,
+        )]);
+
+        let (_, bootloader) =
+            convert_system_contract_code(&system_env.base_system_smart_contracts.bootloader, true);
+        let bootloader_memory = bootloader_initial_memory(&batch_env);
+
+        let mut inner = VirtualMachine::new(
+            BOOTLOADER_ADDRESS,
+            bootloader,
+            H160::zero(),
+            vec![],
+            system_env.bootloader_gas_limit,
+            Settings {
+                default_aa_code_hash,
+                // this will change after 1.5
+                evm_interpreter_code_hash: default_aa_code_hash,
+                hook_address: get_vm_hook_position(VM_VERSION) * 32,
+            },
+        );
+
+        inner.state.current_frame.sp = 0;
+
+        // The bootloader writes results to high addresses in its heap, so it makes sense to preallocate it.
+        inner.state.current_frame.heap_size = u32::MAX;
+        inner.state.current_frame.aux_heap_size = u32::MAX;
+        inner.state.current_frame.exception_handler = INITIAL_FRAME_FORMAL_EH_LOCATION;
+
+        let mut me = Self {
+            world: World::new(storage, program_cache),
+            inner,
+            suspended_at: 0,
+            gas_for_account_validation: system_env.default_validation_computational_gas_limit,
+            bootloader_state: BootloaderState::new(
+                system_env.execution_mode,
+                bootloader_memory.clone(),
+                batch_env.first_l2_block,
+            ),
+            system_env,
+            batch_env,
+            snapshot: None,
+        };
+
+        me.write_to_bootloader_heap(bootloader_memory);
+
+        me
+    }
+
+    fn delete_history_if_appropriate(&mut self) {
+        if self.snapshot.is_none() && self.inner.state.previous_frames.is_empty() {
+            self.inner.delete_history();
+        }
+    }
+}
+
+impl<S: ReadStorage> VmInterface for Vm<S> {
+    type TracerDispatcher = ();
+
+    fn push_transaction(&mut self, tx: zksync_types::Transaction) {
+        self.push_transaction_inner(tx, 0, true);
+    }
+
+    fn inspect(
+        &mut self,
+        (): Self::TracerDispatcher,
+        execution_mode: VmExecutionMode,
+    ) -> VmExecutionResultAndLogs {
+        let mut track_refunds = false;
+        if matches!(execution_mode, VmExecutionMode::OneTx) {
+            // Move the pointer to the next transaction
+            self.bootloader_state.move_tx_to_execute_pointer();
+            track_refunds = true;
+        }
+
+        let start = self.inner.world_diff.snapshot();
+        let pubdata_before = self.inner.world_diff.pubdata();
+
+        let (result, refunds) = self.run(execution_mode, track_refunds);
+        let ignore_world_diff = matches!(execution_mode, VmExecutionMode::OneTx)
+            && matches!(result, ExecutionResult::Halt { .. });
+
+        // If the execution is halted, the VM changes are expected to be rolled back by the caller.
+        // Earlier VMs return empty execution logs in this case, so we follow this behavior.
+        let logs = if ignore_world_diff {
+            VmExecutionLogs::default()
+        } else {
+            let storage_logs = self
+                .inner
+                .world_diff
+                .get_storage_changes_after(&start)
+                .map(|((address, key), change)| StorageLogWithPreviousValue {
+                    log: StorageLog {
+                        key: StorageKey::new(AccountTreeId::new(address), u256_to_h256(key)),
+                        value: u256_to_h256(change.after),
+                        kind: if change.is_initial {
+                            StorageLogKind::InitialWrite
+                        } else {
+                            StorageLogKind::RepeatedWrite
+                        },
+                    },
+                    previous_value: u256_to_h256(change.before.unwrap_or_default()),
+                })
+                .collect();
+            let events = merge_events(
+                self.inner.world_diff.events_after(&start),
+                self.batch_env.number,
+            );
+            let user_l2_to_l1_logs = extract_l2tol1logs_from_l1_messenger(&events)
+                .into_iter()
+                .map(Into::into)
+                .map(UserL2ToL1Log)
+                .collect();
+            let system_l2_to_l1_logs = self
+                .inner
+                .world_diff
+                .l2_to_l1_logs_after(&start)
+                .iter()
+                .map(|x| x.glue_into())
+                .collect();
+            VmExecutionLogs {
+                storage_logs,
+                events,
+                user_l2_to_l1_logs,
+                system_l2_to_l1_logs,
+                total_log_queries_count: 0, // This field is unused
+            }
+        };
+
+        let pubdata_after = self.inner.world_diff.pubdata();
+        VmExecutionResultAndLogs {
+            result,
+            logs,
+            // TODO (PLA-936): Fill statistics; investigate whether they should be zeroed on `Halt`
+            statistics: VmExecutionStatistics {
+                contracts_used: 0,
+                cycles_used: 0,
+                gas_used: 0,
+                gas_remaining: 0,
+                computational_gas_used: 0,
+                total_log_queries: 0,
+                pubdata_published: (pubdata_after - pubdata_before).max(0) as u32,
+                circuit_statistic: Default::default(),
+            },
+            refunds,
+        }
+    }
+
+    fn inspect_transaction_with_bytecode_compression(
+        &mut self,
+        (): Self::TracerDispatcher,
+        tx: zksync_types::Transaction,
+        with_compression: bool,
+    ) -> (
+        Result<(), BytecodeCompressionError>,
+        VmExecutionResultAndLogs,
+    ) {
+        self.push_transaction_inner(tx, 0, with_compression);
+        let result = self.inspect((), VmExecutionMode::OneTx);
+
+        let compression_result = if self.has_unpublished_bytecodes() {
+            Err(BytecodeCompressionError::BytecodeCompressionFailed)
+        } else {
+            Ok(())
+        };
+        (compression_result, result)
+    }
+
+    fn get_bootloader_memory(&self) -> BootloaderMemory {
+        self.bootloader_state.bootloader_memory()
+    }
+
+    fn get_last_tx_compressed_bytecodes(
+        &self,
+    ) -> Vec<CompressedBytecodeInfo> {
+        self.bootloader_state.get_last_tx_compressed_bytecodes()
+    }
+
+    fn start_new_l2_block(&mut self, l2_block_env: L2BlockEnv) {
+        self.bootloader_state.start_new_l2_block(l2_block_env)
+    }
+
+    fn get_current_execution_state(&self) -> CurrentExecutionState {
+        let world_diff = &self.inner.world_diff;
+        let events = merge_events(world_diff.events(), self.batch_env.number);
+
+        let user_l2_to_l1_logs = extract_l2tol1logs_from_l1_messenger(&events)
+            .into_iter()
+            .map(Into::into)
+            .map(UserL2ToL1Log)
+            .collect();
+
+        CurrentExecutionState {
+            events,
+            deduplicated_storage_logs: world_diff
+                .get_storage_changes()
+                .map(|((address, key), (_, value))| StorageLog {
+                    key: StorageKey::new(AccountTreeId::new(address), u256_to_h256(key)),
+                    value: u256_to_h256(value),
+                    kind: StorageLogKind::RepeatedWrite, // Initialness doesn't matter here
+                })
+                .collect(),
+            used_contract_hashes: self.decommitted_hashes().collect(),
+            system_logs: world_diff
+                .l2_to_l1_logs()
+                .iter()
+                .map(|x| x.glue_into())
+                .collect(),
+            user_l2_to_l1_logs,
+            storage_refunds: world_diff.storage_refunds().to_vec(),
+            pubdata_costs: world_diff.pubdata_costs().to_vec(),
+        }
+    }
todo!("Unused during batch execution") + } + + fn gas_remaining(&self) -> u32 { + self.inner.state.current_frame.gas + } + + fn finish_batch(&mut self) -> FinishedL1Batch { + let result = self.execute(VmExecutionMode::Batch); + let execution_state = self.get_current_execution_state(); + let bootloader_memory = self.get_bootloader_memory(); + FinishedL1Batch { + block_tip_execution_result: result, + final_execution_state: execution_state, + final_bootloader_memory: Some(bootloader_memory), + pubdata_input: Some( + self.bootloader_state + .get_pubdata_information() + .clone() + .build_pubdata(false), + ), + state_diffs: Some( + self.bootloader_state + .get_pubdata_information() + .state_diffs + .to_vec(), + ), + } + } +} + +#[derive(Debug)] +struct VmSnapshot { + vm_snapshot: vm2::Snapshot, + bootloader_snapshot: BootloaderStateSnapshot, + suspended_at: u16, + gas_for_account_validation: u32, +} + +impl VmInterfaceHistoryEnabled for Vm { + fn make_snapshot(&mut self) { + assert!( + self.snapshot.is_none(), + "cannot create a VM snapshot until a previous snapshot is rolled back to or popped" + ); + + self.delete_history_if_appropriate(); + self.snapshot = Some(VmSnapshot { + vm_snapshot: self.inner.snapshot(), + bootloader_snapshot: self.bootloader_state.get_snapshot(), + suspended_at: self.suspended_at, + gas_for_account_validation: self.gas_for_account_validation, + }); + } + + fn rollback_to_the_latest_snapshot(&mut self) { + let VmSnapshot { + vm_snapshot, + bootloader_snapshot, + suspended_at, + gas_for_account_validation, + } = self.snapshot.take().expect("no snapshots to rollback to"); + + self.inner.rollback(vm_snapshot); + self.bootloader_state.apply_snapshot(bootloader_snapshot); + self.suspended_at = suspended_at; + self.gas_for_account_validation = gas_for_account_validation; + + self.delete_history_if_appropriate(); + } + + fn pop_snapshot_no_rollback(&mut self) { + self.snapshot = None; + self.delete_history_if_appropriate(); + } +} + +impl fmt::Debug for Vm { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + f.debug_struct("Vm") + .field("suspended_at", &self.suspended_at) + .field( + "gas_for_account_validation", + &self.gas_for_account_validation, + ) + .field("bootloader_state", &self.bootloader_state) + .field("storage", &self.world.storage) + .field("program_cache", &self.world.program_cache) + .field("batch_env", &self.batch_env) + .field("system_env", &self.system_env) + .field("snapshot", &self.snapshot.as_ref().map(|_| ())) + .finish() + } +} + +#[derive(Debug)] +pub(crate) struct World { + pub(crate) storage: S, + // TODO (PLA-1008): Store `Program`s in an LRU cache + program_cache: HashMap, + pub(crate) bytecode_cache: HashMap>, +} + +impl World { + fn new(storage: S, program_cache: HashMap) -> Self { + Self { + storage, + program_cache, + bytecode_cache: Default::default(), + } + } +} + +impl vm2::World for World { + fn decommit_code(&mut self, hash: U256) -> Vec { + self.decommit(hash) + .code_page() + .as_ref() + .iter() + .flat_map(|u| { + let mut buffer = [0u8; 32]; + u.to_big_endian(&mut buffer); + buffer + }) + .collect() + } + + fn decommit(&mut self, hash: U256) -> Program { + self.program_cache + .entry(hash) + .or_insert_with(|| { + bytecode_to_program(self.bytecode_cache.entry(hash).or_insert_with(|| { + self.storage + .load_factory_dep(u256_to_h256(hash)) + .expect("vm tried to decommit nonexistent bytecode") + })) + }) + .clone() + } + + fn read_storage(&mut self, contract: H160, key: U256) -> Option { + let key = 
+
+#[derive(Debug)]
+pub(crate) struct World<S> {
+    pub(crate) storage: S,
+    // TODO (PLA-1008): Store `Program`s in an LRU cache
+    program_cache: HashMap<U256, Program>,
+    pub(crate) bytecode_cache: HashMap<U256, Vec<u8>>,
+}
+
+impl<S: ReadStorage> World<S> {
+    fn new(storage: S, program_cache: HashMap<U256, Program>) -> Self {
+        Self {
+            storage,
+            program_cache,
+            bytecode_cache: Default::default(),
+        }
+    }
+}
+
+impl<S: ReadStorage> vm2::World for World<S> {
+    fn decommit_code(&mut self, hash: U256) -> Vec<u8> {
+        self.decommit(hash)
+            .code_page()
+            .as_ref()
+            .iter()
+            .flat_map(|u| {
+                let mut buffer = [0u8; 32];
+                u.to_big_endian(&mut buffer);
+                buffer
+            })
+            .collect()
+    }
+
+    fn decommit(&mut self, hash: U256) -> Program {
+        self.program_cache
+            .entry(hash)
+            .or_insert_with(|| {
+                bytecode_to_program(self.bytecode_cache.entry(hash).or_insert_with(|| {
+                    self.storage
+                        .load_factory_dep(u256_to_h256(hash))
+                        .expect("vm tried to decommit nonexistent bytecode")
+                }))
+            })
+            .clone()
+    }
+
+    fn read_storage(&mut self, contract: H160, key: U256) -> Option<U256> {
+        let key = &StorageKey::new(AccountTreeId::new(contract), u256_to_h256(key));
+        if self.storage.is_write_initial(key) {
+            None
+        } else {
+            Some(self.storage.read_value(key).as_bytes().into())
+        }
+    }
+
+    fn cost_of_writing_storage(&mut self, initial_value: Option<U256>, new_value: U256) -> u32 {
+        let is_initial = initial_value.is_none();
+        let initial_value = initial_value.unwrap_or_default();
+
+        if initial_value == new_value {
+            return 0;
+        }
+
+        // Since we need to publish the state diffs onchain, for each of the updated storage slots
+        // we basically need to publish the following pair: `(key, compressed value)`.
+        // For the key we use the following optimization:
+        //   - The first time we publish it, we use 32 bytes.
+        //     Then, we remember an 8-byte id for this slot and assign it to it. We call this an initial write.
+        //   - The second time we publish it, we will use the 4/5-byte representation of this 8-byte id instead of the 32
+        //     bytes of the entire key.
+        // For value compression, we use a metadata byte which holds the length of the value and the operation from the
+        // previous state to the new state, and the compressed value. The maximum for this is 33 bytes.
+        // Total bytes for initial writes then becomes 65 bytes and repeated writes becomes 38 bytes.
+        let compressed_value_size =
+            compress_with_best_strategy(initial_value, new_value).len() as u32;
+
+        if is_initial {
+            (BYTES_PER_DERIVED_KEY as u32) + compressed_value_size
+        } else {
+            (BYTES_PER_ENUMERATION_INDEX as u32) + compressed_value_size
+        }
+    }
+
+    fn is_free_storage_slot(&self, contract: &H160, key: &U256) -> bool {
+        contract == &zksync_system_constants::SYSTEM_CONTEXT_ADDRESS
+            || contract == &L2_BASE_TOKEN_ADDRESS
+                && u256_to_h256(*key) == key_for_eth_balance(&BOOTLOADER_ADDRESS)
+    }
+}
+
+fn bytecode_to_program(bytecode: &[u8]) -> Program {
+    Program::new(
+        decode_program(
+            &bytecode
+                .chunks_exact(8)
+                .map(|chunk| u64::from_be_bytes(chunk.try_into().unwrap()))
+                .collect::<Vec<_>>(),
+            false,
+        ),
+        bytecode
+            .chunks_exact(32)
+            .map(U256::from_big_endian)
+            .collect::<Vec<_>>(),
+    )
+}
+
+fn convert_system_contract_code(code: &SystemContractCode, is_bootloader: bool) -> (U256, Program) {
+    (
+        h256_to_u256(code.hash),
+        Program::new(
+            decode_program(
+                &code
+                    .code
+                    .iter()
+                    .flat_map(|x| x.0.into_iter().rev())
+                    .collect::<Vec<_>>(),
+                is_bootloader,
+            ),
+            code.code.clone(),
+        ),
+    )
+}
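To make the pricing comment in `cost_of_writing_storage` concrete: with a 32-byte derived key, a slot id republished as 4–5 bytes, and a compressed value of at most 33 bytes, an initial write costs up to 65 bytes of pubdata and a repeated write up to 38. A self-contained sketch of that arithmetic (the constants below follow the comment's 65/38 totals and are illustrative; the real ones are `BYTES_PER_DERIVED_KEY` and `BYTES_PER_ENUMERATION_INDEX` from `zksync_types::writes`):

```rust
// Mirrors the shape of `cost_of_writing_storage` above.
const KEY_BYTES_INITIAL: u32 = 32; // full derived key on first publication
const KEY_BYTES_REPEATED: u32 = 5; // short id once the slot has an enumeration index

fn pubdata_bytes(is_initial_write: bool, compressed_value_len: u32) -> u32 {
    let key_part = if is_initial_write { KEY_BYTES_INITIAL } else { KEY_BYTES_REPEATED };
    key_part + compressed_value_len
}

fn main() {
    // Worst case: 1 metadata byte + full 32-byte value = 33 compressed bytes.
    assert_eq!(pubdata_bytes(true, 33), 65); // initial write
    assert_eq!(pubdata_bytes(false, 33), 38); // repeated write
}
```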
diff --git a/core/lib/multivm/src/versions/vm_latest/old_vm/event_sink.rs b/core/lib/multivm/src/versions/vm_latest/old_vm/event_sink.rs
index 58fad96dec86..aca2bc497074 100644
--- a/core/lib/multivm/src/versions/vm_latest/old_vm/event_sink.rs
+++ b/core/lib/multivm/src/versions/vm_latest/old_vm/event_sink.rs
@@ -1,6 +1,5 @@
 use std::collections::HashMap;
 
-use itertools::Itertools;
 use zk_evm_1_5_0::{
     abstractions::EventSink,
     aux_structures::{LogQuery, Timestamp},
@@ -9,7 +8,6 @@ use zk_evm_1_5_0::{
         BOOTLOADER_FORMAL_ADDRESS, EVENT_AUX_BYTE, L1_MESSAGE_AUX_BYTE,
     },
 };
-use zksync_types::U256;
 
 use crate::vm_latest::old_vm::{
     history_recorder::{AppDataFrameManagerWithHistory, HistoryEnabled, HistoryMode},
@@ -31,7 +29,7 @@ impl OracleWithHistory for InMemoryEventSink<HistoryEnabled> {
 // otherwise we carry rollbacks to the parent's frames
 
 impl<H: HistoryMode> InMemoryEventSink<H> {
-    pub fn flatten(&self) -> (Vec<LogQuery>, Vec<EventMessage>, Vec<EventMessage>) {
+    pub fn flatten(&self) -> (Vec<EventMessage>, Vec<EventMessage>) {
         assert_eq!(
             self.frames_stack.len(),
             1,
@@ -40,10 +38,7 @@ impl<H: HistoryMode> InMemoryEventSink<H> {
         // we forget rollbacks as we have finished the execution and can just apply them
         let history = self.frames_stack.forward().current_frame();
 
-        let (events, l1_messages) = Self::events_and_l1_messages_from_history(history);
-        let events_logs = Self::events_logs_from_history(history);
-
-        (events_logs, events, l1_messages)
+        Self::events_and_l1_messages_from_history(history)
     }
 
     pub fn get_log_queries(&self) -> usize {
@@ -69,92 +64,6 @@ impl<H: HistoryMode> InMemoryEventSink<H> {
         Self::events_and_l1_messages_from_history(self.log_queries_after_timestamp(from_timestamp))
     }
 
-    fn events_logs_from_history(history: &[Box<LogQuery>]) -> Vec<LogQuery> {
-        // Filter out all the L2->L1 logs and leave only events
-        let mut events = history
-            .iter()
-            .filter_map(|log_query| (log_query.aux_byte == EVENT_AUX_BYTE).then_some(**log_query))
-            .collect_vec();
-
-        // Sort the events by timestamp and rollback flag, basically ensuring that
-        // if an event has been rolled back, the original event and its rollback will be put together
-        events.sort_by_key(|log| (log.timestamp, log.rollback));
-
-        let mut stack = Vec::<LogQuery>::new();
-        let mut net_history = vec![];
-        for el in events.iter() {
-            assert_eq!(el.shard_id, 0, "only rollup shard is supported");
-            if stack.is_empty() {
-                assert!(!el.rollback);
-                stack.push(*el);
-            } else {
-                // we can always pop as it's either one to add to queue, or discard
-                let previous = stack.pop().unwrap();
-                if previous.timestamp == el.timestamp {
-                    // Only rollback can have the same timestamp, so here we do nothing and simply
-                    // double check the invariants
-                    assert!(!previous.rollback);
-                    assert!(el.rollback);
-                    assert!(previous.rw_flag);
-                    assert!(el.rw_flag);
-                    assert_eq!(previous.tx_number_in_block, el.tx_number_in_block);
-                    assert_eq!(previous.shard_id, el.shard_id);
-                    assert_eq!(previous.address, el.address);
-                    assert_eq!(previous.key, el.key);
-                    assert_eq!(previous.written_value, el.written_value);
-                    assert_eq!(previous.is_service, el.is_service);
-                    continue;
-                } else {
-                    // The event on the stack has not been rolled back. It must be a different event,
-                    // with a different timestamp.
-                    assert!(!el.rollback);
-                    stack.push(*el);
-
-                    // cleanup some fields
-                    // flags are conventions
-                    let sorted_log_query = LogQuery {
-                        timestamp: Timestamp(0),
-                        tx_number_in_block: previous.tx_number_in_block,
-                        aux_byte: 0,
-                        shard_id: previous.shard_id,
-                        address: previous.address,
-                        key: previous.key,
-                        read_value: U256::zero(),
-                        written_value: previous.written_value,
-                        rw_flag: false,
-                        rollback: false,
-                        is_service: previous.is_service,
-                    };
-
-                    net_history.push(sorted_log_query);
-                }
-            }
-        }
-
-        // In case the stack is non-empty, then the last element of it has not been rolled back.
-        if let Some(previous) = stack.pop() {
-            // cleanup some fields
-            // flags are conventions
-            let sorted_log_query = LogQuery {
-                timestamp: Timestamp(0),
-                tx_number_in_block: previous.tx_number_in_block,
-                aux_byte: 0,
-                shard_id: previous.shard_id,
-                address: previous.address,
-                key: previous.key,
-                read_value: U256::zero(),
-                written_value: previous.written_value,
-                rw_flag: false,
-                rollback: false,
-                is_service: previous.is_service,
-            };
-
-            net_history.push(sorted_log_query);
-        }
-
-        net_history
-    }
-
     fn events_and_l1_messages_from_history(
         history: &[Box<LogQuery>],
     ) -> (Vec<EventMessage>, Vec<EventMessage>) {
diff --git a/core/lib/multivm/src/versions/vm_latest/oracles/storage.rs b/core/lib/multivm/src/versions/vm_latest/oracles/storage.rs
index 22503ce9881c..075660ad58aa 100644
--- a/core/lib/multivm/src/versions/vm_latest/oracles/storage.rs
+++ b/core/lib/multivm/src/versions/vm_latest/oracles/storage.rs
@@ -37,9 +37,9 @@ use crate::{
 
 /// We employ the following rules for cold/warm storage rules:
 /// - We price a single "I/O" access as 2k ergs. This means that reading a single storage slot
-/// would cost 2k ergs, while writing to it would 4k ergs (since it involves both reading during execution and writing at the end of it).
+///   would cost 2k ergs, while writing to it would cost 4k ergs (since it involves both reading during execution and writing at the end of it).
 /// - Thereafter, "warm" reads cost 30 ergs, while "warm" writes cost 60 ergs. Warm writes to account cost more for the fact that they may be reverted
-/// and so require more RAM to store them.
+///   and so require more RAM to store them.
 const WARM_READ_REFUND: u32 = STORAGE_ACCESS_COLD_READ_COST - STORAGE_ACCESS_WARM_READ_COST;
 const WARM_WRITE_REFUND: u32 = STORAGE_ACCESS_COLD_WRITE_COST - STORAGE_ACCESS_WARM_WRITE_COST;
diff --git a/core/lib/multivm/src/versions/vm_latest/tests/code_oracle.rs b/core/lib/multivm/src/versions/vm_latest/tests/code_oracle.rs
index 8c8c6e2d0970..7174e9be67de 100644
--- a/core/lib/multivm/src/versions/vm_latest/tests/code_oracle.rs
+++ b/core/lib/multivm/src/versions/vm_latest/tests/code_oracle.rs
@@ -1,6 +1,11 @@
 use ethabi::Token;
-use zk_evm_1_5_0::aux_structures::Timestamp;
-use zksync_types::{get_known_code_key, web3::keccak256, Address, Execute, U256};
+use zk_evm_1_5_0::{
+    aux_structures::{MemoryPage, Timestamp},
+    zkevm_opcode_defs::{ContractCodeSha256Format, VersionedHashLen32},
+};
+use zksync_types::{
+    get_known_code_key, web3::keccak256, Address, Execute, StorageLogWithPreviousValue, U256,
+};
 use zksync_utils::{bytecode::hash_bytecode, bytes_to_be_words, h256_to_u256, u256_to_h256};
 
 use crate::{
@@ -79,7 +84,10 @@ fn test_code_oracle() {
     vm.vm.push_transaction(tx1);
     let result = vm.vm.execute(VmExecutionMode::OneTx);
-    assert!(!result.result.is_failed(), "Transaction wasn't successful");
+    assert!(
+        !result.result.is_failed(),
+        "Transaction wasn't successful: {result:#?}"
+    );
 
     // Now, we ask for the same bytecode. We use it to partially check whether the memory page with
     // the decommitted bytecode gets erased (it shouldn't).
@@ -99,7 +107,21 @@
     );
     vm.vm.push_transaction(tx2);
     let result = vm.vm.execute(VmExecutionMode::OneTx);
-    assert!(!result.result.is_failed(), "Transaction wasn't successful");
+    assert!(
+        !result.result.is_failed(),
+        "Transaction wasn't successful: {result:#?}"
+    );
+}
+
+fn find_code_oracle_cost_log(
+    precompiles_contract_address: Address,
+    logs: &[StorageLogWithPreviousValue],
+) -> &StorageLogWithPreviousValue {
+    logs.iter()
+        .find(|log| {
+            *log.log.key.address() == precompiles_contract_address && log.log.key.key().is_zero()
+        })
+        .expect("no code oracle cost log")
 }
 
 #[test]
@@ -164,3 +186,97 @@ fn test_code_oracle_big_bytecode() {
     let result = vm.vm.execute(VmExecutionMode::OneTx);
     assert!(!result.result.is_failed(), "Transaction wasn't successful");
 }
+
+#[test]
+fn refunds_in_code_oracle() {
+    let precompiles_contract_address = Address::random();
+    let precompile_contract_bytecode = read_precompiles_contract();
+
+    let normal_zkevm_bytecode = read_test_contract();
+    let normal_zkevm_bytecode_hash = hash_bytecode(&normal_zkevm_bytecode);
+    let normal_zkevm_bytecode_keccak_hash = keccak256(&normal_zkevm_bytecode);
+    let normal_zkevm_bytecode_words = bytes_to_be_words(normal_zkevm_bytecode);
+    let mut storage = get_empty_storage();
+    storage.set_value(
+        get_known_code_key(&normal_zkevm_bytecode_hash),
+        u256_to_h256(U256::one()),
+    );
+
+    let precompile_contract = load_precompiles_contract();
+    let call_code_oracle_function = precompile_contract.function("callCodeOracle").unwrap();
+
+    // Execute the code oracle twice with identical VM states; the only difference is that the queried bytecode
+    // is already decommitted the second time. The second call must consume less gas (`decommit` doesn't charge additional gas
+    // for already decommitted codes).
+    let mut oracle_costs = vec![];
+    for decommit in [false, true] {
+        let mut vm = VmTesterBuilder::new(HistoryEnabled)
+            .with_execution_mode(TxExecutionMode::VerifyExecute)
+            .with_random_rich_accounts(1)
+            .with_custom_contracts(vec![(
+                precompile_contract_bytecode.clone(),
+                precompiles_contract_address,
+                false,
+            )])
+            .with_storage(storage.clone())
+            .build();
+
+        vm.vm.state.decommittment_processor.populate(
+            vec![(
+                h256_to_u256(normal_zkevm_bytecode_hash),
+                normal_zkevm_bytecode_words.clone(),
+            )],
+            Timestamp(0),
+        );
+
+        let account = &mut vm.rich_accounts[0];
+        if decommit {
+            let (header, normalized_preimage) =
+                ContractCodeSha256Format::normalize_for_decommitment(&normal_zkevm_bytecode_hash.0);
+            let query = vm
+                .vm
+                .state
+                .prepare_to_decommit(
+                    0,
+                    header,
+                    normalized_preimage,
+                    MemoryPage(123),
+                    Timestamp(0),
+                )
+                .unwrap();
+
+            assert!(query.is_fresh);
+            vm.vm.state.execute_decommit(0, query).unwrap();
+        }
+
+        let tx = account.get_l2_tx_for_execute(
+            Execute {
+                contract_address: precompiles_contract_address,
+                calldata: call_code_oracle_function
+                    .encode_input(&[
+                        Token::FixedBytes(normal_zkevm_bytecode_hash.0.to_vec()),
+                        Token::FixedBytes(normal_zkevm_bytecode_keccak_hash.to_vec()),
+                    ])
+                    .unwrap(),
+                value: U256::zero(),
+                factory_deps: vec![],
+            },
+            None,
+        );
+
+        vm.vm.push_transaction(tx);
+        let result = vm.vm.execute(VmExecutionMode::OneTx);
+        assert!(!result.result.is_failed(), "Transaction wasn't successful");
+        let log =
+            find_code_oracle_cost_log(precompiles_contract_address, &result.logs.storage_logs);
+        oracle_costs.push(log.log.value);
+    }
+
+    // The refund is equal to the `gasCost` parameter passed to the `decommit` opcode, which is defined as `4 * contract_length_in_words`
+    // in `CodeOracle.yul`.
+    let code_oracle_refund = h256_to_u256(oracle_costs[0]) - h256_to_u256(oracle_costs[1]);
+    assert_eq!(
+        code_oracle_refund,
+        (4 * normal_zkevm_bytecode_words.len()).into()
+    );
+}
diff --git a/core/lib/multivm/src/versions/vm_latest/tests/l1_tx_execution.rs b/core/lib/multivm/src/versions/vm_latest/tests/l1_tx_execution.rs
index 359190fc4787..6b3be989fb3a 100644
--- a/core/lib/multivm/src/versions/vm_latest/tests/l1_tx_execution.rs
+++ b/core/lib/multivm/src/versions/vm_latest/tests/l1_tx_execution.rs
@@ -1,11 +1,12 @@
 use ethabi::Token;
 use zksync_contracts::l1_messenger_contract;
 use zksync_system_constants::{BOOTLOADER_ADDRESS, L1_MESSENGER_ADDRESS};
+use zksync_test_account::Account;
 use zksync_types::{
     get_code_key, get_known_code_key,
     l2_to_l1_log::{L2ToL1Log, UserL2ToL1Log},
     storage_writes_deduplicator::StorageWritesDeduplicator,
-    Execute, ExecuteTransactionCommon, U256,
+    Execute, ExecuteTransactionCommon, K256PrivateKey, U256,
 };
 use zksync_utils::u256_to_h256;
 
@@ -154,7 +155,9 @@ fn test_l1_tx_execution_high_gas_limit() {
         .with_empty_in_memory_storage()
         .with_base_system_smart_contracts(BASE_SYSTEM_CONTRACTS.clone())
         .with_execution_mode(TxExecutionMode::VerifyExecute)
-        .with_random_rich_accounts(1)
+        .with_rich_accounts(vec![Account::new(
+            K256PrivateKey::from_bytes([0xad; 32].into()).unwrap(),
+        )])
         .build();
 
     let account = &mut vm.rich_accounts[0];
diff --git a/core/lib/multivm/src/versions/vm_latest/tests/refunds.rs b/core/lib/multivm/src/versions/vm_latest/tests/refunds.rs
index 72d2271f7158..52dbd6efb339 100644
--- a/core/lib/multivm/src/versions/vm_latest/tests/refunds.rs
+++ b/core/lib/multivm/src/versions/vm_latest/tests/refunds.rs
@@ -1,9 +1,12 @@
+use ethabi::Token;
+use zksync_types::{Address, Execute, U256};
+
 use crate::{
     interface::{TxExecutionMode, VmExecutionMode, VmInterface},
     vm_latest::{
         tests::{
             tester::{DeployContractsTx, TxType, VmTesterBuilder},
-            utils::read_test_contract,
+            utils::{read_expensive_contract, read_test_contract},
         },
         types::internals::TransactionData,
         HistoryEnabled,
@@ -164,3 +167,62 @@ fn test_predetermined_refunded_gas() {
         current_state_without_predefined_refunds.used_contract_hashes
     );
 }
+
+#[test]
+fn negative_pubdata_for_transaction() {
+    let expensive_contract_address = Address::random();
+    let (expensive_contract_bytecode, expensive_contract) = read_expensive_contract();
+    let expensive_function = expensive_contract.function("expensive").unwrap();
+    let cleanup_function = expensive_contract.function("cleanUp").unwrap();
+
+    let mut vm = VmTesterBuilder::new(HistoryEnabled)
+        .with_empty_in_memory_storage()
+        .with_execution_mode(TxExecutionMode::VerifyExecute)
+        .with_random_rich_accounts(1)
+        .with_custom_contracts(vec![(
+            expensive_contract_bytecode,
+            expensive_contract_address,
+            false,
+        )])
+        .build();
+
+    let expensive_tx = vm.rich_accounts[0].get_l2_tx_for_execute(
+        Execute {
+            contract_address: expensive_contract_address,
+            calldata: expensive_function
+                .encode_input(&[Token::Uint(10.into())])
+                .unwrap(),
+            value: U256::zero(),
+            factory_deps: vec![],
+        },
+        None,
+    );
+    vm.vm.push_transaction(expensive_tx);
+    let result = vm.vm.execute(VmExecutionMode::OneTx);
+    assert!(
+        !result.result.is_failed(),
+        "Transaction wasn't successful: {result:#?}"
+    );
+
+    // This transaction cleans all initial writes in the contract, thus having negative `pubdata` impact.
+    let clean_up_tx = vm.rich_accounts[0].get_l2_tx_for_execute(
+        Execute {
+            contract_address: expensive_contract_address,
+            calldata: cleanup_function.encode_input(&[]).unwrap(),
+            value: U256::zero(),
+            factory_deps: vec![],
+        },
+        None,
+    );
+    vm.vm.push_transaction(clean_up_tx);
+    let result = vm.vm.execute(VmExecutionMode::OneTx);
+    assert!(
+        !result.result.is_failed(),
+        "Transaction wasn't successful: {result:#?}"
+    );
+    assert!(result.refunds.operator_suggested_refund > 0);
+    assert_eq!(
+        result.refunds.gas_refunded,
+        result.refunds.operator_suggested_refund
+    );
+}
diff --git a/core/lib/multivm/src/versions/vm_latest/tests/tester/vm_tester.rs b/core/lib/multivm/src/versions/vm_latest/tests/tester/vm_tester.rs
index 8a55a3fc6a5a..28d853486485 100644
--- a/core/lib/multivm/src/versions/vm_latest/tests/tester/vm_tester.rs
+++ b/core/lib/multivm/src/versions/vm_latest/tests/tester/vm_tester.rs
@@ -14,7 +14,8 @@ use zksync_utils::{bytecode::hash_bytecode, u256_to_h256};
 
 use crate::{
     interface::{
-        L1BatchEnv, L2Block, L2BlockEnv, SystemEnv, TxExecutionMode, VmExecutionMode, VmInterface,
+        L1BatchEnv, L2Block, L2BlockEnv, SystemEnv, TxExecutionMode, VmExecutionMode, VmFactory,
+        VmInterface,
     },
     vm_latest::{
         constants::BATCH_COMPUTATIONAL_GAS_LIMIT,
@@ -82,7 +83,7 @@ impl<H: HistoryMode> VmTester<H> {
         let mut l1_batch = self.vm.batch_env.clone();
         if use_latest_l2_block {
-            let last_l2_block = load_last_l2_block(self.storage.clone()).unwrap_or(L2Block {
+            let last_l2_block = load_last_l2_block(&self.storage).unwrap_or(L2Block {
                 number: 0,
                 timestamp: 0,
                 hash: L2BlockHasher::legacy_hash(L2BlockNumber(0)),
diff --git a/core/lib/multivm/src/versions/vm_latest/tests/utils.rs b/core/lib/multivm/src/versions/vm_latest/tests/utils.rs
index 37bdd0cef8e0..2482df0d0e89 100644
--- a/core/lib/multivm/src/versions/vm_latest/tests/utils.rs
+++ b/core/lib/multivm/src/versions/vm_latest/tests/utils.rs
@@ -131,3 +131,9 @@ pub(crate) fn get_complex_upgrade_abi() -> Contract {
         "etc/contracts-test-data/artifacts-zk/contracts/complex-upgrade/complex-upgrade.sol/ComplexUpgrade.json"
     )
 }
+
+pub(crate) fn read_expensive_contract() -> (Vec<u8>, Contract) {
+    const PATH: &str =
+        "etc/contracts-test-data/artifacts-zk/contracts/expensive/expensive.sol/Expensive.json";
+    (read_bytecode(PATH), load_contract(PATH))
+}
diff --git a/core/lib/multivm/src/versions/vm_latest/types/internals/vm_state.rs b/core/lib/multivm/src/versions/vm_latest/types/internals/vm_state.rs
index 9a3e70f8dff6..b9ac0bfad229 100644
--- a/core/lib/multivm/src/versions/vm_latest/types/internals/vm_state.rs
+++ b/core/lib/multivm/src/versions/vm_latest/types/internals/vm_state.rs
@@ -63,7 +63,7 @@ pub(crate) fn new_vm_state(
     system_env: &SystemEnv,
     l1_batch_env: &L1BatchEnv,
 ) -> (ZkSyncVmState<S, H>, BootloaderState) {
-    let last_l2_block = if let Some(last_l2_block) = load_last_l2_block(storage.clone()) {
+    let last_l2_block = if let Some(last_l2_block) = load_last_l2_block(&storage) {
         last_l2_block
     } else {
         // This is the scenario of either the first L2 block ever or
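The call-site changes above reflect an API change: `load_last_l2_block` now borrows the storage pointer instead of consuming it, so callers drop their `.clone()`. A minimal sketch of the after-shape (`StoragePtr` in zksync_state is an `Rc`-style shared handle; this mirrors its general structure, not the exact zksync definition):

```rust
use std::{cell::RefCell, rc::Rc};

type StoragePtr<S> = Rc<RefCell<S>>;

// After the change: the function borrows the pointer instead of taking it by value.
fn load_last_l2_block(storage: &StoragePtr<Vec<u32>>) -> Option<u32> {
    storage.borrow().last().copied()
}

fn main() {
    let storage: StoragePtr<Vec<u32>> = Rc::new(RefCell::new(vec![1, 2, 3]));
    // No `storage.clone()` at the call site anymore:
    assert_eq!(load_last_l2_block(&storage), Some(3));
    assert_eq!(Rc::strong_count(&storage), 1); // no refcount churn at the call site
}
```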
diff --git a/core/lib/multivm/src/versions/vm_latest/utils/l2_blocks.rs b/core/lib/multivm/src/versions/vm_latest/utils/l2_blocks.rs
index ec30a86013b9..d3253ffd7fb3 100644
--- a/core/lib/multivm/src/versions/vm_latest/utils/l2_blocks.rs
+++ b/core/lib/multivm/src/versions/vm_latest/utils/l2_blocks.rs
@@ -52,7 +52,7 @@ pub(crate) fn l2_block_hash(
 }
 
 /// Get last saved block from storage
-pub fn load_last_l2_block<S: ReadStorage>(storage: StoragePtr<S>) -> Option<L2Block> {
+pub fn load_last_l2_block<S: ReadStorage>(storage: &StoragePtr<S>) -> Option<L2Block> {
     // Get block number and timestamp
     let current_l2_block_info_key = StorageKey::new(
         AccountTreeId::new(SYSTEM_CONTEXT_ADDRESS),
diff --git a/core/lib/multivm/src/versions/vm_latest/vm.rs b/core/lib/multivm/src/versions/vm_latest/vm.rs
index a6cd884c7380..f11431f01546 100644
--- a/core/lib/multivm/src/versions/vm_latest/vm.rs
+++ b/core/lib/multivm/src/versions/vm_latest/vm.rs
@@ -3,7 +3,8 @@ use zksync_state::{StoragePtr, WriteStorage};
 use zksync_types::{
     event::extract_l2tol1logs_from_l1_messenger,
     l2_to_l1_log::{SystemL2ToL1Log, UserL2ToL1Log},
-    Transaction, VmVersion,
+    vm::VmVersion,
+    Transaction,
 };
 use zksync_utils::bytecode::CompressedBytecodeInfo;
 
@@ -11,8 +12,8 @@ use crate::{
     glue::GlueInto,
     interface::{
         BootloaderMemory, BytecodeCompressionError, CurrentExecutionState, FinishedL1Batch,
-        L1BatchEnv, L2BlockEnv, SystemEnv, VmExecutionMode, VmExecutionResultAndLogs, VmInterface,
-        VmInterfaceHistoryEnabled, VmMemoryMetrics,
+        L1BatchEnv, L2BlockEnv, SystemEnv, VmExecutionMode, VmExecutionResultAndLogs, VmFactory,
+        VmInterface, VmInterfaceHistoryEnabled, VmMemoryMetrics,
     },
     vm_latest::{
         bootloader_state::BootloaderState,
@@ -72,19 +73,9 @@ pub struct Vm<S: WriteStorage, H: HistoryMode> {
     _phantom: std::marker::PhantomData<H>,
 }
 
-impl<S: WriteStorage, H: HistoryMode> VmInterface<S, H> for Vm<S, H> {
+impl<S: WriteStorage, H: HistoryMode> VmInterface for Vm<S, H> {
     type TracerDispatcher = TracerDispatcher<S, H>;
 
-    fn new(batch_env: L1BatchEnv, system_env: SystemEnv, storage: StoragePtr<S>) -> Self {
-        let vm_version: VmVersion = system_env.version.into();
-        Self::new_with_subversion(
-            batch_env,
-            system_env,
-            storage,
-            vm_version.try_into().expect("Incorrect 1.5.0 VmVersion"),
-        )
-    }
-
     /// Push tx into memory for the future execution
     fn push_transaction(&mut self, tx: Transaction) {
         self.push_transaction_with_compression(tx, true);
@@ -117,7 +108,7 @@ impl<S: WriteStorage, H: HistoryMode> VmInterface for Vm<S, H> {
     /// This method should be used only after the batch execution.
     /// Otherwise it can panic.
     fn get_current_execution_state(&self) -> CurrentExecutionState {
-        let (deduplicated_events_logs, raw_events, l1_messages) = self.state.event_sink.flatten();
+        let (raw_events, l1_messages) = self.state.event_sink.flatten();
         let events: Vec<_> = merge_events(raw_events)
             .into_iter()
             .map(|e| e.into_vm_event(self.batch_env.number))
@@ -128,13 +119,6 @@ impl<S: WriteStorage, H: HistoryMode> VmInterface for Vm<S, H> {
             .into_iter()
             .map(|log| SystemL2ToL1Log(log.glue_into()))
             .collect();
-        let total_log_queries = self.state.event_sink.get_log_queries()
-            + self
-                .state
-                .precompiles_processor
-                .get_timestamp_history()
-                .len()
-            + self.state.storage.get_final_log_queries().len();
 
         let storage_log_queries = self.state.storage.get_final_log_queries();
         let deduped_storage_log_queries =
@@ -152,12 +136,6 @@ impl<S: WriteStorage, H: HistoryMode> VmInterface for Vm<S, H> {
                 .map(|log| UserL2ToL1Log(log.into()))
                 .collect(),
             system_logs,
-            total_log_queries,
-            cycles_used: self.state.local_state.monotonic_cycle_counter,
-            deduplicated_events_logs: deduplicated_events_logs
-                .into_iter()
-                .map(GlueInto::glue_into)
-                .collect(),
             storage_refunds: self.state.storage.returned_io_refunds.inner().clone(),
             pubdata_costs: self.state.storage.returned_pubdata_costs.inner().clone(),
         }
@@ -219,6 +197,18 @@ impl<S: WriteStorage, H: HistoryMode> VmInterface for Vm<S, H> {
     }
 }
 
+impl<S: WriteStorage, H: HistoryMode> VmFactory<S> for Vm<S, H> {
+    fn new(batch_env: L1BatchEnv, system_env: SystemEnv, storage: StoragePtr<S>) -> Self {
+        let vm_version: VmVersion = system_env.version.into();
+        Self::new_with_subversion(
+            batch_env,
+            system_env,
+            storage,
+            vm_version.try_into().expect("Incorrect 1.5.0 VmVersion"),
+        )
+    }
+}
+
 impl<S: WriteStorage, H: HistoryMode> Vm<S, H> {
     pub(crate) fn new_with_subversion(
         batch_env: L1BatchEnv,
@@ -240,14 +230,11 @@ impl<S: WriteStorage, H: HistoryMode> Vm<S, H> {
     }
 }
 
-/// Methods of vm, which required some history manipulations
-impl<S: WriteStorage, H: HistoryMode> VmInterfaceHistoryEnabled<S, H> for Vm<S, H> {
-    /// Create snapshot of current vm state and push it into the memory
+impl<S: WriteStorage, H: HistoryMode> VmInterfaceHistoryEnabled for Vm<S, H> {
     fn make_snapshot(&mut self) {
         self.make_snapshot_inner()
     }
 
-    /// Rollback vm state to the latest snapshot and destroy the snapshot
     fn rollback_to_the_latest_snapshot(&mut self) {
         let snapshot = self
             .snapshots
@@ -256,10 +243,7 @@ impl<S: WriteStorage, H: HistoryMode> VmInterfaceHistoryEnabled for Vm<S, H> {
         self.rollback_to_snapshot(snapshot);
     }
 
-    /// Pop the latest snapshot from the memory and destroy it
     fn pop_snapshot_no_rollback(&mut self) {
-        self.snapshots
-            .pop()
-            .expect("Snapshot should be created before rolling it back");
+        self.snapshots.pop();
    }
 }
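The hunks above move construction out of `VmInterface` into a dedicated `VmFactory` impl, so the execution interface no longer forces a constructor signature on every VM (the new fast VM, for instance, constructs from read-only storage). A minimal sketch of that trait split (simplified traits, not the real multivm definitions):

```rust
// Execution methods and construction are now separate concerns.
trait VmInterface {
    fn execute(&mut self) -> String;
}

trait VmFactory<S>: VmInterface {
    fn new(storage: S) -> Self;
}

struct Vm<S> {
    #[allow(dead_code)] // kept to mirror the real struct; unused in this sketch
    storage: S,
}

impl<S> VmInterface for Vm<S> {
    fn execute(&mut self) -> String {
        "executed".to_owned()
    }
}

impl<S> VmFactory<S> for Vm<S> {
    fn new(storage: S) -> Self {
        Self { storage }
    }
}

fn main() {
    // Callers that only execute depend on `VmInterface`; only creation sites need `VmFactory`.
    let mut vm = Vm::new(vec![0u8; 4]);
    assert_eq!(vm.execute(), "executed");
}
```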
zk_evm_1_3_1::aux_structures::LogQuery; use zksync_state::StoragePtr; use zksync_types::{ l2_to_l1_log::{L2ToL1Log, UserL2ToL1Log}, - Transaction, VmVersion, + vm::VmVersion, + Transaction, }; use zksync_utils::{bytecode::CompressedBytecodeInfo, h256_to_u256, u256_to_h256}; @@ -13,7 +14,8 @@ use crate::{ interface::{ BootloaderMemory, BytecodeCompressionError, CurrentExecutionState, FinishedL1Batch, L1BatchEnv, L2BlockEnv, SystemEnv, TxExecutionMode, VmExecutionMode, - VmExecutionResultAndLogs, VmInterface, VmInterfaceHistoryEnabled, VmMemoryMetrics, + VmExecutionResultAndLogs, VmFactory, VmInterface, VmInterfaceHistoryEnabled, + VmMemoryMetrics, }, vm_m5::{ events::merge_events, @@ -64,20 +66,10 @@ impl Vm { } } -impl VmInterface for Vm { +impl VmInterface for Vm { /// Tracers are not supported for here we use `()` as a placeholder type TracerDispatcher = (); - fn new(batch_env: L1BatchEnv, system_env: SystemEnv, storage: StoragePtr) -> Self { - let vm_version: VmVersion = system_env.version.into(); - let vm_sub_version = match vm_version { - VmVersion::M5WithoutRefunds => MultiVMSubversion::V1, - VmVersion::M5WithRefunds => MultiVMSubversion::V2, - _ => panic!("Unsupported protocol version for vm_m5: {:?}", vm_version), - }; - Self::new_with_subversion(batch_env, system_env, storage, vm_sub_version) - } - fn push_transaction(&mut self, tx: Transaction) { crate::vm_m5::vm_with_bootloader::push_transaction_to_bootloader_memory( &mut self.vm, @@ -119,7 +111,7 @@ impl VmInterface for Vm { } fn get_current_execution_state(&self) -> CurrentExecutionState { - let (_full_history, raw_events, l1_messages) = self.vm.state.event_sink.flatten(); + let (raw_events, l1_messages) = self.vm.state.event_sink.flatten(); let events = merge_events(raw_events) .into_iter() .map(|e| e.into_vm_event(self.batch_env.number)) @@ -137,14 +129,6 @@ impl VmInterface for Vm { }) }) .collect(); - let total_log_queries = self.vm.state.event_sink.get_log_queries() - + self - .vm - .state - .precompiles_processor - .get_timestamp_history() - .len() - + self.vm.get_final_log_queries().len(); let used_contract_hashes = self .vm @@ -182,10 +166,7 @@ impl VmInterface for Vm { used_contract_hashes, system_logs: vec![], user_l2_to_l1_logs: l2_to_l1_logs, - total_log_queries, - cycles_used: self.vm.state.local_state.monotonic_cycle_counter, - // It's not applicable for `vm5` - deduplicated_events_logs: vec![], + // Fields below are not produced by `vm5` storage_refunds: vec![], pubdata_costs: vec![], } @@ -234,7 +215,19 @@ impl VmInterface for Vm { } } -impl VmInterfaceHistoryEnabled for Vm { +impl VmFactory for Vm { + fn new(batch_env: L1BatchEnv, system_env: SystemEnv, storage: StoragePtr) -> Self { + let vm_version: VmVersion = system_env.version.into(); + let vm_sub_version = match vm_version { + VmVersion::M5WithoutRefunds => MultiVMSubversion::V1, + VmVersion::M5WithRefunds => MultiVMSubversion::V2, + _ => panic!("Unsupported protocol version for vm_m5: {:?}", vm_version), + }; + Self::new_with_subversion(batch_env, system_env, storage, vm_sub_version) + } +} + +impl VmInterfaceHistoryEnabled for Vm { fn make_snapshot(&mut self) { self.vm.save_current_vm_as_snapshot() } diff --git a/core/lib/multivm/src/versions/vm_m5/vm_instance.rs b/core/lib/multivm/src/versions/vm_m5/vm_instance.rs index 085c219f43c5..3f708f3470f2 100644 --- a/core/lib/multivm/src/versions/vm_m5/vm_instance.rs +++ b/core/lib/multivm/src/versions/vm_m5/vm_instance.rs @@ -748,7 +748,7 @@ impl VmInstance { // Collecting `block_tip_result` needs logs with 
timestamp, so we drain events for the `full_result` // after because draining will drop timestamps. - let (_full_history, raw_events, l1_messages) = self.state.event_sink.flatten(); + let (raw_events, l1_messages) = self.state.event_sink.flatten(); full_result.events = merge_events(raw_events) .into_iter() .map(|e| { diff --git a/core/lib/multivm/src/versions/vm_m6/event_sink.rs b/core/lib/multivm/src/versions/vm_m6/event_sink.rs index 56fe8dcb11e2..bf39b5962d97 100644 --- a/core/lib/multivm/src/versions/vm_m6/event_sink.rs +++ b/core/lib/multivm/src/versions/vm_m6/event_sink.rs @@ -30,7 +30,7 @@ impl OracleWithHistory for InMemoryEventSink { // otherwise we carry rollbacks to the parent's frames impl InMemoryEventSink { - pub fn flatten(&self) -> (Vec, Vec, Vec) { + pub fn flatten(&self) -> (Vec, Vec) { assert_eq!( self.frames_stack.len(), 1, @@ -38,8 +38,7 @@ impl InMemoryEventSink { ); // we forget rollbacks as we have finished the execution and can just apply them let history = self.frames_stack.forward().current_frame(); - let (events, l1_messages) = Self::events_and_l1_messages_from_history(history); - (history.to_vec(), events, l1_messages) + Self::events_and_l1_messages_from_history(history) } pub fn get_log_queries(&self) -> usize { diff --git a/core/lib/multivm/src/versions/vm_m6/vm.rs b/core/lib/multivm/src/versions/vm_m6/vm.rs index fe2deb4181a3..634867697a92 100644 --- a/core/lib/multivm/src/versions/vm_m6/vm.rs +++ b/core/lib/multivm/src/versions/vm_m6/vm.rs @@ -6,7 +6,8 @@ use zk_evm_1_3_1::aux_structures::LogQuery; use zksync_state::StoragePtr; use zksync_types::{ l2_to_l1_log::{L2ToL1Log, UserL2ToL1Log}, - Transaction, VmVersion, + vm::VmVersion, + Transaction, }; use zksync_utils::{ bytecode::{hash_bytecode, CompressedBytecodeInfo}, @@ -18,7 +19,8 @@ use crate::{ interface::{ BootloaderMemory, BytecodeCompressionError, CurrentExecutionState, FinishedL1Batch, L1BatchEnv, L2BlockEnv, SystemEnv, TxExecutionMode, VmExecutionMode, - VmExecutionResultAndLogs, VmInterface, VmInterfaceHistoryEnabled, VmMemoryMetrics, + VmExecutionResultAndLogs, VmFactory, VmInterface, VmInterfaceHistoryEnabled, + VmMemoryMetrics, }, tracers::old_tracers::TracerDispatcher, vm_m6::{events::merge_events, storage::Storage, vm_instance::MultiVMSubversion, VmInstance}, @@ -64,19 +66,9 @@ impl Vm { } } -impl VmInterface for Vm { +impl VmInterface for Vm { type TracerDispatcher = TracerDispatcher; - fn new(batch_env: L1BatchEnv, system_env: SystemEnv, storage: StoragePtr) -> Self { - let vm_version: VmVersion = system_env.version.into(); - let vm_sub_version = match vm_version { - VmVersion::M6Initial => MultiVMSubversion::V1, - VmVersion::M6BugWithCompressionFixed => MultiVMSubversion::V2, - _ => panic!("Unsupported protocol version for vm_m6: {:?}", vm_version), - }; - Self::new_with_subversion(batch_env, system_env, storage, vm_sub_version) - } - fn push_transaction(&mut self, tx: Transaction) { crate::vm_m6::vm_with_bootloader::push_transaction_to_bootloader_memory( &mut self.vm, @@ -135,7 +127,7 @@ impl VmInterface for Vm { } fn get_current_execution_state(&self) -> CurrentExecutionState { - let (_full_history, raw_events, l1_messages) = self.vm.state.event_sink.flatten(); + let (raw_events, l1_messages) = self.vm.state.event_sink.flatten(); let events = merge_events(raw_events) .into_iter() .map(|e| e.into_vm_event(self.batch_env.number)) @@ -153,14 +145,6 @@ impl VmInterface for Vm { }) }) .collect(); - let total_log_queries = self.vm.state.event_sink.get_log_queries() - + self - .vm - .state - 
.precompiles_processor - .get_timestamp_history() - .len() - + self.vm.get_final_log_queries().len(); let used_contract_hashes = self .vm @@ -196,13 +180,10 @@ impl VmInterface for Vm { .map(GlueInto::glue_into) .collect(), used_contract_hashes, + user_l2_to_l1_logs: l2_to_l1_logs, + // Fields below are not produced by `vm6` system_logs: vec![], - total_log_queries, - cycles_used: self.vm.state.local_state.monotonic_cycle_counter, - // It's not applicable for `vm6` - deduplicated_events_logs: vec![], storage_refunds: vec![], - user_l2_to_l1_logs: l2_to_l1_logs, pubdata_costs: vec![], } } @@ -323,7 +304,19 @@ impl VmInterface for Vm { } } -impl VmInterfaceHistoryEnabled for Vm { +impl VmFactory for Vm { + fn new(batch_env: L1BatchEnv, system_env: SystemEnv, storage: StoragePtr) -> Self { + let vm_version: VmVersion = system_env.version.into(); + let vm_sub_version = match vm_version { + VmVersion::M6Initial => MultiVMSubversion::V1, + VmVersion::M6BugWithCompressionFixed => MultiVMSubversion::V2, + _ => panic!("Unsupported protocol version for vm_m6: {:?}", vm_version), + }; + Self::new_with_subversion(batch_env, system_env, storage, vm_sub_version) + } +} + +impl VmInterfaceHistoryEnabled for Vm { fn make_snapshot(&mut self) { self.vm.save_current_vm_as_snapshot() } @@ -333,6 +326,6 @@ impl VmInterfaceHistoryEnabled for Vm VmInstance { } /// Removes the latest snapshot without rolling back to it. - /// This function expects that there is at least one snapshot present. pub fn pop_snapshot_no_rollback(&mut self) { - self.snapshots.pop().unwrap(); + self.snapshots.pop(); } /// Returns the amount of gas remaining to the VM. @@ -781,7 +780,7 @@ impl VmInstance { // Collecting `block_tip_result` needs logs with timestamp, so we drain events for the `full_result` // after because draining will drop timestamps. 
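A behavioral change worth calling out in the hunks above: `pop_snapshot_no_rollback` no longer panics when the snapshot stack is empty; it simply does nothing. A self-contained sketch of the before/after semantics on a plain `Vec`-backed stack:

```rust
// Before/after semantics of `pop_snapshot_no_rollback`, shown on a bare
// `Vec`-backed snapshot stack (a stand-in for the VM's snapshot storage).
struct Snapshots<T>(Vec<T>);

impl<T> Snapshots<T> {
    // Old behavior: panics if no snapshot was made beforehand.
    fn pop_snapshot_strict(&mut self) -> T {
        self.0
            .pop()
            .expect("Snapshot should be created before rolling it back")
    }

    // New behavior: a silent no-op on an empty stack, so callers can discard
    // "a snapshot, if any" without tracking whether one exists.
    fn pop_snapshot_no_rollback(&mut self) {
        self.0.pop();
    }
}

fn main() {
    let mut snapshots = Snapshots(vec![1u32]);
    snapshots.pop_snapshot_no_rollback(); // pops the only snapshot
    snapshots.pop_snapshot_no_rollback(); // no-op instead of a panic
}
```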
- let (_full_history, raw_events, l1_messages) = self.state.event_sink.flatten(); + let (raw_events, l1_messages) = self.state.event_sink.flatten(); full_result.events = merge_events(raw_events) .into_iter() .map(|e| { diff --git a/core/lib/multivm/src/versions/vm_refunds_enhancement/old_vm/event_sink.rs b/core/lib/multivm/src/versions/vm_refunds_enhancement/old_vm/event_sink.rs index 2af642d358da..b9e0f1b61b34 100644 --- a/core/lib/multivm/src/versions/vm_refunds_enhancement/old_vm/event_sink.rs +++ b/core/lib/multivm/src/versions/vm_refunds_enhancement/old_vm/event_sink.rs @@ -29,7 +29,7 @@ impl OracleWithHistory for InMemoryEventSink { // otherwise we carry rollbacks to the parent's frames impl InMemoryEventSink { - pub fn flatten(&self) -> (Vec, Vec, Vec) { + pub fn flatten(&self) -> (Vec, Vec) { assert_eq!( self.frames_stack.len(), 1, @@ -38,8 +38,7 @@ impl InMemoryEventSink { // we forget rollbacks as we have finished the execution and can just apply them let history = self.frames_stack.forward().current_frame(); - let (events, l1_messages) = Self::events_and_l1_messages_from_history(history); - (history.iter().map(|x| **x).collect(), events, l1_messages) + Self::events_and_l1_messages_from_history(history) } pub fn get_log_queries(&self) -> usize { diff --git a/core/lib/multivm/src/versions/vm_refunds_enhancement/vm.rs b/core/lib/multivm/src/versions/vm_refunds_enhancement/vm.rs index 0bac1d7d47de..c580b84e2022 100644 --- a/core/lib/multivm/src/versions/vm_refunds_enhancement/vm.rs +++ b/core/lib/multivm/src/versions/vm_refunds_enhancement/vm.rs @@ -7,7 +7,7 @@ use crate::{ glue::GlueInto, interface::{ BootloaderMemory, BytecodeCompressionError, CurrentExecutionState, L1BatchEnv, L2BlockEnv, - SystemEnv, VmExecutionMode, VmExecutionResultAndLogs, VmInterface, + SystemEnv, VmExecutionMode, VmExecutionResultAndLogs, VmFactory, VmInterface, VmInterfaceHistoryEnabled, VmMemoryMetrics, }, vm_latest::HistoryEnabled, @@ -35,22 +35,9 @@ pub struct Vm { _phantom: std::marker::PhantomData, } -impl VmInterface for Vm { +impl VmInterface for Vm { type TracerDispatcher = TracerDispatcher; - fn new(batch_env: L1BatchEnv, system_env: SystemEnv, storage: StoragePtr) -> Self { - let (state, bootloader_state) = new_vm_state(storage.clone(), &system_env, &batch_env); - Self { - bootloader_state, - state, - storage, - system_env, - batch_env, - snapshots: vec![], - _phantom: Default::default(), - } - } - /// Push tx into memory for the future execution fn push_transaction(&mut self, tx: Transaction) { self.push_transaction_with_compression(tx, true) @@ -83,7 +70,7 @@ impl VmInterface for Vm { /// This method should be used only after the batch execution. /// Otherwise it can panic. 
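The same simplification is applied to every VM version's event sink: since `CurrentExecutionState` no longer carries `deduplicated_events_logs`, `flatten()` stops cloning and returning the raw forward history. A sketch of the signature change with placeholder log and event types:

```rust
// Shape of the `flatten()` change, using placeholder types in place of the
// real `LogQuery`/event structures.
#[derive(Clone)]
struct LogQuery;
struct Event;
struct L1Message;

struct EventSink {
    forward: Vec<LogQuery>,
}

impl EventSink {
    fn events_and_l1_messages(&self) -> (Vec<Event>, Vec<L1Message>) {
        // The real implementation merges `self.forward` into events/messages.
        (Vec::new(), Vec::new())
    }

    // Old: also cloned the whole forward history so the caller could populate
    // `CurrentExecutionState::deduplicated_events_logs`.
    fn flatten_old(&self) -> (Vec<LogQuery>, Vec<Event>, Vec<L1Message>) {
        let history = self.forward.clone();
        let (events, l1_messages) = self.events_and_l1_messages();
        (history, events, l1_messages)
    }

    // New: the history copy is dropped together with its only consumer.
    fn flatten(&self) -> (Vec<Event>, Vec<L1Message>) {
        self.events_and_l1_messages()
    }
}
```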
fn get_current_execution_state(&self) -> CurrentExecutionState { - let (deduplicated_events_logs, raw_events, l1_messages) = self.state.event_sink.flatten(); + let (raw_events, l1_messages) = self.state.event_sink.flatten(); let events: Vec<_> = merge_events(raw_events) .into_iter() .map(|e| e.into_vm_event(self.batch_env.number)) @@ -93,13 +80,6 @@ impl VmInterface for Vm { .into_iter() .map(|log| UserL2ToL1Log(log.glue_into())) .collect(); - let total_log_queries = self.state.event_sink.get_log_queries() - + self - .state - .precompiles_processor - .get_timestamp_history() - .len() - + self.state.storage.get_final_log_queries().len(); let storage_log_queries = self.state.storage.get_final_log_queries(); @@ -115,12 +95,6 @@ impl VmInterface for Vm { used_contract_hashes: self.get_used_contracts(), user_l2_to_l1_logs: l2_to_l1_logs, system_logs: vec![], - total_log_queries, - cycles_used: self.state.local_state.monotonic_cycle_counter, - deduplicated_events_logs: deduplicated_events_logs - .into_iter() - .map(GlueInto::glue_into) - .collect(), storage_refunds: self.state.storage.returned_refunds.inner().clone(), pubdata_costs: Vec::new(), } @@ -157,14 +131,27 @@ impl VmInterface for Vm { } } -/// Methods of vm, which required some history manipulations -impl VmInterfaceHistoryEnabled for Vm { +impl VmFactory for Vm { + fn new(batch_env: L1BatchEnv, system_env: SystemEnv, storage: StoragePtr) -> Self { + let (state, bootloader_state) = new_vm_state(storage.clone(), &system_env, &batch_env); + Self { + bootloader_state, + state, + storage, + system_env, + batch_env, + snapshots: vec![], + _phantom: Default::default(), + } + } +} + +impl VmInterfaceHistoryEnabled for Vm { /// Create snapshot of current vm state and push it into the memory fn make_snapshot(&mut self) { - self.make_snapshot_inner() + self.make_snapshot_inner(); } - /// Rollback vm state to the latest snapshot and destroy the snapshot fn rollback_to_the_latest_snapshot(&mut self) { let snapshot = self .snapshots @@ -173,10 +160,7 @@ impl VmInterfaceHistoryEnabled for Vm { self.rollback_to_snapshot(snapshot); } - /// Pop the latest snapshot from the memory and destroy it fn pop_snapshot_no_rollback(&mut self) { - self.snapshots - .pop() - .expect("Snapshot should be created before rolling it back"); + self.snapshots.pop(); } } diff --git a/core/lib/multivm/src/versions/vm_virtual_blocks/old_vm/event_sink.rs b/core/lib/multivm/src/versions/vm_virtual_blocks/old_vm/event_sink.rs index eadfe70d0a7e..0d1c8ee554c9 100644 --- a/core/lib/multivm/src/versions/vm_virtual_blocks/old_vm/event_sink.rs +++ b/core/lib/multivm/src/versions/vm_virtual_blocks/old_vm/event_sink.rs @@ -29,7 +29,7 @@ impl OracleWithHistory for InMemoryEventSink { // otherwise we carry rollbacks to the parent's frames impl InMemoryEventSink { - pub fn flatten(&self) -> (Vec, Vec, Vec) { + pub fn flatten(&self) -> (Vec, Vec) { assert_eq!( self.frames_stack.len(), 1, @@ -38,8 +38,7 @@ impl InMemoryEventSink { // we forget rollbacks as we have finished the execution and can just apply them let history = self.frames_stack.forward().current_frame(); - let (events, l1_messages) = Self::events_and_l1_messages_from_history(history); - (history.iter().map(|x| **x).collect(), events, l1_messages) + Self::events_and_l1_messages_from_history(history) } pub fn get_log_queries(&self) -> usize { diff --git a/core/lib/multivm/src/versions/vm_virtual_blocks/vm.rs b/core/lib/multivm/src/versions/vm_virtual_blocks/vm.rs index ec9b12e82ed4..a7cef17591ad 100644 --- 
a/core/lib/multivm/src/versions/vm_virtual_blocks/vm.rs +++ b/core/lib/multivm/src/versions/vm_virtual_blocks/vm.rs @@ -7,7 +7,7 @@ use crate::{ glue::GlueInto, interface::{ BootloaderMemory, BytecodeCompressionError, CurrentExecutionState, L1BatchEnv, L2BlockEnv, - SystemEnv, VmExecutionMode, VmExecutionResultAndLogs, VmInterface, + SystemEnv, VmExecutionMode, VmExecutionResultAndLogs, VmFactory, VmInterface, VmInterfaceHistoryEnabled, VmMemoryMetrics, }, vm_latest::HistoryEnabled, @@ -35,22 +35,9 @@ pub struct Vm { _phantom: std::marker::PhantomData, } -impl VmInterface for Vm { +impl VmInterface for Vm { type TracerDispatcher = TracerDispatcher; - fn new(batch_env: L1BatchEnv, system_env: SystemEnv, storage: StoragePtr) -> Self { - let (state, bootloader_state) = new_vm_state(storage.clone(), &system_env, &batch_env); - Self { - bootloader_state, - state, - storage, - system_env, - batch_env, - snapshots: vec![], - _phantom: Default::default(), - } - } - /// Push tx into memory for the future execution fn push_transaction(&mut self, tx: Transaction) { self.push_transaction_with_compression(tx, true) @@ -83,7 +70,7 @@ impl VmInterface for Vm { /// This method should be used only after the batch execution. /// Otherwise it can panic. fn get_current_execution_state(&self) -> CurrentExecutionState { - let (deduplicated_events_logs, raw_events, l1_messages) = self.state.event_sink.flatten(); + let (raw_events, l1_messages) = self.state.event_sink.flatten(); let events: Vec<_> = merge_events(raw_events) .into_iter() .map(|e| e.into_vm_event(self.batch_env.number)) @@ -93,13 +80,6 @@ impl VmInterface for Vm { .into_iter() .map(|log| UserL2ToL1Log(log.glue_into())) .collect(); - let total_log_queries = self.state.event_sink.get_log_queries() - + self - .state - .precompiles_processor - .get_timestamp_history() - .len() - + self.state.storage.get_final_log_queries().len(); let storage_log_queries = self.state.storage.get_final_log_queries(); @@ -115,12 +95,6 @@ impl VmInterface for Vm { used_contract_hashes: self.get_used_contracts(), user_l2_to_l1_logs: l2_to_l1_logs, system_logs: vec![], - total_log_queries, - cycles_used: self.state.local_state.monotonic_cycle_counter, - deduplicated_events_logs: deduplicated_events_logs - .into_iter() - .map(GlueInto::glue_into) - .collect(), storage_refunds: Vec::new(), pubdata_costs: Vec::new(), } @@ -157,14 +131,26 @@ impl VmInterface for Vm { } } -/// Methods of vm, which required some history manipulations -impl VmInterfaceHistoryEnabled for Vm { - /// Create snapshot of current vm state and push it into the memory +impl VmFactory for Vm { + fn new(batch_env: L1BatchEnv, system_env: SystemEnv, storage: StoragePtr) -> Self { + let (state, bootloader_state) = new_vm_state(storage.clone(), &system_env, &batch_env); + Self { + bootloader_state, + state, + storage, + system_env, + batch_env, + snapshots: vec![], + _phantom: Default::default(), + } + } +} + +impl VmInterfaceHistoryEnabled for Vm { fn make_snapshot(&mut self) { self.make_snapshot_inner() } - /// Rollback vm state to the latest snapshot and destroy the snapshot fn rollback_to_the_latest_snapshot(&mut self) { let snapshot = self .snapshots @@ -173,10 +159,7 @@ impl VmInterfaceHistoryEnabled for Vm { self.rollback_to_snapshot(snapshot); } - /// Pop the latest snapshot from the memory and destroy it fn pop_snapshot_no_rollback(&mut self) { - self.snapshots - .pop() - .expect("Snapshot should be created before rolling it back"); + self.snapshots.pop(); } } diff --git 
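For reference, this is roughly what `CurrentExecutionState` looks like after the trimming applied across all VM versions above; field types are simplified placeholders and fields untouched by this diff are elided:

```rust
// Rough shape of `CurrentExecutionState` after the trimming. Field types are
// simplified placeholders, not the real `zksync_types` definitions.
struct VmEvent;
struct SystemL2ToL1Log;
struct UserL2ToL1Log;

#[allow(dead_code)]
struct CurrentExecutionState {
    events: Vec<VmEvent>,
    used_contract_hashes: Vec<[u8; 32]>,
    system_logs: Vec<SystemL2ToL1Log>,
    user_l2_to_l1_logs: Vec<UserL2ToL1Log>,
    storage_refunds: Vec<u64>,
    pubdata_costs: Vec<i32>,
    // Removed by this diff (unused downstream or derivable on demand):
    // total_log_queries: usize,
    // cycles_used: u32,
    // deduplicated_events_logs: Vec<LogQuery>,
}
```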
a/core/lib/multivm/src/vm_instance.rs b/core/lib/multivm/src/vm_instance.rs index 74cb93c494b1..c8a7ce837991 100644 --- a/core/lib/multivm/src/vm_instance.rs +++ b/core/lib/multivm/src/vm_instance.rs @@ -1,28 +1,33 @@ -use zksync_state::{StoragePtr, WriteStorage}; -use zksync_types::VmVersion; +use zksync_state::{ImmutableStorageView, ReadStorage, StoragePtr, StorageView}; +use zksync_types::vm::{FastVmMode, VmVersion}; use zksync_utils::bytecode::CompressedBytecodeInfo; use crate::{ glue::history_mode::HistoryMode, interface::{ BootloaderMemory, BytecodeCompressionError, CurrentExecutionState, FinishedL1Batch, - L1BatchEnv, L2BlockEnv, SystemEnv, VmExecutionMode, VmExecutionResultAndLogs, VmInterface, - VmInterfaceHistoryEnabled, VmMemoryMetrics, + L1BatchEnv, L2BlockEnv, SystemEnv, VmExecutionMode, VmExecutionResultAndLogs, VmFactory, + VmInterface, VmInterfaceHistoryEnabled, VmMemoryMetrics, }, tracers::TracerDispatcher, + versions::shadow::ShadowVm, }; +pub type ShadowedFastVm = ShadowVm, H>>; + #[derive(Debug)] -pub enum VmInstance { - VmM5(crate::vm_m5::Vm), - VmM6(crate::vm_m6::Vm), - Vm1_3_2(crate::vm_1_3_2::Vm), - VmVirtualBlocks(crate::vm_virtual_blocks::Vm), - VmVirtualBlocksRefundsEnhancement(crate::vm_refunds_enhancement::Vm), - VmBoojumIntegration(crate::vm_boojum_integration::Vm), - Vm1_4_1(crate::vm_1_4_1::Vm), - Vm1_4_2(crate::vm_1_4_2::Vm), - Vm1_5_0(crate::vm_latest::Vm), +pub enum VmInstance { + VmM5(crate::vm_m5::Vm, H>), + VmM6(crate::vm_m6::Vm, H>), + Vm1_3_2(crate::vm_1_3_2::Vm, H>), + VmVirtualBlocks(crate::vm_virtual_blocks::Vm, H>), + VmVirtualBlocksRefundsEnhancement(crate::vm_refunds_enhancement::Vm, H>), + VmBoojumIntegration(crate::vm_boojum_integration::Vm, H>), + Vm1_4_1(crate::vm_1_4_1::Vm, H>), + Vm1_4_2(crate::vm_1_4_2::Vm, H>), + Vm1_5_0(crate::vm_latest::Vm, H>), + VmFast(crate::vm_fast::Vm>), + ShadowedVmFast(ShadowedFastVm), } macro_rules! dispatch_vm { @@ -37,18 +42,14 @@ macro_rules! 
dispatch_vm { VmInstance::Vm1_4_1(vm) => vm.$function($($params)*), VmInstance::Vm1_4_2(vm) => vm.$function($($params)*), VmInstance::Vm1_5_0(vm) => vm.$function($($params)*), + VmInstance::VmFast(vm) => vm.$function($($params)*), + VmInstance::ShadowedVmFast(vm) => vm.$function($($params)*), } }; } -impl VmInterface for VmInstance { - type TracerDispatcher = TracerDispatcher; - - fn new(batch_env: L1BatchEnv, system_env: SystemEnv, storage_view: StoragePtr) -> Self { - let protocol_version = system_env.version; - let vm_version: VmVersion = protocol_version.into(); - Self::new_with_specific_version(batch_env, system_env, storage_view, vm_version) - } +impl VmInterface for VmInstance { + type TracerDispatcher = TracerDispatcher, H>; /// Push tx into memory for the future execution fn push_transaction(&mut self, tx: zksync_types::Transaction) { @@ -130,9 +131,19 @@ impl VmInterface for VmInstance { } } -impl VmInterfaceHistoryEnabled - for VmInstance -{ +impl VmFactory> for VmInstance { + fn new( + batch_env: L1BatchEnv, + system_env: SystemEnv, + storage_view: StoragePtr>, + ) -> Self { + let protocol_version = system_env.version; + let vm_version: VmVersion = protocol_version.into(); + Self::new_with_specific_version(batch_env, system_env, storage_view, vm_version) + } +} + +impl VmInterfaceHistoryEnabled for VmInstance { fn make_snapshot(&mut self) { dispatch_vm!(self.make_snapshot()) } @@ -146,11 +157,11 @@ impl VmInterfaceHistoryEnabled } } -impl VmInstance { +impl VmInstance { pub fn new_with_specific_version( l1_batch_env: L1BatchEnv, system_env: SystemEnv, - storage_view: StoragePtr, + storage_view: StoragePtr>, vm_version: VmVersion, ) -> Self { match vm_version { @@ -236,4 +247,27 @@ impl VmInstance { } } } + + /// Creates a VM that may use the fast VM depending on the protocol version in `system_env` and `mode`. 
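`VmInstance` now carries `VmFast` and `ShadowedVmFast` variants, `dispatch_vm!` fans each `VmInterface` call out to whichever variant is active, and the `maybe_fast` constructor that follows picks the variant from `FastVmMode`. A condensed, runnable sketch of the enum-dispatch pattern with two placeholder VM versions instead of eleven:

```rust
// Condensed sketch of the `dispatch_vm!` enum-dispatch pattern.
struct VmOld;
struct VmFast;

impl VmOld {
    fn push_transaction(&mut self, tx: u32) {
        println!("old VM got tx {tx}");
    }
}
impl VmFast {
    fn push_transaction(&mut self, tx: u32) {
        println!("fast VM got tx {tx}");
    }
}

enum VmInstance {
    Old(VmOld),
    Fast(VmFast),
}

// One match arm per variant; every `VmInterface` method expands to this.
macro_rules! dispatch_vm {
    ($self:ident.$function:ident($($params:tt)*)) => {
        match $self {
            VmInstance::Old(vm) => vm.$function($($params)*),
            VmInstance::Fast(vm) => vm.$function($($params)*),
        }
    };
}

impl VmInstance {
    fn push_transaction(&mut self, tx: u32) {
        dispatch_vm!(self.push_transaction(tx))
    }
}

fn main() {
    let mut vm = VmInstance::Fast(VmFast);
    vm.push_transaction(42);
}
```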
+ pub fn maybe_fast( + l1_batch_env: L1BatchEnv, + system_env: SystemEnv, + storage_view: StoragePtr>, + mode: FastVmMode, + ) -> Self { + let vm_version = system_env.version.into(); + match vm_version { + VmVersion::Vm1_5_0IncreasedBootloaderMemory => match mode { + FastVmMode::Old => Self::new(l1_batch_env, system_env, storage_view), + FastVmMode::New => { + let storage = ImmutableStorageView::new(storage_view); + Self::VmFast(crate::vm_fast::Vm::new(l1_batch_env, system_env, storage)) + } + FastVmMode::Shadow => { + Self::ShadowedVmFast(ShadowVm::new(l1_batch_env, system_env, storage_view)) + } + }, + _ => Self::new(l1_batch_env, system_env, storage_view), + } + } } diff --git a/core/lib/protobuf_config/src/eth.rs b/core/lib/protobuf_config/src/eth.rs index b713e650d019..c605e6d2cccb 100644 --- a/core/lib/protobuf_config/src/eth.rs +++ b/core/lib/protobuf_config/src/eth.rs @@ -31,6 +31,7 @@ impl proto::PubdataSendingMode { From::Calldata => Self::Calldata, From::Blobs => Self::Blobs, From::Custom => Self::Custom, + From::RelayedL2Calldata => Self::RelayedL2Calldata, } } @@ -40,6 +41,7 @@ impl proto::PubdataSendingMode { Self::Calldata => To::Calldata, Self::Blobs => To::Blobs, Self::Custom => To::Custom, + Self::RelayedL2Calldata => To::RelayedL2Calldata, } } } @@ -174,6 +176,8 @@ impl ProtoRepr for proto::GasAdjuster { ) .context("internal_pubdata_pricing_multiplier")?, max_blob_base_fee: self.max_blob_base_fee, + // TODO(EVM-676): support this field + settlement_mode: Default::default(), }) } diff --git a/core/lib/protobuf_config/src/experimental.rs b/core/lib/protobuf_config/src/experimental.rs index 8d92f3ef87a8..cb959e229047 100644 --- a/core/lib/protobuf_config/src/experimental.rs +++ b/core/lib/protobuf_config/src/experimental.rs @@ -1,10 +1,11 @@ use std::num::NonZeroU32; use anyhow::Context as _; +use zksync_basic_types::{vm::FastVmMode, L1BatchNumber}; use zksync_config::configs; use zksync_protobuf::{repr::ProtoRepr, required}; -use crate::proto::experimental as proto; +use crate::{proto::experimental as proto, read_optional_repr}; impl ProtoRepr for proto::Db { type Type = configs::ExperimentalDBConfig; @@ -49,3 +50,76 @@ impl ProtoRepr for proto::Db { } } } + +impl proto::FastVmMode { + fn new(source: FastVmMode) -> Self { + match source { + FastVmMode::Old => Self::Old, + FastVmMode::New => Self::New, + FastVmMode::Shadow => Self::Shadow, + } + } + + fn parse(&self) -> FastVmMode { + match self { + Self::Old => FastVmMode::Old, + Self::New => FastVmMode::New, + Self::Shadow => FastVmMode::Shadow, + } + } +} + +impl ProtoRepr for proto::VmPlayground { + type Type = configs::ExperimentalVmPlaygroundConfig; + + fn read(&self) -> anyhow::Result { + Ok(Self::Type { + fast_vm_mode: self + .fast_vm_mode + .map(proto::FastVmMode::try_from) + .transpose() + .context("fast_vm_mode")? 
+ .map_or_else(FastVmMode::default, |mode| mode.parse()), + db_path: self + .db_path + .clone() + .unwrap_or_else(Self::Type::default_db_path), + first_processed_batch: L1BatchNumber(self.first_processed_batch.unwrap_or(0)), + reset: self.reset.unwrap_or(false), + }) + } + + fn build(this: &Self::Type) -> Self { + Self { + fast_vm_mode: Some(proto::FastVmMode::new(this.fast_vm_mode).into()), + db_path: Some(this.db_path.clone()), + first_processed_batch: Some(this.first_processed_batch.0), + reset: Some(this.reset), + } + } +} + +impl ProtoRepr for proto::Vm { + type Type = configs::ExperimentalVmConfig; + + fn read(&self) -> anyhow::Result { + Ok(Self::Type { + playground: read_optional_repr(&self.playground).unwrap_or_default(), + state_keeper_fast_vm_mode: self + .state_keeper_fast_vm_mode + .map(proto::FastVmMode::try_from) + .transpose() + .context("fast_vm_mode")? + .map_or_else(FastVmMode::default, |mode| mode.parse()), + }) + } + + fn build(this: &Self::Type) -> Self { + Self { + playground: Some(ProtoRepr::build(&this.playground)), + state_keeper_fast_vm_mode: Some( + proto::FastVmMode::new(this.state_keeper_fast_vm_mode).into(), + ), + } + } +} diff --git a/core/lib/protobuf_config/src/external_proof_integration_api.rs b/core/lib/protobuf_config/src/external_proof_integration_api.rs new file mode 100644 index 000000000000..e824df50dfc6 --- /dev/null +++ b/core/lib/protobuf_config/src/external_proof_integration_api.rs @@ -0,0 +1,22 @@ +use anyhow::Context; +use zksync_config::ExternalProofIntegrationApiConfig; +use zksync_protobuf::{required, ProtoRepr}; + +use crate::proto::external_proof_integration_api as proto; + +impl ProtoRepr for proto::ExternalProofIntegrationApi { + type Type = ExternalProofIntegrationApiConfig; + fn read(&self) -> anyhow::Result { + Ok(Self::Type { + http_port: required(&self.http_port) + .and_then(|p| Ok((*p).try_into()?)) + .context("http_port")?, + }) + } + + fn build(this: &Self::Type) -> Self { + Self { + http_port: Some(this.http_port.into()), + } + } +} diff --git a/core/lib/protobuf_config/src/general.rs b/core/lib/protobuf_config/src/general.rs index 367458f7aa25..af6f690dfc8f 100644 --- a/core/lib/protobuf_config/src/general.rs +++ b/core/lib/protobuf_config/src/general.rs @@ -40,6 +40,10 @@ impl ProtoRepr for proto::GeneralConfig { snapshot_recovery: read_optional_repr(&self.snapshot_recovery), external_price_api_client_config: read_optional_repr(&self.external_price_api_client), consensus_config: read_optional_repr(&self.consensus), + external_proof_integration_api_config: read_optional_repr( + &self.external_proof_integration_api, + ), + experimental_vm_config: read_optional_repr(&self.experimental_vm), }) } @@ -90,6 +94,11 @@ impl ProtoRepr for proto::GeneralConfig { .as_ref() .map(ProtoRepr::build), consensus: this.consensus_config.as_ref().map(ProtoRepr::build), + external_proof_integration_api: this + .external_proof_integration_api_config + .as_ref() + .map(ProtoRepr::build), + experimental_vm: this.experimental_vm_config.as_ref().map(ProtoRepr::build), } } } diff --git a/core/lib/protobuf_config/src/lib.rs b/core/lib/protobuf_config/src/lib.rs index d7a4a4e570ad..ee526b2bb67f 100644 --- a/core/lib/protobuf_config/src/lib.rs +++ b/core/lib/protobuf_config/src/lib.rs @@ -30,6 +30,7 @@ mod secrets; mod snapshots_creator; mod external_price_api_client; +mod external_proof_integration_api; mod snapshot_recovery; #[cfg(test)] mod tests; diff --git a/core/lib/protobuf_config/src/proto/config/eth_sender.proto 
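The `FastVmMode` setting is mirrored between the domain enum and its protobuf counterpart through a pair of total `new`/`parse` conversions, so adding a variant to one enum forces a compile error until the other side is updated. A self-contained sketch of the pattern; both enums are redeclared locally here, while the real ones live in `zksync_basic_types::vm` and the generated proto module:

```rust
// Sketch of the `FastVmMode` <-> proto mapping used in
// `protobuf_config/src/experimental.rs`.
#[derive(Debug, Clone, Copy, Default, PartialEq)]
enum FastVmMode {
    #[default]
    Old,
    New,
    Shadow,
}

mod proto {
    #[derive(Debug, Clone, Copy)]
    pub enum FastVmMode {
        Old,
        New,
        Shadow,
    }
}

impl proto::FastVmMode {
    fn new(source: FastVmMode) -> Self {
        match source {
            FastVmMode::Old => Self::Old,
            FastVmMode::New => Self::New,
            FastVmMode::Shadow => Self::Shadow,
        }
    }

    fn parse(self) -> FastVmMode {
        match self {
            Self::Old => FastVmMode::Old,
            Self::New => FastVmMode::New,
            Self::Shadow => FastVmMode::Shadow,
        }
    }
}

fn main() {
    // Round-trip property: every mode survives new() followed by parse().
    for mode in [FastVmMode::Old, FastVmMode::New, FastVmMode::Shadow] {
        assert_eq!(proto::FastVmMode::new(mode).parse(), mode);
    }
}
```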
b/core/lib/protobuf_config/src/proto/config/eth_sender.proto index 839c7f65b973..536ac216863e 100644 --- a/core/lib/protobuf_config/src/proto/config/eth_sender.proto +++ b/core/lib/protobuf_config/src/proto/config/eth_sender.proto @@ -24,6 +24,7 @@ enum PubdataSendingMode { CALLDATA = 0; BLOBS = 1; CUSTOM = 2; + RELAYED_L2_CALLDATA = 3; } message Sender { diff --git a/core/lib/protobuf_config/src/proto/config/experimental.proto b/core/lib/protobuf_config/src/proto/config/experimental.proto index 1336c4719d26..1682b2c9a834 100644 --- a/core/lib/protobuf_config/src/proto/config/experimental.proto +++ b/core/lib/protobuf_config/src/proto/config/experimental.proto @@ -18,3 +18,22 @@ message SnapshotRecovery { optional uint64 tree_recovery_parallel_persistence_buffer = 1; optional bool drop_storage_key_preimages = 2; // optional; false by default } + +enum FastVmMode { + OLD = 0; + NEW = 1; + SHADOW = 2; +} + +// Experimental VM configuration +message VmPlayground { + optional FastVmMode fast_vm_mode = 1; // optional; if not set, fast VM is not used + optional string db_path = 2; // optional; defaults to `./db/vm_playground` + optional uint32 first_processed_batch = 3; // optional; defaults to 0 + optional bool reset = 4; // optional; defaults to false +} + +message Vm { + optional VmPlayground playground = 1; // optional + optional FastVmMode state_keeper_fast_vm_mode = 2; // optional; if not set, fast VM is not used +} diff --git a/core/lib/protobuf_config/src/proto/config/external_proof_integration_api.proto b/core/lib/protobuf_config/src/proto/config/external_proof_integration_api.proto new file mode 100644 index 000000000000..07203202c9d6 --- /dev/null +++ b/core/lib/protobuf_config/src/proto/config/external_proof_integration_api.proto @@ -0,0 +1,7 @@ +syntax = "proto3"; + +package zksync.config.external_proof_integration_api; + +message ExternalProofIntegrationApi { + optional uint32 http_port = 1; +} diff --git a/core/lib/protobuf_config/src/proto/config/general.proto b/core/lib/protobuf_config/src/proto/config/general.proto index 37d507b9ab62..373559e73516 100644 --- a/core/lib/protobuf_config/src/proto/config/general.proto +++ b/core/lib/protobuf_config/src/proto/config/general.proto @@ -9,6 +9,7 @@ import "zksync/config/contract_verifier.proto"; import "zksync/config/database.proto"; import "zksync/config/circuit_breaker.proto"; import "zksync/config/eth_sender.proto"; +import "zksync/config/experimental.proto"; import "zksync/config/house_keeper.proto"; import "zksync/config/observability.proto"; import "zksync/config/snapshots_creator.proto"; @@ -21,37 +22,40 @@ import "zksync/config/pruning.proto"; import "zksync/config/object_store.proto"; import "zksync/config/base_token_adjuster.proto"; import "zksync/config/external_price_api_client.proto"; +import "zksync/config/external_proof_integration_api.proto"; import "zksync/core/consensus.proto"; message GeneralConfig { - optional database.Postgres postgres = 1; - optional api.Api api = 2; - optional contract_verifier.ContractVerifier contract_verifier = 3; - optional circuit_breaker.CircuitBreaker circuit_breaker = 5; - optional chain.Mempool mempool = 6; - optional chain.OperationsManager operations_manager = 8; - optional chain.StateKeeper state_keeper = 9; - optional house_keeper.HouseKeeper house_keeper = 10; - optional prover.Prover prover = 12; - optional utils.Prometheus prometheus = 15; - optional database.DB db = 20; - optional eth.ETH eth = 22; - optional prover.WitnessGenerator witness_generator = 24; - optional 
prover.WitnessVectorGenerator witness_vector_generator = 25; - optional prover.ProofCompressor proof_compressor = 27; - optional prover.ProofDataHandler data_handler = 28; - optional prover.ProverGroup prover_group = 29; - optional prover.ProverGateway prover_gateway = 30; - optional snapshot_creator.SnapshotsCreator snapshot_creator = 31; - optional observability.Observability observability = 32; - optional vm_runner.ProtectiveReadsWriter protective_reads_writer = 33; - optional object_store.ObjectStore core_object_store = 34; - optional snapshot_recovery.SnapshotRecovery snapshot_recovery = 35; - optional pruning.Pruning pruning = 36; - optional commitment_generator.CommitmentGenerator commitment_generator = 37; - optional da_dispatcher.DataAvailabilityDispatcher da_dispatcher = 38; - optional base_token_adjuster.BaseTokenAdjuster base_token_adjuster = 39; - optional vm_runner.BasicWitnessInputProducer basic_witness_input_producer = 40; - optional external_price_api_client.ExternalPriceApiClient external_price_api_client = 41; - optional core.consensus.Config consensus = 42; + optional database.Postgres postgres = 1; + optional api.Api api = 2; + optional contract_verifier.ContractVerifier contract_verifier = 3; + optional circuit_breaker.CircuitBreaker circuit_breaker = 5; + optional chain.Mempool mempool = 6; + optional chain.OperationsManager operations_manager = 8; + optional chain.StateKeeper state_keeper = 9; + optional house_keeper.HouseKeeper house_keeper = 10; + optional prover.Prover prover = 12; + optional utils.Prometheus prometheus = 15; + optional database.DB db = 20; + optional eth.ETH eth = 22; + optional prover.WitnessGenerator witness_generator = 24; + optional prover.WitnessVectorGenerator witness_vector_generator = 25; + optional prover.ProofCompressor proof_compressor = 27; + optional prover.ProofDataHandler data_handler = 28; + optional prover.ProverGroup prover_group = 29; + optional prover.ProverGateway prover_gateway = 30; + optional snapshot_creator.SnapshotsCreator snapshot_creator = 31; + optional observability.Observability observability = 32; + optional vm_runner.ProtectiveReadsWriter protective_reads_writer = 33; + optional object_store.ObjectStore core_object_store = 34; + optional snapshot_recovery.SnapshotRecovery snapshot_recovery = 35; + optional pruning.Pruning pruning = 36; + optional commitment_generator.CommitmentGenerator commitment_generator = 37; + optional da_dispatcher.DataAvailabilityDispatcher da_dispatcher = 38; + optional base_token_adjuster.BaseTokenAdjuster base_token_adjuster = 39; + optional vm_runner.BasicWitnessInputProducer basic_witness_input_producer = 40; + optional external_price_api_client.ExternalPriceApiClient external_price_api_client = 41; + optional core.consensus.Config consensus = 42; + optional external_proof_integration_api.ExternalProofIntegrationApi external_proof_integration_api = 43; + optional experimental.Vm experimental_vm = 44; } diff --git a/core/lib/protobuf_config/src/proto/config/vm_runner.proto b/core/lib/protobuf_config/src/proto/config/vm_runner.proto index 93521a5fd893..d6537c109e6d 100644 --- a/core/lib/protobuf_config/src/proto/config/vm_runner.proto +++ b/core/lib/protobuf_config/src/proto/config/vm_runner.proto @@ -2,6 +2,8 @@ syntax = "proto3"; package zksync.config.vm_runner; +import "zksync/config/experimental.proto"; + message ProtectiveReadsWriter { optional string db_path = 1; // required; fs path optional uint64 window_size = 2; // required diff --git 
a/core/lib/protobuf_config/src/vm_runner.rs b/core/lib/protobuf_config/src/vm_runner.rs index cc0d53ad519e..134cc20952f1 100644 --- a/core/lib/protobuf_config/src/vm_runner.rs +++ b/core/lib/protobuf_config/src/vm_runner.rs @@ -1,6 +1,6 @@ use anyhow::Context; use zksync_basic_types::L1BatchNumber; -use zksync_config::configs::{self}; +use zksync_config::configs; use zksync_protobuf::{required, ProtoRepr}; use crate::proto::vm_runner as proto; diff --git a/core/lib/prover_interface/src/api.rs b/core/lib/prover_interface/src/api.rs index 00ac85a40739..e4fe566618b8 100644 --- a/core/lib/prover_interface/src/api.rs +++ b/core/lib/prover_interface/src/api.rs @@ -4,6 +4,7 @@ use serde::{Deserialize, Serialize}; use zksync_types::{ protocol_version::{L1VerifierConfig, ProtocolSemanticVersion}, + tee_types::TeeType, L1BatchNumber, }; @@ -14,7 +15,7 @@ use crate::{ // Structs for holding data returned in HTTP responses -#[derive(Debug, Serialize, Deserialize)] +#[derive(Debug, Clone, Serialize, Deserialize)] pub struct ProofGenerationData { pub l1_batch_number: L1BatchNumber, pub witness_input_data: WitnessInputData, @@ -52,7 +53,10 @@ pub enum RegisterTeeAttestationResponse { #[derive(Debug, Serialize, Deserialize)] pub struct ProofGenerationDataRequest {} -pub type TeeProofGenerationDataRequest = ProofGenerationDataRequest; +#[derive(Debug, Serialize, Deserialize)] +pub struct TeeProofGenerationDataRequest { + pub tee_type: TeeType, +} #[derive(Debug, Serialize, Deserialize)] pub enum SubmitProofRequest { @@ -61,6 +65,12 @@ pub enum SubmitProofRequest { SkippedProofGeneration, } +#[derive(Debug, Serialize, Deserialize)] +pub struct OptionalProofGenerationDataRequest(pub Option); + +#[derive(Debug, Serialize, Deserialize)] +pub struct VerifyProofRequest(pub Box); + #[derive(Debug, PartialEq, Serialize, Deserialize)] pub struct SubmitTeeProofRequest(pub Box); diff --git a/core/lib/state/src/lib.rs b/core/lib/state/src/lib.rs index 5044490c46dd..7041b9bc2a6f 100644 --- a/core/lib/state/src/lib.rs +++ b/core/lib/state/src/lib.rs @@ -32,7 +32,7 @@ pub use self::{ BatchDiff, OwnedPostgresStorage, OwnedStorage, PgOrRocksdbStorage, ReadStorageFactory, RocksdbWithMemory, }, - storage_view::{StorageView, StorageViewCache, StorageViewMetrics}, + storage_view::{ImmutableStorageView, StorageView, StorageViewCache, StorageViewMetrics}, witness::WitnessStorage, }; @@ -92,3 +92,21 @@ pub trait WriteStorage: ReadStorage { /// Smart pointer to [`WriteStorage`]. pub type StoragePtr = Rc>; + +impl ReadStorage for StoragePtr { + fn read_value(&mut self, key: &StorageKey) -> StorageValue { + self.borrow_mut().read_value(key) + } + + fn is_write_initial(&mut self, key: &StorageKey) -> bool { + self.borrow_mut().is_write_initial(key) + } + + fn load_factory_dep(&mut self, hash: H256) -> Option> { + self.borrow_mut().load_factory_dep(hash) + } + + fn get_enumeration_index(&mut self, key: &StorageKey) -> Option { + self.borrow_mut().get_enumeration_index(key) + } +} diff --git a/core/lib/state/src/storage_view.rs b/core/lib/state/src/storage_view.rs index 7dcfda2ba406..b01f423f0787 100644 --- a/core/lib/state/src/storage_view.rs +++ b/core/lib/state/src/storage_view.rs @@ -8,7 +8,7 @@ use std::{ use zksync_types::{StorageKey, StorageValue, H256}; -use crate::{ReadStorage, WriteStorage}; +use crate::{ReadStorage, StoragePtr, WriteStorage}; /// Metrics for [`StorageView`]. 
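Note the new blanket impl in `core/lib/state/src/lib.rs`: `StoragePtr<S>` (an `Rc<RefCell<S>>`) now implements `ReadStorage` itself by borrowing and delegating, which is what lets the fast VM and `ImmutableStorageView` take storage by value instead of by pointer. A sketch with `ReadStorage` trimmed to a single method and toy key/value types:

```rust
// Sketch of the blanket delegation added in `core/lib/state/src/lib.rs`.
use std::{cell::RefCell, rc::Rc};

type StorageKey = u64;
type StorageValue = u64;
pub type StoragePtr<S> = Rc<RefCell<S>>;

pub trait ReadStorage {
    fn read_value(&mut self, key: &StorageKey) -> StorageValue;
}

impl<S: ReadStorage> ReadStorage for StoragePtr<S> {
    fn read_value(&mut self, key: &StorageKey) -> StorageValue {
        // Delegates through the RefCell; panics on conflicting borrows,
        // same as any other use of the pointer.
        self.borrow_mut().read_value(key)
    }
}

struct ConstStorage(StorageValue);
impl ReadStorage for ConstStorage {
    fn read_value(&mut self, _key: &StorageKey) -> StorageValue {
        self.0
    }
}

fn main() {
    let mut ptr: StoragePtr<ConstStorage> = Rc::new(RefCell::new(ConstStorage(7)));
    assert_eq!(ptr.read_value(&0), 7);
}
```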
#[derive(Debug, Default, Clone, Copy)] @@ -224,6 +224,46 @@ impl WriteStorage for StorageView { } } +/// Immutable wrapper around [`StorageView`] that reads directly from the underlying storage ignoring any +/// modifications in the [`StorageView`]. Used by the fast VM, which has its own internal management of writes. +#[derive(Debug)] +pub struct ImmutableStorageView(StoragePtr>); + +impl ImmutableStorageView { + /// Creates a new view based on the provided storage pointer. + pub fn new(ptr: StoragePtr>) -> Self { + Self(ptr) + } +} + +// All methods other than `read_value()` do not read back modified storage slots, so we proxy them as-is. +impl ReadStorage for ImmutableStorageView { + fn read_value(&mut self, key: &StorageKey) -> StorageValue { + let started_at = Instant::now(); + let mut this = self.0.borrow_mut(); + let cached_value = this.read_storage_keys().get(key); + cached_value.copied().unwrap_or_else(|| { + let value = this.storage_handle.read_value(key); + this.cache.read_storage_keys.insert(*key, value); + this.metrics.time_spent_on_storage_missed += started_at.elapsed(); + this.metrics.storage_invocations_missed += 1; + value + }) + } + + fn is_write_initial(&mut self, key: &StorageKey) -> bool { + self.0.borrow_mut().is_write_initial(key) + } + + fn load_factory_dep(&mut self, hash: H256) -> Option> { + self.0.borrow_mut().load_factory_dep(hash) + } + + fn get_enumeration_index(&mut self, key: &StorageKey) -> Option { + self.0.borrow_mut().get_enumeration_index(key) + } +} + #[cfg(test)] mod test { use zksync_types::{AccountTreeId, Address, H256}; @@ -272,4 +312,23 @@ mod test { assert_eq!(metrics.get_value_storage_invocations, 3); assert_eq!(metrics.set_value_storage_invocations, 2); } + + #[test] + fn immutable_storage_view() { + let account: AccountTreeId = AccountTreeId::new(Address::from([0xfe; 20])); + let key = H256::from_low_u64_be(61); + let value = H256::from_low_u64_be(73); + let key = StorageKey::new(account, key); + + let mut raw_storage = InMemoryStorage::default(); + raw_storage.set_value(key, value); + let storage_view = StorageView::new(raw_storage).to_rc_ptr(); + let mut immutable_view = ImmutableStorageView::new(storage_view.clone()); + + let new_value = H256::repeat_byte(0x11); + let prev_value = storage_view.borrow_mut().set_value(key, new_value); + assert_eq!(prev_value, value); + + assert_eq!(immutable_view.read_value(&key), value); + } } diff --git a/core/lib/tee_verifier/src/lib.rs b/core/lib/tee_verifier/src/lib.rs index b69b295130d1..32443b60c8ca 100644 --- a/core/lib/tee_verifier/src/lib.rs +++ b/core/lib/tee_verifier/src/lib.rs @@ -12,14 +12,14 @@ use zksync_merkle_tree::{ BlockOutputWithProofs, TreeInstruction, TreeLogEntry, TreeLogEntryWithProof, ValueHash, }; use zksync_multivm::{ - interface::{FinishedL1Batch, L2BlockEnv, VmInterface}, + interface::{FinishedL1Batch, L2BlockEnv, VmFactory, VmInterface}, vm_latest::HistoryEnabled, VmInstance, }; use zksync_prover_interface::inputs::{ StorageLogMetadata, V1TeeVerifierInput, WitnessInputMerklePaths, }; -use zksync_state::{InMemoryStorage, StorageView, WriteStorage}; +use zksync_state::{InMemoryStorage, ReadStorage, StorageView}; use zksync_types::{block::L2BlockExecutionData, L1BatchNumber, StorageLog, H256}; use zksync_utils::bytecode::hash_bytecode; use zksync_vm_utils::execute_tx; @@ -154,7 +154,7 @@ fn get_bowp_and_set_initial_values( } /// Executes the VM and returns `FinishedL1Batch` on success. 
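The test above pins down the contract of `ImmutableStorageView`: writes buffered in the owning `StorageView` are invisible to it, and reads always hit the underlying storage while populating the shared read cache. A toy model of that read path; all types are local stand-ins for the real ones:

```rust
// Toy model of `ImmutableStorageView::read_value`: consult the shared read
// cache first, otherwise hit the underlying storage, never the view's
// buffered modifications.
use std::{cell::RefCell, collections::HashMap, rc::Rc};

type Key = u64;
type Value = u64;

struct StorageView {
    underlying: HashMap<Key, Value>, // committed storage
    modified: HashMap<Key, Value>,   // writes buffered by the view
    read_cache: HashMap<Key, Value>, // shared read cache
}

struct ImmutableStorageView(Rc<RefCell<StorageView>>);

impl ImmutableStorageView {
    fn read_value(&mut self, key: Key) -> Value {
        let mut view = self.0.borrow_mut();
        if let Some(value) = view.read_cache.get(&key) {
            return *value;
        }
        // Note: `modified` is deliberately ignored; the fast VM manages its
        // own writes internally.
        let value = view.underlying.get(&key).copied().unwrap_or(0);
        view.read_cache.insert(key, value);
        value
    }
}

fn main() {
    let view = Rc::new(RefCell::new(StorageView {
        underlying: HashMap::from([(61, 73)]),
        modified: HashMap::new(),
        read_cache: HashMap::new(),
    }));
    let mut immutable = ImmutableStorageView(Rc::clone(&view));

    view.borrow_mut().modified.insert(61, 0x11); // write through the view
    assert_eq!(immutable.read_value(61), 73);    // the immutable view ignores it
}
```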
-fn execute_vm( +fn execute_vm( l2_blocks_execution_data: Vec, mut vm: VmInstance, ) -> anyhow::Result { diff --git a/core/lib/types/src/api/mod.rs b/core/lib/types/src/api/mod.rs index ec612bfdd7fc..8897df9b4f97 100644 --- a/core/lib/types/src/api/mod.rs +++ b/core/lib/types/src/api/mod.rs @@ -3,6 +3,7 @@ use serde::{de, Deserialize, Deserializer, Serialize, Serializer}; use serde_json::Value; use strum::Display; use zksync_basic_types::{ + tee_types::TeeType, web3::{AccessList, Bytes, Index}, L1BatchNumber, H160, H2048, H256, H64, U256, U64, }; @@ -823,6 +824,18 @@ pub struct Proof { pub storage_proof: Vec, } +#[derive(Debug, Clone, Serialize, Deserialize)] +#[serde(rename_all = "camelCase")] +pub struct TeeProof { + pub l1_batch_number: L1BatchNumber, + pub tee_type: Option, + pub pubkey: Option>, + pub signature: Option>, + pub proof: Option>, + pub proved_at: DateTime, + pub attestation: Option>, +} + #[derive(Debug, Clone, Serialize, Deserialize)] #[serde(rename_all = "camelCase")] pub struct TransactionDetailedResult { @@ -847,6 +860,17 @@ pub struct TransactionExecutionInfo { pub execution_info: Value, } +/// The fee history type returned from `eth_feeHistory` call. +#[derive(Debug, Clone, PartialEq, Deserialize, Serialize)] +#[serde(rename_all = "camelCase")] +pub struct FeeHistory { + #[serde(flatten)] + pub inner: zksync_basic_types::web3::FeeHistory, + /// An array of effective pubdata prices. Note, that this field is L2-specific and only provided by L2 nodes. + #[serde(default)] + pub l2_pubdata_price: Vec, +} + #[cfg(test)] mod tests { use super::*; diff --git a/core/lib/types/src/fee_model.rs b/core/lib/types/src/fee_model.rs index 6f5985d46108..b59aa65b04e0 100644 --- a/core/lib/types/src/fee_model.rs +++ b/core/lib/types/src/fee_model.rs @@ -9,9 +9,10 @@ use crate::ProtocolVersionId; /// Fee input to be provided into the VM. It contains two options: /// - `L1Pegged`: L1 gas price is provided to the VM, and the pubdata price is derived from it. Using this option is required for the -/// versions of Era prior to 1.4.1 integration. +/// versions of Era prior to 1.4.1 integration. /// - `PubdataIndependent`: L1 gas price and pubdata price are not necessarily dependent on one another. This options is more suitable for the -/// versions of Era after the 1.4.1 integration. It is expected that if a VM supports `PubdataIndependent` version, then it should also support `L1Pegged` version, but converting it into `PubdataIndependentBatchFeeModelInput` in-place. +/// versions of Era after the 1.4.1 integration. It is expected that if a VM supports `PubdataIndependent` version, then it should also support +/// `L1Pegged` version, but converting it into `PubdataIndependentBatchFeeModelInput` in-place. #[derive(Debug, Clone, Copy, PartialEq, Eq, Serialize, Deserialize)] pub enum BatchFeeInput { L1Pegged(L1PeggedBatchFeeModelInput), @@ -161,11 +162,11 @@ pub struct PubdataIndependentBatchFeeModelInput { /// The enum which represents the version of the fee model. It is used to determine which fee model should be used for the batch. /// - `V1`, the first model that was used in ZKsync Era. In this fee model, the pubdata price must be pegged to the L1 gas price. -/// Also, the fair L2 gas price is expected to only include the proving/computation price for the operator and not the costs that come from -/// processing the batch on L1. 
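The new `api::FeeHistory` above wraps the standard web3 response and appends an L2-only `l2PubdataPrice` array: `#[serde(flatten)]` keeps the wire format identical to `eth_feeHistory` plus one extra field, and `#[serde(default)]` lets the type deserialize responses from plain L1 nodes that omit it. A sketch of the wire behavior, with the inner web3 type reduced to two fields (assumes `serde` with the derive feature and `serde_json` as dependencies):

```rust
// Sketch of the flattened `FeeHistory` wire format.
use serde::{Deserialize, Serialize};

#[derive(Debug, PartialEq, Serialize, Deserialize)]
#[serde(rename_all = "camelCase")]
struct BaseFeeHistory {
    oldest_block: u64,
    base_fee_per_gas: Vec<u64>,
}

#[derive(Debug, PartialEq, Serialize, Deserialize)]
#[serde(rename_all = "camelCase")]
struct FeeHistory {
    // Serialized inline, so the JSON stays shaped like `eth_feeHistory`.
    #[serde(flatten)]
    inner: BaseFeeHistory,
    /// L2-specific; absent in responses from L1 nodes, hence the default.
    #[serde(default)]
    l2_pubdata_price: Vec<u64>,
}

fn main() {
    // An L1-style response without the L2 field still deserializes.
    let l1_json = r#"{"oldestBlock":100,"baseFeePerGas":[7,8]}"#;
    let parsed: FeeHistory = serde_json::from_str(l1_json).unwrap();
    assert!(parsed.l2_pubdata_price.is_empty());
}
```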
+/// Also, the fair L2 gas price is expected to only include the proving/computation price for the operator and not the costs that come from +/// processing the batch on L1. /// - `V2`, the second model that was used in ZKsync Era. There the pubdata price might be independent from the L1 gas price. Also, -/// The fair L2 gas price is expected to both the proving/computation price for the operator and the costs that come from -/// processing the batch on L1. +/// The fair L2 gas price is expected to both the proving/computation price for the operator and the costs that come from +/// processing the batch on L1. #[derive(Debug, Clone, Copy, Serialize, Deserialize)] pub enum FeeModelConfig { V1(FeeModelConfigV1), diff --git a/core/lib/types/src/lib.rs b/core/lib/types/src/lib.rs index 105d43aa6c6c..a55f6b5753db 100644 --- a/core/lib/types/src/lib.rs +++ b/core/lib/types/src/lib.rs @@ -16,7 +16,7 @@ pub use protocol_upgrade::{ProtocolUpgrade, ProtocolVersion}; use serde::{Deserialize, Serialize}; pub use storage::*; pub use tx::Execute; -pub use zksync_basic_types::{protocol_version::ProtocolVersionId, vm_version::VmVersion, *}; +pub use zksync_basic_types::{protocol_version::ProtocolVersionId, vm, *}; pub use zksync_crypto_primitives::*; use zksync_utils::{ address_to_u256, bytecode::hash_bytecode, h256_to_u256, u256_to_account_address, diff --git a/core/lib/types/src/pubdata_da.rs b/core/lib/types/src/pubdata_da.rs index 6705fdc29530..bc7dc55e53de 100644 --- a/core/lib/types/src/pubdata_da.rs +++ b/core/lib/types/src/pubdata_da.rs @@ -15,6 +15,8 @@ pub enum PubdataDA { Blobs, /// Pubdata is sent to the external storage (GCS/DA layers) or not sent at all. Custom, + /// Pubdata is sent to an L2 to be eventually relayed to L1. + RelayedL2Calldata, } impl From for PubdataDA { @@ -23,6 +25,7 @@ impl From for PubdataDA { PubdataSendingMode::Calldata => PubdataDA::Calldata, PubdataSendingMode::Blobs => PubdataDA::Blobs, PubdataSendingMode::Custom => PubdataDA::Custom, + PubdataSendingMode::RelayedL2Calldata => PubdataDA::RelayedL2Calldata, } } } diff --git a/core/lib/types/src/storage/writes/mod.rs b/core/lib/types/src/storage/writes/mod.rs index ef19eeffed02..88f032db1924 100644 --- a/core/lib/types/src/storage/writes/mod.rs +++ b/core/lib/types/src/storage/writes/mod.rs @@ -122,10 +122,11 @@ impl StateDiffRecord { /// entry <- enumeration_index || compressed value /// 2. 
if initial write: /// entry <- blake2(bytes32(address), key) || compressed value + /// /// size: - /// initial: max of 65 bytes - /// repeated: max of 38 bytes - /// before: 156 bytes for each + /// - initial: max of 65 bytes + /// - repeated: max of 38 bytes + /// - before: 156 bytes for each pub fn compress(&self) -> Vec { let mut comp_state_diff = match self.enumeration_index { 0 => self.derived_key.to_vec(), diff --git a/core/lib/types/src/utils.rs b/core/lib/types/src/utils.rs index 2bbbc34e8f74..bf086d6cdcd4 100644 --- a/core/lib/types/src/utils.rs +++ b/core/lib/types/src/utils.rs @@ -50,7 +50,7 @@ pub fn nonces_to_full_nonce(tx_nonce: U256, deploy_nonce: U256) -> U256 { DEPLOYMENT_NONCE_INCREMENT * deploy_nonce + tx_nonce } -fn key_for_eth_balance(address: &Address) -> H256 { +pub fn key_for_eth_balance(address: &Address) -> H256 { let address_h256 = address_to_h256(address); let bytes = [address_h256.as_bytes(), &[0; 32]].concat(); diff --git a/core/lib/vlog/src/lib.rs b/core/lib/vlog/src/lib.rs index 390b7782c584..268fbd0b39eb 100644 --- a/core/lib/vlog/src/lib.rs +++ b/core/lib/vlog/src/lib.rs @@ -42,6 +42,7 @@ impl ObservabilityGuard { if let Some(sentry_guard) = &self.sentry_guard { sentry_guard.flush(Some(FLUSH_TIMEOUT)); + tracing::info!("Sentry events are flushed"); } if let Some(provider) = &self.otlp_tracing_provider { @@ -50,6 +51,7 @@ impl ObservabilityGuard { tracing::warn!("Flushing the spans failed: {err:?}"); } } + tracing::info!("Spans are flushed"); } if let Some(provider) = &self.otlp_logging_provider { @@ -58,6 +60,7 @@ impl ObservabilityGuard { tracing::warn!("Flushing the logs failed: {err:?}"); } } + tracing::info!("Logs are flushed"); } } @@ -70,15 +73,20 @@ impl ObservabilityGuard { // `take` here and below ensures that we don't have any access to the deinitialized resources. if let Some(sentry_guard) = self.sentry_guard.take() { sentry_guard.close(Some(SHUTDOWN_TIMEOUT)); + tracing::info!("Sentry client is shut down"); } if let Some(provider) = self.otlp_tracing_provider.take() { if let Err(err) = provider.shutdown() { tracing::warn!("Shutting down the OTLP tracing provider failed: {err:?}"); + } else { + tracing::info!("OTLP tracing provider is shut down"); } } if let Some(provider) = self.otlp_logging_provider.take() { if let Err(err) = provider.shutdown() { tracing::warn!("Shutting down the OTLP logs provider failed: {err:?}"); + } else { + tracing::info!("OTLP logs provider is shut down"); } } } diff --git a/core/lib/vlog/src/logs/mod.rs b/core/lib/vlog/src/logs/mod.rs index 0ecf1c6d9f0b..2379119ddffc 100644 --- a/core/lib/vlog/src/logs/mod.rs +++ b/core/lib/vlog/src/logs/mod.rs @@ -1,4 +1,4 @@ -use std::{backtrace::Backtrace, panic::PanicInfo, str::FromStr}; +use std::{backtrace::Backtrace, str::FromStr}; use serde::Deserialize; use tracing_subscriber::{fmt, registry::LookupSpan, EnvFilter, Layer}; @@ -129,7 +129,8 @@ impl Logs { } } -fn json_panic_handler(panic_info: &PanicInfo) { +#[allow(deprecated)] // Not available yet on stable, so we can't switch right now. 
+fn json_panic_handler(panic_info: &std::panic::PanicInfo) { let backtrace = Backtrace::force_capture(); let timestamp = chrono::Utc::now(); let panic_message = if let Some(s) = panic_info.payload().downcast_ref::() { diff --git a/core/lib/vlog/src/opentelemetry/mod.rs b/core/lib/vlog/src/opentelemetry/mod.rs index d55680cd0a50..c0b72b802de8 100644 --- a/core/lib/vlog/src/opentelemetry/mod.rs +++ b/core/lib/vlog/src/opentelemetry/mod.rs @@ -8,7 +8,7 @@ use opentelemetry_sdk::{ Resource, }; use opentelemetry_semantic_conventions::resource::{ - K8S_NAMESPACE_NAME, K8S_POD_NAME, SERVICE_NAME, + DEPLOYMENT_ENVIRONMENT, K8S_CLUSTER_NAME, K8S_NAMESPACE_NAME, K8S_POD_NAME, SERVICE_NAME, }; use tracing_subscriber::{registry::LookupSpan, EnvFilter, Layer}; use url::Url; @@ -27,6 +27,11 @@ pub struct ServiceDescriptor { pub k8s_pod_name: String, /// Name of the k8s namespace. pub k8s_namespace_name: String, + /// Name of the k8s cluster. + pub k8s_cluster_name: String, + /// Name of the deployment environment. + /// Note that the single deployment environment can be spread among multiple clusters. + pub deployment_environment: String, /// Name of the service. pub service_name: String, } @@ -42,12 +47,20 @@ impl ServiceDescriptor { pub const K8S_POD_NAME_ENV_VAR: &'static str = "POD_NAME"; /// Environment variable to fetch the k8s namespace name. pub const K8S_NAMESPACE_NAME_ENV_VAR: &'static str = "POD_NAMESPACE"; + /// Environment variable to fetch the k8s cluster name. + pub const K8S_CLUSTER_NAME_ENV_VAR: &'static str = "CLUSTER_NAME"; + /// Environment variable to fetch the deployment environment. + pub const DEPLOYMENT_ENVIRONMENT_ENV_VAR: &'static str = "DEPLOYMENT_ENVIRONMENT"; /// Environment variable to fetch the service name. pub const SERVICE_NAME_ENV_VAR: &'static str = "SERVICE_NAME"; /// Default value for the k8s pod name. pub const DEFAULT_K8S_POD_NAME: &'static str = "zksync-0"; /// Default value for the k8s namespace name. pub const DEFAULT_K8S_NAMESPACE_NAME: &'static str = "local"; + /// Default value for the k8s cluster name. + pub const DEFAULT_K8S_CLUSTER_NAME: &'static str = "local"; + /// Default value for the deployment environment. + pub const DEFAULT_DEPLOYMENT_ENVIRONMENT: &'static str = "local"; /// Default value for the service name. 
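The two new resource attributes (`k8s_cluster_name`, `deployment_environment`) follow the same environment-variable-with-fallback pattern as the existing descriptor fields. A sketch of the `env_or` helper invoked above; its body here is a plausible stand-in, not the module's actual implementation:

```rust
// Plausible shape of the `env_or` fallback helper used by `ServiceDescriptor`.
use std::env;

fn env_or(var: &str, default: &str) -> String {
    env::var(var).unwrap_or_else(|_| default.to_owned())
}

fn main() {
    // With CLUSTER_NAME unset, the descriptor falls back to "local".
    let cluster = env_or("CLUSTER_NAME", "local");
    println!("k8s.cluster.name = {cluster}");
}
```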
pub const DEFAULT_SERVICE_NAME: &'static str = "zksync"; @@ -64,6 +77,14 @@ impl ServiceDescriptor { Self::K8S_NAMESPACE_NAME_ENV_VAR, Self::DEFAULT_K8S_NAMESPACE_NAME, ), + k8s_cluster_name: env_or( + Self::K8S_CLUSTER_NAME_ENV_VAR, + Self::DEFAULT_K8S_CLUSTER_NAME, + ), + deployment_environment: env_or( + Self::DEPLOYMENT_ENVIRONMENT_ENV_VAR, + Self::DEFAULT_DEPLOYMENT_ENVIRONMENT, + ), service_name: env_or(Self::SERVICE_NAME_ENV_VAR, Self::DEFAULT_SERVICE_NAME), } } @@ -93,6 +114,8 @@ impl ServiceDescriptor { let attributes = vec![ KeyValue::new(K8S_POD_NAME, self.k8s_pod_name), KeyValue::new(K8S_NAMESPACE_NAME, self.k8s_namespace_name), + KeyValue::new(K8S_CLUSTER_NAME, self.k8s_cluster_name), + KeyValue::new(DEPLOYMENT_ENVIRONMENT, self.deployment_environment), KeyValue::new(SERVICE_NAME, self.service_name), ]; Resource::new(attributes) diff --git a/core/lib/vm_utils/src/lib.rs b/core/lib/vm_utils/src/lib.rs index 9cec0e13be8b..a3ec715851a4 100644 --- a/core/lib/vm_utils/src/lib.rs +++ b/core/lib/vm_utils/src/lib.rs @@ -1,20 +1,20 @@ -pub mod storage; - use anyhow::{anyhow, Context}; use tokio::runtime::Handle; use zksync_dal::{Connection, Core}; use zksync_multivm::{ - interface::{VmInterface, VmInterfaceHistoryEnabled}, + interface::{VmFactory, VmInterface, VmInterfaceHistoryEnabled}, vm_latest::HistoryEnabled, VmInstance, }; -use zksync_state::{PostgresStorage, StoragePtr, StorageView, WriteStorage}; +use zksync_state::{PostgresStorage, ReadStorage, StoragePtr, StorageView}; use zksync_types::{L1BatchNumber, L2ChainId, Transaction}; use crate::storage::L1BatchParamsProvider; +pub mod storage; + pub type VmAndStorage<'a> = ( - VmInstance>, HistoryEnabled>, + VmInstance, HistoryEnabled>, StoragePtr>>, ); @@ -58,7 +58,7 @@ pub fn create_vm( Ok((vm, storage_view)) } -pub fn execute_tx( +pub fn execute_tx( tx: &Transaction, vm: &mut VmInstance, ) -> anyhow::Result<()> { diff --git a/core/lib/web3_decl/src/client/boxed.rs b/core/lib/web3_decl/src/client/boxed.rs index c49e8aed721c..53def182c932 100644 --- a/core/lib/web3_decl/src/client/boxed.rs +++ b/core/lib/web3_decl/src/client/boxed.rs @@ -9,7 +9,7 @@ use jsonrpsee::core::{ }; use serde::de::DeserializeOwned; -use super::{ForNetwork, Network, TaggedClient}; +use super::{ForWeb3Network, Network, TaggedClient}; #[derive(Debug)] pub struct RawParams(pub(super) Option>); @@ -30,7 +30,7 @@ impl ToRpcParams for RawParams { // The implementation is fairly straightforward: [`RawParams`] is used as a catch-all params type, // and `serde_json::Value` is used as a catch-all response type. #[async_trait] -pub trait ObjectSafeClient: 'static + Send + Sync + fmt::Debug + ForNetwork { +pub trait ObjectSafeClient: 'static + Send + Sync + fmt::Debug + ForWeb3Network { /// Tags this client as working for a specific component. The component name can be used in logging, /// metrics etc. fn for_component(self: Box, component_name: &'static str) -> Box>; diff --git a/core/lib/web3_decl/src/client/mock.rs b/core/lib/web3_decl/src/client/mock.rs index 75bd037049d4..2dcb30094aae 100644 --- a/core/lib/web3_decl/src/client/mock.rs +++ b/core/lib/web3_decl/src/client/mock.rs @@ -14,7 +14,7 @@ use jsonrpsee::{ }; use serde::{de::DeserializeOwned, Serialize}; -use super::{boxed::RawParams, ForNetwork, Network, TaggedClient}; +use super::{boxed::RawParams, ForWeb3Network, Network, TaggedClient}; /// Object-safe counterpart to [`Handler`]. We need it because async closures aren't available on stable Rust. 
#[async_trait] @@ -177,7 +177,7 @@ impl MockClient { } } -impl ForNetwork for MockClient { +impl ForWeb3Network for MockClient { type Net = Net; fn network(&self) -> Self::Net { diff --git a/core/lib/web3_decl/src/client/mod.rs b/core/lib/web3_decl/src/client/mod.rs index ca861e77fdfe..a8246216eca3 100644 --- a/core/lib/web3_decl/src/client/mod.rs +++ b/core/lib/web3_decl/src/client/mod.rs @@ -37,8 +37,8 @@ use zksync_types::url::SensitiveUrl; use self::metrics::{L2ClientMetrics, METRICS}; pub use self::{ boxed::{DynClient, ObjectSafeClient}, - mock::MockClient, - network::{ForNetwork, Network, TaggedClient, L1, L2}, + mock::{MockClient, MockClientBuilder}, + network::{ForWeb3Network, Network, TaggedClient, L1, L2}, shared::Shared, }; @@ -227,7 +227,7 @@ impl Client { } } -impl ForNetwork for Client { +impl ForWeb3Network for Client { type Net = Net; fn network(&self) -> Self::Net { diff --git a/core/lib/web3_decl/src/client/network.rs b/core/lib/web3_decl/src/client/network.rs index 82136689d1d0..d0cb09299385 100644 --- a/core/lib/web3_decl/src/client/network.rs +++ b/core/lib/web3_decl/src/client/network.rs @@ -2,7 +2,7 @@ use std::fmt; -use zksync_types::{L2ChainId, SLChainId}; +use zksync_types::{L1ChainId, L2ChainId, SLChainId}; /// Marker trait for networks. Two standard network kinds are [`L1`] and [`L2`]. /// @@ -33,6 +33,12 @@ impl From for L1 { } } +impl From for L1 { + fn from(chain_id: L1ChainId) -> Self { + Self(Some(chain_id.into())) + } +} + /// L2 network. #[derive(Debug, Clone, Copy, Default)] pub struct L2(Option); @@ -55,7 +61,7 @@ impl From for L2 { /// Associates a type with a particular type of RPC networks, such as Ethereum or ZKsync Era. RPC traits created using `jsonrpsee::rpc` /// can use `ForNetwork` as a client boundary to restrict which implementations can call their methods. -pub trait ForNetwork { +pub trait ForWeb3Network { /// Network that the type is associated with. type Net: Network; @@ -67,7 +73,7 @@ pub trait ForNetwork { fn component(&self) -> &'static str; } -impl ForNetwork for &T { +impl ForWeb3Network for &T { type Net = T::Net; fn network(&self) -> Self::Net { @@ -79,7 +85,7 @@ impl ForNetwork for &T { } } -impl ForNetwork for Box { +impl ForWeb3Network for Box { type Net = T::Net; fn network(&self) -> Self::Net { @@ -92,7 +98,7 @@ impl ForNetwork for Box { } /// Client that can be tagged with the component using it. -pub trait TaggedClient: ForNetwork { +pub trait TaggedClient: ForWeb3Network { /// Tags this client as working for a specific component. 
fn set_component(&mut self, component_name: &'static str); } diff --git a/core/lib/web3_decl/src/namespaces/debug.rs b/core/lib/web3_decl/src/namespaces/debug.rs index b06560b47c32..1fbe3237104b 100644 --- a/core/lib/web3_decl/src/namespaces/debug.rs +++ b/core/lib/web3_decl/src/namespaces/debug.rs @@ -8,17 +8,17 @@ use zksync_types::{ }; use crate::{ - client::{ForNetwork, L2}, + client::{ForWeb3Network, L2}, types::H256, }; #[cfg_attr( feature = "server", - rpc(server, client, namespace = "debug", client_bounds(Self: ForNetwork)) + rpc(server, client, namespace = "debug", client_bounds(Self: ForWeb3Network)) )] #[cfg_attr( not(feature = "server"), - rpc(client, namespace = "debug", client_bounds(Self: ForNetwork)) + rpc(client, namespace = "debug", client_bounds(Self: ForWeb3Network)) )] pub trait DebugNamespace { #[method(name = "traceBlockByNumber")] diff --git a/core/lib/web3_decl/src/namespaces/en.rs b/core/lib/web3_decl/src/namespaces/en.rs index 7bb80bccd1c4..dac774dd7bdf 100644 --- a/core/lib/web3_decl/src/namespaces/en.rs +++ b/core/lib/web3_decl/src/namespaces/en.rs @@ -4,15 +4,15 @@ use jsonrpsee::proc_macros::rpc; use zksync_config::{configs::EcosystemContracts, GenesisConfig}; use zksync_types::{api::en, tokens::TokenInfo, Address, L2BlockNumber}; -use crate::client::{ForNetwork, L2}; +use crate::client::{ForWeb3Network, L2}; #[cfg_attr( feature = "server", - rpc(server, client, namespace = "en", client_bounds(Self: ForNetwork)) + rpc(server, client, namespace = "en", client_bounds(Self: ForWeb3Network)) )] #[cfg_attr( not(feature = "server"), - rpc(client, namespace = "en", client_bounds(Self: ForNetwork)) + rpc(client, namespace = "en", client_bounds(Self: ForWeb3Network)) )] pub trait EnNamespace { #[method(name = "syncL2Block")] diff --git a/core/lib/web3_decl/src/namespaces/eth.rs b/core/lib/web3_decl/src/namespaces/eth.rs index 10443443958b..9f271d80cbcf 100644 --- a/core/lib/web3_decl/src/namespaces/eth.rs +++ b/core/lib/web3_decl/src/namespaces/eth.rs @@ -3,28 +3,27 @@ use jsonrpsee::core::RpcResult; use jsonrpsee::proc_macros::rpc; use zksync_types::{ api::{ - state_override::StateOverride, BlockId, BlockIdVariant, BlockNumber, Transaction, - TransactionVariant, + state_override::StateOverride, BlockId, BlockIdVariant, BlockNumber, FeeHistory, + Transaction, TransactionVariant, }, transaction_request::CallRequest, Address, H256, }; use crate::{ - client::{ForNetwork, L2}, + client::{ForWeb3Network, L2}, types::{ - Block, Bytes, FeeHistory, Filter, FilterChanges, Index, Log, SyncState, TransactionReceipt, - U256, U64, + Block, Bytes, Filter, FilterChanges, Index, Log, SyncState, TransactionReceipt, U256, U64, }, }; #[cfg_attr( feature = "server", - rpc(server, client, namespace = "eth", client_bounds(Self: ForNetwork)) + rpc(server, client, namespace = "eth", client_bounds(Self: ForWeb3Network)) )] #[cfg_attr( not(feature = "server"), - rpc(client, namespace = "eth", client_bounds(Self: ForNetwork)) + rpc(client, namespace = "eth", client_bounds(Self: ForWeb3Network)) )] pub trait EthNamespace { #[method(name = "blockNumber")] diff --git a/core/lib/web3_decl/src/namespaces/net.rs b/core/lib/web3_decl/src/namespaces/net.rs index 21e6548e5341..eebe503ea7a2 100644 --- a/core/lib/web3_decl/src/namespaces/net.rs +++ b/core/lib/web3_decl/src/namespaces/net.rs @@ -3,15 +3,15 @@ use jsonrpsee::core::RpcResult; use jsonrpsee::proc_macros::rpc; use zksync_types::U256; -use crate::client::{ForNetwork, L2}; +use crate::client::{ForWeb3Network, L2}; #[cfg_attr( feature = "server", 
- rpc(server, client, namespace = "net", client_bounds(Self: ForNetwork)) + rpc(server, client, namespace = "net", client_bounds(Self: ForWeb3Network)) )] #[cfg_attr( not(feature = "server"), - rpc(client, namespace = "net", client_bounds(Self: ForNetwork)) + rpc(client, namespace = "net", client_bounds(Self: ForWeb3Network)) )] pub trait NetNamespace { #[method(name = "version")] diff --git a/core/lib/web3_decl/src/namespaces/snapshots.rs b/core/lib/web3_decl/src/namespaces/snapshots.rs index 6b82d5f590d8..5d1ac36d95c9 100644 --- a/core/lib/web3_decl/src/namespaces/snapshots.rs +++ b/core/lib/web3_decl/src/namespaces/snapshots.rs @@ -6,15 +6,15 @@ use zksync_types::{ L1BatchNumber, }; -use crate::client::{ForNetwork, L2}; +use crate::client::{ForWeb3Network, L2}; #[cfg_attr( feature = "server", - rpc(server, client, namespace = "snapshots", client_bounds(Self: ForNetwork)) + rpc(server, client, namespace = "snapshots", client_bounds(Self: ForWeb3Network)) )] #[cfg_attr( not(feature = "server"), - rpc(client, namespace = "snapshots", client_bounds(Self: ForNetwork)) + rpc(client, namespace = "snapshots", client_bounds(Self: ForWeb3Network)) )] pub trait SnapshotsNamespace { #[method(name = "getAllSnapshots")] diff --git a/core/lib/web3_decl/src/namespaces/unstable.rs b/core/lib/web3_decl/src/namespaces/unstable.rs index 4996813a9855..e6b36dd26846 100644 --- a/core/lib/web3_decl/src/namespaces/unstable.rs +++ b/core/lib/web3_decl/src/namespaces/unstable.rs @@ -1,18 +1,22 @@ #[cfg_attr(not(feature = "server"), allow(unused_imports))] use jsonrpsee::core::RpcResult; use jsonrpsee::proc_macros::rpc; -use zksync_types::{api::TransactionExecutionInfo, H256}; +use zksync_types::{ + api::{TeeProof, TransactionExecutionInfo}, + tee_types::TeeType, + L1BatchNumber, H256, +}; -use crate::client::{ForNetwork, L2}; +use crate::client::{ForWeb3Network, L2}; /// RPCs in this namespace are experimental; their interface is unstable and WILL change.
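// Editor's note: the `client_bounds(Self: ForWeb3Network<Net = L2>)` constraint used throughout
// these macros (its generic parameters are elided in this diff) ties every generated client
// trait to the renamed `ForWeb3Network` marker. A minimal sketch of a caller generic over any
// such client, assuming only the trait methods declared in `network.rs` above:
//
//     fn tag_and_log<C: ForWeb3Network>(client: &C) {
//         tracing::info!(component = client.component(), "using Web3 client");
//     }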
#[cfg_attr( feature = "server", - rpc(server, client, namespace = "unstable", client_bounds(Self: ForNetwork)) + rpc(server, client, namespace = "unstable", client_bounds(Self: ForWeb3Network)) )] #[cfg_attr( not(feature = "server"), - rpc(client, namespace = "unstable", client_bounds(Self: ForNetwork)) + rpc(client, namespace = "unstable", client_bounds(Self: ForWeb3Network)) )] pub trait UnstableNamespace { #[method(name = "getTransactionExecutionInfo")] @@ -20,4 +24,11 @@ &self, hash: H256, ) -> RpcResult>; + + #[method(name = "getTeeProofs")] + async fn tee_proofs( + &self, + l1_batch_number: L1BatchNumber, + tee_type: Option, + ) -> RpcResult>; } diff --git a/core/lib/web3_decl/src/namespaces/web3.rs b/core/lib/web3_decl/src/namespaces/web3.rs index 8851f6d0c3be..d9d417a49d47 100644 --- a/core/lib/web3_decl/src/namespaces/web3.rs +++ b/core/lib/web3_decl/src/namespaces/web3.rs @@ -2,15 +2,15 @@ use jsonrpsee::core::RpcResult; use jsonrpsee::proc_macros::rpc; -use crate::client::{ForNetwork, L2}; +use crate::client::{ForWeb3Network, L2}; #[cfg_attr( feature = "server", - rpc(server, client, namespace = "web3", client_bounds(Self: ForNetwork)) + rpc(server, client, namespace = "web3", client_bounds(Self: ForWeb3Network)) )] #[cfg_attr( not(feature = "server"), - rpc(client, namespace = "web3", client_bounds(Self: ForNetwork)) + rpc(client, namespace = "web3", client_bounds(Self: ForWeb3Network)) )] pub trait Web3Namespace { #[method(name = "clientVersion")] diff --git a/core/lib/web3_decl/src/namespaces/zks.rs b/core/lib/web3_decl/src/namespaces/zks.rs index 6f443dbded6a..47aae2a0835e 100644 --- a/core/lib/web3_decl/src/namespaces/zks.rs +++ b/core/lib/web3_decl/src/namespaces/zks.rs @@ -15,17 +15,17 @@ use zksync_types::{ }; use crate::{ - client::{ForNetwork, L2}, + client::{ForWeb3Network, L2}, types::{Bytes, Token}, }; #[cfg_attr( feature = "server", - rpc(server, client, namespace = "zks", client_bounds(Self: ForNetwork)) + rpc(server, client, namespace = "zks", client_bounds(Self: ForWeb3Network)) )] #[cfg_attr( not(feature = "server"), - rpc(client, namespace = "zks", client_bounds(Self: ForNetwork)) + rpc(client, namespace = "zks", client_bounds(Self: ForWeb3Network)) )] pub trait ZksNamespace { #[method(name = "estimateFee")] diff --git a/core/lib/zksync_core_leftovers/src/lib.rs b/core/lib/zksync_core_leftovers/src/lib.rs index b79b86d718d0..9d399bdd0aff 100644 --- a/core/lib/zksync_core_leftovers/src/lib.rs +++ b/core/lib/zksync_core_leftovers/src/lib.rs @@ -62,6 +62,10 @@ pub enum Component { BaseTokenRatioPersister, /// VM runner-based component that saves VM execution data for basic witness generation. VmRunnerBwip, + /// External prover API that is used to retrieve data for proving and to verify final proofs against the ones we generate + ExternalProofIntegrationApi, + /// VM runner-based component that allows testing experimental VM features. Doesn't save any data to Postgres.
+ VmPlayground, } #[derive(Debug)] @@ -106,6 +110,10 @@ impl FromStr for Components { Ok(Components(vec![Component::BaseTokenRatioPersister])) } "vm_runner_bwip" => Ok(Components(vec![Component::VmRunnerBwip])), + "vm_playground" => Ok(Components(vec![Component::VmPlayground])), + "external_proof_integration_api" => { + Ok(Components(vec![Component::ExternalProofIntegrationApi])) + } other => Err(format!("{} is not a valid component name", other)), } } diff --git a/core/lib/zksync_core_leftovers/src/temp_config_store/mod.rs b/core/lib/zksync_core_leftovers/src/temp_config_store/mod.rs index 1ad688ed14cb..d25c46bda083 100644 --- a/core/lib/zksync_core_leftovers/src/temp_config_store/mod.rs +++ b/core/lib/zksync_core_leftovers/src/temp_config_store/mod.rs @@ -12,15 +12,15 @@ use zksync_config::{ house_keeper::HouseKeeperConfig, vm_runner::BasicWitnessInputProducerConfig, wallets::{AddressWallet, EthSender, StateKeeper, Wallet, Wallets}, - CommitmentGeneratorConfig, DatabaseSecrets, ExternalPriceApiClientConfig, - FriProofCompressorConfig, FriProverConfig, FriProverGatewayConfig, - FriWitnessGeneratorConfig, FriWitnessVectorGeneratorConfig, GeneralConfig, - ObservabilityConfig, PrometheusConfig, ProofDataHandlerConfig, ProtectiveReadsWriterConfig, - PruningConfig, SnapshotRecoveryConfig, + CommitmentGeneratorConfig, DatabaseSecrets, ExperimentalVmConfig, + ExternalPriceApiClientConfig, FriProofCompressorConfig, FriProverConfig, + FriProverGatewayConfig, FriWitnessGeneratorConfig, FriWitnessVectorGeneratorConfig, + GeneralConfig, ObservabilityConfig, PrometheusConfig, ProofDataHandlerConfig, + ProtectiveReadsWriterConfig, PruningConfig, SnapshotRecoveryConfig, }, ApiConfig, BaseTokenAdjusterConfig, ContractVerifierConfig, DADispatcherConfig, DBConfig, - EthConfig, EthWatchConfig, GasAdjusterConfig, ObjectStoreConfig, PostgresConfig, - SnapshotsCreatorConfig, + EthConfig, EthWatchConfig, ExternalProofIntegrationApiConfig, GasAdjusterConfig, + ObjectStoreConfig, PostgresConfig, SnapshotsCreatorConfig, }; use zksync_env_config::FromEnv; use zksync_protobuf::repr::ProtoRepr; @@ -77,6 +77,8 @@ pub struct TempConfigStore { pub pruning: Option, pub snapshot_recovery: Option, pub external_price_api_client_config: Option, + pub external_proof_integration_api_config: Option, + pub experimental_vm_config: Option, } impl TempConfigStore { @@ -112,6 +114,10 @@ impl TempConfigStore { pruning: self.pruning.clone(), external_price_api_client_config: self.external_price_api_client_config.clone(), consensus_config: None, + external_proof_integration_api_config: self + .external_proof_integration_api_config + .clone(), + experimental_vm_config: self.experimental_vm_config.clone(), } } @@ -183,10 +189,12 @@ fn load_env_config() -> anyhow::Result { pruning: None, snapshot_recovery: None, external_price_api_client_config: ExternalPriceApiClientConfig::from_env().ok(), + external_proof_integration_api_config: ExternalProofIntegrationApiConfig::from_env().ok(), + experimental_vm_config: ExperimentalVmConfig::from_env().ok(), }) } -pub fn load_general_config(path: Option) -> anyhow::Result { +pub fn load_general_config(path: Option) -> anyhow::Result { match path { Some(path) => { let yaml = std::fs::read_to_string(path).context("Failed to read general config")?; @@ -198,7 +206,7 @@ pub fn load_general_config(path: Option) -> anyhow::Result) -> anyhow::Result { +pub fn load_database_secrets(path: Option) -> anyhow::Result { match path { Some(path) => { let yaml = std::fs::read_to_string(path).context("Failed to read 
secrets")?; diff --git a/core/node/api_server/src/execution_sandbox/apply.rs b/core/node/api_server/src/execution_sandbox/apply.rs index a65538e25025..99664697b14c 100644 --- a/core/node/api_server/src/execution_sandbox/apply.rs +++ b/core/node/api_server/src/execution_sandbox/apply.rs @@ -40,7 +40,7 @@ use super::{ }; type VmStorageView<'a> = StorageView>>; -type BoxedVm<'a> = Box, HistoryDisabled>>; +type BoxedVm<'a> = Box>, HistoryDisabled>>; #[derive(Debug)] struct Sandbox<'a> { @@ -301,7 +301,7 @@ pub(super) fn apply_vm_in_sandbox( block_args: BlockArgs, // Block arguments for the transaction. state_override: Option, apply: impl FnOnce( - &mut VmInstance, HistoryDisabled>, + &mut VmInstance>, HistoryDisabled>, Transaction, ProtocolVersionId, ) -> T, diff --git a/core/node/api_server/src/tx_sender/master_pool_sink.rs b/core/node/api_server/src/tx_sender/master_pool_sink.rs index b7478b9c9711..cb4e73e3bb79 100644 --- a/core/node/api_server/src/tx_sender/master_pool_sink.rs +++ b/core/node/api_server/src/tx_sender/master_pool_sink.rs @@ -56,9 +56,8 @@ impl TxSink for MasterPoolSink { .transactions_dal() .insert_transaction_l2(tx, execution_metrics) .await - .map(|submission_res_handle| { - APP_METRICS.processed_txs[&TxStage::Mempool(submission_res_handle)].inc(); - submission_res_handle + .inspect(|submission_res_handle| { + APP_METRICS.processed_txs[&TxStage::Mempool(*submission_res_handle)].inc(); }) .map_err(|err| err.generalize().into()), Err(err) => Err(err.generalize().into()), diff --git a/core/node/api_server/src/tx_sender/mod.rs b/core/node/api_server/src/tx_sender/mod.rs index 38939937fcda..826200b5537c 100644 --- a/core/node/api_server/src/tx_sender/mod.rs +++ b/core/node/api_server/src/tx_sender/mod.rs @@ -31,9 +31,9 @@ use zksync_types::{ l2::{error::TxCheckError::TxDuplication, L2Tx}, transaction_request::CallOverrides, utils::storage_key_for_eth_balance, + vm::VmVersion, AccountTreeId, Address, ExecuteTransactionCommon, L2ChainId, Nonce, PackedEthSignature, - ProtocolVersionId, Transaction, VmVersion, H160, H256, MAX_L2_TX_GAS_LIMIT, - MAX_NEW_FACTORY_DEPS, U256, + ProtocolVersionId, Transaction, H160, H256, MAX_L2_TX_GAS_LIMIT, MAX_NEW_FACTORY_DEPS, U256, }; use zksync_utils::h256_to_u256; diff --git a/core/node/api_server/src/web3/backend_jsonrpsee/namespaces/eth.rs b/core/node/api_server/src/web3/backend_jsonrpsee/namespaces/eth.rs index ff8ce0356a05..15528c5b309b 100644 --- a/core/node/api_server/src/web3/backend_jsonrpsee/namespaces/eth.rs +++ b/core/node/api_server/src/web3/backend_jsonrpsee/namespaces/eth.rs @@ -1,10 +1,10 @@ use zksync_types::{ api::{ - state_override::StateOverride, Block, BlockId, BlockIdVariant, BlockNumber, Log, - Transaction, TransactionId, TransactionReceipt, TransactionVariant, + state_override::StateOverride, Block, BlockId, BlockIdVariant, BlockNumber, FeeHistory, + Log, Transaction, TransactionId, TransactionReceipt, TransactionVariant, }, transaction_request::CallRequest, - web3::{Bytes, FeeHistory, Index, SyncState}, + web3::{Bytes, Index, SyncState}, Address, H256, U256, U64, }; use zksync_web3_decl::{ diff --git a/core/node/api_server/src/web3/backend_jsonrpsee/namespaces/unstable.rs b/core/node/api_server/src/web3/backend_jsonrpsee/namespaces/unstable.rs index 6abaa718a050..91330aa7d949 100644 --- a/core/node/api_server/src/web3/backend_jsonrpsee/namespaces/unstable.rs +++ b/core/node/api_server/src/web3/backend_jsonrpsee/namespaces/unstable.rs @@ -1,4 +1,8 @@ -use zksync_types::{api::TransactionExecutionInfo, H256}; +use 
zksync_types::{ + api::{TeeProof, TransactionExecutionInfo}, + tee_types::TeeType, + L1BatchNumber, H256, +}; use zksync_web3_decl::{ jsonrpsee::core::{async_trait, RpcResult}, namespaces::UnstableNamespaceServer, @@ -16,4 +20,14 @@ impl UnstableNamespaceServer for UnstableNamespace { .await .map_err(|err| self.current_method().map_err(err)) } + + async fn tee_proofs( + &self, + l1_batch_number: L1BatchNumber, + tee_type: Option, + ) -> RpcResult> { + self.get_tee_proofs_impl(l1_batch_number, tee_type) + .await + .map_err(|err| self.current_method().map_err(err)) + } } diff --git a/core/node/api_server/src/web3/namespaces/eth.rs b/core/node/api_server/src/web3/namespaces/eth.rs index 68030763fd60..c3bed64a1468 100644 --- a/core/node/api_server/src/web3/namespaces/eth.rs +++ b/core/node/api_server/src/web3/namespaces/eth.rs @@ -3,13 +3,13 @@ use zksync_dal::{CoreDal, DalError}; use zksync_system_constants::DEFAULT_L2_TX_GAS_PER_PUBDATA_BYTE; use zksync_types::{ api::{ - state_override::StateOverride, BlockId, BlockNumber, GetLogsFilter, Transaction, - TransactionId, TransactionReceipt, TransactionVariant, + state_override::StateOverride, BlockId, BlockNumber, FeeHistory, GetLogsFilter, + Transaction, TransactionId, TransactionReceipt, TransactionVariant, }, l2::{L2Tx, TransactionType}, transaction_request::CallRequest, utils::decompose_full_nonce, - web3::{self, Bytes, FeeHistory, SyncInfo, SyncState}, + web3::{self, Bytes, SyncInfo, SyncState}, AccountTreeId, L2BlockNumber, StorageKey, H256, L2_BASE_TOKEN_ADDRESS, U256, }; use zksync_utils::u256_to_h256; @@ -678,13 +678,15 @@ impl EthNamespace { .await?; self.set_block_diff(newest_l2_block); - let mut base_fee_per_gas = connection + let (mut base_fee_per_gas, mut effective_pubdata_price_history) = connection .blocks_web3_dal() .get_fee_history(newest_l2_block, block_count) .await .map_err(DalError::generalize)?; + // DAL method returns fees in DESC order while we need ASC. base_fee_per_gas.reverse(); + effective_pubdata_price_history.reverse(); let oldest_block = newest_l2_block.0 + 1 - base_fee_per_gas.len() as u32; // We do not store gas used ratio for blocks, returns array of zeroes as a placeholder. @@ -702,12 +704,15 @@ impl EthNamespace { // `base_fee_per_gas` for next L2 block cannot be calculated, appending last fee as a placeholder. base_fee_per_gas.push(*base_fee_per_gas.last().unwrap()); Ok(FeeHistory { - oldest_block: web3::BlockNumber::Number(oldest_block.into()), - base_fee_per_gas, - gas_used_ratio, - reward, - base_fee_per_blob_gas, - blob_gas_used_ratio, + inner: web3::FeeHistory { + oldest_block: zksync_types::web3::BlockNumber::Number(oldest_block.into()), + base_fee_per_gas, + gas_used_ratio, + reward, + base_fee_per_blob_gas, + blob_gas_used_ratio, + }, + l2_pubdata_price: effective_pubdata_price_history, }) } diff --git a/core/node/api_server/src/web3/namespaces/unstable.rs b/core/node/api_server/src/web3/namespaces/unstable.rs index b46ecd6dc530..783088cdc36a 100644 --- a/core/node/api_server/src/web3/namespaces/unstable.rs +++ b/core/node/api_server/src/web3/namespaces/unstable.rs @@ -1,5 +1,10 @@ +use chrono::{DateTime, Utc}; use zksync_dal::{CoreDal, DalError}; -use zksync_types::api::TransactionExecutionInfo; +use zksync_types::{ + api::{TeeProof, TransactionExecutionInfo}, + tee_types::TeeType, + L1BatchNumber, +}; use zksync_web3_decl::{error::Web3Error, types::H256}; use crate::web3::{backend_jsonrpsee::MethodTracer, RpcState}; @@ -30,4 +35,28 @@ impl UnstableNamespace { .map_err(DalError::generalize)? 
.map(|execution_info| TransactionExecutionInfo { execution_info })) } + + pub async fn get_tee_proofs_impl( + &self, + l1_batch_number: L1BatchNumber, + tee_type: Option, + ) -> Result, Web3Error> { + let mut storage = self.state.acquire_connection().await?; + Ok(storage + .tee_proof_generation_dal() + .get_tee_proofs(l1_batch_number, tee_type) + .await + .map_err(DalError::generalize)? + .into_iter() + .map(|proof| TeeProof { + l1_batch_number, + tee_type, + pubkey: proof.pubkey, + signature: proof.signature, + proof: proof.proof, + proved_at: DateTime::::from_naive_utc_and_offset(proof.updated_at, Utc), + attestation: proof.attestation, + }) + .collect::>()) + } } diff --git a/core/node/block_reverter/src/lib.rs b/core/node/block_reverter/src/lib.rs index 466b5f3c69f7..c7397ee475a7 100644 --- a/core/node/block_reverter/src/lib.rs +++ b/core/node/block_reverter/src/lib.rs @@ -8,10 +8,7 @@ use zksync_contracts::hyperchain_contract; use zksync_dal::{ConnectionPool, Core, CoreDal}; // Public re-export to simplify the API use. pub use zksync_eth_client as eth_client; -use zksync_eth_client::{ - clients::{DynClient, L1}, - BoundEthInterface, CallFunctionArgs, EthInterface, Options, -}; +use zksync_eth_client::{BoundEthInterface, CallFunctionArgs, EthInterface, Options}; use zksync_merkle_tree::domain::ZkSyncTree; use zksync_object_store::{ObjectStore, ObjectStoreError}; use zksync_state::RocksdbStorage; @@ -538,7 +535,7 @@ impl BlockReverter { #[tracing::instrument(err)] async fn get_l1_batch_number_from_contract( - eth_client: &DynClient, + eth_client: &dyn EthInterface, contract_address: Address, op: AggregatedActionType, ) -> anyhow::Result { @@ -560,7 +557,7 @@ impl BlockReverter { /// Returns suggested values for a reversion. pub async fn suggested_values( &self, - eth_client: &DynClient, + eth_client: &dyn EthInterface, eth_config: &BlockReverterEthConfig, reverter_address: Address, ) -> anyhow::Result { diff --git a/core/node/commitment_generator/src/utils.rs b/core/node/commitment_generator/src/utils.rs index b4e6bc542e97..59f8753859a4 100644 --- a/core/node/commitment_generator/src/utils.rs +++ b/core/node/commitment_generator/src/utils.rs @@ -15,7 +15,7 @@ use zk_evm_1_5_0::{ zk_evm_abstractions::queries::LogQuery as LogQuery_1_5_0, }; use zksync_multivm::utils::get_used_bootloader_memory_bytes; -use zksync_types::{zk_evm_types::LogQuery, ProtocolVersionId, VmVersion, H256, U256}; +use zksync_types::{vm::VmVersion, zk_evm_types::LogQuery, ProtocolVersionId, H256, U256}; use zksync_utils::expand_memory_contents; /// Encapsulates computations of commitment components. 
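With the server half of `unstable_getTeeProofs` in place above, a client-side smoke test is straightforward. A hedged sketch, assuming the `rpc(client, ...)` macro generates an `UnstableNamespaceClient` trait as it does for the other namespaces, and that `SensitiveUrl` parses from a string; the URL is illustrative:

```rust
use std::str::FromStr;

use zksync_types::{tee_types::TeeType, url::SensitiveUrl, L1BatchNumber};
use zksync_web3_decl::{
    client::{Client, L2},
    namespaces::UnstableNamespaceClient,
};

async fn print_sgx_proofs() -> anyhow::Result<()> {
    let url = SensitiveUrl::from_str("http://localhost:3050")?;
    let client: Client<L2> = Client::http(url)?.build();
    // Passing `None` as the TEE type presumably returns proofs for all TEE types of the batch.
    let proofs = client.tee_proofs(L1BatchNumber(1), Some(TeeType::Sgx)).await?;
    for proof in proofs {
        println!("batch {} proved at {}", proof.l1_batch_number, proof.proved_at);
    }
    Ok(())
}
```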
diff --git a/core/node/commitment_generator/src/validation_task.rs b/core/node/commitment_generator/src/validation_task.rs index a28eeabfd0fc..639bb79baf97 100644 --- a/core/node/commitment_generator/src/validation_task.rs +++ b/core/node/commitment_generator/src/validation_task.rs @@ -3,7 +3,7 @@ use std::time::Duration; use tokio::sync::watch; use zksync_eth_client::{ clients::{DynClient, L1}, - CallFunctionArgs, ClientError, ContractCallError, + CallFunctionArgs, ClientError, ContractCallError, EthInterface, }; use zksync_types::{commitment::L1BatchCommitmentMode, Address}; @@ -46,9 +46,9 @@ impl L1BatchCommitmentModeValidationTask { async fn validate_commitment_mode(self) -> anyhow::Result<()> { let expected_mode = self.expected_mode; let diamond_proxy_address = self.diamond_proxy_address; - let eth_client = self.eth_client.as_ref(); loop { - let result = Self::get_pubdata_pricing_mode(diamond_proxy_address, eth_client).await; + let result = + Self::get_pubdata_pricing_mode(diamond_proxy_address, &self.eth_client).await; match result { Ok(mode) => { anyhow::ensure!( @@ -91,7 +91,7 @@ impl L1BatchCommitmentModeValidationTask { async fn get_pubdata_pricing_mode( diamond_proxy_address: Address, - eth_client: &DynClient, + eth_client: &dyn EthInterface, ) -> Result { CallFunctionArgs::new("getPubdataPricingMode", ()) .for_contract( @@ -124,7 +124,7 @@ impl L1BatchCommitmentModeValidationTask { mod tests { use std::{mem, sync::Mutex}; - use zksync_eth_client::clients::MockEthereum; + use zksync_eth_client::clients::MockSettlementLayer; use zksync_types::{ethabi, U256}; use zksync_web3_decl::{client::MockClient, jsonrpsee::types::ErrorObject}; @@ -132,7 +132,7 @@ mod tests { fn mock_ethereum(token: ethabi::Token, err: Option) -> MockClient { let err_mutex = Mutex::new(err); - MockEthereum::builder() + MockSettlementLayer::builder() .with_fallible_call_handler(move |_, _| { let err = mem::take(&mut *err_mutex.lock().unwrap()); if let Some(err) = err { diff --git a/core/node/consistency_checker/src/lib.rs b/core/node/consistency_checker/src/lib.rs index 29b020b18607..20ba43a4166e 100644 --- a/core/node/consistency_checker/src/lib.rs +++ b/core/node/consistency_checker/src/lib.rs @@ -518,7 +518,7 @@ impl ConsistencyChecker { let version: U256 = CallFunctionArgs::new("getProtocolVersion", ()) .for_contract(address, &self.contract) - .call(self.l1_client.as_ref()) + .call(&self.l1_client) .await?; tracing::info!("Checked diamond proxy {address:?} (protocol version: {version})"); Ok(()) diff --git a/core/node/consistency_checker/src/tests/mod.rs b/core/node/consistency_checker/src/tests/mod.rs index 914e21069bdd..40c447071cf4 100644 --- a/core/node/consistency_checker/src/tests/mod.rs +++ b/core/node/consistency_checker/src/tests/mod.rs @@ -7,7 +7,7 @@ use test_casing::{test_casing, Product}; use tokio::sync::mpsc; use zksync_config::GenesisConfig; use zksync_dal::Connection; -use zksync_eth_client::{clients::MockEthereum, Options}; +use zksync_eth_client::{clients::MockSettlementLayer, Options}; use zksync_l1_contract_interface::{i_executor::methods::CommitBatches, Tokenizable, Tokenize}; use zksync_node_genesis::{insert_genesis_batch, mock_genesis_config, GenesisParams}; use zksync_node_test_utils::{ @@ -92,7 +92,7 @@ pub(crate) fn build_commit_tx_input_data( } pub(crate) fn create_mock_checker( - client: MockEthereum, + client: MockSettlementLayer, pool: ConnectionPool, commitment_mode: L1BatchCommitmentMode, ) -> ConsistencyChecker { @@ -111,8 +111,8 @@ pub(crate) fn create_mock_checker( } } 
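The `MockEthereum` to `MockSettlementLayer` rename is mechanical, as the next hunk shows. For reviewers migrating other tests, a minimal sketch of the builder flow under the new name; the handler body is illustrative (real tests match on `call.to` and the selector, as `create_mock_ethereum` does below):

```rust
use zksync_eth_client::clients::MockSettlementLayer;
use zksync_types::{ethabi, U256};

fn mock_settlement_layer() -> MockSettlementLayer {
    MockSettlementLayer::builder()
        .with_call_handler(|call, _block_id| {
            // Answer every `eth_call` with a constant token.
            assert!(call.to.is_some());
            ethabi::Token::Uint(U256::zero())
        })
        .build()
}
```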
-fn create_mock_ethereum() -> MockEthereum { - let mock = MockEthereum::builder().with_call_handler(|call, _block_id| { +fn create_mock_ethereum() -> MockSettlementLayer { + let mock = MockSettlementLayer::builder().with_call_handler(|call, _block_id| { assert_eq!(call.to, Some(DIAMOND_PROXY_ADDR)); let packed_semver = ProtocolVersionId::latest().into_packed_semver_with_patch(0); let contract = zksync_contracts::hyperchain_contract(); @@ -650,7 +650,7 @@ impl IncorrectDataKind { async fn apply( self, - client: &MockEthereum, + client: &MockSettlementLayer, l1_batch: &L1BatchWithMetadata, commitment_mode: L1BatchCommitmentMode, ) -> H256 { diff --git a/core/node/eth_sender/src/abstract_l1_interface.rs b/core/node/eth_sender/src/abstract_l1_interface.rs index 9c9af82553e9..1f1956c9dd84 100644 --- a/core/node/eth_sender/src/abstract_l1_interface.rs +++ b/core/node/eth_sender/src/abstract_l1_interface.rs @@ -3,7 +3,6 @@ use std::fmt; use async_trait::async_trait; use vise::{EncodeLabelSet, EncodeLabelValue}; use zksync_eth_client::{ - clients::{DynClient, L1}, BoundEthInterface, EnrichedClientResult, EthInterface, ExecutedTxStatus, FailureInfo, Options, RawTransactionBytes, SignedCallResult, }; @@ -91,11 +90,14 @@ pub(super) struct RealL1Interface { } impl RealL1Interface { - pub(crate) fn query_client(&self) -> &DynClient { + pub(crate) fn query_client(&self) -> &dyn EthInterface { self.ethereum_gateway().as_ref() } - pub(crate) fn query_client_for_operator(&self, operator_type: OperatorType) -> &DynClient { + pub(crate) fn query_client_for_operator( + &self, + operator_type: OperatorType, + ) -> &dyn EthInterface { if operator_type == OperatorType::Blob { self.ethereum_gateway_blobs().unwrap().as_ref() } else { @@ -103,6 +105,7 @@ impl RealL1Interface { } } } + #[async_trait] impl AbstractL1Interface for RealL1Interface { async fn failure_reason(&self, tx_hash: H256) -> Option { diff --git a/core/node/eth_sender/src/eth_tx_aggregator.rs b/core/node/eth_sender/src/eth_tx_aggregator.rs index 126d8149e022..4947cb1086f0 100644 --- a/core/node/eth_sender/src/eth_tx_aggregator.rs +++ b/core/node/eth_sender/src/eth_tx_aggregator.rs @@ -2,7 +2,7 @@ use tokio::sync::watch; use zksync_config::configs::eth_sender::SenderConfig; use zksync_contracts::BaseSystemContractsHashes; use zksync_dal::{Connection, ConnectionPool, Core, CoreDal}; -use zksync_eth_client::{BoundEthInterface, CallFunctionArgs, EthInterface}; +use zksync_eth_client::{BoundEthInterface, CallFunctionArgs}; use zksync_l1_contract_interface::{ i_executor::{ commit::kzg::{KzgInfo, ZK_SYNC_BYTES_PER_BLOB}, diff --git a/core/node/eth_sender/src/tester.rs b/core/node/eth_sender/src/tester.rs index 7cad69c5a9a3..5bd5181ed8c7 100644 --- a/core/node/eth_sender/src/tester.rs +++ b/core/node/eth_sender/src/tester.rs @@ -5,9 +5,9 @@ use zksync_config::{ ContractsConfig, EthConfig, GasAdjusterConfig, }; use zksync_dal::{Connection, ConnectionPool, Core, CoreDal}; -use zksync_eth_client::{clients::MockEthereum, BaseFees, BoundEthInterface}; +use zksync_eth_client::{clients::MockSettlementLayer, BaseFees, BoundEthInterface}; use zksync_l1_contract_interface::i_executor::methods::{ExecuteBatches, ProveBatches}; -use zksync_node_fee_model::l1_gas_price::GasAdjuster; +use zksync_node_fee_model::l1_gas_price::{GasAdjuster, GasAdjusterClient}; use zksync_node_test_utils::{create_l1_batch, l1_batch_metadata_to_commitment_artifacts}; use zksync_object_store::MockObjectStore; use zksync_types::{ @@ -109,8 +109,8 @@ impl TestL1Batch { #[derive(Debug)] 
pub(crate) struct EthSenderTester { pub conn: ConnectionPool, - pub gateway: Box, - pub gateway_blobs: Box, + pub gateway: Box, + pub gateway_blobs: Box, pub manager: MockEthTxManager, pub aggregator: EthTxAggregator, pub gas_adjuster: Arc, @@ -152,14 +152,16 @@ impl EthSenderTester { .map(|base_fee_per_gas| BaseFees { base_fee_per_gas, base_fee_per_blob_gas: 0.into(), + l2_pubdata_price: 0.into(), }) .collect(); - let gateway = MockEthereum::builder() + let gateway = MockSettlementLayer::builder() .with_fee_history( std::iter::repeat_with(|| BaseFees { base_fee_per_gas: 0, base_fee_per_blob_gas: 0.into(), + l2_pubdata_price: 0.into(), }) .take(Self::WAIT_CONFIRMATIONS as usize) .chain(history.clone()) @@ -174,11 +176,12 @@ impl EthSenderTester { gateway.advance_block_number(Self::WAIT_CONFIRMATIONS); let gateway = Box::new(gateway); - let gateway_blobs = MockEthereum::builder() + let gateway_blobs = MockSettlementLayer::builder() .with_fee_history( std::iter::repeat_with(|| BaseFees { base_fee_per_gas: 0, base_fee_per_blob_gas: 0.into(), + l2_pubdata_price: 0.into(), }) .take(Self::WAIT_CONFIRMATIONS as usize) .chain(history) @@ -195,7 +198,7 @@ impl EthSenderTester { let gas_adjuster = Arc::new( GasAdjuster::new( - Box::new(gateway.clone().into_client()), + GasAdjusterClient::from_l1(Box::new(gateway.clone().into_client())), GasAdjusterConfig { max_base_fee_samples: Self::MAX_BASE_FEE_SAMPLES, pricing_formula_parameter_a: 3.0, diff --git a/core/node/eth_watch/src/client.rs b/core/node/eth_watch/src/client.rs index 58d2cbc4d95c..51b156d58e91 100644 --- a/core/node/eth_watch/src/client.rs +++ b/core/node/eth_watch/src/client.rs @@ -123,7 +123,7 @@ impl EthClient for EthHttpQueryClient { // New verifier returns the hash of the verification key. CallFunctionArgs::new("verificationKeyHash", ()) .for_contract(verifier_address, &self.verifier_contract_abi) - .call(self.client.as_ref()) + .call(&self.client) .await } diff --git a/core/node/external_proof_integration_api/Cargo.toml b/core/node/external_proof_integration_api/Cargo.toml new file mode 100644 index 000000000000..ae7cd4c4d031 --- /dev/null +++ b/core/node/external_proof_integration_api/Cargo.toml @@ -0,0 +1,23 @@ +[package] +name = "zksync_external_proof_integration_api" +description = "ZKsync external proof integration API" +version.workspace = true +edition.workspace = true +authors.workspace = true +homepage.workspace = true +repository.workspace = true +license.workspace = true +keywords.workspace = true +categories.workspace = true + +[dependencies] +axum.workspace = true +tracing.workspace = true +zksync_prover_interface.workspace = true +zksync_basic_types.workspace = true +zksync_config.workspace = true +zksync_object_store.workspace = true +zksync_dal.workspace = true +tokio.workspace = true +bincode.workspace = true +anyhow.workspace = true diff --git a/core/node/external_proof_integration_api/src/error.rs b/core/node/external_proof_integration_api/src/error.rs new file mode 100644 index 000000000000..dac8e2a27ed6 --- /dev/null +++ b/core/node/external_proof_integration_api/src/error.rs @@ -0,0 +1,86 @@ +use axum::{ + http::StatusCode, + response::{IntoResponse, Response}, +}; +use zksync_basic_types::L1BatchNumber; +use zksync_dal::DalError; +use zksync_object_store::ObjectStoreError; + +pub(crate) enum ProcessorError { + ObjectStore(ObjectStoreError), + Dal(DalError), + Serialization(bincode::Error), + InvalidProof, + BatchNotReady(L1BatchNumber), +} + +impl From for ProcessorError { + fn from(err: ObjectStoreError) -> 
Self { + Self::ObjectStore(err) + } +} + +impl From for ProcessorError { + fn from(err: DalError) -> Self { + Self::Dal(err) + } +} + +impl From for ProcessorError { + fn from(err: bincode::Error) -> Self { + Self::Serialization(err) + } +} + +impl IntoResponse for ProcessorError { + fn into_response(self) -> Response { + let (status_code, message) = match self { + ProcessorError::ObjectStore(err) => { + tracing::error!("GCS error: {:?}", err); + match err { + ObjectStoreError::KeyNotFound(_) => ( + StatusCode::NOT_FOUND, + "Proof verification not possible anymore, batch is too old.".to_owned(), + ), + _ => ( + StatusCode::INTERNAL_SERVER_ERROR, + "Failed fetching from GCS".to_owned(), + ), + } + } + ProcessorError::Dal(err) => { + tracing::error!("Sqlx error: {:?}", err); + match err.inner() { + zksync_dal::SqlxError::RowNotFound => { + (StatusCode::NOT_FOUND, "Non existing L1 batch".to_owned()) + } + _ => ( + StatusCode::INTERNAL_SERVER_ERROR, + "Failed fetching/saving from db".to_owned(), + ), + } + } + ProcessorError::Serialization(err) => { + tracing::error!("Serialization error: {:?}", err); + ( + StatusCode::BAD_REQUEST, + "Failed to deserialize proof data".to_owned(), + ) + } + ProcessorError::BatchNotReady(l1_batch_number) => { + tracing::error!( + "Batch {l1_batch_number:?} is not yet ready for proving. Most likely our proof for this batch is not generated yet" + ); + ( + StatusCode::INTERNAL_SERVER_ERROR, + format!("Batch {l1_batch_number:?} is not yet ready for proving. Most likely our proof for this batch is not generated yet, try again later"), + ) + } + ProcessorError::InvalidProof => { + tracing::error!("Invalid proof data"); + (StatusCode::BAD_REQUEST, "Invalid proof data".to_owned()) + } + }; + (status_code, message).into_response() + } +} diff --git a/core/node/external_proof_integration_api/src/lib.rs b/core/node/external_proof_integration_api/src/lib.rs new file mode 100644 index 000000000000..51fecf8c23fc --- /dev/null +++ b/core/node/external_proof_integration_api/src/lib.rs @@ -0,0 +1,73 @@ +mod error; +mod processor; + +use std::{net::SocketAddr, sync::Arc}; + +use anyhow::Context; +use axum::{extract::Path, routing::post, Json, Router}; +use tokio::sync::watch; +use zksync_basic_types::commitment::L1BatchCommitmentMode; +use zksync_config::configs::external_proof_integration_api::ExternalProofIntegrationApiConfig; +use zksync_dal::{ConnectionPool, Core}; +use zksync_object_store::ObjectStore; +use zksync_prover_interface::api::{OptionalProofGenerationDataRequest, VerifyProofRequest}; + +use crate::processor::Processor; + +pub async fn run_server( + config: ExternalProofIntegrationApiConfig, + blob_store: Arc, + connection_pool: ConnectionPool, + commitment_mode: L1BatchCommitmentMode, + mut stop_receiver: watch::Receiver, +) -> anyhow::Result<()> { + let bind_address = SocketAddr::from(([0, 0, 0, 0], config.http_port)); + tracing::debug!("Starting external prover API server on {bind_address}"); + let app = create_router(blob_store, connection_pool, commitment_mode).await; + + let listener = tokio::net::TcpListener::bind(bind_address) + .await + .with_context(|| format!("Failed binding external prover API server to {bind_address}"))?; + axum::serve(listener, app) + .with_graceful_shutdown(async move { + if stop_receiver.changed().await.is_err() { + tracing::warn!("Stop signal sender for external prover API server was dropped without sending a signal"); + } + tracing::info!("Stop signal received, external prover API server is shutting down"); + }) + .await + 
.context("External prover API server failed")?; + tracing::info!("External prover API server shut down"); + Ok(()) +} + +async fn create_router( + blob_store: Arc, + connection_pool: ConnectionPool, + commitment_mode: L1BatchCommitmentMode, +) -> Router { + let mut processor = + Processor::new(blob_store.clone(), connection_pool.clone(), commitment_mode); + let verify_proof_processor = processor.clone(); + Router::new() + .route( + "/proof_generation_data", + post( + // We use the POST method because the returned data is not idempotent, + // i.e., we may return a different result on each call. + move |payload: Json| async move { + processor.get_proof_generation_data(payload).await + }, + ), + ) + .route( + "/verify_proof/:l1_batch_number", + post( + move |l1_batch_number: Path, payload: Json| async move { + verify_proof_processor + .verify_proof(l1_batch_number, payload) + .await + }, + ), + ) +} diff --git a/core/node/external_proof_integration_api/src/processor.rs b/core/node/external_proof_integration_api/src/processor.rs new file mode 100644 index 000000000000..a15e45e48037 --- /dev/null +++ b/core/node/external_proof_integration_api/src/processor.rs @@ -0,0 +1,190 @@ +use std::sync::Arc; + +use axum::{extract::Path, Json}; +use zksync_basic_types::{ + basic_fri_types::Eip4844Blobs, commitment::L1BatchCommitmentMode, L1BatchNumber, +}; +use zksync_dal::{ConnectionPool, Core, CoreDal}; +use zksync_object_store::{bincode, ObjectStore}; +use zksync_prover_interface::{ + api::{ + OptionalProofGenerationDataRequest, ProofGenerationData, ProofGenerationDataResponse, + VerifyProofRequest, + }, + inputs::{ + L1BatchMetadataHashes, VMRunWitnessInputData, WitnessInputData, WitnessInputMerklePaths, + }, + outputs::L1BatchProofForL1, +}; + +use crate::error::ProcessorError; + +#[derive(Clone)] +pub(crate) struct Processor { + blob_store: Arc, + pool: ConnectionPool, + commitment_mode: L1BatchCommitmentMode, +} + +impl Processor { + pub(crate) fn new( + blob_store: Arc, + pool: ConnectionPool, + commitment_mode: L1BatchCommitmentMode, + ) -> Self { + Self { + blob_store, + pool, + commitment_mode, + } + } + + #[tracing::instrument(skip_all)] + pub(crate) async fn get_proof_generation_data( + &mut self, + request: Json, + ) -> Result, ProcessorError> { + tracing::info!("Received request for proof generation data: {:?}", request); + + let latest_available_batch = self + .pool + .connection() + .await + .unwrap() + .proof_generation_dal() + .get_available_batch() + .await?; + + let l1_batch_number = if let Some(l1_batch_number) = request.0 .0 { + if l1_batch_number > latest_available_batch { + tracing::error!( + "Requested batch is not available: {:?}, latest available batch is {:?}", + l1_batch_number, + latest_available_batch + ); + return Err(ProcessorError::BatchNotReady(l1_batch_number)); + } + l1_batch_number + } else { + latest_available_batch + }; + + let proof_generation_data = self + .proof_generation_data_for_existing_batch(l1_batch_number) + .await; + + match proof_generation_data { + Ok(data) => Ok(Json(ProofGenerationDataResponse::Success(Some(Box::new( + data, + ))))), + Err(err) => Err(err), + } + } + + #[tracing::instrument(skip(self))] + async fn proof_generation_data_for_existing_batch( + &self, + l1_batch_number: L1BatchNumber, + ) -> Result { + let vm_run_data: VMRunWitnessInputData = self + .blob_store + .get(l1_batch_number) + .await + .map_err(ProcessorError::ObjectStore)?; + let merkle_paths: WitnessInputMerklePaths = self + .blob_store + .get(l1_batch_number) + .await + 
.map_err(ProcessorError::ObjectStore)?; + + // Acquire connection after interacting with GCP, to avoid holding the connection for too long. + let mut conn = self.pool.connection().await.map_err(ProcessorError::Dal)?; + + let previous_batch_metadata = conn + .blocks_dal() + .get_l1_batch_metadata(L1BatchNumber(l1_batch_number.checked_sub(1).unwrap())) + .await + .map_err(ProcessorError::Dal)? + .expect("No metadata for previous batch"); + + let header = conn + .blocks_dal() + .get_l1_batch_header(l1_batch_number) + .await + .map_err(ProcessorError::Dal)? + .unwrap_or_else(|| panic!("Missing header for {}", l1_batch_number)); + + let minor_version = header.protocol_version.unwrap(); + let protocol_version = conn + .protocol_versions_dal() + .get_protocol_version_with_latest_patch(minor_version) + .await + .map_err(ProcessorError::Dal)? + .unwrap_or_else(|| { + panic!("Missing l1 verifier info for protocol version {minor_version}") + }); + + let batch_header = conn + .blocks_dal() + .get_l1_batch_header(l1_batch_number) + .await + .map_err(ProcessorError::Dal)? + .unwrap_or_else(|| panic!("Missing header for {}", l1_batch_number)); + + let eip_4844_blobs = match self.commitment_mode { + L1BatchCommitmentMode::Validium => Eip4844Blobs::empty(), + L1BatchCommitmentMode::Rollup => { + let blobs = batch_header.pubdata_input.as_deref().unwrap_or_else(|| { + panic!( + "expected pubdata, but it is not available for batch {l1_batch_number:?}" + ) + }); + Eip4844Blobs::decode(blobs).expect("failed to decode EIP-4844 blobs") + } + }; + + let blob = WitnessInputData { + vm_run_data, + merkle_paths, + eip_4844_blobs, + previous_batch_metadata: L1BatchMetadataHashes { + root_hash: previous_batch_metadata.metadata.root_hash, + meta_hash: previous_batch_metadata.metadata.meta_parameters_hash, + aux_hash: previous_batch_metadata.metadata.aux_data_hash, + }, + }; + + Ok(ProofGenerationData { + l1_batch_number, + witness_input_data: blob, + protocol_version: protocol_version.version, + l1_verifier_config: protocol_version.l1_verifier_config, + }) + } + + pub(crate) async fn verify_proof( + &self, + Path(l1_batch_number): Path, + Json(payload): Json, + ) -> Result<(), ProcessorError> { + let l1_batch_number = L1BatchNumber(l1_batch_number); + tracing::info!( + "Received request to verify proof for batch: {:?}", + l1_batch_number + ); + + let serialized_proof = bincode::serialize(&payload.0)?; + let expected_proof = bincode::serialize( + &self + .blob_store + .get::((l1_batch_number, payload.0.protocol_version)) + .await?, + )?; + + if serialized_proof != expected_proof { + return Err(ProcessorError::InvalidProof); + } + + Ok(()) + } +} diff --git a/core/node/fee_model/src/l1_gas_price/gas_adjuster/metrics.rs b/core/node/fee_model/src/l1_gas_price/gas_adjuster/metrics.rs index 0a671179de39..f75c1796037f 100644 --- a/core/node/fee_model/src/l1_gas_price/gas_adjuster/metrics.rs +++ b/core/node/fee_model/src/l1_gas_price/gas_adjuster/metrics.rs @@ -7,6 +7,7 @@ use vise::{Gauge, Metrics}; pub(super) struct GasAdjusterMetrics { pub current_base_fee_per_gas: Gauge, pub current_blob_base_fee: Gauge, + pub current_l2_pubdata_price: Gauge, pub median_base_fee_per_gas: Gauge, pub median_blob_base_fee_per_gas: Gauge, pub median_blob_base_fee: Gauge, diff --git a/core/node/fee_model/src/l1_gas_price/gas_adjuster/mod.rs b/core/node/fee_model/src/l1_gas_price/gas_adjuster/mod.rs index 2032cb9c89fd..244220da026f 100644 --- a/core/node/fee_model/src/l1_gas_price/gas_adjuster/mod.rs +++ 
b/core/node/fee_model/src/l1_gas_price/gas_adjuster/mod.rs @@ -7,9 +7,9 @@ use std::{ use tokio::sync::watch; use zksync_config::{configs::eth_sender::PubdataSendingMode, GasAdjusterConfig}; -use zksync_eth_client::EthInterface; +use zksync_eth_client::EthFeeInterface; use zksync_types::{commitment::L1BatchCommitmentMode, L1_GAS_PER_PUBDATA_BYTE, U256}; -use zksync_web3_decl::client::{DynClient, L1}; +use zksync_web3_decl::client::{DynClient, L1, L2}; use self::metrics::METRICS; use super::L1TxParamsProvider; @@ -18,6 +18,40 @@ mod metrics; #[cfg(test)] mod tests; +#[derive(Debug)] +pub struct GasAdjusterClient { + gateway_mode: bool, + inner: Box, +} + +impl GasAdjusterClient { + pub fn from_l1(inner: Box>) -> Self { + Self { + inner: Box::new(inner.for_component("gas_adjuster")), + gateway_mode: false, + } + } + + pub fn from_l2(inner: Box>) -> Self { + Self { + inner: Box::new(inner.for_component("gas_adjuster")), + gateway_mode: true, + } + } +} + +impl From>> for GasAdjusterClient { + fn from(inner: Box>) -> Self { + Self::from_l1(inner) + } +} + +impl From>> for GasAdjusterClient { + fn from(inner: Box>) -> Self { + Self::from_l2(inner) + } +} + /// This component keeps track of the median `base_fee` from the last `max_base_fee_samples` blocks /// and of the median `blob_base_fee` from the last `max_blob_base_fee_sample` blocks. /// It is used to adjust the base_fee of transactions sent to L1. @@ -27,31 +61,53 @@ pub struct GasAdjuster { // Type for blob base fee is chosen to be `U256`. // In practice, it's very unlikely to overflow `u64` (if `blob_base_fee_statistics` = 10 ^ 18, then price for one blob is 2 ^ 17 ETH). // But it's still possible and code shouldn't panic if that happens. One more argument is that geth uses big int type for blob prices. + // + // Note that for L2-based chains it will contain only zeroes. pub(super) blob_base_fee_statistics: GasStatistics, + // Note that for L1-based chains the following field contains only zeroes. + pub(super) l2_pubdata_price_statistics: GasStatistics, + pub(super) config: GasAdjusterConfig, pubdata_sending_mode: PubdataSendingMode, - eth_client: Box>, + client: GasAdjusterClient, commitment_mode: L1BatchCommitmentMode, } impl GasAdjuster { pub async fn new( - eth_client: Box>, + client: GasAdjusterClient, config: GasAdjusterConfig, pubdata_sending_mode: PubdataSendingMode, commitment_mode: L1BatchCommitmentMode, ) -> anyhow::Result { - let eth_client = eth_client.for_component("gas_adjuster"); + // A runtime check to ensure consistent config. + if config.settlement_mode.is_gateway() { + anyhow::ensure!(client.gateway_mode, "Must be L2 client in L2 mode"); + + anyhow::ensure!( + matches!(pubdata_sending_mode, PubdataSendingMode::RelayedL2Calldata), + "Only relayed L2 calldata is available for L2 mode" + ); + } else { + anyhow::ensure!(!client.gateway_mode, "Must be L1 client in L1 mode"); + + anyhow::ensure!( + !matches!(pubdata_sending_mode, PubdataSendingMode::RelayedL2Calldata), + "Relayed L2 calldata is only available in L2 mode" + ); + } // Subtracting 1 from the "latest" block number to prevent errors in case // the info about the latest block is not yet present on the node. // This sometimes happens on Infura. - let current_block = eth_client + let current_block = client + .inner .block_number() .await? 
.as_usize() .saturating_sub(1); - let fee_history = eth_client + let fee_history = client + .inner .base_fee_history(current_block, config.max_base_fee_samples) .await?; @@ -67,12 +123,19 @@ fee_history.iter().map(|fee| fee.base_fee_per_blob_gas), ); + let l2_pubdata_price_statistics = GasStatistics::new( + config.num_samples_for_blob_base_fee_estimate, + current_block, + fee_history.iter().map(|fee| fee.l2_pubdata_price), + ); + Ok(Self { base_fee_statistics, blob_base_fee_statistics, + l2_pubdata_price_statistics, config, pubdata_sending_mode, - eth_client, + client, commitment_mode, }) } @@ -84,7 +147,8 @@ // the info about the latest block is not yet present on the node. // This sometimes happens on Infura. let current_block = self - .eth_client + .client + .inner .block_number() .await? .as_usize() @@ -94,26 +158,27 @@ if current_block > last_processed_block { let n_blocks = current_block - last_processed_block; - let base_fees = self - .eth_client + let fee_data = self + .client + .inner .base_fee_history(current_block, n_blocks) .await?; // We shouldn't rely on L1 provider to return consistent results, so we check that we have at least one new sample. - if let Some(current_base_fee_per_gas) = base_fees.last().map(|fee| fee.base_fee_per_gas) + if let Some(current_base_fee_per_gas) = fee_data.last().map(|fee| fee.base_fee_per_gas) { METRICS .current_base_fee_per_gas .set(current_base_fee_per_gas); } self.base_fee_statistics - .add_samples(base_fees.iter().map(|fee| fee.base_fee_per_gas)); + .add_samples(fee_data.iter().map(|fee| fee.base_fee_per_gas)); if let Some(current_blob_base_fee) = - base_fees.last().map(|fee| fee.base_fee_per_blob_gas) + fee_data.last().map(|fee| fee.base_fee_per_blob_gas) { // Blob base fee overflows `u64` only in very extreme cases. - // It doesn't worth to observe exact value with metric because anyway values that can be used + // It isn't worth observing the exact value with a metric because the values that can be used // are capped by `self.config.max_blob_base_fee()` of `u64` type. if current_blob_base_fee > U256::from(u64::MAX) { tracing::error!("Failed to report current_blob_base_fee = {current_blob_base_fee}, it exceeds u64::MAX"); @@ -124,7 +189,23 @@ } } self.blob_base_fee_statistics - .add_samples(base_fees.iter().map(|fee| fee.base_fee_per_blob_gas)); + .add_samples(fee_data.iter().map(|fee| fee.base_fee_per_blob_gas)); + + if let Some(current_l2_pubdata_price) = fee_data.last().map(|fee| fee.l2_pubdata_price) + { + // L2 pubdata price overflows `u64` only in very extreme cases. + // It isn't worth observing the exact value with a metric because the values that can be used + // are capped by `self.config.max_blob_base_fee()` of `u64` type. 
+ if current_l2_pubdata_price > U256::from(u64::MAX) { + tracing::error!("Failed to report current_l2_pubdata_price = {current_l2_pubdata_price}, it exceeds u64::MAX"); + } else { + METRICS + .current_l2_pubdata_price + .set(current_l2_pubdata_price.as_u64()); + } + } + self.l2_pubdata_price_statistics + .add_samples(fee_data.iter().map(|fee| fee.l2_pubdata_price)); } Ok(()) } @@ -197,36 +278,33 @@ * BLOB_GAS_PER_BYTE as f64 * self.config.internal_pubdata_pricing_multiplier; - self.bound_blob_base_fee(calculated_price) - } - PubdataSendingMode::Calldata => { - self.estimate_effective_gas_price() * self.pubdata_byte_gas() + self.cap_pubdata_fee(calculated_price) } + PubdataSendingMode::Calldata => self.cap_pubdata_fee( + (self.estimate_effective_gas_price() * L1_GAS_PER_PUBDATA_BYTE as u64) as f64, + ), PubdataSendingMode::Custom => { // Fix this when we have a better understanding of dynamic pricing for custom DA layers. // GitHub issue: https://github.com/matter-labs/zksync-era/issues/2105 0 } + PubdataSendingMode::RelayedL2Calldata => { + self.cap_pubdata_fee(self.l2_pubdata_price_statistics.median().as_u64() as f64) + } } } - fn pubdata_byte_gas(&self) -> u64 { - match self.commitment_mode { - L1BatchCommitmentMode::Validium => 0, - L1BatchCommitmentMode::Rollup => L1_GAS_PER_PUBDATA_BYTE.into(), - } - } - - fn bound_blob_base_fee(&self, blob_base_fee: f64) -> u64 { + fn cap_pubdata_fee(&self, pubdata_fee: f64) -> u64 { + // We treat the max blob base fee as the maximum fee that we can charge for each byte of pubdata. let max_blob_base_fee = self.config.max_blob_base_fee(); match self.commitment_mode { L1BatchCommitmentMode::Validium => 0, L1BatchCommitmentMode::Rollup => { - if blob_base_fee > max_blob_base_fee as f64 { - tracing::error!("Blob base fee is too high: {blob_base_fee}, using max allowed: {max_blob_base_fee}"); + if pubdata_fee > max_blob_base_fee as f64 { + tracing::error!("Pubdata fee is too high: {pubdata_fee}, using max allowed: {max_blob_base_fee}"); return max_blob_base_fee; } - blob_base_fee as u64 + pubdata_fee as u64 } } } diff --git a/core/node/fee_model/src/l1_gas_price/gas_adjuster/tests.rs b/core/node/fee_model/src/l1_gas_price/gas_adjuster/tests.rs index 200903b6deda..2643e4b3c424 100644 --- a/core/node/fee_model/src/l1_gas_price/gas_adjuster/tests.rs +++ b/core/node/fee_model/src/l1_gas_price/gas_adjuster/tests.rs @@ -2,10 +2,12 @@ use std::{collections::VecDeque, sync::RwLockReadGuard}; use test_casing::test_casing; use zksync_config::{configs::eth_sender::PubdataSendingMode, GasAdjusterConfig}; -use zksync_eth_client::{clients::MockEthereum, BaseFees}; -use zksync_types::commitment::L1BatchCommitmentMode; +use zksync_eth_client::{clients::MockSettlementLayer, BaseFees}; +use zksync_types::{commitment::L1BatchCommitmentMode, settlement::SettlementMode}; +use zksync_web3_decl::client::L2; use super::{GasAdjuster, GasStatistics, GasStatisticsInner}; +use crate::l1_gas_price::GasAdjusterClient; /// Check that we compute the median correctly #[test] @@ -28,57 +30,78 @@ fn samples_queue() { assert_eq!(stats.samples, VecDeque::from([4, 5, 18, 18, 18])); } +const TEST_BLOCK_FEES: [u64; 10] = [0, 4, 6, 8, 7, 5, 5, 8, 10, 9]; +const TEST_BLOB_FEES: [u64; 10] = [ + 0, + 393216, + 393216, + 393216 * 2, + 393216, + 393216 * 2, + 393216 * 2, + 393216 * 3, + 393216 * 4, + 393216, +]; +const TEST_PUBDATA_PRICES: [u64; 10] = [ + 0, + 493216, + 493216, + 493216 * 2, + 493216, + 493216 * 2, + 493216 * 2, + 493216 * 3, + 493216 * 4, + 493216, +]; + +fn 
test_config(settlement_mode: SettlementMode) -> GasAdjusterConfig { + GasAdjusterConfig { + default_priority_fee_per_gas: 5, + max_base_fee_samples: 5, + pricing_formula_parameter_a: 1.5, + pricing_formula_parameter_b: 1.0005, + internal_l1_pricing_multiplier: 0.8, + internal_enforced_l1_gas_price: None, + internal_enforced_pubdata_price: None, + poll_period: 5, + max_l1_gas_price: None, + num_samples_for_blob_base_fee_estimate: 3, + internal_pubdata_pricing_multiplier: 1.0, + max_blob_base_fee: None, + settlement_mode, + } +} + +/// Helper function to read a value from adjuster +fn read(statistics: &GasStatistics) -> RwLockReadGuard> { + statistics.0.read().unwrap() +} + /// Check that we properly fetch base fees as blocks are mined #[test_casing(2, [L1BatchCommitmentMode::Rollup, L1BatchCommitmentMode::Validium])] #[tokio::test] async fn kept_updated(commitment_mode: L1BatchCommitmentMode) { - // Helper function to read a value from adjuster - fn read(statistics: &GasStatistics) -> RwLockReadGuard> { - statistics.0.read().unwrap() - } - - let block_fees = vec![0, 4, 6, 8, 7, 5, 5, 8, 10, 9]; - let blob_fees = vec![ - 0, - 393216, - 393216, - 393216 * 2, - 393216, - 393216 * 2, - 393216 * 2, - 393216 * 3, - 393216 * 4, - 393216, - ]; - let base_fees = block_fees + let base_fees = TEST_BLOCK_FEES .into_iter() - .zip(blob_fees) + .zip(TEST_BLOB_FEES) .map(|(block, blob)| BaseFees { base_fee_per_gas: block, base_fee_per_blob_gas: blob.into(), + l2_pubdata_price: 0.into(), }) .collect(); - let eth_client = MockEthereum::builder().with_fee_history(base_fees).build(); + let eth_client = MockSettlementLayer::builder() + .with_fee_history(base_fees) + .build(); // 5 sampled blocks + additional block to account for latest block subtraction eth_client.advance_block_number(6); - let config = GasAdjusterConfig { - default_priority_fee_per_gas: 5, - max_base_fee_samples: 5, - pricing_formula_parameter_a: 1.5, - pricing_formula_parameter_b: 1.0005, - internal_l1_pricing_multiplier: 0.8, - internal_enforced_l1_gas_price: None, - internal_enforced_pubdata_price: None, - poll_period: 5, - max_l1_gas_price: None, - num_samples_for_blob_base_fee_estimate: 3, - internal_pubdata_pricing_multiplier: 1.0, - max_blob_base_fee: None, - }; + let config = test_config(SettlementMode::SettlesToL1); let adjuster = GasAdjuster::new( - Box::new(eth_client.clone().into_client()), + GasAdjusterClient::from_l1(Box::new(eth_client.clone().into_client())), config, PubdataSendingMode::Calldata, commitment_mode, @@ -119,3 +142,67 @@ expected_median_blob_base_fee.into() ); } + +/// Check that we properly fetch base fees as blocks are mined +#[test_casing(2, [L1BatchCommitmentMode::Rollup, L1BatchCommitmentMode::Validium])] +#[tokio::test] +async fn kept_updated_l2(commitment_mode: L1BatchCommitmentMode) { + let base_fees = TEST_BLOCK_FEES + .into_iter() + .zip(TEST_PUBDATA_PRICES) + .map(|(block, pubdata)| BaseFees { + base_fee_per_gas: block, + base_fee_per_blob_gas: 0.into(), + l2_pubdata_price: pubdata.into(), + }) + .collect(); + + let eth_client = MockSettlementLayer::::builder() + .with_fee_history(base_fees) + .build(); + // 5 sampled blocks + additional block to account for latest block subtraction + eth_client.advance_block_number(6); + + let config = test_config(SettlementMode::Gateway); + let adjuster = GasAdjuster::new( + GasAdjusterClient::from_l2(Box::new(eth_client.clone().into_client())), + config, + PubdataSendingMode::RelayedL2Calldata, + 
+        commitment_mode,
+    )
+    .await
+    .unwrap();
+
+    assert_eq!(
+        read(&adjuster.base_fee_statistics).samples.len(),
+        config.max_base_fee_samples
+    );
+    assert_eq!(read(&adjuster.base_fee_statistics).median(), 6);
+
+    let expected_median_pubdata_price = 493216 * 2;
+    assert_eq!(
+        read(&adjuster.l2_pubdata_price_statistics).samples.len(),
+        config.num_samples_for_blob_base_fee_estimate
+    );
+    assert_eq!(
+        read(&adjuster.l2_pubdata_price_statistics).median(),
+        expected_median_pubdata_price.into()
+    );
+
+    eth_client.advance_block_number(3);
+    adjuster.keep_updated().await.unwrap();
+
+    assert_eq!(
+        read(&adjuster.base_fee_statistics).samples.len(),
+        config.max_base_fee_samples
+    );
+    assert_eq!(read(&adjuster.base_fee_statistics).median(), 7);
+
+    let expected_median_pubdata_price = 493216 * 3;
+    assert_eq!(read(&adjuster.l2_pubdata_price_statistics).samples.len(), 3);
+    assert_eq!(
+        read(&adjuster.l2_pubdata_price_statistics).median(),
+        expected_median_pubdata_price.into()
+    );
+}
diff --git a/core/node/fee_model/src/l1_gas_price/mod.rs b/core/node/fee_model/src/l1_gas_price/mod.rs
index 0dab2d921c40..29db21bc1733 100644
--- a/core/node/fee_model/src/l1_gas_price/mod.rs
+++ b/core/node/fee_model/src/l1_gas_price/mod.rs
@@ -3,13 +3,12 @@
 use std::fmt;

 pub use self::{
-    gas_adjuster::GasAdjuster, main_node_fetcher::MainNodeFeeParamsFetcher,
-    singleton::GasAdjusterSingleton,
+    gas_adjuster::{GasAdjuster, GasAdjusterClient},
+    main_node_fetcher::MainNodeFeeParamsFetcher,
 };

 mod gas_adjuster;
 mod main_node_fetcher;
-mod singleton;

 /// Abstraction that provides parameters to set the fee for an L1 transaction, taking the desired
 /// mining time into account.
diff --git a/core/node/fee_model/src/lib.rs b/core/node/fee_model/src/lib.rs
index 66a1c07a1c64..f65239912523 100644
--- a/core/node/fee_model/src/lib.rs
+++ b/core/node/fee_model/src/lib.rs
@@ -286,9 +286,10 @@ impl BatchFeeModelInputProvider for MockBatchFeeParamsProvider {
 mod tests {
     use std::num::NonZeroU64;

+    use l1_gas_price::GasAdjusterClient;
     use zksync_base_token_adjuster::NoOpRatioProvider;
     use zksync_config::{configs::eth_sender::PubdataSendingMode, GasAdjusterConfig};
-    use zksync_eth_client::{clients::MockEthereum, BaseFees};
+    use zksync_eth_client::{clients::MockSettlementLayer, BaseFees};
     use zksync_types::{commitment::L1BatchCommitmentMode, fee_model::BaseTokenConversionRatio};

     use super::*;
@@ -744,19 +745,20 @@ mod tests {
     }

     // Helper function to create BaseFees.
-    fn base_fees(block: u64, blob: U256) -> BaseFees {
+    fn test_base_fees(block: u64, blob: U256, pubdata: U256) -> BaseFees {
         BaseFees {
             base_fee_per_gas: block,
             base_fee_per_blob_gas: blob,
+            l2_pubdata_price: pubdata,
         }
     }

     // Helper function to setup the GasAdjuster.
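    // Assumes two mined blocks are enough fee history for the adjuster to sample (see `advance_block_number(2)` below).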
async fn setup_gas_adjuster(l1_gas_price: u64, l1_pubdata_price: u64) -> GasAdjuster {
-        let mock = MockEthereum::builder()
+        let mock = MockSettlementLayer::builder()
             .with_fee_history(vec![
-                base_fees(0, U256::from(4)),
-                base_fees(1, U256::from(3)),
+                test_base_fees(0, U256::from(4), U256::from(0)),
+                test_base_fees(1, U256::from(3), U256::from(0)),
             ])
             .build();
         mock.advance_block_number(2); // Ensure we have enough blocks for the fee history
@@ -770,7 +772,7 @@ mod tests {
         };

         GasAdjuster::new(
-            Box::new(mock.into_client()),
+            GasAdjusterClient::from_l1(Box::new(mock.into_client())),
             gas_adjuster_config,
             PubdataSendingMode::Blobs,
             L1BatchCommitmentMode::Rollup,
diff --git a/core/node/node_framework/Cargo.toml b/core/node/node_framework/Cargo.toml
index 640000c6a7d8..142d6cfa11ab 100644
--- a/core/node/node_framework/Cargo.toml
+++ b/core/node/node_framework/Cargo.toml
@@ -53,6 +53,7 @@ zksync_node_db_pruner.workspace = true
 zksync_base_token_adjuster.workspace = true
 zksync_node_storage_init.workspace = true
 zksync_external_price_api.workspace = true
+zksync_external_proof_integration_api.workspace = true

 pin-project-lite.workspace = true
 tracing.workspace = true
diff --git a/core/node/node_framework/src/implementations/layers/eth_sender/manager.rs b/core/node/node_framework/src/implementations/layers/eth_sender/manager.rs
index e979c372d8e8..b5f8ee423138 100644
--- a/core/node/node_framework/src/implementations/layers/eth_sender/manager.rs
+++ b/core/node/node_framework/src/implementations/layers/eth_sender/manager.rs
@@ -7,7 +7,7 @@ use crate::{
     implementations::resources::{
         circuit_breakers::CircuitBreakersResource,
         eth_interface::{BoundEthInterfaceForBlobsResource, BoundEthInterfaceResource},
-        l1_tx_params::L1TxParamsResource,
+        gas_adjuster::GasAdjusterResource,
         pools::{MasterPool, PoolResource, ReplicaPool},
     },
     service::StopReceiver,
@@ -45,7 +45,7 @@ pub struct Input {
     pub replica_pool: PoolResource<ReplicaPool>,
     pub eth_client: BoundEthInterfaceResource,
     pub eth_client_blobs: Option<BoundEthInterfaceForBlobsResource>,
-    pub l1_tx_params: L1TxParamsResource,
+    pub gas_adjuster: GasAdjusterResource,
     #[context(default)]
     pub circuit_breakers: CircuitBreakersResource,
 }
@@ -82,7 +82,7 @@ impl WiringLayer for EthTxManagerLayer {

         let config = self.eth_sender_config.sender.context("sender")?;

-        let gas_adjuster = input.l1_tx_params.0;
+        let gas_adjuster = input.gas_adjuster.0;

         let eth_tx_manager = EthTxManager::new(
             master_pool,
diff --git a/core/node/node_framework/src/implementations/layers/external_proof_integration_api.rs b/core/node/node_framework/src/implementations/layers/external_proof_integration_api.rs
new file mode 100644
index 000000000000..7877bc6abbe3
--- /dev/null
+++ b/core/node/node_framework/src/implementations/layers/external_proof_integration_api.rs
@@ -0,0 +1,100 @@
+use std::sync::Arc;
+
+use zksync_config::configs::external_proof_integration_api::ExternalProofIntegrationApiConfig;
+use zksync_dal::{ConnectionPool, Core};
+use zksync_object_store::ObjectStore;
+use zksync_types::commitment::L1BatchCommitmentMode;
+
+use crate::{
+    implementations::resources::{
+        object_store::ObjectStoreResource,
+        pools::{PoolResource, ReplicaPool},
+    },
+    service::StopReceiver,
+    task::{Task, TaskId},
+    wiring_layer::{WiringError, WiringLayer},
+    FromContext, IntoContext,
+};
+
+/// Wiring layer for the external proof integration API server.
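+/// Lets external provers fetch proof generation inputs for L1 batches and submit the resulting proofs.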
+#[derive(Debug)]
+pub struct ExternalProofIntegrationApiLayer {
+    external_proof_integration_api_config: ExternalProofIntegrationApiConfig,
+    commitment_mode: L1BatchCommitmentMode,
+}
+
+#[derive(Debug, FromContext)]
+#[context(crate = crate)]
+pub struct Input {
+    pub replica_pool: PoolResource<ReplicaPool>,
+    pub object_store: ObjectStoreResource,
+}
+
+#[derive(Debug, IntoContext)]
+#[context(crate = crate)]
+pub struct Output {
+    #[context(task)]
+    pub task: ProverApiTask,
+}
+
+impl ExternalProofIntegrationApiLayer {
+    pub fn new(
+        external_proof_integration_api_config: ExternalProofIntegrationApiConfig,
+        commitment_mode: L1BatchCommitmentMode,
+    ) -> Self {
+        Self {
+            external_proof_integration_api_config,
+            commitment_mode,
+        }
+    }
+}
+
+#[async_trait::async_trait]
+impl WiringLayer for ExternalProofIntegrationApiLayer {
+    type Input = Input;
+    type Output = Output;
+
+    fn layer_name(&self) -> &'static str {
+        "external_proof_integration_api_layer"
+    }
+
+    async fn wire(self, input: Self::Input) -> Result<Self::Output, WiringError> {
+        let main_pool = input.replica_pool.get().await?;
+        let blob_store = input.object_store.0;
+
+        let task = ProverApiTask {
+            external_proof_integration_api_config: self.external_proof_integration_api_config,
+            blob_store,
+            main_pool,
+            commitment_mode: self.commitment_mode,
+        };
+
+        Ok(Output { task })
+    }
+}
+
+#[derive(Debug)]
+pub struct ProverApiTask {
+    external_proof_integration_api_config: ExternalProofIntegrationApiConfig,
+    blob_store: Arc<dyn ObjectStore>,
+    main_pool: ConnectionPool<Core>,
+    commitment_mode: L1BatchCommitmentMode,
+}
+
+#[async_trait::async_trait]
+impl Task for ProverApiTask {
+    fn id(&self) -> TaskId {
+        "external_proof_integration_api".into()
+    }
+
+    async fn run(self: Box<Self>, stop_receiver: StopReceiver) -> anyhow::Result<()> {
+        zksync_external_proof_integration_api::run_server(
+            self.external_proof_integration_api_config,
+            self.blob_store,
+            self.main_pool,
+            self.commitment_mode,
+            stop_receiver.0,
+        )
+        .await
+    }
+}
diff --git a/core/node/node_framework/src/implementations/layers/gas_adjuster.rs b/core/node/node_framework/src/implementations/layers/gas_adjuster.rs
new file mode 100644
index 000000000000..229700289a71
--- /dev/null
+++ b/core/node/node_framework/src/implementations/layers/gas_adjuster.rs
@@ -0,0 +1,114 @@
+use std::sync::Arc;
+
+use anyhow::Context;
+use zksync_config::{configs::eth_sender::PubdataSendingMode, GasAdjusterConfig, GenesisConfig};
+use zksync_node_fee_model::l1_gas_price::GasAdjuster;
+
+use crate::{
+    implementations::resources::{
+        eth_interface::{EthInterfaceResource, L2InterfaceResource},
+        gas_adjuster::GasAdjusterResource,
+    },
+    service::StopReceiver,
+    task::{Task, TaskId},
+    wiring_layer::{WiringError, WiringLayer},
+    FromContext, IntoContext,
+};
+
+/// Wiring layer for [`GasAdjuster`], which provides gas price estimates.
+/// The adjuster samples fee history from the settlement layer (L1, or L2 when settling on the gateway).
+#[derive(Debug)]
+pub struct GasAdjusterLayer {
+    gas_adjuster_config: GasAdjusterConfig,
+    genesis_config: GenesisConfig,
+    pubdata_sending_mode: PubdataSendingMode,
+}
+
+#[derive(Debug, FromContext)]
+#[context(crate = crate)]
+pub struct Input {
+    pub eth_interface_client: EthInterfaceResource,
+    pub l2_inteface_client: Option<L2InterfaceResource>,
+}
+
+#[derive(Debug, IntoContext)]
+#[context(crate = crate)]
+pub struct Output {
+    pub gas_adjuster: GasAdjusterResource,
+    /// Only runs if someone uses the resources listed above.
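+    /// If nothing ends up using the resource, the task simply waits for the stop signal (see `GasAdjusterTask::run()`).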
+ #[context(task)] + pub gas_adjuster_task: GasAdjusterTask, +} + +impl GasAdjusterLayer { + pub fn new( + gas_adjuster_config: GasAdjusterConfig, + genesis_config: GenesisConfig, + pubdata_sending_mode: PubdataSendingMode, + ) -> Self { + Self { + gas_adjuster_config, + genesis_config, + pubdata_sending_mode, + } + } +} + +#[async_trait::async_trait] +impl WiringLayer for GasAdjusterLayer { + type Input = Input; + type Output = Output; + + fn layer_name(&self) -> &'static str { + "gas_adjuster_layer" + } + + async fn wire(self, input: Self::Input) -> Result { + let client = if self.gas_adjuster_config.settlement_mode.is_gateway() { + input.l2_inteface_client.unwrap().0.into() + } else { + input.eth_interface_client.0.into() + }; + + let adjuster = GasAdjuster::new( + client, + self.gas_adjuster_config, + self.pubdata_sending_mode, + self.genesis_config.l1_batch_commit_data_generator_mode, + ) + .await + .context("GasAdjuster::new()")?; + let gas_adjuster = Arc::new(adjuster); + + Ok(Output { + gas_adjuster: gas_adjuster.clone().into(), + gas_adjuster_task: GasAdjusterTask { gas_adjuster }, + }) + } +} + +#[derive(Debug)] +pub struct GasAdjusterTask { + gas_adjuster: Arc, +} + +#[async_trait::async_trait] +impl Task for GasAdjusterTask { + fn id(&self) -> TaskId { + "gas_adjuster".into() + } + + async fn run(self: Box, mut stop_receiver: StopReceiver) -> anyhow::Result<()> { + // Gas adjuster layer is added to provide a resource for anyone to use, but it comes with + // a support task. If nobody has used the resource, we don't need to run the support task. + if Arc::strong_count(&self.gas_adjuster) == 1 { + tracing::info!( + "Gas adjuster is not used by any other task, not running the support task" + ); + stop_receiver.0.changed().await?; + return Ok(()); + } + + self.gas_adjuster.run(stop_receiver.0).await + } +} diff --git a/core/node/node_framework/src/implementations/layers/l1_gas.rs b/core/node/node_framework/src/implementations/layers/l1_gas.rs index 85e0422cdcb1..9a4ccb8264f6 100644 --- a/core/node/node_framework/src/implementations/layers/l1_gas.rs +++ b/core/node/node_framework/src/implementations/layers/l1_gas.rs @@ -1,39 +1,33 @@ use std::sync::Arc; -use anyhow::Context; -use zksync_config::{ - configs::{chain::StateKeeperConfig, eth_sender::PubdataSendingMode}, - GasAdjusterConfig, GenesisConfig, -}; -use zksync_node_fee_model::{l1_gas_price::GasAdjuster, MainNodeFeeInputProvider}; +use zksync_config::configs::chain::StateKeeperConfig; +use zksync_node_fee_model::{ApiFeeInputProvider, MainNodeFeeInputProvider}; use zksync_types::fee_model::FeeModelConfig; use crate::{ implementations::resources::{ base_token_ratio_provider::BaseTokenRatioProviderResource, - eth_interface::EthInterfaceResource, fee_input::FeeInputResource, + fee_input::{ApiFeeInputResource, SequencerFeeInputResource}, + gas_adjuster::GasAdjusterResource, l1_tx_params::L1TxParamsResource, + pools::{PoolResource, ReplicaPool}, }, - service::StopReceiver, - task::{Task, TaskId}, wiring_layer::{WiringError, WiringLayer}, FromContext, IntoContext, }; -/// Wiring layer for sequencer L1 gas interfaces. +/// Wiring layer for L1 gas interfaces. /// Adds several resources that depend on L1 gas price. 
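+/// The sequencer and API fee inputs are now exposed as separate resources so they can be wired independently.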
#[derive(Debug)] -pub struct SequencerL1GasLayer { - gas_adjuster_config: GasAdjusterConfig, - genesis_config: GenesisConfig, - pubdata_sending_mode: PubdataSendingMode, +pub struct L1GasLayer { state_keeper_config: StateKeeperConfig, } #[derive(Debug, FromContext)] #[context(crate = crate)] pub struct Input { - pub eth_client: EthInterfaceResource, + pub gas_adjuster: GasAdjusterResource, + pub replica_pool: PoolResource, /// If not provided, the base token assumed to be ETH, and the ratio will be constant. #[context(default)] pub base_token_ratio_provider: BaseTokenRatioProviderResource, @@ -42,87 +36,47 @@ pub struct Input { #[derive(Debug, IntoContext)] #[context(crate = crate)] pub struct Output { - pub fee_input: FeeInputResource, + pub sequencer_fee_input: SequencerFeeInputResource, + pub api_fee_input: ApiFeeInputResource, pub l1_tx_params: L1TxParamsResource, - /// Only runs if someone uses the resources listed above. - #[context(task)] - pub gas_adjuster_task: GasAdjusterTask, } -impl SequencerL1GasLayer { - pub fn new( - gas_adjuster_config: GasAdjusterConfig, - genesis_config: GenesisConfig, - state_keeper_config: StateKeeperConfig, - pubdata_sending_mode: PubdataSendingMode, - ) -> Self { +impl L1GasLayer { + pub fn new(state_keeper_config: StateKeeperConfig) -> Self { Self { - gas_adjuster_config, - genesis_config, - pubdata_sending_mode, state_keeper_config, } } } #[async_trait::async_trait] -impl WiringLayer for SequencerL1GasLayer { +impl WiringLayer for L1GasLayer { type Input = Input; type Output = Output; fn layer_name(&self) -> &'static str { - "sequencer_l1_gas_layer" + "l1_gas_layer" } async fn wire(self, input: Self::Input) -> Result { - let client = input.eth_client.0; - let adjuster = GasAdjuster::new( - client, - self.gas_adjuster_config, - self.pubdata_sending_mode, - self.genesis_config.l1_batch_commit_data_generator_mode, - ) - .await - .context("GasAdjuster::new()")?; - let gas_adjuster = Arc::new(adjuster); - let ratio_provider = input.base_token_ratio_provider; - let batch_fee_input_provider = Arc::new(MainNodeFeeInputProvider::new( - gas_adjuster.clone(), - ratio_provider.0.clone(), + let main_fee_input_provider = Arc::new(MainNodeFeeInputProvider::new( + input.gas_adjuster.0.clone(), + ratio_provider.0, FeeModelConfig::from_state_keeper_config(&self.state_keeper_config), )); - Ok(Output { - fee_input: batch_fee_input_provider.into(), - l1_tx_params: gas_adjuster.clone().into(), - gas_adjuster_task: GasAdjusterTask { gas_adjuster }, - }) - } -} - -#[derive(Debug)] -pub struct GasAdjusterTask { - gas_adjuster: Arc, -} - -#[async_trait::async_trait] -impl Task for GasAdjusterTask { - fn id(&self) -> TaskId { - "gas_adjuster".into() - } - async fn run(self: Box, mut stop_receiver: StopReceiver) -> anyhow::Result<()> { - // Gas adjuster layer is added to provide a resource for anyone to use, but it comes with - // a support task. If nobody has used the resource, we don't need to run the support task. 
- if Arc::strong_count(&self.gas_adjuster) == 1 { - tracing::info!( - "Gas adjuster is not used by any other task, not running the support task" - ); - stop_receiver.0.changed().await?; - return Ok(()); - } + let replica_pool = input.replica_pool.get().await?; + let api_fee_input_provider = Arc::new(ApiFeeInputProvider::new( + main_fee_input_provider.clone(), + replica_pool, + )); - self.gas_adjuster.run(stop_receiver.0).await + Ok(Output { + sequencer_fee_input: main_fee_input_provider.into(), + api_fee_input: api_fee_input_provider.into(), + l1_tx_params: input.gas_adjuster.0.into(), + }) } } diff --git a/core/node/node_framework/src/implementations/layers/main_node_fee_params_fetcher.rs b/core/node/node_framework/src/implementations/layers/main_node_fee_params_fetcher.rs index 848dd4464387..6e7df3b47e42 100644 --- a/core/node/node_framework/src/implementations/layers/main_node_fee_params_fetcher.rs +++ b/core/node/node_framework/src/implementations/layers/main_node_fee_params_fetcher.rs @@ -4,7 +4,8 @@ use zksync_node_fee_model::l1_gas_price::MainNodeFeeParamsFetcher; use crate::{ implementations::resources::{ - fee_input::FeeInputResource, main_node_client::MainNodeClientResource, + fee_input::{ApiFeeInputResource, SequencerFeeInputResource}, + main_node_client::MainNodeClientResource, }, service::StopReceiver, task::{Task, TaskId}, @@ -26,7 +27,8 @@ pub struct Input { #[derive(Debug, IntoContext)] #[context(crate = crate)] pub struct Output { - pub fee_input: FeeInputResource, + pub sequencer_fee_input: SequencerFeeInputResource, + pub api_fee_input: ApiFeeInputResource, #[context(task)] pub fetcher: MainNodeFeeParamsFetcherTask, } @@ -44,7 +46,8 @@ impl WiringLayer for MainNodeFeeParamsFetcherLayer { let MainNodeClientResource(main_node_client) = input.main_node_client; let fetcher = Arc::new(MainNodeFeeParamsFetcher::new(main_node_client)); Ok(Output { - fee_input: fetcher.clone().into(), + sequencer_fee_input: fetcher.clone().into(), + api_fee_input: fetcher.clone().into(), fetcher: MainNodeFeeParamsFetcherTask { fetcher }, }) } diff --git a/core/node/node_framework/src/implementations/layers/mod.rs b/core/node/node_framework/src/implementations/layers/mod.rs index 55bc0a40ca73..6256f2d61043 100644 --- a/core/node/node_framework/src/implementations/layers/mod.rs +++ b/core/node/node_framework/src/implementations/layers/mod.rs @@ -9,6 +9,8 @@ pub mod contract_verification_api; pub mod da_dispatcher; pub mod eth_sender; pub mod eth_watch; +pub mod external_proof_integration_api; +pub mod gas_adjuster; pub mod healtcheck_server; pub mod house_keeper; pub mod l1_batch_commitment_mode_validation; diff --git a/core/node/node_framework/src/implementations/layers/query_eth_client.rs b/core/node/node_framework/src/implementations/layers/query_eth_client.rs index b3a9c7d4b275..116823d92d8a 100644 --- a/core/node/node_framework/src/implementations/layers/query_eth_client.rs +++ b/core/node/node_framework/src/implementations/layers/query_eth_client.rs @@ -1,10 +1,11 @@ use anyhow::Context; -use zksync_types::{url::SensitiveUrl, SLChainId}; +use zksync_types::{settlement::SettlementMode, url::SensitiveUrl, L2ChainId, SLChainId}; use zksync_web3_decl::client::Client; use crate::{ - implementations::resources::eth_interface::EthInterfaceResource, + implementations::resources::eth_interface::{EthInterfaceResource, L2InterfaceResource}, wiring_layer::{WiringError, WiringLayer}, + IntoContext, }; /// Wiring layer for Ethereum client. 
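+/// Provides an L1 query client and, when the chain settles on the gateway, an L2 query client as well.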
@@ -12,28 +13,58 @@ use crate::{ pub struct QueryEthClientLayer { chain_id: SLChainId, web3_url: SensitiveUrl, + settlement_mode: SettlementMode, } impl QueryEthClientLayer { - pub fn new(chain_id: SLChainId, web3_url: SensitiveUrl) -> Self { - Self { chain_id, web3_url } + pub fn new( + chain_id: SLChainId, + web3_url: SensitiveUrl, + settlement_mode: SettlementMode, + ) -> Self { + Self { + chain_id, + web3_url, + settlement_mode, + } } } +#[derive(Debug, IntoContext)] +#[context(crate = crate)] +pub struct Output { + query_client_l1: EthInterfaceResource, + query_client_l2: Option, +} + #[async_trait::async_trait] impl WiringLayer for QueryEthClientLayer { type Input = (); - type Output = EthInterfaceResource; + type Output = Output; fn layer_name(&self) -> &'static str { "query_eth_client_layer" } - async fn wire(self, _input: Self::Input) -> Result { - let query_client = Client::http(self.web3_url.clone()) - .context("Client::new()")? - .for_network(self.chain_id.into()) - .build(); - Ok(EthInterfaceResource(Box::new(query_client))) + async fn wire(self, _input: Self::Input) -> Result { + // Both the L1 and L2 client have the same URL, but provide different type guarantees. + Ok(Output { + query_client_l1: EthInterfaceResource(Box::new( + Client::http(self.web3_url.clone()) + .context("Client::new()")? + .for_network(self.chain_id.into()) + .build(), + )), + query_client_l2: if self.settlement_mode.is_gateway() { + Some(L2InterfaceResource(Box::new( + Client::http(self.web3_url.clone()) + .context("Client::new()")? + .for_network(L2ChainId::try_from(self.chain_id.0).unwrap().into()) + .build(), + ))) + } else { + None + }, + }) } } diff --git a/core/node/node_framework/src/implementations/layers/state_keeper/main_batch_executor.rs b/core/node/node_framework/src/implementations/layers/state_keeper/main_batch_executor.rs index 33d3b5676aac..3288b68bdebb 100644 --- a/core/node/node_framework/src/implementations/layers/state_keeper/main_batch_executor.rs +++ b/core/node/node_framework/src/implementations/layers/state_keeper/main_batch_executor.rs @@ -1,9 +1,9 @@ use zksync_state_keeper::MainBatchExecutor; +use zksync_types::vm::FastVmMode; use crate::{ implementations::resources::state_keeper::BatchExecutorResource, wiring_layer::{WiringError, WiringLayer}, - IntoContext, }; /// Wiring layer for `MainBatchExecutor`, part of the state keeper responsible for running the VM. 
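+/// The VM implementation can be selected via `FastVmMode` (see `with_fast_vm_mode()`).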
@@ -11,12 +11,7 @@ use crate::{ pub struct MainBatchExecutorLayer { save_call_traces: bool, optional_bytecode_compression: bool, -} - -#[derive(Debug, IntoContext)] -#[context(crate = crate)] -pub struct Output { - pub batch_executor: BatchExecutorResource, + fast_vm_mode: FastVmMode, } impl MainBatchExecutorLayer { @@ -24,25 +19,29 @@ impl MainBatchExecutorLayer { Self { save_call_traces, optional_bytecode_compression, + fast_vm_mode: FastVmMode::default(), } } + + pub fn with_fast_vm_mode(mut self, mode: FastVmMode) -> Self { + self.fast_vm_mode = mode; + self + } } #[async_trait::async_trait] impl WiringLayer for MainBatchExecutorLayer { type Input = (); - type Output = Output; + type Output = BatchExecutorResource; fn layer_name(&self) -> &'static str { "main_batch_executor_layer" } - async fn wire(self, _input: Self::Input) -> Result { - let builder = + async fn wire(self, (): Self::Input) -> Result { + let mut executor = MainBatchExecutor::new(self.save_call_traces, self.optional_bytecode_compression); - - Ok(Output { - batch_executor: builder.into(), - }) + executor.set_fast_vm_mode(self.fast_vm_mode); + Ok(executor.into()) } } diff --git a/core/node/node_framework/src/implementations/layers/state_keeper/mempool_io.rs b/core/node/node_framework/src/implementations/layers/state_keeper/mempool_io.rs index 6be6544ee3df..ec2c415b9bbd 100644 --- a/core/node/node_framework/src/implementations/layers/state_keeper/mempool_io.rs +++ b/core/node/node_framework/src/implementations/layers/state_keeper/mempool_io.rs @@ -8,7 +8,7 @@ use zksync_types::L2ChainId; use crate::{ implementations::resources::{ - fee_input::FeeInputResource, + fee_input::SequencerFeeInputResource, pools::{MasterPool, PoolResource}, state_keeper::{ConditionalSealerResource, StateKeeperIOResource}, }, @@ -44,7 +44,7 @@ pub struct MempoolIOLayer { #[derive(Debug, FromContext)] #[context(crate = crate)] pub struct Input { - pub fee_input: FeeInputResource, + pub fee_input: SequencerFeeInputResource, pub master_pool: PoolResource, } diff --git a/core/node/node_framework/src/implementations/layers/vm_runner/bwip.rs b/core/node/node_framework/src/implementations/layers/vm_runner/bwip.rs index 74b4b5e32072..ee2fb84416e1 100644 --- a/core/node/node_framework/src/implementations/layers/vm_runner/bwip.rs +++ b/core/node/node_framework/src/implementations/layers/vm_runner/bwip.rs @@ -1,8 +1,9 @@ use zksync_config::configs::vm_runner::BasicWitnessInputProducerConfig; +use zksync_state_keeper::MainBatchExecutor; use zksync_types::L2ChainId; use zksync_vm_runner::{ - BasicWitnessInputProducer, BasicWitnessInputProducerIo, ConcurrentOutputHandlerFactoryTask, - StorageSyncTask, + impls::{BasicWitnessInputProducer, BasicWitnessInputProducerIo}, + ConcurrentOutputHandlerFactoryTask, StorageSyncTask, }; use crate::{ @@ -18,17 +19,14 @@ use crate::{ #[derive(Debug)] pub struct BasicWitnessInputProducerLayer { - basic_witness_input_producer_config: BasicWitnessInputProducerConfig, + config: BasicWitnessInputProducerConfig, zksync_network_id: L2ChainId, } impl BasicWitnessInputProducerLayer { - pub fn new( - basic_witness_input_producer_config: BasicWitnessInputProducerConfig, - zksync_network_id: L2ChainId, - ) -> Self { + pub fn new(config: BasicWitnessInputProducerConfig, zksync_network_id: L2ChainId) -> Self { Self { - basic_witness_input_producer_config, + config, zksync_network_id, } } @@ -68,25 +66,26 @@ impl WiringLayer for BasicWitnessInputProducerLayer { object_store, } = input; + // - 1 connection for `StorageSyncTask` which can 
hold a long-term connection in case it needs to + // catch up cache. + // - 1 connection for `ConcurrentOutputHandlerFactoryTask` / `VmRunner` as they need occasional access + // to DB for querying last processed batch and last ready to be loaded batch. + // - `window_size` connections for `BasicWitnessInputProducer` + // as there can be multiple output handlers holding multi-second connections to process + // BWIP data. + let connection_pool = master_pool.get_custom(self.config.window_size + 2).await?; + + // We don't get the executor from the context because it would contain state keeper-specific settings. + let batch_executor = Box::new(MainBatchExecutor::new(false, false)); + let (basic_witness_input_producer, tasks) = BasicWitnessInputProducer::new( - // One for `StorageSyncTask` which can hold a long-term connection in case it needs to - // catch up cache. - // - // One for `ConcurrentOutputHandlerFactoryTask`/`VmRunner` as they need occasional access - // to DB for querying last processed batch and last ready to be loaded batch. - // - // `window_size` connections for `BasicWitnessInputProducer` - // as there can be multiple output handlers holding multi-second connections to process - // BWIP data. - master_pool - .get_custom(self.basic_witness_input_producer_config.window_size + 2) - .await?, + connection_pool, object_store.0, - self.basic_witness_input_producer_config.db_path, + batch_executor, + self.config.db_path, self.zksync_network_id, - self.basic_witness_input_producer_config - .first_processed_batch, - self.basic_witness_input_producer_config.window_size, + self.config.first_processed_batch, + self.config.window_size, ) .await?; diff --git a/core/node/node_framework/src/implementations/layers/vm_runner/mod.rs b/core/node/node_framework/src/implementations/layers/vm_runner/mod.rs index 91e92ffcd1ba..85b7028bc799 100644 --- a/core/node/node_framework/src/implementations/layers/vm_runner/mod.rs +++ b/core/node/node_framework/src/implementations/layers/vm_runner/mod.rs @@ -6,6 +6,7 @@ use crate::{ }; pub mod bwip; +pub mod playground; pub mod protective_reads; #[async_trait::async_trait] diff --git a/core/node/node_framework/src/implementations/layers/vm_runner/playground.rs b/core/node/node_framework/src/implementations/layers/vm_runner/playground.rs new file mode 100644 index 000000000000..810d538ba978 --- /dev/null +++ b/core/node/node_framework/src/implementations/layers/vm_runner/playground.rs @@ -0,0 +1,117 @@ +use async_trait::async_trait; +use zksync_config::configs::ExperimentalVmPlaygroundConfig; +use zksync_node_framework_derive::{FromContext, IntoContext}; +use zksync_types::L2ChainId; +use zksync_vm_runner::{ + impls::{VmPlayground, VmPlaygroundIo, VmPlaygroundLoaderTask}, + ConcurrentOutputHandlerFactoryTask, +}; + +use crate::{ + implementations::resources::{ + healthcheck::AppHealthCheckResource, + pools::{MasterPool, PoolResource}, + }, + StopReceiver, Task, TaskId, WiringError, WiringLayer, +}; + +#[derive(Debug)] +pub struct VmPlaygroundLayer { + config: ExperimentalVmPlaygroundConfig, + zksync_network_id: L2ChainId, +} + +impl VmPlaygroundLayer { + pub fn new(config: ExperimentalVmPlaygroundConfig, zksync_network_id: L2ChainId) -> Self { + Self { + config, + zksync_network_id, + } + } +} + +#[derive(Debug, FromContext)] +#[context(crate = crate)] +pub struct Input { + pub master_pool: PoolResource, + #[context(default)] + pub app_health: AppHealthCheckResource, +} + +#[derive(Debug, IntoContext)] +#[context(crate = crate)] +pub struct Output { + 
#[context(task)] + pub output_handler_factory_task: ConcurrentOutputHandlerFactoryTask, + #[context(task)] + pub loader_task: VmPlaygroundLoaderTask, + #[context(task)] + pub playground: VmPlayground, +} + +#[async_trait] +impl WiringLayer for VmPlaygroundLayer { + type Input = Input; + type Output = Output; + + fn layer_name(&self) -> &'static str { + "vm_runner_playground" + } + + async fn wire(self, input: Self::Input) -> Result { + let Input { + master_pool, + app_health, + } = input; + + // - 1 connection for `StorageSyncTask` which can hold a long-term connection in case it needs to + // catch up cache. + // - 1 connection for `ConcurrentOutputHandlerFactoryTask` / `VmRunner` as they need occasional access + // to DB for querying last processed batch and last ready to be loaded batch. + // - 1 connection for the only running VM instance. + let connection_pool = master_pool.get_custom(3).await?; + + let (playground, tasks) = VmPlayground::new( + connection_pool, + self.config.fast_vm_mode, + self.config.db_path, + self.zksync_network_id, + self.config.first_processed_batch, + self.config.reset, + ) + .await?; + + app_health + .0 + .insert_component(playground.health_check()) + .map_err(WiringError::internal)?; + + Ok(Output { + output_handler_factory_task: tasks.output_handler_factory_task, + loader_task: tasks.loader_task, + playground, + }) + } +} + +#[async_trait] +impl Task for VmPlaygroundLoaderTask { + fn id(&self) -> TaskId { + "vm_runner/playground/storage_sync".into() + } + + async fn run(self: Box, stop_receiver: StopReceiver) -> anyhow::Result<()> { + (*self).run(stop_receiver.0).await + } +} + +#[async_trait] +impl Task for VmPlayground { + fn id(&self) -> TaskId { + "vm_runner/playground".into() + } + + async fn run(self: Box, stop_receiver: StopReceiver) -> anyhow::Result<()> { + (*self).run(&stop_receiver.0).await + } +} diff --git a/core/node/node_framework/src/implementations/layers/vm_runner/protective_reads.rs b/core/node/node_framework/src/implementations/layers/vm_runner/protective_reads.rs index 3b07d0cea139..a0b0d18a4d93 100644 --- a/core/node/node_framework/src/implementations/layers/vm_runner/protective_reads.rs +++ b/core/node/node_framework/src/implementations/layers/vm_runner/protective_reads.rs @@ -2,7 +2,8 @@ use zksync_config::configs::vm_runner::ProtectiveReadsWriterConfig; use zksync_node_framework_derive::FromContext; use zksync_types::L2ChainId; use zksync_vm_runner::{ - ConcurrentOutputHandlerFactoryTask, ProtectiveReadsIo, ProtectiveReadsWriter, StorageSyncTask, + impls::{ProtectiveReadsIo, ProtectiveReadsWriter}, + ConcurrentOutputHandlerFactoryTask, StorageSyncTask, }; use crate::{ diff --git a/core/node/node_framework/src/implementations/layers/web3_api/tx_sender.rs b/core/node/node_framework/src/implementations/layers/web3_api/tx_sender.rs index 4ece9b024300..3574b8e8c24c 100644 --- a/core/node/node_framework/src/implementations/layers/web3_api/tx_sender.rs +++ b/core/node/node_framework/src/implementations/layers/web3_api/tx_sender.rs @@ -15,7 +15,7 @@ use zksync_web3_decl::{ use crate::{ implementations::resources::{ - fee_input::FeeInputResource, + fee_input::ApiFeeInputResource, main_node_client::MainNodeClientResource, pools::{PoolResource, ReplicaPool}, state_keeper::ConditionalSealerResource, @@ -67,7 +67,7 @@ pub struct TxSenderLayer { pub struct Input { pub tx_sink: TxSinkResource, pub replica_pool: PoolResource, - pub fee_input: FeeInputResource, + pub fee_input: ApiFeeInputResource, pub main_node_client: Option, pub sealer: Option, } 
diff --git a/core/node/node_framework/src/implementations/resources/eth_interface.rs b/core/node/node_framework/src/implementations/resources/eth_interface.rs index cf470c0379da..5879610b75ed 100644 --- a/core/node/node_framework/src/implementations/resources/eth_interface.rs +++ b/core/node/node_framework/src/implementations/resources/eth_interface.rs @@ -1,5 +1,5 @@ use zksync_eth_client::BoundEthInterface; -use zksync_web3_decl::client::{DynClient, L1}; +use zksync_web3_decl::client::{DynClient, L1, L2}; use crate::resource::Resource; @@ -13,6 +13,20 @@ impl Resource for EthInterfaceResource { } } +/// A resource that provides L2 interface object to the service. +/// It is expected to have the same URL as the `EthInterfaceResource`, but have different capabilities. +/// +/// This resource is provided separately from `EthInterfaceResource`, to provide type safety in places, where the +/// component must work with L1-interface only and should use `EthInterfaceResource` instead. +#[derive(Debug, Clone)] +pub struct L2InterfaceResource(pub Box>); + +impl Resource for L2InterfaceResource { + fn name() -> String { + "common/l2_interface".into() + } +} + /// A resource that provides L1 interface with signing capabilities to the service. #[derive(Debug, Clone)] pub struct BoundEthInterfaceResource(pub Box); diff --git a/core/node/node_framework/src/implementations/resources/fee_input.rs b/core/node/node_framework/src/implementations/resources/fee_input.rs index 10271977bac7..1553df1ee5bd 100644 --- a/core/node/node_framework/src/implementations/resources/fee_input.rs +++ b/core/node/node_framework/src/implementations/resources/fee_input.rs @@ -4,17 +4,33 @@ use zksync_node_fee_model::BatchFeeModelInputProvider; use crate::resource::Resource; -/// A resource that provides [`BatchFeeModelInputProvider`] implementation to the service. +/// A resource that provides [`BatchFeeModelInputProvider`] implementation to the service and is used by sequencer. #[derive(Debug, Clone)] -pub struct FeeInputResource(pub Arc); +pub struct SequencerFeeInputResource(pub Arc); -impl Resource for FeeInputResource { +impl Resource for SequencerFeeInputResource { fn name() -> String { - "common/fee_input".into() + "common/sequencer_fee_input".into() } } -impl From> for FeeInputResource { +impl From> for SequencerFeeInputResource { + fn from(provider: Arc) -> Self { + Self(provider) + } +} + +/// A resource that provides [`BatchFeeModelInputProvider`] implementation to the service and is used by API. +#[derive(Debug, Clone)] +pub struct ApiFeeInputResource(pub Arc); + +impl Resource for ApiFeeInputResource { + fn name() -> String { + "common/api_fee_input".into() + } +} + +impl From> for ApiFeeInputResource { fn from(provider: Arc) -> Self { Self(provider) } diff --git a/core/node/node_framework/src/implementations/resources/gas_adjuster.rs b/core/node/node_framework/src/implementations/resources/gas_adjuster.rs new file mode 100644 index 000000000000..ff135c715cc3 --- /dev/null +++ b/core/node/node_framework/src/implementations/resources/gas_adjuster.rs @@ -0,0 +1,21 @@ +use std::sync::Arc; + +use zksync_node_fee_model::l1_gas_price::GasAdjuster; + +use crate::resource::Resource; + +/// A resource that provides [`GasAdjuster`] to the service. 
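+/// Consumed by the fee model layers and by the eth-sender transaction manager.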
+#[derive(Debug, Clone)]
+pub struct GasAdjusterResource(pub Arc<GasAdjuster>);
+
+impl Resource for GasAdjusterResource {
+    fn name() -> String {
+        "common/gas_adjuster".into()
+    }
+}
+
+impl From<Arc<GasAdjuster>> for GasAdjusterResource {
+    fn from(gas_adjuster: Arc<GasAdjuster>) -> Self {
+        Self(gas_adjuster)
+    }
+}
diff --git a/core/node/node_framework/src/implementations/resources/mod.rs b/core/node/node_framework/src/implementations/resources/mod.rs
index 4f82f4c3a911..e7f581c77093 100644
--- a/core/node/node_framework/src/implementations/resources/mod.rs
+++ b/core/node/node_framework/src/implementations/resources/mod.rs
@@ -4,6 +4,7 @@ pub mod circuit_breakers;
 pub mod da_client;
 pub mod eth_interface;
 pub mod fee_input;
+pub mod gas_adjuster;
 pub mod healthcheck;
 pub mod l1_tx_params;
 pub mod main_node_client;
diff --git a/core/node/node_framework/src/service/mod.rs b/core/node/node_framework/src/service/mod.rs
index b6bbaa2e4d28..b4cb5857bbab 100644
--- a/core/node/node_framework/src/service/mod.rs
+++ b/core/node/node_framework/src/service/mod.rs
@@ -153,7 +153,7 @@ impl ZkStackService {

         tracing::info!("Exiting the service");

-        if let Some(mut observability_guard) = observability_guard.into() {
+        if let Some(observability_guard) = &mut observability_guard.into() {
             // Make sure that the shutdown happens in the `tokio` context.
             let _guard = self.runtime.enter();
             observability_guard.shutdown();
diff --git a/core/node/node_sync/src/tree_data_fetcher/provider/mod.rs b/core/node/node_sync/src/tree_data_fetcher/provider/mod.rs
index 96819101fa2b..e4f68cade6a4 100644
--- a/core/node/node_sync/src/tree_data_fetcher/provider/mod.rs
+++ b/core/node/node_sync/src/tree_data_fetcher/provider/mod.rs
@@ -135,7 +135,7 @@ impl L1DataProvider {
     /// Guesses the number of an L1 block with a `BlockCommit` event for the specified L1 batch.
     /// The guess is based on the L1 batch seal timestamp.
     async fn guess_l1_commit_block_number(
-        eth_client: &DynClient<L1>,
+        eth_client: &dyn EthInterface,
         l1_batch_seal_timestamp: u64,
     ) -> EnrichedClientResult<(U64, usize)> {
         let l1_batch_seal_timestamp = U256::from(l1_batch_seal_timestamp);
@@ -171,7 +171,7 @@
     /// Gets a block that should be present on L1.
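+    /// Returns the block number together with its timestamp; fails if the block is missing.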
async fn get_block( - eth_client: &DynClient, + eth_client: &dyn EthInterface, number: web3::BlockNumber, ) -> EnrichedClientResult<(U64, U256)> { let block = eth_client.block(number.into()).await?.ok_or_else(|| { @@ -218,11 +218,9 @@ impl TreeDataProvider for L1DataProvider { let from_block = match from_block { Some(number) => number, None => { - let (approximate_block, steps) = Self::guess_l1_commit_block_number( - self.eth_client.as_ref(), - l1_batch_seal_timestamp, - ) - .await?; + let (approximate_block, steps) = + Self::guess_l1_commit_block_number(&self.eth_client, l1_batch_seal_timestamp) + .await?; tracing::debug!( number = number.0, "Guessed L1 block number for L1 batch #{number} commit in {steps} binary search steps: {approximate_block}" diff --git a/core/node/proof_data_handler/src/tee_request_processor.rs b/core/node/proof_data_handler/src/tee_request_processor.rs index 243c9e06cfcc..d85591dd2c90 100644 --- a/core/node/proof_data_handler/src/tee_request_processor.rs +++ b/core/node/proof_data_handler/src/tee_request_processor.rs @@ -49,9 +49,10 @@ impl TeeRequestProcessor { let l1_batch_number_result = connection .tee_proof_generation_dal() - .get_next_block_to_be_proven(self.config.proof_generation_timeout()) + .get_next_batch_to_be_proven(request.tee_type, self.config.proof_generation_timeout()) .await .map_err(RequestProcessorError::Dal)?; + let l1_batch_number = match l1_batch_number_result { Some(number) => number, None => return Ok(Json(TeeProofGenerationDataResponse(None))), @@ -63,9 +64,9 @@ impl TeeRequestProcessor { .await .map_err(RequestProcessorError::ObjectStore)?; - Ok(Json(TeeProofGenerationDataResponse(Some(Box::new( - tee_verifier_input, - ))))) + let response = TeeProofGenerationDataResponse(Some(Box::new(tee_verifier_input))); + + Ok(Json(response)) } pub(crate) async fn submit_proof( @@ -82,16 +83,16 @@ impl TeeRequestProcessor { let mut dal = connection.tee_proof_generation_dal(); tracing::info!( - "Received proof {:?} for block number: {:?}", + "Received proof {:?} for batch number: {:?}", proof, l1_batch_number ); dal.save_proof_artifacts_metadata( l1_batch_number, - &proof.0.signature, + proof.0.tee_type, &proof.0.pubkey, + &proof.0.signature, &proof.0.proof, - proof.0.tee_type, ) .await .map_err(RequestProcessorError::Dal)?; diff --git a/core/node/proof_data_handler/src/tests.rs b/core/node/proof_data_handler/src/tests.rs index 1fbe563d2d28..88d4930e6920 100644 --- a/core/node/proof_data_handler/src/tests.rs +++ b/core/node/proof_data_handler/src/tests.rs @@ -18,7 +18,7 @@ use zksync_prover_interface::{ api::SubmitTeeProofRequest, inputs::{TeeVerifierInput, V1TeeVerifierInput, WitnessInputMerklePaths}, }; -use zksync_types::{commitment::L1BatchCommitmentMode, L1BatchNumber, H256}; +use zksync_types::{commitment::L1BatchCommitmentMode, tee_types::TeeType, L1BatchNumber, H256}; use crate::create_proof_processing_router; @@ -94,7 +94,7 @@ async fn request_tee_proof_inputs() { }, L1BatchCommitmentMode::Rollup, ); - let req_body = Body::from(serde_json::to_vec(&json!({})).unwrap()); + let req_body = Body::from(serde_json::to_vec(&json!({ "tee_type": "Sgx" })).unwrap()); let response = app .oneshot( Request::builder() @@ -180,6 +180,26 @@ async fn submit_tee_proof() { .unwrap(); assert!(oldest_batch_number.is_none()); + + // there should be one SGX proof in the db now + + let proofs = proof_db_conn + .tee_proof_generation_dal() + .get_tee_proofs(batch_number, Some(TeeType::Sgx)) + .await + .unwrap(); + + assert_eq!(proofs.len(), 1); + + let proof = 
&proofs[0]; + + assert_eq!(proof.proof.as_ref().unwrap(), &tee_proof_request.0.proof); + assert_eq!(proof.attestation.as_ref().unwrap(), &attestation); + assert_eq!( + proof.signature.as_ref().unwrap(), + &tee_proof_request.0.signature + ); + assert_eq!(proof.pubkey.as_ref().unwrap(), &tee_proof_request.0.pubkey); } // Mock SQL db with information about the status of the TEE proof generation @@ -215,7 +235,7 @@ async fn mock_tee_batch_status( // mock SQL table with relevant information about the status of TEE proof generation ('ready_to_be_proven') proof_dal - .insert_tee_proof_generation_job(batch_number) + .insert_tee_proof_generation_job(batch_number, TeeType::Sgx) .await .expect("Failed to insert tee_proof_generation_job"); diff --git a/core/node/state_keeper/src/batch_executor/main_executor.rs b/core/node/state_keeper/src/batch_executor/main_executor.rs index 4c85fc5bb1fc..d3595323a9a3 100644 --- a/core/node/state_keeper/src/batch_executor/main_executor.rs +++ b/core/node/state_keeper/src/batch_executor/main_executor.rs @@ -13,8 +13,8 @@ use zksync_multivm::{ MultiVMTracer, VmInstance, }; use zksync_shared_metrics::{InteractionType, TxStage, APP_METRICS}; -use zksync_state::{OwnedStorage, ReadStorage, StorageView, WriteStorage}; -use zksync_types::{vm_trace::Call, Transaction}; +use zksync_state::{OwnedStorage, ReadStorage, StorageView}; +use zksync_types::{vm::FastVmMode, vm_trace::Call, Transaction}; use zksync_utils::bytecode::CompressedBytecodeInfo; use super::{BatchExecutor, BatchExecutorHandle, Command, TxExecutionResult}; @@ -35,6 +35,7 @@ pub struct MainBatchExecutor { /// that in cases where the node is expected to process any transactions processed by the sequencer /// regardless of its configuration, this flag should be set to `true`. optional_bytecode_compression: bool, + fast_vm_mode: FastVmMode, } impl MainBatchExecutor { @@ -42,8 +43,18 @@ impl MainBatchExecutor { Self { save_call_traces, optional_bytecode_compression, + fast_vm_mode: FastVmMode::Old, } } + + pub fn set_fast_vm_mode(&mut self, fast_vm_mode: FastVmMode) { + if !matches!(fast_vm_mode, FastVmMode::Old) { + tracing::warn!( + "Running new VM with mode {fast_vm_mode:?}; this can lead to incorrect node behavior" + ); + } + self.fast_vm_mode = fast_vm_mode; + } } impl BatchExecutor for MainBatchExecutor { @@ -59,6 +70,7 @@ impl BatchExecutor for MainBatchExecutor { let executor = CommandReceiver { save_call_traces: self.save_call_traces, optional_bytecode_compression: self.optional_bytecode_compression, + fast_vm_mode: self.fast_vm_mode, commands: commands_receiver, }; @@ -86,6 +98,7 @@ impl BatchExecutor for MainBatchExecutor { struct CommandReceiver { save_call_traces: bool, optional_bytecode_compression: bool, + fast_vm_mode: FastVmMode, commands: mpsc::Receiver, } @@ -99,8 +112,12 @@ impl CommandReceiver { tracing::info!("Starting executing L1 batch #{}", &l1_batch_params.number); let storage_view = StorageView::new(secondary_storage).to_rc_ptr(); - - let mut vm = VmInstance::new(l1_batch_params, system_env, storage_view.clone()); + let mut vm = VmInstance::maybe_fast( + l1_batch_params, + system_env, + storage_view.clone(), + self.fast_vm_mode, + ); while let Some(cmd) = self.commands.blocking_recv() { match cmd { @@ -152,12 +169,15 @@ impl CommandReceiver { tracing::info!("State keeper exited with an unfinished L1 batch"); } - fn execute_tx( + fn execute_tx( &self, tx: &Transaction, vm: &mut VmInstance, ) -> TxExecutionResult { - // Save pre-`execute_next_tx` VM snapshot. 
+ // Executing a next transaction means that a previous transaction was either rolled back (in which case its snapshot + // was already removed), or that we build on top of it (in which case, it can be removed now). + vm.pop_snapshot_no_rollback(); + // Save pre-execution VM snapshot. vm.make_snapshot(); // Execute the transaction. @@ -191,13 +211,13 @@ impl CommandReceiver { } } - fn rollback_last_tx(&self, vm: &mut VmInstance) { + fn rollback_last_tx(&self, vm: &mut VmInstance) { let latency = KEEPER_METRICS.tx_execution_time[&TxExecutionStage::TxRollback].start(); vm.rollback_to_the_latest_snapshot(); latency.observe(); } - fn start_next_l2_block( + fn start_next_l2_block( &self, l2_block_env: L2BlockEnv, vm: &mut VmInstance, @@ -205,7 +225,7 @@ impl CommandReceiver { vm.start_new_l2_block(l2_block_env); } - fn finish_batch( + fn finish_batch( &self, vm: &mut VmInstance, ) -> FinishedL1Batch { @@ -225,7 +245,7 @@ impl CommandReceiver { /// Attempts to execute transaction with or without bytecode compression. /// If compression fails, the transaction will be re-executed without compression. - fn execute_tx_in_vm_with_optional_compression( + fn execute_tx_in_vm_with_optional_compression( &self, tx: &Transaction, vm: &mut VmInstance, @@ -243,9 +263,6 @@ impl CommandReceiver { // it means that there is no sense in polluting the space of compressed bytecodes, // and so we re-execute the transaction, but without compression. - // Saving the snapshot before executing - vm.make_snapshot(); - let call_tracer_result = Arc::new(OnceCell::default()); let tracer = if self.save_call_traces { vec![CallTracer::new(call_tracer_result.clone()).into_tracer_pointer()] @@ -257,7 +274,6 @@ impl CommandReceiver { vm.inspect_transaction_with_bytecode_compression(tracer.into(), tx.clone(), true) { let compressed_bytecodes = vm.get_last_tx_compressed_bytecodes(); - vm.pop_snapshot_no_rollback(); let trace = Arc::try_unwrap(call_tracer_result) .unwrap() @@ -265,7 +281,11 @@ impl CommandReceiver { .unwrap_or_default(); return (result, compressed_bytecodes, trace); } + + // Roll back to the snapshot just before the transaction execution taken in `Self::execute_tx()` + // and create a snapshot at the same VM state again. vm.rollback_to_the_latest_snapshot(); + vm.make_snapshot(); let call_tracer_result = Arc::new(OnceCell::default()); let tracer = if self.save_call_traces { @@ -292,7 +312,7 @@ impl CommandReceiver { /// Attempts to execute transaction with mandatory bytecode compression. /// If bytecode compression fails, the transaction will be rejected. 
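+    /// Used when bytecode compression is not optional for the transaction (cf. the optional-compression path above).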
- fn execute_tx_in_vm( + fn execute_tx_in_vm( &self, tx: &Transaction, vm: &mut VmInstance, diff --git a/core/node/state_keeper/src/batch_executor/tests/mod.rs b/core/node/state_keeper/src/batch_executor/tests/mod.rs index 4b36965895fd..ab9115991deb 100644 --- a/core/node/state_keeper/src/batch_executor/tests/mod.rs +++ b/core/node/state_keeper/src/batch_executor/tests/mod.rs @@ -2,7 +2,9 @@ use assert_matches::assert_matches; use test_casing::{test_casing, Product}; use zksync_dal::{ConnectionPool, Core}; use zksync_test_account::Account; -use zksync_types::{get_nonce_key, utils::storage_key_for_eth_balance, PriorityOpId}; +use zksync_types::{ + get_nonce_key, utils::storage_key_for_eth_balance, vm::FastVmMode, PriorityOpId, +}; use self::tester::{AccountLoadNextExecutable, StorageSnapshot, TestConfig, Tester}; use super::TxExecutionResult; @@ -41,13 +43,15 @@ impl StorageType { const ALL: [Self; 3] = [Self::AsyncRocksdbCache, Self::Rocksdb, Self::Postgres]; } +const FAST_VM_MODES: [FastVmMode; 3] = [FastVmMode::Old, FastVmMode::New, FastVmMode::Shadow]; + /// Checks that we can successfully execute a single L2 tx in batch executor on all storage types. -#[test_casing(3, StorageType::ALL)] +#[test_casing(9, Product((StorageType::ALL, FAST_VM_MODES)))] #[tokio::test] -async fn execute_l2_tx(storage_type: StorageType) { +async fn execute_l2_tx(storage_type: StorageType, vm_mode: FastVmMode) { let connection_pool = ConnectionPool::::constrained_test_pool(1).await; let mut alice = Account::random(); - let mut tester = Tester::new(connection_pool); + let mut tester = Tester::new(connection_pool, vm_mode); tester.genesis().await; tester.fund(&[alice.address()]).await; let mut executor = tester.create_batch_executor(storage_type).await; @@ -82,14 +86,9 @@ impl SnapshotRecoveryMutation { } } -const EXECUTE_L2_TX_AFTER_SNAPSHOT_RECOVERY_CASES: Product<( - [Option; 3], - [StorageType; 3], -)> = Product((SnapshotRecoveryMutation::ALL, StorageType::ALL)); - /// Tests that we can continue executing account transactions after emulating snapshot recovery. /// Test cases with a set `mutation` ensure that the VM executor correctly detects missing data (e.g., dropped account nonce). -#[test_casing(9, EXECUTE_L2_TX_AFTER_SNAPSHOT_RECOVERY_CASES)] +#[test_casing(9, Product((SnapshotRecoveryMutation::ALL, StorageType::ALL)))] #[tokio::test] async fn execute_l2_tx_after_snapshot_recovery( mutation: Option, @@ -106,7 +105,7 @@ async fn execute_l2_tx_after_snapshot_recovery( } let snapshot = storage_snapshot.recover(&connection_pool).await; - let mut tester = Tester::new(connection_pool); + let mut tester = Tester::new(connection_pool, FastVmMode::Old); let mut executor = tester .recover_batch_executor_custom(&storage_type, &snapshot) .await; @@ -120,12 +119,13 @@ async fn execute_l2_tx_after_snapshot_recovery( } /// Checks that we can successfully execute a single L1 tx in batch executor. +#[test_casing(3, FAST_VM_MODES)] #[tokio::test] -async fn execute_l1_tx() { +async fn execute_l1_tx(vm_mode: FastVmMode) { let connection_pool = ConnectionPool::::constrained_test_pool(1).await; let mut alice = Account::random(); - let mut tester = Tester::new(connection_pool); + let mut tester = Tester::new(connection_pool, vm_mode); tester.genesis().await; tester.fund(&[alice.address()]).await; @@ -142,12 +142,13 @@ async fn execute_l1_tx() { } /// Checks that we can successfully execute a single L2 tx and a single L1 tx in batch executor. 
+#[test_casing(3, FAST_VM_MODES)] #[tokio::test] -async fn execute_l2_and_l1_txs() { +async fn execute_l2_and_l1_txs(vm_mode: FastVmMode) { let connection_pool = ConnectionPool::::constrained_test_pool(1).await; let mut alice = Account::random(); - let mut tester = Tester::new(connection_pool); + let mut tester = Tester::new(connection_pool, vm_mode); tester.genesis().await; tester.fund(&[alice.address()]).await; let mut executor = tester @@ -167,12 +168,13 @@ async fn execute_l2_and_l1_txs() { } /// Checks that we can successfully rollback the transaction and execute it once again. +#[test_casing(3, FAST_VM_MODES)] #[tokio::test] -async fn rollback() { +async fn rollback(vm_mode: FastVmMode) { let connection_pool = ConnectionPool::::constrained_test_pool(1).await; let mut alice = Account::random(); - let mut tester = Tester::new(connection_pool); + let mut tester = Tester::new(connection_pool, vm_mode); tester.genesis().await; tester.fund(&[alice.address()]).await; @@ -213,12 +215,13 @@ async fn rollback() { } /// Checks that incorrect transactions are marked as rejected. +#[test_casing(3, FAST_VM_MODES)] #[tokio::test] -async fn reject_tx() { +async fn reject_tx(vm_mode: FastVmMode) { let connection_pool = ConnectionPool::::constrained_test_pool(1).await; let mut alice = Account::random(); - let mut tester = Tester::new(connection_pool); + let mut tester = Tester::new(connection_pool, vm_mode); tester.genesis().await; let mut executor = tester @@ -232,12 +235,13 @@ async fn reject_tx() { /// Checks that tx with too big gas limit is correctly processed. /// When processed in the bootloader, no more than 80M gas can be used within the execution context. +#[test_casing(3, FAST_VM_MODES)] #[tokio::test] -async fn too_big_gas_limit() { +async fn too_big_gas_limit(vm_mode: FastVmMode) { let connection_pool = ConnectionPool::::constrained_test_pool(1).await; let mut alice = Account::random(); - let mut tester = Tester::new(connection_pool); + let mut tester = Tester::new(connection_pool, vm_mode); tester.genesis().await; tester.fund(&[alice.address()]).await; let mut executor = tester @@ -252,12 +256,13 @@ async fn too_big_gas_limit() { } /// Checks that we can't execute the same transaction twice. +#[test_casing(3, FAST_VM_MODES)] #[tokio::test] -async fn tx_cant_be_reexecuted() { +async fn tx_cant_be_reexecuted(vm_mode: FastVmMode) { let connection_pool = ConnectionPool::::constrained_test_pool(1).await; let mut alice = Account::random(); - let mut tester = Tester::new(connection_pool); + let mut tester = Tester::new(connection_pool, vm_mode); tester.genesis().await; tester.fund(&[alice.address()]).await; let mut executor = tester @@ -274,12 +279,13 @@ async fn tx_cant_be_reexecuted() { } /// Checks that we can deploy and call the loadnext contract. +#[test_casing(3, FAST_VM_MODES)] #[tokio::test] -async fn deploy_and_call_loadtest() { +async fn deploy_and_call_loadtest(vm_mode: FastVmMode) { let connection_pool = ConnectionPool::::constrained_test_pool(1).await; let mut alice = Account::random(); - let mut tester = Tester::new(connection_pool); + let mut tester = Tester::new(connection_pool, vm_mode); tester.genesis().await; tester.fund(&[alice.address()]).await; let mut executor = tester @@ -304,12 +310,13 @@ async fn deploy_and_call_loadtest() { } /// Checks that a tx that is reverted by the VM still can be included into a batch. 
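+/// (Unlike rejected transactions, reverted ones still make it into the batch.)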
+#[test_casing(3, FAST_VM_MODES)] #[tokio::test] -async fn execute_reverted_tx() { +async fn execute_reverted_tx(vm_mode: FastVmMode) { let connection_pool = ConnectionPool::::constrained_test_pool(1).await; let mut alice = Account::random(); - let mut tester = Tester::new(connection_pool); + let mut tester = Tester::new(connection_pool, vm_mode); tester.genesis().await; tester.fund(&[alice.address()]).await; @@ -334,13 +341,14 @@ async fn execute_reverted_tx() { /// Runs the batch executor through a semi-realistic basic scenario: /// a batch with different operations, both successful and not. +#[test_casing(3, FAST_VM_MODES)] #[tokio::test] -async fn execute_realistic_scenario() { +async fn execute_realistic_scenario(vm_mode: FastVmMode) { let connection_pool = ConnectionPool::::constrained_test_pool(1).await; let mut alice = Account::random(); let mut bob = Account::random(); - let mut tester = Tester::new(connection_pool); + let mut tester = Tester::new(connection_pool, vm_mode); tester.genesis().await; tester.fund(&[alice.address()]).await; @@ -395,8 +403,9 @@ async fn execute_realistic_scenario() { } /// Checks that we handle the bootloader out of gas error on execution phase. +#[test_casing(3, FAST_VM_MODES)] #[tokio::test] -async fn bootloader_out_of_gas_for_any_tx() { +async fn bootloader_out_of_gas_for_any_tx(vm_mode: FastVmMode) { let connection_pool = ConnectionPool::::constrained_test_pool(1).await; let mut alice = Account::random(); @@ -406,6 +415,7 @@ async fn bootloader_out_of_gas_for_any_tx() { save_call_traces: false, vm_gas_limit: Some(10), validation_computational_gas_limit: u32::MAX, + fast_vm_mode: vm_mode, }, ); @@ -426,7 +436,7 @@ async fn bootloader_tip_out_of_gas() { let connection_pool = ConnectionPool::::constrained_test_pool(1).await; let mut alice = Account::random(); - let mut tester = Tester::new(connection_pool); + let mut tester = Tester::new(connection_pool, FastVmMode::Old); tester.genesis().await; tester.fund(&[alice.address()]).await; @@ -451,6 +461,7 @@ async fn bootloader_tip_out_of_gas() { - 10, ), validation_computational_gas_limit: u32::MAX, + fast_vm_mode: FastVmMode::Old, }); let mut second_executor = tester @@ -467,7 +478,7 @@ async fn catchup_rocksdb_cache() { let mut alice = Account::random(); let mut bob = Account::random(); - let mut tester = Tester::new(connection_pool); + let mut tester = Tester::new(connection_pool, FastVmMode::Old); tester.genesis().await; tester.fund(&[alice.address(), bob.address()]).await; diff --git a/core/node/state_keeper/src/batch_executor/tests/tester.rs b/core/node/state_keeper/src/batch_executor/tests/tester.rs index 961ccf9db16f..6730d427c67f 100644 --- a/core/node/state_keeper/src/batch_executor/tests/tester.rs +++ b/core/node/state_keeper/src/batch_executor/tests/tester.rs @@ -24,6 +24,7 @@ use zksync_types::{ storage_writes_deduplicator::StorageWritesDeduplicator, system_contracts::get_system_smart_contracts, utils::storage_key_for_standard_token_balance, + vm::FastVmMode, AccountTreeId, Address, Execute, L1BatchNumber, L2BlockNumber, PriorityOpId, ProtocolVersionId, StorageLog, Transaction, H256, L2_BASE_TOKEN_ADDRESS, U256, }; @@ -45,16 +46,18 @@ pub(super) struct TestConfig { pub(super) save_call_traces: bool, pub(super) vm_gas_limit: Option, pub(super) validation_computational_gas_limit: u32, + pub(super) fast_vm_mode: FastVmMode, } impl TestConfig { - pub(super) fn new() -> Self { + pub(super) fn new(fast_vm_mode: FastVmMode) -> Self { let config = StateKeeperConfig::for_tests(); Self { vm_gas_limit: 
None, save_call_traces: false, validation_computational_gas_limit: config.validation_computational_gas_limit, + fast_vm_mode, } } } @@ -71,8 +74,8 @@ pub(super) struct Tester { } impl Tester { - pub(super) fn new(pool: ConnectionPool<Core>) -> Self { - Self::with_config(pool, TestConfig::new()) + pub(super) fn new(pool: ConnectionPool<Core>, fast_vm_mode: FastVmMode) -> Self { + Self::with_config(pool, TestConfig::new(fast_vm_mode)) } pub(super) fn with_config(pool: ConnectionPool<Core>, config: TestConfig) -> Self { @@ -141,6 +144,8 @@ impl Tester { system_env: SystemEnv, ) -> BatchExecutorHandle { let mut batch_executor = MainBatchExecutor::new(self.config.save_call_traces, false); + batch_executor.set_fast_vm_mode(self.config.fast_vm_mode); + let (_stop_sender, stop_receiver) = watch::channel(false); let storage = storage_factory .access_storage(&stop_receiver, l1_batch_env.number - 1) @@ -440,7 +445,7 @@ impl StorageSnapshot { alice: &mut Account, transaction_count: u32, ) -> Self { - let mut tester = Tester::new(connection_pool.clone()); + let mut tester = Tester::new(connection_pool.clone(), FastVmMode::Old); tester.genesis().await; tester.fund(&[alice.address()]).await; diff --git a/core/node/state_keeper/src/io/tests/tester.rs b/core/node/state_keeper/src/io/tests/tester.rs index 28fcbd51822e..dc5e5f345d5a 100644 --- a/core/node/state_keeper/src/io/tests/tester.rs +++ b/core/node/state_keeper/src/io/tests/tester.rs @@ -9,9 +9,12 @@ use zksync_config::{ }; use zksync_contracts::BaseSystemContracts; use zksync_dal::{ConnectionPool, Core, CoreDal}; -use zksync_eth_client::{clients::MockEthereum, BaseFees}; +use zksync_eth_client::{clients::MockSettlementLayer, BaseFees}; use zksync_multivm::vm_latest::constants::BATCH_COMPUTATIONAL_GAS_LIMIT; -use zksync_node_fee_model::{l1_gas_price::GasAdjuster, MainNodeFeeInputProvider}; +use zksync_node_fee_model::{ + l1_gas_price::{GasAdjuster, GasAdjusterClient}, + MainNodeFeeInputProvider, +}; use zksync_node_genesis::create_genesis_l1_batch; use zksync_node_test_utils::{ create_l1_batch, create_l2_block, create_l2_transaction, execute_l2_transaction, @@ -54,9 +57,12 @@ impl Tester { .map(|base_fee_per_gas| BaseFees { base_fee_per_gas, base_fee_per_blob_gas: 1.into(), // Not relevant for the test + l2_pubdata_price: 0.into(), // Not relevant for the test }) .collect(); - let eth_client = MockEthereum::builder().with_fee_history(base_fees).build(); + let eth_client = MockSettlementLayer::builder() + .with_fee_history(base_fees) + .build(); let gas_adjuster_config = GasAdjusterConfig { default_priority_fee_per_gas: 10, @@ -71,10 +77,11 @@ impl Tester { num_samples_for_blob_base_fee_estimate: 10, internal_pubdata_pricing_multiplier: 1.0, max_blob_base_fee: None, + settlement_mode: Default::default(), }; GasAdjuster::new( - Box::new(eth_client.into_client()), + GasAdjusterClient::from_l1(Box::new(eth_client.into_client())), gas_adjuster_config, PubdataSendingMode::Calldata, self.commitment_mode, diff --git a/core/node/state_keeper/src/mempool_actor.rs b/core/node/state_keeper/src/mempool_actor.rs index d79d9ebb34a8..5003d75b6694 100644 --- a/core/node/state_keeper/src/mempool_actor.rs +++ b/core/node/state_keeper/src/mempool_actor.rs @@ -11,7 +11,7 @@ use zksync_multivm::utils::derive_base_fee_and_gas_per_pubdata; use zksync_node_fee_model::BatchFeeModelInputProvider; #[cfg(test)] use zksync_types::H256; -use zksync_types::{get_nonce_key, Address, Nonce, Transaction, VmVersion}; +use zksync_types::{get_nonce_key, vm::VmVersion, Address, Nonce, Transaction}; use
super::{metrics::KEEPER_METRICS, types::MempoolGuard}; diff --git a/core/node/state_keeper/src/testonly/mod.rs b/core/node/state_keeper/src/testonly/mod.rs index 465042a602df..e47e1182699d 100644 --- a/core/node/state_keeper/src/testonly/mod.rs +++ b/core/node/state_keeper/src/testonly/mod.rs @@ -45,9 +45,6 @@ pub(super) fn default_vm_batch_result() -> FinishedL1Batch { used_contract_hashes: vec![], user_l2_to_l1_logs: vec![], system_logs: vec![], - total_log_queries: 0, - cycles_used: 0, - deduplicated_events_logs: vec![], storage_refunds: Vec::new(), pubdata_costs: Vec::new(), }, diff --git a/core/node/tee_verifier_input_producer/src/lib.rs b/core/node/tee_verifier_input_producer/src/lib.rs index 0cd28ee5ce79..abd70542a42f 100644 --- a/core/node/tee_verifier_input_producer/src/lib.rs +++ b/core/node/tee_verifier_input_producer/src/lib.rs @@ -19,7 +19,7 @@ use zksync_prover_interface::inputs::{ }; use zksync_queued_job_processor::JobProcessor; use zksync_tee_verifier::Verify; -use zksync_types::{L1BatchNumber, L2ChainId}; +use zksync_types::{tee_types::TeeType, L1BatchNumber, L2ChainId}; use zksync_utils::u256_to_h256; use zksync_vm_utils::storage::L1BatchParamsProvider; @@ -241,7 +241,7 @@ impl JobProcessor for TeeVerifierInputProducer { .context("failed to mark job as successful for TeeVerifierInputProducer")?; transaction .tee_proof_generation_dal() - .insert_tee_proof_generation_job(job_id) + .insert_tee_proof_generation_job(job_id, TeeType::Sgx) .await?; transaction .commit() diff --git a/core/node/vm_runner/Cargo.toml b/core/node/vm_runner/Cargo.toml index 52a8e4676437..cc6313fa5727 100644 --- a/core/node/vm_runner/Cargo.toml +++ b/core/node/vm_runner/Cargo.toml @@ -22,7 +22,9 @@ zksync_utils.workspace = true zksync_prover_interface.workspace = true zksync_object_store.workspace = true zksync_vm_utils.workspace = true +zksync_health_check.workspace = true +serde.workspace = true tokio = { workspace = true, features = ["time"] } anyhow.workspace = true async-trait.workspace = true diff --git a/core/node/vm_runner/src/impls/bwip.rs b/core/node/vm_runner/src/impls/bwip.rs index 48f243cd9bc5..f7f8c099609f 100644 --- a/core/node/vm_runner/src/impls/bwip.rs +++ b/core/node/vm_runner/src/impls/bwip.rs @@ -6,7 +6,7 @@ use tokio::sync::watch; use zksync_dal::{Connection, ConnectionPool, Core, CoreDal}; use zksync_object_store::ObjectStore; use zksync_prover_interface::inputs::VMRunWitnessInputData; -use zksync_state_keeper::{MainBatchExecutor, StateKeeperOutputHandler, UpdatesManager}; +use zksync_state_keeper::{BatchExecutor, StateKeeperOutputHandler, UpdatesManager}; use zksync_types::{ block::StorageOracleInfo, witness_block_state::WitnessStorageState, L1BatchNumber, L2ChainId, H256, @@ -30,6 +30,7 @@ impl BasicWitnessInputProducer { pub async fn new( pool: ConnectionPool<Core>, object_store: Arc<dyn ObjectStore>, + batch_executor: Box<dyn BatchExecutor>, rocksdb_path: String, chain_id: L2ChainId, first_processed_batch: L1BatchNumber, @@ -47,13 +48,12 @@ impl BasicWitnessInputProducer { }; let (output_handler_factory, output_handler_factory_task) = ConcurrentOutputHandlerFactory::new(pool.clone(), io.clone(), output_handler_factory); - let batch_processor = MainBatchExecutor::new(false, false); let vm_runner = VmRunner::new( pool, Box::new(io), Arc::new(loader), Box::new(output_handler_factory), - Box::new(batch_processor), + batch_executor, ); Ok(( Self { vm_runner }, @@ -75,8 +75,7 @@ impl BasicWitnessInputProducer { } }
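With this change, `BasicWitnessInputProducer` no longer builds its own executor; callers inject any `Box<dyn BatchExecutor>`. A minimal sketch of producing such an executor, assuming `MainBatchExecutor` implements the `BatchExecutor` trait (which the substitution into `VmRunner::new` above implies):

```rust
use zksync_state_keeper::{BatchExecutor, MainBatchExecutor};
use zksync_types::vm::FastVmMode;

fn injected_executor(vm_mode: FastVmMode) -> Box<dyn BatchExecutor> {
    // Same constructor arguments as the removed
    // `MainBatchExecutor::new(false, false)` call site above.
    let mut executor = MainBatchExecutor::new(false, false);
    executor.set_fast_vm_mode(vm_mode);
    Box::new(executor)
}
```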
-/// A collections of tasks that need to be run in order for BWIP to work as -/// intended. +/// Collection of tasks that need to be run in order for BWIP to work as intended. #[derive(Debug)] pub struct BasicWitnessInputProducerTasks { /// Task that synchronizes storage with new available batches. diff --git a/core/node/vm_runner/src/impls/mod.rs b/core/node/vm_runner/src/impls/mod.rs index 2d982730498a..7f9869531c65 100644 --- a/core/node/vm_runner/src/impls/mod.rs +++ b/core/node/vm_runner/src/impls/mod.rs @@ -1,7 +1,13 @@ +//! Components powered by a VM runner. + mod bwip; +mod playground; mod protective_reads; -pub use bwip::{ - BasicWitnessInputProducer, BasicWitnessInputProducerIo, BasicWitnessInputProducerTasks, +pub use self::{ + bwip::{ + BasicWitnessInputProducer, BasicWitnessInputProducerIo, BasicWitnessInputProducerTasks, + }, + playground::{VmPlayground, VmPlaygroundIo, VmPlaygroundLoaderTask, VmPlaygroundTasks}, + protective_reads::{ProtectiveReadsIo, ProtectiveReadsWriter, ProtectiveReadsWriterTasks}, }; -pub use protective_reads::{ProtectiveReadsIo, ProtectiveReadsWriter, ProtectiveReadsWriterTasks}; diff --git a/core/node/vm_runner/src/impls/playground.rs b/core/node/vm_runner/src/impls/playground.rs new file mode 100644 index 000000000000..4fb140431df6 --- /dev/null +++ b/core/node/vm_runner/src/impls/playground.rs @@ -0,0 +1,333 @@ +use std::{ + io, + path::{Path, PathBuf}, + sync::Arc, +}; + +use anyhow::Context as _; +use async_trait::async_trait; +use serde::Serialize; +use tokio::{ + fs, + sync::{oneshot, watch}, +}; +use zksync_dal::{Connection, ConnectionPool, Core, CoreDal}; +use zksync_health_check::{Health, HealthStatus, HealthUpdater, ReactiveHealthCheck}; +use zksync_state::RocksdbStorage; +use zksync_state_keeper::{MainBatchExecutor, StateKeeperOutputHandler, UpdatesManager}; +use zksync_types::{vm::FastVmMode, L1BatchNumber, L2ChainId}; + +use crate::{ + ConcurrentOutputHandlerFactory, ConcurrentOutputHandlerFactoryTask, OutputHandlerFactory, + StorageSyncTask, VmRunner, VmRunnerIo, VmRunnerStorage, +}; + +#[derive(Debug, Serialize)] +struct VmPlaygroundHealth { + vm_mode: FastVmMode, + last_processed_batch: L1BatchNumber, +} + +impl From<VmPlaygroundHealth> for Health { + fn from(health: VmPlaygroundHealth) -> Self { + Health::from(HealthStatus::Ready).with_details(health) + } +} + +/// Virtual machine playground. Does not persist anything in Postgres; instead, keeps an L1 batch cursor as a plain text file in the RocksDB directory (so that the playground doesn't repeatedly process the same batches after a restart). +#[derive(Debug)] +pub struct VmPlayground { + pool: ConnectionPool<Core>, + batch_executor: MainBatchExecutor, + rocksdb_path: String, + chain_id: L2ChainId, + io: VmPlaygroundIo, + loader_task_sender: oneshot::Sender<StorageSyncTask<VmPlaygroundIo>>, + output_handler_factory: + ConcurrentOutputHandlerFactory<VmPlaygroundIo, VmPlaygroundOutputHandler>, + reset_to_batch: Option<L1BatchNumber>, +}
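The playground is organized around `FastVmMode` (imported from `zksync_types::vm` above). A sketch of the mode semantics as they are exercised in this diff — tests pin `FastVmMode::Old` where legacy VM behavior matters, while the playground runs `FastVmMode::Shadow` to surface divergences; treat the descriptions as inferred from usage, not authoritative documentation:

```rust
use zksync_types::vm::FastVmMode;

fn describe(mode: FastVmMode) -> &'static str {
    match mode {
        FastVmMode::Old => "execute batches on the legacy VM only",
        FastVmMode::New => "execute batches on the new fast VM only",
        // Shadow mode runs both VMs on each batch and cross-checks the results,
        // which is how the playground detects old/new divergences.
        FastVmMode::Shadow => "run both VMs and compare their outputs",
    }
}

fn main() {
    println!("{}", describe(FastVmMode::Shadow));
}
```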
+ +impl VmPlayground { + /// Creates a new playground. + pub async fn new( + pool: ConnectionPool<Core>, + vm_mode: FastVmMode, + rocksdb_path: String, + chain_id: L2ChainId, + first_processed_batch: L1BatchNumber, + reset_state: bool, + ) -> anyhow::Result<(Self, VmPlaygroundTasks)> { + tracing::info!( + "Starting VM playground with mode {vm_mode:?}, first processed batch is #{first_processed_batch} \ + (reset processing: {reset_state:?})" + ); + + let cursor_file_path = Path::new(&rocksdb_path).join("__vm_playground_cursor"); + let latest_processed_batch = VmPlaygroundIo::read_cursor(&cursor_file_path).await?; + tracing::info!("Latest processed batch: {latest_processed_batch:?}"); + let latest_processed_batch = if reset_state { + first_processed_batch + } else { + latest_processed_batch.unwrap_or(first_processed_batch) + }; + + let mut batch_executor = MainBatchExecutor::new(false, false); + batch_executor.set_fast_vm_mode(vm_mode); + + let io = VmPlaygroundIo { + cursor_file_path, + vm_mode, + latest_processed_batch: Arc::new(watch::channel(latest_processed_batch).0), + health_updater: Arc::new(ReactiveHealthCheck::new("vm_playground").1), + }; + let (output_handler_factory, output_handler_factory_task) = + ConcurrentOutputHandlerFactory::new( + pool.clone(), + io.clone(), + VmPlaygroundOutputHandler, + ); + let (loader_task_sender, loader_task_receiver) = oneshot::channel(); + + let this = Self { + pool, + batch_executor, + rocksdb_path, + chain_id, + io, + loader_task_sender, + output_handler_factory, + reset_to_batch: reset_state.then_some(first_processed_batch), + }; + Ok(( + this, + VmPlaygroundTasks { + loader_task: VmPlaygroundLoaderTask { + inner: loader_task_receiver, + }, + output_handler_factory_task, + }, + )) + } + + /// Returns a health check for this component. + pub fn health_check(&self) -> ReactiveHealthCheck { + self.io.health_updater.subscribe() + } + + #[cfg(test)] + pub(crate) fn io(&self) -> &VmPlaygroundIo { + &self.io + } + + #[tracing::instrument(skip(self), err)] + async fn reset_rocksdb_cache(&self, last_retained_batch: L1BatchNumber) -> anyhow::Result<()> { + let builder = RocksdbStorage::builder(self.rocksdb_path.as_ref()).await?; + let current_l1_batch = builder.l1_batch_number().await; + if current_l1_batch <= Some(last_retained_batch) { + tracing::info!("Resetting RocksDB cache is not required: its current batch #{current_l1_batch:?} is lower than the target"); + return Ok(()); + } + + tracing::info!("Resetting RocksDB cache from batch #{current_l1_batch:?}"); + let mut conn = self.pool.connection_tagged("vm_playground").await?; + builder.roll_back(&mut conn, last_retained_batch).await + }
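One subtlety in `reset_rocksdb_cache` above: `builder.l1_batch_number().await` yields an `Option`, and the early return leans on Rust's `Option` ordering, in which `None` compares less than any `Some(_)`; an empty cache therefore also skips the rollback. The predicate in isolation, with plain `u32`s standing in for `L1BatchNumber`:

```rust
// Mirrors the `current_l1_batch <= Some(last_retained_batch)` check above.
fn needs_rollback(current: Option<u32>, last_retained: u32) -> bool {
    !(current <= Some(last_retained))
}

fn main() {
    assert!(!needs_rollback(None, 5)); // empty cache: nothing to roll back
    assert!(!needs_rollback(Some(5), 5)); // exactly at the target: keep as is
    assert!(needs_rollback(Some(8), 5)); // ahead of the target: roll back
}
```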
+ + /// Continuously loads new available batches and writes the corresponding data + /// produced by that batch. + /// + /// # Errors + /// + /// Propagates RocksDB and Postgres errors. + pub async fn run(self, stop_receiver: &watch::Receiver<bool>) -> anyhow::Result<()> { + fs::create_dir_all(&self.rocksdb_path) + .await + .with_context(|| format!("cannot create dir `{}`", self.rocksdb_path))?; + + if let Some(reset_to_batch) = self.reset_to_batch { + self.io.health_updater.update(HealthStatus::Affected.into()); + + self.reset_rocksdb_cache(reset_to_batch).await?; + self.io + .write_cursor(reset_to_batch) + .await + .context("failed resetting VM playground state")?; + tracing::info!("Finished resetting playground state"); + } + + self.io.update_health(); + + let (loader, loader_task) = VmRunnerStorage::new( + self.pool.clone(), + self.rocksdb_path, + self.io.clone(), + self.chain_id, + ) + .await?; + self.loader_task_sender.send(loader_task).ok(); + let vm_runner = VmRunner::new( + self.pool, + Box::new(self.io), + Arc::new(loader), + Box::new(self.output_handler_factory), + Box::new(self.batch_executor), + ); + vm_runner.run(stop_receiver).await + } +} + +/// Loader task for the VM playground. +#[derive(Debug)] +pub struct VmPlaygroundLoaderTask { + inner: oneshot::Receiver<StorageSyncTask<VmPlaygroundIo>>, +} + +impl VmPlaygroundLoaderTask { + /// Runs a task until a stop signal is received. + pub async fn run(self, mut stop_receiver: watch::Receiver<bool>) -> anyhow::Result<()> { + let task = tokio::select! { + biased; + _ = stop_receiver.changed() => return Ok(()), + res = self.inner => match res { + Ok(task) => task, + Err(_) => anyhow::bail!("VM playground stopped before spawning loader task"), + } + }; + task.run(stop_receiver).await + } +} + +/// Collection of tasks that need to be run in order for the VM playground to work as intended. +#[derive(Debug)] +pub struct VmPlaygroundTasks { + /// Task that synchronizes storage with new available batches. + pub loader_task: VmPlaygroundLoaderTask, + /// Task that handles output from processed batches. + pub output_handler_factory_task: ConcurrentOutputHandlerFactoryTask<VmPlaygroundIo>, +}
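`VmPlaygroundLoaderTask` above wraps a task that only comes into existence later (the loader is constructed inside `run()` and handed over through a oneshot channel), while racing against the stop signal. The same pattern in isolation, with a string standing in for the real loader task:

```rust
use tokio::sync::{oneshot, watch};

async fn run_when_ready(
    inner: oneshot::Receiver<&'static str>,
    mut stop_receiver: watch::Receiver<bool>,
) -> anyhow::Result<()> {
    let task = tokio::select! {
        biased; // check the stop signal before polling the channel
        _ = stop_receiver.changed() => return Ok(()),
        res = inner => match res {
            Ok(task) => task,
            // The sender was dropped, i.e. the task was never created.
            Err(_) => anyhow::bail!("stopped before the task was spawned"),
        }
    };
    println!("running {task}");
    Ok(())
}

#[tokio::main]
async fn main() -> anyhow::Result<()> {
    let (task_sender, task_receiver) = oneshot::channel();
    let (_stop_sender, stop_receiver) = watch::channel(false);
    task_sender.send("loader").ok();
    run_when_ready(task_receiver, stop_receiver).await
}
```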
+ +/// I/O powering [`VmPlayground`]. +#[derive(Debug, Clone)] +pub struct VmPlaygroundIo { + cursor_file_path: PathBuf, + vm_mode: FastVmMode, + // We don't read this value from the cursor file in the `VmRunnerIo` implementation because reads / writes + // aren't guaranteed to be atomic. + latest_processed_batch: Arc<watch::Sender<L1BatchNumber>>, + health_updater: Arc<HealthUpdater>, +} + +impl VmPlaygroundIo { + async fn read_cursor(cursor_file_path: &Path) -> anyhow::Result<Option<L1BatchNumber>> { + match fs::read_to_string(cursor_file_path).await { + Ok(buffer) => { + let cursor = buffer + .parse::<u32>() + .with_context(|| format!("invalid cursor value: {buffer}"))?; + Ok(Some(L1BatchNumber(cursor))) + } + Err(err) if err.kind() == io::ErrorKind::NotFound => Ok(None), + Err(err) => Err(anyhow::Error::new(err).context(format!( + "failed reading VM playground cursor from `{}`", + cursor_file_path.display() + ))), + } + } + + async fn write_cursor(&self, cursor: L1BatchNumber) -> anyhow::Result<()> { + let buffer = cursor.to_string(); + fs::write(&self.cursor_file_path, buffer) + .await + .with_context(|| { + format!( + "failed writing VM playground cursor to `{}`", + self.cursor_file_path.display() + ) + }) + } + + fn update_health(&self) { + let health = VmPlaygroundHealth { + vm_mode: self.vm_mode, + last_processed_batch: *self.latest_processed_batch.borrow(), + }; + self.health_updater.update(health.into()); + } + + #[cfg(test)] + pub(crate) fn subscribe_to_completed_batches(&self) -> watch::Receiver<L1BatchNumber> { + self.latest_processed_batch.subscribe() + } +} + +#[async_trait] +impl VmRunnerIo for VmPlaygroundIo { + fn name(&self) -> &'static str { + "vm_playground" + } + + async fn latest_processed_batch( + &self, + _conn: &mut Connection<'_, Core>, + ) -> anyhow::Result<L1BatchNumber> { + Ok(*self.latest_processed_batch.borrow()) + } + + async fn last_ready_to_be_loaded_batch( + &self, + conn: &mut Connection<'_, Core>, + ) -> anyhow::Result<L1BatchNumber> { + let sealed_l1_batch = conn + .blocks_dal() + .get_sealed_l1_batch_number() + .await? + .context("no L1 batches in Postgres")?; + let last_processed_l1_batch = self.latest_processed_batch(conn).await?; + Ok(sealed_l1_batch.min(last_processed_l1_batch + 1)) + }
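`last_ready_to_be_loaded_batch` above caps the runner's read-ahead at one batch past the cursor, bounded by what has been sealed in Postgres. The arithmetic in isolation, again with `u32`s standing in for `L1BatchNumber`:

```rust
// `sealed` is the last L1 batch sealed in Postgres; `last_processed` is the playground cursor.
fn last_ready_to_be_loaded(sealed: u32, last_processed: u32) -> u32 {
    sealed.min(last_processed + 1)
}

fn main() {
    assert_eq!(last_ready_to_be_loaded(10, 3), 4); // the next batch is ready to load
    assert_eq!(last_ready_to_be_loaded(4, 4), 4); // fully caught up; don't run ahead of Postgres
}
```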
+ + async fn mark_l1_batch_as_processing( + &self, + _conn: &mut Connection<'_, Core>, + l1_batch_number: L1BatchNumber, + ) -> anyhow::Result<()> { + tracing::info!("Started processing L1 batch #{l1_batch_number}"); + Ok(()) + } + + async fn mark_l1_batch_as_completed( + &self, + _conn: &mut Connection<'_, Core>, + l1_batch_number: L1BatchNumber, + ) -> anyhow::Result<()> { + tracing::info!("Finished processing L1 batch #{l1_batch_number}"); + self.write_cursor(l1_batch_number).await?; + // We should only update the in-memory value after the write to the cursor file succeeded. + self.latest_processed_batch.send_replace(l1_batch_number); + self.update_health(); + Ok(()) + } +} + +#[derive(Debug)] +struct VmPlaygroundOutputHandler; + +#[async_trait] +impl StateKeeperOutputHandler for VmPlaygroundOutputHandler { + async fn handle_l2_block(&mut self, updates_manager: &UpdatesManager) -> anyhow::Result<()> { + tracing::trace!("Processed L2 block #{}", updates_manager.l2_block.number); + Ok(()) + } +} + +#[async_trait] +impl OutputHandlerFactory for VmPlaygroundOutputHandler { + async fn create_handler( + &mut self, + _l1_batch_number: L1BatchNumber, + ) -> anyhow::Result<Box<dyn StateKeeperOutputHandler>> { + Ok(Box::new(Self)) + } +}
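The health reporting above follows the `zksync_health_check` pattern: `ReactiveHealthCheck::new` hands back a pair whose second element is the updater (hence the `.1` taken in `VmPlayground::new()`), while the check side is what gets registered for aggregation. A minimal sketch under that assumption:

```rust
use zksync_health_check::{HealthStatus, ReactiveHealthCheck};

fn main() {
    // `.0` is the check, `.1` is the updater.
    let (health_check, health_updater) = ReactiveHealthCheck::new("vm_playground");
    health_updater.update(HealthStatus::Ready.into());
    // `health_check` would be exposed on the node's `/health` endpoint as the
    // `vm_playground` component entry (as the integration test in this diff expects).
    drop(health_check);
}
```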
diff --git a/core/node/vm_runner/src/lib.rs b/core/node/vm_runner/src/lib.rs index b252eebcbb1f..03e3f43baedc 100644 --- a/core/node/vm_runner/src/lib.rs +++ b/core/node/vm_runner/src/lib.rs @@ -3,7 +3,7 @@ #![warn(missing_debug_implementations, missing_docs)] -mod impls; +pub mod impls; mod io; mod output_handler; mod process; @@ -13,13 +13,11 @@ mod metrics; #[cfg(test)] mod tests; -pub use impls::{ - BasicWitnessInputProducer, BasicWitnessInputProducerIo, BasicWitnessInputProducerTasks, - ProtectiveReadsIo, ProtectiveReadsWriter, ProtectiveReadsWriterTasks, +pub use self::{ + io::VmRunnerIo, + output_handler::{ + ConcurrentOutputHandlerFactory, ConcurrentOutputHandlerFactoryTask, OutputHandlerFactory, + }, + process::VmRunner, + storage::{BatchExecuteData, StorageSyncTask, VmRunnerStorage}, }; -pub use io::VmRunnerIo; -pub use output_handler::{ - ConcurrentOutputHandlerFactory, ConcurrentOutputHandlerFactoryTask, OutputHandlerFactory, -}; -pub use process::VmRunner; -pub use storage::{BatchExecuteData, StorageSyncTask, VmRunnerStorage}; diff --git a/core/node/vm_runner/src/storage.rs b/core/node/vm_runner/src/storage.rs index 75ed4cb57a9c..1c83b6525c7b 100644 --- a/core/node/vm_runner/src/storage.rs +++ b/core/node/vm_runner/src/storage.rs @@ -57,8 +57,8 @@ struct BatchData { /// Abstraction for VM runner's storage layer that provides two main features: /// /// 1. A [`ReadStorageFactory`] implementation backed by either Postgres or RocksDB (if it's -/// caught up). Always initialized as a `Postgres` variant and is then mutated into `Rocksdb` -/// once RocksDB cache is caught up. +/// caught up). Always initialized as a `Postgres` variant and is then mutated into `Rocksdb` +/// once RocksDB cache is caught up. /// 2. Loads data needed to re-execute the next unprocessed L1 batch. /// /// Users of `VmRunnerStorage` are not supposed to retain storage access to batches that are less diff --git a/core/node/vm_runner/src/tests/mod.rs b/core/node/vm_runner/src/tests/mod.rs index e9dbebfa24d5..4cb2d26f6bd6 100644 --- a/core/node/vm_runner/src/tests/mod.rs +++ b/core/node/vm_runner/src/tests/mod.rs @@ -28,6 +28,7 @@ use super::{BatchExecuteData, OutputHandlerFactory, VmRunnerIo}; use crate::storage::{load_batch_execute_data, StorageLoader}; mod output_handler; +mod playground; mod process; mod storage; mod storage_writer; @@ -306,11 +307,12 @@ async fn store_l1_batches( digest.push_tx_hash(tx.hash()); new_l2_block.hash = digest.finalize(ProtocolVersionId::latest()); - l2_block_number += 1; new_l2_block.base_system_contracts_hashes = contract_hashes; new_l2_block.l2_tx_count = 1; conn.blocks_dal().insert_l2_block(&new_l2_block).await?; last_l2_block_hash = new_l2_block.hash; + l2_block_number += 1; + let tx_result = execute_l2_transaction(tx.clone()); conn.transactions_dal() .mark_txs_as_executed_in_l2_block( @@ -330,9 +332,9 @@ async fn store_l1_batches( last_l2_block_hash, ); fictive_l2_block.hash = digest.finalize(ProtocolVersionId::latest()); - l2_block_number += 1; conn.blocks_dal().insert_l2_block(&fictive_l2_block).await?; last_l2_block_hash = fictive_l2_block.hash; + l2_block_number += 1; let header = L1BatchHeader::new( l1_batch_number, diff --git a/core/node/vm_runner/src/tests/playground.rs b/core/node/vm_runner/src/tests/playground.rs new file mode 100644 index 000000000000..c4111f737418 --- /dev/null +++ b/core/node/vm_runner/src/tests/playground.rs @@ -0,0 +1,139 @@ +use test_casing::test_casing; +use tokio::sync::watch; +use zksync_health_check::HealthStatus; +use zksync_node_genesis::{insert_genesis_batch, GenesisParams}; +use zksync_state::RocksdbStorage; +use zksync_types::vm::FastVmMode; + +use super::*; +use crate::impls::VmPlayground; + +async fn run_playground( + pool: ConnectionPool<Core>, + rocksdb_dir: &tempfile::TempDir, + reset_state: bool, ) { + let mut conn = pool.connection().await.unwrap(); + let genesis_params = GenesisParams::mock(); + if conn.blocks_dal().is_genesis_needed().await.unwrap() { + insert_genesis_batch(&mut conn, &genesis_params) + .await + .unwrap(); + + // Generate some batches and persist them in Postgres + let mut accounts = [Account::random()]; + fund(&mut conn, &accounts).await; + store_l1_batches( + &mut conn, + 1..=1, // TODO: test on >1 batch + genesis_params.base_system_contracts().hashes(), + &mut accounts, + ) + .await + .unwrap(); + } + + let (playground, playground_tasks) = VmPlayground::new( + pool.clone(), + FastVmMode::Shadow, + rocksdb_dir.path().to_str().unwrap().to_owned(), + genesis_params.config().l2_chain_id, + L1BatchNumber(0), + reset_state, + ) + .await + .unwrap(); + + let (stop_sender, stop_receiver) = watch::channel(false); + let playground_io = playground.io().clone(); + assert_eq!( + playground_io + .latest_processed_batch(&mut conn) + .await + .unwrap(), + L1BatchNumber(0) + ); + assert_eq!( + playground_io + .last_ready_to_be_loaded_batch(&mut conn) + .await + .unwrap(), + L1BatchNumber(1) + ); + let mut health_check = playground.health_check(); + + let mut completed_batches = playground_io.subscribe_to_completed_batches(); + let task_handles = [ + tokio::spawn(playground_tasks.loader_task.run(stop_receiver.clone())), + tokio::spawn( + playground_tasks + .output_handler_factory_task + .run(stop_receiver.clone()), + ), + tokio::spawn(async move { playground.run(&stop_receiver).await }), + ];
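The test below blocks on `tokio::sync::watch::Receiver::wait_for`, which resolves once the channel's current value satisfies a predicate — that is how it waits for the playground cursor (and health details) to reach batch #1. The same mechanism in isolation:

```rust
use tokio::sync::watch;

#[tokio::main]
async fn main() {
    let (sender, mut receiver) = watch::channel(0_u32);
    tokio::spawn(async move {
        // Simulates the playground bumping its cursor once a batch completes.
        sender.send_replace(1);
    });
    // Resolves as soon as the observed value satisfies the predicate.
    let value = receiver.wait_for(|&batch| batch == 1).await.unwrap();
    assert_eq!(*value, 1);
}
```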
+ // Wait until all batches are processed. + completed_batches + .wait_for(|&number| number == L1BatchNumber(1)) + .await + .unwrap(); + health_check + .wait_for(|health| { + if !matches!(health.status(), HealthStatus::Ready) { + return false; + } + let health_details = health.details().unwrap(); + assert_eq!(health_details["vm_mode"], "shadow"); + health_details["last_processed_batch"] == 1_u64 + }) + .await; + + // Check that playground I/O works correctly. + assert_eq!( + playground_io + .latest_processed_batch(&mut conn) + .await + .unwrap(), + L1BatchNumber(1) + ); + // There's no batch #2 in storage + assert_eq!( + playground_io + .last_ready_to_be_loaded_batch(&mut conn) + .await + .unwrap(), + L1BatchNumber(1) + ); + + stop_sender.send_replace(true); + for task_handle in task_handles { + task_handle.await.unwrap().unwrap(); + } +} + +#[test_casing(2, [false, true])] +#[tokio::test] +async fn vm_playground_basics(reset_state: bool) { + let pool = ConnectionPool::test_pool().await; + let rocksdb_dir = tempfile::TempDir::new().unwrap(); + run_playground(pool, &rocksdb_dir, reset_state).await; +} + +#[tokio::test] +async fn resetting_playground_state() { + let pool = ConnectionPool::test_pool().await; + let rocksdb_dir = tempfile::TempDir::new().unwrap(); + run_playground(pool.clone(), &rocksdb_dir, false).await; + + // Manually catch up RocksDB to Postgres to ensure that resetting it is not trivial. + let (_stop_sender, stop_receiver) = watch::channel(false); + let mut conn = pool.connection().await.unwrap(); + RocksdbStorage::builder(rocksdb_dir.path()) + .await + .unwrap() + .synchronize(&mut conn, &stop_receiver, None) + .await + .unwrap(); + + run_playground(pool.clone(), &rocksdb_dir, true).await; +} diff --git a/core/tests/loadnext/src/account/tx_command_executor.rs b/core/tests/loadnext/src/account/tx_command_executor.rs index b085219060b7..2a916564fd61 100644 --- a/core/tests/loadnext/src/account/tx_command_executor.rs +++ b/core/tests/loadnext/src/account/tx_command_executor.rs @@ -1,6 +1,5 @@ use std::time::Instant; -use zksync_eth_client::EthInterface; use zksync_system_constants::MAX_L1_TRANSACTION_GAS_LIMIT; use zksync_types::{ api::{BlockNumber, TransactionReceipt}, diff --git a/core/tests/loadnext/src/executor.rs b/core/tests/loadnext/src/executor.rs index 48d90f19c1d7..a573583ed318 100644 --- a/core/tests/loadnext/src/executor.rs +++ b/core/tests/loadnext/src/executor.rs @@ -2,7 +2,7 @@ use std::sync::Arc; use anyhow::anyhow; use futures::{channel::mpsc, future, SinkExt}; -use zksync_eth_client::{EthInterface, Options}; +use zksync_eth_client::Options; use zksync_eth_signer::PrivateKeySigner; use zksync_system_constants::MAX_L1_TRANSACTION_GAS_LIMIT; use zksync_types::{ diff --git a/core/tests/loadnext/src/sdk/ethereum/mod.rs b/core/tests/loadnext/src/sdk/ethereum/mod.rs index 31fcc5269774..4b7bb00a3080 100644 --- a/core/tests/loadnext/src/sdk/ethereum/mod.rs +++ b/core/tests/loadnext/src/sdk/ethereum/mod.rs @@ -131,7 +131,7 @@ impl EthereumProvider { &self.eth_client } - pub fn query_client(&self) -> &DynClient<L1> { + pub fn query_client(&self) -> &dyn EthInterface { self.eth_client.as_ref() } diff --git a/core/tests/loadnext/src/sdk/operations/deploy_contract.rs b/core/tests/loadnext/src/sdk/operations/deploy_contract.rs index 3b4c7a5eb53f..161d156a53e9 100644 --- a/core/tests/loadnext/src/sdk/operations/deploy_contract.rs +++ b/core/tests/loadnext/src/sdk/operations/deploy_contract.rs @@ -3,10 +3,11 @@ use zksync_types::{ l2::L2Tx, transaction_request::PaymasterParams, Execute,
Nonce, CONTRACT_DEPLOYER_ADDRESS, U256, }; use zksync_utils::bytecode::hash_bytecode; +use zksync_web3_decl::namespaces::EthNamespaceClient; use crate::sdk::{ error::ClientError, operations::SyncTransactionHandle, wallet::Wallet, zksync_types::fee::Fee, - EthNamespaceClient, ZksNamespaceClient, + ZksNamespaceClient, }; pub struct DeployContractBuilder<'a, S: EthereumSigner, P> { diff --git a/core/tests/ts-integration/src/context-owner.ts b/core/tests/ts-integration/src/context-owner.ts index 634e8c950a69..6cc2bed0a8dd 100644 --- a/core/tests/ts-integration/src/context-owner.ts +++ b/core/tests/ts-integration/src/context-owner.ts @@ -2,7 +2,7 @@ import * as zksync from 'zksync-ethers'; import * as ethers from 'ethers'; import { BigNumberish } from 'ethers'; -import { TestContext, TestEnvironment, TestWallets } from './types'; +import { NodeMode, TestContext, TestEnvironment, TestWallets } from './types'; import { lookupPrerequisites } from './prerequisites'; import { Reporter } from './reporter'; import { scaledGasPrice } from './helpers'; @@ -541,6 +541,64 @@ export class TestContextOwner { this.reporter.finishAction(); } + /** + * Waits until the VM playground processes all L1 batches. If the playground runs the new VM in the shadow mode, this means + * that there is no divergence between old and new VM execution. Outputs a warning if the VM playground isn't run, or runs in a mode other than shadow. + */ + private async waitForVmPlayground() { + while (true) { + const lastProcessedBatch = await this.lastPlaygroundBatch(); + if (lastProcessedBatch === undefined) { + this.reporter.warn('The node does not run VM playground; run it to check old / new VM divergence'); + break; + } + const lastNodeBatch = await this.l2Provider.getL1BatchNumber(); + + this.reporter.debug(`VM playground progress: L1 batch #${lastProcessedBatch} / ${lastNodeBatch}`); + if (lastProcessedBatch >= lastNodeBatch) { + break; + } + await zksync.utils.sleep(500); + } + } + + /** + * Returns the number of the last L1 batch processed by the VM playground, taking it from the node health endpoint. + * Returns `undefined` if the VM playground isn't run or isn't in the shadow mode. + */ + private async lastPlaygroundBatch() { + interface VmPlaygroundHealth { + readonly status: string; + readonly details?: { + vm_mode?: string; + last_processed_batch?: number; + }; + } + + interface NodeHealth { + readonly components: { + vm_playground?: VmPlaygroundHealth; + }; + } + + const healthcheckPort = process.env.API_HEALTHCHECK_PORT ?? '3071'; + const nodeHealth = (await (await fetch(`http://127.0.0.1:${healthcheckPort}/health`)).json()) as NodeHealth; + const playgroundHealth = nodeHealth.components.vm_playground; + if (playgroundHealth === undefined) { + return undefined; + } + if (playgroundHealth.status !== 'ready') { + throw new Error(`Unexpected VM playground health status: ${playgroundHealth.status}`); + } + if (playgroundHealth.details?.vm_mode !== 'shadow') { + this.reporter.warn( + `VM playground mode is '${playgroundHealth.details?.vm_mode}'; should be set to 'shadow' to check VM divergence` + ); + return undefined; + } + return playgroundHealth.details?.last_processed_batch ?? 0; + } + /** * Performs context deinitialization. */ @@ -548,10 +606,16 @@ export class TestContextOwner { // Reset the reporter context. this.reporter = new Reporter(); try { + if (this.env.nodeMode == NodeMode.Main && this.env.network === 'localhost') { + // Check that the VM execution hasn't diverged using the VM playground.
The component and thus the main node + // will crash on divergence, so we just need to make sure that the test doesn't exit before the VM playground + // processes all batches on the node. + this.reporter.startAction('Waiting for VM playground to catch up'); + await this.waitForVmPlayground(); + this.reporter.finishAction(); + } this.reporter.startAction(`Tearing down the context`); - await this.collectFunds(); - this.reporter.finishAction(); } catch (error: any) { // Report the issue to the console and mark the last action as failed. diff --git a/core/tests/ts-integration/src/reporter.ts b/core/tests/ts-integration/src/reporter.ts index 114ff2a7f5cf..903ff3101ef9 100644 --- a/core/tests/ts-integration/src/reporter.ts +++ b/core/tests/ts-integration/src/reporter.ts @@ -88,8 +88,8 @@ export class Reporter { /** * Prints an error message to the console. */ - error(message: string) { - console.log(this.indent(`${errorPrefix('Error:')}: ${fail(message)}`)); + error(message: string, ...args: any[]) { + console.log(this.indent(`${errorPrefix('Error:')}: ${fail(message)}`), ...args); } /** diff --git a/core/tests/vm-benchmark/Cargo.toml b/core/tests/vm-benchmark/Cargo.toml index f5f85b3b4d29..efbc08a957a6 100644 --- a/core/tests/vm-benchmark/Cargo.toml +++ b/core/tests/vm-benchmark/Cargo.toml @@ -27,6 +27,10 @@ harness = false name = "iai" harness = false +[[bench]] +name = "fill_bootloader" +harness = false + [[bin]] name = "iai_results_to_prometheus" path = "src/iai_results_to_prometheus.rs" diff --git a/core/tests/vm-benchmark/benches/fill_bootloader.rs b/core/tests/vm-benchmark/benches/fill_bootloader.rs new file mode 100644 index 000000000000..fac422c82375 --- /dev/null +++ b/core/tests/vm-benchmark/benches/fill_bootloader.rs @@ -0,0 +1,23 @@ +use std::time::Instant; + +use criterion::black_box; +use zksync_vm_benchmark_harness::{ + cut_to_allowed_bytecode_size, get_deploy_tx_with_gas_limit, BenchmarkingVm, +}; + +fn main() { + let test_contract = + std::fs::read("deployment_benchmarks/event_spam").expect("failed to read file"); + + let code = cut_to_allowed_bytecode_size(&test_contract).unwrap(); + let tx = get_deploy_tx_with_gas_limit(code, 1000); + + let start = Instant::now(); + + let mut vm = BenchmarkingVm::new(); + for _ in 0..1000 { + vm.run_transaction(black_box(&tx)); + } + + println!("{:?}", start.elapsed()); +} diff --git a/core/tests/vm-benchmark/deployment_benchmarks/heap_read_write b/core/tests/vm-benchmark/deployment_benchmarks/heap_read_write new file mode 100644 index 000000000000..914a23386850 Binary files /dev/null and b/core/tests/vm-benchmark/deployment_benchmarks/heap_read_write differ diff --git a/core/tests/vm-benchmark/deployment_benchmarks_sources/heap_read_write.sol b/core/tests/vm-benchmark/deployment_benchmarks_sources/heap_read_write.sol new file mode 100644 index 000000000000..d5a503eb7087 --- /dev/null +++ b/core/tests/vm-benchmark/deployment_benchmarks_sources/heap_read_write.sol @@ -0,0 +1,25 @@ +pragma solidity ^0.8.0; + +contract HeapBenchmark { + constructor() { + uint256 v1 = 0; + uint256 v2 = 0; + uint256 n = 16000; + uint256[] memory array = new uint256[](1); + + assembly { + mstore(add(array, sub(n, 1)), 4242) + + let j := 0 + for {} lt(j, n) {} { + v1 := mload(add(array, mod(mul(j, j), n))) + v2 := mload(add(array, j)) + mstore(add(array, j), add(add(v1, v2), 42)) + j := add(j, 1) + if gt(j, sub(n, 1)) { + j := 0 + } + } + } + } +} diff --git a/core/tests/vm-benchmark/harness/src/instruction_counter.rs 
b/core/tests/vm-benchmark/harness/src/instruction_counter.rs index 0d80658c7208..017b13da44ca 100644 --- a/core/tests/vm-benchmark/harness/src/instruction_counter.rs +++ b/core/tests/vm-benchmark/harness/src/instruction_counter.rs @@ -13,6 +13,7 @@ pub struct InstructionCounter { /// A tracer that counts the number of instructions executed by the VM. impl InstructionCounter { + #[allow(dead_code)] // FIXME pub fn new(output: Rc<RefCell<usize>>) -> Self { Self { count: 0, output } } diff --git a/core/tests/vm-benchmark/harness/src/lib.rs b/core/tests/vm-benchmark/harness/src/lib.rs index 35e7530e9aad..a30221cfa0be 100644 --- a/core/tests/vm-benchmark/harness/src/lib.rs +++ b/core/tests/vm-benchmark/harness/src/lib.rs @@ -7,9 +7,10 @@ use zksync_multivm::{ L2BlockEnv, TxExecutionMode, VmExecutionMode, VmExecutionResultAndLogs, VmInterface, }, utils::get_max_gas_per_pubdata_byte, - vm_latest::{constants::BATCH_COMPUTATIONAL_GAS_LIMIT, HistoryEnabled, TracerDispatcher, Vm}, + vm_fast::Vm, + vm_latest::constants::BATCH_COMPUTATIONAL_GAS_LIMIT, }; -use zksync_state::{InMemoryStorage, StorageView}; +use zksync_state::InMemoryStorage; use zksync_types::{ block::L2BlockHasher, ethabi::{encode, Token}, @@ -61,7 +62,7 @@ static CREATE_FUNCTION_SIGNATURE: Lazy<[u8; 4]> = Lazy::new(|| { static PRIVATE_KEY: Lazy<K256PrivateKey> = Lazy::new(|| K256PrivateKey::from_bytes(H256([42; 32])).expect("invalid key bytes")); -pub struct BenchmarkingVm(Vm<StorageView<&'static InMemoryStorage>, HistoryEnabled>); +pub struct BenchmarkingVm(Vm<&'static InMemoryStorage>); impl BenchmarkingVm { #[allow(clippy::new_without_default)] @@ -95,7 +96,7 @@ impl BenchmarkingVm { default_validation_computational_gas_limit: BATCH_COMPUTATIONAL_GAS_LIMIT, chain_id: L2ChainId::from(270), }, - Rc::new(RefCell::new(StorageView::new(&*STORAGE))), + &*STORAGE, )) } @@ -109,18 +110,17 @@ impl BenchmarkingVm { let count = Rc::new(RefCell::new(0)); - self.0.inspect( - TracerDispatcher::new(vec![Box::new( - instruction_counter::InstructionCounter::new(count.clone()), - )]), - VmExecutionMode::OneTx, - ); + self.0.inspect((), VmExecutionMode::OneTx); count.take() } } pub fn get_deploy_tx(code: &[u8]) -> Transaction { + get_deploy_tx_with_gas_limit(code, 30_000_000) +} + +pub fn get_deploy_tx_with_gas_limit(code: &[u8], gas_limit: u32) -> Transaction { let params = [ Token::FixedBytes(vec![0u8; 32]), Token::FixedBytes(hash_bytecode(code).0.to_vec()), @@ -137,7 +137,7 @@ pub fn get_deploy_tx(code: &[u8]) -> Transaction { calldata, Nonce(0), Fee { - gas_limit: U256::from(30000000u32), + gas_limit: U256::from(gas_limit), max_fee_per_gas: U256::from(250_000_000), max_priority_fee_per_gas: U256::from(0), gas_per_pubdata_limit: U256::from(get_max_gas_per_pubdata_byte( diff --git a/docker/build-base/Dockerfile b/docker/build-base/Dockerfile index 68ea7ce001c7..be3c6ddb452e 100644 --- a/docker/build-base/Dockerfile +++ b/docker/build-base/Dockerfile @@ -9,7 +9,7 @@ ENV RUSTUP_HOME=/usr/local/rustup \ PATH=/usr/local/cargo/bin:$PATH RUN curl https://sh.rustup.rs -sSf | bash -s -- -y && \ - rustup install nightly-2024-05-07 && \ - rustup default nightly-2024-05-07 + rustup install nightly-2024-08-01 && \ + rustup default nightly-2024-08-01 RUN cargo install sqlx-cli --version 0.8.0 diff --git a/docker/proof-fri-gpu-compressor/Dockerfile b/docker/proof-fri-gpu-compressor/Dockerfile index e6d2e0f11627..02ca4a3b77b0 100644 --- a/docker/proof-fri-gpu-compressor/Dockerfile +++ b/docker/proof-fri-gpu-compressor/Dockerfile @@ -15,8 +15,8 @@ ENV RUSTUP_HOME=/usr/local/rustup \ PATH=/usr/local/cargo/bin:$PATH RUN curl
https://sh.rustup.rs -sSf | bash -s -- -y && \ - rustup install nightly-2024-05-07 && \ - rustup default nightly-2024-05-07 + rustup install nightly-2024-08-01 && \ + rustup default nightly-2024-08-01 RUN curl -Lo cmake-3.24.2-linux-x86_64.sh https://github.com/Kitware/CMake/releases/download/v3.24.2/cmake-3.24.2-linux-x86_64.sh && \ chmod +x cmake-3.24.2-linux-x86_64.sh && \ diff --git a/docker/prover-gpu-fri/Dockerfile b/docker/prover-gpu-fri/Dockerfile index 0894c1c0c47d..1f1aaa447f22 100644 --- a/docker/prover-gpu-fri/Dockerfile +++ b/docker/prover-gpu-fri/Dockerfile @@ -14,8 +14,8 @@ ENV RUSTUP_HOME=/usr/local/rustup \ PATH=/usr/local/cargo/bin:$PATH RUN curl https://sh.rustup.rs -sSf | bash -s -- -y && \ - rustup install nightly-2024-05-07 && \ - rustup default nightly-2024-05-07 + rustup install nightly-2024-08-01 && \ + rustup default nightly-2024-08-01 RUN curl -Lo cmake-3.24.2-linux-x86_64.sh https://github.com/Kitware/CMake/releases/download/v3.24.2/cmake-3.24.2-linux-x86_64.sh && \ chmod +x cmake-3.24.2-linux-x86_64.sh && \ diff --git a/docker/zk-environment/20.04_amd64_cuda_11_8.Dockerfile b/docker/zk-environment/20.04_amd64_cuda_11_8.Dockerfile index 645111a4f0a6..0c0fd7a9bb3d 100644 --- a/docker/zk-environment/20.04_amd64_cuda_11_8.Dockerfile +++ b/docker/zk-environment/20.04_amd64_cuda_11_8.Dockerfile @@ -78,7 +78,7 @@ RUN echo "deb http://packages.cloud.google.com/apt cloud-sdk main" > /etc/apt/so gcloud config set metrics/environment github_docker_image RUN wget -c -O - https://sh.rustup.rs | bash -s -- -y -RUN rustup install nightly-2024-05-07 +RUN rustup install nightly-2024-08-01 RUN rustup default stable RUN cargo install --version=0.8.0 sqlx-cli RUN cargo install cargo-nextest diff --git a/docker/zk-environment/20.04_amd64_cuda_12_0.Dockerfile b/docker/zk-environment/20.04_amd64_cuda_12_0.Dockerfile index c7af93ce906a..5bd569b7d20b 100644 --- a/docker/zk-environment/20.04_amd64_cuda_12_0.Dockerfile +++ b/docker/zk-environment/20.04_amd64_cuda_12_0.Dockerfile @@ -76,7 +76,7 @@ RUN echo "deb http://packages.cloud.google.com/apt cloud-sdk main" > /etc/apt/so gcloud config set metrics/environment github_docker_image RUN wget -c -O - https://sh.rustup.rs | bash -s -- -y -RUN rustup install nightly-2024-05-07 +RUN rustup install nightly-2024-08-01 RUN rustup default stable RUN cargo install --version=0.8.0 sqlx-cli RUN cargo install cargo-nextest diff --git a/docker/zk-environment/Dockerfile b/docker/zk-environment/Dockerfile index 9dc7aa1a13b4..db9fb0ce5971 100644 --- a/docker/zk-environment/Dockerfile +++ b/docker/zk-environment/Dockerfile @@ -142,5 +142,5 @@ ENV RUSTC_WRAPPER=/usr/local/cargo/bin/sccache FROM rust-lightweight as rust-lightweight-nightly -RUN rustup install nightly-2024-05-07 && \ - rustup default nightly-2024-05-07 +RUN rustup install nightly-2024-08-01 && \ + rustup default nightly-2024-08-01 diff --git a/etc/contracts-test-data/contracts/expensive/expensive.sol b/etc/contracts-test-data/contracts/expensive/expensive.sol index c3b99df48923..27e18b6eb6cf 100644 --- a/etc/contracts-test-data/contracts/expensive/expensive.sol +++ b/etc/contracts-test-data/contracts/expensive/expensive.sol @@ -12,4 +12,10 @@ contract Expensive { } return keccak256(abi.encodePacked(array)); } + + function cleanUp() public { + for (uint i = 0; i < array.length; i++) { + array[i] = 0; + } + } } diff --git a/etc/env/base/external_proof_integration_api.toml b/etc/env/base/external_proof_integration_api.toml new file mode 100644 index 000000000000..5918a061be3a --- /dev/null +++ 
b/etc/env/base/external_proof_integration_api.toml @@ -0,0 +1,2 @@ +[external_proof_integration_api] +http_port = 3073 diff --git a/etc/env/base/vm_runner.toml b/etc/env/base/vm_runner.toml index dd8e9915280b..8e6171d79366 100644 --- a/etc/env/base/vm_runner.toml +++ b/etc/env/base/vm_runner.toml @@ -1,6 +1,4 @@ -# Configuration for the VM runner crate - -[vm_runner] +# Configuration for the VM runner instances and experimental VM [vm_runner.protective_reads] # Path to the directory that contains RocksDB with protective reads writer cache. @@ -17,3 +15,13 @@ db_path = "./db/main/basic_witness_input_producer" window_size = 3 # All batches before this one (inclusive) are always considered to be processed. first_processed_batch = 0 + +[experimental_vm] +# Mode in which to run the new fast VM in the state keeper. Don't set to "new" / "shadow" in production yet! +state_keeper_fast_vm_mode = "old" # default value + +[experimental_vm.playground] +# Path to the directory that contains RocksDB with the VM playground cache. +db_path = "./db/main/vm_playground" +# Mode in which to run the new fast VM +fast_vm_mode = "shadow" diff --git a/etc/env/file_based/general.yaml b/etc/env/file_based/general.yaml index 300138e9a867..670bfc1cc776 100644 --- a/etc/env/file_based/general.yaml +++ b/etc/env/file_based/general.yaml @@ -41,7 +41,7 @@ api: estimate_gas_scale_factor: 1.3 estimate_gas_acceptable_overestimation: 5000 max_tx_size: 1000000 - api_namespaces: [en,eth,net,web3,zks,pubsub,debug] + api_namespaces: [ en,eth,net,web3,zks,pubsub,debug ] state_keeper: transaction_slots: 8192 max_allowed_l2_tx_gas_limit: 15000000000 @@ -104,7 +104,7 @@ eth: aggregated_block_execute_deadline: 10 timestamp_criteria_max_allowed_lag: 30 max_eth_tx_data_size: 120000 - aggregated_proof_sizes: [1] + aggregated_proof_sizes: [ 1 ] max_aggregated_tx_gas: 15000000 max_acceptable_priority_fee_in_gwei: 100000000000 pubdata_sending_mode: BLOBS @@ -302,7 +302,7 @@ prometheus: observability: log_format: plain - log_directives: "zksync_node_test_utils=info,zksync_state_keeper=info,zksync_reorg_detector=info,zksync_consistency_checker=info,zksync_metadata_calculator=info,zksync_node_sync=info,zksync_node_consensus=info,zksync_contract_verification_server=info,zksync_node_api_server=info,zksync_tee_verifier_input_producer=info,zksync_node_framework=info,zksync_block_reverter=info,zksync_commitment_generator=info,zksync_node_db_pruner=info,zksync_eth_sender=info,zksync_node_fee_model=info,zksync_node_genesis=info,zksync_house_keeper=info,zksync_proof_data_handler=info,zksync_shared_metrics=info,zksync_node_test_utils=info,zksync_vm_runner=info,zksync_consensus_bft=info,zksync_consensus_network=info,zksync_consensus_storage=info,zksync_core_leftovers=debug,zksync_server=debug,zksync_contract_verifier=debug,zksync_dal=info,zksync_db_connection=info,zksync_eth_client=info,zksync_eth_watch=debug,zksync_storage=info,zksync_db_manager=info,zksync_merkle_tree=info,zksync_state=debug,zksync_utils=debug,zksync_queued_job_processor=info,zksync_types=info,zksync_mempool=debug,loadnext=info,vm=info,zksync_object_store=info,zksync_external_node=info,zksync_witness_generator=info,zksync_prover_fri=info,zksync_witness_vector_generator=info,zksync_web3_decl=debug,zksync_health_check=debug,zksync_proof_fri_compressor=info,vise_exporter=error,snapshots_creator=debug,zksync_base_token_adjuster=debug,zksync_external_price_api=debug" + log_directives:
"zksync_node_test_utils=info,zksync_state_keeper=info,zksync_reorg_detector=info,zksync_consistency_checker=info,zksync_metadata_calculator=info,zksync_node_sync=info,zksync_node_consensus=info,zksync_contract_verification_server=info,zksync_node_api_server=info,zksync_tee_verifier_input_producer=info,zksync_node_framework=info,zksync_block_reverter=info,zksync_commitment_generator=info,zksync_node_db_pruner=info,zksync_eth_sender=info,zksync_node_fee_model=info,zksync_node_genesis=info,zksync_house_keeper=info,zksync_proof_data_handler=info,zksync_shared_metrics=info,zksync_node_test_utils=info,zksync_vm_runner=info,zksync_consensus_bft=info,zksync_consensus_network=info,zksync_consensus_storage=info,zksync_core_leftovers=debug,zksync_server=debug,zksync_contract_verifier=debug,zksync_dal=info,zksync_db_connection=info,zksync_eth_client=info,zksync_eth_watch=debug,zksync_storage=info,zksync_db_manager=info,zksync_merkle_tree=info,zksync_state=debug,zksync_utils=debug,zksync_queued_job_processor=info,zksync_types=info,zksync_mempool=debug,loadnext=info,vm=info,zksync_object_store=info,zksync_external_node=info,zksync_witness_generator=info,zksync_prover_fri=info,zksync_witness_vector_generator=info,zksync_web3_decl=debug,zksync_health_check=debug,zksync_proof_fri_compressor=info,vise_exporter=error,snapshots_creator=debug,zksync_base_token_adjuster=debug,zksync_external_price_api=debug,zksync_external_proof_integration_api=info" # Uncomment only if needed # sentry: # url: unset @@ -323,6 +323,12 @@ basic_witness_input_producer: window_size: 3 first_processed_batch: 0 +experimental_vm: + state_keeper_fast_vm_mode: OLD + playground: + db_path: "./db/main/vm_playground" + fast_vm_mode: SHADOW + snapshot_recovery: enabled: false object_store: @@ -354,3 +360,6 @@ da_dispatcher: polling_interval_ms: 5000 max_rows_to_dispatch: 100 max_retries: 5 + +external_proof_integration_api: + http_port: 3073 diff --git a/flake.lock b/flake.lock index fe16e2254b51..e217d37664cd 100644 --- a/flake.lock +++ b/flake.lock @@ -7,11 +7,11 @@ ] }, "locked": { - "lastModified": 1720226507, - "narHash": "sha256-yHVvNsgrpyNTXZBEokL8uyB2J6gB1wEx0KOJzoeZi1A=", + "lastModified": 1722960479, + "narHash": "sha256-NhCkJJQhD5GUib8zN9JrmYGMwt4lCRp6ZVNzIiYCl0Y=", "owner": "ipetkov", "repo": "crane", - "rev": "0aed560c5c0a61c9385bddff471a13036203e11c", + "rev": "4c6c77920b8d44cd6660c1621dea6b3fc4b4c4f4", "type": "github" }, "original": { @@ -257,11 +257,11 @@ }, "nixpkgs": { "locked": { - "lastModified": 1719956923, - "narHash": "sha256-nNJHJ9kfPdzYsCOlHOnbiiyKjZUW5sWbwx3cakg3/C4=", + "lastModified": 1722869614, + "narHash": "sha256-7ojM1KSk3mzutD7SkrdSflHXEujPvW1u7QuqWoTLXQU=", "owner": "NixOS", "repo": "nixpkgs", - "rev": "706eef542dec88cc0ed25b9075d3037564b2d164", + "rev": "883180e6550c1723395a3a342f830bfc5c371f6b", "type": "github" }, "original": { @@ -341,11 +341,11 @@ "snowfall-lib": "snowfall-lib" }, "locked": { - "lastModified": 1719923509, - "narHash": "sha256-3buuJSKCVT0o42jpreoflYA+Rlp/4eQKATEAY+pPeh8=", + "lastModified": 1721741092, + "narHash": "sha256-ghFoP5gZpc1i4I4PiVCH00QNZ6s6ipGUcA0P1TsSSC8=", "owner": "matter-labs", "repo": "nixsgx", - "rev": "520ad6227523c5720468726f9e945cecdb7a37aa", + "rev": "be2c19592d0d5601184c52c07ab6d88dec07ffd6", "type": "github" }, "original": { @@ -407,11 +407,11 @@ "nixpkgs": "nixpkgs_3" }, "locked": { - "lastModified": 1720059535, - "narHash": "sha256-h/O3PoV3KvQG4tC5UpANBZOsptAZCzEGiwyi+3oSpYc=", + "lastModified": 1722997267, + "narHash": 
"sha256-8Pncp8IKd0f0N711CRrCGTC4iLfBE+/5kaMqyWxnYic=", "owner": "oxalica", "repo": "rust-overlay", - "rev": "8deeed2dfa21837c7792b46b6a9b2e73f97b472b", + "rev": "d720bf3cebac38c2426d77ee2e59943012854cb8", "type": "github" }, "original": { @@ -623,11 +623,11 @@ "vault-auth-tee-flake": "vault-auth-tee-flake" }, "locked": { - "lastModified": 1720011517, - "narHash": "sha256-1oo9Z47CNdqDgtGNE1LC+6CQ+VXcy7TtFFnvifBnVLE=", + "lastModified": 1723034739, + "narHash": "sha256-bu4XvqwsPUzfMzk5t10wyHliItfH7FOk42V0CIwl4lg=", "owner": "matter-labs", "repo": "teepot", - "rev": "8dadc1f76b7dd8a98be7781e8206fed5268dd0e6", + "rev": "4ed311a16a72521f79418216ad29e6eed8db347d", "type": "github" }, "original": { diff --git a/prover/Cargo.lock b/prover/Cargo.lock index a88155be9024..e6ef7fd95f83 100644 --- a/prover/Cargo.lock +++ b/prover/Cargo.lock @@ -524,7 +524,7 @@ version = "0.69.4" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "a00dc851838a2120612785d195287475a3ac45514741da670b735818822129a0" dependencies = [ - "bitflags 2.5.0", + "bitflags 2.6.0", "cexpr", "clang-sys", "itertools 0.12.1", @@ -567,9 +567,9 @@ checksum = "bef38d45163c2f1dde094a7dfd33ccf595c92905c8f8f4fdc18d06fb1037718a" [[package]] name = "bitflags" -version = "2.5.0" +version = "2.6.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "cf4b9d6a944f767f8e5e0db018570623c85f3d925ac718db4e06d0187adb21c1" +checksum = "b048fb63fd8b5923fc5aa7b340d8e156aec7ec02f0c78fa8a6ddc2613f6f71de" dependencies = [ "serde", ] @@ -698,9 +698,9 @@ dependencies = [ [[package]] name = "boojum" -version = "0.2.1" +version = "0.2.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0cf10f4b3980dc82dc31709dfa8193b7d6106a3a7ce9f9a9f8872bfb8719aa2d" +checksum = "df88daa33db46d683967ca09a4f04817c38950483f2501a771d497669a8a4bb1" dependencies = [ "arrayvec 0.7.4", "bincode", @@ -730,9 +730,9 @@ dependencies = [ [[package]] name = "boojum-cuda" -version = "0.1.1" +version = "0.2.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "bf1cadeaf29fa0f076c230a08f57932619b2ba46a1977c72bb42160574400f54" +checksum = "407123a79308091866f0199d510ee2fb930727204dd77d6805b3437d6cb859eb" dependencies = [ "boojum", "cmake", @@ -921,9 +921,9 @@ dependencies = [ [[package]] name = "circuit_definitions" -version = "0.140.0-gpu-wrapper.1" +version = "0.140.1-gpu-wrapper.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a1f38b36c1cbc5e53bb016eebbaa0740f939bec9154100e9e31d28035faed202" +checksum = "54965c22dfd81bca2a8abd53f140c1907304b7aad0fd69679389a96202129003" dependencies = [ "crossbeam 0.8.4", "derivative", @@ -931,16 +931,16 @@ dependencies = [ "serde", "snark_wrapper", "zk_evm 0.140.0", - "zkevm_circuits 0.140.1", + "zkevm_circuits 0.140.2", ] [[package]] name = "circuit_definitions" -version = "0.150.2-rc.2" +version = "0.150.2-rc.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f98221101f42cceafa9baf8ec320ef78aa02bf7c2be5c398bf90e7acf1709bfa" +checksum = "68c5da9d10ee04601445afac76591f838b4f64f4f8fb8c3d1b3414a260d51b6c" dependencies = [ - "circuit_encodings 0.150.2-rc.2", + "circuit_encodings 0.150.2-rc.3", "crossbeam 0.8.4", "derivative", "seq-macro", @@ -957,7 +957,7 @@ dependencies = [ "derivative", "serde", "zk_evm 0.140.0", - "zkevm_circuits 0.140.1", + "zkevm_circuits 0.140.2", ] [[package]] @@ -986,14 +986,14 @@ dependencies = [ [[package]] name = "circuit_encodings" -version = "0.150.2-rc.2" +version = 
"0.150.2-rc.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e59747066b9a0d1a15d45f5837658aec5d53744fb643954f9dcc412f76c0d346" +checksum = "c928cad0aeeb35e86f8605376fdbb27b506cfcec14af1f532459a47e34d8b6f9" dependencies = [ "derivative", "serde", "zk_evm 0.150.0", - "zkevm_circuits 0.150.1", + "zkevm_circuits 0.150.2", ] [[package]] @@ -1053,12 +1053,12 @@ dependencies = [ [[package]] name = "circuit_sequencer_api" -version = "0.150.2-rc.2" +version = "0.150.2-rc.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "dbfeb50910b20c4f05cc51700b2396a655cef9e6f0c84debd71cb02ce4853902" +checksum = "18d8ca58b9bb7c63a75813c96a5a80314fd70013d7929f61fc0e9e69b0e440a7" dependencies = [ "bellman_ce 0.7.0", - "circuit_encodings 0.150.2-rc.2", + "circuit_encodings 0.150.2-rc.3", "derivative", "rayon", "serde", @@ -1478,9 +1478,9 @@ dependencies = [ [[package]] name = "cs_derive" -version = "0.2.1" +version = "0.2.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ab1f510bfddd1fc643a1d1bf8a405e279ffc818ee7ac86ed658e667a44958178" +checksum = "24cf603ca4299c6e20e644da88897f7b81d688510f4887e818b0bfe0b792081b" dependencies = [ "proc-macro-error", "proc-macro2 1.0.85", @@ -1488,6 +1488,16 @@ dependencies = [ "syn 1.0.109", ] +[[package]] +name = "ctor" +version = "0.2.8" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "edb49164822f3ee45b17acd4a208cfc1251410cf0cad9a833234c9890774dd9f" +dependencies = [ + "quote 1.0.36", + "syn 2.0.66", +] + [[package]] name = "ctrlc" version = "3.4.4" @@ -1659,6 +1669,12 @@ dependencies = [ "zeroize", ] +[[package]] +name = "diff" +version = "0.1.13" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "56254986775e3233ffa9c4d7d3faaf6d36a2c09d30b20687e9f88bc8bafc16c8" + [[package]] name = "digest" version = "0.9.0" @@ -1822,6 +1838,18 @@ dependencies = [ "cfg-if 1.0.0", ] +[[package]] +name = "enum_dispatch" +version = "0.3.13" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "aa18ce2bc66555b3218614519ac839ddb759a7d6720732f979ef8d13be147ecd" +dependencies = [ + "once_cell", + "proc-macro2 1.0.85", + "quote 1.0.36", + "syn 2.0.66", +] + [[package]] name = "env_filter" version = "0.1.0" @@ -1873,22 +1901,21 @@ checksum = "5443807d6dff69373d433ab9ef5378ad8df50ca6298caf15de6e52e24aaf54d5" [[package]] name = "era_cudart" -version = "0.1.0" +version = "0.2.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1725b17e5e41b89f566ace3900f119fdc87f04e2daa8e253b668573ad67a454f" +checksum = "6592e1277ac1ab0f3925151784a3809f4f973b1a63a0244b6d44e3872b413199" dependencies = [ - "bitflags 2.5.0", + "bitflags 2.6.0", "era_cudart_sys", "paste", ] [[package]] name = "era_cudart_sys" -version = "0.1.0" +version = "0.2.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "60d46683f8a9a5364874f95b00073f6dc93d33e9a019f150b0d6ce09ffc13251" +checksum = "21767c452b418a7fb2bb9ffb07c744e4616da8d14176db4dcab76649c3206ece" dependencies = [ - "bindgen 0.69.4", "serde_json", ] @@ -2126,9 +2153,9 @@ dependencies = [ [[package]] name = "franklin-crypto" -version = "0.2.1" +version = "0.2.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e35c05c35290529cf4704bf667777bda5d1b757d63445591cd19163ee0909df8" +checksum = "05eab544ba915269919b5f158a061b540a4e3a04150c1346481f4f7b80eb6311" dependencies = [ "arr_macro", "bellman_ce 0.8.0", @@ -3317,26 +3344,6 @@ 
dependencies = [ "vcpkg", ] -[[package]] -name = "linkme" -version = "0.3.27" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ccb76662d78edc9f9bf56360d6919bdacc8b7761227727e5082f128eeb90bbf5" -dependencies = [ - "linkme-impl", -] - -[[package]] -name = "linkme-impl" -version = "0.3.27" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f8dccda732e04fa3baf2e17cf835bfe2601c7c2edafd64417c627dabae3a8cda" -dependencies = [ - "proc-macro2 1.0.85", - "quote 1.0.36", - "syn 2.0.66", -] - [[package]] name = "linux-raw-sys" version = "0.4.14" @@ -3611,7 +3618,7 @@ version = "0.28.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "ab2156c4fce2f8df6c499cc1c763e4394b7482525bf2a9701c9d79d215f519e4" dependencies = [ - "bitflags 2.5.0", + "bitflags 2.6.0", "cfg-if 1.0.0", "cfg_aliases", "libc", @@ -3860,7 +3867,7 @@ version = "0.10.64" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "95a0481286a310808298130d22dd1fef0fa571e05a8f44ec801801e84b216b1f" dependencies = [ - "bitflags 2.5.0", + "bitflags 2.6.0", "cfg-if 1.0.0", "foreign-types", "libc", @@ -4230,6 +4237,16 @@ version = "0.2.17" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "5b40af805b3121feab8a3c29f04d8ad262fa8e0561883e7653e024ae4479e6de" +[[package]] +name = "pretty_assertions" +version = "1.4.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "af7cee1a6c8a5b9208b3cb1061f10c0cb689087b3d8ce85fb9d2dd7a29b6ba66" +dependencies = [ + "diff", + "yansi", +] + [[package]] name = "prettyplease" version = "0.2.20" @@ -4369,7 +4386,7 @@ checksum = "31b476131c3c86cb68032fdc5cb6d5a1045e3e42d96b69fa599fd77701e1f5bf" dependencies = [ "bit-set", "bit-vec", - "bitflags 2.5.0", + "bitflags 2.6.0", "lazy_static", "num-traits", "rand 0.8.5", @@ -4507,7 +4524,7 @@ dependencies = [ "anyhow", "bincode", "chrono", - "circuit_definitions 0.150.2-rc.2", + "circuit_definitions 0.150.2-rc.3", "clap 4.5.4", "colored", "dialoguer", @@ -4518,7 +4535,7 @@ dependencies = [ "tokio", "tracing", "tracing-subscriber", - "zkevm_test_harness 0.150.2-rc.2", + "zkevm_test_harness 0.150.2-rc.3", "zksync_basic_types", "zksync_config", "zksync_contracts", @@ -4566,7 +4583,7 @@ version = "0.9.6" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "57206b407293d2bcd3af849ce869d52068623f19e1b5ff8e8778e3309439682b" dependencies = [ - "bitflags 2.5.0", + "bitflags 2.6.0", "memchr", "unicase", ] @@ -4721,14 +4738,14 @@ version = "0.5.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "469052894dcb553421e483e4209ee581a45100d31b4018de03e5a7ad86374a7e" dependencies = [ - "bitflags 2.5.0", + "bitflags 2.6.0", ] [[package]] name = "regex" -version = "1.10.4" +version = "1.10.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c117dbdfde9c8308975b6a18d71f3f385c89461f7b3fb054288ecf2a2058ba4c" +checksum = "4219d74c6b67a3654a9fbebc4b419e22126d13d2f3c4a07ee0cb61ff79a79619" dependencies = [ "aho-corasick", "memchr", @@ -4881,9 +4898,9 @@ dependencies = [ [[package]] name = "rescue_poseidon" -version = "0.5.1" +version = "0.5.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "dcf8f3aca783485c92639215977a2219cf25ba26836d088d75a088e7cba842bf" +checksum = "f27fbc6ba44baf99a0ca8387b1fa1cf90d3d7062860c1afedbbb64454829acc5" dependencies = [ "addchain", "arrayvec 0.7.4", @@ -5058,7 +5075,7 @@ version = "0.38.34" source = 
"registry+https://github.com/rust-lang/crates.io-index" checksum = "70dc5ec042f7a43c4a73241207cecc9873a06d45debb38b329f8541d85c2730f" dependencies = [ - "bitflags 2.5.0", + "bitflags 2.6.0", "errno", "libc", "linux-raw-sys", @@ -5273,7 +5290,7 @@ version = "2.11.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "c627723fd09706bacdb5cf41499e95098555af3c3c29d014dc3c458ef6be11c0" dependencies = [ - "bitflags 2.5.0", + "bitflags 2.6.0", "core-foundation", "core-foundation-sys", "libc", @@ -5605,15 +5622,15 @@ checksum = "24188a676b6ae68c3b2cb3a01be17fbf7240ce009799bb56d5b1409051e78fde" [[package]] name = "shivini" -version = "0.150.2-rc.2" +version = "0.150.2-rc3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "85c35f41f876fc986d18e02d97a20a6980ca108c28f77cc844406d1c9c1f6ed9" +checksum = "110bb1fe4020af4f1be74f467b69bace76a98a3ecedc4c654ed90cc7c6a9aaba" dependencies = [ "bincode", "blake2 0.10.6", "boojum", "boojum-cuda", - "circuit_definitions 0.150.2-rc.2", + "circuit_definitions 0.150.2-rc.3", "derivative", "era_cudart", "era_cudart_sys", @@ -5712,9 +5729,9 @@ dependencies = [ [[package]] name = "snark_wrapper" -version = "0.1.1" +version = "0.1.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b7dd2b167b67a0ec5b2727519bf7c03fcbd43edaa3ebf05a6013fb6fbfed20f6" +checksum = "71aa5bffe5e7daca634bf2fedf0bf566273cb7eae01711d1aa6e5223d36d987d" dependencies = [ "derivative", "rand 0.4.6", @@ -5897,7 +5914,7 @@ dependencies = [ "atoi", "base64 0.22.1", "bigdecimal", - "bitflags 2.5.0", + "bitflags 2.6.0", "byteorder", "bytes", "chrono", @@ -5942,7 +5959,7 @@ dependencies = [ "atoi", "base64 0.22.1", "bigdecimal", - "bitflags 2.5.0", + "bitflags 2.6.0", "byteorder", "chrono", "crc", @@ -6831,13 +6848,13 @@ checksum = "49874b5167b65d7193b8aba1567f5c7d93d001cafc34600cee003eda787e483f" [[package]] name = "vise" -version = "0.1.0" +version = "0.2.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "229baafe01d5177b63c6ee1def80d8e39a2365e64caf69ddb05a57594b15647c" +checksum = "90ade36f3548b1524396f4de7b36f4f210c8a01dfab568eb2bff466af64eb6e5" dependencies = [ "compile-fmt", + "ctor", "elsa", - "linkme", "once_cell", "prometheus-client", "vise-macros", @@ -6845,9 +6862,9 @@ dependencies = [ [[package]] name = "vise-exporter" -version = "0.1.0" +version = "0.2.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "23981b18d697026f5430249ab01ba739ef2edc463e400042394331cb2bb63494" +checksum = "671d3b894d5d0849f0a597f56bf071f42d4f2a1cbcf2f78ca21f870ab7c0cc2b" dependencies = [ "hyper 0.14.29", "once_cell", @@ -6858,9 +6875,9 @@ dependencies = [ [[package]] name = "vise-macros" -version = "0.1.0" +version = "0.2.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8bb19c33cd5f04dcf4e767635e058a998edbc2b7fca32ade0a4a1cea0f8e9b34" +checksum = "6a511871dc5de990a3b2a0e715facfbc5da848c0c0395597a1415029fb7c250a" dependencies = [ "proc-macro2 1.0.85", "quote 1.0.36", @@ -6873,7 +6890,7 @@ version = "0.1.0" dependencies = [ "anyhow", "bincode", - "circuit_definitions 0.150.2-rc.2", + "circuit_definitions 0.150.2-rc.3", "clap 4.5.4", "hex", "indicatif", @@ -6890,7 +6907,7 @@ dependencies = [ "toml_edit 0.14.4", "tracing", "tracing-subscriber", - "zkevm_test_harness 0.150.2-rc.2", + "zkevm_test_harness 0.150.2-rc.3", "zksync_config", "zksync_env_config", "zksync_prover_fri_types", @@ -6899,6 +6916,17 @@ dependencies = [ "zksync_vlog", ] +[[package]] 
+name = "vm2" +version = "0.1.0" +source = "git+https://github.com/matter-labs/vm2.git?rev=9a38900d7af9b1d72b47ce3be980e77c1239a61d#9a38900d7af9b1d72b47ce3be980e77c1239a61d" +dependencies = [ + "enum_dispatch", + "primitive-types", + "zk_evm_abstractions 0.150.0", + "zkevm_opcode_defs 0.150.0", +] + [[package]] name = "wait-timeout" version = "0.2.0" @@ -7286,6 +7314,12 @@ dependencies = [ "tap", ] +[[package]] +name = "yansi" +version = "0.5.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "09041cd90cf85f7f8b2df60c646f853b7f535ce68f85244eb6731cf89fa498ec" + [[package]] name = "zerocopy" version = "0.7.34" @@ -7488,9 +7522,9 @@ dependencies = [ [[package]] name = "zkevm_circuits" -version = "0.140.1" +version = "0.140.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "806caf4ccbe34ac68193f7d0dd591d1d866d95a740fe45a358eaefd61c357d8e" +checksum = "8beed4cc1ab1f9d99a694506d18705e10059534b30742832be49637c4775e1f8" dependencies = [ "arrayvec 0.7.4", "bincode", @@ -7532,9 +7566,9 @@ dependencies = [ [[package]] name = "zkevm_circuits" -version = "0.150.1" +version = "0.150.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e8a85c1987a1e7e89f1f8c39ca19bffb61521e719050086372aaea8817f403fc" +checksum = "94d97632ba26e4e6a77a680d6b2bfbcc6f7b9b722976ee31afb922d16a675d45" dependencies = [ "arrayvec 0.7.4", "boojum", @@ -7568,7 +7602,7 @@ version = "0.132.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "e0769f7b27d8fb06e715da3290c575cac5d04d10a557faef180e847afce50ac4" dependencies = [ - "bitflags 2.5.0", + "bitflags 2.6.0", "blake2 0.10.6", "ethereum-types", "k256 0.11.6", @@ -7583,7 +7617,7 @@ version = "0.141.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "6be7bd5f0e0b61211f544147289640b4712715589d7f2fe5229d92a7a3ac64c0" dependencies = [ - "bitflags 2.5.0", + "bitflags 2.6.0", "blake2 0.10.6", "ethereum-types", "k256 0.13.3", @@ -7598,7 +7632,7 @@ version = "0.150.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "b3328c012d444bdbfadb754a72c01a56879eb66584efc71eac457e89e7843608" dependencies = [ - "bitflags 2.5.0", + "bitflags 2.6.0", "blake2 0.10.6", "ethereum-types", "k256 0.13.3", @@ -7611,12 +7645,12 @@ dependencies = [ [[package]] name = "zkevm_test_harness" -version = "0.140.0-gpu-wrapper.1" +version = "0.140.1-gpu-wrapper.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3dbcf39f7cdd57174c572f227ae46513d2111faed7dc754e300465200e9b791a" +checksum = "bb6b9ca7e42aa17d9bda1209faf166f7c999b583a637a0ce8cefaf3e18e381a3" dependencies = [ "bincode", - "circuit_definitions 0.140.0-gpu-wrapper.1", + "circuit_definitions 0.140.1-gpu-wrapper.1", "codegen", "crossbeam 0.8.4", "derivative", @@ -7635,13 +7669,13 @@ dependencies = [ [[package]] name = "zkevm_test_harness" -version = "0.150.2-rc.2" +version = "0.150.2-rc.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ca2ca25fe64b0dee537ba226983a1f8ca7f8bd4ce82e5d58eb4522252fbe40a6" +checksum = "d584283b3a574f76f7854a7edac51ace2e19596aefd72ebd516264415b798c13" dependencies = [ "bincode", - "circuit_definitions 0.150.2-rc.2", - "circuit_sequencer_api 0.150.2-rc.2", + "circuit_definitions 0.150.2-rc.3", + "circuit_sequencer_api 0.150.2-rc.3", "codegen", "crossbeam 0.8.4", "derivative", @@ -7662,9 +7696,9 @@ dependencies = [ [[package]] name = "zksync-gpu-ffi" -version = "0.140.0-gpu-wrapper.1" +version = 
"0.140.1-gpu-wrapper.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "cebf6d2f7102b2b4f3bf14d6dfd8c520f95621b180bc4bbfd07ee53093336205" +checksum = "5b5f8b16cc9cafee49f5cdab6d4f13ebf80bdd1c587b6e7d0b9d30c1944e6246" dependencies = [ "bindgen 0.59.2", "crossbeam 0.8.4", @@ -7676,9 +7710,9 @@ dependencies = [ [[package]] name = "zksync-gpu-prover" -version = "0.140.0-gpu-wrapper.1" +version = "0.140.1-gpu-wrapper.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "502acf81d04b5fad616e01ff7c913285da21ce88d17cd1a083a0d3f25cc27b27" +checksum = "2a1d3928ffae19c41263a5efcea810075282c01c996fa5b5c2bf310b8bca6c45" dependencies = [ "bit-vec", "cfg-if 1.0.0", @@ -7693,12 +7727,12 @@ dependencies = [ [[package]] name = "zksync-wrapper-prover" -version = "0.140.0-gpu-wrapper.1" +version = "0.140.1-gpu-wrapper.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "99746d1164f60d788f9701b7146ad2878c92f1ffd463ea5b6d72f9979ac464b7" +checksum = "a93d0d66ca7f3b095123a8bf528c3d3353e8d8f2fcc49a889969832b1f149b55" dependencies = [ - "circuit_definitions 0.140.0-gpu-wrapper.1", - "zkevm_test_harness 0.140.0-gpu-wrapper.1", + "circuit_definitions 0.140.1-gpu-wrapper.1", + "zkevm_test_harness 0.140.1-gpu-wrapper.1", "zksync-gpu-prover", ] @@ -7722,9 +7756,9 @@ dependencies = [ [[package]] name = "zksync_concurrency" -version = "0.1.0-rc.8" +version = "0.1.0-rc.9" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "209b83578357184ab72af4d4cb2eca76f85e5f2f35d739a47e3fd5931eb9252d" +checksum = "a9f9a4352244ccd5e5fd34fb0d029861a5f57b05c80fe7944a7b532f54c58f89" dependencies = [ "anyhow", "once_cell", @@ -7747,6 +7781,7 @@ dependencies = [ "rand 0.8.5", "secrecy", "serde", + "tracing", "url", "zksync_basic_types", "zksync_concurrency", @@ -7757,9 +7792,9 @@ dependencies = [ [[package]] name = "zksync_consensus_crypto" -version = "0.1.0-rc.8" +version = "0.1.0-rc.9" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9e1abf1f3d9c8109da32a6d5e61a2a64a61b0bff90fdd355992522a4e8a57e69" +checksum = "f8c91270540e8db9479e1eaedaf0e600de468f71ccd5dc7c0258072e743830e6" dependencies = [ "anyhow", "blst", @@ -7781,9 +7816,9 @@ dependencies = [ [[package]] name = "zksync_consensus_roles" -version = "0.1.0-rc.8" +version = "0.1.0-rc.9" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "32e0b04d43a542a3bb1af0ac4c0a17acf6b743607c3cb9028192df0c7d2f5b24" +checksum = "87e79025fd678ec2733add1697645827e9daed3f120c8cebf43513ac17e65b63" dependencies = [ "anyhow", "bit-vec", @@ -7803,9 +7838,9 @@ dependencies = [ [[package]] name = "zksync_consensus_storage" -version = "0.1.0-rc.8" +version = "0.1.0-rc.9" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0c39f79628bd6685f7ec5561874c007f5d26693d6ba7e5595dfa260981e8f006" +checksum = "470991a42d5f9a3f2385ebe52889e63742d95d141b80b95a1eabe9f51e18cb7e" dependencies = [ "anyhow", "async-trait", @@ -7823,9 +7858,9 @@ dependencies = [ [[package]] name = "zksync_consensus_utils" -version = "0.1.0-rc.8" +version = "0.1.0-rc.9" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c4514629a34abdf943ef911c16228dfec656edb02d8412db4febd4df5ccf3f91" +checksum = "6c43283b5813fd887e0e7ccaee73c6e41907b1de311a3a01b2fa5f2e3f2ba503" dependencies = [ "anyhow", "rand 0.8.5", @@ -7981,9 +8016,9 @@ dependencies = [ [[package]] name = "zksync_kzg" -version = "0.150.2-rc.2" +version = "0.150.2-rc.3" source = 
"registry+https://github.com/rust-lang/crates.io-index" -checksum = "c44edd3a3316dcab45aab7e190c96150f2586d4a92fa21f93dcc20178308313a" +checksum = "9235fbdaa98f27b9aacaa861bcb850b6b0dbf37e59477ce3f08c64555a25d00d" dependencies = [ "boojum", "derivative", @@ -7993,7 +8028,7 @@ dependencies = [ "serde", "serde_json", "serde_with", - "zkevm_circuits 0.150.1", + "zkevm_circuits 0.150.2", ] [[package]] @@ -8033,14 +8068,16 @@ dependencies = [ "circuit_sequencer_api 0.140.0", "circuit_sequencer_api 0.141.1", "circuit_sequencer_api 0.142.0", - "circuit_sequencer_api 0.150.2-rc.2", + "circuit_sequencer_api 0.150.2-rc.3", "hex", "itertools 0.10.5", "once_cell", + "pretty_assertions", "serde", "thiserror", "tracing", "vise", + "vm2", "zk_evm 0.131.0-rc.2", "zk_evm 0.133.0", "zk_evm 0.140.0", @@ -8104,7 +8141,7 @@ dependencies = [ "anyhow", "async-trait", "bincode", - "circuit_sequencer_api 0.150.2-rc.2", + "circuit_sequencer_api 0.150.2-rc.3", "clap 4.5.4", "ctrlc", "futures 0.3.30", @@ -8116,7 +8153,7 @@ dependencies = [ "tracing", "vise", "vk_setup_data_generator_server_fri", - "zkevm_test_harness 0.150.2-rc.2", + "zkevm_test_harness 0.150.2-rc.3", "zksync-wrapper-prover", "zksync_config", "zksync_core_leftovers", @@ -8133,9 +8170,9 @@ dependencies = [ [[package]] name = "zksync_protobuf" -version = "0.1.0-rc.8" +version = "0.1.0-rc.9" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "53128384270314bfbd4e044c15138af63cb3a505ab95bb3339f3b866ccbe211c" +checksum = "b5db598a518958b244aed5e3f925c763808429a5ea022bb50957b98e68540495" dependencies = [ "anyhow", "bit-vec", @@ -8154,9 +8191,9 @@ dependencies = [ [[package]] name = "zksync_protobuf_build" -version = "0.1.0-rc.8" +version = "0.1.0-rc.9" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0d7dfb4dcdd48ab5fa1ccff25f585d73b58cf95e0fb74e96618dd666f198a005" +checksum = "4047ed624c7a19e206125f8259f7e175ad70020beeb66e1975e068af060d2fb5" dependencies = [ "anyhow", "heck 0.5.0", @@ -8204,7 +8241,7 @@ version = "0.1.0" dependencies = [ "anyhow", "async-trait", - "circuit_definitions 0.150.2-rc.2", + "circuit_definitions 0.150.2-rc.3", "clap 4.5.4", "ctrlc", "futures 0.3.30", @@ -8217,7 +8254,7 @@ dependencies = [ "tracing", "vise", "vk_setup_data_generator_server_fri", - "zkevm_test_harness 0.150.2-rc.2", + "zkevm_test_harness 0.150.2-rc.3", "zksync_config", "zksync_core_leftovers", "zksync_env_config", @@ -8261,7 +8298,7 @@ dependencies = [ name = "zksync_prover_fri_types" version = "0.1.0" dependencies = [ - "circuit_definitions 0.150.2-rc.2", + "circuit_definitions 0.150.2-rc.3", "serde", "zksync_object_store", "zksync_types", @@ -8290,7 +8327,7 @@ name = "zksync_prover_interface" version = "0.1.0" dependencies = [ "chrono", - "circuit_sequencer_api 0.150.2-rc.2", + "circuit_sequencer_api 0.150.2-rc.3", "serde", "serde_with", "strum", @@ -8473,7 +8510,7 @@ dependencies = [ "anyhow", "async-trait", "bincode", - "circuit_definitions 0.150.2-rc.2", + "circuit_definitions 0.150.2-rc.3", "const-decoder", "ctrlc", "futures 0.3.30", @@ -8487,7 +8524,7 @@ dependencies = [ "tracing", "vise", "vk_setup_data_generator_server_fri", - "zkevm_test_harness 0.150.2-rc.2", + "zkevm_test_harness 0.150.2-rc.3", "zksync_config", "zksync_core_leftovers", "zksync_env_config", diff --git a/prover/Cargo.toml b/prover/Cargo.toml index 4dfc77432979..7ab6dd16b99b 100644 --- a/prover/Cargo.toml +++ b/prover/Cargo.toml @@ -53,16 +53,16 @@ tokio = "1" toml_edit = "0.14.4" tracing = "0.1" tracing-subscriber = { version = "0.3" 
} -vise = "0.1.0" +vise = "0.2.0" # Proving dependencies -circuit_definitions = "=0.150.2-rc.2" -circuit_sequencer_api = "=0.150.2-rc.2" -zkevm_test_harness = "=0.150.2-rc.2" +circuit_definitions = "=0.150.2-rc.3" +circuit_sequencer_api = "=0.150.2-rc.3" +zkevm_test_harness = "=0.150.2-rc.3" # GPU proving dependencies -wrapper_prover = { package = "zksync-wrapper-prover", version = "=0.140.0-gpu-wrapper.1" } -shivini = "=0.150.2-rc.2" +wrapper_prover = { package = "zksync-wrapper-prover", version = "=0.140.1-gpu-wrapper.1" } +shivini = "=0.150.2-rc3" # Core workspace dependencies zksync_multivm = { path = "../core/lib/multivm", version = "0.1.0" } diff --git a/prover/crates/bin/proof_fri_compressor/README.md b/prover/crates/bin/proof_fri_compressor/README.md index 097a59e5d09b..c3d5729f40c9 100644 --- a/prover/crates/bin/proof_fri_compressor/README.md +++ b/prover/crates/bin/proof_fri_compressor/README.md @@ -4,4 +4,4 @@ Used to compress FRI proof to Bellman proof that gets sent to L1. ## running -`zk f cargo +nightly-2024-05-07 run --release --bin zksync_proof_fri_compressor` +`zk f cargo +nightly-2024-08-01 run --release --bin zksync_proof_fri_compressor` diff --git a/prover/crates/bin/vk_setup_data_generator_server_fri/src/utils.rs b/prover/crates/bin/vk_setup_data_generator_server_fri/src/utils.rs index 6f4946af5b21..1ac6c4f4230d 100644 --- a/prover/crates/bin/vk_setup_data_generator_server_fri/src/utils.rs +++ b/prover/crates/bin/vk_setup_data_generator_server_fri/src/utils.rs @@ -136,7 +136,7 @@ mod tests { for entry in std::fs::read_dir(path_to_input.clone()).unwrap().flatten() { if entry.metadata().unwrap().is_dir() { - let basepath = path_to_input.join(&entry.file_name().into_string().unwrap()); + let basepath = path_to_input.join(entry.file_name()); let keystore = Keystore::new_with_optional_setup_path(basepath.clone(), None); let expected = diff --git a/prover/crates/bin/witness_vector_generator/README.md b/prover/crates/bin/witness_vector_generator/README.md index dde192533db3..3348b294324a 100644 --- a/prover/crates/bin/witness_vector_generator/README.md +++ b/prover/crates/bin/witness_vector_generator/README.md @@ -4,4 +4,4 @@ Used to generate witness vectors using circuit and sending them to prover over T ## running -`zk f cargo +nightly-2024-05-07 run --release --bin zksync_witness_vector_generator` +`zk f cargo +nightly-2024-08-01 run --release --bin zksync_witness_vector_generator` diff --git a/prover/crates/lib/prover_dal/src/fri_witness_generator_dal.rs b/prover/crates/lib/prover_dal/src/fri_witness_generator_dal.rs index bc9cde72fde2..488d5b3a5ec9 100644 --- a/prover/crates/lib/prover_dal/src/fri_witness_generator_dal.rs +++ b/prover/crates/lib/prover_dal/src/fri_witness_generator_dal.rs @@ -304,6 +304,7 @@ impl FriWitnessGeneratorDal<'_, '_> { /// - all node aggregation jobs at depth 0 for the batch /// - the recursion tip witness job /// - the scheduler witness job + /// /// NOTE: Not all batches have all circuits, so it's possible we'll be missing some aggregation jobs (for circuits not present in the batch). 
pub async fn create_aggregation_jobs( &mut self, diff --git a/prover/rust-toolchain b/prover/rust-toolchain index 5aaef38cd79d..03c040b91f1f 100644 --- a/prover/rust-toolchain +++ b/prover/rust-toolchain @@ -1 +1 @@ -nightly-2024-05-07 +nightly-2024-08-01 diff --git a/rust-toolchain b/rust-toolchain index 5aaef38cd79d..03c040b91f1f 100644 --- a/rust-toolchain +++ b/rust-toolchain @@ -1 +1 @@ -nightly-2024-05-07 +nightly-2024-08-01 diff --git a/zk_toolbox/Cargo.lock b/zk_toolbox/Cargo.lock index 51d6af249f71..71ac44361179 100644 --- a/zk_toolbox/Cargo.lock +++ b/zk_toolbox/Cargo.lock @@ -883,6 +883,16 @@ dependencies = [ "typenum", ] +[[package]] +name = "ctor" +version = "0.2.8" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "edb49164822f3ee45b17acd4a208cfc1251410cf0cad9a833234c9890774dd9f" +dependencies = [ + "quote", + "syn 2.0.68", +] + [[package]] name = "ctr" version = "0.9.2" @@ -2571,26 +2581,6 @@ dependencies = [ "vcpkg", ] -[[package]] -name = "linkme" -version = "0.3.27" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ccb76662d78edc9f9bf56360d6919bdacc8b7761227727e5082f128eeb90bbf5" -dependencies = [ - "linkme-impl", -] - -[[package]] -name = "linkme-impl" -version = "0.3.27" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f8dccda732e04fa3baf2e17cf835bfe2601c7c2edafd64417c627dabae3a8cda" -dependencies = [ - "proc-macro2", - "quote", - "syn 2.0.68", -] - [[package]] name = "linux-raw-sys" version = "0.4.14" @@ -5742,13 +5732,13 @@ checksum = "49874b5167b65d7193b8aba1567f5c7d93d001cafc34600cee003eda787e483f" [[package]] name = "vise" -version = "0.1.0" +version = "0.2.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "229baafe01d5177b63c6ee1def80d8e39a2365e64caf69ddb05a57594b15647c" +checksum = "90ade36f3548b1524396f4de7b36f4f210c8a01dfab568eb2bff466af64eb6e5" dependencies = [ "compile-fmt", + "ctor", "elsa", - "linkme", "once_cell", "prometheus-client", "vise-macros", @@ -5756,9 +5746,9 @@ dependencies = [ [[package]] name = "vise-exporter" -version = "0.1.0" +version = "0.2.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "23981b18d697026f5430249ab01ba739ef2edc463e400042394331cb2bb63494" +checksum = "671d3b894d5d0849f0a597f56bf071f42d4f2a1cbcf2f78ca21f870ab7c0cc2b" dependencies = [ "hyper 0.14.29", "once_cell", @@ -5769,9 +5759,9 @@ dependencies = [ [[package]] name = "vise-macros" -version = "0.1.0" +version = "0.2.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8bb19c33cd5f04dcf4e767635e058a998edbc2b7fca32ade0a4a1cea0f8e9b34" +checksum = "6a511871dc5de990a3b2a0e715facfbc5da848c0c0395597a1415029fb7c250a" dependencies = [ "proc-macro2", "quote", @@ -6347,9 +6337,9 @@ dependencies = [ [[package]] name = "zksync_concurrency" -version = "0.1.0-rc.8" +version = "0.1.0-rc.9" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "209b83578357184ab72af4d4cb2eca76f85e5f2f35d739a47e3fd5931eb9252d" +checksum = "a9f9a4352244ccd5e5fd34fb0d029861a5f57b05c80fe7944a7b532f54c58f89" dependencies = [ "anyhow", "once_cell", @@ -6381,9 +6371,9 @@ dependencies = [ [[package]] name = "zksync_consensus_utils" -version = "0.1.0-rc.8" +version = "0.1.0-rc.9" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c4514629a34abdf943ef911c16228dfec656edb02d8412db4febd4df5ccf3f91" +checksum = "6c43283b5813fd887e0e7ccaee73c6e41907b1de311a3a01b2fa5f2e3f2ba503" dependencies = [ 
"anyhow", "rand", @@ -6432,9 +6422,9 @@ dependencies = [ [[package]] name = "zksync_protobuf" -version = "0.1.0-rc.8" +version = "0.1.0-rc.9" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "53128384270314bfbd4e044c15138af63cb3a505ab95bb3339f3b866ccbe211c" +checksum = "b5db598a518958b244aed5e3f925c763808429a5ea022bb50957b98e68540495" dependencies = [ "anyhow", "bit-vec", @@ -6453,9 +6443,9 @@ dependencies = [ [[package]] name = "zksync_protobuf_build" -version = "0.1.0-rc.8" +version = "0.1.0-rc.9" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0d7dfb4dcdd48ab5fa1ccff25f585d73b58cf95e0fb74e96618dd666f198a005" +checksum = "4047ed624c7a19e206125f8259f7e175ad70020beeb66e1975e068af060d2fb5" dependencies = [ "anyhow", "heck", diff --git a/zk_toolbox/Cargo.toml b/zk_toolbox/Cargo.toml index 5a08b56cce7d..a8b6633e0360 100644 --- a/zk_toolbox/Cargo.toml +++ b/zk_toolbox/Cargo.toml @@ -30,7 +30,7 @@ types = { path = "crates/types" } zksync_config = { path = "../core/lib/config" } zksync_protobuf_config = { path = "../core/lib/protobuf_config" } zksync_basic_types = { path = "../core/lib/basic_types" } -zksync_protobuf = "=0.1.0-rc.8" +zksync_protobuf = "=0.1.0-rc.9" # External dependencies anyhow = "1.0.82"