From e241949a3205fe6f81551808609adafd66775102 Mon Sep 17 00:00:00 2001 From: Harald Hoyer Date: Wed, 2 Oct 2024 08:51:04 +0200 Subject: [PATCH 01/31] fix(tee_verifier): correctly initialize storage for re-execution Previously, the storage for VM re-execution was initialized only from `WitnessInputMerklePaths`. However, this misses the storage values of slots that are read or written solely by rolled-back transactions. With this commit, the TEE verifier uses the `WitnessStorageState` of `VMRunWitnessInputData` to initialize the storage. This requires waiting for the BasicWitnessInputProducer to complete and makes the dedicated TEE verifier input producer obsolete. The input for the TEE verifier is now collected in the `proof_data_handler`, which makes it possible to remove the whole job queue for the TEE verifier input producer. Co-authored-by: Patrick Beza Signed-off-by: Harald Hoyer --- Cargo.lock | 25 +- Cargo.toml | 2 - core/bin/zksync_server/src/main.rs | 2 +- core/bin/zksync_server/src/node_builder.rs | 12 - ...ad2574cd1310dff1d1bf06825d5634ba25f04.json | 30 -- ...94ec52b3eb68c346492a8fed98f20f2a0381d.json | 36 --- ...62af196d586cc08ea0f23d2c568527e94b41d.json | 12 - ...e494ce1d8e3b6cfb0b897745fb596f283be79.json | 52 ---- ...6f1a607a0bcc6864490c5961dd4e2ee12ed78.json | 22 -- ...5aea6710351dea1f1e57d73447476c3fcd199.json | 28 -- ...3c39e774c405508e73e77cdd0c01f924c97c0.json | 40 --- ...c3eb3b46aacc51e93b046d2549763115e75a2.json | 26 ++ ...729b9149fee37c5ef7d69e259ee33cb8ca860.json | 65 ----- ...9542f0dba42101b32e026751362e169381662.json | 22 ++ ...e_tee_verifier_input_producer_job.down.sql | 18 ++ ...ove_tee_verifier_input_producer_job.up.sql | 3 + core/lib/dal/src/lib.rs | 10 +- core/lib/dal/src/tee_proof_generation_dal.rs | 108 +++++--- .../src/tee_verifier_input_producer_dal.rs | 234 ---------------- core/lib/object_store/src/file.rs | 1 - core/lib/object_store/src/raw.rs | 2 - core/lib/prover_interface/src/inputs.rs | 35 +-- core/lib/tee_verifier/Cargo.toml | 7 +- core/lib/tee_verifier/src/lib.rs | 169 +++++++----- .../types/src/storage/witness_block_state.rs | 2 +- core/lib/zksync_core_leftovers/src/lib.rs | 6 - .../api_server/src/web3/tests/unstable.rs | 7 +- core/node/metadata_calculator/src/updater.rs | 4 - core/node/node_framework/Cargo.toml | 1 - .../src/implementations/layers/mod.rs | 1 - .../layers/tee_verifier_input_producer.rs | 69 ----- core/node/proof_data_handler/Cargo.toml | 2 + core/node/proof_data_handler/src/errors.rs | 8 + .../src/tee_request_processor.rs | 81 +++++- core/node/proof_data_handler/src/tests.rs | 145 +++------- core/node/shared_metrics/src/lib.rs | 2 - .../tee_verifier_input_producer/Cargo.toml | 27 -- .../tee_verifier_input_producer/README.md | 3 - .../tee_verifier_input_producer/src/lib.rs | 261 ------------------ .../src/metrics.rs | 18 -- core/node/vm_runner/src/impls/bwip.rs | 2 + etc/env/base/rust.toml | 3 +- etc/env/file_based/general.yaml | 2 +- prover/docs/03_launch.md | 2 +- 44 files changed, 387 insertions(+), 1220 deletions(-) delete mode 100644 core/lib/dal/.sqlx/query-0fbdf8da9a000c433c5475d57f6ad2574cd1310dff1d1bf06825d5634ba25f04.json delete mode 100644 core/lib/dal/.sqlx/query-2589f98459979f776ce5fd6e3ff94ec52b3eb68c346492a8fed98f20f2a0381d.json delete mode 100644 core/lib/dal/.sqlx/query-2ffa321700ef1f70a1c3f516f3162af196d586cc08ea0f23d2c568527e94b41d.json delete mode 100644 core/lib/dal/.sqlx/query-3010d02e02391732c622a7515e1e494ce1d8e3b6cfb0b897745fb596f283be79.json delete mode 100644
core/lib/dal/.sqlx/query-5c7409ff9e413e7684cea5df6046f1a607a0bcc6864490c5961dd4e2ee12ed78.json delete mode 100644 core/lib/dal/.sqlx/query-6307b42717586a13ac8bc30fc3e5aea6710351dea1f1e57d73447476c3fcd199.json delete mode 100644 core/lib/dal/.sqlx/query-aa68653ef4ff7e0cfddfc0a7c8f3c39e774c405508e73e77cdd0c01f924c97c0.json create mode 100644 core/lib/dal/.sqlx/query-d082522ca2316233c0d257e80ccc3eb3b46aacc51e93b046d2549763115e75a2.json delete mode 100644 core/lib/dal/.sqlx/query-d89eabd1edb4ee4657249f9f4ab729b9149fee37c5ef7d69e259ee33cb8ca860.json create mode 100644 core/lib/dal/.sqlx/query-fb7c16bff10a6e7b4ff1c5975d79542f0dba42101b32e026751362e169381662.json create mode 100644 core/lib/dal/migrations/20241001110000_remove_tee_verifier_input_producer_job.down.sql create mode 100644 core/lib/dal/migrations/20241001110000_remove_tee_verifier_input_producer_job.up.sql delete mode 100644 core/lib/dal/src/tee_verifier_input_producer_dal.rs delete mode 100644 core/node/node_framework/src/implementations/layers/tee_verifier_input_producer.rs delete mode 100644 core/node/tee_verifier_input_producer/Cargo.toml delete mode 100644 core/node/tee_verifier_input_producer/README.md delete mode 100644 core/node/tee_verifier_input_producer/src/lib.rs delete mode 100644 core/node/tee_verifier_input_producer/src/metrics.rs diff --git a/Cargo.lock b/Cargo.lock index 0873faae904c..e1a5a519a8e8 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -10681,7 +10681,6 @@ dependencies = [ "zksync_state", "zksync_state_keeper", "zksync_storage", - "zksync_tee_verifier_input_producer", "zksync_types", "zksync_utils", "zksync_vlog", @@ -10849,6 +10848,8 @@ dependencies = [ "zksync_object_store", "zksync_prover_interface", "zksync_types", + "zksync_utils", + "zksync_vm_executor", ] [[package]] @@ -11157,6 +11158,8 @@ name = "zksync_tee_verifier" version = "0.1.0" dependencies = [ "anyhow", + "bincode", + "once_cell", "serde", "tracing", "zksync_config", @@ -11164,31 +11167,11 @@ dependencies = [ "zksync_crypto_primitives", "zksync_merkle_tree", "zksync_multivm", - "zksync_object_store", "zksync_prover_interface", "zksync_types", "zksync_utils", ] -[[package]] -name = "zksync_tee_verifier_input_producer" -version = "0.1.0" -dependencies = [ - "anyhow", - "async-trait", - "tokio", - "tracing", - "vise", - "zksync_dal", - "zksync_object_store", - "zksync_prover_interface", - "zksync_queued_job_processor", - "zksync_tee_verifier", - "zksync_types", - "zksync_utils", - "zksync_vm_executor", -] - [[package]] name = "zksync_test_account" version = "0.1.0" diff --git a/Cargo.toml b/Cargo.toml index 94fadb25968a..fc79f43c5774 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -35,7 +35,6 @@ members = [ "core/node/consensus", "core/node/contract_verification_server", "core/node/api_server", - "core/node/tee_verifier_input_producer", "core/node/base_token_adjuster", "core/node/external_proof_integration_api", "core/node/logs_bloom_backfill", @@ -307,6 +306,5 @@ zksync_node_storage_init = { version = "0.1.0", path = "core/node/node_storage_i zksync_node_consensus = { version = "0.1.0", path = "core/node/consensus" } zksync_contract_verification_server = { version = "0.1.0", path = "core/node/contract_verification_server" } zksync_node_api_server = { version = "0.1.0", path = "core/node/api_server" } -zksync_tee_verifier_input_producer = { version = "0.1.0", path = "core/node/tee_verifier_input_producer" } zksync_base_token_adjuster = { version = "0.1.0", path = "core/node/base_token_adjuster" } zksync_logs_bloom_backfill = { version = "0.1.0", path = 
"core/node/logs_bloom_backfill" } diff --git a/core/bin/zksync_server/src/main.rs b/core/bin/zksync_server/src/main.rs index da0a93f624df..9e1a1b5948c7 100644 --- a/core/bin/zksync_server/src/main.rs +++ b/core/bin/zksync_server/src/main.rs @@ -45,7 +45,7 @@ struct Cli { /// Comma-separated list of components to launch. #[arg( long, - default_value = "api,tree,eth,state_keeper,housekeeper,tee_verifier_input_producer,commitment_generator,da_dispatcher,vm_runner_protective_reads" + default_value = "api,tree,eth,state_keeper,housekeeper,commitment_generator,da_dispatcher,vm_runner_protective_reads" )] components: ComponentsToRun, /// Path to the yaml config. If set, it will be used instead of env vars. diff --git a/core/bin/zksync_server/src/node_builder.rs b/core/bin/zksync_server/src/node_builder.rs index 4600b0f9e543..42a49433d477 100644 --- a/core/bin/zksync_server/src/node_builder.rs +++ b/core/bin/zksync_server/src/node_builder.rs @@ -57,7 +57,6 @@ use zksync_node_framework::{ main_batch_executor::MainBatchExecutorLayer, mempool_io::MempoolIOLayer, output_handler::OutputHandlerLayer, RocksdbStorageOptions, StateKeeperLayer, }, - tee_verifier_input_producer::TeeVerifierInputProducerLayer, vm_runner::{ bwip::BasicWitnessInputProducerLayer, playground::VmPlaygroundLayer, protective_reads::ProtectiveReadsWriterLayer, @@ -494,14 +493,6 @@ impl MainNodeBuilder { Ok(self) } - fn add_tee_verifier_input_producer_layer(mut self) -> anyhow::Result { - self.node.add_layer(TeeVerifierInputProducerLayer::new( - self.genesis_config.l2_chain_id, - )); - - Ok(self) - } - fn add_da_client_layer(mut self) -> anyhow::Result { let Some(da_client_config) = self.configs.da_client_config.clone() else { tracing::warn!("No config for DA client, using the NoDA client"); @@ -744,9 +735,6 @@ impl MainNodeBuilder { Component::EthTxManager => { self = self.add_eth_tx_manager_layer()?; } - Component::TeeVerifierInputProducer => { - self = self.add_tee_verifier_input_producer_layer()?; - } Component::Housekeeper => { self = self .add_house_keeper_layer()? 
diff --git a/core/lib/dal/.sqlx/query-0fbdf8da9a000c433c5475d57f6ad2574cd1310dff1d1bf06825d5634ba25f04.json b/core/lib/dal/.sqlx/query-0fbdf8da9a000c433c5475d57f6ad2574cd1310dff1d1bf06825d5634ba25f04.json deleted file mode 100644 index 05b94ad249ac..000000000000 --- a/core/lib/dal/.sqlx/query-0fbdf8da9a000c433c5475d57f6ad2574cd1310dff1d1bf06825d5634ba25f04.json +++ /dev/null @@ -1,30 +0,0 @@ -{ - "db_name": "PostgreSQL", - "query": "\n UPDATE tee_verifier_input_producer_jobs\n SET\n status = $1,\n updated_at = NOW(),\n time_taken = $3,\n input_blob_url = $4\n WHERE\n l1_batch_number = $2\n ", - "describe": { - "columns": [], - "parameters": { - "Left": [ - { - "Custom": { - "name": "tee_verifier_input_producer_job_status", - "kind": { - "Enum": [ - "Queued", - "ManuallySkipped", - "InProgress", - "Successful", - "Failed" - ] - } - } - }, - "Int8", - "Time", - "Text" - ] - }, - "nullable": [] - }, - "hash": "0fbdf8da9a000c433c5475d57f6ad2574cd1310dff1d1bf06825d5634ba25f04" -} diff --git a/core/lib/dal/.sqlx/query-2589f98459979f776ce5fd6e3ff94ec52b3eb68c346492a8fed98f20f2a0381d.json b/core/lib/dal/.sqlx/query-2589f98459979f776ce5fd6e3ff94ec52b3eb68c346492a8fed98f20f2a0381d.json deleted file mode 100644 index 9d8cc36189fc..000000000000 --- a/core/lib/dal/.sqlx/query-2589f98459979f776ce5fd6e3ff94ec52b3eb68c346492a8fed98f20f2a0381d.json +++ /dev/null @@ -1,36 +0,0 @@ -{ - "db_name": "PostgreSQL", - "query": "\n SELECT\n proofs.l1_batch_number\n FROM\n tee_proof_generation_details AS proofs\n JOIN\n tee_verifier_input_producer_jobs AS inputs\n ON proofs.l1_batch_number = inputs.l1_batch_number\n WHERE\n inputs.status = $1\n AND proofs.status = $2\n ORDER BY\n proofs.l1_batch_number ASC\n LIMIT\n 1\n ", - "describe": { - "columns": [ - { - "ordinal": 0, - "name": "l1_batch_number", - "type_info": "Int8" - } - ], - "parameters": { - "Left": [ - { - "Custom": { - "name": "tee_verifier_input_producer_job_status", - "kind": { - "Enum": [ - "Queued", - "ManuallySkipped", - "InProgress", - "Successful", - "Failed" - ] - } - } - }, - "Text" - ] - }, - "nullable": [ - false - ] - }, - "hash": "2589f98459979f776ce5fd6e3ff94ec52b3eb68c346492a8fed98f20f2a0381d" -} diff --git a/core/lib/dal/.sqlx/query-2ffa321700ef1f70a1c3f516f3162af196d586cc08ea0f23d2c568527e94b41d.json b/core/lib/dal/.sqlx/query-2ffa321700ef1f70a1c3f516f3162af196d586cc08ea0f23d2c568527e94b41d.json deleted file mode 100644 index a273eb249a4e..000000000000 --- a/core/lib/dal/.sqlx/query-2ffa321700ef1f70a1c3f516f3162af196d586cc08ea0f23d2c568527e94b41d.json +++ /dev/null @@ -1,12 +0,0 @@ -{ - "db_name": "PostgreSQL", - "query": "\n DELETE FROM tee_verifier_input_producer_jobs\n ", - "describe": { - "columns": [], - "parameters": { - "Left": [] - }, - "nullable": [] - }, - "hash": "2ffa321700ef1f70a1c3f516f3162af196d586cc08ea0f23d2c568527e94b41d" -} diff --git a/core/lib/dal/.sqlx/query-3010d02e02391732c622a7515e1e494ce1d8e3b6cfb0b897745fb596f283be79.json b/core/lib/dal/.sqlx/query-3010d02e02391732c622a7515e1e494ce1d8e3b6cfb0b897745fb596f283be79.json deleted file mode 100644 index 6012c6326515..000000000000 --- a/core/lib/dal/.sqlx/query-3010d02e02391732c622a7515e1e494ce1d8e3b6cfb0b897745fb596f283be79.json +++ /dev/null @@ -1,52 +0,0 @@ -{ - "db_name": "PostgreSQL", - "query": "\n UPDATE tee_verifier_input_producer_jobs\n SET\n status = $1,\n updated_at = NOW(),\n time_taken = $3,\n error = $4\n WHERE\n l1_batch_number = $2\n AND status != $5\n RETURNING\n tee_verifier_input_producer_jobs.attempts\n ", - "describe": { - "columns": [ - { - 
"ordinal": 0, - "name": "attempts", - "type_info": "Int2" - } - ], - "parameters": { - "Left": [ - { - "Custom": { - "name": "tee_verifier_input_producer_job_status", - "kind": { - "Enum": [ - "Queued", - "ManuallySkipped", - "InProgress", - "Successful", - "Failed" - ] - } - } - }, - "Int8", - "Time", - "Text", - { - "Custom": { - "name": "tee_verifier_input_producer_job_status", - "kind": { - "Enum": [ - "Queued", - "ManuallySkipped", - "InProgress", - "Successful", - "Failed" - ] - } - } - } - ] - }, - "nullable": [ - false - ] - }, - "hash": "3010d02e02391732c622a7515e1e494ce1d8e3b6cfb0b897745fb596f283be79" -} diff --git a/core/lib/dal/.sqlx/query-5c7409ff9e413e7684cea5df6046f1a607a0bcc6864490c5961dd4e2ee12ed78.json b/core/lib/dal/.sqlx/query-5c7409ff9e413e7684cea5df6046f1a607a0bcc6864490c5961dd4e2ee12ed78.json deleted file mode 100644 index f34c4a548cb3..000000000000 --- a/core/lib/dal/.sqlx/query-5c7409ff9e413e7684cea5df6046f1a607a0bcc6864490c5961dd4e2ee12ed78.json +++ /dev/null @@ -1,22 +0,0 @@ -{ - "db_name": "PostgreSQL", - "query": "\n SELECT\n attempts\n FROM\n tee_verifier_input_producer_jobs\n WHERE\n l1_batch_number = $1\n ", - "describe": { - "columns": [ - { - "ordinal": 0, - "name": "attempts", - "type_info": "Int2" - } - ], - "parameters": { - "Left": [ - "Int8" - ] - }, - "nullable": [ - false - ] - }, - "hash": "5c7409ff9e413e7684cea5df6046f1a607a0bcc6864490c5961dd4e2ee12ed78" -} diff --git a/core/lib/dal/.sqlx/query-6307b42717586a13ac8bc30fc3e5aea6710351dea1f1e57d73447476c3fcd199.json b/core/lib/dal/.sqlx/query-6307b42717586a13ac8bc30fc3e5aea6710351dea1f1e57d73447476c3fcd199.json deleted file mode 100644 index 01ede1d8643a..000000000000 --- a/core/lib/dal/.sqlx/query-6307b42717586a13ac8bc30fc3e5aea6710351dea1f1e57d73447476c3fcd199.json +++ /dev/null @@ -1,28 +0,0 @@ -{ - "db_name": "PostgreSQL", - "query": "\n INSERT INTO\n tee_verifier_input_producer_jobs (\n l1_batch_number, status, created_at, updated_at\n )\n VALUES\n ($1, $2, NOW(), NOW())\n ON CONFLICT (l1_batch_number) DO NOTHING\n ", - "describe": { - "columns": [], - "parameters": { - "Left": [ - "Int8", - { - "Custom": { - "name": "tee_verifier_input_producer_job_status", - "kind": { - "Enum": [ - "Queued", - "ManuallySkipped", - "InProgress", - "Successful", - "Failed" - ] - } - } - } - ] - }, - "nullable": [] - }, - "hash": "6307b42717586a13ac8bc30fc3e5aea6710351dea1f1e57d73447476c3fcd199" -} diff --git a/core/lib/dal/.sqlx/query-aa68653ef4ff7e0cfddfc0a7c8f3c39e774c405508e73e77cdd0c01f924c97c0.json b/core/lib/dal/.sqlx/query-aa68653ef4ff7e0cfddfc0a7c8f3c39e774c405508e73e77cdd0c01f924c97c0.json deleted file mode 100644 index b17b58282110..000000000000 --- a/core/lib/dal/.sqlx/query-aa68653ef4ff7e0cfddfc0a7c8f3c39e774c405508e73e77cdd0c01f924c97c0.json +++ /dev/null @@ -1,40 +0,0 @@ -{ - "db_name": "PostgreSQL", - "query": "\n UPDATE tee_proof_generation_details\n SET\n status = $1,\n updated_at = NOW(),\n prover_taken_at = NOW()\n WHERE\n tee_type = $2\n AND l1_batch_number = (\n SELECT\n proofs.l1_batch_number\n FROM\n tee_proof_generation_details AS proofs\n JOIN\n tee_verifier_input_producer_jobs AS inputs\n ON proofs.l1_batch_number = inputs.l1_batch_number\n WHERE\n inputs.status = $3\n AND (\n proofs.status = $4\n OR (\n proofs.status = $1\n AND proofs.prover_taken_at < NOW() - $5::INTERVAL\n )\n )\n AND proofs.l1_batch_number >= $6\n ORDER BY\n l1_batch_number ASC\n LIMIT\n 1\n FOR UPDATE\n SKIP LOCKED\n )\n RETURNING\n tee_proof_generation_details.l1_batch_number\n ", - "describe": { - "columns": [ - 
{ - "ordinal": 0, - "name": "l1_batch_number", - "type_info": "Int8" - } - ], - "parameters": { - "Left": [ - "Text", - "Text", - { - "Custom": { - "name": "tee_verifier_input_producer_job_status", - "kind": { - "Enum": [ - "Queued", - "ManuallySkipped", - "InProgress", - "Successful", - "Failed" - ] - } - } - }, - "Text", - "Interval", - "Int8" - ] - }, - "nullable": [ - false - ] - }, - "hash": "aa68653ef4ff7e0cfddfc0a7c8f3c39e774c405508e73e77cdd0c01f924c97c0" -} diff --git a/core/lib/dal/.sqlx/query-d082522ca2316233c0d257e80ccc3eb3b46aacc51e93b046d2549763115e75a2.json b/core/lib/dal/.sqlx/query-d082522ca2316233c0d257e80ccc3eb3b46aacc51e93b046d2549763115e75a2.json new file mode 100644 index 000000000000..0d4e44e48858 --- /dev/null +++ b/core/lib/dal/.sqlx/query-d082522ca2316233c0d257e80ccc3eb3b46aacc51e93b046d2549763115e75a2.json @@ -0,0 +1,26 @@ +{ + "db_name": "PostgreSQL", + "query": "\n WITH\n upsert AS (\n SELECT\n proof_generation_details.l1_batch_number\n FROM\n proof_generation_details\n LEFT JOIN\n l1_batches\n ON proof_generation_details.l1_batch_number = l1_batches.number\n LEFT JOIN\n tee_proof_generation_details AS proofs\n ON proof_generation_details.l1_batch_number = proofs.l1_batch_number\n WHERE\n (\n proof_generation_details.vm_run_data_blob_url IS NOT NULL\n AND proof_generation_details.proof_gen_data_blob_url IS NOT NULL\n AND l1_batches.hash IS NOT NULL\n AND l1_batches.aux_data_hash IS NOT NULL\n AND l1_batches.meta_parameters_hash IS NOT NULL\n )\n AND (\n proofs.tee_type IS NULL\n OR proofs.tee_type = $1\n )\n AND (\n proofs.status IS NULL\n OR proofs.status = $3\n OR (\n proofs.status = $2\n AND proofs.prover_taken_at < NOW() - $4::INTERVAL\n )\n )\n AND (\n proofs.l1_batch_number IS NULL\n OR proofs.l1_batch_number >= $5\n )\n ORDER BY\n l1_batch_number ASC\n LIMIT\n 1\n )\n \n INSERT INTO\n tee_proof_generation_details (\n l1_batch_number, tee_type, status, created_at, updated_at, prover_taken_at\n )\n SELECT\n l1_batch_number,\n $1,\n $2,\n NOW(),\n NOW(),\n NOW()\n FROM\n upsert\n ON CONFLICT (l1_batch_number, tee_type) DO\n UPDATE\n SET\n status = $2,\n updated_at = NOW(),\n prover_taken_at = NOW()\n RETURNING\n l1_batch_number\n ", + "describe": { + "columns": [ + { + "ordinal": 0, + "name": "l1_batch_number", + "type_info": "Int8" + } + ], + "parameters": { + "Left": [ + "Text", + "Text", + "Text", + "Interval", + "Int8" + ] + }, + "nullable": [ + false + ] + }, + "hash": "d082522ca2316233c0d257e80ccc3eb3b46aacc51e93b046d2549763115e75a2" +} diff --git a/core/lib/dal/.sqlx/query-d89eabd1edb4ee4657249f9f4ab729b9149fee37c5ef7d69e259ee33cb8ca860.json b/core/lib/dal/.sqlx/query-d89eabd1edb4ee4657249f9f4ab729b9149fee37c5ef7d69e259ee33cb8ca860.json deleted file mode 100644 index fa1a5d6741ad..000000000000 --- a/core/lib/dal/.sqlx/query-d89eabd1edb4ee4657249f9f4ab729b9149fee37c5ef7d69e259ee33cb8ca860.json +++ /dev/null @@ -1,65 +0,0 @@ -{ - "db_name": "PostgreSQL", - "query": "\n UPDATE tee_verifier_input_producer_jobs\n SET\n status = $1,\n attempts = attempts + 1,\n updated_at = NOW(),\n processing_started_at = NOW()\n WHERE\n l1_batch_number = (\n SELECT\n l1_batch_number\n FROM\n tee_verifier_input_producer_jobs\n WHERE\n status = $2\n OR (\n status = $1\n AND processing_started_at < NOW() - $4::INTERVAL\n )\n OR (\n status = $3\n AND attempts < $5\n )\n ORDER BY\n l1_batch_number ASC\n LIMIT\n 1\n FOR UPDATE\n SKIP LOCKED\n )\n RETURNING\n tee_verifier_input_producer_jobs.l1_batch_number\n ", - "describe": { - "columns": [ - { - "ordinal": 0, - "name": 
"l1_batch_number", - "type_info": "Int8" - } - ], - "parameters": { - "Left": [ - { - "Custom": { - "name": "tee_verifier_input_producer_job_status", - "kind": { - "Enum": [ - "Queued", - "ManuallySkipped", - "InProgress", - "Successful", - "Failed" - ] - } - } - }, - { - "Custom": { - "name": "tee_verifier_input_producer_job_status", - "kind": { - "Enum": [ - "Queued", - "ManuallySkipped", - "InProgress", - "Successful", - "Failed" - ] - } - } - }, - { - "Custom": { - "name": "tee_verifier_input_producer_job_status", - "kind": { - "Enum": [ - "Queued", - "ManuallySkipped", - "InProgress", - "Successful", - "Failed" - ] - } - } - }, - "Interval", - "Int2" - ] - }, - "nullable": [ - false - ] - }, - "hash": "d89eabd1edb4ee4657249f9f4ab729b9149fee37c5ef7d69e259ee33cb8ca860" -} diff --git a/core/lib/dal/.sqlx/query-fb7c16bff10a6e7b4ff1c5975d79542f0dba42101b32e026751362e169381662.json b/core/lib/dal/.sqlx/query-fb7c16bff10a6e7b4ff1c5975d79542f0dba42101b32e026751362e169381662.json new file mode 100644 index 000000000000..12e28266fbcc --- /dev/null +++ b/core/lib/dal/.sqlx/query-fb7c16bff10a6e7b4ff1c5975d79542f0dba42101b32e026751362e169381662.json @@ -0,0 +1,22 @@ +{ + "db_name": "PostgreSQL", + "query": "\n SELECT\n proofs.l1_batch_number\n FROM\n tee_proof_generation_details AS proofs\n WHERE\n proofs.status = $1\n ORDER BY\n proofs.l1_batch_number ASC\n LIMIT\n 1\n ", + "describe": { + "columns": [ + { + "ordinal": 0, + "name": "l1_batch_number", + "type_info": "Int8" + } + ], + "parameters": { + "Left": [ + "Text" + ] + }, + "nullable": [ + false + ] + }, + "hash": "fb7c16bff10a6e7b4ff1c5975d79542f0dba42101b32e026751362e169381662" +} diff --git a/core/lib/dal/migrations/20241001110000_remove_tee_verifier_input_producer_job.down.sql b/core/lib/dal/migrations/20241001110000_remove_tee_verifier_input_producer_job.down.sql new file mode 100644 index 000000000000..120b19f6f0af --- /dev/null +++ b/core/lib/dal/migrations/20241001110000_remove_tee_verifier_input_producer_job.down.sql @@ -0,0 +1,18 @@ +CREATE TABLE tee_proof_generation_details ( + l1_batch_number BIGINT NOT NULL, + status TEXT NOT NULL, + signature BYTEA, + pubkey BYTEA, + proof BYTEA, + tee_type TEXT NOT NULL, + created_at TIMESTAMP NOT NULL, + updated_at TIMESTAMP NOT NULL, + prover_taken_at TIMESTAMP, + PRIMARY KEY (l1_batch_number, tee_type), + CONSTRAINT tee_proof_generation_details_l1_batch_number_fkey FOREIGN KEY (l1_batch_number) REFERENCES tee_verifier_input_producer_jobs(l1_batch_number) ON DELETE CASCADE, + CONSTRAINT tee_proof_generation_details_pubkey_fkey FOREIGN KEY (pubkey) REFERENCES tee_attestations(pubkey) ON DELETE SET NULL +); + +CREATE INDEX idx_tee_proof_generation_details_status_prover_taken_at + ON tee_proof_generation_details (prover_taken_at) + WHERE status = 'picked_by_prover'; diff --git a/core/lib/dal/migrations/20241001110000_remove_tee_verifier_input_producer_job.up.sql b/core/lib/dal/migrations/20241001110000_remove_tee_verifier_input_producer_job.up.sql new file mode 100644 index 000000000000..c2417ba86b3b --- /dev/null +++ b/core/lib/dal/migrations/20241001110000_remove_tee_verifier_input_producer_job.up.sql @@ -0,0 +1,3 @@ +ALTER TABLE tee_proof_generation_details DROP CONSTRAINT tee_proof_generation_details_l1_batch_number_fkey; + +DROP TABLE IF EXISTS tee_verifier_input_producer_jobs; diff --git a/core/lib/dal/src/lib.rs b/core/lib/dal/src/lib.rs index f0d2f0c16711..fbe225beb902 100644 --- a/core/lib/dal/src/lib.rs +++ b/core/lib/dal/src/lib.rs @@ -23,8 +23,7 @@ use crate::{ 
snapshots_dal::SnapshotsDal, storage_logs_dal::StorageLogsDal, storage_logs_dedup_dal::StorageLogsDedupDal, storage_web3_dal::StorageWeb3Dal, sync_dal::SyncDal, system_dal::SystemDal, tee_proof_generation_dal::TeeProofGenerationDal, - tee_verifier_input_producer_dal::TeeVerifierInputProducerDal, tokens_dal::TokensDal, - tokens_web3_dal::TokensWeb3Dal, transactions_dal::TransactionsDal, + tokens_dal::TokensDal, tokens_web3_dal::TokensWeb3Dal, transactions_dal::TransactionsDal, transactions_web3_dal::TransactionsWeb3Dal, vm_runner_dal::VmRunnerDal, }; @@ -56,7 +55,6 @@ pub mod storage_web3_dal; pub mod sync_dal; pub mod system_dal; pub mod tee_proof_generation_dal; -pub mod tee_verifier_input_producer_dal; pub mod tokens_dal; pub mod tokens_web3_dal; pub mod transactions_dal; @@ -81,8 +79,6 @@ where fn transactions_web3_dal(&mut self) -> TransactionsWeb3Dal<'_, 'a>; - fn tee_verifier_input_producer_dal(&mut self) -> TeeVerifierInputProducerDal<'_, 'a>; - fn blocks_dal(&mut self) -> BlocksDal<'_, 'a>; fn blocks_web3_dal(&mut self) -> BlocksWeb3Dal<'_, 'a>; @@ -155,10 +151,6 @@ impl<'a> CoreDal<'a> for Connection<'a, Core> { TransactionsWeb3Dal { storage: self } } - fn tee_verifier_input_producer_dal(&mut self) -> TeeVerifierInputProducerDal<'_, 'a> { - TeeVerifierInputProducerDal { storage: self } - } - fn blocks_dal(&mut self) -> BlocksDal<'_, 'a> { BlocksDal { storage: self } } diff --git a/core/lib/dal/src/tee_proof_generation_dal.rs b/core/lib/dal/src/tee_proof_generation_dal.rs index db56b9d0e3e7..8a7bb2863d41 100644 --- a/core/lib/dal/src/tee_proof_generation_dal.rs +++ b/core/lib/dal/src/tee_proof_generation_dal.rs @@ -10,10 +10,7 @@ use zksync_db_connection::{ }; use zksync_types::{tee_types::TeeType, L1BatchNumber}; -use crate::{ - models::storage_tee_proof::StorageTeeProof, - tee_verifier_input_producer_dal::TeeVerifierInputProducerJobStatus, Core, -}; +use crate::{models::storage_tee_proof::StorageTeeProof, Core}; #[derive(Debug)] pub struct TeeProofGenerationDal<'a, 'c> { @@ -41,44 +38,72 @@ impl TeeProofGenerationDal<'_, '_> { let min_batch_number = min_batch_number.map_or(0, |num| i64::from(num.0)); let query = sqlx::query!( r#" - UPDATE tee_proof_generation_details - SET - status = $1, - updated_at = NOW(), - prover_taken_at = NOW() - WHERE - tee_type = $2 - AND l1_batch_number = ( - SELECT - proofs.l1_batch_number - FROM - tee_proof_generation_details AS proofs - JOIN - tee_verifier_input_producer_jobs AS inputs - ON proofs.l1_batch_number = inputs.l1_batch_number - WHERE - inputs.status = $3 - AND ( - proofs.status = $4 - OR ( - proofs.status = $1 - AND proofs.prover_taken_at < NOW() - $5::INTERVAL - ) + WITH + upsert AS ( + SELECT + proof_generation_details.l1_batch_number + FROM + proof_generation_details + LEFT JOIN + l1_batches + ON proof_generation_details.l1_batch_number = l1_batches.number + LEFT JOIN + tee_proof_generation_details AS proofs + ON proof_generation_details.l1_batch_number = proofs.l1_batch_number + WHERE + ( + proof_generation_details.vm_run_data_blob_url IS NOT NULL + AND proof_generation_details.proof_gen_data_blob_url IS NOT NULL + AND l1_batches.hash IS NOT NULL + AND l1_batches.aux_data_hash IS NOT NULL + AND l1_batches.meta_parameters_hash IS NOT NULL + ) + AND ( + proofs.tee_type IS NULL + OR proofs.tee_type = $1 + ) + AND ( + proofs.status IS NULL + OR proofs.status = $3 + OR ( + proofs.status = $2 + AND proofs.prover_taken_at < NOW() - $4::INTERVAL ) - AND proofs.l1_batch_number >= $6 - ORDER BY - l1_batch_number ASC - LIMIT - 1 - FOR UPDATE - 
SKIP LOCKED - ) + ) + AND ( + proofs.l1_batch_number IS NULL + OR proofs.l1_batch_number >= $5 + ) + ORDER BY + l1_batch_number ASC + LIMIT + 1 + ) + + INSERT INTO + tee_proof_generation_details ( + l1_batch_number, tee_type, status, created_at, updated_at, prover_taken_at + ) + SELECT + l1_batch_number, + $1, + $2, + NOW(), + NOW(), + NOW() + FROM + upsert + ON CONFLICT (l1_batch_number, tee_type) DO + UPDATE + SET + status = $2, + updated_at = NOW(), + prover_taken_at = NOW() RETURNING - tee_proof_generation_details.l1_batch_number + l1_batch_number "#, - TeeProofGenerationJobStatus::PickedByProver.to_string(), tee_type.to_string(), - TeeVerifierInputProducerJobStatus::Successful as TeeVerifierInputProducerJobStatus, + TeeProofGenerationJobStatus::PickedByProver.to_string(), TeeProofGenerationJobStatus::Unpicked.to_string(), processing_timeout, min_batch_number @@ -278,18 +303,13 @@ impl TeeProofGenerationDal<'_, '_> { proofs.l1_batch_number FROM tee_proof_generation_details AS proofs - JOIN - tee_verifier_input_producer_jobs AS inputs - ON proofs.l1_batch_number = inputs.l1_batch_number WHERE - inputs.status = $1 - AND proofs.status = $2 + proofs.status = $1 ORDER BY proofs.l1_batch_number ASC LIMIT 1 "#, - TeeVerifierInputProducerJobStatus::Successful as TeeVerifierInputProducerJobStatus, TeeProofGenerationJobStatus::Unpicked.to_string(), ); let batch_number = Instrumented::new("get_oldest_unpicked_batch") diff --git a/core/lib/dal/src/tee_verifier_input_producer_dal.rs b/core/lib/dal/src/tee_verifier_input_producer_dal.rs deleted file mode 100644 index dddb451a2d7d..000000000000 --- a/core/lib/dal/src/tee_verifier_input_producer_dal.rs +++ /dev/null @@ -1,234 +0,0 @@ -use std::time::{Duration, Instant}; - -use sqlx::postgres::types::PgInterval; -use zksync_db_connection::{ - connection::Connection, - error::DalResult, - instrument::InstrumentExt, - utils::{duration_to_naive_time, pg_interval_from_duration}, -}; -use zksync_types::L1BatchNumber; - -use crate::Core; - -#[derive(Debug)] -pub struct TeeVerifierInputProducerDal<'a, 'c> { - pub(crate) storage: &'a mut Connection<'c, Core>, -} - -/// The amount of attempts to process a job before giving up. -pub const JOB_MAX_ATTEMPT: i16 = 5; - -/// Time to wait for job to be processed -const JOB_PROCESSING_TIMEOUT: PgInterval = pg_interval_from_duration(Duration::from_secs(10 * 60)); - -/// Status of a job that the producer will work on. - -#[derive(Debug, sqlx::Type)] -#[sqlx(type_name = "tee_verifier_input_producer_job_status")] -pub enum TeeVerifierInputProducerJobStatus { - /// When the job is queued. Metadata calculator creates the job and marks it as queued. - Queued, - /// The job is not going to be processed. This state is designed for manual operations on DB. - /// It is expected to be used if some jobs should be skipped like: - /// - testing purposes (want to check a specific L1 Batch, I can mark everything before it skipped) - /// - trim down costs on some environments (if I've done breaking changes, - /// makes no sense to wait for everything to be processed, I can just skip them and save resources) - ManuallySkipped, - /// Currently being processed by one of the jobs. Transitory state, will transition to either - /// [`TeeVerifierInputProducerStatus::Successful`] or [`TeeVerifierInputProducerStatus::Failed`]. - InProgress, - /// The final (happy case) state we expect all jobs to end up. After the run is complete, - /// the job uploaded it's inputs, it lands in successful. - Successful, - /// The job failed for reasons. 
It will be marked as such and the error persisted in DB. - /// If it failed less than MAX_ATTEMPTs, the job will be retried, - /// otherwise it will stay in this state as final state. - Failed, -} - -impl TeeVerifierInputProducerDal<'_, '_> { - pub async fn create_tee_verifier_input_producer_job( - &mut self, - l1_batch_number: L1BatchNumber, - ) -> DalResult<()> { - sqlx::query!( - r#" - INSERT INTO - tee_verifier_input_producer_jobs ( - l1_batch_number, status, created_at, updated_at - ) - VALUES - ($1, $2, NOW(), NOW()) - ON CONFLICT (l1_batch_number) DO NOTHING - "#, - i64::from(l1_batch_number.0), - TeeVerifierInputProducerJobStatus::Queued as TeeVerifierInputProducerJobStatus, - ) - .instrument("create_tee_verifier_input_producer_job") - .with_arg("l1_batch_number", &l1_batch_number) - .report_latency() - .execute(self.storage) - .await?; - - Ok(()) - } - - pub async fn get_next_tee_verifier_input_producer_job( - &mut self, - ) -> DalResult> { - let l1_batch_number = sqlx::query!( - r#" - UPDATE tee_verifier_input_producer_jobs - SET - status = $1, - attempts = attempts + 1, - updated_at = NOW(), - processing_started_at = NOW() - WHERE - l1_batch_number = ( - SELECT - l1_batch_number - FROM - tee_verifier_input_producer_jobs - WHERE - status = $2 - OR ( - status = $1 - AND processing_started_at < NOW() - $4::INTERVAL - ) - OR ( - status = $3 - AND attempts < $5 - ) - ORDER BY - l1_batch_number ASC - LIMIT - 1 - FOR UPDATE - SKIP LOCKED - ) - RETURNING - tee_verifier_input_producer_jobs.l1_batch_number - "#, - TeeVerifierInputProducerJobStatus::InProgress as TeeVerifierInputProducerJobStatus, - TeeVerifierInputProducerJobStatus::Queued as TeeVerifierInputProducerJobStatus, - TeeVerifierInputProducerJobStatus::Failed as TeeVerifierInputProducerJobStatus, - &JOB_PROCESSING_TIMEOUT, - JOB_MAX_ATTEMPT, - ) - .instrument("get_next_tee_verifier_input_producer_job") - .report_latency() - .fetch_optional(self.storage) - .await? - .map(|job| L1BatchNumber(job.l1_batch_number as u32)); - - Ok(l1_batch_number) - } - - pub async fn get_tee_verifier_input_producer_job_attempts( - &mut self, - l1_batch_number: L1BatchNumber, - ) -> DalResult> { - let attempts = sqlx::query!( - r#" - SELECT - attempts - FROM - tee_verifier_input_producer_jobs - WHERE - l1_batch_number = $1 - "#, - i64::from(l1_batch_number.0), - ) - .instrument("get_tee_verifier_input_producer_job_attempts") - .with_arg("l1_batch_number", &l1_batch_number) - .fetch_optional(self.storage) - .await? 
- .map(|job| job.attempts as u32); - - Ok(attempts) - } - - pub async fn mark_job_as_successful( - &mut self, - l1_batch_number: L1BatchNumber, - started_at: Instant, - object_path: &str, - ) -> DalResult<()> { - sqlx::query!( - r#" - UPDATE tee_verifier_input_producer_jobs - SET - status = $1, - updated_at = NOW(), - time_taken = $3, - input_blob_url = $4 - WHERE - l1_batch_number = $2 - "#, - TeeVerifierInputProducerJobStatus::Successful as TeeVerifierInputProducerJobStatus, - i64::from(l1_batch_number.0), - duration_to_naive_time(started_at.elapsed()), - object_path, - ) - .instrument("mark_job_as_successful") - .with_arg("l1_batch_number", &l1_batch_number) - .report_latency() - .execute(self.storage) - .await?; - - Ok(()) - } - - pub async fn mark_job_as_failed( - &mut self, - l1_batch_number: L1BatchNumber, - started_at: Instant, - error: String, - ) -> DalResult> { - let attempts = sqlx::query!( - r#" - UPDATE tee_verifier_input_producer_jobs - SET - status = $1, - updated_at = NOW(), - time_taken = $3, - error = $4 - WHERE - l1_batch_number = $2 - AND status != $5 - RETURNING - tee_verifier_input_producer_jobs.attempts - "#, - TeeVerifierInputProducerJobStatus::Failed as TeeVerifierInputProducerJobStatus, - i64::from(l1_batch_number.0), - duration_to_naive_time(started_at.elapsed()), - error, - TeeVerifierInputProducerJobStatus::Successful as TeeVerifierInputProducerJobStatus, - ) - .instrument("mark_job_as_failed") - .with_arg("l1_batch_number", &l1_batch_number) - .report_latency() - .fetch_optional(self.storage) - .await? - .map(|job| job.attempts as u32); - - Ok(attempts) - } -} - -/// These functions should only be used for tests. -impl TeeVerifierInputProducerDal<'_, '_> { - pub async fn delete_all_jobs(&mut self) -> DalResult<()> { - sqlx::query!( - r#" - DELETE FROM tee_verifier_input_producer_jobs - "# - ) - .instrument("delete_all_tee_verifier_jobs") - .execute(self.storage) - .await?; - Ok(()) - } -} diff --git a/core/lib/object_store/src/file.rs b/core/lib/object_store/src/file.rs index 308cd65427fb..3484f2dad347 100644 --- a/core/lib/object_store/src/file.rs +++ b/core/lib/object_store/src/file.rs @@ -42,7 +42,6 @@ impl FileBackedObjectStore { Bucket::SchedulerWitnessJobsFri, Bucket::ProofsFri, Bucket::StorageSnapshot, - Bucket::TeeVerifierInput, Bucket::VmDumps, ] { let bucket_path = format!("{base_dir}/{bucket}"); diff --git a/core/lib/object_store/src/raw.rs b/core/lib/object_store/src/raw.rs index 740e8d76e246..0859d58d04be 100644 --- a/core/lib/object_store/src/raw.rs +++ b/core/lib/object_store/src/raw.rs @@ -19,7 +19,6 @@ pub enum Bucket { ProofsTee, StorageSnapshot, DataAvailability, - TeeVerifierInput, VmDumps, } @@ -39,7 +38,6 @@ impl Bucket { Self::ProofsTee => "proofs_tee", Self::StorageSnapshot => "storage_logs_snapshots", Self::DataAvailability => "data_availability", - Self::TeeVerifierInput => "tee_verifier_inputs", Self::VmDumps => "vm_dumps", } } diff --git a/core/lib/prover_interface/src/inputs.rs b/core/lib/prover_interface/src/inputs.rs index 22a20223c8b4..d56226374c1c 100644 --- a/core/lib/prover_interface/src/inputs.rs +++ b/core/lib/prover_interface/src/inputs.rs @@ -6,7 +6,8 @@ use zksync_multivm::interface::{L1BatchEnv, SystemEnv}; use zksync_object_store::{serialize_using_bincode, Bucket, StoredObject}; use zksync_types::{ basic_fri_types::Eip4844Blobs, block::L2BlockExecutionData, - witness_block_state::WitnessStorageState, L1BatchNumber, ProtocolVersionId, H256, U256, + witness_block_state::WitnessStorageState, L1BatchNumber, L2ChainId, 
ProtocolVersionId, H256, + U256, }; const HASH_LEN: usize = H256::len_bytes(); @@ -136,7 +137,7 @@ impl WitnessInputMerklePaths { } } -#[derive(Debug, Clone, Serialize, Deserialize)] +#[derive(Debug, Clone, Serialize, Deserialize, PartialEq)] pub struct VMRunWitnessInputData { pub l1_batch_number: L1BatchNumber, pub used_bytecodes: HashMap>, @@ -147,6 +148,7 @@ pub struct VMRunWitnessInputData { pub storage_refunds: Vec, pub pubdata_costs: Vec, pub witness_block_state: WitnessStorageState, + pub l2_chain_id: L2ChainId, } impl StoredObject for VMRunWitnessInputData { @@ -161,7 +163,7 @@ impl StoredObject for VMRunWitnessInputData { serialize_using_bincode!(); } -#[derive(Debug, Clone, Serialize, Deserialize)] +#[derive(Debug, Clone, Serialize, Deserialize, PartialEq)] pub struct WitnessInputData { pub vm_run_data: VMRunWitnessInputData, pub merkle_paths: WitnessInputMerklePaths, @@ -181,37 +183,37 @@ impl StoredObject for WitnessInputData { serialize_using_bincode!(); } -#[derive(Debug, Clone, Serialize, Deserialize)] +#[derive(Debug, Clone, Serialize, Deserialize, PartialEq)] pub struct L1BatchMetadataHashes { pub root_hash: H256, pub meta_hash: H256, pub aux_hash: H256, } -/// Version 1 of the data used as input for the TEE verifier. +/// Version 2 of the data used as input for the TEE verifier. #[derive(Debug, Clone, Serialize, Deserialize, PartialEq)] pub struct V1TeeVerifierInput { - pub witness_input_merkle_paths: WitnessInputMerklePaths, + pub vm_run_data: VMRunWitnessInputData, + pub merkle_paths: WitnessInputMerklePaths, pub l2_blocks_execution_data: Vec, pub l1_batch_env: L1BatchEnv, pub system_env: SystemEnv, - pub used_contracts: Vec<(H256, Vec)>, } impl V1TeeVerifierInput { pub fn new( - witness_input_merkle_paths: WitnessInputMerklePaths, + vm_run_data: VMRunWitnessInputData, + merkle_paths: WitnessInputMerklePaths, l2_blocks_execution_data: Vec, l1_batch_env: L1BatchEnv, system_env: SystemEnv, - used_contracts: Vec<(H256, Vec)>, ) -> Self { V1TeeVerifierInput { - witness_input_merkle_paths, + vm_run_data, + merkle_paths, l2_blocks_execution_data, l1_batch_env, system_env, - used_contracts, } } } @@ -232,17 +234,6 @@ impl TeeVerifierInput { } } -impl StoredObject for TeeVerifierInput { - const BUCKET: Bucket = Bucket::TeeVerifierInput; - type Key<'a> = L1BatchNumber; - - fn encode_key(key: Self::Key<'_>) -> String { - format!("tee_verifier_input_for_l1_batch_{key}.bin") - } - - serialize_using_bincode!(); -} - #[cfg(test)] mod tests { use super::*; diff --git a/core/lib/tee_verifier/Cargo.toml b/core/lib/tee_verifier/Cargo.toml index 6828eeef8b10..331c47e365eb 100644 --- a/core/lib/tee_verifier/Cargo.toml +++ b/core/lib/tee_verifier/Cargo.toml @@ -11,18 +11,21 @@ keywords.workspace = true categories.workspace = true [dependencies] -zksync_multivm.workspace = true zksync_config.workspace = true zksync_crypto_primitives.workspace = true zksync_merkle_tree.workspace = true -zksync_object_store.workspace = true +zksync_multivm.workspace = true zksync_prover_interface.workspace = true zksync_types.workspace = true zksync_utils.workspace = true anyhow.workspace = true +once_cell.workspace = true serde.workspace = true tracing.workspace = true [dev-dependencies] zksync_contracts.workspace = true +zksync_prover_interface.workspace = true + +bincode.workspace = true diff --git a/core/lib/tee_verifier/src/lib.rs b/core/lib/tee_verifier/src/lib.rs index 68b25416d668..ec0e1b5fff62 100644 --- a/core/lib/tee_verifier/src/lib.rs +++ b/core/lib/tee_verifier/src/lib.rs @@ -4,16 +4,14 @@ //! 
executing the VM and verifying all the accessed memory slots by their //! merkle path. -use std::{cell::RefCell, rc::Rc}; - -use anyhow::Context; +use anyhow::{bail, Context, Result}; use zksync_crypto_primitives::hasher::blake2::Blake2Hasher; use zksync_merkle_tree::{ BlockOutputWithProofs, TreeInstruction, TreeLogEntry, TreeLogEntryWithProof, ValueHash, }; use zksync_multivm::{ interface::{ - storage::{InMemoryStorage, ReadStorage, StorageView}, + storage::{ReadStorage, StorageSnapshot, StorageView}, FinishedL1Batch, L2BlockEnv, VmFactory, VmInterface, VmInterfaceExt, VmInterfaceHistoryEnabled, }, @@ -23,8 +21,11 @@ use zksync_multivm::{ use zksync_prover_interface::inputs::{ StorageLogMetadata, V1TeeVerifierInput, WitnessInputMerklePaths, }; -use zksync_types::{block::L2BlockExecutionData, L1BatchNumber, StorageLog, Transaction, H256}; -use zksync_utils::bytecode::hash_bytecode; +use zksync_types::{ + block::L2BlockExecutionData, L1BatchNumber, StorageLog, StorageLogKind, StorageValue, + Transaction, H256, +}; +use zksync_utils::u256_to_h256; /// A structure to hold the result of verification. pub struct VerificationResult { @@ -50,30 +51,43 @@ impl Verify for V1TeeVerifierInput { /// not actionable. fn verify(self) -> anyhow::Result { let old_root_hash = self.l1_batch_env.previous_batch_hash.unwrap(); - let l2_chain_id = self.system_env.chain_id; - let enumeration_index = self.witness_input_merkle_paths.next_enumeration_index(); + let enumeration_index = self.merkle_paths.next_enumeration_index(); + let batch_number = self.l1_batch_env.number; - let mut raw_storage = InMemoryStorage::with_custom_system_contracts_and_chain_id( - l2_chain_id, - hash_bytecode, - Vec::with_capacity(0), - ); + let read_storage_ops = self + .vm_run_data + .witness_block_state + .read_storage_key + .into_iter(); - for (hash, bytes) in self.used_contracts.into_iter() { - tracing::trace!("raw_storage.store_factory_dep({hash}, bytes)"); - raw_storage.store_factory_dep(hash, bytes) - } + let initial_writes_ops = self + .vm_run_data + .witness_block_state + .is_write_initial + .into_iter(); - let block_output_with_proofs = - get_bowp_and_set_initial_values(self.witness_input_merkle_paths, &mut raw_storage); + let storage = read_storage_ops + .enumerate() + .map(|(i, (hash, bytes))| (hash.hashed_key(), Some((bytes, i as u64 + 1u64)))) + .chain(initial_writes_ops.filter_map(|(key, initial_write)| { + initial_write.then_some((key.hashed_key(), None)) + })) + .collect(); - let storage_view = Rc::new(RefCell::new(StorageView::new(&raw_storage))); + let factory_deps = self + .vm_run_data + .used_bytecodes + .into_iter() + .map(|(hash, bytes)| (u256_to_h256(hash), bytes.into_flattened())) + .collect(); - let batch_number = self.l1_batch_env.number; + let storage_snapshot = StorageSnapshot::new(storage, factory_deps); + let storage_view = StorageView::new(storage_snapshot).to_rc_ptr(); let vm = LegacyVmInstance::new(self.l1_batch_env, self.system_env, storage_view); - let vm_out = execute_vm(self.l2_blocks_execution_data, vm)?; + let block_output_with_proofs = get_bowp(self.merkle_paths)?; + let instructions: Vec = generate_tree_instructions(enumeration_index, &block_output_with_proofs, vm_out)?; @@ -89,11 +103,8 @@ impl Verify for V1TeeVerifierInput { } /// Sets the initial storage values and returns `BlockOutputWithProofs` -fn get_bowp_and_set_initial_values( - witness_input_merkle_paths: WitnessInputMerklePaths, - raw_storage: &mut InMemoryStorage, -) -> BlockOutputWithProofs { - let logs = witness_input_merkle_paths 
+fn get_bowp(witness_input_merkle_paths: WitnessInputMerklePaths) -> Result { + let logs_result: Result<_, _> = witness_input_merkle_paths .into_merkle_paths() .map( |StorageLogMetadata { @@ -110,29 +121,31 @@ fn get_bowp_and_set_initial_values( let merkle_path = merkle_paths.into_iter().map(|x| x.into()).collect(); let base: TreeLogEntry = match (is_write, first_write, leaf_enumeration_index) { (false, _, 0) => TreeLogEntry::ReadMissingKey, - (false, _, _) => { + (false, false, _) => { // This is a special U256 here, which needs `to_little_endian` let mut hashed_key = [0_u8; 32]; leaf_storage_key.to_little_endian(&mut hashed_key); - raw_storage.set_value_hashed_enum( - hashed_key.into(), - leaf_enumeration_index, - value_read.into(), + tracing::trace!( + "TreeLogEntry::Read {leaf_storage_key:x} = {:x}", + StorageValue::from(value_read) ); TreeLogEntry::Read { leaf_index: leaf_enumeration_index, value: value_read.into(), } } + (false, true, _) => { + tracing::error!("get_bowp is_write = false, first_write = true"); + bail!("get_bowp is_write = false, first_write = true"); + } (true, true, _) => TreeLogEntry::Inserted, (true, false, _) => { // This is a special U256 here, which needs `to_little_endian` let mut hashed_key = [0_u8; 32]; leaf_storage_key.to_little_endian(&mut hashed_key); - raw_storage.set_value_hashed_enum( - hashed_key.into(), - leaf_enumeration_index, - value_read.into(), + tracing::trace!( + "TreeLogEntry::Updated {leaf_storage_key:x} = {:x}", + StorageValue::from(value_read) ); TreeLogEntry::Updated { leaf_index: leaf_enumeration_index, @@ -140,19 +153,21 @@ fn get_bowp_and_set_initial_values( } } }; - TreeLogEntryWithProof { + Ok(TreeLogEntryWithProof { base, merkle_path, root_hash, - } + }) }, ) .collect(); - BlockOutputWithProofs { + let logs: Vec = logs_result?; + + Ok(BlockOutputWithProofs { logs, leaf_count: 0, - } + }) } /// Executes the VM and returns `FinishedL1Batch` on success. @@ -176,11 +191,17 @@ fn execute_vm( .context("failed to execute transaction in TeeVerifierInputProducer")?; tracing::trace!("Finished execution of tx: {tx:?}"); } + + tracing::trace!("finished l2_block {l2_block_data:?}"); + tracing::trace!("about to vm.start_new_l2_block {next_l2_block_data:?}"); + vm.start_new_l2_block(L2BlockEnv::from_l2_block_data(next_l2_block_data)); tracing::trace!("Finished execution of l2_block: {:?}", l2_block_data.number); } + tracing::trace!("about to vm.finish_batch()"); + Ok(vm.finish_batch()) } @@ -191,36 +212,50 @@ fn map_log_tree( idx: &mut u64, ) -> anyhow::Result { let key = storage_log.key.hashed_key_u256(); - Ok(match (storage_log.is_write(), *tree_log_entry) { - (true, TreeLogEntry::Updated { leaf_index, .. }) => { + let tree_instruction = match (storage_log.is_write(), storage_log.kind, *tree_log_entry) { + (true, _, TreeLogEntry::Updated { leaf_index, .. }) => { TreeInstruction::write(key, leaf_index, H256(storage_log.value.into())) } - (true, TreeLogEntry::Inserted) => { + (true, _, TreeLogEntry::Inserted) => { let leaf_index = *idx; *idx += 1; TreeInstruction::write(key, leaf_index, H256(storage_log.value.into())) } - (false, TreeLogEntry::Read { value, .. }) => { + (false, _, TreeLogEntry::Read { value, .. 
}) => { if storage_log.value != value { tracing::error!( - "Failed to map LogQuery to TreeInstruction: {:#?} != {:#?}", - storage_log.value, - value - ); - anyhow::bail!( - "Failed to map LogQuery to TreeInstruction: {:#?} != {:#?}", + ?storage_log, + ?tree_log_entry, + "Failed to map LogQuery to TreeInstruction: read value {:#?} != {:#?}", storage_log.value, value ); } - TreeInstruction::Read(key) + anyhow::bail!("Failed to map LogQuery to TreeInstruction"); } - (false, TreeLogEntry::ReadMissingKey { .. }) => TreeInstruction::Read(key), - _ => { - tracing::error!("Failed to map LogQuery to TreeInstruction"); + (true, StorageLogKind::RepeatedWrite, TreeLogEntry::Read { .. }) => { + tracing::error!( + ?storage_log, + ?tree_log_entry, + "Failed to map LogQuery to TreeInstruction" + ); anyhow::bail!("Failed to map LogQuery to TreeInstruction"); } - }) + (false, _, TreeLogEntry::ReadMissingKey { .. }) => TreeInstruction::Read(key), + (true, _, TreeLogEntry::Read { .. }) + | (true, _, TreeLogEntry::ReadMissingKey) + | (false, _, TreeLogEntry::Inserted) + | (false, _, TreeLogEntry::Updated { .. }) => { + tracing::error!( + ?storage_log, + ?tree_log_entry, + "Failed to map LogQuery to TreeInstruction" + ); + anyhow::bail!("Failed to map LogQuery to TreeInstruction"); + } + }; + + Ok(tree_instruction) } /// Generates the `TreeInstruction`s from the VM executions. @@ -269,15 +304,26 @@ fn execute_tx( mod tests { use zksync_contracts::{BaseSystemContracts, SystemContractCode}; use zksync_multivm::interface::{L1BatchEnv, SystemEnv, TxExecutionMode}; - use zksync_object_store::StoredObject; - use zksync_prover_interface::inputs::TeeVerifierInput; - use zksync_types::U256; + use zksync_prover_interface::inputs::{TeeVerifierInput, VMRunWitnessInputData}; + use zksync_types::{L2ChainId, U256}; use super::*; #[test] fn test_v1_serialization() { let tvi = V1TeeVerifierInput::new( + VMRunWitnessInputData { + l1_batch_number: Default::default(), + used_bytecodes: Default::default(), + initial_heap_content: vec![], + protocol_version: Default::default(), + bootloader_code: vec![], + default_account_code_hash: Default::default(), + storage_refunds: vec![], + pubdata_costs: vec![], + witness_block_state: Default::default(), + l2_chain_id: L2ChainId::from(0), + }, WitnessInputMerklePaths::new(0), vec![], L1BatchEnv { @@ -312,14 +358,11 @@ mod tests { default_validation_computational_gas_limit: 0, chain_id: Default::default(), }, - vec![(H256([1; 32]), vec![0, 1, 2, 3, 4])], ); let tvi = TeeVerifierInput::new(tvi); - let serialized = ::serialize(&tvi) - .expect("Failed to serialize TeeVerifierInput."); + let serialized = bincode::serialize(&tvi).expect("Failed to serialize TeeVerifierInput."); let deserialized: TeeVerifierInput = - ::deserialize(serialized) - .expect("Failed to deserialize TeeVerifierInput."); + bincode::deserialize(&serialized).expect("Failed to deserialize TeeVerifierInput."); assert_eq!(tvi, deserialized); } diff --git a/core/lib/types/src/storage/witness_block_state.rs b/core/lib/types/src/storage/witness_block_state.rs index bce9cc9034d7..7f3195af873f 100644 --- a/core/lib/types/src/storage/witness_block_state.rs +++ b/core/lib/types/src/storage/witness_block_state.rs @@ -5,7 +5,7 @@ use serde::{Deserialize, Serialize}; use crate::{StorageKey, StorageValue}; /// Storage data used during Witness Generation. 
-#[derive(Debug, Default, Clone)] +#[derive(Debug, Default, Clone, PartialEq)] pub struct WitnessStorageState { pub read_storage_key: HashMap, pub is_write_initial: HashMap, diff --git a/core/lib/zksync_core_leftovers/src/lib.rs b/core/lib/zksync_core_leftovers/src/lib.rs index 9d399bdd0aff..87fb7ea28f71 100644 --- a/core/lib/zksync_core_leftovers/src/lib.rs +++ b/core/lib/zksync_core_leftovers/src/lib.rs @@ -43,9 +43,6 @@ pub enum Component { EthTxManager, /// State keeper. StateKeeper, - /// Produces input for the TEE verifier. - /// The blob is later used as input for TEE verifier. - TeeVerifierInputProducer, /// Component for housekeeping task such as cleaning blobs from GCS, reporting metrics etc. Housekeeper, /// Component for exposing APIs to prover for providing proof generation data and accepting proofs. @@ -88,9 +85,6 @@ impl FromStr for Components { "tree_api" => Ok(Components(vec![Component::TreeApi])), "state_keeper" => Ok(Components(vec![Component::StateKeeper])), "housekeeper" => Ok(Components(vec![Component::Housekeeper])), - "tee_verifier_input_producer" => { - Ok(Components(vec![Component::TeeVerifierInputProducer])) - } "eth" => Ok(Components(vec![ Component::EthWatcher, Component::EthTxAggregator, diff --git a/core/node/api_server/src/web3/tests/unstable.rs b/core/node/api_server/src/web3/tests/unstable.rs index 1d425f8b9515..e814081afa02 100644 --- a/core/node/api_server/src/web3/tests/unstable.rs +++ b/core/node/api_server/src/web3/tests/unstable.rs @@ -27,14 +27,9 @@ impl HttpTest for GetTeeProofsTest { assert!(proof.is_empty()); - let mut storage = pool.connection().await.unwrap(); - storage - .tee_verifier_input_producer_dal() - .create_tee_verifier_input_producer_job(batch_no) - .await?; - let pubkey = vec![0xDE, 0xAD, 0xBE, 0xEF]; let attestation = vec![0xC0, 0xFF, 0xEE]; + let mut storage = pool.connection().await.unwrap(); let mut tee_proof_generation_dal = storage.tee_proof_generation_dal(); tee_proof_generation_dal .save_attestation(&pubkey, &attestation) diff --git a/core/node/metadata_calculator/src/updater.rs b/core/node/metadata_calculator/src/updater.rs index e2acf62dea8a..17fd5d900eab 100644 --- a/core/node/metadata_calculator/src/updater.rs +++ b/core/node/metadata_calculator/src/updater.rs @@ -152,10 +152,6 @@ impl TreeUpdater { // right away without having to implement dedicated code. 
if let Some(object_key) = &object_key { - storage - .tee_verifier_input_producer_dal() - .create_tee_verifier_input_producer_job(l1_batch_number) - .await?; // Save the proof generation details to Postgres storage .proof_generation_dal() diff --git a/core/node/node_framework/Cargo.toml b/core/node/node_framework/Cargo.toml index 1df47e775539..d85f3dc7c8e9 100644 --- a/core/node/node_framework/Cargo.toml +++ b/core/node/node_framework/Cargo.toml @@ -45,7 +45,6 @@ zksync_node_sync.workspace = true zksync_node_api_server.workspace = true zksync_node_consensus.workspace = true zksync_contract_verification_server.workspace = true -zksync_tee_verifier_input_producer.workspace = true zksync_queued_job_processor.workspace = true zksync_reorg_detector.workspace = true zksync_vm_runner.workspace = true diff --git a/core/node/node_framework/src/implementations/layers/mod.rs b/core/node/node_framework/src/implementations/layers/mod.rs index 75828da19023..11a62c9333b2 100644 --- a/core/node/node_framework/src/implementations/layers/mod.rs +++ b/core/node/node_framework/src/implementations/layers/mod.rs @@ -33,7 +33,6 @@ pub mod reorg_detector; pub mod sigint; pub mod state_keeper; pub mod sync_state_updater; -pub mod tee_verifier_input_producer; pub mod tree_data_fetcher; pub mod validate_chain_ids; pub mod vm_runner; diff --git a/core/node/node_framework/src/implementations/layers/tee_verifier_input_producer.rs b/core/node/node_framework/src/implementations/layers/tee_verifier_input_producer.rs deleted file mode 100644 index 68789082a226..000000000000 --- a/core/node/node_framework/src/implementations/layers/tee_verifier_input_producer.rs +++ /dev/null @@ -1,69 +0,0 @@ -use zksync_queued_job_processor::JobProcessor; -use zksync_tee_verifier_input_producer::TeeVerifierInputProducer; -use zksync_types::L2ChainId; - -use crate::{ - implementations::resources::{ - object_store::ObjectStoreResource, - pools::{MasterPool, PoolResource}, - }, - service::StopReceiver, - task::{Task, TaskId}, - wiring_layer::{WiringError, WiringLayer}, - FromContext, IntoContext, -}; - -/// Wiring layer for [`TeeVerifierInputProducer`]. 
-#[derive(Debug)] -pub struct TeeVerifierInputProducerLayer { - l2_chain_id: L2ChainId, -} - -impl TeeVerifierInputProducerLayer { - pub fn new(l2_chain_id: L2ChainId) -> Self { - Self { l2_chain_id } - } -} - -#[derive(Debug, FromContext)] -#[context(crate = crate)] -pub struct Input { - pub master_pool: PoolResource, - pub object_store: ObjectStoreResource, -} - -#[derive(Debug, IntoContext)] -#[context(crate = crate)] -pub struct Output { - #[context(task)] - pub task: TeeVerifierInputProducer, -} - -#[async_trait::async_trait] -impl WiringLayer for TeeVerifierInputProducerLayer { - type Input = Input; - type Output = Output; - - fn layer_name(&self) -> &'static str { - "tee_verifier_input_producer_layer" - } - - async fn wire(self, input: Self::Input) -> Result { - let pool = input.master_pool.get().await?; - let ObjectStoreResource(object_store) = input.object_store; - let task = TeeVerifierInputProducer::new(pool, object_store, self.l2_chain_id).await?; - - Ok(Output { task }) - } -} - -#[async_trait::async_trait] -impl Task for TeeVerifierInputProducer { - fn id(&self) -> TaskId { - "tee_verifier_input_producer".into() - } - - async fn run(self: Box, stop_receiver: StopReceiver) -> anyhow::Result<()> { - (*self).run(stop_receiver.0, None).await - } -} diff --git a/core/node/proof_data_handler/Cargo.toml b/core/node/proof_data_handler/Cargo.toml index 82063b23fdb5..76dc89eda04b 100644 --- a/core/node/proof_data_handler/Cargo.toml +++ b/core/node/proof_data_handler/Cargo.toml @@ -17,6 +17,8 @@ zksync_dal.workspace = true zksync_object_store.workspace = true zksync_prover_interface.workspace = true zksync_types.workspace = true +zksync_vm_executor.workspace = true +zksync_utils.workspace = true anyhow.workspace = true axum.workspace = true tokio.workspace = true diff --git a/core/node/proof_data_handler/src/errors.rs b/core/node/proof_data_handler/src/errors.rs index 15ef393294aa..1f8a0d4d613b 100644 --- a/core/node/proof_data_handler/src/errors.rs +++ b/core/node/proof_data_handler/src/errors.rs @@ -6,6 +6,7 @@ use zksync_dal::DalError; use zksync_object_store::ObjectStoreError; pub(crate) enum RequestProcessorError { + GeneralError(String), ObjectStore(ObjectStoreError), Dal(DalError), } @@ -19,6 +20,13 @@ impl From for RequestProcessorError { impl IntoResponse for RequestProcessorError { fn into_response(self) -> Response { let (status_code, message) = match self { + RequestProcessorError::GeneralError(err) => { + tracing::error!("Error: {:?}", err); + ( + StatusCode::BAD_GATEWAY, + "An internal error occurred".to_owned(), + ) + } RequestProcessorError::ObjectStore(err) => { tracing::error!("GCS error: {:?}", err); ( diff --git a/core/node/proof_data_handler/src/tee_request_processor.rs b/core/node/proof_data_handler/src/tee_request_processor.rs index 4ae1a5026f14..d1d8899e234f 100644 --- a/core/node/proof_data_handler/src/tee_request_processor.rs +++ b/core/node/proof_data_handler/src/tee_request_processor.rs @@ -4,11 +4,17 @@ use axum::{extract::Path, Json}; use zksync_config::configs::ProofDataHandlerConfig; use zksync_dal::{ConnectionPool, Core, CoreDal}; use zksync_object_store::{ObjectStore, ObjectStoreError}; -use zksync_prover_interface::api::{ - RegisterTeeAttestationRequest, RegisterTeeAttestationResponse, SubmitProofResponse, - SubmitTeeProofRequest, TeeProofGenerationDataRequest, TeeProofGenerationDataResponse, +use zksync_prover_interface::{ + api::{ + RegisterTeeAttestationRequest, RegisterTeeAttestationResponse, SubmitProofResponse, + SubmitTeeProofRequest, 
TeeProofGenerationDataRequest, TeeProofGenerationDataResponse, + }, + inputs::{ + TeeVerifierInput, V1TeeVerifierInput, VMRunWitnessInputData, WitnessInputMerklePaths, + }, }; use zksync_types::{tee_types::TeeType, L1BatchNumber}; +use zksync_vm_executor::storage::L1BatchParamsProvider; use crate::errors::RequestProcessorError; @@ -50,9 +56,12 @@ impl TeeRequestProcessor { None => break Ok(Json(TeeProofGenerationDataResponse(None))), }; - match self.blob_store.get(l1_batch_number).await { + match self + .tee_verifier_input_for_existing_batch(l1_batch_number) + .await + { Ok(input) => break Ok(Json(TeeProofGenerationDataResponse(Some(Box::new(input))))), - Err(ObjectStoreError::KeyNotFound(_)) => { + Err(RequestProcessorError::ObjectStore(ObjectStoreError::KeyNotFound(_))) => { missing_range = match missing_range { Some((start, _)) => Some((start, l1_batch_number)), None => Some((l1_batch_number, l1_batch_number)), @@ -62,7 +71,7 @@ impl TeeRequestProcessor { } Err(err) => { self.unlock_batch(l1_batch_number, request.tee_type).await?; - break Err(RequestProcessorError::ObjectStore(err)); + break Err(err); } } }; @@ -78,6 +87,66 @@ impl TeeRequestProcessor { result } + #[tracing::instrument(skip(self))] + async fn tee_verifier_input_for_existing_batch( + &self, + l1_batch_number: L1BatchNumber, + ) -> Result<TeeVerifierInput, RequestProcessorError> { + let vm_run_data: VMRunWitnessInputData = self + .blob_store + .get(l1_batch_number) + .await + .map_err(RequestProcessorError::ObjectStore)?; + + let merkle_paths: WitnessInputMerklePaths = self + .blob_store + .get(l1_batch_number) + .await + .map_err(RequestProcessorError::ObjectStore)?; + + let mut connection = self + .pool + .connection() + .await + .map_err(RequestProcessorError::Dal)?; + + let l2_blocks_execution_data = connection + .transactions_dal() + .get_l2_blocks_to_execute_for_l1_batch(l1_batch_number) + .await + .map_err(|err| RequestProcessorError::Dal(err))?; + + let l1_batch_params_provider = L1BatchParamsProvider::new(&mut connection) + .await + .map_err(|err| RequestProcessorError::GeneralError(err.to_string()))?; + + // In the state keeper, this value is used to reject execution. + // All batches have already been executed by the state keeper, so we don't want + // to reject any execution here; `u32::MAX` acts as an allow-all. + let validation_computational_gas_limit = u32::MAX; + + let (system_env, l1_batch_env) = l1_batch_params_provider + .load_l1_batch_env( + &mut connection, + l1_batch_number, + validation_computational_gas_limit, + vm_run_data.l2_chain_id, + ) + .await + .map_err(|err| RequestProcessorError::GeneralError(err.to_string()))?
+ .ok_or(RequestProcessorError::GeneralError( + "system_env, l1_batch_env missing".into(), + ))?; + + Ok(TeeVerifierInput::new(V1TeeVerifierInput { + vm_run_data, + merkle_paths, + l2_blocks_execution_data, + l1_batch_env, + system_env, + })) + } + async fn lock_batch_for_proving( &self, tee_type: TeeType, diff --git a/core/node/proof_data_handler/src/tests.rs b/core/node/proof_data_handler/src/tests.rs index 86cc53234486..ae78a0e62de4 100644 --- a/core/node/proof_data_handler/src/tests.rs +++ b/core/node/proof_data_handler/src/tests.rs @@ -1,5 +1,3 @@ -use std::time::Instant; - use axum::{ body::Body, http::{self, Method, Request, StatusCode}, @@ -8,84 +6,23 @@ use axum::{ }; use serde_json::json; use tower::ServiceExt; -use zksync_basic_types::U256; use zksync_config::configs::ProofDataHandlerConfig; -use zksync_contracts::{BaseSystemContracts, SystemContractCode}; use zksync_dal::{ConnectionPool, CoreDal}; -use zksync_multivm::interface::{L1BatchEnv, L2BlockEnv, SystemEnv, TxExecutionMode}; use zksync_object_store::MockObjectStore; -use zksync_prover_interface::{ - api::SubmitTeeProofRequest, - inputs::{TeeVerifierInput, V1TeeVerifierInput, WitnessInputMerklePaths}, -}; -use zksync_types::{commitment::L1BatchCommitmentMode, tee_types::TeeType, L1BatchNumber, H256}; +use zksync_prover_interface::api::SubmitTeeProofRequest; +use zksync_types::{commitment::L1BatchCommitmentMode, tee_types::TeeType, L1BatchNumber}; use crate::create_proof_processing_router; -// Test the /tee/proof_inputs endpoint by: -// 1. Mocking an object store with a single batch blob containing TEE verifier input -// 2. Populating the SQL db with relevant information about the status of the TEE verifier input and -// TEE proof generation -// 3. Sending a request to the /tee/proof_inputs endpoint and asserting that the response -// matches the file from the object store #[tokio::test] async fn request_tee_proof_inputs() { - // prepare a sample mocked TEE verifier input - let batch_number = L1BatchNumber::from(1); - let tvi = V1TeeVerifierInput::new( - WitnessInputMerklePaths::new(0), - vec![], - L1BatchEnv { - previous_batch_hash: Some(H256([1; 32])), - number: batch_number, - timestamp: 0, - fee_input: Default::default(), - fee_account: Default::default(), - enforced_base_fee: None, - first_l2_block: L2BlockEnv { - number: 0, - timestamp: 0, - prev_block_hash: H256([1; 32]), - max_virtual_blocks_to_create: 0, - }, - }, - SystemEnv { - zk_porter_available: false, - version: Default::default(), - base_system_smart_contracts: BaseSystemContracts { - bootloader: SystemContractCode { - code: vec![U256([1; 4])], - hash: H256([1; 32]), - }, - default_aa: SystemContractCode { - code: vec![U256([1; 4])], - hash: H256([1; 32]), - }, - }, - bootloader_gas_limit: 0, - execution_mode: TxExecutionMode::VerifyExecute, - default_validation_computational_gas_limit: 0, - chain_id: Default::default(), - }, - vec![(H256([1; 32]), vec![0, 1, 2, 3, 4])], - ); - let tvi = TeeVerifierInput::V1(tvi); - - // populate mocked object store with a single batch blob - - let blob_store = MockObjectStore::arc(); - let object_path = blob_store.put(batch_number, &tvi).await.unwrap(); - - // get connection to the SQL db and mock the status of the TEE proof generation - let db_conn_pool = ConnectionPool::test_pool().await; - mock_tee_batch_status(db_conn_pool.clone(), batch_number, &object_path).await; - // test the /tee/proof_inputs endpoint; it should return the batch from the object store + mock_tee_batch_status(db_conn_pool.clone(), 
batch_number).await; let app = create_proof_processing_router( - blob_store, + MockObjectStore::arc(), db_conn_pool, ProofDataHandlerConfig { http_port: 1337, @@ -94,41 +31,40 @@ async fn request_tee_proof_inputs() { }, L1BatchCommitmentMode::Rollup, ); - let req_body = Body::from(serde_json::to_vec(&json!({ "tee_type": "sgx" })).unwrap()); - let response = app - .oneshot( - Request::builder() - .method(Method::POST) - .uri("/tee/proof_inputs") - .header(http::header::CONTENT_TYPE, "application/json") - .body(req_body) - .unwrap(), - ) - .await - .unwrap(); - - assert_eq!(response.status(), StatusCode::OK); - - let body = axum::body::to_bytes(response.into_body(), usize::MAX) - .await - .unwrap(); - let json: serde_json::Value = serde_json::from_slice(&body).unwrap(); - let deserialized: TeeVerifierInput = serde_json::from_value(json).unwrap(); - - assert_eq!(tvi, deserialized); + let test_cases = vec![ + (json!({ "tee_type": "sgx" }), StatusCode::OK), + ( + json!({ "tee_type": "Sgx" }), + StatusCode::UNPROCESSABLE_ENTITY, + ), + ]; + + for (body, expected_status) in test_cases { + let req_body = Body::from(serde_json::to_vec(&body).unwrap()); + let response = app + .clone() + .oneshot( + Request::builder() + .method(Method::POST) + .uri("/tee/proof_inputs") + .header(http::header::CONTENT_TYPE, "application/json") + .body(req_body) + .unwrap(), + ) + .await + .unwrap(); + + assert_eq!(response.status(), expected_status); + } } // Test /tee/submit_proofs endpoint using a mocked TEE proof and verify response and db state #[tokio::test] async fn submit_tee_proof() { - let blob_store = MockObjectStore::arc(); - let db_conn_pool = ConnectionPool::test_pool().await; - let object_path = "mocked_object_path"; let batch_number = L1BatchNumber::from(1); + let db_conn_pool = ConnectionPool::test_pool().await; - mock_tee_batch_status(db_conn_pool.clone(), batch_number, object_path).await; - - // send a request to the /tee/submit_proofs endpoint, using a mocked TEE proof + mock_tee_batch_status(db_conn_pool.clone(), batch_number).await; let tee_proof_request_str = r#"{ "signature": "0001020304", @@ -140,7 +76,7 @@ async fn submit_tee_proof() { serde_json::from_str::(tee_proof_request_str).unwrap(); let uri = format!("/tee/submit_proofs/{}", batch_number.0); let app = create_proof_processing_router( - blob_store, + MockObjectStore::arc(), db_conn_pool.clone(), ProofDataHandlerConfig { http_port: 1337, @@ -206,32 +142,15 @@ async fn submit_tee_proof() { async fn mock_tee_batch_status( db_conn_pool: ConnectionPool, batch_number: L1BatchNumber, - object_path: &str, ) { let mut proof_db_conn = db_conn_pool.connection().await.unwrap(); let mut proof_dal = proof_db_conn.tee_proof_generation_dal(); - let mut input_db_conn = db_conn_pool.connection().await.unwrap(); - let mut input_producer_dal = input_db_conn.tee_verifier_input_producer_dal(); // there should not be any batches awaiting proof in the db yet let oldest_batch_number = proof_dal.get_oldest_unpicked_batch().await.unwrap(); assert!(oldest_batch_number.is_none()); - // mock SQL table with relevant information about the status of the TEE verifier input - - input_producer_dal - .create_tee_verifier_input_producer_job(batch_number) - .await - .expect("Failed to create tee_verifier_input_producer_job"); - - // pretend that the TEE verifier input blob file was fetched successfully - - input_producer_dal - .mark_job_as_successful(batch_number, Instant::now(), object_path) - .await - .expect("Failed to mark tee_verifier_input_producer_job job as 
successful"); - // mock SQL table with relevant information about the status of TEE proof generation proof_dal diff --git a/core/node/shared_metrics/src/lib.rs b/core/node/shared_metrics/src/lib.rs index e0a7fa74ef42..2c41ec9293a0 100644 --- a/core/node/shared_metrics/src/lib.rs +++ b/core/node/shared_metrics/src/lib.rs @@ -29,7 +29,6 @@ pub enum InitStage { EthTxAggregator, EthTxManager, Tree, - TeeVerifierInputProducer, Consensus, DADispatcher, } @@ -45,7 +44,6 @@ impl fmt::Display for InitStage { Self::EthTxAggregator => formatter.write_str("eth_tx_aggregator"), Self::EthTxManager => formatter.write_str("eth_tx_manager"), Self::Tree => formatter.write_str("tree"), - Self::TeeVerifierInputProducer => formatter.write_str("tee_verifier_input_producer"), Self::Consensus => formatter.write_str("consensus"), Self::DADispatcher => formatter.write_str("da_dispatcher"), } diff --git a/core/node/tee_verifier_input_producer/Cargo.toml b/core/node/tee_verifier_input_producer/Cargo.toml deleted file mode 100644 index 7a5a4de5d0c9..000000000000 --- a/core/node/tee_verifier_input_producer/Cargo.toml +++ /dev/null @@ -1,27 +0,0 @@ -[package] -name = "zksync_tee_verifier_input_producer" -description = "ZKsync TEE verifier input producer" -version.workspace = true -edition.workspace = true -authors.workspace = true -homepage.workspace = true -repository.workspace = true -license.workspace = true -keywords.workspace = true -categories.workspace = true - -[dependencies] -zksync_dal.workspace = true -zksync_object_store.workspace = true -zksync_prover_interface.workspace = true -zksync_queued_job_processor.workspace = true -zksync_tee_verifier.workspace = true -zksync_types.workspace = true -zksync_utils.workspace = true -zksync_vm_executor.workspace = true -vise.workspace = true - -anyhow.workspace = true -async-trait.workspace = true -tracing.workspace = true -tokio = { workspace = true, features = ["time"] } diff --git a/core/node/tee_verifier_input_producer/README.md b/core/node/tee_verifier_input_producer/README.md deleted file mode 100644 index 75a2029985cc..000000000000 --- a/core/node/tee_verifier_input_producer/README.md +++ /dev/null @@ -1,3 +0,0 @@ -# `zksync_tee_verifier_input_producer` - -Component responsible for producing inputs for verification of execution in TEE. diff --git a/core/node/tee_verifier_input_producer/src/lib.rs b/core/node/tee_verifier_input_producer/src/lib.rs deleted file mode 100644 index 8a99aa07ae51..000000000000 --- a/core/node/tee_verifier_input_producer/src/lib.rs +++ /dev/null @@ -1,261 +0,0 @@ -//! Produces input for a TEE Verifier -//! -//! Extract all data needed to re-execute and verify an L1Batch without accessing -//! the DB and/or the object store. -//! -//! For testing purposes, the L1 batch is re-executed immediately for now. -//! Eventually, this component will only extract the inputs and send them to another -//! machine over a "to be defined" channel, e.g., save them to an object store. 
- -use std::{sync::Arc, time::Instant}; - -use anyhow::Context; -use async_trait::async_trait; -use tokio::task::JoinHandle; -use zksync_dal::{tee_verifier_input_producer_dal::JOB_MAX_ATTEMPT, ConnectionPool, Core, CoreDal}; -use zksync_object_store::ObjectStore; -use zksync_prover_interface::inputs::{ - TeeVerifierInput, V1TeeVerifierInput, WitnessInputMerklePaths, -}; -use zksync_queued_job_processor::JobProcessor; -use zksync_tee_verifier::Verify; -use zksync_types::{tee_types::TeeType, L1BatchNumber, L2ChainId}; -use zksync_utils::u256_to_h256; -use zksync_vm_executor::storage::L1BatchParamsProvider; - -use self::metrics::METRICS; - -mod metrics; - -/// Component that extracts all data (from DB) necessary to run a TEE Verifier. -#[derive(Debug)] -pub struct TeeVerifierInputProducer { - connection_pool: ConnectionPool<Core>, - l2_chain_id: L2ChainId, - object_store: Arc<dyn ObjectStore>, -} - -impl TeeVerifierInputProducer { - pub async fn new( - connection_pool: ConnectionPool<Core>, - object_store: Arc<dyn ObjectStore>, - l2_chain_id: L2ChainId, - ) -> anyhow::Result<Self> { - Ok(TeeVerifierInputProducer { - connection_pool, - object_store, - l2_chain_id, - }) - } - - async fn process_job_impl( - l1_batch_number: L1BatchNumber, - started_at: Instant, - connection_pool: ConnectionPool<Core>, - object_store: Arc<dyn ObjectStore>, - l2_chain_id: L2ChainId, - ) -> anyhow::Result<TeeVerifierInput> { - let prepare_basic_circuits_job: WitnessInputMerklePaths = object_store - .get(l1_batch_number) - .await - .context("failed to get PrepareBasicCircuitsJob from object store")?; - - let mut connection = connection_pool - .connection() - .await - .context("failed to get connection for TeeVerifierInputProducer")?; - - let l2_blocks_execution_data = connection - .transactions_dal() - .get_l2_blocks_to_execute_for_l1_batch(l1_batch_number) - .await?; - - let l1_batch_header = connection - .blocks_dal() - .get_l1_batch_header(l1_batch_number) - .await - .with_context(|| format!("header is missing for L1 batch #{l1_batch_number}"))? - .unwrap(); - - let l1_batch_params_provider = L1BatchParamsProvider::new(&mut connection) - .await - .context("failed initializing L1 batch params provider")?; - - // In the state keeper, this value is used to reject execution. - // All batches have already been executed by State Keeper. - // This means we don't want to reject any execution, therefore we're using MAX as an allow all. - let validation_computational_gas_limit = u32::MAX; - - let (system_env, l1_batch_env) = l1_batch_params_provider - .load_l1_batch_env( - &mut connection, - l1_batch_number, - validation_computational_gas_limit, - l2_chain_id, - ) - .await? - .with_context(|| format!("expected L1 batch #{l1_batch_number} to be sealed"))?; - - let used_contract_hashes = l1_batch_header - .used_contract_hashes - .into_iter() - .map(u256_to_h256) - .collect(); - - // `get_factory_deps()` returns the bytecode in chunks of `Vec<[u8; 32]>`, - // but `fn store_factory_dep(&mut self, hash: H256, bytecode: Vec<u8>)` in `InMemoryStorage` wants flat byte vecs.
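The comment above refers to the small flattening helper that follows in the deleted code. The same idea as a self-contained, runnable snippet:

```rust
// Concatenate fixed-size chunks (e.g., the `Vec<[u8; 32]>` returned by
// `get_factory_deps()`) into one flat vector, as `store_factory_dep` expects.
fn into_flattened<T: Copy, const N: usize>(data: Vec<[T; N]>) -> Vec<T> {
    let mut flat = Vec::with_capacity(data.len() * N);
    for chunk in &data {
        flat.extend_from_slice(chunk);
    }
    flat
}

fn main() {
    let chunks: Vec<[u8; 2]> = vec![[0, 1], [2, 3]];
    assert_eq!(into_flattened(chunks), vec![0, 1, 2, 3]);
}
```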
- pub fn into_flattened<T: Copy, const N: usize>(data: Vec<[T; N]>) -> Vec<T> { - let mut new = Vec::new(); - for slice in data.iter() { - new.extend_from_slice(slice); - } - new - } - - let used_contracts = connection - .factory_deps_dal() - .get_factory_deps(&used_contract_hashes) - .await - .into_iter() - .map(|(hash, bytes)| (u256_to_h256(hash), into_flattened(bytes))) - .collect(); - - tracing::info!("Started execution of l1_batch: {l1_batch_number:?}"); - - let tee_verifier_input = V1TeeVerifierInput::new( - prepare_basic_circuits_job, - l2_blocks_execution_data, - l1_batch_env, - system_env, - used_contracts, - ); - - // TODO (SEC-263): remove these 2 lines after successful testnet runs - tee_verifier_input.clone().verify()?; - tracing::info!("Looks like we verified {l1_batch_number} correctly"); - - tracing::info!("Finished execution of l1_batch: {l1_batch_number:?}"); - - METRICS.process_batch_time.observe(started_at.elapsed()); - tracing::debug!( - "TeeVerifierInputProducer took {:?} for L1BatchNumber {}", - started_at.elapsed(), - l1_batch_number.0 - ); - - Ok(TeeVerifierInput::new(tee_verifier_input)) - } -} - -#[async_trait] -impl JobProcessor for TeeVerifierInputProducer { - type Job = L1BatchNumber; - type JobId = L1BatchNumber; - type JobArtifacts = TeeVerifierInput; - const SERVICE_NAME: &'static str = "tee_verifier_input_producer"; - - async fn get_next_job(&self) -> anyhow::Result<Option<(Self::JobId, Self::Job)>> { - let mut connection = self.connection_pool.connection().await?; - let l1_batch_to_process = connection - .tee_verifier_input_producer_dal() - .get_next_tee_verifier_input_producer_job() - .await - .context("failed to get next basic witness input producer job")?; - Ok(l1_batch_to_process.map(|number| (number, number))) - } - - async fn save_failure(&self, job_id: Self::JobId, started_at: Instant, error: String) { - let attempts = self - .connection_pool - .connection() - .await - .unwrap() - .tee_verifier_input_producer_dal() - .mark_job_as_failed(job_id, started_at, error) - .await - .expect("errored whilst marking job as failed"); - if let Some(tries) = attempts { - tracing::warn!("Failed to process job: {job_id:?}, after {tries} tries."); - } else { - tracing::warn!("L1 Batch {job_id:?} was processed successfully by another worker."); - } - } - - async fn process_job( - &self, - _job_id: &Self::JobId, - job: Self::Job, - started_at: Instant, - ) -> JoinHandle<anyhow::Result<Self::JobArtifacts>> { - let l2_chain_id = self.l2_chain_id; - let connection_pool = self.connection_pool.clone(); - let object_store = self.object_store.clone(); - tokio::task::spawn(async move { - Self::process_job_impl( - job, - started_at, - connection_pool.clone(), - object_store, - l2_chain_id, - ) - .await - }) - } - - async fn save_result( - &self, - job_id: Self::JobId, - started_at: Instant, - artifacts: Self::JobArtifacts, - ) -> anyhow::Result<()> { - let observer: vise::LatencyObserver = METRICS.upload_input_time.start(); - let object_path = self - .object_store - .put(job_id, &artifacts) - .await - .context("failed to upload artifacts for TeeVerifierInputProducer")?; - observer.observe(); - let mut connection = self - .connection_pool - .connection() - .await - .context("failed to acquire DB connection for TeeVerifierInputProducer")?; - let mut transaction = connection - .start_transaction() - .await - .context("failed to acquire DB transaction for TeeVerifierInputProducer")?; - transaction - .tee_verifier_input_producer_dal() - .mark_job_as_successful(job_id, started_at, &object_path) - .await - .context("failed to mark job as successful for
TeeVerifierInputProducer")?; - transaction - .tee_proof_generation_dal() - .insert_tee_proof_generation_job(job_id, TeeType::Sgx) - .await?; - transaction - .commit() - .await - .context("failed to commit DB transaction for TeeVerifierInputProducer")?; - METRICS.block_number_processed.set(job_id.0 as u64); - Ok(()) - } - - fn max_attempts(&self) -> u32 { - JOB_MAX_ATTEMPT as u32 - } - - async fn get_job_attempts(&self, job_id: &L1BatchNumber) -> anyhow::Result { - let mut connection = self - .connection_pool - .connection() - .await - .context("failed to acquire DB connection for TeeVerifierInputProducer")?; - connection - .tee_verifier_input_producer_dal() - .get_tee_verifier_input_producer_job_attempts(*job_id) - .await - .map(|attempts| attempts.unwrap_or(0)) - .context("failed to get job attempts for TeeVerifierInputProducer") - } -} diff --git a/core/node/tee_verifier_input_producer/src/metrics.rs b/core/node/tee_verifier_input_producer/src/metrics.rs deleted file mode 100644 index 362804d338e9..000000000000 --- a/core/node/tee_verifier_input_producer/src/metrics.rs +++ /dev/null @@ -1,18 +0,0 @@ -//! Metrics - -use std::time::Duration; - -use vise::{Buckets, Gauge, Histogram, Metrics, Unit}; - -#[derive(Debug, Metrics)] -#[metrics(prefix = "tee_verifier_input_producer")] -pub(crate) struct TeeVerifierInputProducerMetrics { - #[metrics(buckets = Buckets::LATENCIES, unit = Unit::Seconds)] - pub process_batch_time: Histogram, - #[metrics(buckets = Buckets::LATENCIES, unit = Unit::Seconds)] - pub upload_input_time: Histogram, - pub block_number_processed: Gauge, -} - -#[vise::register] -pub(super) static METRICS: vise::Global = vise::Global::new(); diff --git a/core/node/vm_runner/src/impls/bwip.rs b/core/node/vm_runner/src/impls/bwip.rs index 6c2933635b4c..e8f0228237e4 100644 --- a/core/node/vm_runner/src/impls/bwip.rs +++ b/core/node/vm_runner/src/impls/bwip.rs @@ -208,6 +208,7 @@ async fn get_updates_manager_witness_input_data( output: &L1BatchOutput, ) -> anyhow::Result { let initial_heap_content = output.batch.final_bootloader_memory.clone().unwrap(); // might be just empty + let l2_chain_id = system_env.chain_id; let default_aa = system_env.base_system_smart_contracts.hashes().default_aa; let bootloader = system_env.base_system_smart_contracts.hashes().bootloader; let bootloader_code_bytes = connection @@ -257,6 +258,7 @@ async fn get_updates_manager_witness_input_data( storage_refunds, pubdata_costs, witness_block_state, + l2_chain_id, }) } diff --git a/etc/env/base/rust.toml b/etc/env/base/rust.toml index d8bef020c642..18107f0d4f93 100644 --- a/etc/env/base/rust.toml +++ b/etc/env/base/rust.toml @@ -1,6 +1,6 @@ # Environment configuration for the Rust code # We don't provide the group name like `[rust]` here, because we don't want -# these variables to be prefixed during the compiling. +# these variables to be prefixed during the compiling. # `RUST_LOG` environment variable for `env_logger` # Here we use TOML multiline strings: newlines will be trimmed. 
@@ -26,7 +26,6 @@ zksync_node_sync=info,\ zksync_node_consensus=info,\ zksync_contract_verification_server=info,\ zksync_node_api_server=info,\ -zksync_tee_verifier_input_producer=info,\ zksync_consensus_bft=info,\ zksync_consensus_network=info,\ zksync_consensus_storage=info,\ diff --git a/etc/env/file_based/general.yaml b/etc/env/file_based/general.yaml index a4ba8c0201a6..c28c4abb34a1 100644 --- a/etc/env/file_based/general.yaml +++ b/etc/env/file_based/general.yaml @@ -312,7 +312,7 @@ prometheus: observability: log_format: plain - log_directives: "zksync_node_test_utils=info,zksync_state_keeper=info,zksync_reorg_detector=info,zksync_consistency_checker=info,zksync_metadata_calculator=info,zksync_node_sync=info,zksync_node_consensus=info,zksync_contract_verification_server=info,zksync_node_api_server=info,zksync_tee_verifier_input_producer=info,zksync_node_framework=info,zksync_block_reverter=info,zksync_commitment_generator=debug,zksync_node_db_pruner=info,zksync_eth_sender=info,zksync_node_fee_model=info,zksync_node_genesis=info,zksync_house_keeper=info,zksync_proof_data_handler=info,zksync_shared_metrics=info,zksync_node_test_utils=info,zksync_vm_runner=info,zksync_consensus_bft=info,zksync_consensus_network=info,zksync_consensus_storage=info,zksync_core_leftovers=debug,zksync_server=debug,zksync_contract_verifier=debug,zksync_dal=info,zksync_db_connection=info,zksync_eth_client=info,zksync_eth_watch=debug,zksync_storage=info,zksync_db_manager=info,zksync_merkle_tree=info,zksync_state=debug,zksync_utils=debug,zksync_queued_job_processor=info,zksync_types=info,zksync_mempool=debug,loadnext=info,vm=info,zksync_object_store=info,zksync_external_node=info,zksync_witness_generator=info,zksync_prover_fri=info,zksync_witness_vector_generator=info,zksync_web3_decl=debug,zksync_health_check=debug,zksync_proof_fri_compressor=info,vise_exporter=error,snapshots_creator=debug,zksync_base_token_adjuster=debug,zksync_external_price_api=debug,zksync_external_proof_integration_api=info" + log_directives: "zksync_node_test_utils=info,zksync_state_keeper=info,zksync_reorg_detector=info,zksync_consistency_checker=info,zksync_metadata_calculator=info,zksync_node_sync=info,zksync_node_consensus=info,zksync_contract_verification_server=info,zksync_node_api_server=info,zksync_node_framework=info,zksync_block_reverter=info,zksync_commitment_generator=debug,zksync_node_db_pruner=info,zksync_eth_sender=info,zksync_node_fee_model=info,zksync_node_genesis=info,zksync_house_keeper=info,zksync_proof_data_handler=info,zksync_shared_metrics=info,zksync_node_test_utils=info,zksync_vm_runner=info,zksync_consensus_bft=info,zksync_consensus_network=info,zksync_consensus_storage=info,zksync_core_leftovers=debug,zksync_server=debug,zksync_contract_verifier=debug,zksync_dal=info,zksync_db_connection=info,zksync_eth_client=info,zksync_eth_watch=debug,zksync_storage=info,zksync_db_manager=info,zksync_merkle_tree=info,zksync_state=debug,zksync_utils=debug,zksync_queued_job_processor=info,zksync_types=info,zksync_mempool=debug,loadnext=info,vm=info,zksync_object_store=info,zksync_external_node=info,zksync_witness_generator=info,zksync_prover_fri=info,zksync_witness_vector_generator=info,zksync_web3_decl=debug,zksync_health_check=debug,zksync_proof_fri_compressor=info,vise_exporter=error,snapshots_creator=debug,zksync_base_token_adjuster=debug,zksync_external_price_api=debug,zksync_external_proof_integration_api=info" # Uncomment only if needed # sentry: # url: unset diff --git a/prover/docs/03_launch.md 
b/prover/docs/03_launch.md index 203fb6e8cecf..0465d888f612 100644 --- a/prover/docs/03_launch.md +++ b/prover/docs/03_launch.md @@ -47,7 +47,7 @@ We will be running a bunch of binaries, it's recommended to run each in a separa ### Server ``` -zk server --components=api,tree,eth,state_keeper,housekeeper,tee_verifier_input_producer,commitment_generator,da_dispatcher,proof_data_handler,vm_runner_protective_reads,vm_runner_bwip +zk server --components=api,tree,eth,state_keeper,housekeeper,commitment_generator,da_dispatcher,proof_data_handler,vm_runner_protective_reads,vm_runner_bwip ``` ### Proof data handler From fa06b18517993810ee4ab3513029002ee2d57a21 Mon Sep 17 00:00:00 2001 From: Harald Hoyer Date: Mon, 7 Oct 2024 10:26:15 +0200 Subject: [PATCH 02/31] fix: sql query simplify empty `tee_proofs` case, but pre-filter with `tee_type` to exclude other TEE techs. Signed-off-by: Harald Hoyer --- ...ffb3e2747766952e04338d71896e0a95a33d3.json | 26 ++++++++++++ ...c3eb3b46aacc51e93b046d2549763115e75a2.json | 26 ------------ core/lib/dal/src/tee_proof_generation_dal.rs | 40 ++++++++++++------- 3 files changed, 51 insertions(+), 41 deletions(-) create mode 100644 core/lib/dal/.sqlx/query-1aa0fe3b9e9a47a880751a237caffb3e2747766952e04338d71896e0a95a33d3.json delete mode 100644 core/lib/dal/.sqlx/query-d082522ca2316233c0d257e80ccc3eb3b46aacc51e93b046d2549763115e75a2.json diff --git a/core/lib/dal/.sqlx/query-1aa0fe3b9e9a47a880751a237caffb3e2747766952e04338d71896e0a95a33d3.json b/core/lib/dal/.sqlx/query-1aa0fe3b9e9a47a880751a237caffb3e2747766952e04338d71896e0a95a33d3.json new file mode 100644 index 000000000000..a590f1257c0e --- /dev/null +++ b/core/lib/dal/.sqlx/query-1aa0fe3b9e9a47a880751a237caffb3e2747766952e04338d71896e0a95a33d3.json @@ -0,0 +1,26 @@ +{ + "db_name": "PostgreSQL", + "query": "\n WITH\n tee_proofs AS (\n SELECT\n *\n FROM\n tee_proof_generation_details\n WHERE\n tee_type = $1\n ),\n \n upsert AS (\n SELECT\n proof_generation_details.l1_batch_number\n FROM\n proof_generation_details\n LEFT JOIN\n l1_batches\n ON proof_generation_details.l1_batch_number = l1_batches.number\n LEFT JOIN\n tee_proofs\n ON proof_generation_details.l1_batch_number = tee_proofs.l1_batch_number\n WHERE\n (\n proof_generation_details.vm_run_data_blob_url IS NOT NULL\n AND proof_generation_details.proof_gen_data_blob_url IS NOT NULL\n AND l1_batches.hash IS NOT NULL\n AND l1_batches.aux_data_hash IS NOT NULL\n AND l1_batches.meta_parameters_hash IS NOT NULL\n )\n AND (\n (\n tee_proofs.tee_type IS NULL\n AND tee_proofs.status IS NULL\n AND tee_proofs.l1_batch_number IS NULL\n ) OR (\n tee_proofs.tee_type = $1\n AND (\n tee_proofs.status = $3\n OR (\n tee_proofs.status = $2\n AND tee_proofs.prover_taken_at < NOW() - $4::INTERVAL\n )\n )\n AND tee_proofs.l1_batch_number >= $5\n )\n )\n ORDER BY\n l1_batch_number ASC\n LIMIT\n 1\n )\n \n INSERT INTO\n tee_proof_generation_details (\n l1_batch_number, tee_type, status, created_at, updated_at, prover_taken_at\n )\n SELECT\n l1_batch_number,\n $1,\n $2,\n NOW(),\n NOW(),\n NOW()\n FROM\n upsert\n ON CONFLICT (l1_batch_number, tee_type) DO\n UPDATE\n SET\n status = $2,\n updated_at = NOW(),\n prover_taken_at = NOW()\n RETURNING\n l1_batch_number\n ", + "describe": { + "columns": [ + { + "ordinal": 0, + "name": "l1_batch_number", + "type_info": "Int8" + } + ], + "parameters": { + "Left": [ + "Text", + "Text", + "Text", + "Interval", + "Int8" + ] + }, + "nullable": [ + false + ] + }, + "hash": "1aa0fe3b9e9a47a880751a237caffb3e2747766952e04338d71896e0a95a33d3" +} diff 
--git a/core/lib/dal/.sqlx/query-d082522ca2316233c0d257e80ccc3eb3b46aacc51e93b046d2549763115e75a2.json b/core/lib/dal/.sqlx/query-d082522ca2316233c0d257e80ccc3eb3b46aacc51e93b046d2549763115e75a2.json deleted file mode 100644 index 0d4e44e48858..000000000000 --- a/core/lib/dal/.sqlx/query-d082522ca2316233c0d257e80ccc3eb3b46aacc51e93b046d2549763115e75a2.json +++ /dev/null @@ -1,26 +0,0 @@ -{ - "db_name": "PostgreSQL", - "query": "\n WITH\n upsert AS (\n SELECT\n proof_generation_details.l1_batch_number\n FROM\n proof_generation_details\n LEFT JOIN\n l1_batches\n ON proof_generation_details.l1_batch_number = l1_batches.number\n LEFT JOIN\n tee_proof_generation_details AS proofs\n ON proof_generation_details.l1_batch_number = proofs.l1_batch_number\n WHERE\n (\n proof_generation_details.vm_run_data_blob_url IS NOT NULL\n AND proof_generation_details.proof_gen_data_blob_url IS NOT NULL\n AND l1_batches.hash IS NOT NULL\n AND l1_batches.aux_data_hash IS NOT NULL\n AND l1_batches.meta_parameters_hash IS NOT NULL\n )\n AND (\n proofs.tee_type IS NULL\n OR proofs.tee_type = $1\n )\n AND (\n proofs.status IS NULL\n OR proofs.status = $3\n OR (\n proofs.status = $2\n AND proofs.prover_taken_at < NOW() - $4::INTERVAL\n )\n )\n AND (\n proofs.l1_batch_number IS NULL\n OR proofs.l1_batch_number >= $5\n )\n ORDER BY\n l1_batch_number ASC\n LIMIT\n 1\n )\n \n INSERT INTO\n tee_proof_generation_details (\n l1_batch_number, tee_type, status, created_at, updated_at, prover_taken_at\n )\n SELECT\n l1_batch_number,\n $1,\n $2,\n NOW(),\n NOW(),\n NOW()\n FROM\n upsert\n ON CONFLICT (l1_batch_number, tee_type) DO\n UPDATE\n SET\n status = $2,\n updated_at = NOW(),\n prover_taken_at = NOW()\n RETURNING\n l1_batch_number\n ", - "describe": { - "columns": [ - { - "ordinal": 0, - "name": "l1_batch_number", - "type_info": "Int8" - } - ], - "parameters": { - "Left": [ - "Text", - "Text", - "Text", - "Interval", - "Int8" - ] - }, - "nullable": [ - false - ] - }, - "hash": "d082522ca2316233c0d257e80ccc3eb3b46aacc51e93b046d2549763115e75a2" -} diff --git a/core/lib/dal/src/tee_proof_generation_dal.rs b/core/lib/dal/src/tee_proof_generation_dal.rs index 8a7bb2863d41..44b9d6605e35 100644 --- a/core/lib/dal/src/tee_proof_generation_dal.rs +++ b/core/lib/dal/src/tee_proof_generation_dal.rs @@ -39,6 +39,15 @@ impl TeeProofGenerationDal<'_, '_> { let query = sqlx::query!( r#" WITH + tee_proofs AS ( + SELECT + * + FROM + tee_proof_generation_details + WHERE + tee_type = $1 + ), + upsert AS ( SELECT proof_generation_details.l1_batch_number @@ -48,8 +57,8 @@ impl TeeProofGenerationDal<'_, '_> { l1_batches ON proof_generation_details.l1_batch_number = l1_batches.number LEFT JOIN - tee_proof_generation_details AS proofs - ON proof_generation_details.l1_batch_number = proofs.l1_batch_number + tee_proofs + ON proof_generation_details.l1_batch_number = tee_proofs.l1_batch_number WHERE ( proof_generation_details.vm_run_data_blob_url IS NOT NULL @@ -59,21 +68,22 @@ impl TeeProofGenerationDal<'_, '_> { AND l1_batches.meta_parameters_hash IS NOT NULL ) AND ( - proofs.tee_type IS NULL - OR proofs.tee_type = $1 - ) - AND ( - proofs.status IS NULL - OR proofs.status = $3 - OR ( - proofs.status = $2 - AND proofs.prover_taken_at < NOW() - $4::INTERVAL + ( + tee_proofs.tee_type IS NULL + AND tee_proofs.status IS NULL + AND tee_proofs.l1_batch_number IS NULL + ) OR ( + tee_proofs.tee_type = $1 + AND ( + tee_proofs.status = $3 + OR ( + tee_proofs.status = $2 + AND tee_proofs.prover_taken_at < NOW() - $4::INTERVAL + ) + ) + AND 
tee_proofs.l1_batch_number >= $5 ) ) - AND ( - proofs.l1_batch_number IS NULL - OR proofs.l1_batch_number >= $5 - ) ORDER BY l1_batch_number ASC LIMIT From b61537835d76f44b1c24584370283350e468a56c Mon Sep 17 00:00:00 2001 From: Harald Hoyer Date: Mon, 7 Oct 2024 13:30:21 +0200 Subject: [PATCH 03/31] fix: remove obsolete `tee_type` check Signed-off-by: Harald Hoyer --- ...f596af40976b05c03f9f912ef79afd559f8af530450649593fc6.json} | 4 ++-- core/lib/dal/src/tee_proof_generation_dal.rs | 3 +-- 2 files changed, 3 insertions(+), 4 deletions(-) rename core/lib/dal/.sqlx/{query-1aa0fe3b9e9a47a880751a237caffb3e2747766952e04338d71896e0a95a33d3.json => query-b2de2ea4a66ff596af40976b05c03f9f912ef79afd559f8af530450649593fc6.json} (57%) diff --git a/core/lib/dal/.sqlx/query-1aa0fe3b9e9a47a880751a237caffb3e2747766952e04338d71896e0a95a33d3.json b/core/lib/dal/.sqlx/query-b2de2ea4a66ff596af40976b05c03f9f912ef79afd559f8af530450649593fc6.json similarity index 57% rename from core/lib/dal/.sqlx/query-1aa0fe3b9e9a47a880751a237caffb3e2747766952e04338d71896e0a95a33d3.json rename to core/lib/dal/.sqlx/query-b2de2ea4a66ff596af40976b05c03f9f912ef79afd559f8af530450649593fc6.json index a590f1257c0e..c1238981bab1 100644 --- a/core/lib/dal/.sqlx/query-1aa0fe3b9e9a47a880751a237caffb3e2747766952e04338d71896e0a95a33d3.json +++ b/core/lib/dal/.sqlx/query-b2de2ea4a66ff596af40976b05c03f9f912ef79afd559f8af530450649593fc6.json @@ -1,6 +1,6 @@ { "db_name": "PostgreSQL", - "query": "\n WITH\n tee_proofs AS (\n SELECT\n *\n FROM\n tee_proof_generation_details\n WHERE\n tee_type = $1\n ),\n \n upsert AS (\n SELECT\n proof_generation_details.l1_batch_number\n FROM\n proof_generation_details\n LEFT JOIN\n l1_batches\n ON proof_generation_details.l1_batch_number = l1_batches.number\n LEFT JOIN\n tee_proofs\n ON proof_generation_details.l1_batch_number = tee_proofs.l1_batch_number\n WHERE\n (\n proof_generation_details.vm_run_data_blob_url IS NOT NULL\n AND proof_generation_details.proof_gen_data_blob_url IS NOT NULL\n AND l1_batches.hash IS NOT NULL\n AND l1_batches.aux_data_hash IS NOT NULL\n AND l1_batches.meta_parameters_hash IS NOT NULL\n )\n AND (\n (\n tee_proofs.tee_type IS NULL\n AND tee_proofs.status IS NULL\n AND tee_proofs.l1_batch_number IS NULL\n ) OR (\n tee_proofs.tee_type = $1\n AND (\n tee_proofs.status = $3\n OR (\n tee_proofs.status = $2\n AND tee_proofs.prover_taken_at < NOW() - $4::INTERVAL\n )\n )\n AND tee_proofs.l1_batch_number >= $5\n )\n )\n ORDER BY\n l1_batch_number ASC\n LIMIT\n 1\n )\n \n INSERT INTO\n tee_proof_generation_details (\n l1_batch_number, tee_type, status, created_at, updated_at, prover_taken_at\n )\n SELECT\n l1_batch_number,\n $1,\n $2,\n NOW(),\n NOW(),\n NOW()\n FROM\n upsert\n ON CONFLICT (l1_batch_number, tee_type) DO\n UPDATE\n SET\n status = $2,\n updated_at = NOW(),\n prover_taken_at = NOW()\n RETURNING\n l1_batch_number\n ", + "query": "\n WITH\n tee_proofs AS (\n SELECT\n *\n FROM\n tee_proof_generation_details\n WHERE\n tee_type = $1\n ),\n \n upsert AS (\n SELECT\n proof_generation_details.l1_batch_number\n FROM\n proof_generation_details\n LEFT JOIN\n l1_batches\n ON proof_generation_details.l1_batch_number = l1_batches.number\n LEFT JOIN\n tee_proofs\n ON proof_generation_details.l1_batch_number = tee_proofs.l1_batch_number\n WHERE\n (\n proof_generation_details.vm_run_data_blob_url IS NOT NULL\n AND proof_generation_details.proof_gen_data_blob_url IS NOT NULL\n AND l1_batches.hash IS NOT NULL\n AND l1_batches.aux_data_hash IS NOT NULL\n AND l1_batches.meta_parameters_hash IS 
NOT NULL\n )\n AND (\n (\n tee_proofs.tee_type IS NULL\n AND tee_proofs.status IS NULL\n AND tee_proofs.l1_batch_number IS NULL\n ) OR (\n (\n tee_proofs.status = $3\n OR (\n tee_proofs.status = $2\n AND tee_proofs.prover_taken_at < NOW() - $4::INTERVAL\n )\n )\n AND tee_proofs.l1_batch_number >= $5\n )\n )\n ORDER BY\n l1_batch_number ASC\n LIMIT\n 1\n )\n \n INSERT INTO\n tee_proof_generation_details (\n l1_batch_number, tee_type, status, created_at, updated_at, prover_taken_at\n )\n SELECT\n l1_batch_number,\n $1,\n $2,\n NOW(),\n NOW(),\n NOW()\n FROM\n upsert\n ON CONFLICT (l1_batch_number, tee_type) DO\n UPDATE\n SET\n status = $2,\n updated_at = NOW(),\n prover_taken_at = NOW()\n RETURNING\n l1_batch_number\n ", "describe": { "columns": [ { @@ -22,5 +22,5 @@ false ] }, - "hash": "1aa0fe3b9e9a47a880751a237caffb3e2747766952e04338d71896e0a95a33d3" + "hash": "b2de2ea4a66ff596af40976b05c03f9f912ef79afd559f8af530450649593fc6" } diff --git a/core/lib/dal/src/tee_proof_generation_dal.rs b/core/lib/dal/src/tee_proof_generation_dal.rs index 44b9d6605e35..71c7f1c62af4 100644 --- a/core/lib/dal/src/tee_proof_generation_dal.rs +++ b/core/lib/dal/src/tee_proof_generation_dal.rs @@ -73,8 +73,7 @@ impl TeeProofGenerationDal<'_, '_> { AND tee_proofs.status IS NULL AND tee_proofs.l1_batch_number IS NULL ) OR ( - tee_proofs.tee_type = $1 - AND ( + ( tee_proofs.status = $3 OR ( tee_proofs.status = $2 From 39b453690eeac766a114fc0bc01806dc84c954bd Mon Sep 17 00:00:00 2001 From: Harald Hoyer Date: Mon, 7 Oct 2024 13:59:12 +0200 Subject: [PATCH 04/31] fix: clippy lint Signed-off-by: Harald Hoyer --- core/node/proof_data_handler/src/tee_request_processor.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/core/node/proof_data_handler/src/tee_request_processor.rs b/core/node/proof_data_handler/src/tee_request_processor.rs index d1d8899e234f..ce38946a739c 100644 --- a/core/node/proof_data_handler/src/tee_request_processor.rs +++ b/core/node/proof_data_handler/src/tee_request_processor.rs @@ -114,7 +114,7 @@ impl TeeRequestProcessor { .transactions_dal() .get_l2_blocks_to_execute_for_l1_batch(l1_batch_number) .await - .map_err(|err| RequestProcessorError::Dal(err))?; + .map_err(RequestProcessorError::Dal)?; let l1_batch_params_provider = L1BatchParamsProvider::new(&mut connection) .await From a96663a2af8d06f5d13ead9b134f9829817ea07b Mon Sep 17 00:00:00 2001 From: Harald Hoyer Date: Tue, 8 Oct 2024 10:53:49 +0200 Subject: [PATCH 05/31] fix: logic in tee_prover verify correct leftovers from debug/merge Signed-off-by: Harald Hoyer --- core/lib/tee_verifier/src/lib.rs | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/core/lib/tee_verifier/src/lib.rs b/core/lib/tee_verifier/src/lib.rs index ec0e1b5fff62..86d51b8f9937 100644 --- a/core/lib/tee_verifier/src/lib.rs +++ b/core/lib/tee_verifier/src/lib.rs @@ -230,8 +230,9 @@ fn map_log_tree( storage_log.value, value ); + anyhow::bail!("Failed to map LogQuery to TreeInstruction"); } - anyhow::bail!("Failed to map LogQuery to TreeInstruction"); + TreeInstruction::Read(key) } (true, StorageLogKind::RepeatedWrite, TreeLogEntry::Read { .. 
}) => { tracing::error!( From 3a9716829e7a1d64953c20bdf824ed02c767c629 Mon Sep 17 00:00:00 2001 From: Harald Hoyer Date: Wed, 9 Oct 2024 16:32:05 +0200 Subject: [PATCH 06/31] fix: simplify SQL expression Signed-off-by: Harald Hoyer --- ...03f9f912ef79afd559f8af530450649593fc6.json | 26 --------- ...97ae86e2513aca65199717dfd7359feddcd5e.json | 26 +++++++++ core/lib/dal/src/tee_proof_generation_dal.rs | 58 +++++++------------ 3 files changed, 48 insertions(+), 62 deletions(-) delete mode 100644 core/lib/dal/.sqlx/query-b2de2ea4a66ff596af40976b05c03f9f912ef79afd559f8af530450649593fc6.json create mode 100644 core/lib/dal/.sqlx/query-d825fcac2f7d32a31c159d5674497ae86e2513aca65199717dfd7359feddcd5e.json diff --git a/core/lib/dal/.sqlx/query-b2de2ea4a66ff596af40976b05c03f9f912ef79afd559f8af530450649593fc6.json b/core/lib/dal/.sqlx/query-b2de2ea4a66ff596af40976b05c03f9f912ef79afd559f8af530450649593fc6.json deleted file mode 100644 index c1238981bab1..000000000000 --- a/core/lib/dal/.sqlx/query-b2de2ea4a66ff596af40976b05c03f9f912ef79afd559f8af530450649593fc6.json +++ /dev/null @@ -1,26 +0,0 @@ -{ - "db_name": "PostgreSQL", - "query": "\n WITH\n tee_proofs AS (\n SELECT\n *\n FROM\n tee_proof_generation_details\n WHERE\n tee_type = $1\n ),\n \n upsert AS (\n SELECT\n proof_generation_details.l1_batch_number\n FROM\n proof_generation_details\n LEFT JOIN\n l1_batches\n ON proof_generation_details.l1_batch_number = l1_batches.number\n LEFT JOIN\n tee_proofs\n ON proof_generation_details.l1_batch_number = tee_proofs.l1_batch_number\n WHERE\n (\n proof_generation_details.vm_run_data_blob_url IS NOT NULL\n AND proof_generation_details.proof_gen_data_blob_url IS NOT NULL\n AND l1_batches.hash IS NOT NULL\n AND l1_batches.aux_data_hash IS NOT NULL\n AND l1_batches.meta_parameters_hash IS NOT NULL\n )\n AND (\n (\n tee_proofs.tee_type IS NULL\n AND tee_proofs.status IS NULL\n AND tee_proofs.l1_batch_number IS NULL\n ) OR (\n (\n tee_proofs.status = $3\n OR (\n tee_proofs.status = $2\n AND tee_proofs.prover_taken_at < NOW() - $4::INTERVAL\n )\n )\n AND tee_proofs.l1_batch_number >= $5\n )\n )\n ORDER BY\n l1_batch_number ASC\n LIMIT\n 1\n )\n \n INSERT INTO\n tee_proof_generation_details (\n l1_batch_number, tee_type, status, created_at, updated_at, prover_taken_at\n )\n SELECT\n l1_batch_number,\n $1,\n $2,\n NOW(),\n NOW(),\n NOW()\n FROM\n upsert\n ON CONFLICT (l1_batch_number, tee_type) DO\n UPDATE\n SET\n status = $2,\n updated_at = NOW(),\n prover_taken_at = NOW()\n RETURNING\n l1_batch_number\n ", - "describe": { - "columns": [ - { - "ordinal": 0, - "name": "l1_batch_number", - "type_info": "Int8" - } - ], - "parameters": { - "Left": [ - "Text", - "Text", - "Text", - "Interval", - "Int8" - ] - }, - "nullable": [ - false - ] - }, - "hash": "b2de2ea4a66ff596af40976b05c03f9f912ef79afd559f8af530450649593fc6" -} diff --git a/core/lib/dal/.sqlx/query-d825fcac2f7d32a31c159d5674497ae86e2513aca65199717dfd7359feddcd5e.json b/core/lib/dal/.sqlx/query-d825fcac2f7d32a31c159d5674497ae86e2513aca65199717dfd7359feddcd5e.json new file mode 100644 index 000000000000..2d17efc0fccf --- /dev/null +++ b/core/lib/dal/.sqlx/query-d825fcac2f7d32a31c159d5674497ae86e2513aca65199717dfd7359feddcd5e.json @@ -0,0 +1,26 @@ +{ + "db_name": "PostgreSQL", + "query": "\n WITH upsert AS (\n SELECT\n p.l1_batch_number\n FROM\n proof_generation_details p\n LEFT JOIN\n l1_batches l1\n ON p.l1_batch_number = l1.number\n LEFT JOIN\n tee_proof_generation_details tee\n ON\n p.l1_batch_number = tee.l1_batch_number\n AND tee.tee_type = $1\n WHERE\n 
(\n p.l1_batch_number >= $5\n AND p.vm_run_data_blob_url IS NOT NULL\n AND p.proof_gen_data_blob_url IS NOT NULL\n AND l1.hash IS NOT NULL\n AND l1.aux_data_hash IS NOT NULL\n AND l1.meta_parameters_hash IS NOT NULL\n )\n AND (\n tee.l1_batch_number IS NULL\n OR (\n tee.status = $3\n OR (\n tee.status = $2\n AND tee.prover_taken_at < NOW() - $4::INTERVAL\n )\n )\n )\n ORDER BY\n l1_batch_number ASC\n FETCH FIRST ROW ONLY\n )\n \n INSERT INTO\n tee_proof_generation_details (\n l1_batch_number, tee_type, status, created_at, updated_at, prover_taken_at\n )\n SELECT\n l1_batch_number,\n $1,\n $2,\n NOW(),\n NOW(),\n NOW()\n FROM\n upsert\n ON CONFLICT (l1_batch_number, tee_type) DO\n UPDATE\n SET\n status = $2,\n updated_at = NOW(),\n prover_taken_at = NOW()\n RETURNING\n l1_batch_number\n ", + "describe": { + "columns": [ + { + "ordinal": 0, + "name": "l1_batch_number", + "type_info": "Int8" + } + ], + "parameters": { + "Left": [ + "Text", + "Text", + "Text", + "Interval", + "Int8" + ] + }, + "nullable": [ + false + ] + }, + "hash": "d825fcac2f7d32a31c159d5674497ae86e2513aca65199717dfd7359feddcd5e" +} diff --git a/core/lib/dal/src/tee_proof_generation_dal.rs b/core/lib/dal/src/tee_proof_generation_dal.rs index 71c7f1c62af4..c9e5989e6413 100644 --- a/core/lib/dal/src/tee_proof_generation_dal.rs +++ b/core/lib/dal/src/tee_proof_generation_dal.rs @@ -38,55 +38,41 @@ impl TeeProofGenerationDal<'_, '_> { let min_batch_number = min_batch_number.map_or(0, |num| i64::from(num.0)); let query = sqlx::query!( r#" - WITH - tee_proofs AS ( + WITH upsert AS ( SELECT - * + p.l1_batch_number FROM - tee_proof_generation_details - WHERE - tee_type = $1 - ), - - upsert AS ( - SELECT - proof_generation_details.l1_batch_number - FROM - proof_generation_details + proof_generation_details p LEFT JOIN - l1_batches - ON proof_generation_details.l1_batch_number = l1_batches.number + l1_batches l1 + ON p.l1_batch_number = l1.number LEFT JOIN - tee_proofs - ON proof_generation_details.l1_batch_number = tee_proofs.l1_batch_number + tee_proof_generation_details tee + ON + p.l1_batch_number = tee.l1_batch_number + AND tee.tee_type = $1 WHERE ( - proof_generation_details.vm_run_data_blob_url IS NOT NULL - AND proof_generation_details.proof_gen_data_blob_url IS NOT NULL - AND l1_batches.hash IS NOT NULL - AND l1_batches.aux_data_hash IS NOT NULL - AND l1_batches.meta_parameters_hash IS NOT NULL + p.l1_batch_number >= $5 + AND p.vm_run_data_blob_url IS NOT NULL + AND p.proof_gen_data_blob_url IS NOT NULL + AND l1.hash IS NOT NULL + AND l1.aux_data_hash IS NOT NULL + AND l1.meta_parameters_hash IS NOT NULL ) AND ( - ( - tee_proofs.tee_type IS NULL - AND tee_proofs.status IS NULL - AND tee_proofs.l1_batch_number IS NULL - ) OR ( - ( - tee_proofs.status = $3 - OR ( - tee_proofs.status = $2 - AND tee_proofs.prover_taken_at < NOW() - $4::INTERVAL - ) + tee.l1_batch_number IS NULL + OR ( + tee.status = $3 + OR ( + tee.status = $2 + AND tee.prover_taken_at < NOW() - $4::INTERVAL ) - AND tee_proofs.l1_batch_number >= $5 ) ) ORDER BY l1_batch_number ASC - LIMIT - 1 + FETCH FIRST ROW ONLY ) INSERT INTO From 77f44fdfaa5e1043219d1127975855674fa2a525 Mon Sep 17 00:00:00 2001 From: Patrick Beza Date: Tue, 8 Oct 2024 16:12:07 +0200 Subject: [PATCH 07/31] feat(proof-data-handler): return 204 HTTP status code if no proofs are available --- core/node/proof_data_handler/src/lib.rs | 14 ++++++++++---- 1 file changed, 10 insertions(+), 4 deletions(-) diff --git a/core/node/proof_data_handler/src/lib.rs b/core/node/proof_data_handler/src/lib.rs 
index 618a786ea658..b5ce1b84a79a 100644 --- a/core/node/proof_data_handler/src/lib.rs +++ b/core/node/proof_data_handler/src/lib.rs @@ -1,7 +1,7 @@ use std::{net::SocketAddr, sync::Arc}; use anyhow::Context as _; -use axum::{extract::Path, routing::post, Json, Router}; +use axum::{extract::Path, http::StatusCode, response::IntoResponse, routing::post, Json, Router}; use request_processor::RequestProcessor; use tee_request_processor::TeeRequestProcessor; use tokio::sync::watch; @@ -10,7 +10,7 @@ use zksync_dal::{ConnectionPool, Core}; use zksync_object_store::ObjectStore; use zksync_prover_interface::api::{ ProofGenerationDataRequest, RegisterTeeAttestationRequest, SubmitProofRequest, - SubmitTeeProofRequest, TeeProofGenerationDataRequest, + SubmitTeeProofRequest, TeeProofGenerationDataRequest, TeeProofGenerationDataResponse, }; use zksync_types::commitment::L1BatchCommitmentMode; @@ -96,9 +96,15 @@ fn create_proof_processing_router( "/tee/proof_inputs", post( move |payload: Json| async move { - get_tee_proof_gen_processor + let result: Result, errors::RequestProcessorError> = get_tee_proof_gen_processor .get_proof_generation_data(payload) - .await + .await; + + match result { + Ok(Json(TeeProofGenerationDataResponse(None))) => (StatusCode::NO_CONTENT, Json("No new TeeVerifierInputs are available yet")).into_response(), + Ok(data) => (StatusCode::OK, data).into_response(), + Err(e) => e.into_response(), + } }, ), ) From 5699299692ea9483e89e8bf2fbe203f58221238a Mon Sep 17 00:00:00 2001 From: Patrick Beza Date: Wed, 9 Oct 2024 18:03:19 +0200 Subject: [PATCH 08/31] WIP --- core/lib/dal/src/tee_proof_generation_dal.rs | 32 ----- .../api_server/src/web3/tests/unstable.rs | 3 - .../src/tee_request_processor.rs | 12 +- core/node/proof_data_handler/src/tests.rs | 125 +++++++++++++++--- 4 files changed, 115 insertions(+), 57 deletions(-) diff --git a/core/lib/dal/src/tee_proof_generation_dal.rs b/core/lib/dal/src/tee_proof_generation_dal.rs index c9e5989e6413..655af716d2ce 100644 --- a/core/lib/dal/src/tee_proof_generation_dal.rs +++ b/core/lib/dal/src/tee_proof_generation_dal.rs @@ -196,38 +196,6 @@ impl TeeProofGenerationDal<'_, '_> { Ok(()) } - pub async fn insert_tee_proof_generation_job( - &mut self, - batch_number: L1BatchNumber, - tee_type: TeeType, - ) -> DalResult<()> { - let batch_number = i64::from(batch_number.0); - let query = sqlx::query!( - r#" - INSERT INTO - tee_proof_generation_details ( - l1_batch_number, tee_type, status, created_at, updated_at - ) - VALUES - ($1, $2, $3, NOW(), NOW()) - ON CONFLICT (l1_batch_number, tee_type) DO NOTHING - "#, - batch_number, - tee_type.to_string(), - TeeProofGenerationJobStatus::Unpicked.to_string(), - ); - let instrumentation = Instrumented::new("insert_tee_proof_generation_job") - .with_arg("l1_batch_number", &batch_number) - .with_arg("tee_type", &tee_type); - instrumentation - .clone() - .with(query) - .execute(self.storage) - .await?; - - Ok(()) - } - pub async fn save_attestation(&mut self, pubkey: &[u8], attestation: &[u8]) -> DalResult<()> { let query = sqlx::query!( r#" diff --git a/core/node/api_server/src/web3/tests/unstable.rs b/core/node/api_server/src/web3/tests/unstable.rs index e814081afa02..4c4b604ad3ed 100644 --- a/core/node/api_server/src/web3/tests/unstable.rs +++ b/core/node/api_server/src/web3/tests/unstable.rs @@ -34,9 +34,6 @@ impl HttpTest for GetTeeProofsTest { tee_proof_generation_dal .save_attestation(&pubkey, &attestation) .await?; - tee_proof_generation_dal - .insert_tee_proof_generation_job(batch_no, tee_type) - 
.await?; let signature = vec![0, 1, 2, 3, 4]; let proof_vec = vec![5, 6, 7, 8, 9]; diff --git a/core/node/proof_data_handler/src/tee_request_processor.rs b/core/node/proof_data_handler/src/tee_request_processor.rs index ce38946a739c..4c6a2fbe880d 100644 --- a/core/node/proof_data_handler/src/tee_request_processor.rs +++ b/core/node/proof_data_handler/src/tee_request_processor.rs @@ -44,9 +44,11 @@ impl TeeRequestProcessor { ) -> Result, RequestProcessorError> { tracing::info!("Received request for proof generation data: {:?}", request); + println!("foobar_01"); let mut min_batch_number: Option = None; let mut missing_range: Option<(L1BatchNumber, L1BatchNumber)> = None; + println!("foobar_02"); let result = loop { let l1_batch_number = match self .lock_batch_for_proving(request.tee_type, min_batch_number) @@ -56,12 +58,17 @@ impl TeeRequestProcessor { None => break Ok(Json(TeeProofGenerationDataResponse(None))), }; + println!("foobar_03"); match self .tee_verifier_input_for_existing_batch(l1_batch_number) .await { - Ok(input) => break Ok(Json(TeeProofGenerationDataResponse(Some(Box::new(input))))), + Ok(input) => { + println!("foobar_04"); + break Ok(Json(TeeProofGenerationDataResponse(Some(Box::new(input))))); + } Err(RequestProcessorError::ObjectStore(ObjectStoreError::KeyNotFound(_))) => { + println!("foobar_05"); missing_range = match missing_range { Some((start, _)) => Some((start, l1_batch_number)), None => Some((l1_batch_number, l1_batch_number)), @@ -70,11 +77,14 @@ impl TeeRequestProcessor { min_batch_number = Some(min_batch_number.unwrap_or(l1_batch_number) + 1); } Err(err) => { + println!("foobar_06"); self.unlock_batch(l1_batch_number, request.tee_type).await?; break Err(err); } } + println!("foobar_07"); }; + println!("foobar_08"); if let Some((start, end)) = missing_range { tracing::warn!( diff --git a/core/node/proof_data_handler/src/tests.rs b/core/node/proof_data_handler/src/tests.rs index ae78a0e62de4..c5dc499f4890 100644 --- a/core/node/proof_data_handler/src/tests.rs +++ b/core/node/proof_data_handler/src/tests.rs @@ -1,3 +1,5 @@ +use std::env; + use axum::{ body::Body, http::{self, Method, Request, StatusCode}, @@ -5,12 +7,20 @@ use axum::{ Router, }; use serde_json::json; +use tokio::time::{sleep, Duration}; use tower::ServiceExt; use zksync_config::configs::ProofDataHandlerConfig; +use zksync_contracts::BaseSystemContractsHashes; use zksync_dal::{ConnectionPool, CoreDal}; use zksync_object_store::MockObjectStore; use zksync_prover_interface::api::SubmitTeeProofRequest; -use zksync_types::{commitment::L1BatchCommitmentMode, tee_types::TeeType, L1BatchNumber}; +use zksync_types::{ + block::L1BatchHeader, + commitment::{L1BatchCommitmentArtifacts, L1BatchCommitmentMode}, + protocol_version::ProtocolSemanticVersion, + tee_types::TeeType, + L1BatchNumber, ProtocolVersionId, H256, +}; use crate::create_proof_processing_router; @@ -18,12 +28,11 @@ use crate::create_proof_processing_router; async fn request_tee_proof_inputs() { let batch_number = L1BatchNumber::from(1); let db_conn_pool = ConnectionPool::test_pool().await; - - mock_tee_batch_status(db_conn_pool.clone(), batch_number).await; + println!("DATABASE_URL: {:?}", db_conn_pool); let app = create_proof_processing_router( MockObjectStore::arc(), - db_conn_pool, + db_conn_pool.clone(), ProofDataHandlerConfig { http_port: 1337, proof_generation_timeout_in_secs: 10, @@ -31,6 +40,34 @@ async fn request_tee_proof_inputs() { }, L1BatchCommitmentMode::Rollup, ); + let test_cases = vec![ + (json!({ "tee_type": "sgx" }), 
StatusCode::NO_CONTENT), + ( + json!({ "tee_type": "Sgx" }), + StatusCode::UNPROCESSABLE_ENTITY, + ), + ]; + + for (body, expected_status) in test_cases { + let req_body = Body::from(serde_json::to_vec(&body).unwrap()); + let response = app + .clone() + .oneshot( + Request::builder() + .method(Method::POST) + .uri("/tee/proof_inputs") + .header(http::header::CONTENT_TYPE, "application/json") + .body(req_body) + .unwrap(), + ) + .await + .unwrap(); + + assert_eq!(response.status(), expected_status); + } + + mock_l1_batch(db_conn_pool.clone(), batch_number).await; + let test_cases = vec![ (json!({ "tee_type": "sgx" }), StatusCode::OK), ( @@ -39,6 +76,8 @@ async fn request_tee_proof_inputs() { ), ]; + sleep(Duration::from_secs(1000)).await; + for (body, expected_status) in test_cases { let req_body = Body::from(serde_json::to_vec(&body).unwrap()); let response = app @@ -64,8 +103,6 @@ async fn submit_tee_proof() { let batch_number = L1BatchNumber::from(1); let db_conn_pool = ConnectionPool::test_pool().await; - mock_tee_batch_status(db_conn_pool.clone(), batch_number).await; - let tee_proof_request_str = r#"{ "signature": "0001020304", "pubkey": "0506070809", @@ -138,34 +175,80 @@ async fn submit_tee_proof() { assert_eq!(proof.pubkey.as_ref().unwrap(), &tee_proof_request.0.pubkey); } -// Mock SQL db with information about the status of the TEE proof generation -async fn mock_tee_batch_status( +async fn mock_l1_batch( db_conn_pool: ConnectionPool, batch_number: L1BatchNumber, ) { let mut proof_db_conn = db_conn_pool.connection().await.unwrap(); - let mut proof_dal = proof_db_conn.tee_proof_generation_dal(); - // there should not be any batches awaiting proof in the db yet + // Common values + let protocol_version_id = ProtocolVersionId::latest(); + let protocol_version = ProtocolSemanticVersion { + minor: protocol_version_id, + patch: 0.into(), + }; + let base_system_contracts_hashes = BaseSystemContractsHashes { + bootloader: H256::repeat_byte(1), + default_aa: H256::repeat_byte(42), + }; - let oldest_batch_number = proof_dal.get_oldest_unpicked_batch().await.unwrap(); - assert!(oldest_batch_number.is_none()); + // Save protocol version + proof_db_conn + .protocol_versions_dal() + .save_protocol_version( + protocol_version, + 0, + Default::default(), + base_system_contracts_hashes, + None, + ) + .await + .unwrap(); + + // Insert mock L1 batch + let header = L1BatchHeader::new( + batch_number, + 100, + base_system_contracts_hashes, + protocol_version_id, + ); + assert!(proof_db_conn + .blocks_dal() + .insert_mock_l1_batch(&header) + .await + .is_ok()); - // mock SQL table with relevant information about the status of TEE proof generation + // Save L1 batch commitment artifacts and set hash + let hash = H256::repeat_byte(1); + proof_db_conn + .blocks_dal() + .save_l1_batch_commitment_artifacts(batch_number, &L1BatchCommitmentArtifacts::default()) + .await + .unwrap(); + proof_db_conn + .blocks_dal() + .set_l1_batch_hash(batch_number, hash) + .await + .unwrap(); - proof_dal - .insert_tee_proof_generation_job(batch_number, TeeType::Sgx) + // Insert proof generation details + proof_db_conn + .proof_generation_dal() + .insert_proof_generation_details(batch_number) .await - .expect("Failed to insert tee_proof_generation_job"); + .unwrap(); - // now, there should be one batch in the db awaiting proof + proof_db_conn + .proof_generation_dal() + .save_vm_runner_artifacts_metadata(batch_number, "vm_run_data_blob_url") + .await + .unwrap(); - let oldest_batch_number = proof_dal - 
.get_oldest_unpicked_batch()
+ proof_db_conn
+ .proof_generation_dal()
+ .save_merkle_paths_artifacts_metadata(batch_number, "proof_gen_data_blob_url")
.await
- .unwrap()
.unwrap();
- assert_eq!(oldest_batch_number, batch_number);
}

async fn send_submit_tee_proof_request(

From 70c82b6d387b9bf671ac10b6e0e9ac16d1e72a7e Mon Sep 17 00:00:00 2001
From: Patrick Beza
Date: Wed, 9 Oct 2024 19:40:47 +0200
Subject: [PATCH 09/31] Give up on more comprehensive tests - they need overly complex mockups

---
core/lib/dal/src/tee_proof_generation_dal.rs | 34 +++++
.../api_server/src/web3/tests/unstable.rs | 3 +
.../src/tee_request_processor.rs | 8 --
core/node/proof_data_handler/src/tests.rs | 122 +++---------------
4 files changed, 55 insertions(+), 112 deletions(-)

diff --git a/core/lib/dal/src/tee_proof_generation_dal.rs b/core/lib/dal/src/tee_proof_generation_dal.rs
index 655af716d2ce..c98e0529b7f7 100644
--- a/core/lib/dal/src/tee_proof_generation_dal.rs
+++ b/core/lib/dal/src/tee_proof_generation_dal.rs
@@ -259,6 +259,40 @@ impl TeeProofGenerationDal<'_, '_> {
Ok(proofs)
}

+ /// For testing purposes only.
+ pub async fn insert_tee_proof_generation_job(
+ &mut self,
+ batch_number: L1BatchNumber,
+ tee_type: TeeType,
+ ) -> DalResult<()> {
+ let batch_number = i64::from(batch_number.0);
+ let query = sqlx::query!(
+ r#"
+ INSERT INTO
+ tee_proof_generation_details (
+ l1_batch_number, tee_type, status, created_at, updated_at
+ )
+ VALUES
+ ($1, $2, $3, NOW(), NOW())
+ ON CONFLICT (l1_batch_number, tee_type) DO NOTHING
+ "#,
+ batch_number,
+ tee_type.to_string(),
+ TeeProofGenerationJobStatus::Unpicked.to_string(),
+ );
+ let instrumentation = Instrumented::new("insert_tee_proof_generation_job")
+ .with_arg("l1_batch_number", &batch_number)
+ .with_arg("tee_type", &tee_type);
+ instrumentation
+ .clone()
+ .with(query)
+ .execute(self.storage)
+ .await?;
+
+ Ok(())
+ }
+
+ /// For testing purposes only.
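+ ///
+ /// Illustrative usage (a sketch only; `connection` is assumed to come from a
+ /// test pool such as `ConnectionPool::test_pool()`), pairing this helper with
+ /// `insert_tee_proof_generation_job` above:
+ ///
+ /// ```ignore
+ /// let mut dal = connection.tee_proof_generation_dal();
+ /// // Inserting the same job twice is a no-op thanks to ON CONFLICT ... DO NOTHING.
+ /// dal.insert_tee_proof_generation_job(L1BatchNumber(1), TeeType::Sgx).await?;
+ /// dal.insert_tee_proof_generation_job(L1BatchNumber(1), TeeType::Sgx).await?;
+ /// assert_eq!(dal.get_oldest_unpicked_batch().await?, Some(L1BatchNumber(1)));
+ /// ```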
pub async fn get_oldest_unpicked_batch(&mut self) -> DalResult> { let query = sqlx::query!( r#" diff --git a/core/node/api_server/src/web3/tests/unstable.rs b/core/node/api_server/src/web3/tests/unstable.rs index 4c4b604ad3ed..e814081afa02 100644 --- a/core/node/api_server/src/web3/tests/unstable.rs +++ b/core/node/api_server/src/web3/tests/unstable.rs @@ -34,6 +34,9 @@ impl HttpTest for GetTeeProofsTest { tee_proof_generation_dal .save_attestation(&pubkey, &attestation) .await?; + tee_proof_generation_dal + .insert_tee_proof_generation_job(batch_no, tee_type) + .await?; let signature = vec![0, 1, 2, 3, 4]; let proof_vec = vec![5, 6, 7, 8, 9]; diff --git a/core/node/proof_data_handler/src/tee_request_processor.rs b/core/node/proof_data_handler/src/tee_request_processor.rs index 4c6a2fbe880d..b7e661450177 100644 --- a/core/node/proof_data_handler/src/tee_request_processor.rs +++ b/core/node/proof_data_handler/src/tee_request_processor.rs @@ -44,11 +44,9 @@ impl TeeRequestProcessor { ) -> Result, RequestProcessorError> { tracing::info!("Received request for proof generation data: {:?}", request); - println!("foobar_01"); let mut min_batch_number: Option = None; let mut missing_range: Option<(L1BatchNumber, L1BatchNumber)> = None; - println!("foobar_02"); let result = loop { let l1_batch_number = match self .lock_batch_for_proving(request.tee_type, min_batch_number) @@ -58,17 +56,14 @@ impl TeeRequestProcessor { None => break Ok(Json(TeeProofGenerationDataResponse(None))), }; - println!("foobar_03"); match self .tee_verifier_input_for_existing_batch(l1_batch_number) .await { Ok(input) => { - println!("foobar_04"); break Ok(Json(TeeProofGenerationDataResponse(Some(Box::new(input))))); } Err(RequestProcessorError::ObjectStore(ObjectStoreError::KeyNotFound(_))) => { - println!("foobar_05"); missing_range = match missing_range { Some((start, _)) => Some((start, l1_batch_number)), None => Some((l1_batch_number, l1_batch_number)), @@ -77,14 +72,11 @@ impl TeeRequestProcessor { min_batch_number = Some(min_batch_number.unwrap_or(l1_batch_number) + 1); } Err(err) => { - println!("foobar_06"); self.unlock_batch(l1_batch_number, request.tee_type).await?; break Err(err); } } - println!("foobar_07"); }; - println!("foobar_08"); if let Some((start, end)) = missing_range { tracing::warn!( diff --git a/core/node/proof_data_handler/src/tests.rs b/core/node/proof_data_handler/src/tests.rs index c5dc499f4890..995b59a3d58c 100644 --- a/core/node/proof_data_handler/src/tests.rs +++ b/core/node/proof_data_handler/src/tests.rs @@ -1,5 +1,3 @@ -use std::env; - use axum::{ body::Body, http::{self, Method, Request, StatusCode}, @@ -7,28 +5,18 @@ use axum::{ Router, }; use serde_json::json; -use tokio::time::{sleep, Duration}; use tower::ServiceExt; use zksync_config::configs::ProofDataHandlerConfig; -use zksync_contracts::BaseSystemContractsHashes; use zksync_dal::{ConnectionPool, CoreDal}; use zksync_object_store::MockObjectStore; use zksync_prover_interface::api::SubmitTeeProofRequest; -use zksync_types::{ - block::L1BatchHeader, - commitment::{L1BatchCommitmentArtifacts, L1BatchCommitmentMode}, - protocol_version::ProtocolSemanticVersion, - tee_types::TeeType, - L1BatchNumber, ProtocolVersionId, H256, -}; +use zksync_types::{commitment::L1BatchCommitmentMode, tee_types::TeeType, L1BatchNumber}; use crate::create_proof_processing_router; #[tokio::test] async fn request_tee_proof_inputs() { - let batch_number = L1BatchNumber::from(1); let db_conn_pool = ConnectionPool::test_pool().await; - println!("DATABASE_URL: 
{:?}", db_conn_pool); let app = create_proof_processing_router( MockObjectStore::arc(), @@ -65,36 +53,6 @@ async fn request_tee_proof_inputs() { assert_eq!(response.status(), expected_status); } - - mock_l1_batch(db_conn_pool.clone(), batch_number).await; - - let test_cases = vec![ - (json!({ "tee_type": "sgx" }), StatusCode::OK), - ( - json!({ "tee_type": "Sgx" }), - StatusCode::UNPROCESSABLE_ENTITY, - ), - ]; - - sleep(Duration::from_secs(1000)).await; - - for (body, expected_status) in test_cases { - let req_body = Body::from(serde_json::to_vec(&body).unwrap()); - let response = app - .clone() - .oneshot( - Request::builder() - .method(Method::POST) - .uri("/tee/proof_inputs") - .header(http::header::CONTENT_TYPE, "application/json") - .body(req_body) - .unwrap(), - ) - .await - .unwrap(); - - assert_eq!(response.status(), expected_status); - } } // Test /tee/submit_proofs endpoint using a mocked TEE proof and verify response and db state @@ -103,6 +61,8 @@ async fn submit_tee_proof() { let batch_number = L1BatchNumber::from(1); let db_conn_pool = ConnectionPool::test_pool().await; + mock_tee_batch_status(db_conn_pool.clone(), batch_number).await; + let tee_proof_request_str = r#"{ "signature": "0001020304", "pubkey": "0506070809", @@ -175,80 +135,34 @@ async fn submit_tee_proof() { assert_eq!(proof.pubkey.as_ref().unwrap(), &tee_proof_request.0.pubkey); } -async fn mock_l1_batch( +// Mock SQL db with information about the status of the TEE proof generation +async fn mock_tee_batch_status( db_conn_pool: ConnectionPool, batch_number: L1BatchNumber, ) { let mut proof_db_conn = db_conn_pool.connection().await.unwrap(); + let mut proof_dal = proof_db_conn.tee_proof_generation_dal(); - // Common values - let protocol_version_id = ProtocolVersionId::latest(); - let protocol_version = ProtocolSemanticVersion { - minor: protocol_version_id, - patch: 0.into(), - }; - let base_system_contracts_hashes = BaseSystemContractsHashes { - bootloader: H256::repeat_byte(1), - default_aa: H256::repeat_byte(42), - }; - - // Save protocol version - proof_db_conn - .protocol_versions_dal() - .save_protocol_version( - protocol_version, - 0, - Default::default(), - base_system_contracts_hashes, - None, - ) - .await - .unwrap(); + // there should not be any batches awaiting proof in the db yet - // Insert mock L1 batch - let header = L1BatchHeader::new( - batch_number, - 100, - base_system_contracts_hashes, - protocol_version_id, - ); - assert!(proof_db_conn - .blocks_dal() - .insert_mock_l1_batch(&header) - .await - .is_ok()); + let oldest_batch_number = proof_dal.get_oldest_unpicked_batch().await.unwrap(); + assert!(oldest_batch_number.is_none()); - // Save L1 batch commitment artifacts and set hash - let hash = H256::repeat_byte(1); - proof_db_conn - .blocks_dal() - .save_l1_batch_commitment_artifacts(batch_number, &L1BatchCommitmentArtifacts::default()) - .await - .unwrap(); - proof_db_conn - .blocks_dal() - .set_l1_batch_hash(batch_number, hash) - .await - .unwrap(); + // mock SQL table with relevant information about the status of TEE proof generation - // Insert proof generation details - proof_db_conn - .proof_generation_dal() - .insert_proof_generation_details(batch_number) + proof_dal + .insert_tee_proof_generation_job(batch_number, TeeType::Sgx) .await - .unwrap(); + .expect("Failed to insert tee_proof_generation_job"); - proof_db_conn - .proof_generation_dal() - .save_vm_runner_artifacts_metadata(batch_number, "vm_run_data_blob_url") - .await - .unwrap(); + // now, there should be one batch in the db 
awaiting proof - proof_db_conn - .proof_generation_dal() - .save_merkle_paths_artifacts_metadata(batch_number, "proof_gen_data_blob_url") + let oldest_batch_number = proof_dal + .get_oldest_unpicked_batch() .await + .unwrap() .unwrap(); + assert_eq!(oldest_batch_number, batch_number); } async fn send_submit_tee_proof_request( From e7c7840625f38cebcb76bcdd4a7968cea81190c0 Mon Sep 17 00:00:00 2001 From: Harald Hoyer Date: Tue, 8 Oct 2024 11:09:43 +0200 Subject: [PATCH 10/31] fix: remove sort from sql query This makes handling large tables a lot more performant Signed-off-by: Harald Hoyer --- ...9eacd03bd9ec24a7dc982c8cfb0e2277aff8dfaa9654255451ac.json} | 4 ++-- core/lib/dal/src/tee_proof_generation_dal.rs | 2 -- 2 files changed, 2 insertions(+), 4 deletions(-) rename core/lib/dal/.sqlx/{query-d825fcac2f7d32a31c159d5674497ae86e2513aca65199717dfd7359feddcd5e.json => query-4498e1c1ff179eacd03bd9ec24a7dc982c8cfb0e2277aff8dfaa9654255451ac.json} (66%) diff --git a/core/lib/dal/.sqlx/query-d825fcac2f7d32a31c159d5674497ae86e2513aca65199717dfd7359feddcd5e.json b/core/lib/dal/.sqlx/query-4498e1c1ff179eacd03bd9ec24a7dc982c8cfb0e2277aff8dfaa9654255451ac.json similarity index 66% rename from core/lib/dal/.sqlx/query-d825fcac2f7d32a31c159d5674497ae86e2513aca65199717dfd7359feddcd5e.json rename to core/lib/dal/.sqlx/query-4498e1c1ff179eacd03bd9ec24a7dc982c8cfb0e2277aff8dfaa9654255451ac.json index 2d17efc0fccf..4d006b6d1d5d 100644 --- a/core/lib/dal/.sqlx/query-d825fcac2f7d32a31c159d5674497ae86e2513aca65199717dfd7359feddcd5e.json +++ b/core/lib/dal/.sqlx/query-4498e1c1ff179eacd03bd9ec24a7dc982c8cfb0e2277aff8dfaa9654255451ac.json @@ -1,6 +1,6 @@ { "db_name": "PostgreSQL", - "query": "\n WITH upsert AS (\n SELECT\n p.l1_batch_number\n FROM\n proof_generation_details p\n LEFT JOIN\n l1_batches l1\n ON p.l1_batch_number = l1.number\n LEFT JOIN\n tee_proof_generation_details tee\n ON\n p.l1_batch_number = tee.l1_batch_number\n AND tee.tee_type = $1\n WHERE\n (\n p.l1_batch_number >= $5\n AND p.vm_run_data_blob_url IS NOT NULL\n AND p.proof_gen_data_blob_url IS NOT NULL\n AND l1.hash IS NOT NULL\n AND l1.aux_data_hash IS NOT NULL\n AND l1.meta_parameters_hash IS NOT NULL\n )\n AND (\n tee.l1_batch_number IS NULL\n OR (\n tee.status = $3\n OR (\n tee.status = $2\n AND tee.prover_taken_at < NOW() - $4::INTERVAL\n )\n )\n )\n ORDER BY\n l1_batch_number ASC\n FETCH FIRST ROW ONLY\n )\n \n INSERT INTO\n tee_proof_generation_details (\n l1_batch_number, tee_type, status, created_at, updated_at, prover_taken_at\n )\n SELECT\n l1_batch_number,\n $1,\n $2,\n NOW(),\n NOW(),\n NOW()\n FROM\n upsert\n ON CONFLICT (l1_batch_number, tee_type) DO\n UPDATE\n SET\n status = $2,\n updated_at = NOW(),\n prover_taken_at = NOW()\n RETURNING\n l1_batch_number\n ", + "query": "\n WITH upsert AS (\n SELECT\n p.l1_batch_number\n FROM\n proof_generation_details p\n LEFT JOIN\n l1_batches l1\n ON p.l1_batch_number = l1.number\n LEFT JOIN\n tee_proof_generation_details tee\n ON\n p.l1_batch_number = tee.l1_batch_number\n AND tee.tee_type = $1\n WHERE\n (\n p.l1_batch_number >= $5\n AND p.vm_run_data_blob_url IS NOT NULL\n AND p.proof_gen_data_blob_url IS NOT NULL\n AND l1.hash IS NOT NULL\n AND l1.aux_data_hash IS NOT NULL\n AND l1.meta_parameters_hash IS NOT NULL\n )\n AND (\n tee.l1_batch_number IS NULL\n OR (\n tee.status = $3\n OR (\n tee.status = $2\n AND tee.prover_taken_at < NOW() - $4::INTERVAL\n )\n )\n )\n FETCH FIRST ROW ONLY\n )\n \n INSERT INTO\n tee_proof_generation_details (\n l1_batch_number, tee_type, status, created_at, 
updated_at, prover_taken_at\n )\n SELECT\n l1_batch_number,\n $1,\n $2,\n NOW(),\n NOW(),\n NOW()\n FROM\n upsert\n ON CONFLICT (l1_batch_number, tee_type) DO\n UPDATE\n SET\n status = $2,\n updated_at = NOW(),\n prover_taken_at = NOW()\n RETURNING\n l1_batch_number\n ", "describe": { "columns": [ { @@ -22,5 +22,5 @@ false ] }, - "hash": "d825fcac2f7d32a31c159d5674497ae86e2513aca65199717dfd7359feddcd5e" + "hash": "4498e1c1ff179eacd03bd9ec24a7dc982c8cfb0e2277aff8dfaa9654255451ac" } diff --git a/core/lib/dal/src/tee_proof_generation_dal.rs b/core/lib/dal/src/tee_proof_generation_dal.rs index c9e5989e6413..116c981f36ce 100644 --- a/core/lib/dal/src/tee_proof_generation_dal.rs +++ b/core/lib/dal/src/tee_proof_generation_dal.rs @@ -70,8 +70,6 @@ impl TeeProofGenerationDal<'_, '_> { ) ) ) - ORDER BY - l1_batch_number ASC FETCH FIRST ROW ONLY ) From e6969d5256b8412f640e71d758051bdfa9f2c22c Mon Sep 17 00:00:00 2001 From: Harald Hoyer Date: Thu, 10 Oct 2024 10:21:42 +0200 Subject: [PATCH 11/31] fix: remove L2ChainId from VMRunWitnessInputData Signed-off-by: Harald Hoyer --- core/bin/zksync_server/src/node_builder.rs | 1 + core/lib/prover_interface/src/inputs.rs | 4 +--- .../implementations/layers/proof_data_handler.rs | 8 +++++++- core/node/proof_data_handler/src/lib.rs | 14 +++++++++++--- .../src/tee_request_processor.rs | 7 +++++-- core/node/proof_data_handler/src/tests.rs | 3 +++ core/node/vm_runner/src/impls/bwip.rs | 2 -- 7 files changed, 28 insertions(+), 11 deletions(-) diff --git a/core/bin/zksync_server/src/node_builder.rs b/core/bin/zksync_server/src/node_builder.rs index 42a49433d477..abe33adc9c41 100644 --- a/core/bin/zksync_server/src/node_builder.rs +++ b/core/bin/zksync_server/src/node_builder.rs @@ -289,6 +289,7 @@ impl MainNodeBuilder { self.node.add_layer(ProofDataHandlerLayer::new( try_load_config!(self.configs.proof_data_handler_config), self.genesis_config.l1_batch_commit_data_generator_mode, + self.genesis_config.l2_chain_id, )); Ok(self) } diff --git a/core/lib/prover_interface/src/inputs.rs b/core/lib/prover_interface/src/inputs.rs index 42a9ac1bf893..8ae2417a5cca 100644 --- a/core/lib/prover_interface/src/inputs.rs +++ b/core/lib/prover_interface/src/inputs.rs @@ -6,8 +6,7 @@ use zksync_multivm::interface::{L1BatchEnv, SystemEnv}; use zksync_object_store::{_reexports::BoxedError, serialize_using_bincode, Bucket, StoredObject}; use zksync_types::{ basic_fri_types::Eip4844Blobs, block::L2BlockExecutionData, - witness_block_state::WitnessStorageState, L1BatchNumber, L2ChainId, ProtocolVersionId, H256, - U256, + witness_block_state::WitnessStorageState, L1BatchNumber, ProtocolVersionId, H256, U256, }; const HASH_LEN: usize = H256::len_bytes(); @@ -150,7 +149,6 @@ pub struct VMRunWitnessInputData { pub storage_refunds: Vec, pub pubdata_costs: Vec, pub witness_block_state: WitnessStorageState, - pub l2_chain_id: L2ChainId, } // skip_serializing_if for field evm_emulator_code_hash doesn't work fine with bincode, diff --git a/core/node/node_framework/src/implementations/layers/proof_data_handler.rs b/core/node/node_framework/src/implementations/layers/proof_data_handler.rs index b53ff73c1a04..3e1269caa4e4 100644 --- a/core/node/node_framework/src/implementations/layers/proof_data_handler.rs +++ b/core/node/node_framework/src/implementations/layers/proof_data_handler.rs @@ -3,7 +3,7 @@ use std::sync::Arc; use zksync_config::configs::ProofDataHandlerConfig; use zksync_dal::{ConnectionPool, Core}; use zksync_object_store::ObjectStore; -use 
zksync_types::commitment::L1BatchCommitmentMode; +use zksync_types::{commitment::L1BatchCommitmentMode, L2ChainId}; use crate::{ implementations::resources::{ @@ -21,6 +21,7 @@ use crate::{ pub struct ProofDataHandlerLayer { proof_data_handler_config: ProofDataHandlerConfig, commitment_mode: L1BatchCommitmentMode, + l2_chain_id: L2ChainId, } #[derive(Debug, FromContext)] @@ -41,10 +42,12 @@ impl ProofDataHandlerLayer { pub fn new( proof_data_handler_config: ProofDataHandlerConfig, commitment_mode: L1BatchCommitmentMode, + l2_chain_id: L2ChainId, ) -> Self { Self { proof_data_handler_config, commitment_mode, + l2_chain_id, } } } @@ -67,6 +70,7 @@ impl WiringLayer for ProofDataHandlerLayer { blob_store, main_pool, commitment_mode: self.commitment_mode, + l2_chain_id: self.l2_chain_id, }; Ok(Output { task }) @@ -79,6 +83,7 @@ pub struct ProofDataHandlerTask { blob_store: Arc, main_pool: ConnectionPool, commitment_mode: L1BatchCommitmentMode, + l2_chain_id: L2ChainId, } #[async_trait::async_trait] @@ -93,6 +98,7 @@ impl Task for ProofDataHandlerTask { self.blob_store, self.main_pool, self.commitment_mode, + self.l2_chain_id, stop_receiver.0, ) .await diff --git a/core/node/proof_data_handler/src/lib.rs b/core/node/proof_data_handler/src/lib.rs index b5ce1b84a79a..25ff69ca4286 100644 --- a/core/node/proof_data_handler/src/lib.rs +++ b/core/node/proof_data_handler/src/lib.rs @@ -12,7 +12,7 @@ use zksync_prover_interface::api::{ ProofGenerationDataRequest, RegisterTeeAttestationRequest, SubmitProofRequest, SubmitTeeProofRequest, TeeProofGenerationDataRequest, TeeProofGenerationDataResponse, }; -use zksync_types::commitment::L1BatchCommitmentMode; +use zksync_types::{commitment::L1BatchCommitmentMode, L2ChainId}; #[cfg(test)] mod tests; @@ -27,11 +27,18 @@ pub async fn run_server( blob_store: Arc, connection_pool: ConnectionPool, commitment_mode: L1BatchCommitmentMode, + l2_chain_id: L2ChainId, mut stop_receiver: watch::Receiver, ) -> anyhow::Result<()> { let bind_address = SocketAddr::from(([0, 0, 0, 0], config.http_port)); tracing::debug!("Starting proof data handler server on {bind_address}"); - let app = create_proof_processing_router(blob_store, connection_pool, config, commitment_mode); + let app = create_proof_processing_router( + blob_store, + connection_pool, + config, + commitment_mode, + l2_chain_id, + ); let listener = tokio::net::TcpListener::bind(bind_address) .await @@ -54,6 +61,7 @@ fn create_proof_processing_router( connection_pool: ConnectionPool, config: ProofDataHandlerConfig, commitment_mode: L1BatchCommitmentMode, + l2_chain_id: L2ChainId, ) -> Router { let get_proof_gen_processor = RequestProcessor::new( blob_store.clone(), @@ -88,7 +96,7 @@ fn create_proof_processing_router( if config.tee_support { let get_tee_proof_gen_processor = - TeeRequestProcessor::new(blob_store, connection_pool, config.clone()); + TeeRequestProcessor::new(blob_store, connection_pool, config.clone(), l2_chain_id); let submit_tee_proof_processor = get_tee_proof_gen_processor.clone(); let register_tee_attestation_processor = get_tee_proof_gen_processor.clone(); diff --git a/core/node/proof_data_handler/src/tee_request_processor.rs b/core/node/proof_data_handler/src/tee_request_processor.rs index b7e661450177..5956be17e976 100644 --- a/core/node/proof_data_handler/src/tee_request_processor.rs +++ b/core/node/proof_data_handler/src/tee_request_processor.rs @@ -13,7 +13,7 @@ use zksync_prover_interface::{ TeeVerifierInput, V1TeeVerifierInput, VMRunWitnessInputData, WitnessInputMerklePaths, }, }; -use 
zksync_types::{tee_types::TeeType, L1BatchNumber}; +use zksync_types::{tee_types::TeeType, L1BatchNumber, L2ChainId}; use zksync_vm_executor::storage::L1BatchParamsProvider; use crate::errors::RequestProcessorError; @@ -23,6 +23,7 @@ pub(crate) struct TeeRequestProcessor { blob_store: Arc, pool: ConnectionPool, config: ProofDataHandlerConfig, + l2_chain_id: L2ChainId, } impl TeeRequestProcessor { @@ -30,11 +31,13 @@ impl TeeRequestProcessor { blob_store: Arc, pool: ConnectionPool, config: ProofDataHandlerConfig, + l2_chain_id: L2ChainId, ) -> Self { Self { blob_store, pool, config, + l2_chain_id, } } @@ -132,7 +135,7 @@ impl TeeRequestProcessor { &mut connection, l1_batch_number, validation_computational_gas_limit, - vm_run_data.l2_chain_id, + self.l2_chain_id, ) .await .map_err(|err| RequestProcessorError::GeneralError(err.to_string()))? diff --git a/core/node/proof_data_handler/src/tests.rs b/core/node/proof_data_handler/src/tests.rs index 995b59a3d58c..a10044cacd9c 100644 --- a/core/node/proof_data_handler/src/tests.rs +++ b/core/node/proof_data_handler/src/tests.rs @@ -6,6 +6,7 @@ use axum::{ }; use serde_json::json; use tower::ServiceExt; +use zksync_basic_types::L2ChainId; use zksync_config::configs::ProofDataHandlerConfig; use zksync_dal::{ConnectionPool, CoreDal}; use zksync_object_store::MockObjectStore; @@ -27,6 +28,7 @@ async fn request_tee_proof_inputs() { tee_support: true, }, L1BatchCommitmentMode::Rollup, + L2ChainId::default(), ); let test_cases = vec![ (json!({ "tee_type": "sgx" }), StatusCode::NO_CONTENT), @@ -81,6 +83,7 @@ async fn submit_tee_proof() { tee_support: true, }, L1BatchCommitmentMode::Rollup, + L2ChainId::default(), ); // this should fail because we haven't saved the attestation for the pubkey yet diff --git a/core/node/vm_runner/src/impls/bwip.rs b/core/node/vm_runner/src/impls/bwip.rs index 86b9f227e899..dc94752d9886 100644 --- a/core/node/vm_runner/src/impls/bwip.rs +++ b/core/node/vm_runner/src/impls/bwip.rs @@ -208,7 +208,6 @@ async fn get_updates_manager_witness_input_data( output: &L1BatchOutput, ) -> anyhow::Result { let initial_heap_content = output.batch.final_bootloader_memory.clone().unwrap(); // might be just empty - let l2_chain_id = system_env.chain_id; let default_aa = system_env.base_system_smart_contracts.hashes().default_aa; let evm_emulator = system_env.base_system_smart_contracts.hashes().evm_emulator; let bootloader = system_env.base_system_smart_contracts.hashes().bootloader; @@ -276,7 +275,6 @@ async fn get_updates_manager_witness_input_data( storage_refunds, pubdata_costs, witness_block_state, - l2_chain_id, }) } From b07ba6b0bf45f1aaf1984c40702b8dcd54a41dbc Mon Sep 17 00:00:00 2001 From: Harald Hoyer Date: Thu, 10 Oct 2024 10:37:10 +0200 Subject: [PATCH 12/31] fixup! 
fix: remove L2ChainId from VMRunWitnessInputData Signed-off-by: Harald Hoyer --- core/lib/tee_verifier/src/lib.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/core/lib/tee_verifier/src/lib.rs b/core/lib/tee_verifier/src/lib.rs index c70db0f4ac96..4ead0e522ad4 100644 --- a/core/lib/tee_verifier/src/lib.rs +++ b/core/lib/tee_verifier/src/lib.rs @@ -320,10 +320,10 @@ mod tests { protocol_version: Default::default(), bootloader_code: vec![], default_account_code_hash: Default::default(), + evm_emulator_code_hash: None, storage_refunds: vec![], pubdata_costs: vec![], witness_block_state: Default::default(), - l2_chain_id: L2ChainId::from(0), }, WitnessInputMerklePaths::new(0), vec![], From ab23c8b849912a25c5765b8ceefc1da10d71cb02 Mon Sep 17 00:00:00 2001 From: Harald Hoyer Date: Thu, 10 Oct 2024 10:44:28 +0200 Subject: [PATCH 13/31] fixup! fixup! fix: remove L2ChainId from VMRunWitnessInputData Signed-off-by: Harald Hoyer --- core/lib/tee_verifier/src/lib.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/core/lib/tee_verifier/src/lib.rs b/core/lib/tee_verifier/src/lib.rs index 4ead0e522ad4..c5a680a0b170 100644 --- a/core/lib/tee_verifier/src/lib.rs +++ b/core/lib/tee_verifier/src/lib.rs @@ -306,7 +306,7 @@ mod tests { use zksync_contracts::{BaseSystemContracts, SystemContractCode}; use zksync_multivm::interface::{L1BatchEnv, SystemEnv, TxExecutionMode}; use zksync_prover_interface::inputs::{TeeVerifierInput, VMRunWitnessInputData}; - use zksync_types::{L2ChainId, U256}; + use zksync_types::U256; use super::*; From d852f312d612c4f11f349d828cae8a5d3ad5da52 Mon Sep 17 00:00:00 2001 From: Harald Hoyer Date: Thu, 10 Oct 2024 10:46:16 +0200 Subject: [PATCH 14/31] fixup! fixup! fixup! fix: remove L2ChainId from VMRunWitnessInputData Signed-off-by: Harald Hoyer --- core/lib/tee_verifier/src/lib.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/core/lib/tee_verifier/src/lib.rs b/core/lib/tee_verifier/src/lib.rs index c5a680a0b170..c8868a578376 100644 --- a/core/lib/tee_verifier/src/lib.rs +++ b/core/lib/tee_verifier/src/lib.rs @@ -320,7 +320,7 @@ mod tests { protocol_version: Default::default(), bootloader_code: vec![], default_account_code_hash: Default::default(), - evm_emulator_code_hash: None, + evm_emulator_code_hash: Some(Default::default()), storage_refunds: vec![], pubdata_costs: vec![], witness_block_state: Default::default(), From 8a7bd60ac2545a5bc66d7a69a86400f602b63dc0 Mon Sep 17 00:00:00 2001 From: Harald Hoyer Date: Thu, 10 Oct 2024 13:37:29 +0200 Subject: [PATCH 15/31] fixup! 
Merge remote-tracking branch 'origin/main' into tee_prover_new_2 Signed-off-by: Harald Hoyer --- Cargo.toml | 158 ++++++++++++++++++++++++++--------------------------- 1 file changed, 79 insertions(+), 79 deletions(-) diff --git a/Cargo.toml b/Cargo.toml index 60dfdbe27f38..099d2d566d42 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -1,84 +1,84 @@ [workspace] members = [ - # Binaries - "core/bin/block_reverter", - "core/bin/contract-verifier", - "core/bin/external_node", - "core/bin/merkle_tree_consistency_checker", - "core/bin/snapshots_creator", - "core/bin/selector_generator", - "core/bin/system-constants-generator", - "core/bin/verified_sources_fetcher", - "core/bin/zksync_server", - "core/bin/genesis_generator", - "core/bin/zksync_tee_prover", - # Node services - "core/node/node_framework", - "core/node/proof_data_handler", - "core/node/block_reverter", - "core/node/commitment_generator", - "core/node/house_keeper", - "core/node/genesis", - "core/node/shared_metrics", - "core/node/db_pruner", - "core/node/fee_model", - "core/node/da_dispatcher", - "core/node/eth_sender", - "core/node/vm_runner", - "core/node/test_utils", - "core/node/state_keeper", - "core/node/reorg_detector", - "core/node/consistency_checker", - "core/node/metadata_calculator", - "core/node/node_sync", - "core/node/node_storage_init", - "core/node/consensus", - "core/node/contract_verification_server", - "core/node/api_server", - "core/node/base_token_adjuster", - "core/node/external_proof_integration_api", - "core/node/logs_bloom_backfill", - "core/node/da_clients", - # Libraries - "core/lib/db_connection", - "core/lib/zksync_core_leftovers", - "core/lib/basic_types", - "core/lib/config", - "core/lib/constants", - "core/lib/contract_verifier", - "core/lib/contracts", - "core/lib/circuit_breaker", - "core/lib/dal", - "core/lib/env_config", - "core/lib/da_client", - "core/lib/eth_client", - "core/lib/eth_signer", - "core/lib/l1_contract_interface", - "core/lib/mempool", - "core/lib/merkle_tree", - "core/lib/mini_merkle_tree", - "core/lib/node_framework_derive", - "core/lib/object_store", - "core/lib/prover_interface", - "core/lib/queued_job_processor", - "core/lib/state", - "core/lib/storage", - "core/lib/tee_verifier", - "core/lib/types", - "core/lib/protobuf_config", - "core/lib/utils", - "core/lib/vlog", - "core/lib/multivm", - "core/lib/vm_interface", - "core/lib/vm_executor", - "core/lib/web3_decl", - "core/lib/snapshots_applier", - "core/lib/crypto_primitives", - "core/lib/external_price_api", - # Test infrastructure - "core/tests/test_account", - "core/tests/loadnext", - "core/tests/vm-benchmark", + # Binaries + "core/bin/block_reverter", + "core/bin/contract-verifier", + "core/bin/external_node", + "core/bin/merkle_tree_consistency_checker", + "core/bin/snapshots_creator", + "core/bin/selector_generator", + "core/bin/system-constants-generator", + "core/bin/verified_sources_fetcher", + "core/bin/zksync_server", + "core/bin/genesis_generator", + "core/bin/zksync_tee_prover", + # Node services + "core/node/node_framework", + "core/node/proof_data_handler", + "core/node/block_reverter", + "core/node/commitment_generator", + "core/node/house_keeper", + "core/node/genesis", + "core/node/shared_metrics", + "core/node/db_pruner", + "core/node/fee_model", + "core/node/da_dispatcher", + "core/node/eth_sender", + "core/node/vm_runner", + "core/node/test_utils", + "core/node/state_keeper", + "core/node/reorg_detector", + "core/node/consistency_checker", + "core/node/metadata_calculator", + "core/node/node_sync", + 
"core/node/node_storage_init", + "core/node/consensus", + "core/node/contract_verification_server", + "core/node/api_server", + "core/node/base_token_adjuster", + "core/node/external_proof_integration_api", + "core/node/logs_bloom_backfill", + "core/node/da_clients", + # Libraries + "core/lib/db_connection", + "core/lib/zksync_core_leftovers", + "core/lib/basic_types", + "core/lib/config", + "core/lib/constants", + "core/lib/contract_verifier", + "core/lib/contracts", + "core/lib/circuit_breaker", + "core/lib/dal", + "core/lib/env_config", + "core/lib/da_client", + "core/lib/eth_client", + "core/lib/eth_signer", + "core/lib/l1_contract_interface", + "core/lib/mempool", + "core/lib/merkle_tree", + "core/lib/mini_merkle_tree", + "core/lib/node_framework_derive", + "core/lib/object_store", + "core/lib/prover_interface", + "core/lib/queued_job_processor", + "core/lib/state", + "core/lib/storage", + "core/lib/tee_verifier", + "core/lib/types", + "core/lib/protobuf_config", + "core/lib/utils", + "core/lib/vlog", + "core/lib/multivm", + "core/lib/vm_interface", + "core/lib/vm_executor", + "core/lib/web3_decl", + "core/lib/snapshots_applier", + "core/lib/crypto_primitives", + "core/lib/external_price_api", + # Test infrastructure + "core/tests/test_account", + "core/tests/loadnext", + "core/tests/vm-benchmark", ] resolver = "2" From 8a59e0b8844190d9193f9ee210d8b622c15bdea6 Mon Sep 17 00:00:00 2001 From: Harald Hoyer Date: Thu, 10 Oct 2024 13:45:22 +0200 Subject: [PATCH 16/31] fix: don't use `Instrumented::new` when `.instrumented` can be used Signed-off-by: Harald Hoyer --- core/lib/dal/src/tee_proof_generation_dal.rs | 22 +++++++++----------- 1 file changed, 10 insertions(+), 12 deletions(-) diff --git a/core/lib/dal/src/tee_proof_generation_dal.rs b/core/lib/dal/src/tee_proof_generation_dal.rs index c3b1f2dbf262..d3a08b6c3bd7 100644 --- a/core/lib/dal/src/tee_proof_generation_dal.rs +++ b/core/lib/dal/src/tee_proof_generation_dal.rs @@ -36,7 +36,7 @@ impl TeeProofGenerationDal<'_, '_> { ) -> DalResult> { let processing_timeout = pg_interval_from_duration(processing_timeout); let min_batch_number = min_batch_number.map_or(0, |num| i64::from(num.0)); - let query = sqlx::query!( + let batch_number = sqlx::query!( r#" WITH upsert AS ( SELECT @@ -72,7 +72,7 @@ impl TeeProofGenerationDal<'_, '_> { ) FETCH FIRST ROW ONLY ) - + INSERT INTO tee_proof_generation_details ( l1_batch_number, tee_type, status, created_at, updated_at, prover_taken_at @@ -100,16 +100,14 @@ impl TeeProofGenerationDal<'_, '_> { TeeProofGenerationJobStatus::Unpicked.to_string(), processing_timeout, min_batch_number - ); - - let batch_number = Instrumented::new("lock_batch_for_proving") - .with_arg("tee_type", &tee_type) - .with_arg("processing_timeout", &processing_timeout) - .with_arg("l1_batch_number", &min_batch_number) - .with(query) - .fetch_optional(self.storage) - .await? - .map(|row| L1BatchNumber(row.l1_batch_number as u32)); + ) + .instrument("lock_batch_for_proving") + .with_arg("tee_type", &tee_type) + .with_arg("processing_timeout", &processing_timeout) + .with_arg("l1_batch_number", &min_batch_number) + .fetch_optional(self.storage) + .await? 
+ .map(|row| L1BatchNumber(row.l1_batch_number as u32));

Ok(batch_number)
}

From fa42354d30e600edf83dd3ca91b178b8c1efb849 Mon Sep 17 00:00:00 2001
From: Harald Hoyer
Date: Thu, 10 Oct 2024 13:55:17 +0200
Subject: [PATCH 17/31] fix: merge extra match arm with already handled case

Signed-off-by: Harald Hoyer

---
core/lib/tee_verifier/src/lib.rs | 8 --------
1 file changed, 8 deletions(-)

diff --git a/core/lib/tee_verifier/src/lib.rs b/core/lib/tee_verifier/src/lib.rs
index c8868a578376..9c23385390fc 100644
--- a/core/lib/tee_verifier/src/lib.rs
+++ b/core/lib/tee_verifier/src/lib.rs
@@ -234,14 +234,6 @@ fn map_log_tree(
}
TreeInstruction::Read(key)
}
- (true, StorageLogKind::RepeatedWrite, TreeLogEntry::Read { .. }) => {
- tracing::error!(
- ?storage_log,
- ?tree_log_entry,
- "Failed to map LogQuery to TreeInstruction"
- );
- anyhow::bail!("Failed to map LogQuery to TreeInstruction");
- }
(false, _, TreeLogEntry::ReadMissingKey { .. }) => TreeInstruction::Read(key),
(true, _, TreeLogEntry::Read { .. })
| (true, _, TreeLogEntry::ReadMissingKey)

From 1087a8dc17368d90b4e8084ae7c3e9e167ffb3b0 Mon Sep 17 00:00:00 2001
From: Harald Hoyer
Date: Thu, 10 Oct 2024 13:59:02 +0200
Subject: [PATCH 18/31] fix: use "500 Internal Server Error" for internal error

Signed-off-by: Harald Hoyer

---
core/node/proof_data_handler/src/errors.rs | 2 +-
1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/core/node/proof_data_handler/src/errors.rs b/core/node/proof_data_handler/src/errors.rs
index 1f8a0d4d613b..2ce3ccf96c64 100644
--- a/core/node/proof_data_handler/src/errors.rs
+++ b/core/node/proof_data_handler/src/errors.rs
@@ -23,7 +23,7 @@ impl IntoResponse for RequestProcessorError {
RequestProcessorError::GeneralError(err) => {
tracing::error!("Error: {:?}", err);
(
- StatusCode::BAD_GATEWAY,
+ StatusCode::INTERNAL_SERVER_ERROR,
"An internal error occurred".to_owned(),
)
}

From c521f526145393c0cf8021e53d7838bfce710f73 Mon Sep 17 00:00:00 2001
From: Harald Hoyer
Date: Thu, 10 Oct 2024 14:06:11 +0200
Subject: [PATCH 19/31] fix: remove type annotation for readability

Signed-off-by: Harald Hoyer

---
core/node/proof_data_handler/src/lib.rs | 2 +-
1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/core/node/proof_data_handler/src/lib.rs b/core/node/proof_data_handler/src/lib.rs
index 25ff69ca4286..0f90d657be5d 100644
--- a/core/node/proof_data_handler/src/lib.rs
+++ b/core/node/proof_data_handler/src/lib.rs
@@ -104,7 +104,7 @@ fn create_proof_processing_router(
"/tee/proof_inputs",
post(
move |payload: Json<TeeProofGenerationDataRequest>| async move {
- let result: Result<Json<TeeProofGenerationDataResponse>, errors::RequestProcessorError> = get_tee_proof_gen_processor
+ let result = get_tee_proof_gen_processor
.get_proof_generation_data(payload)
.await;

From 8556e526f88f94c69b4606b814df024aafb0517f Mon Sep 17 00:00:00 2001
From: Harald Hoyer
Date: Thu, 10 Oct 2024 14:08:24 +0200
Subject: [PATCH 20/31] fixup!
fix: don't use `Instrumented::new` when `.instrument` can be used

Signed-off-by: Harald Hoyer

---
core/lib/dal/src/tee_proof_generation_dal.rs | 2 +-
1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/core/lib/dal/src/tee_proof_generation_dal.rs b/core/lib/dal/src/tee_proof_generation_dal.rs
index d3a08b6c3bd7..8cb961e40e39 100644
--- a/core/lib/dal/src/tee_proof_generation_dal.rs
+++ b/core/lib/dal/src/tee_proof_generation_dal.rs
@@ -72,7 +72,7 @@ impl TeeProofGenerationDal<'_, '_> {
)
FETCH FIRST ROW ONLY
)
-
+
INSERT INTO
tee_proof_generation_details (
l1_batch_number, tee_type, status, created_at, updated_at, prover_taken_at

From d908979659ed21729f73c72347ad4b6f6f5abf77 Mon Sep 17 00:00:00 2001
From: Harald Hoyer
Date: Thu, 10 Oct 2024 14:17:02 +0200
Subject: [PATCH 21/31] fixup! fix: merge extra match arm with already handled case

Signed-off-by: Harald Hoyer

---
core/lib/tee_verifier/src/lib.rs | 3 +--
1 file changed, 1 insertion(+), 2 deletions(-)

diff --git a/core/lib/tee_verifier/src/lib.rs b/core/lib/tee_verifier/src/lib.rs
index 9c23385390fc..0f5514308e05 100644
--- a/core/lib/tee_verifier/src/lib.rs
+++ b/core/lib/tee_verifier/src/lib.rs
@@ -22,8 +22,7 @@ use zksync_prover_interface::inputs::{
StorageLogMetadata, V1TeeVerifierInput, WitnessInputMerklePaths,
};
use zksync_types::{
- block::L2BlockExecutionData, L1BatchNumber, StorageLog, StorageLogKind, StorageValue,
- Transaction, H256,
+ block::L2BlockExecutionData, L1BatchNumber, StorageLog, StorageValue, Transaction, H256,
};
use zksync_utils::u256_to_h256;

From 91cad429db299e8a9cc2c26111c7546217bd8000 Mon Sep 17 00:00:00 2001
From: Harald Hoyer
Date: Thu, 10 Oct 2024 16:31:08 +0200
Subject: [PATCH 22/31] fixup! fix(tee_verifier): correctly initialize storage for re-execution

Signed-off-by: Harald Hoyer

---
core/lib/prover_interface/src/inputs.rs | 2 +-
1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/core/lib/prover_interface/src/inputs.rs b/core/lib/prover_interface/src/inputs.rs
index 62313de627bf..97de24f42dae 100644
--- a/core/lib/prover_interface/src/inputs.rs
+++ b/core/lib/prover_interface/src/inputs.rs
@@ -261,7 +261,7 @@ pub struct L1BatchMetadataHashes {
pub aux_hash: H256,
}

-/// Version 2 of the data used as input for the TEE verifier.
+/// Version 1 of the data used as input for the TEE verifier.
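+///
+/// An illustrative sketch of how a consumer dispatches on the version wrapper
+/// (the `TeeVerifierInput::V1` variant name is assumed here and not spelled
+/// out in this series):
+///
+/// ```ignore
+/// match tee_verifier_input {
+///     TeeVerifierInput::V1(input) => input.verify()?,
+///     other => anyhow::bail!("unsupported TeeVerifierInput version: {other:?}"),
+/// }
+/// ```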
#[derive(Debug, Clone, Serialize, Deserialize, PartialEq)] pub struct V1TeeVerifierInput { pub vm_run_data: VMRunWitnessInputData, From fc30e5bbc666c69dace1630a1012c2682140f934 Mon Sep 17 00:00:00 2001 From: Patrick Beza Date: Thu, 10 Oct 2024 16:53:14 +0200 Subject: [PATCH 23/31] fix(migration): correct .down.sql script to ensure proper rollback --- ...000_remove_tee_verifier_input_producer_job.down.sql | 10 ++++++---- 1 file changed, 6 insertions(+), 4 deletions(-) diff --git a/core/lib/dal/migrations/20241001110000_remove_tee_verifier_input_producer_job.down.sql b/core/lib/dal/migrations/20241001110000_remove_tee_verifier_input_producer_job.down.sql index 120b19f6f0af..707ce306365c 100644 --- a/core/lib/dal/migrations/20241001110000_remove_tee_verifier_input_producer_job.down.sql +++ b/core/lib/dal/migrations/20241001110000_remove_tee_verifier_input_producer_job.down.sql @@ -1,4 +1,4 @@ -CREATE TABLE tee_proof_generation_details ( +CREATE TABLE tee_verifier_input_producer_jobs ( l1_batch_number BIGINT NOT NULL, status TEXT NOT NULL, signature BYTEA, @@ -13,6 +13,8 @@ CREATE TABLE tee_proof_generation_details ( CONSTRAINT tee_proof_generation_details_pubkey_fkey FOREIGN KEY (pubkey) REFERENCES tee_attestations(pubkey) ON DELETE SET NULL ); -CREATE INDEX idx_tee_proof_generation_details_status_prover_taken_at - ON tee_proof_generation_details (prover_taken_at) - WHERE status = 'picked_by_prover'; +ALTER TABLE tee_proof_generation_details + ADD CONSTRAINT tee_proof_generation_details_l1_batch_number_fkey + FOREIGN KEY (l1_batch_number) + REFERENCES tee_verifier_input_producer_jobs(l1_batch_number) + ON DELETE CASCADE; From 0122e62975c208433b2339ed2f42f752e9e6494b Mon Sep 17 00:00:00 2001 From: Harald Hoyer Date: Thu, 10 Oct 2024 16:57:26 +0200 Subject: [PATCH 24/31] fix: add a comment on storage in tee_verifier verify method Signed-off-by: Harald Hoyer --- core/lib/tee_verifier/src/lib.rs | 5 +++++ 1 file changed, 5 insertions(+) diff --git a/core/lib/tee_verifier/src/lib.rs b/core/lib/tee_verifier/src/lib.rs index 0f5514308e05..d5537b8ddf12 100644 --- a/core/lib/tee_verifier/src/lib.rs +++ b/core/lib/tee_verifier/src/lib.rs @@ -65,6 +65,11 @@ impl Verify for V1TeeVerifierInput { .is_write_initial .into_iter(); + // We need to define storage slots read during batch execution, and their initial state; + // hence, the use of both read_storage_ops and initial_writes_ops. + // StorageSnapshot also requires providing enumeration indices, + // but they only matter at the end of execution when creating pubdata for the batch, + // which is irrelevant in this case. Thus, enumeration indices are set to dummy values. 
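+ // Illustrative shape of the snapshot entries built below (values are made up):
+ //   hashed_key(slot read during the batch)    -> Some((last_value, dummy_index))
+ //   hashed_key(slot first written in this batch) -> None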
let storage = read_storage_ops .enumerate() .map(|(i, (hash, bytes))| (hash.hashed_key(), Some((bytes, i as u64 + 1u64)))) From ab6e56552377ea74ccf619f940a03735c0e4d5ea Mon Sep 17 00:00:00 2001 From: Patrick Beza Date: Thu, 10 Oct 2024 18:17:58 +0200 Subject: [PATCH 25/31] fix(tee-prover): simplify TeeProofGenerationDataResponse This PR addresses Alex's code review comment: https://github.com/matter-labs/zksync-era/pull/3017#discussion_r1795237929 and partially addresses this one too: https://github.com/matter-labs/zksync-era/pull/3017#discussion_r1795240750 --- core/bin/zksync_tee_prover/src/api_client.rs | 2 +- core/bin/zksync_tee_prover/src/tee_prover.rs | 18 ++++++++++++------ core/lib/dal/src/tee_proof_generation_dal.rs | 9 ++++----- core/lib/prover_interface/src/api.rs | 2 +- core/node/proof_data_handler/src/lib.rs | 3 +-- .../src/tee_request_processor.rs | 12 ++++-------- core/node/proof_data_handler/src/tests.rs | 2 +- 7 files changed, 24 insertions(+), 24 deletions(-) diff --git a/core/bin/zksync_tee_prover/src/api_client.rs b/core/bin/zksync_tee_prover/src/api_client.rs index 13fbc1ba8868..860c102d5629 100644 --- a/core/bin/zksync_tee_prover/src/api_client.rs +++ b/core/bin/zksync_tee_prover/src/api_client.rs @@ -77,7 +77,7 @@ impl TeeApiClient { pub async fn get_job( &self, tee_type: TeeType, - ) -> Result>, TeeProverError> { + ) -> Result, TeeProverError> { let request = TeeProofGenerationDataRequest { tee_type }; let response = self .post::<_, TeeProofGenerationDataResponse, _>("/tee/proof_inputs", request) diff --git a/core/bin/zksync_tee_prover/src/tee_prover.rs b/core/bin/zksync_tee_prover/src/tee_prover.rs index 1511f0c88e3d..7d40bd324319 100644 --- a/core/bin/zksync_tee_prover/src/tee_prover.rs +++ b/core/bin/zksync_tee_prover/src/tee_prover.rs @@ -1,5 +1,6 @@ use std::fmt; +use reqwest::StatusCode; use secp256k1::{ecdsa::Signature, Message, PublicKey, Secp256k1}; use zksync_basic_types::H256; use zksync_node_framework::{ @@ -90,8 +91,8 @@ impl TeeProver { } async fn step(&self, public_key: &PublicKey) -> Result, TeeProverError> { - match self.api_client.get_job(self.config.tee_type).await? 
{ - Some(job) => { + match self.api_client.get_job(self.config.tee_type).await { + Ok(job) => { let (signature, batch_number, root_hash) = self.verify(*job)?; self.api_client .submit_proof( @@ -104,10 +105,15 @@ impl TeeProver { .await?; Ok(Some(batch_number)) } - None => { - tracing::trace!("There are currently no pending batches to be proven"); - Ok(None) - } + Err(err) => match err { + TeeProverError::Request(req_err) + if req_err.status() == Some(StatusCode::NOT_FOUND) => + { + tracing::trace!("There are currently no pending batches to be proven"); + Ok(None) + } + _ => Err(err), + }, } } } diff --git a/core/lib/dal/src/tee_proof_generation_dal.rs b/core/lib/dal/src/tee_proof_generation_dal.rs index 8cb961e40e39..192b81db1bff 100644 --- a/core/lib/dal/src/tee_proof_generation_dal.rs +++ b/core/lib/dal/src/tee_proof_generation_dal.rs @@ -33,7 +33,7 @@ impl TeeProofGenerationDal<'_, '_> { tee_type: TeeType, processing_timeout: Duration, min_batch_number: Option, - ) -> DalResult> { + ) -> DalResult { let processing_timeout = pg_interval_from_duration(processing_timeout); let min_batch_number = min_batch_number.map_or(0, |num| i64::from(num.0)); let batch_number = sqlx::query!( @@ -105,11 +105,10 @@ impl TeeProofGenerationDal<'_, '_> { .with_arg("tee_type", &tee_type) .with_arg("processing_timeout", &processing_timeout) .with_arg("l1_batch_number", &min_batch_number) - .fetch_optional(self.storage) - .await? - .map(|row| L1BatchNumber(row.l1_batch_number as u32)); + .fetch_one(self.storage) + .await?; - Ok(batch_number) + Ok(L1BatchNumber(batch_number.l1_batch_number as u32)) } pub async fn unlock_batch( diff --git a/core/lib/prover_interface/src/api.rs b/core/lib/prover_interface/src/api.rs index 776cd3141cbe..acf104cc4c61 100644 --- a/core/lib/prover_interface/src/api.rs +++ b/core/lib/prover_interface/src/api.rs @@ -31,7 +31,7 @@ pub enum ProofGenerationDataResponse { } #[derive(Debug, Serialize, Deserialize)] -pub struct TeeProofGenerationDataResponse(pub Option>); +pub struct TeeProofGenerationDataResponse(pub Box); #[derive(Debug, Serialize, Deserialize)] pub enum SubmitProofResponse { diff --git a/core/node/proof_data_handler/src/lib.rs b/core/node/proof_data_handler/src/lib.rs index 0f90d657be5d..2253cf9df976 100644 --- a/core/node/proof_data_handler/src/lib.rs +++ b/core/node/proof_data_handler/src/lib.rs @@ -10,7 +10,7 @@ use zksync_dal::{ConnectionPool, Core}; use zksync_object_store::ObjectStore; use zksync_prover_interface::api::{ ProofGenerationDataRequest, RegisterTeeAttestationRequest, SubmitProofRequest, - SubmitTeeProofRequest, TeeProofGenerationDataRequest, TeeProofGenerationDataResponse, + SubmitTeeProofRequest, TeeProofGenerationDataRequest, }; use zksync_types::{commitment::L1BatchCommitmentMode, L2ChainId}; @@ -109,7 +109,6 @@ fn create_proof_processing_router( .await; match result { - Ok(Json(TeeProofGenerationDataResponse(None))) => (StatusCode::NO_CONTENT, Json("No new TeeVerifierInputs are available yet")).into_response(), Ok(data) => (StatusCode::OK, data).into_response(), Err(e) => e.into_response(), } diff --git a/core/node/proof_data_handler/src/tee_request_processor.rs b/core/node/proof_data_handler/src/tee_request_processor.rs index 5956be17e976..8af5e08b475b 100644 --- a/core/node/proof_data_handler/src/tee_request_processor.rs +++ b/core/node/proof_data_handler/src/tee_request_processor.rs @@ -51,20 +51,16 @@ impl TeeRequestProcessor { let mut missing_range: Option<(L1BatchNumber, L1BatchNumber)> = None; let result = loop { - let l1_batch_number = match 
self + let l1_batch_number = self .lock_batch_for_proving(request.tee_type, min_batch_number) - .await? - { - Some(number) => number, - None => break Ok(Json(TeeProofGenerationDataResponse(None))), - }; + .await?; match self .tee_verifier_input_for_existing_batch(l1_batch_number) .await { Ok(input) => { - break Ok(Json(TeeProofGenerationDataResponse(Some(Box::new(input))))); + break Ok(Json(TeeProofGenerationDataResponse(Box::new(input)))); } Err(RequestProcessorError::ObjectStore(ObjectStoreError::KeyNotFound(_))) => { missing_range = match missing_range { @@ -156,7 +152,7 @@ impl TeeRequestProcessor { &self, tee_type: TeeType, min_batch_number: Option, - ) -> Result, RequestProcessorError> { + ) -> Result { let result = self .pool .connection() diff --git a/core/node/proof_data_handler/src/tests.rs b/core/node/proof_data_handler/src/tests.rs index a10044cacd9c..240940506f8e 100644 --- a/core/node/proof_data_handler/src/tests.rs +++ b/core/node/proof_data_handler/src/tests.rs @@ -31,7 +31,7 @@ async fn request_tee_proof_inputs() { L2ChainId::default(), ); let test_cases = vec![ - (json!({ "tee_type": "sgx" }), StatusCode::NO_CONTENT), + (json!({ "tee_type": "sgx" }), StatusCode::NOT_FOUND), ( json!({ "tee_type": "Sgx" }), StatusCode::UNPROCESSABLE_ENTITY, From 59f7756b79552201b04cf14891b99c38c7bf81b5 Mon Sep 17 00:00:00 2001 From: Harald Hoyer Date: Wed, 16 Oct 2024 10:18:26 +0200 Subject: [PATCH 26/31] fix: use connection_tagged() Signed-off-by: Harald Hoyer --- .../proof_data_handler/src/tee_request_processor.rs | 13 ++++++------- 1 file changed, 6 insertions(+), 7 deletions(-) diff --git a/core/node/proof_data_handler/src/tee_request_processor.rs b/core/node/proof_data_handler/src/tee_request_processor.rs index 8af5e08b475b..229ea6a7d811 100644 --- a/core/node/proof_data_handler/src/tee_request_processor.rs +++ b/core/node/proof_data_handler/src/tee_request_processor.rs @@ -107,7 +107,7 @@ impl TeeRequestProcessor { let mut connection = self .pool - .connection() + .connection_tagged("tee_request_processor") .await .map_err(RequestProcessorError::Dal)?; @@ -153,9 +153,8 @@ impl TeeRequestProcessor { tee_type: TeeType, min_batch_number: Option, ) -> Result { - let result = self - .pool - .connection() + self.pool + .connection_tagged("tee_request_processor") .await? .tee_proof_generation_dal() .lock_batch_for_proving( @@ -173,7 +172,7 @@ impl TeeRequestProcessor { tee_type: TeeType, ) -> Result<(), RequestProcessorError> { self.pool - .connection() + .connection_tagged("tee_request_processor") .await? 
.tee_proof_generation_dal() .unlock_batch(l1_batch_number, tee_type) @@ -187,7 +186,7 @@ impl TeeRequestProcessor { Json(proof): Json, ) -> Result, RequestProcessorError> { let l1_batch_number = L1BatchNumber(l1_batch_number); - let mut connection = self.pool.connection().await?; + let mut connection = self.pool.connection_tagged("tee_request_processor").await?; let mut dal = connection.tee_proof_generation_dal(); tracing::info!( @@ -213,7 +212,7 @@ impl TeeRequestProcessor { ) -> Result, RequestProcessorError> { tracing::info!("Received attestation: {:?}", payload); - let mut connection = self.pool.connection().await?; + let mut connection = self.pool.connection_tagged("tee_request_processor").await?; let mut dal = connection.tee_proof_generation_dal(); dal.save_attestation(&payload.pubkey, &payload.attestation) From 01453f8bae02aa63f7f06b88d30d28c18f821fee Mon Sep 17 00:00:00 2001 From: Harald Hoyer Date: Wed, 16 Oct 2024 10:20:48 +0200 Subject: [PATCH 27/31] fix: error handling, if no job is available Signed-off-by: Harald Hoyer --- core/lib/dal/src/tee_proof_generation_dal.rs | 11 +++++------ core/node/proof_data_handler/src/errors.rs | 18 +++++++++--------- .../src/tee_request_processor.rs | 5 +++-- 3 files changed, 17 insertions(+), 17 deletions(-) diff --git a/core/lib/dal/src/tee_proof_generation_dal.rs b/core/lib/dal/src/tee_proof_generation_dal.rs index 192b81db1bff..d865212f190c 100644 --- a/core/lib/dal/src/tee_proof_generation_dal.rs +++ b/core/lib/dal/src/tee_proof_generation_dal.rs @@ -33,10 +33,10 @@ impl TeeProofGenerationDal<'_, '_> { tee_type: TeeType, processing_timeout: Duration, min_batch_number: Option, - ) -> DalResult { + ) -> DalResult> { let processing_timeout = pg_interval_from_duration(processing_timeout); let min_batch_number = min_batch_number.map_or(0, |num| i64::from(num.0)); - let batch_number = sqlx::query!( + sqlx::query!( r#" WITH upsert AS ( SELECT @@ -105,10 +105,9 @@ impl TeeProofGenerationDal<'_, '_> { .with_arg("tee_type", &tee_type) .with_arg("processing_timeout", &processing_timeout) .with_arg("l1_batch_number", &min_batch_number) - .fetch_one(self.storage) - .await?; - - Ok(L1BatchNumber(batch_number.l1_batch_number as u32)) + .fetch_optional(self.storage) + .await + .map(|record| record.map(|record| L1BatchNumber(record.l1_batch_number as u32))) } pub async fn unlock_batch( diff --git a/core/node/proof_data_handler/src/errors.rs b/core/node/proof_data_handler/src/errors.rs index 2ce3ccf96c64..8d90d1008e63 100644 --- a/core/node/proof_data_handler/src/errors.rs +++ b/core/node/proof_data_handler/src/errors.rs @@ -6,6 +6,7 @@ use zksync_dal::DalError; use zksync_object_store::ObjectStoreError; pub(crate) enum RequestProcessorError { + NoJob, GeneralError(String), ObjectStore(ObjectStoreError), Dal(DalError), @@ -36,15 +37,14 @@ impl IntoResponse for RequestProcessorError { } RequestProcessorError::Dal(err) => { tracing::error!("Sqlx error: {:?}", err); - match err.inner() { - zksync_dal::SqlxError::RowNotFound => { - (StatusCode::NOT_FOUND, "Non existing L1 batch".to_owned()) - } - _ => ( - StatusCode::BAD_GATEWAY, - "Failed fetching/saving from db".to_owned(), - ), - } + ( + StatusCode::BAD_GATEWAY, + "Failed fetching/saving from db".to_owned(), + ) + } + RequestProcessorError::NoJob => { + tracing::trace!("No job found"); + (StatusCode::NOT_FOUND, "No job found".to_owned()) } }; (status_code, message).into_response() diff --git a/core/node/proof_data_handler/src/tee_request_processor.rs 
b/core/node/proof_data_handler/src/tee_request_processor.rs index 229ea6a7d811..8f10f573cd84 100644 --- a/core/node/proof_data_handler/src/tee_request_processor.rs +++ b/core/node/proof_data_handler/src/tee_request_processor.rs @@ -162,8 +162,9 @@ impl TeeRequestProcessor { self.config.proof_generation_timeout(), min_batch_number, ) - .await?; - Ok(result) + .await + .map_err(RequestProcessorError::Dal) + .and_then(|batch_number| batch_number.ok_or(RequestProcessorError::NoJob)) } async fn unlock_batch( From bcaa6c5ce1b6e3ba8fde96e36bfa904c1b520366 Mon Sep 17 00:00:00 2001 From: Harald Hoyer Date: Wed, 16 Oct 2024 10:22:57 +0200 Subject: [PATCH 28/31] fix: storage_log.kind is unused in match Signed-off-by: Harald Hoyer --- core/lib/tee_verifier/src/lib.rs | 18 +++++++++--------- 1 file changed, 9 insertions(+), 9 deletions(-) diff --git a/core/lib/tee_verifier/src/lib.rs b/core/lib/tee_verifier/src/lib.rs index d5537b8ddf12..ffe3a548a02b 100644 --- a/core/lib/tee_verifier/src/lib.rs +++ b/core/lib/tee_verifier/src/lib.rs @@ -216,16 +216,16 @@ fn map_log_tree( idx: &mut u64, ) -> anyhow::Result { let key = storage_log.key.hashed_key_u256(); - let tree_instruction = match (storage_log.is_write(), storage_log.kind, *tree_log_entry) { - (true, _, TreeLogEntry::Updated { leaf_index, .. }) => { + let tree_instruction = match (storage_log.is_write(), *tree_log_entry) { + (true, TreeLogEntry::Updated { leaf_index, .. }) => { TreeInstruction::write(key, leaf_index, H256(storage_log.value.into())) } - (true, _, TreeLogEntry::Inserted) => { + (true, TreeLogEntry::Inserted) => { let leaf_index = *idx; *idx += 1; TreeInstruction::write(key, leaf_index, H256(storage_log.value.into())) } - (false, _, TreeLogEntry::Read { value, .. }) => { + (false, TreeLogEntry::Read { value, .. }) => { if storage_log.value != value { tracing::error!( ?storage_log, @@ -238,11 +238,11 @@ fn map_log_tree( } TreeInstruction::Read(key) } - (false, _, TreeLogEntry::ReadMissingKey { .. }) => TreeInstruction::Read(key), - (true, _, TreeLogEntry::Read { .. }) - | (true, _, TreeLogEntry::ReadMissingKey) - | (false, _, TreeLogEntry::Inserted) - | (false, _, TreeLogEntry::Updated { .. }) => { + (false, TreeLogEntry::ReadMissingKey { .. }) => TreeInstruction::Read(key), + (true, TreeLogEntry::Read { .. }) + | (true, TreeLogEntry::ReadMissingKey) + | (false, TreeLogEntry::Inserted) + | (false, TreeLogEntry::Updated { .. 
}) => { tracing::error!( ?storage_log, ?tree_log_entry, From beb3eef4125805e1b4f645de826234705b23f40d Mon Sep 17 00:00:00 2001 From: Harald Hoyer Date: Wed, 16 Oct 2024 11:45:47 +0200 Subject: [PATCH 29/31] fix: use NO_CONTENT instead of NOT_FOUND for no job Signed-off-by: Harald Hoyer --- core/bin/zksync_tee_prover/src/api_client.rs | 36 +++++++++----------- core/bin/zksync_tee_prover/src/tee_prover.rs | 19 ++++------- core/node/proof_data_handler/src/errors.rs | 10 +++--- 3 files changed, 30 insertions(+), 35 deletions(-) diff --git a/core/bin/zksync_tee_prover/src/api_client.rs b/core/bin/zksync_tee_prover/src/api_client.rs index 860c102d5629..ffc2839b8d3b 100644 --- a/core/bin/zksync_tee_prover/src/api_client.rs +++ b/core/bin/zksync_tee_prover/src/api_client.rs @@ -1,13 +1,10 @@ -use reqwest::Client; +use reqwest::{Client, Response, StatusCode}; use secp256k1::{ecdsa::Signature, PublicKey}; -use serde::{de::DeserializeOwned, Serialize}; +use serde::Serialize; use url::Url; use zksync_basic_types::H256; use zksync_prover_interface::{ - api::{ - RegisterTeeAttestationRequest, RegisterTeeAttestationResponse, SubmitTeeProofRequest, - SubmitTeeProofResponse, TeeProofGenerationDataRequest, TeeProofGenerationDataResponse, - }, + api::{RegisterTeeAttestationRequest, SubmitTeeProofRequest, TeeProofGenerationDataRequest}, inputs::TeeVerifierInput, outputs::L1BatchTeeProofForL1, }; @@ -31,10 +28,9 @@ impl TeeApiClient { } } - async fn post(&self, endpoint: S, request: Req) -> Result + async fn post(&self, endpoint: S, request: Req) -> Result where Req: Serialize + std::fmt::Debug, - Resp: DeserializeOwned, S: AsRef, { let url = self.api_base_url.join(endpoint.as_ref()).unwrap(); @@ -46,9 +42,7 @@ impl TeeApiClient { .json(&request) .send() .await? - .error_for_status()? - .json::() - .await + .error_for_status() } /// Registers the attestation quote with the TEE prover interface API, effectively proving that @@ -63,8 +57,7 @@ impl TeeApiClient { attestation: attestation_quote_bytes, pubkey: public_key.serialize().to_vec(), }; - self.post::<_, RegisterTeeAttestationResponse, _>("/tee/register_attestation", request) - .await?; + self.post("/tee/register_attestation", request).await?; tracing::info!( "Attestation quote was successfully registered for the public key {}", public_key @@ -77,12 +70,17 @@ impl TeeApiClient { pub async fn get_job( &self, tee_type: TeeType, - ) -> Result, TeeProverError> { + ) -> Result, TeeProverError> { let request = TeeProofGenerationDataRequest { tee_type }; - let response = self - .post::<_, TeeProofGenerationDataResponse, _>("/tee/proof_inputs", request) - .await?; - Ok(response.0) + let response = self.post("/tee/proof_inputs", request).await?; + match response.status() { + StatusCode::OK => Ok(Some(response.json::().await?)), + StatusCode::NO_CONTENT => Ok(None), + _ => response + .json::>() + .await + .map_err(TeeProverError::Request), + } } /// Submits the successfully verified proof to the TEE prover interface API. 
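With this change the client maps the HTTP status directly onto the job contract: 200 OK carries a `TeeVerifierInput`, 204 No Content means no batch is ready. A minimal polling loop over `get_job` could look like the following sketch (`process_job` and `poll_interval` are illustrative names, not part of this series):

    loop {
        match client.get_job(config.tee_type).await {
            // 200 OK: a verifier input is ready to be proven.
            Ok(Some(input)) => process_job(input)?,
            // 204 No Content: nothing to prove yet; back off and retry.
            Ok(None) => tokio::time::sleep(poll_interval).await,
            // Transport or server errors are surfaced to the caller.
            Err(err) => return Err(err),
        }
    }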
@@ -101,7 +99,7 @@ impl TeeApiClient { tee_type, })); let observer = METRICS.proof_submitting_time.start(); - self.post::<_, SubmitTeeProofResponse, _>( + self.post( format!("/tee/submit_proofs/{batch_number}").as_str(), request, ) diff --git a/core/bin/zksync_tee_prover/src/tee_prover.rs b/core/bin/zksync_tee_prover/src/tee_prover.rs index 7d40bd324319..bb7176644e63 100644 --- a/core/bin/zksync_tee_prover/src/tee_prover.rs +++ b/core/bin/zksync_tee_prover/src/tee_prover.rs @@ -1,6 +1,5 @@ use std::fmt; -use reqwest::StatusCode; use secp256k1::{ecdsa::Signature, Message, PublicKey, Secp256k1}; use zksync_basic_types::H256; use zksync_node_framework::{ @@ -92,8 +91,8 @@ impl TeeProver { async fn step(&self, public_key: &PublicKey) -> Result, TeeProverError> { match self.api_client.get_job(self.config.tee_type).await { - Ok(job) => { - let (signature, batch_number, root_hash) = self.verify(*job)?; + Ok(Some(job)) => { + let (signature, batch_number, root_hash) = self.verify(job)?; self.api_client .submit_proof( batch_number, @@ -105,15 +104,11 @@ impl TeeProver { .await?; Ok(Some(batch_number)) } - Err(err) => match err { - TeeProverError::Request(req_err) - if req_err.status() == Some(StatusCode::NOT_FOUND) => - { - tracing::trace!("There are currently no pending batches to be proven"); - Ok(None) - } - _ => Err(err), - }, + Ok(None) => { + tracing::trace!("There are currently no pending batches to be proven"); + Ok(None) + } + Err(err) => Err(err), } } } diff --git a/core/node/proof_data_handler/src/errors.rs b/core/node/proof_data_handler/src/errors.rs index 8d90d1008e63..00ba7a3c11cd 100644 --- a/core/node/proof_data_handler/src/errors.rs +++ b/core/node/proof_data_handler/src/errors.rs @@ -20,13 +20,14 @@ impl From for RequestProcessorError { impl IntoResponse for RequestProcessorError { fn into_response(self) -> Response { - let (status_code, message) = match self { + match self { RequestProcessorError::GeneralError(err) => { tracing::error!("Error: {:?}", err); ( StatusCode::INTERNAL_SERVER_ERROR, "An internal error occurred".to_owned(), ) + .into_response() } RequestProcessorError::ObjectStore(err) => { tracing::error!("GCS error: {:?}", err); @@ -34,6 +35,7 @@ impl IntoResponse for RequestProcessorError { StatusCode::BAD_GATEWAY, "Failed fetching/saving from GCS".to_owned(), ) + .into_response() } RequestProcessorError::Dal(err) => { tracing::error!("Sqlx error: {:?}", err); @@ -41,12 +43,12 @@ impl IntoResponse for RequestProcessorError { StatusCode::BAD_GATEWAY, "Failed fetching/saving from db".to_owned(), ) + .into_response() } RequestProcessorError::NoJob => { tracing::trace!("No job found"); - (StatusCode::NOT_FOUND, "No job found".to_owned()) + (StatusCode::NO_CONTENT, ()).into_response() } - }; - (status_code, message).into_response() + } } } From cb03ac87cf9815db59c0d037ce8639ce44219ed8 Mon Sep 17 00:00:00 2001 From: Harald Hoyer Date: Wed, 16 Oct 2024 14:37:08 +0200 Subject: [PATCH 30/31] fixup! 
From cb03ac87cf9815db59c0d037ce8639ce44219ed8 Mon Sep 17 00:00:00 2001
From: Harald Hoyer
Date: Wed, 16 Oct 2024 14:37:08 +0200
Subject: [PATCH 30/31] fixup! fix: use NO_CONTENT instead of NOT_FOUND for no job

Signed-off-by: Harald Hoyer
---
 core/node/proof_data_handler/src/tests.rs | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/core/node/proof_data_handler/src/tests.rs b/core/node/proof_data_handler/src/tests.rs
index 240940506f8e..a10044cacd9c 100644
--- a/core/node/proof_data_handler/src/tests.rs
+++ b/core/node/proof_data_handler/src/tests.rs
@@ -31,7 +31,7 @@ async fn request_tee_proof_inputs() {
         L2ChainId::default(),
     );
     let test_cases = vec![
-        (json!({ "tee_type": "sgx" }), StatusCode::NOT_FOUND),
+        (json!({ "tee_type": "sgx" }), StatusCode::NO_CONTENT),
         (
             json!({ "tee_type": "Sgx" }),
             StatusCode::UNPROCESSABLE_ENTITY,

From 1d1b24316a0d24883cf4cba3b902f55f00516e8d Mon Sep 17 00:00:00 2001
From: Harald Hoyer
Date: Thu, 17 Oct 2024 09:14:47 +0200
Subject: [PATCH 31/31] fixup! fix: use NO_CONTENT instead of NOT_FOUND for no job

Signed-off-by: Harald Hoyer
---
 core/node/proof_data_handler/src/errors.rs    | 19 ++++-------------
 core/node/proof_data_handler/src/lib.rs       |  3 ++-
 .../src/tee_request_processor.rs              | 15 +++++++++------
 3 files changed, 17 insertions(+), 20 deletions(-)

diff --git a/core/node/proof_data_handler/src/errors.rs b/core/node/proof_data_handler/src/errors.rs
index 00ba7a3c11cd..7d0e33ea0a3a 100644
--- a/core/node/proof_data_handler/src/errors.rs
+++ b/core/node/proof_data_handler/src/errors.rs
@@ -6,7 +6,6 @@ use zksync_dal::DalError;
 use zksync_object_store::ObjectStoreError;
 
 pub(crate) enum RequestProcessorError {
-    NoJob,
     GeneralError(String),
     ObjectStore(ObjectStoreError),
     Dal(DalError),
@@ -20,35 +19,29 @@ impl From<DalError> for RequestProcessorError {
 
 impl IntoResponse for RequestProcessorError {
     fn into_response(self) -> Response {
-        match self {
-            RequestProcessorError::GeneralError(err) => {
+        let (status_code, message) = match self {
+            Self::GeneralError(err) => {
                 tracing::error!("Error: {:?}", err);
                 (
                     StatusCode::INTERNAL_SERVER_ERROR,
                     "An internal error occurred".to_owned(),
                 )
-                .into_response()
             }
-            RequestProcessorError::ObjectStore(err) => {
+            Self::ObjectStore(err) => {
                 tracing::error!("GCS error: {:?}", err);
                 (
                     StatusCode::BAD_GATEWAY,
                     "Failed fetching/saving from GCS".to_owned(),
                 )
-                .into_response()
             }
-            RequestProcessorError::Dal(err) => {
+            Self::Dal(err) => {
                 tracing::error!("Sqlx error: {:?}", err);
                 (
                     StatusCode::BAD_GATEWAY,
                     "Failed fetching/saving from db".to_owned(),
                 )
-                .into_response()
             }
-            RequestProcessorError::NoJob => {
-                tracing::trace!("No job found");
-                (StatusCode::NO_CONTENT, ()).into_response()
-            }
-        }
+        };
+        (status_code, message).into_response()
     }
 }

diff --git a/core/node/proof_data_handler/src/lib.rs b/core/node/proof_data_handler/src/lib.rs
index 2c6243fdb51c..a482a7bc07b2 100644
--- a/core/node/proof_data_handler/src/lib.rs
+++ b/core/node/proof_data_handler/src/lib.rs
@@ -109,7 +109,8 @@ fn create_proof_processing_router(
                         .await;

                     match result {
-                        Ok(data) => (StatusCode::OK, data).into_response(),
+                        Ok(Some(data)) => (StatusCode::OK, data).into_response(),
+                        Ok(None) => { StatusCode::NO_CONTENT.into_response() },
                         Err(e) => e.into_response(),
                     }
                 },
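Note (illustrative, not part of the patch): after this fixup the three-way outcome is decided in the router, and the error enum no longer needs a `NoJob` variant. The dispatch reduces to a pattern like the following sketch (`respond` is a hypothetical helper, not code from the repository):

```rust
use axum::{
    http::StatusCode,
    response::{IntoResponse, Response},
};

/// Maps "job found" to 200 plus a body, "no job" to a bare 204, and lets
/// the error type keep its own status-code mapping.
fn respond<T: IntoResponse, E: IntoResponse>(result: Result<Option<T>, E>) -> Response {
    match result {
        Ok(Some(data)) => (StatusCode::OK, data).into_response(),
        Ok(None) => StatusCode::NO_CONTENT.into_response(),
        Err(err) => err.into_response(),
    }
}
```

Keeping "no job" out of `RequestProcessorError` also lets `lock_batch_for_proving` return `Option<L1BatchNumber>` directly, as the final hunks below show.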
diff --git a/core/node/proof_data_handler/src/tee_request_processor.rs b/core/node/proof_data_handler/src/tee_request_processor.rs
index 8f10f573cd84..2c2a56300097 100644
--- a/core/node/proof_data_handler/src/tee_request_processor.rs
+++ b/core/node/proof_data_handler/src/tee_request_processor.rs
@@ -44,23 +44,27 @@ impl TeeRequestProcessor {
     pub(crate) async fn get_proof_generation_data(
         &self,
         request: Json<TeeProofGenerationDataRequest>,
-    ) -> Result<Json<TeeProofGenerationDataResponse>, RequestProcessorError> {
+    ) -> Result<Option<Json<TeeProofGenerationDataResponse>>, RequestProcessorError> {
         tracing::info!("Received request for proof generation data: {:?}", request);
 
         let mut min_batch_number: Option<L1BatchNumber> = None;
         let mut missing_range: Option<(L1BatchNumber, L1BatchNumber)> = None;
 
         let result = loop {
-            let l1_batch_number = self
+            let Some(l1_batch_number) = self
                 .lock_batch_for_proving(request.tee_type, min_batch_number)
-                .await?;
+                .await?
+            else {
+                // No job available
+                return Ok(None);
+            };
 
             match self
                 .tee_verifier_input_for_existing_batch(l1_batch_number)
                 .await
             {
                 Ok(input) => {
-                    break Ok(Json(TeeProofGenerationDataResponse(Box::new(input))));
+                    break Ok(Some(Json(TeeProofGenerationDataResponse(Box::new(input)))));
                 }
                 Err(RequestProcessorError::ObjectStore(ObjectStoreError::KeyNotFound(_))) => {
                     missing_range = match missing_range {
@@ -152,7 +156,7 @@ impl TeeRequestProcessor {
         &self,
         tee_type: TeeType,
         min_batch_number: Option<L1BatchNumber>,
-    ) -> Result<L1BatchNumber, RequestProcessorError> {
+    ) -> Result<Option<L1BatchNumber>, RequestProcessorError> {
         self.pool
             .connection_tagged("tee_request_processor")
             .await?
@@ -164,7 +168,6 @@
             )
             .await
             .map_err(RequestProcessorError::Dal)
-            .and_then(|batch_number| batch_number.ok_or(RequestProcessorError::NoJob))
     }
 
     async fn unlock_batch(