From fe65319da0f26ca45e95f067c1e8b97cf7874c45 Mon Sep 17 00:00:00 2001
From: EmilLuta
Date: Tue, 2 Jul 2024 12:55:03 +0200
Subject: [PATCH] feat(prover): Add prover_cli stats command (#2362)

Adds a `stats` command, which provides information (batch number, when
the request for proving was created, and how long proving took) for all
L1 Batch proofs. This supports the effort to reduce proving time (by
giving everyone visibility into the current process) and can serve as a
basis for further automation in the future (for instance, emitting
metrics or generating automated reports).
---
 core/lib/basic_types/src/prover_dal.rs        |  13 +-
 core/lib/db_connection/src/utils.rs           |  17 ++
 prover/Cargo.lock                             |   1 +
 prover/Cargo.toml                             |   1 +
 prover/prover_cli/Cargo.toml                  |   1 +
 prover/prover_cli/src/cli.rs                  |   7 +-
 prover/prover_cli/src/commands/mod.rs         |   3 +-
 prover/prover_cli/src/commands/stats.rs       |  63 +++++
 ...bb3402044d201e85e114ff4582394c32bd2bf.json |  34 +++
 ...22ff6372f63ecadb504a329499b02e7d3550e.json |  26 --
 ...e2d3a6ebb3657862b91e3ece34119f098fc2d.json |  32 +++
 ...1578db18c29cdca85b8b6aad86fe2a9bf6bbe.json |  32 ---
 ...9f41220c51f58a03c61d6b7789eab0504e320.json |  32 ---
 ...43c868c63c853edb5c4f41e48a3cc6378eca9.json |  32 +++
 ...fba74ec2cfc3c89c7e4e2ea475c3ce4092849.json |  26 ++
 .../src/fri_witness_generator_dal.rs          | 245 ++++++++++--------
 prover/prover_fri_types/src/lib.rs            |   2 -
 17 files changed, 364 insertions(+), 203 deletions(-)
 create mode 100644 prover/prover_cli/src/commands/stats.rs
 create mode 100644 prover/prover_dal/.sqlx/query-081e2b928f0816c41d6645c1dedbb3402044d201e85e114ff4582394c32bd2bf.json
 delete mode 100644 prover/prover_dal/.sqlx/query-33d6be45b246523ad76f9ae512322ff6372f63ecadb504a329499b02e7d3550e.json
 create mode 100644 prover/prover_dal/.sqlx/query-3941da180ee62a7c5d4e392ff4fe2d3a6ebb3657862b91e3ece34119f098fc2d.json
 delete mode 100644 prover/prover_dal/.sqlx/query-8182690d0326b820d23fba49d391578db18c29cdca85b8b6aad86fe2a9bf6bbe.json
 delete mode 100644 prover/prover_dal/.sqlx/query-aa91697157517322b0dbb53dca99f41220c51f58a03c61d6b7789eab0504e320.json
 create mode 100644 prover/prover_dal/.sqlx/query-abc93d27a8673b23e18d050e84c43c868c63c853edb5c4f41e48a3cc6378eca9.json
 create mode 100644 prover/prover_dal/.sqlx/query-e743af4c18ec91eb46db5a19556fba74ec2cfc3c89c7e4e2ea475c3ce4092849.json

diff --git a/core/lib/basic_types/src/prover_dal.rs b/core/lib/basic_types/src/prover_dal.rs
index 5eb00dc63a4f..3215e7095e60 100644
--- a/core/lib/basic_types/src/prover_dal.rs
+++ b/core/lib/basic_types/src/prover_dal.rs
@@ -10,10 +10,6 @@ use crate::{
     L1BatchNumber,
 };
 
-// This currently lives in `zksync_prover_types` -- we don't want a dependency between prover types (`zkevm_test_harness`) and DAL.
-// This will be gone as part of 1.5.0, when EIP4844 becomes normal jobs, rather than special cased ones.
-pub const EIP_4844_CIRCUIT_ID: u8 = 255;
-
 #[derive(Debug, Clone)]
 pub struct FriProverJobMetadata {
     pub id: u32,
@@ -382,3 +378,12 @@ pub struct ProofCompressionJobInfo {
     pub time_taken: Option<NaiveTime>,
     pub picked_by: Option<String>,
 }
+
+// Used for transferring information about L1 Batches from DAL to public interfaces (currently prover_cli stats).
+/// DTO containing information about L1 Batch Proof.
+#[derive(Debug, Clone)] +pub struct ProofGenerationTime { + pub l1_batch_number: L1BatchNumber, + pub time_taken: NaiveTime, + pub created_at: NaiveDateTime, +} diff --git a/core/lib/db_connection/src/utils.rs b/core/lib/db_connection/src/utils.rs index 7c917845c7e3..80cf0a5cbb35 100644 --- a/core/lib/db_connection/src/utils.rs +++ b/core/lib/db_connection/src/utils.rs @@ -9,6 +9,10 @@ pub(crate) struct InternalMarker; impl DbMarker for InternalMarker {} +const MICROSECONDS_IN_A_SECOND: i64 = 1_000_000; +const MICROSECONDS_IN_A_MINUTE: i64 = MICROSECONDS_IN_A_SECOND * 60; +const MICROSECONDS_IN_AN_HOUR: i64 = MICROSECONDS_IN_A_MINUTE * 60; + pub fn duration_to_naive_time(duration: Duration) -> NaiveTime { let total_seconds = duration.as_secs() as u32; NaiveTime::from_hms_opt( @@ -26,3 +30,16 @@ pub const fn pg_interval_from_duration(processing_timeout: Duration) -> PgInterv microseconds: processing_timeout.as_micros() as i64, } } + +// Note: this conversion purposefully ignores `.days` and `.months` fields of PgInterval. +// The PgIntervals expected are below 24h (represented by `.microseconds`). If that's not the case, +// the function will trim days and months. Use at your own risk. +pub fn naive_time_from_pg_interval(pg_interval: PgInterval) -> NaiveTime { + NaiveTime::from_hms_micro_opt( + (pg_interval.microseconds / MICROSECONDS_IN_AN_HOUR) as u32, + ((pg_interval.microseconds / MICROSECONDS_IN_A_MINUTE) % 60) as u32, + ((pg_interval.microseconds / MICROSECONDS_IN_A_SECOND) % 60) as u32, + (pg_interval.microseconds as u32) % 1_000_000, + ) + .expect("failed to convert PgInterval to NaiveTime") +} diff --git a/prover/Cargo.lock b/prover/Cargo.lock index 5d32755d0ab7..7483b777f68e 100644 --- a/prover/Cargo.lock +++ b/prover/Cargo.lock @@ -4589,6 +4589,7 @@ version = "0.1.0" dependencies = [ "anyhow", "bincode", + "chrono", "circuit_definitions 1.5.0", "clap 4.5.4", "colored", diff --git a/prover/Cargo.toml b/prover/Cargo.toml index 40466b879971..3bb55925543c 100644 --- a/prover/Cargo.toml +++ b/prover/Cargo.toml @@ -32,6 +32,7 @@ categories = ["cryptography"] anyhow = "1.0" async-trait = "0.1" bincode = "1" +chrono = "0.4.38" circuit_definitions = { git = "https://github.com/matter-labs/era-zkevm_test_harness.git", branch = "v1.5.0" } circuit_sequencer_api = { package = "circuit_sequencer_api", git = "https://github.com/matter-labs/era-zkevm_test_harness.git", branch = "v1.5.0" } clap = "4.4.6" diff --git a/prover/prover_cli/Cargo.toml b/prover/prover_cli/Cargo.toml index c5ec43c47cb1..f91cd47e0945 100644 --- a/prover/prover_cli/Cargo.toml +++ b/prover/prover_cli/Cargo.toml @@ -36,6 +36,7 @@ sqlx.workspace = true circuit_definitions.workspace = true serde_json.workspace = true zkevm_test_harness = { workspace = true, optional = true, features = ["verbose_circuits"] } +chrono.workspace = true [features] # enable verbose circuits, if you want to use debug_circuit command (as it is quite heavy dependency). 
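Note (illustrative, not part of the diff): the `naive_time_from_pg_interval` helper added above in core/lib/db_connection/src/utils.rs only looks at the `microseconds` field of `PgInterval`. The standalone sketch below shows the same conversion arithmetic; `PgIntervalStandIn` and `naive_time_from_interval` are hypothetical names used here so the example compiles with `chrono` alone, without pulling in sqlx.

    // Illustrative only -- not part of the patch. A standalone sketch of the
    // conversion done by `naive_time_from_pg_interval`, with a hypothetical
    // `PgIntervalStandIn` in place of `sqlx::postgres::types::PgInterval`.
    use chrono::NaiveTime;

    const MICROSECONDS_IN_A_SECOND: i64 = 1_000_000;
    const MICROSECONDS_IN_A_MINUTE: i64 = MICROSECONDS_IN_A_SECOND * 60;
    const MICROSECONDS_IN_AN_HOUR: i64 = MICROSECONDS_IN_A_MINUTE * 60;

    struct PgIntervalStandIn {
        // `months` and `days` are intentionally absent: the helper ignores them.
        microseconds: i64,
    }

    fn naive_time_from_interval(interval: &PgIntervalStandIn) -> NaiveTime {
        // Split the microsecond count into hours, minutes, seconds and
        // leftover microseconds, exactly as the helper above does.
        NaiveTime::from_hms_micro_opt(
            (interval.microseconds / MICROSECONDS_IN_AN_HOUR) as u32,
            ((interval.microseconds / MICROSECONDS_IN_A_MINUTE) % 60) as u32,
            ((interval.microseconds / MICROSECONDS_IN_A_SECOND) % 60) as u32,
            (interval.microseconds % MICROSECONDS_IN_A_SECOND) as u32,
        )
        .expect("interval must be non-negative and under 24 hours")
    }

    fn main() {
        // 1h 02m 03.5s expressed as microseconds.
        let interval = PgIntervalStandIn {
            microseconds: MICROSECONDS_IN_AN_HOUR
                + 2 * MICROSECONDS_IN_A_MINUTE
                + 3 * MICROSECONDS_IN_A_SECOND
                + 500_000,
        };
        assert_eq!(
            naive_time_from_interval(&interval),
            NaiveTime::from_hms_micro_opt(1, 2, 3, 500_000).unwrap()
        );
    }

One consequence of this design: if `microseconds` encodes 24 hours or more, `from_hms_micro_opt` returns `None` and the helper panics, so callers are expected to pass sub-24h intervals.
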
diff --git a/prover/prover_cli/src/cli.rs b/prover/prover_cli/src/cli.rs
index 57422a448881..7174830f44d1 100644
--- a/prover/prover_cli/src/cli.rs
+++ b/prover/prover_cli/src/cli.rs
@@ -1,12 +1,12 @@
 use clap::{command, Args, Parser, Subcommand};
 use zksync_types::url::SensitiveUrl;
 
-use crate::commands::{self, config, debug_proof, delete, get_file_info, requeue, restart};
+use crate::commands::{self, config, debug_proof, delete, get_file_info, requeue, restart, stats};
 
 pub const VERSION_STRING: &str = env!("CARGO_PKG_VERSION");
 
 #[derive(Parser)]
-#[command(name="prover-cli", version=VERSION_STRING, about, long_about = None)]
+#[command(name = "prover-cli", version = VERSION_STRING, about, long_about = None)]
 struct ProverCLI {
     #[command(subcommand)]
     command: ProverCommand,
@@ -35,6 +35,8 @@ enum ProverCommand {
     Status(commands::StatusCommand),
     Requeue(requeue::Args),
     Restart(restart::Args),
+    #[command(about = "Displays L1 Batch proving stats for a given period")]
+    Stats(stats::Options),
 }
 
 pub async fn start() -> anyhow::Result<()> {
@@ -47,6 +49,7 @@ pub async fn start() -> anyhow::Result<()> {
         ProverCommand::Requeue(args) => requeue::run(args, config).await?,
         ProverCommand::Restart(args) => restart::run(args).await?,
         ProverCommand::DebugProof(args) => debug_proof::run(args).await?,
+        ProverCommand::Stats(args) => stats::run(args, config).await?,
     };
 
     Ok(())
diff --git a/prover/prover_cli/src/commands/mod.rs b/prover/prover_cli/src/commands/mod.rs
index ec58554da508..4bc8b2eb392a 100644
--- a/prover/prover_cli/src/commands/mod.rs
+++ b/prover/prover_cli/src/commands/mod.rs
@@ -1,8 +1,9 @@
+pub(crate) use status::StatusCommand;
 pub(crate) mod config;
 pub(crate) mod debug_proof;
 pub(crate) mod delete;
 pub(crate) mod get_file_info;
 pub(crate) mod requeue;
 pub(crate) mod restart;
+pub(crate) mod stats;
 pub(crate) mod status;
-pub(crate) use status::StatusCommand;
diff --git a/prover/prover_cli/src/commands/stats.rs b/prover/prover_cli/src/commands/stats.rs
new file mode 100644
index 000000000000..307775fa27d3
--- /dev/null
+++ b/prover/prover_cli/src/commands/stats.rs
@@ -0,0 +1,63 @@
+use anyhow::Context;
+use chrono::{self, NaiveTime};
+use clap::{Args, ValueEnum};
+use zksync_basic_types::prover_dal::ProofGenerationTime;
+use zksync_db_connection::connection_pool::ConnectionPool;
+use zksync_prover_dal::{Prover, ProverDal};
+
+use crate::cli::ProverCLIConfig;
+
+#[derive(ValueEnum, Clone)]
+enum StatsPeriod {
+    Day,
+    Week,
+}
+
+#[derive(Args)]
+pub(crate) struct Options {
+    #[clap(
+        short = 'p',
+        long = "period",
+        help = "Specify the time frame to look for stats",
+        default_value = "day"
+    )]
+    period: StatsPeriod,
+}
+
+pub(crate) async fn run(opts: Options, config: ProverCLIConfig) -> anyhow::Result<()> {
+    let prover_connection_pool = ConnectionPool::<Prover>::singleton(config.db_url)
+        .build()
+        .await
+        .context("failed to build a prover_connection_pool")?;
+    let mut conn = prover_connection_pool
+        .connection()
+        .await
+        .context("failed to get connection from pool")?;
+
+    let start_date = match opts.period {
+        StatsPeriod::Day => chrono::offset::Local::now().date_naive(),
+        StatsPeriod::Week => {
+            (chrono::offset::Local::now() - chrono::Duration::days(7)).date_naive()
+        }
+    };
+    let start_date =
+        start_date.and_time(NaiveTime::from_num_seconds_from_midnight_opt(0, 0).unwrap());
+    let proof_generation_times = conn
+        .fri_witness_generator_dal()
+        .get_proof_generation_times_for_time_frame(start_date)
+        .await?;
+    display_proof_generation_time(proof_generation_times);
+    Ok(())
+}
+
+fn display_proof_generation_time(proof_generation_times: Vec) { + println!("Batch\tTime Taken\t\tCreated At"); + for proof_generation_time in proof_generation_times { + println!( + "{}\t{:?}\t\t{}", + proof_generation_time.l1_batch_number, + proof_generation_time.time_taken, + proof_generation_time.created_at + ); + } +} diff --git a/prover/prover_dal/.sqlx/query-081e2b928f0816c41d6645c1dedbb3402044d201e85e114ff4582394c32bd2bf.json b/prover/prover_dal/.sqlx/query-081e2b928f0816c41d6645c1dedbb3402044d201e85e114ff4582394c32bd2bf.json new file mode 100644 index 000000000000..918fb2817d26 --- /dev/null +++ b/prover/prover_dal/.sqlx/query-081e2b928f0816c41d6645c1dedbb3402044d201e85e114ff4582394c32bd2bf.json @@ -0,0 +1,34 @@ +{ + "db_name": "PostgreSQL", + "query": "\n SELECT\n comp.l1_batch_number,\n (comp.updated_at - wit.created_at) AS time_taken,\n wit.created_at\n FROM\n proof_compression_jobs_fri AS comp\n JOIN witness_inputs_fri AS wit ON comp.l1_batch_number = wit.l1_batch_number\n WHERE\n wit.created_at > $1\n ORDER BY\n time_taken DESC;\n ", + "describe": { + "columns": [ + { + "ordinal": 0, + "name": "l1_batch_number", + "type_info": "Int8" + }, + { + "ordinal": 1, + "name": "time_taken", + "type_info": "Interval" + }, + { + "ordinal": 2, + "name": "created_at", + "type_info": "Timestamp" + } + ], + "parameters": { + "Left": [ + "Timestamp" + ] + }, + "nullable": [ + false, + null, + false + ] + }, + "hash": "081e2b928f0816c41d6645c1dedbb3402044d201e85e114ff4582394c32bd2bf" +} diff --git a/prover/prover_dal/.sqlx/query-33d6be45b246523ad76f9ae512322ff6372f63ecadb504a329499b02e7d3550e.json b/prover/prover_dal/.sqlx/query-33d6be45b246523ad76f9ae512322ff6372f63ecadb504a329499b02e7d3550e.json deleted file mode 100644 index 76483cd73d31..000000000000 --- a/prover/prover_dal/.sqlx/query-33d6be45b246523ad76f9ae512322ff6372f63ecadb504a329499b02e7d3550e.json +++ /dev/null @@ -1,26 +0,0 @@ -{ - "db_name": "PostgreSQL", - "query": "\n UPDATE leaf_aggregation_witness_jobs_fri\n SET\n status = 'queued'\n WHERE\n (l1_batch_number, circuit_id) IN (\n SELECT\n prover_jobs_fri.l1_batch_number,\n prover_jobs_fri.circuit_id\n FROM\n prover_jobs_fri\n JOIN leaf_aggregation_witness_jobs_fri lawj ON prover_jobs_fri.l1_batch_number = lawj.l1_batch_number\n AND prover_jobs_fri.circuit_id = lawj.circuit_id\n WHERE\n lawj.status = 'waiting_for_proofs'\n AND prover_jobs_fri.status = 'successful'\n AND prover_jobs_fri.aggregation_round = 0\n GROUP BY\n prover_jobs_fri.l1_batch_number,\n prover_jobs_fri.circuit_id,\n lawj.number_of_basic_circuits\n HAVING\n COUNT(*) = lawj.number_of_basic_circuits\n )\n RETURNING\n l1_batch_number,\n circuit_id;\n ", - "describe": { - "columns": [ - { - "ordinal": 0, - "name": "l1_batch_number", - "type_info": "Int8" - }, - { - "ordinal": 1, - "name": "circuit_id", - "type_info": "Int2" - } - ], - "parameters": { - "Left": [] - }, - "nullable": [ - false, - false - ] - }, - "hash": "33d6be45b246523ad76f9ae512322ff6372f63ecadb504a329499b02e7d3550e" -} diff --git a/prover/prover_dal/.sqlx/query-3941da180ee62a7c5d4e392ff4fe2d3a6ebb3657862b91e3ece34119f098fc2d.json b/prover/prover_dal/.sqlx/query-3941da180ee62a7c5d4e392ff4fe2d3a6ebb3657862b91e3ece34119f098fc2d.json new file mode 100644 index 000000000000..d0dd5f6976b5 --- /dev/null +++ b/prover/prover_dal/.sqlx/query-3941da180ee62a7c5d4e392ff4fe2d3a6ebb3657862b91e3ece34119f098fc2d.json @@ -0,0 +1,32 @@ +{ + "db_name": "PostgreSQL", + "query": "\n UPDATE node_aggregation_witness_jobs_fri\n SET\n status = 'queued'\n WHERE\n 
(l1_batch_number, circuit_id, depth) IN (\n SELECT\n prover_jobs_fri.l1_batch_number,\n prover_jobs_fri.circuit_id,\n prover_jobs_fri.depth\n FROM\n prover_jobs_fri\n JOIN node_aggregation_witness_jobs_fri nawj ON prover_jobs_fri.l1_batch_number = nawj.l1_batch_number\n AND prover_jobs_fri.circuit_id = nawj.circuit_id\n AND prover_jobs_fri.depth = nawj.depth\n WHERE\n nawj.status = 'waiting_for_proofs'\n AND prover_jobs_fri.status = 'successful'\n AND prover_jobs_fri.aggregation_round = 2\n GROUP BY\n prover_jobs_fri.l1_batch_number,\n prover_jobs_fri.circuit_id,\n prover_jobs_fri.depth,\n nawj.number_of_dependent_jobs\n HAVING\n COUNT(*) = nawj.number_of_dependent_jobs\n )\n RETURNING\n l1_batch_number,\n circuit_id,\n depth;\n ", + "describe": { + "columns": [ + { + "ordinal": 0, + "name": "l1_batch_number", + "type_info": "Int8" + }, + { + "ordinal": 1, + "name": "circuit_id", + "type_info": "Int2" + }, + { + "ordinal": 2, + "name": "depth", + "type_info": "Int4" + } + ], + "parameters": { + "Left": [] + }, + "nullable": [ + false, + false, + false + ] + }, + "hash": "3941da180ee62a7c5d4e392ff4fe2d3a6ebb3657862b91e3ece34119f098fc2d" +} diff --git a/prover/prover_dal/.sqlx/query-8182690d0326b820d23fba49d391578db18c29cdca85b8b6aad86fe2a9bf6bbe.json b/prover/prover_dal/.sqlx/query-8182690d0326b820d23fba49d391578db18c29cdca85b8b6aad86fe2a9bf6bbe.json deleted file mode 100644 index fac64c1ea3f9..000000000000 --- a/prover/prover_dal/.sqlx/query-8182690d0326b820d23fba49d391578db18c29cdca85b8b6aad86fe2a9bf6bbe.json +++ /dev/null @@ -1,32 +0,0 @@ -{ - "db_name": "PostgreSQL", - "query": "\n UPDATE node_aggregation_witness_jobs_fri\n SET\n status = 'queued'\n WHERE\n (l1_batch_number, circuit_id, depth) IN (\n SELECT\n prover_jobs_fri.l1_batch_number,\n prover_jobs_fri.circuit_id,\n prover_jobs_fri.depth\n FROM\n prover_jobs_fri\n JOIN node_aggregation_witness_jobs_fri nawj ON prover_jobs_fri.l1_batch_number = nawj.l1_batch_number\n AND prover_jobs_fri.circuit_id = nawj.circuit_id\n AND prover_jobs_fri.depth = nawj.depth\n WHERE\n nawj.status = 'waiting_for_proofs'\n AND prover_jobs_fri.status = 'successful'\n AND prover_jobs_fri.aggregation_round = 2\n GROUP BY\n prover_jobs_fri.l1_batch_number,\n prover_jobs_fri.circuit_id,\n prover_jobs_fri.depth,\n nawj.number_of_dependent_jobs\n HAVING\n COUNT(*) = nawj.number_of_dependent_jobs\n )\n RETURNING\n l1_batch_number,\n circuit_id,\n depth;\n ", - "describe": { - "columns": [ - { - "ordinal": 0, - "name": "l1_batch_number", - "type_info": "Int8" - }, - { - "ordinal": 1, - "name": "circuit_id", - "type_info": "Int2" - }, - { - "ordinal": 2, - "name": "depth", - "type_info": "Int4" - } - ], - "parameters": { - "Left": [] - }, - "nullable": [ - false, - false, - false - ] - }, - "hash": "8182690d0326b820d23fba49d391578db18c29cdca85b8b6aad86fe2a9bf6bbe" -} diff --git a/prover/prover_dal/.sqlx/query-aa91697157517322b0dbb53dca99f41220c51f58a03c61d6b7789eab0504e320.json b/prover/prover_dal/.sqlx/query-aa91697157517322b0dbb53dca99f41220c51f58a03c61d6b7789eab0504e320.json deleted file mode 100644 index 27d482317286..000000000000 --- a/prover/prover_dal/.sqlx/query-aa91697157517322b0dbb53dca99f41220c51f58a03c61d6b7789eab0504e320.json +++ /dev/null @@ -1,32 +0,0 @@ -{ - "db_name": "PostgreSQL", - "query": "\n UPDATE node_aggregation_witness_jobs_fri\n SET\n status = 'queued'\n WHERE\n (l1_batch_number, circuit_id, depth) IN (\n SELECT\n prover_jobs_fri.l1_batch_number,\n prover_jobs_fri.circuit_id,\n prover_jobs_fri.depth\n FROM\n prover_jobs_fri\n JOIN 
node_aggregation_witness_jobs_fri nawj ON prover_jobs_fri.l1_batch_number = nawj.l1_batch_number\n AND prover_jobs_fri.circuit_id = nawj.circuit_id\n AND prover_jobs_fri.depth = nawj.depth\n WHERE\n nawj.status = 'waiting_for_proofs'\n AND prover_jobs_fri.status = 'successful'\n AND prover_jobs_fri.aggregation_round = 1\n AND prover_jobs_fri.depth = 0\n GROUP BY\n prover_jobs_fri.l1_batch_number,\n prover_jobs_fri.circuit_id,\n prover_jobs_fri.depth,\n nawj.number_of_dependent_jobs\n HAVING\n COUNT(*) = nawj.number_of_dependent_jobs\n )\n RETURNING\n l1_batch_number,\n circuit_id,\n depth;\n ", - "describe": { - "columns": [ - { - "ordinal": 0, - "name": "l1_batch_number", - "type_info": "Int8" - }, - { - "ordinal": 1, - "name": "circuit_id", - "type_info": "Int2" - }, - { - "ordinal": 2, - "name": "depth", - "type_info": "Int4" - } - ], - "parameters": { - "Left": [] - }, - "nullable": [ - false, - false, - false - ] - }, - "hash": "aa91697157517322b0dbb53dca99f41220c51f58a03c61d6b7789eab0504e320" -} diff --git a/prover/prover_dal/.sqlx/query-abc93d27a8673b23e18d050e84c43c868c63c853edb5c4f41e48a3cc6378eca9.json b/prover/prover_dal/.sqlx/query-abc93d27a8673b23e18d050e84c43c868c63c853edb5c4f41e48a3cc6378eca9.json new file mode 100644 index 000000000000..fae5c1041a5d --- /dev/null +++ b/prover/prover_dal/.sqlx/query-abc93d27a8673b23e18d050e84c43c868c63c853edb5c4f41e48a3cc6378eca9.json @@ -0,0 +1,32 @@ +{ + "db_name": "PostgreSQL", + "query": "\n UPDATE node_aggregation_witness_jobs_fri\n SET\n status = 'queued'\n WHERE\n (l1_batch_number, circuit_id, depth) IN (\n SELECT\n prover_jobs_fri.l1_batch_number,\n prover_jobs_fri.circuit_id,\n prover_jobs_fri.depth\n FROM\n prover_jobs_fri\n JOIN node_aggregation_witness_jobs_fri nawj ON prover_jobs_fri.l1_batch_number = nawj.l1_batch_number\n AND prover_jobs_fri.circuit_id = nawj.circuit_id\n AND prover_jobs_fri.depth = nawj.depth\n WHERE\n nawj.status = 'waiting_for_proofs'\n AND prover_jobs_fri.status = 'successful'\n AND prover_jobs_fri.aggregation_round = 1\n AND prover_jobs_fri.depth = 0\n GROUP BY\n prover_jobs_fri.l1_batch_number,\n prover_jobs_fri.circuit_id,\n prover_jobs_fri.depth,\n nawj.number_of_dependent_jobs\n HAVING\n COUNT(*) = nawj.number_of_dependent_jobs\n )\n RETURNING\n l1_batch_number,\n circuit_id,\n depth;\n ", + "describe": { + "columns": [ + { + "ordinal": 0, + "name": "l1_batch_number", + "type_info": "Int8" + }, + { + "ordinal": 1, + "name": "circuit_id", + "type_info": "Int2" + }, + { + "ordinal": 2, + "name": "depth", + "type_info": "Int4" + } + ], + "parameters": { + "Left": [] + }, + "nullable": [ + false, + false, + false + ] + }, + "hash": "abc93d27a8673b23e18d050e84c43c868c63c853edb5c4f41e48a3cc6378eca9" +} diff --git a/prover/prover_dal/.sqlx/query-e743af4c18ec91eb46db5a19556fba74ec2cfc3c89c7e4e2ea475c3ce4092849.json b/prover/prover_dal/.sqlx/query-e743af4c18ec91eb46db5a19556fba74ec2cfc3c89c7e4e2ea475c3ce4092849.json new file mode 100644 index 000000000000..af6210ae91e4 --- /dev/null +++ b/prover/prover_dal/.sqlx/query-e743af4c18ec91eb46db5a19556fba74ec2cfc3c89c7e4e2ea475c3ce4092849.json @@ -0,0 +1,26 @@ +{ + "db_name": "PostgreSQL", + "query": "\n UPDATE leaf_aggregation_witness_jobs_fri\n SET\n status = 'queued'\n WHERE\n (l1_batch_number, circuit_id) IN (\n SELECT\n prover_jobs_fri.l1_batch_number,\n prover_jobs_fri.circuit_id\n FROM\n prover_jobs_fri\n JOIN leaf_aggregation_witness_jobs_fri lawj ON prover_jobs_fri.l1_batch_number = lawj.l1_batch_number\n AND prover_jobs_fri.circuit_id = lawj.circuit_id\n 
WHERE\n lawj.status = 'waiting_for_proofs'\n AND prover_jobs_fri.status = 'successful'\n AND prover_jobs_fri.aggregation_round = 0\n GROUP BY\n prover_jobs_fri.l1_batch_number,\n prover_jobs_fri.circuit_id,\n lawj.number_of_basic_circuits\n HAVING\n COUNT(*) = lawj.number_of_basic_circuits\n )\n RETURNING\n l1_batch_number,\n circuit_id;\n ", + "describe": { + "columns": [ + { + "ordinal": 0, + "name": "l1_batch_number", + "type_info": "Int8" + }, + { + "ordinal": 1, + "name": "circuit_id", + "type_info": "Int2" + } + ], + "parameters": { + "Left": [] + }, + "nullable": [ + false, + false + ] + }, + "hash": "e743af4c18ec91eb46db5a19556fba74ec2cfc3c89c7e4e2ea475c3ce4092849" +} diff --git a/prover/prover_dal/src/fri_witness_generator_dal.rs b/prover/prover_dal/src/fri_witness_generator_dal.rs index 8db30e5a7f11..d884ce05aa16 100644 --- a/prover/prover_dal/src/fri_witness_generator_dal.rs +++ b/prover/prover_dal/src/fri_witness_generator_dal.rs @@ -1,19 +1,22 @@ #![doc = include_str!("../doc/FriWitnessGeneratorDal.md")] + use std::{collections::HashMap, str::FromStr, time::Duration}; -use sqlx::Row; +use sqlx::{types::chrono::NaiveDateTime, Row}; use zksync_basic_types::{ basic_fri_types::{AggregationRound, Eip4844Blobs}, protocol_version::{ProtocolSemanticVersion, ProtocolVersionId, VersionPatch}, prover_dal::{ BasicWitnessGeneratorJobInfo, JobCountStatistics, LeafAggregationJobMetadata, LeafWitnessGeneratorJobInfo, NodeAggregationJobMetadata, NodeWitnessGeneratorJobInfo, - RecursionTipWitnessGeneratorJobInfo, SchedulerWitnessGeneratorJobInfo, StuckJobs, - WitnessJobStatus, + ProofGenerationTime, RecursionTipWitnessGeneratorJobInfo, SchedulerWitnessGeneratorJobInfo, + StuckJobs, WitnessJobStatus, }, L1BatchNumber, }; -use zksync_db_connection::{connection::Connection, metrics::MethodLatency}; +use zksync_db_connection::{ + connection::Connection, metrics::MethodLatency, utils::naive_time_from_pg_interval, +}; use crate::{duration_to_naive_time, pg_interval_from_duration, Prover}; @@ -556,34 +559,34 @@ impl FriWitnessGeneratorDal<'_, '_> { pub async fn move_leaf_aggregation_jobs_from_waiting_to_queued(&mut self) -> Vec<(i64, u8)> { sqlx::query!( - r#" - UPDATE leaf_aggregation_witness_jobs_fri - SET - status = 'queued' - WHERE - (l1_batch_number, circuit_id) IN ( - SELECT - prover_jobs_fri.l1_batch_number, - prover_jobs_fri.circuit_id - FROM - prover_jobs_fri - JOIN leaf_aggregation_witness_jobs_fri lawj ON prover_jobs_fri.l1_batch_number = lawj.l1_batch_number - AND prover_jobs_fri.circuit_id = lawj.circuit_id - WHERE - lawj.status = 'waiting_for_proofs' - AND prover_jobs_fri.status = 'successful' - AND prover_jobs_fri.aggregation_round = 0 - GROUP BY - prover_jobs_fri.l1_batch_number, - prover_jobs_fri.circuit_id, - lawj.number_of_basic_circuits - HAVING - COUNT(*) = lawj.number_of_basic_circuits - ) - RETURNING - l1_batch_number, - circuit_id; - "#, + r#" + UPDATE leaf_aggregation_witness_jobs_fri + SET + status = 'queued' + WHERE + (l1_batch_number, circuit_id) IN ( + SELECT + prover_jobs_fri.l1_batch_number, + prover_jobs_fri.circuit_id + FROM + prover_jobs_fri + JOIN leaf_aggregation_witness_jobs_fri lawj ON prover_jobs_fri.l1_batch_number = lawj.l1_batch_number + AND prover_jobs_fri.circuit_id = lawj.circuit_id + WHERE + lawj.status = 'waiting_for_proofs' + AND prover_jobs_fri.status = 'successful' + AND prover_jobs_fri.aggregation_round = 0 + GROUP BY + prover_jobs_fri.l1_batch_number, + prover_jobs_fri.circuit_id, + lawj.number_of_basic_circuits + HAVING + COUNT(*) = 
lawj.number_of_basic_circuits + ) + RETURNING + l1_batch_number, + circuit_id; + "#, ) .fetch_all(self.storage.conn()) .await @@ -797,39 +800,39 @@ impl FriWitnessGeneratorDal<'_, '_> { pub async fn move_depth_zero_node_aggregation_jobs(&mut self) -> Vec<(i64, u8, u16)> { sqlx::query!( - r#" - UPDATE node_aggregation_witness_jobs_fri - SET - status = 'queued' - WHERE - (l1_batch_number, circuit_id, depth) IN ( - SELECT - prover_jobs_fri.l1_batch_number, - prover_jobs_fri.circuit_id, - prover_jobs_fri.depth - FROM - prover_jobs_fri - JOIN node_aggregation_witness_jobs_fri nawj ON prover_jobs_fri.l1_batch_number = nawj.l1_batch_number - AND prover_jobs_fri.circuit_id = nawj.circuit_id - AND prover_jobs_fri.depth = nawj.depth - WHERE - nawj.status = 'waiting_for_proofs' - AND prover_jobs_fri.status = 'successful' - AND prover_jobs_fri.aggregation_round = 1 - AND prover_jobs_fri.depth = 0 - GROUP BY - prover_jobs_fri.l1_batch_number, - prover_jobs_fri.circuit_id, - prover_jobs_fri.depth, - nawj.number_of_dependent_jobs - HAVING - COUNT(*) = nawj.number_of_dependent_jobs - ) - RETURNING - l1_batch_number, - circuit_id, - depth; - "#, + r#" + UPDATE node_aggregation_witness_jobs_fri + SET + status = 'queued' + WHERE + (l1_batch_number, circuit_id, depth) IN ( + SELECT + prover_jobs_fri.l1_batch_number, + prover_jobs_fri.circuit_id, + prover_jobs_fri.depth + FROM + prover_jobs_fri + JOIN node_aggregation_witness_jobs_fri nawj ON prover_jobs_fri.l1_batch_number = nawj.l1_batch_number + AND prover_jobs_fri.circuit_id = nawj.circuit_id + AND prover_jobs_fri.depth = nawj.depth + WHERE + nawj.status = 'waiting_for_proofs' + AND prover_jobs_fri.status = 'successful' + AND prover_jobs_fri.aggregation_round = 1 + AND prover_jobs_fri.depth = 0 + GROUP BY + prover_jobs_fri.l1_batch_number, + prover_jobs_fri.circuit_id, + prover_jobs_fri.depth, + nawj.number_of_dependent_jobs + HAVING + COUNT(*) = nawj.number_of_dependent_jobs + ) + RETURNING + l1_batch_number, + circuit_id, + depth; + "#, ) .fetch_all(self.storage.conn()) .await @@ -841,38 +844,38 @@ impl FriWitnessGeneratorDal<'_, '_> { pub async fn move_depth_non_zero_node_aggregation_jobs(&mut self) -> Vec<(i64, u8, u16)> { sqlx::query!( - r#" - UPDATE node_aggregation_witness_jobs_fri - SET - status = 'queued' - WHERE - (l1_batch_number, circuit_id, depth) IN ( - SELECT - prover_jobs_fri.l1_batch_number, - prover_jobs_fri.circuit_id, - prover_jobs_fri.depth - FROM - prover_jobs_fri - JOIN node_aggregation_witness_jobs_fri nawj ON prover_jobs_fri.l1_batch_number = nawj.l1_batch_number - AND prover_jobs_fri.circuit_id = nawj.circuit_id - AND prover_jobs_fri.depth = nawj.depth - WHERE - nawj.status = 'waiting_for_proofs' - AND prover_jobs_fri.status = 'successful' - AND prover_jobs_fri.aggregation_round = 2 - GROUP BY - prover_jobs_fri.l1_batch_number, - prover_jobs_fri.circuit_id, - prover_jobs_fri.depth, - nawj.number_of_dependent_jobs - HAVING - COUNT(*) = nawj.number_of_dependent_jobs - ) - RETURNING - l1_batch_number, - circuit_id, - depth; - "#, + r#" + UPDATE node_aggregation_witness_jobs_fri + SET + status = 'queued' + WHERE + (l1_batch_number, circuit_id, depth) IN ( + SELECT + prover_jobs_fri.l1_batch_number, + prover_jobs_fri.circuit_id, + prover_jobs_fri.depth + FROM + prover_jobs_fri + JOIN node_aggregation_witness_jobs_fri nawj ON prover_jobs_fri.l1_batch_number = nawj.l1_batch_number + AND prover_jobs_fri.circuit_id = nawj.circuit_id + AND prover_jobs_fri.depth = nawj.depth + WHERE + nawj.status = 'waiting_for_proofs' + AND 
prover_jobs_fri.status = 'successful' + AND prover_jobs_fri.aggregation_round = 2 + GROUP BY + prover_jobs_fri.l1_batch_number, + prover_jobs_fri.circuit_id, + prover_jobs_fri.depth, + nawj.number_of_dependent_jobs + HAVING + COUNT(*) = nawj.number_of_dependent_jobs + ) + RETURNING + l1_batch_number, + circuit_id, + depth; + "#, ) .fetch_all(self.storage.conn()) .await @@ -910,13 +913,13 @@ impl FriWitnessGeneratorDal<'_, '_> { l1_batch_number; "#, AggregationRound::NodeAggregation as i64, - ) - .fetch_all(self.storage.conn()) - .await - .unwrap() - .into_iter() - .map(|row| (row.l1_batch_number as u64)) - .collect() + ) + .fetch_all(self.storage.conn()) + .await + .unwrap() + .into_iter() + .map(|row| (row.l1_batch_number as u64)) + .collect() } pub async fn move_scheduler_jobs_from_waiting_to_queued(&mut self) -> Vec { @@ -1903,4 +1906,38 @@ impl FriWitnessGeneratorDal<'_, '_> { AggregationRound::LeafAggregation | AggregationRound::NodeAggregation => "id", } } + + pub async fn get_proof_generation_times_for_time_frame( + &mut self, + time_frame: NaiveDateTime, + ) -> sqlx::Result> { + let proof_generation_times = sqlx::query!( + r#" + SELECT + comp.l1_batch_number, + (comp.updated_at - wit.created_at) AS time_taken, + wit.created_at + FROM + proof_compression_jobs_fri AS comp + JOIN witness_inputs_fri AS wit ON comp.l1_batch_number = wit.l1_batch_number + WHERE + wit.created_at > $1 + ORDER BY + time_taken DESC; + "#, + time_frame.into(), + ) + .fetch_all(self.storage.conn()) + .await? + .into_iter() + .map(|row| ProofGenerationTime { + l1_batch_number: L1BatchNumber(row.l1_batch_number as u32), + time_taken: naive_time_from_pg_interval( + row.time_taken.expect("time_taken must be present"), + ), + created_at: row.created_at, + }) + .collect(); + Ok(proof_generation_times) + } } diff --git a/prover/prover_fri_types/src/lib.rs b/prover/prover_fri_types/src/lib.rs index 0c6557c27ffc..425adc418628 100644 --- a/prover/prover_fri_types/src/lib.rs +++ b/prover/prover_fri_types/src/lib.rs @@ -25,8 +25,6 @@ use crate::keys::FriCircuitKey; pub mod keys; pub mod queue; -pub const EIP_4844_CIRCUIT_ID: u8 = 255; - // THESE VALUES SHOULD BE UPDATED ON ANY PROTOCOL UPGRADE OF PROVERS pub const PROVER_PROTOCOL_VERSION: ProtocolVersionId = ProtocolVersionId::Version24; pub const PROVER_PROTOCOL_PATCH: VersionPatch = VersionPatch(1);
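
End-to-end, the new pieces compose as follows: the `Stats` subcommand (`prover_cli stats --period day|week`) calls `get_proof_generation_times_for_time_frame`, which joins `proof_compression_jobs_fri` with `witness_inputs_fri` and yields one `ProofGenerationTime` per batch, and `display_proof_generation_time` prints them as a tab-separated table. The sketch below (illustrative only, not part of the patch) reproduces that table layout with hand-made data; the batch number and timestamps are made up, and it assumes a scratch binary inside the workspace with `zksync_basic_types` and `chrono` as dependencies.

    // Illustrative only -- not part of the patch. Mimics the output format of
    // `display_proof_generation_time` so the expected `prover_cli stats`
    // output is easy to picture. The data below is invented.
    use chrono::{NaiveDate, NaiveTime};
    use zksync_basic_types::{prover_dal::ProofGenerationTime, L1BatchNumber};

    fn main() {
        let rows = vec![ProofGenerationTime {
            l1_batch_number: L1BatchNumber(42),
            time_taken: NaiveTime::from_hms_opt(1, 23, 45).unwrap(),
            created_at: NaiveDate::from_ymd_opt(2024, 7, 2)
                .unwrap()
                .and_hms_opt(10, 0, 0)
                .unwrap(),
        }];

        // Same header and format string as `display_proof_generation_time`.
        println!("Batch\tTime Taken\t\tCreated At");
        for row in rows {
            println!(
                "{}\t{:?}\t\t{}",
                row.l1_batch_number, row.time_taken, row.created_at
            );
        }
    }

With this data the table would print something like `42	01:23:45		2024-07-02 10:00:00` under the header row, one line per batch, ordered by the SQL query from longest to shortest proving time.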