feat(vm-runner): implement VM runner storage layer (#1651)
## What ❔

Adds an abstraction that can load unprocessed batches as well as implement `ReadStorageFactory`. The implementation largely reuses the existing primitives from the state keeper, which had to be generalized slightly.

## Why ❔

One of the components for the upcoming VM runner.

## Checklist

- [x] PR title corresponds to the body of PR (we generate changelog entries from PRs).
- [x] Tests for the changes have been added / updated.
- [x] Documentation comments have been added / updated.
- [x] Code has been formatted via `zk fmt` and `zk lint`.
- [x] Spellcheck has been run via `zk spellcheck`.
- [x] Linkcheck has been run via `zk linkcheck`.
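The `ReadStorageFactory` trait already exists in the codebase (the new file below links to it in its doc comment), so this PR plugs the VM runner's storage into that interface while adding a way to enumerate batches that still need to be run. The diff shown on this page does not include the new abstraction itself; purely as an illustration of what "load unprocessed batches" could look like, here is a rough sketch. All names and signatures in it are hypothetical and not taken from the PR; it also assumes Rust 1.75+ for `async fn` in traits.

```rust
use tokio::sync::watch;
use zksync_types::L1BatchNumber;

/// Hypothetical sketch (not the PR's actual API): something that can tell the VM runner
/// which L1 batch to process next, or `None` once processing should stop.
trait UnprocessedBatchLoader {
    /// Returns the next L1 batch that the VM runner has not processed yet.
    async fn next_unprocessed_batch(
        &mut self,
        stop_receiver: &watch::Receiver<bool>,
    ) -> anyhow::Result<Option<L1BatchNumber>>;
}
```

Per the PR description, the real abstraction additionally implements `ReadStorageFactory`, which is what ties batch loading into the storage layer shown further down.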
Showing 30 changed files with 1,481 additions and 232 deletions.
```diff
@@ -943,3 +943,4 @@ superset
 80M
 780kb
 hyperchain
+storages
```
Generated `.sqlx` query files (not rendered by default):

- `...lib/dal/.sqlx/query-0d6916c4bd6ef223f921723642059ff8d1eef6198390b84127a50c8f460fd2de.json` (22 additions, 0 deletions)
- `...lib/dal/.sqlx/query-443b5c62c2c274369764ac5279d8f6f962ff3f07aa1cf7f0ffcefffccb633cdd.json` (23 additions, 0 deletions)
- `...lib/dal/.sqlx/query-9615d45082a848792bca181a3b4e4e7cd09ca6a5f2b5cac5b130f1476214f403.json` (26 additions, 0 deletions)
New file (80 lines added):

```rust
use std::{sync::Arc, time::Instant};

use anyhow::Context;
use once_cell::sync::OnceCell;
use tokio::sync::watch;
use zksync_dal::{ConnectionPool, Core};
use zksync_shared_metrics::{SnapshotRecoveryStage, APP_METRICS};
use zksync_storage::RocksDB;
use zksync_types::L1BatchNumber;

use crate::{RocksdbStorage, StateKeeperColumnFamily};

/// A runnable task that blocks until the provided RocksDB cache instance is caught up with
/// Postgres.
///
/// See [`ReadStorageFactory`] for more context.
#[derive(Debug)]
pub struct AsyncCatchupTask {
    pool: ConnectionPool<Core>,
    state_keeper_db_path: String,
    rocksdb_cell: Arc<OnceCell<RocksDB<StateKeeperColumnFamily>>>,
    to_l1_batch_number: Option<L1BatchNumber>,
}

impl AsyncCatchupTask {
    /// Create a new catch-up task with the provided Postgres and RocksDB instances. Optionally
    /// accepts the last L1 batch number to catch up to (defaults to latest if not specified).
    pub fn new(
        pool: ConnectionPool<Core>,
        state_keeper_db_path: String,
        rocksdb_cell: Arc<OnceCell<RocksDB<StateKeeperColumnFamily>>>,
        to_l1_batch_number: Option<L1BatchNumber>,
    ) -> Self {
        Self {
            pool,
            state_keeper_db_path,
            rocksdb_cell,
            to_l1_batch_number,
        }
    }

    /// Block until RocksDB cache instance is caught up with Postgres.
    ///
    /// # Errors
    ///
    /// Propagates RocksDB and Postgres errors.
    pub async fn run(self, stop_receiver: watch::Receiver<bool>) -> anyhow::Result<()> {
        let started_at = Instant::now();
        tracing::debug!("Catching up RocksDB asynchronously");

        let mut rocksdb_builder = RocksdbStorage::builder(self.state_keeper_db_path.as_ref())
            .await
            .context("Failed creating RocksDB storage builder")?;
        let mut connection = self.pool.connection().await?;
        let was_recovered_from_snapshot = rocksdb_builder
            .ensure_ready(&mut connection, &stop_receiver)
            .await
            .context("failed initializing state keeper RocksDB from snapshot or scratch")?;
        if was_recovered_from_snapshot {
            let elapsed = started_at.elapsed();
            APP_METRICS.snapshot_recovery_latency[&SnapshotRecoveryStage::StateKeeperCache]
                .set(elapsed);
            tracing::info!("Recovered state keeper RocksDB from snapshot in {elapsed:?}");
        }

        let rocksdb = rocksdb_builder
            .synchronize(&mut connection, &stop_receiver, self.to_l1_batch_number)
            .await
            .context("Failed to catch up RocksDB to Postgres")?;
        drop(connection);
        if let Some(rocksdb) = rocksdb {
            self.rocksdb_cell
                .set(rocksdb.into_rocksdb())
                .map_err(|_| anyhow::anyhow!("Async RocksDB cache was initialized twice"))?;
        } else {
            tracing::info!("Synchronizing RocksDB interrupted");
        }
        Ok(())
    }
}
```
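To show how this task is meant to be driven, here is a minimal usage sketch. It is not part of the PR: it assumes a `ConnectionPool<Core>` is already available, that `AsyncCatchupTask` is re-exported from the crate root, and uses a placeholder cache path; everything else follows the signatures visible in the file above.

```rust
use std::sync::Arc;

use once_cell::sync::OnceCell;
use tokio::sync::watch;
use zksync_dal::{ConnectionPool, Core};

// Assumption: the struct from the file above is re-exported at the crate root.
use crate::AsyncCatchupTask;

async fn run_catchup_example(pool: ConnectionPool<Core>) -> anyhow::Result<()> {
    // Shared cell that the task fills with the caught-up RocksDB instance.
    let rocksdb_cell = Arc::new(OnceCell::new());
    // Flipping this sender to `true` asks the task to stop early.
    let (stop_sender, stop_receiver) = watch::channel(false);

    let task = AsyncCatchupTask::new(
        pool,
        "./db/state_keeper_cache".to_owned(), // hypothetical cache path
        rocksdb_cell.clone(),
        None, // catch up to the latest L1 batch in Postgres
    );
    // Run the catch-up in the background while other components start.
    let handle = tokio::spawn(task.run(stop_receiver));

    // ...later: wait for the task and use the cache if it was produced.
    handle.await??;
    if rocksdb_cell.get().is_some() {
        tracing::info!("State keeper RocksDB cache is ready");
    }
    drop(stop_sender);
    Ok(())
}
```

The `OnceCell` indirection lets other components hold a handle to the cache before catch-up has finished, which is also why a second initialization is turned into an error rather than silently overwriting the instance.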