feat(metadata-calculator): option to use VM runner for protective reads (matter-labs#2318)

## What ❔

This PR adds a `sealed_batches_have_protective_reads` flag to `MetadataCalculatorConfig`. On the main node it is wired from the state keeper's `protective_reads_persistence_enabled` setting; on the external node it comes from the optional node config. The flag tells the Merkle tree updater whether sealed L1 batches already have protective reads persisted by the state keeper; when they don't, the calculator can instead rely on the protective-reads VM runner to produce them. The PR also adds a `delete_protective_reads` method to `VmRunnerDal` for rolling back VM-runner-produced reads.

## Why ❔

Persisting protective reads in the state keeper duplicates work on nodes that already run the protective-reads VM runner. Making the source of protective reads configurable lets the metadata calculator consume them from whichever component produces them.

## Checklist


- [ ] PR title corresponds to the body of PR (we generate changelog entries from PRs).
- [ ] Tests for the changes have been added / updated.
- [ ] Documentation comments have been added / updated.
- [ ] Code has been formatted via `zk fmt` and `zk lint`.
itegulov authored and irnb committed Jul 12, 2024
1 parent 0b018a1 commit 0466655
Showing 12 changed files with 277 additions and 91 deletions.
1 change: 1 addition & 0 deletions core/bin/external_node/src/main.rs

```diff
@@ -139,6 +139,7 @@ async fn run_tree(
             .merkle_tree_include_indices_and_filters_in_block_cache,
         memtable_capacity: config.optional.merkle_tree_memtable_capacity(),
         stalled_writes_timeout: config.optional.merkle_tree_stalled_writes_timeout(),
+        sealed_batches_have_protective_reads: config.optional.protective_reads_persistence_enabled,
         recovery: MetadataCalculatorRecoveryConfig {
             desired_chunk_size: config.experimental.snapshots_recovery_tree_chunk_size,
             parallel_persistence_buffer: config
```
4 changes: 4 additions & 0 deletions core/bin/external_node/src/node_builder.rs

```diff
@@ -306,6 +306,10 @@ impl ExternalNodeBuilder {
             .merkle_tree_include_indices_and_filters_in_block_cache,
         memtable_capacity: self.config.optional.merkle_tree_memtable_capacity(),
         stalled_writes_timeout: self.config.optional.merkle_tree_stalled_writes_timeout(),
+        sealed_batches_have_protective_reads: self
+            .config
+            .optional
+            .protective_reads_persistence_enabled,
         recovery: MetadataCalculatorRecoveryConfig {
             desired_chunk_size: self.config.experimental.snapshots_recovery_tree_chunk_size,
             parallel_persistence_buffer: self
```
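For reference, the external node reads the flag from its optional config section. A minimal sketch of the field being accessed above — the `OptionalENConfig` struct name and surrounding layout are assumptions inferred from the `config.optional.protective_reads_persistence_enabled` accesses in this diff, not shown in it:

```rust
// Sketch of the external node's optional config (assumed layout).
pub struct OptionalENConfig {
    // ...
    /// Whether sealed L1 batches come with protective reads persisted by the
    /// state keeper. When `false`, the metadata calculator is expected to get
    /// them from the protective-reads VM runner instead (per this PR's title).
    pub protective_reads_persistence_enabled: bool,
}
```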
2 changes: 2 additions & 0 deletions core/bin/zksync_server/src/node_builder.rs

```diff
@@ -175,9 +175,11 @@ impl MainNodeBuilder {
         let merkle_tree_env_config = try_load_config!(self.configs.db_config).merkle_tree;
         let operations_manager_env_config =
             try_load_config!(self.configs.operations_manager_config);
+        let state_keeper_env_config = try_load_config!(self.configs.state_keeper_config);
         let metadata_calculator_config = MetadataCalculatorConfig::for_main_node(
             &merkle_tree_env_config,
             &operations_manager_env_config,
+            &state_keeper_env_config,
         );
         let mut layer = MetadataCalculatorLayer::new(metadata_calculator_config);
         if with_tree_api {
```

Some generated files are not rendered by default.

28 changes: 28 additions & 0 deletions core/lib/dal/src/vm_runner_dal.rs

```diff
@@ -84,4 +84,32 @@ impl VmRunnerDal<'_, '_> {
             .await?;
         Ok(())
     }
+
+    pub async fn delete_protective_reads(
+        &mut self,
+        last_batch_to_keep: L1BatchNumber,
+    ) -> DalResult<()> {
+        self.delete_protective_reads_inner(Some(last_batch_to_keep))
+            .await
+    }
+
+    async fn delete_protective_reads_inner(
+        &mut self,
+        last_batch_to_keep: Option<L1BatchNumber>,
+    ) -> DalResult<()> {
+        let l1_batch_number = last_batch_to_keep.map_or(-1, |number| i64::from(number.0));
+        sqlx::query!(
+            r#"
+            DELETE FROM vm_runner_protective_reads
+            WHERE
+                l1_batch_number > $1
+            "#,
+            l1_batch_number
+        )
+        .instrument("delete_protective_reads")
+        .with_arg("l1_batch_number", &l1_batch_number)
+        .execute(self.storage)
+        .await?;
+        Ok(())
+    }
 }
```
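The `map_or(-1, ...)` sentinel is what makes the private helper reusable: with `None`, the predicate becomes `l1_batch_number > -1`, which matches every row (L1 batch numbers are non-negative). A minimal sketch of calling the new public method from a rollback path — the `connection()` and `vm_runner_dal()` accessors are assumptions based on the surrounding DAL conventions, not part of this diff:

```rust
use zksync_dal::{ConnectionPool, Core, CoreDal};
use zksync_types::L1BatchNumber;

/// Drops protective reads for every batch after `last_batch_to_keep`, e.g.
/// when rolling the node back so the VM runner can regenerate them.
async fn rollback_protective_reads(
    pool: &ConnectionPool<Core>,
    last_batch_to_keep: L1BatchNumber,
) -> anyhow::Result<()> {
    let mut conn = pool.connection().await?;
    // Issues `DELETE FROM vm_runner_protective_reads WHERE l1_batch_number > $1`.
    conn.vm_runner_dal()
        .delete_protective_reads(last_batch_to_keep)
        .await?;
    Ok(())
}
```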
13 changes: 10 additions & 3 deletions core/node/consensus/src/testonly.rs

```diff
@@ -7,7 +7,7 @@ use zksync_concurrency::{ctx, error::Wrap as _, scope, sync, time};
 use zksync_config::{
     configs,
     configs::{
-        chain::OperationsManagerConfig,
+        chain::{OperationsManagerConfig, StateKeeperConfig},
         consensus as config,
         database::{MerkleTreeConfig, MerkleTreeMode},
     },
@@ -166,8 +166,15 @@ impl StateKeeper {
         let operation_manager_config = OperationsManagerConfig {
             delay_interval: 100, //`100ms`
         };
-        let config =
-            MetadataCalculatorConfig::for_main_node(&merkle_tree_config, &operation_manager_config);
+        let state_keeper_config = StateKeeperConfig {
+            protective_reads_persistence_enabled: true,
+            ..Default::default()
+        };
+        let config = MetadataCalculatorConfig::for_main_node(
+            &merkle_tree_config,
+            &operation_manager_config,
+            &state_keeper_config,
+        );
         let metadata_calculator = MetadataCalculator::new(config, None, pool.0.clone())
             .await
             .context("MetadataCalculator::new()")?;
```
4 changes: 2 additions & 2 deletions core/node/metadata_calculator/src/api_server/tests.rs

```diff
@@ -17,7 +17,7 @@ use crate::tests::{gen_storage_logs, reset_db_state, run_calculator, setup_calcu
 async fn merkle_tree_api() {
     let pool = ConnectionPool::<Core>::test_pool().await;
     let temp_dir = TempDir::new().expect("failed get temporary directory for RocksDB");
-    let (calculator, _) = setup_calculator(temp_dir.path(), pool.clone()).await;
+    let (calculator, _) = setup_calculator(temp_dir.path(), pool.clone(), true).await;
     let api_addr = (Ipv4Addr::LOCALHOST, 0).into();

     reset_db_state(&pool, 5).await;
@@ -114,7 +114,7 @@ async fn api_client_unparesable_response_error() {
 async fn local_merkle_tree_client() {
     let pool = ConnectionPool::<Core>::test_pool().await;
     let temp_dir = TempDir::new().expect("failed get temporary directory for RocksDB");
-    let (calculator, _) = setup_calculator(temp_dir.path(), pool.clone()).await;
+    let (calculator, _) = setup_calculator(temp_dir.path(), pool.clone(), true).await;

     reset_db_state(&pool, 5).await;
     let tree_reader = calculator.tree_reader();
```
14 changes: 12 additions & 2 deletions core/node/metadata_calculator/src/lib.rs

```diff
@@ -10,7 +10,7 @@ use std::{
 use anyhow::Context as _;
 use tokio::sync::{oneshot, watch};
 use zksync_config::configs::{
-    chain::OperationsManagerConfig,
+    chain::{OperationsManagerConfig, StateKeeperConfig},
     database::{MerkleTreeConfig, MerkleTreeMode},
 };
 use zksync_dal::{ConnectionPool, Core};
@@ -89,6 +89,8 @@ pub struct MetadataCalculatorConfig {
     pub memtable_capacity: usize,
     /// Timeout to wait for the Merkle tree database to run compaction on stalled writes.
     pub stalled_writes_timeout: Duration,
+    /// Whether state keeper writes protective reads when it seals a batch.
+    pub sealed_batches_have_protective_reads: bool,
     /// Configuration specific to the Merkle tree recovery.
     pub recovery: MetadataCalculatorRecoveryConfig,
 }
@@ -97,6 +99,7 @@ impl MetadataCalculatorConfig {
     pub fn for_main_node(
         merkle_tree_config: &MerkleTreeConfig,
         operation_config: &OperationsManagerConfig,
+        state_keeper_config: &StateKeeperConfig,
     ) -> Self {
         Self {
             db_path: merkle_tree_config.path.clone(),
@@ -109,6 +112,8 @@ impl MetadataCalculatorConfig {
             include_indices_and_filters_in_block_cache: false,
             memtable_capacity: merkle_tree_config.memtable_capacity(),
             stalled_writes_timeout: merkle_tree_config.stalled_writes_timeout(),
+            sealed_batches_have_protective_reads: state_keeper_config
+                .protective_reads_persistence_enabled,
             // The main node isn't supposed to be recovered yet, so this value doesn't matter much
             recovery: MetadataCalculatorRecoveryConfig::default(),
         }
@@ -248,7 +253,12 @@ impl MetadataCalculator {
         self.health_updater
             .update(MerkleTreeHealth::MainLoop(tree_info).into());

-        let updater = TreeUpdater::new(tree, self.max_l1_batches_per_iter, self.object_store);
+        let updater = TreeUpdater::new(
+            tree,
+            self.max_l1_batches_per_iter,
+            self.object_store,
+            self.config.sealed_batches_have_protective_reads,
+        );
         updater
             .loop_updating_tree(self.delayer, &self.pool, stop_receiver)
             .await
```
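With the extra parameter, constructing the config by hand looks like this — a minimal sketch assuming the `zksync_metadata_calculator` crate name and a `Default` impl for `MerkleTreeConfig` (both assumptions; the values are placeholders, not from this diff):

```rust
use zksync_config::configs::{
    chain::{OperationsManagerConfig, StateKeeperConfig},
    database::MerkleTreeConfig,
};
use zksync_metadata_calculator::MetadataCalculatorConfig;

fn example_config() -> MetadataCalculatorConfig {
    MetadataCalculatorConfig::for_main_node(
        // Assumed `Default` impl; real callers load this from env config.
        &MerkleTreeConfig::default(),
        &OperationsManagerConfig { delay_interval: 100 },
        &StateKeeperConfig {
            // Tells the tree updater that sealed batches already carry
            // protective reads persisted by the state keeper.
            protective_reads_persistence_enabled: true,
            ..Default::default()
        },
    )
}
```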
8 changes: 6 additions & 2 deletions core/node/metadata_calculator/src/recovery/tests.rs

```diff
@@ -7,7 +7,7 @@ use tempfile::TempDir;
 use test_casing::{test_casing, Product};
 use tokio::sync::mpsc;
 use zksync_config::configs::{
-    chain::OperationsManagerConfig,
+    chain::{OperationsManagerConfig, StateKeeperConfig},
     database::{MerkleTreeConfig, MerkleTreeMode},
 };
 use zksync_dal::CoreDal;
@@ -113,7 +113,7 @@ async fn prepare_recovery_snapshot_with_genesis(
     drop(storage);

     // Ensure that metadata for L1 batch #1 is present in the DB.
-    let (calculator, _) = setup_calculator(&temp_dir.path().join("init"), pool).await;
+    let (calculator, _) = setup_calculator(&temp_dir.path().join("init"), pool, true).await;
     let l1_batch_root_hash = run_calculator(calculator).await;

     SnapshotRecoveryStatus {
@@ -306,6 +306,10 @@ async fn entire_recovery_workflow(case: RecoveryWorkflowCase) {
     let calculator_config = MetadataCalculatorConfig::for_main_node(
         &merkle_tree_config,
         &OperationsManagerConfig { delay_interval: 50 },
+        &StateKeeperConfig {
+            protective_reads_persistence_enabled: true,
+            ..Default::default()
+        },
     );
     let mut calculator = MetadataCalculator::new(calculator_config, None, pool.clone())
         .await
```