Skip to content
This repository has been archived by the owner on Aug 28, 2024. It is now read-only.

Commit

Permalink
feat(en): file based configs for en (matter-labs#2110)
Browse files Browse the repository at this point in the history
## What ❔

Allow the EN to work with the file-based config system.
It also brings new config-system-based functionality to the main node that was previously lacking, such as custom API namespaces.

## Why ❔

Part of the refactoring to the new config system

## Checklist

<!-- Check your PR fulfills the following items. -->
<!-- For draft PRs check the boxes as you complete them. -->

- [x] PR title corresponds to the body of PR (we generate changelog
entries from PRs).
- [x] Tests for the changes have been added / updated.
- [x] Documentation comments have been added / updated.
- [x] Code has been formatted via `zk fmt` and `zk lint`.
- [x] Spellcheck has been run via `zk spellcheck`.

---------

Signed-off-by: Danil <[email protected]>
Co-authored-by: Matías Ignacio González <[email protected]>
  • Loading branch information
Deniallugo and matias-gonz authored Jun 25, 2024
1 parent c4f7b92 commit 7940fa3
Show file tree
Hide file tree
Showing 42 changed files with 986 additions and 16 deletions.
1 change: 1 addition & 0 deletions Cargo.lock

Some generated files are not rendered by default. Learn more about how customized files appear on GitHub.

396 changes: 394 additions & 2 deletions core/bin/external_node/src/config/mod.rs

Large diffs are not rendered by default.

35 changes: 35 additions & 0 deletions core/bin/external_node/src/config/observability.rs
Original file line number Diff line number Diff line change
Expand Up @@ -2,6 +2,7 @@ use std::{collections::HashMap, time::Duration};

use anyhow::Context as _;
use serde::Deserialize;
use zksync_config::configs::GeneralConfig;
use zksync_vlog::{prometheus::PrometheusExporterConfig, LogFormat};

use super::{ConfigurationSource, Environment};
Expand Down Expand Up @@ -97,4 +98,38 @@ impl ObservabilityENConfig {
}
Ok(guard)
}

/// Builds the observability config for the EN from the file-based `GeneralConfig`.
///
/// Missing sections are tolerated: an absent `observability` section yields no
/// Sentry settings and the default log format; an absent `prometheus_config`
/// section yields no Prometheus port/pushgateway and a zero push interval.
///
/// # Errors
/// Returns an error if the configured log format string fails to parse.
pub(crate) fn from_configs(general_config: &GeneralConfig) -> anyhow::Result<Self> {
    let (sentry_url, sentry_environment, log_format) = match general_config.observability.as_ref() {
        Some(observability) => (
            observability.sentry_url.clone(),
            observability.sentry_environment.clone(),
            observability
                .log_format
                .parse()
                .context("Invalid log format")?,
        ),
        None => (None, None, LogFormat::default()),
    };
    let (prometheus_port, prometheus_pushgateway_url, prometheus_push_interval_ms) =
        match general_config.prometheus_config.as_ref() {
            Some(prometheus) => (
                Some(prometheus.listener_port),
                Some(prometheus.pushgateway_url.clone()),
                prometheus.push_interval_ms.unwrap_or_default(),
            ),
            None => (None, None, 0),
        };
    Ok(Self {
        prometheus_port,
        prometheus_pushgateway_url,
        prometheus_push_interval_ms,
        sentry_url,
        sentry_environment,
        log_format,
    })
}
}
29 changes: 28 additions & 1 deletion core/bin/external_node/src/main.rs
Original file line number Diff line number Diff line change
Expand Up @@ -710,6 +710,21 @@ struct Cli {
/// Comma-separated list of components to launch.
#[arg(long, default_value = "all")]
components: ComponentsToRun,
/// Path to the yaml config. If set, it will be used instead of env vars.
#[arg(
long,
requires = "secrets_path",
requires = "external_node_config_path"
)]
config_path: Option<std::path::PathBuf>,
/// Path to the yaml with secrets. If set, it will be used instead of env vars.
#[arg(long, requires = "config_path", requires = "external_node_config_path")]
secrets_path: Option<std::path::PathBuf>,
/// Path to the yaml with external node specific configuration. If set, it will be used instead of env vars.
#[arg(long, requires = "config_path", requires = "secrets_path")]
external_node_config_path: Option<std::path::PathBuf>,
/// Path to the yaml with consensus.
consensus_path: Option<std::path::PathBuf>,

/// Run the node using the node framework.
#[arg(long)]
Expand Down Expand Up @@ -770,7 +785,19 @@ async fn main() -> anyhow::Result<()> {
// Initial setup.
let opt = Cli::parse();

let mut config = ExternalNodeConfig::new().context("Failed to load node configuration")?;
let mut config = if let Some(config_path) = opt.config_path.clone() {
let secrets_path = opt.secrets_path.clone().unwrap();
let external_node_config_path = opt.external_node_config_path.clone().unwrap();
ExternalNodeConfig::from_files(
config_path,
external_node_config_path,
secrets_path,
opt.consensus_path.clone(),
)?
} else {
ExternalNodeConfig::new().context("Failed to load node configuration")?
};

if !opt.enable_consensus {
config.consensus = None;
}
Expand Down
8 changes: 8 additions & 0 deletions core/bin/external_node/src/tests.rs
Original file line number Diff line number Diff line change
Expand Up @@ -157,6 +157,10 @@ async fn external_node_basics(components_str: &'static str) {
let opt = Cli {
enable_consensus: false,
components,
config_path: None,
secrets_path: None,
external_node_config_path: None,
consensus_path: None,
use_node_framework: false,
};
let mut config = ExternalNodeConfig::mock(&temp_dir, &connection_pool);
Expand Down Expand Up @@ -266,6 +270,10 @@ async fn node_reacts_to_stop_signal_during_initial_reorg_detection() {
let opt = Cli {
enable_consensus: false,
components: "core".parse().unwrap(),
config_path: None,
secrets_path: None,
external_node_config_path: None,
consensus_path: None,
use_node_framework: false,
};
let mut config = ExternalNodeConfig::mock(&temp_dir, &connection_pool);
Expand Down
3 changes: 3 additions & 0 deletions core/bin/zksync_server/src/main.rs
Original file line number Diff line number Diff line change
Expand Up @@ -270,5 +270,8 @@ fn load_env_config() -> anyhow::Result<TempConfigStore> {
snapshot_creator: SnapshotsCreatorConfig::from_env().ok(),
protective_reads_writer_config: ProtectiveReadsWriterConfig::from_env().ok(),
core_object_store: ObjectStoreConfig::from_env().ok(),
commitment_generator: None,
pruning: None,
snapshot_recovery: None,
})
}
10 changes: 9 additions & 1 deletion core/bin/zksync_server/src/node_builder.rs
Original file line number Diff line number Diff line change
Expand Up @@ -329,7 +329,14 @@ impl MainNodeBuilder {
let circuit_breaker_config = try_load_config!(self.configs.circuit_breaker_config);
let with_debug_namespace = state_keeper_config.save_call_traces;

let mut namespaces = Namespace::DEFAULT.to_vec();
let mut namespaces = if let Some(namespaces) = &rpc_config.api_namespaces {
namespaces
.iter()
.map(|a| a.parse())
.collect::<Result<_, _>>()?
} else {
Namespace::DEFAULT.to_vec()
};
if with_debug_namespace {
namespaces.push(Namespace::Debug)
}
Expand All @@ -345,6 +352,7 @@ impl MainNodeBuilder {
rpc_config.websocket_requests_per_minute_limit(),
),
replication_lag_limit: circuit_breaker_config.replication_lag_limit(),
with_extended_tracing: rpc_config.extended_api_tracing,
..Default::default()
};
self.node.add_layer(Web3ServerLayer::ws(
Expand Down
9 changes: 9 additions & 0 deletions core/lib/config/src/configs/api.rs
Original file line number Diff line number Diff line change
Expand Up @@ -213,6 +213,13 @@ pub struct Web3JsonRpcConfig {
/// (additionally to natively bridged tokens).
#[serde(default)]
pub whitelisted_tokens_for_aa: Vec<Address>,
/// Enabled JSON RPC API namespaces. If not set, all namespaces will be available
#[serde(default)]
pub api_namespaces: Option<Vec<String>>,
/// Enables extended tracing of RPC calls. This may negatively impact performance for nodes under high load
/// (hundreds or thousands RPS).
#[serde(default)]
pub extended_api_tracing: bool,
}

impl Web3JsonRpcConfig {
Expand Down Expand Up @@ -251,6 +258,8 @@ impl Web3JsonRpcConfig {
mempool_cache_size: Default::default(),
tree_api_url: None,
whitelisted_tokens_for_aa: Default::default(),
api_namespaces: None,
extended_api_tracing: false,
}
}

Expand Down
10 changes: 10 additions & 0 deletions core/lib/config/src/configs/commitment_generator.rs
Original file line number Diff line number Diff line change
@@ -0,0 +1,10 @@
use std::num::NonZeroU32;

use serde::Deserialize;

/// Configuration for the commitment generator component.
#[derive(Debug, Deserialize, Clone, PartialEq)]
pub struct CommitmentGeneratorConfig {
    /// Maximum degree of parallelism during commitment generation, i.e., the maximum number of L1 batches being processed in parallel.
    /// If not specified, commitment generator will use a value roughly equal to the number of CPU cores with some clamping applied.
    pub max_parallelism: NonZeroU32,
}
19 changes: 19 additions & 0 deletions core/lib/config/src/configs/en_config.rs
Original file line number Diff line number Diff line change
@@ -0,0 +1,19 @@
use std::num::NonZeroUsize;

use serde::Deserialize;
use zksync_basic_types::{
commitment::L1BatchCommitmentMode, url::SensitiveUrl, L1ChainId, L2ChainId,
};

/// Temporary config for initializing external node, will be completely replaced by consensus config later
/// Temporary config for initializing external node, will be completely replaced by consensus config later
#[derive(Debug, Clone, PartialEq, Deserialize)]
pub struct ENConfig {
    // Genesis
    /// Chain ID of the L2 network served by this node.
    pub l2_chain_id: L2ChainId,
    /// Chain ID of the underlying L1 network.
    pub l1_chain_id: L1ChainId,
    /// Mode used when generating L1 batch commit data.
    pub l1_batch_commit_data_generator_mode: L1BatchCommitmentMode,

    // Main node configuration
    /// URL of the main node to sync from (wrapped in `SensitiveUrl`, which
    /// presumably hides it from debug output — confirm against the type's impl).
    pub main_node_url: SensitiveUrl,
    /// Optional limit on requests per second sent to the main node.
    /// NOTE(review): behavior when unset is decided by the caller — not visible here.
    pub main_node_rate_limit_rps: Option<NonZeroUsize>,
}
27 changes: 27 additions & 0 deletions core/lib/config/src/configs/experimental.rs
Original file line number Diff line number Diff line change
Expand Up @@ -12,6 +12,21 @@ pub struct ExperimentalDBConfig {
/// Maximum number of files concurrently opened by state keeper cache RocksDB. Useful to fit into OS limits; can be used
/// as a rudimentary way to control RAM usage of the cache.
pub state_keeper_db_max_open_files: Option<NonZeroU32>,
/// Configures whether to persist protective reads when persisting L1 batches in the state keeper.
/// Protective reads are never required by full nodes so far, not until such a node runs a full Merkle tree
/// (presumably, to participate in L1 batch proving).
/// By default, set to `true` as a temporary safety measure.
#[serde(default = "ExperimentalDBConfig::default_protective_reads_persistence_enabled")]
pub protective_reads_persistence_enabled: bool,
// Merkle tree config
/// Processing delay between processing L1 batches in the Merkle tree.
#[serde(default = "ExperimentalDBConfig::default_merkle_tree_processing_delay_ms")]
pub processing_delay_ms: u64,
/// If specified, RocksDB indices and Bloom filters will be managed by the block cache, rather than
/// being loaded entirely into RAM on the RocksDB initialization. The block cache capacity should be increased
/// correspondingly; otherwise, RocksDB performance can significantly degrade.
#[serde(default)]
pub include_indices_and_filters_in_block_cache: bool,
}

impl Default for ExperimentalDBConfig {
Expand All @@ -20,6 +35,10 @@ impl Default for ExperimentalDBConfig {
state_keeper_db_block_cache_capacity_mb:
Self::default_state_keeper_db_block_cache_capacity_mb(),
state_keeper_db_max_open_files: None,
protective_reads_persistence_enabled:
Self::default_protective_reads_persistence_enabled(),
processing_delay_ms: Self::default_merkle_tree_processing_delay_ms(),
include_indices_and_filters_in_block_cache: false,
}
}
}
Expand All @@ -32,4 +51,12 @@ impl ExperimentalDBConfig {
pub fn state_keeper_db_block_cache_capacity(&self) -> usize {
self.state_keeper_db_block_cache_capacity_mb * super::BYTES_IN_MEGABYTE
}

const fn default_protective_reads_persistence_enabled() -> bool {
true
}

const fn default_merkle_tree_processing_delay_ms() -> u64 {
100
}
}
11 changes: 8 additions & 3 deletions core/lib/config/src/configs/general.rs
Original file line number Diff line number Diff line change
Expand Up @@ -3,10 +3,12 @@ use crate::{
chain::{CircuitBreakerConfig, MempoolConfig, OperationsManagerConfig, StateKeeperConfig},
fri_prover_group::FriProverGroupConfig,
house_keeper::HouseKeeperConfig,
pruning::PruningConfig,
snapshot_recovery::SnapshotRecoveryConfig,
vm_runner::ProtectiveReadsWriterConfig,
FriProofCompressorConfig, FriProverConfig, FriProverGatewayConfig,
FriWitnessGeneratorConfig, FriWitnessVectorGeneratorConfig, ObservabilityConfig,
PrometheusConfig, ProofDataHandlerConfig,
CommitmentGeneratorConfig, FriProofCompressorConfig, FriProverConfig,
FriProverGatewayConfig, FriWitnessGeneratorConfig, FriWitnessVectorGeneratorConfig,
ObservabilityConfig, PrometheusConfig, ProofDataHandlerConfig,
},
ApiConfig, ContractVerifierConfig, DBConfig, EthConfig, ObjectStoreConfig, PostgresConfig,
SnapshotsCreatorConfig,
Expand Down Expand Up @@ -35,5 +37,8 @@ pub struct GeneralConfig {
pub snapshot_creator: Option<SnapshotsCreatorConfig>,
pub observability: Option<ObservabilityConfig>,
pub protective_reads_writer_config: Option<ProtectiveReadsWriterConfig>,
pub commitment_generator: Option<CommitmentGeneratorConfig>,
pub snapshot_recovery: Option<SnapshotRecoveryConfig>,
pub pruning: Option<PruningConfig>,
pub core_object_store: Option<ObjectStoreConfig>,
}
7 changes: 7 additions & 0 deletions core/lib/config/src/configs/mod.rs
Original file line number Diff line number Diff line change
@@ -1,6 +1,7 @@
// Public re-exports
pub use self::{
api::ApiConfig,
commitment_generator::CommitmentGeneratorConfig,
contract_verifier::ContractVerifierConfig,
contracts::{ContractsConfig, EcosystemContracts},
database::{DBConfig, PostgresConfig},
Expand All @@ -17,18 +18,22 @@ pub use self::{
object_store::ObjectStoreConfig,
observability::{ObservabilityConfig, OpentelemetryConfig},
proof_data_handler::ProofDataHandlerConfig,
pruning::PruningConfig,
secrets::{DatabaseSecrets, L1Secrets, Secrets},
snapshot_recovery::SnapshotRecoveryConfig,
snapshots_creator::SnapshotsCreatorConfig,
utils::PrometheusConfig,
vm_runner::ProtectiveReadsWriterConfig,
};

pub mod api;
pub mod chain;
mod commitment_generator;
pub mod consensus;
pub mod contract_verifier;
pub mod contracts;
pub mod database;
pub mod en_config;
pub mod eth_sender;
pub mod eth_watch;
mod experimental;
Expand All @@ -44,7 +49,9 @@ pub mod house_keeper;
pub mod object_store;
pub mod observability;
pub mod proof_data_handler;
pub mod pruning;
pub mod secrets;
pub mod snapshot_recovery;
pub mod snapshots_creator;
pub mod utils;
pub mod vm_runner;
Expand Down
19 changes: 19 additions & 0 deletions core/lib/config/src/configs/pruning.rs
Original file line number Diff line number Diff line change
@@ -0,0 +1,19 @@
use std::num::NonZeroU64;

use serde::Deserialize;

/// Configuration for pruning historical node data from Postgres.
#[derive(Debug, Clone, PartialEq, Deserialize)]
pub struct PruningConfig {
    /// Whether pruning is enabled.
    pub enabled: bool,
    /// Number of L1 batches pruned at a time.
    pub chunk_size: Option<u32>,
    /// Delta between soft- and hard-removing data from Postgres. Should be reasonably large (order of 60 seconds).
    /// The default value is 60 seconds.
    pub removal_delay_sec: Option<NonZeroU64>,
    /// If set, L1 batches will be pruned after the batch timestamp is this old (in seconds). Note that an L1 batch
    /// may be temporarily retained for other reasons; e.g., a batch cannot be pruned until it is executed on L1,
    /// which happens roughly 24 hours after its generation on the mainnet. Thus, in practice this value can specify
    /// the retention period greater than that implicitly imposed by other criteria (e.g., 7 or 30 days).
    /// If set to 0, L1 batches will not be retained based on their timestamp. The default value is 1 hour.
    pub data_retention_sec: Option<u64>,
}
44 changes: 44 additions & 0 deletions core/lib/config/src/configs/snapshot_recovery.rs
Original file line number Diff line number Diff line change
@@ -0,0 +1,44 @@
use std::num::NonZeroUsize;

use serde::Deserialize;
use zksync_basic_types::L1BatchNumber;

use crate::ObjectStoreConfig;

/// Configuration for recovering the Merkle tree from a snapshot.
#[derive(Debug, Clone, PartialEq, Deserialize, Default)]
pub struct TreeRecoveryConfig {
    /// Approximate chunk size (measured in the number of entries) to recover in a single iteration.
    /// Reasonable values are order of 100,000 (meaning an iteration takes several seconds).
    ///
    /// **Important.** This value cannot be changed in the middle of tree recovery (i.e., if a node is stopped in the middle
    /// of recovery and then restarted with a different config).
    pub chunk_size: Option<u64>,
    /// Buffer capacity for parallel persistence operations. Should be reasonably small since larger buffer means more RAM usage;
    /// buffer elements are persisted tree chunks. OTOH, small buffer can lead to persistence parallelization being inefficient.
    ///
    /// If not set, parallel persistence will be disabled.
    pub parallel_persistence_buffer: Option<NonZeroUsize>,
}

/// Configuration for recovering Postgres state from a snapshot.
#[derive(Debug, Clone, PartialEq, Deserialize, Default)]
pub struct PostgresRecoveryConfig {
    /// Maximum concurrency factor for the concurrent parts of snapshot recovery for Postgres. It may be useful to
    /// reduce this factor to about 5 if snapshot recovery overloads I/O capacity of the node. Conversely,
    /// if I/O capacity of your infra is high, you may increase concurrency to speed up Postgres recovery.
    pub max_concurrency: Option<NonZeroUsize>,
}

/// Top-level configuration for application-level snapshot recovery, combining
/// tree recovery, Postgres recovery, and the object store holding the snapshot.
#[derive(Debug, Clone, PartialEq, Deserialize)]
pub struct SnapshotRecoveryConfig {
    /// Enables application-level snapshot recovery. Required to start a node that was recovered from a snapshot,
    /// or to initialize a node from a snapshot. Has no effect if a node that was initialized from a Postgres dump
    /// or was synced from genesis.
    ///
    /// This is an experimental and incomplete feature; do not use unless you know what you're doing.
    pub enabled: bool,
    /// L1 batch number of the snapshot to use during recovery. Specifying this parameter is mostly useful for testing.
    pub l1_batch: Option<L1BatchNumber>,
    /// Merkle tree recovery settings.
    pub tree: TreeRecoveryConfig,
    /// Postgres recovery settings.
    pub postgres: PostgresRecoveryConfig,
    /// Object store to fetch the snapshot from, if it differs from the node-wide one.
    /// NOTE(review): fallback behavior when `None` is decided by the consumer — not visible here.
    pub object_store: Option<ObjectStoreConfig>,
}
Loading

0 comments on commit 7940fa3

Please sign in to comment.