diff --git a/Cargo.lock b/Cargo.lock
index c2398a928534..a537ea6c4f8f 100644
--- a/Cargo.lock
+++ b/Cargo.lock
@@ -8465,6 +8465,7 @@ dependencies = [
  "rand 0.8.5",
  "serde",
  "serde_json",
+ "strum",
  "test-casing",
  "thiserror",
  "thread_local",
diff --git a/core/bin/external_node/src/config/mod.rs b/core/bin/external_node/src/config/mod.rs
index b47ae3f8886e..35750cfa4e7d 100644
--- a/core/bin/external_node/src/config/mod.rs
+++ b/core/bin/external_node/src/config/mod.rs
@@ -2,6 +2,7 @@ use std::{
     env,
     ffi::OsString,
     num::{NonZeroU32, NonZeroU64, NonZeroUsize},
+    path::PathBuf,
     time::Duration,
 };
 
@@ -11,10 +12,12 @@ use zksync_config::{
     configs::{
         api::{MaxResponseSize, MaxResponseSizeOverrides},
         consensus::{ConsensusConfig, ConsensusSecrets},
+        en_config::ENConfig,
+        GeneralConfig, Secrets,
     },
     ObjectStoreConfig,
 };
-use zksync_core_leftovers::temp_config_store::decode_yaml_repr;
+use zksync_core_leftovers::temp_config_store::{decode_yaml_repr, read_yaml_repr};
 #[cfg(test)]
 use zksync_dal::{ConnectionPool, Core};
 use zksync_metadata_calculator::MetadataCalculatorRecoveryConfig;
@@ -41,6 +44,32 @@ pub(crate) mod observability;
 #[cfg(test)]
 mod tests;
 
+macro_rules! load_optional_config_or_default {
+    ($config:expr, $($name:ident).+, $default:ident) => {
+        $config
+            .as_ref()
+            .map(|a| a.$($name).+.map(|a| a.try_into())).flatten().transpose()?
+            .unwrap_or_else(Self::$default)
+    };
+}
+
+macro_rules! load_config_or_default {
+    ($config:expr, $($name:ident).+, $default:ident) => {
+        $config
+            .as_ref()
+            .map(|a| a.$($name).+.clone().try_into()).transpose()?
+            .unwrap_or_else(Self::$default)
+    };
+}
+
+macro_rules! load_config {
+    ($config:expr, $($name:ident).+) => {
+        $config
+            .as_ref()
+            .map(|a| a.$($name).+.clone().map(|a| a.try_into())).flatten().transpose()?
+    };
+}
+
 const BYTES_IN_MEGABYTE: usize = 1_024 * 1_024;
 
 /// Encapsulation of configuration source with a mock implementation used in tests.
@@ -407,12 +436,232 @@ pub(crate) struct OptionalENConfig {
     /// may be temporarily retained for other reasons; e.g., a batch cannot be pruned until it is executed on L1,
     /// which happens roughly 24 hours after its generation on the mainnet. Thus, in practice this value can specify
     /// the retention period greater than that implicitly imposed by other criteria (e.g., 7 or 30 days).
-    /// If set to 0, L1 batches will not be retained based on their timestamp. The default value is 1 hour.
+    /// If set to 0, L1 batches will not be retained based on their timestamp. The default value is 7 days.
     #[serde(default = "OptionalENConfig::default_pruning_data_retention_sec")]
     pruning_data_retention_sec: u64,
 }
 
 impl OptionalENConfig {
+    fn from_configs(general_config: &GeneralConfig, enconfig: &ENConfig) -> anyhow::Result<Self> {
+        let api_namespaces = load_config!(general_config.api_config, web3_json_rpc.api_namespaces)
+            .map(|a: Vec<String>| a.iter().map(|a| a.parse()).collect::<Result<_, _>>())
+            .transpose()?;
+
+        Ok(OptionalENConfig {
+            filters_limit: load_optional_config_or_default!(
+                general_config.api_config,
+                web3_json_rpc.filters_limit,
+                default_filters_limit
+            ),
+            subscriptions_limit: load_optional_config_or_default!(
+                general_config.api_config,
+                web3_json_rpc.subscriptions_limit,
+                default_subscriptions_limit
+            ),
+            req_entities_limit: load_optional_config_or_default!(
+                general_config.api_config,
+                web3_json_rpc.req_entities_limit,
+                default_req_entities_limit
+            ),
+            max_tx_size_bytes: load_config_or_default!(
+                general_config.api_config,
+                web3_json_rpc.max_tx_size,
+                default_max_tx_size_bytes
+            ),
+            vm_execution_cache_misses_limit: load_config!(
+                general_config.api_config,
+                web3_json_rpc.vm_execution_cache_misses_limit
+            ),
+            fee_history_limit: load_optional_config_or_default!(
+                general_config.api_config,
+                web3_json_rpc.fee_history_limit,
+                default_fee_history_limit
+            ),
+            max_batch_request_size: load_optional_config_or_default!(
+                general_config.api_config,
+                web3_json_rpc.max_batch_request_size,
+                default_max_batch_request_size
+            ),
+            max_response_body_size_mb: load_optional_config_or_default!(
+                general_config.api_config,
+                web3_json_rpc.max_response_body_size_mb,
+                default_max_response_body_size_mb
+            ),
+            max_response_body_size_overrides_mb: load_config_or_default!(
+                general_config.api_config,
+                web3_json_rpc.max_response_body_size_overrides_mb,
+                default_max_response_body_size_overrides_mb
+            ),
+            pubsub_polling_interval_ms: load_optional_config_or_default!(
+                general_config.api_config,
+                web3_json_rpc.pubsub_polling_interval,
+                default_polling_interval
+            ),
+            max_nonce_ahead: load_config_or_default!(
+                general_config.api_config,
+                web3_json_rpc.max_nonce_ahead,
+                default_max_nonce_ahead
+            ),
+            vm_concurrency_limit: load_optional_config_or_default!(
+                general_config.api_config,
+                web3_json_rpc.vm_concurrency_limit,
+                default_vm_concurrency_limit
+            ),
+            factory_deps_cache_size_mb: load_optional_config_or_default!(
+                general_config.api_config,
+                web3_json_rpc.factory_deps_cache_size_mb,
+                default_factory_deps_cache_size_mb
+            ),
+            initial_writes_cache_size_mb: load_optional_config_or_default!(
+                general_config.api_config,
+                web3_json_rpc.initial_writes_cache_size_mb,
+                default_initial_writes_cache_size_mb
+            ),
+            latest_values_cache_size_mb: load_optional_config_or_default!(
+                general_config.api_config,
+                web3_json_rpc.latest_values_cache_size_mb,
+                default_latest_values_cache_size_mb
+            ),
+            filters_disabled: general_config
+                .api_config
+                .as_ref()
+                .map(|a| a.web3_json_rpc.filters_disabled)
+                .unwrap_or_default(),
+            mempool_cache_update_interval_ms: load_optional_config_or_default!(
+                general_config.api_config,
+                web3_json_rpc.mempool_cache_update_interval,
+                default_mempool_cache_update_interval_ms
+            ),
+            mempool_cache_size: load_optional_config_or_default!(
+                general_config.api_config,
+                web3_json_rpc.mempool_cache_size,
+                default_mempool_cache_size
+            ),
+
+            healthcheck_slow_time_limit_ms: load_config!(
+                general_config.api_config,
+                healthcheck.slow_time_limit_ms
+            ),
+            healthcheck_hard_time_limit_ms: load_config!(
+                general_config.api_config,
+                healthcheck.hard_time_limit_ms
+            ),
+            estimate_gas_scale_factor: load_config_or_default!(
+                general_config.api_config,
+                web3_json_rpc.estimate_gas_scale_factor,
+                default_estimate_gas_scale_factor
+            ),
+            estimate_gas_acceptable_overestimation: load_config_or_default!(
+                general_config.api_config,
+                web3_json_rpc.estimate_gas_acceptable_overestimation,
+                default_estimate_gas_acceptable_overestimation
+            ),
+            gas_price_scale_factor: load_config_or_default!(
+                general_config.api_config,
+                web3_json_rpc.gas_price_scale_factor,
+                default_gas_price_scale_factor
+            ),
+            merkle_tree_max_l1_batches_per_iter: load_config_or_default!(
+                general_config.db_config,
+                merkle_tree.max_l1_batches_per_iter,
+                default_merkle_tree_max_l1_batches_per_iter
+            ),
+            merkle_tree_max_open_files: load_config!(
+                general_config.db_config,
+                experimental.state_keeper_db_max_open_files
+            ),
+            merkle_tree_multi_get_chunk_size: load_config_or_default!(
+                general_config.db_config,
+                merkle_tree.multi_get_chunk_size,
+                default_merkle_tree_multi_get_chunk_size
+            ),
+            merkle_tree_block_cache_size_mb: load_config_or_default!(
+                general_config.db_config,
+                merkle_tree.block_cache_size_mb,
+                default_merkle_tree_block_cache_size_mb
+            ),
+            merkle_tree_memtable_capacity_mb: load_config_or_default!(
+                general_config.db_config,
+                merkle_tree.memtable_capacity_mb,
+                default_merkle_tree_memtable_capacity_mb
+            ),
+            merkle_tree_stalled_writes_timeout_sec: load_config_or_default!(
+                general_config.db_config,
+                merkle_tree.stalled_writes_timeout_sec,
+                default_merkle_tree_stalled_writes_timeout_sec
+            ),
+            database_long_connection_threshold_ms: load_config!(
+                general_config.postgres_config,
+                long_connection_threshold_ms
+            ),
+            database_slow_query_threshold_ms: load_config!(
+                general_config.postgres_config,
+                slow_query_threshold_ms
+            ),
+            l2_block_seal_queue_capacity: load_config_or_default!(
+                general_config.state_keeper_config,
+                l2_block_seal_queue_capacity,
+                default_l2_block_seal_queue_capacity
+            ),
+            l1_batch_commit_data_generator_mode: enconfig.l1_batch_commit_data_generator_mode,
+            snapshots_recovery_enabled: general_config
+                .snapshot_recovery
+                .as_ref()
+                .map(|a| a.enabled)
+                .unwrap_or_default(),
+            snapshots_recovery_postgres_max_concurrency: load_optional_config_or_default!(
+                general_config.snapshot_recovery,
+                postgres.max_concurrency,
+                default_snapshots_recovery_postgres_max_concurrency
+            ),
+            pruning_enabled: general_config
+                .pruning
+                .as_ref()
+                .map(|a| a.enabled)
+                .unwrap_or_default(),
+            pruning_chunk_size: load_optional_config_or_default!(
+                general_config.pruning,
+                chunk_size,
+                default_pruning_chunk_size
+            ),
+            pruning_removal_delay_sec: load_optional_config_or_default!(
+                general_config.pruning,
+                removal_delay_sec,
+                default_pruning_removal_delay_sec
+            ),
+            pruning_data_retention_sec: load_optional_config_or_default!(
+                general_config.pruning,
+                data_retention_sec,
+                default_pruning_data_retention_sec
+            ),
+            protective_reads_persistence_enabled: general_config
+                .db_config
+                .as_ref()
+                .map(|a| a.experimental.protective_reads_persistence_enabled)
+                .unwrap_or(true),
+            merkle_tree_processing_delay_ms: load_config_or_default!(
+                general_config.db_config,
+                experimental.processing_delay_ms,
+                default_merkle_tree_processing_delay_ms
+            ),
+            merkle_tree_include_indices_and_filters_in_block_cache: general_config
+                .db_config
+                .as_ref()
+                .map(|a| a.experimental.include_indices_and_filters_in_block_cache)
+                .unwrap_or_default(),
+            extended_rpc_tracing: load_config_or_default!(
+                general_config.api_config,
+                web3_json_rpc.extended_api_tracing,
+                default_extended_api_tracing
+            ),
+            main_node_rate_limit_rps: enconfig
+                .main_node_rate_limit_rps
+                .unwrap_or_else(Self::default_main_node_rate_limit_rps),
+            api_namespaces,
+            contracts_diamond_proxy_addr: None,
+        })
+    }
+
     const fn default_filters_limit() -> usize {
         10_000
     }
@@ -504,6 +753,10 @@ impl OptionalENConfig {
         10
     }
 
+    fn default_max_response_body_size_overrides_mb() -> MaxResponseSizeOverrides {
+        MaxResponseSizeOverrides::empty()
+    }
+
     const fn default_l2_block_seal_queue_capacity() -> usize {
         10
     }
@@ -674,6 +927,37 @@ impl RequiredENConfig {
             .context("could not load external node config")
     }
 
+    fn from_configs(
+        general: &GeneralConfig,
+        en_config: &ENConfig,
+        secrets: &Secrets,
+    ) -> anyhow::Result<Self> {
+        let api_config = general
+            .api_config
+            .as_ref()
+            .context("Api config is required")?;
+        let db_config = general
+            .db_config
+            .as_ref()
+            .context("Database config is required")?;
+        Ok(RequiredENConfig {
+            l1_chain_id: en_config.l1_chain_id,
+            l2_chain_id: en_config.l2_chain_id,
+            http_port: api_config.web3_json_rpc.http_port,
+            ws_port: api_config.web3_json_rpc.ws_port,
+            healthcheck_port: api_config.healthcheck.port,
+            eth_client_url: secrets
+                .l1
+                .as_ref()
+                .context("L1 secrets are required")?
+                .l1_rpc_url
+                .clone(),
+            main_node_url: en_config.main_node_url.clone(),
+            state_cache_path: db_config.state_keeper_db_path.clone(),
+            merkle_tree_path: db_config.merkle_tree.path.clone(),
+        })
+    }
+
     #[cfg(test)]
     fn mock(temp_dir: &tempfile::TempDir) -> Self {
         Self {
@@ -794,6 +1078,35 @@ impl ExperimentalENConfig {
     pub fn state_keeper_db_block_cache_capacity(&self) -> usize {
         self.state_keeper_db_block_cache_capacity_mb * BYTES_IN_MEGABYTE
     }
+
+    pub fn from_configs(general_config: &GeneralConfig) -> anyhow::Result<Self> {
+        Ok(Self {
+            state_keeper_db_block_cache_capacity_mb: load_config_or_default!(
+                general_config.db_config,
+                experimental.state_keeper_db_block_cache_capacity_mb,
+                default_state_keeper_db_block_cache_capacity_mb
+            ),
+
+            state_keeper_db_max_open_files: load_config!(
+                general_config.db_config,
+                experimental.state_keeper_db_max_open_files
+            ),
+            snapshots_recovery_l1_batch: load_config!(general_config.snapshot_recovery, l1_batch),
+            snapshots_recovery_tree_chunk_size: load_optional_config_or_default!(
+                general_config.snapshot_recovery,
+                tree.chunk_size,
+                default_snapshots_recovery_tree_chunk_size
+            ),
+            snapshots_recovery_tree_parallel_persistence_buffer: load_config!(
+                general_config.snapshot_recovery,
+                tree.parallel_persistence_buffer
+            ),
+            commitment_generator_max_parallelism: general_config
+                .commitment_generator
+                .as_ref()
+                .map(|a| a.max_parallelism),
+        })
+    }
 }
 
 pub(crate) fn read_consensus_secrets() -> anyhow::Result<Option<ConsensusSecrets>> {
@@ -832,11 +1145,32 @@ pub struct ApiComponentConfig {
     pub tree_api_remote_url: Option<String>,
 }
 
+impl ApiComponentConfig {
+    fn from_configs(general_config: &GeneralConfig) -> Self {
+        ApiComponentConfig {
+            tree_api_remote_url: general_config
+                .api_config
+                .as_ref()
+                .and_then(|a| a.web3_json_rpc.tree_api_url.clone()),
+        }
+    }
+}
+
 #[derive(Debug, Deserialize)]
 pub struct TreeComponentConfig {
     pub api_port: Option<u16>,
 }
 
+impl TreeComponentConfig {
+    fn from_configs(general_config: &GeneralConfig) -> Self {
+        let api_port = general_config
+            .api_config
+            .as_ref()
+            .map(|a| a.merkle_tree.port);
+        TreeComponentConfig { api_port }
+    }
+}
+
 /// External Node Config contains all the configuration required for the EN operation.
 /// It is split into three parts: required, optional and remote for easier navigation.
 #[derive(Debug)]
@@ -874,6 +1208,64 @@ impl ExternalNodeConfig<()> {
         })
     }
 
+    pub fn from_files(
+        general_config_path: PathBuf,
+        external_node_config_path: PathBuf,
+        secrets_configs_path: PathBuf,
+        consensus_config_path: Option<PathBuf>,
+    ) -> anyhow::Result<Self> {
+        let general_config =
+            read_yaml_repr::<zksync_protobuf_config::proto::general::GeneralConfig>(
+                general_config_path,
+            )
+            .context("failed decoding general YAML config")?;
+        let external_node_config =
+            read_yaml_repr::<zksync_protobuf_config::proto::en::ExternalNode>(
+                external_node_config_path,
+            )
+            .context("failed decoding external node YAML config")?;
+        let secrets_config =
+            read_yaml_repr::<zksync_protobuf_config::proto::secrets::Secrets>(secrets_configs_path)
+                .context("failed decoding secrets YAML config")?;
+
+        let consensus = consensus_config_path
+            .map(read_yaml_repr::<zksync_protobuf_config::proto::consensus::Config>)
+            .transpose()
+            .context("failed decoding consensus YAML config")?;
+
+        let required = RequiredENConfig::from_configs(
+            &general_config,
+            &external_node_config,
+            &secrets_config,
+        )?;
+        let optional = OptionalENConfig::from_configs(&general_config, &external_node_config)?;
+        let postgres = PostgresConfig {
+            database_url: secrets_config
+                .database
+                .as_ref()
+                .context("DB secrets is required")?
+                .server_url
+                .clone()
+                .context("Server url is required")?,
+            max_connections: general_config
+                .postgres_config
+                .as_ref()
+                .context("Postgres config is required")?
+                .max_connections()?,
+        };
+        let observability = ObservabilityENConfig::from_configs(&general_config)?;
+        let experimental = ExperimentalENConfig::from_configs(&general_config)?;
+
+        let api_component = ApiComponentConfig::from_configs(&general_config);
+        let tree_component = TreeComponentConfig::from_configs(&general_config);
+
+        Ok(Self {
+            required,
+            postgres,
+            optional,
+            observability,
+            experimental,
+            consensus,
+            api_component,
+            tree_component,
+            remote: (),
+        })
+    }
+
     /// Fetches contracts addresses from the main node, completing the configuration.
     pub async fn fetch_remote(
         self,
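
The three `load_*` macros above exist to flatten the `Option`-heavy navigation of `GeneralConfig` sections. A minimal self-contained sketch of the shape `load_optional_config_or_default!` expands to, using stand-in types (the real fields live in `zksync_config`):

    struct Api {
        filters_limit: Option<u32>,
    }
    struct General {
        api_config: Option<Api>,
    }

    fn default_filters_limit() -> usize {
        10_000
    }

    // Roughly what `load_optional_config_or_default!(general.api_config,
    // filters_limit, default_filters_limit)` does after expansion.
    fn resolve_filters_limit(general: &General) -> anyhow::Result<usize> {
        Ok(general
            .api_config
            .as_ref()
            .and_then(|a| a.filters_limit.map(usize::try_from))
            .transpose()?
            .unwrap_or_else(default_filters_limit))
    }
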
diff --git a/core/bin/external_node/src/config/observability.rs b/core/bin/external_node/src/config/observability.rs
index 34054dcd1d40..39b86b8f0452 100644
--- a/core/bin/external_node/src/config/observability.rs
+++ b/core/bin/external_node/src/config/observability.rs
@@ -2,6 +2,7 @@ use std::{collections::HashMap, time::Duration};
 
 use anyhow::Context as _;
 use serde::Deserialize;
+use zksync_config::configs::GeneralConfig;
 use zksync_vlog::{prometheus::PrometheusExporterConfig, LogFormat};
 
 use super::{ConfigurationSource, Environment};
@@ -97,4 +98,38 @@ impl ObservabilityENConfig {
         }
         Ok(guard)
     }
+
+    pub(crate) fn from_configs(general_config: &GeneralConfig) -> anyhow::Result<Self> {
+        let (sentry_url, sentry_environment, log_format) =
+            if let Some(observability) = general_config.observability.as_ref() {
+                (
+                    observability.sentry_url.clone(),
+                    observability.sentry_environment.clone(),
+                    observability
+                        .log_format
+                        .parse()
+                        .context("Invalid log format")?,
+                )
+            } else {
+                (None, None, LogFormat::default())
+            };
+        let (prometheus_port, prometheus_pushgateway_url, prometheus_push_interval_ms) =
+            if let Some(prometheus) = general_config.prometheus_config.as_ref() {
+                (
+                    Some(prometheus.listener_port),
+                    Some(prometheus.pushgateway_url.clone()),
+                    prometheus.push_interval_ms.unwrap_or_default(),
+                )
+            } else {
+                (None, None, 0)
+            };
+        Ok(Self {
+            prometheus_port,
+            prometheus_pushgateway_url,
+            prometheus_push_interval_ms,
+            sentry_url,
+            sentry_environment,
+            log_format,
+        })
+    }
 }
diff --git a/core/bin/external_node/src/main.rs b/core/bin/external_node/src/main.rs
index 25b6f81a6b5a..c54bdc1dab19 100644
--- a/core/bin/external_node/src/main.rs
+++ b/core/bin/external_node/src/main.rs
@@ -710,6 +710,21 @@ struct Cli {
     /// Comma-separated list of components to launch.
     #[arg(long, default_value = "all")]
     components: ComponentsToRun,
+    /// Path to the yaml config. If set, it will be used instead of env vars.
+    #[arg(
+        long,
+        requires = "secrets_path",
+        requires = "external_node_config_path"
+    )]
+    config_path: Option<std::path::PathBuf>,
+    /// Path to the yaml with secrets. If set, it will be used instead of env vars.
+    #[arg(long, requires = "config_path", requires = "external_node_config_path")]
+    secrets_path: Option<std::path::PathBuf>,
+    /// Path to the yaml with external node specific configuration. If set, it will be used instead of env vars.
+    #[arg(long, requires = "config_path", requires = "secrets_path")]
+    external_node_config_path: Option<std::path::PathBuf>,
+    /// Path to the yaml with consensus.
+    consensus_path: Option<std::path::PathBuf>,
 
     /// Run the node using the node framework.
     #[arg(long)]
@@ -770,7 +785,19 @@ async fn main() -> anyhow::Result<()> {
     // Initial setup.
     let opt = Cli::parse();
 
-    let mut config = ExternalNodeConfig::new().context("Failed to load node configuration")?;
+    let mut config = if let Some(config_path) = opt.config_path.clone() {
+        let secrets_path = opt.secrets_path.clone().unwrap();
+        let external_node_config_path = opt.external_node_config_path.clone().unwrap();
+        ExternalNodeConfig::from_files(
+            config_path,
+            external_node_config_path,
+            secrets_path,
+            opt.consensus_path.clone(),
+        )?
+    } else {
+        ExternalNodeConfig::new().context("Failed to load node configuration")?
+    };
+
     if !opt.enable_consensus {
         config.consensus = None;
     }
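
The three `--*-path` flags are tied together via clap's `requires`, so passing only one of them should be rejected at parse time, which is why the `unwrap()` calls in `main` are safe. A sketch of that behavior in isolation (the real `Cli` struct is private to the binary, so this mirrors just the new fields):

    use clap::Parser; // clap 4.x derive API assumed

    #[derive(Debug, Parser)]
    struct PathArgs {
        #[arg(long, requires = "secrets_path", requires = "external_node_config_path")]
        config_path: Option<std::path::PathBuf>,
        #[arg(long, requires = "config_path", requires = "external_node_config_path")]
        secrets_path: Option<std::path::PathBuf>,
        #[arg(long, requires = "config_path", requires = "secrets_path")]
        external_node_config_path: Option<std::path::PathBuf>,
    }

    fn main() {
        // Only one path violates the `requires` constraints and fails to parse.
        assert!(PathArgs::try_parse_from(["en", "--config-path", "general.yaml"]).is_err());

        // All three together parse successfully.
        assert!(PathArgs::try_parse_from([
            "en",
            "--config-path", "general.yaml",
            "--secrets-path", "secrets.yaml",
            "--external-node-config-path", "external_node.yaml",
        ])
        .is_ok());
    }
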
diff --git a/core/bin/external_node/src/tests.rs b/core/bin/external_node/src/tests.rs
index a7b944f15714..6d3e8f278f32 100644
--- a/core/bin/external_node/src/tests.rs
+++ b/core/bin/external_node/src/tests.rs
@@ -157,6 +157,10 @@ async fn external_node_basics(components_str: &'static str) {
     let opt = Cli {
         enable_consensus: false,
         components,
+        config_path: None,
+        secrets_path: None,
+        external_node_config_path: None,
+        consensus_path: None,
         use_node_framework: false,
     };
     let mut config = ExternalNodeConfig::mock(&temp_dir, &connection_pool);
@@ -266,6 +270,10 @@ async fn node_reacts_to_stop_signal_during_initial_reorg_detection() {
     let opt = Cli {
         enable_consensus: false,
         components: "core".parse().unwrap(),
+        config_path: None,
+        secrets_path: None,
+        external_node_config_path: None,
+        consensus_path: None,
         use_node_framework: false,
     };
     let mut config = ExternalNodeConfig::mock(&temp_dir, &connection_pool);
diff --git a/core/bin/zksync_server/src/main.rs b/core/bin/zksync_server/src/main.rs
index 5e1d5480d75c..dcd9f3718352 100644
--- a/core/bin/zksync_server/src/main.rs
+++ b/core/bin/zksync_server/src/main.rs
@@ -270,5 +270,8 @@ fn load_env_config() -> anyhow::Result<TempConfigStore> {
         snapshot_creator: SnapshotsCreatorConfig::from_env().ok(),
         protective_reads_writer_config: ProtectiveReadsWriterConfig::from_env().ok(),
         core_object_store: ObjectStoreConfig::from_env().ok(),
+        commitment_generator: None,
+        pruning: None,
+        snapshot_recovery: None,
     })
 }
diff --git a/core/bin/zksync_server/src/node_builder.rs b/core/bin/zksync_server/src/node_builder.rs
index 2909f5283af1..2e5a70011b8d 100644
--- a/core/bin/zksync_server/src/node_builder.rs
+++ b/core/bin/zksync_server/src/node_builder.rs
@@ -329,7 +329,14 @@ impl MainNodeBuilder {
         let circuit_breaker_config = try_load_config!(self.configs.circuit_breaker_config);
 
         let with_debug_namespace = state_keeper_config.save_call_traces;
-        let mut namespaces = Namespace::DEFAULT.to_vec();
+        let mut namespaces = if let Some(namespaces) = &rpc_config.api_namespaces {
+            namespaces
+                .iter()
+                .map(|a| a.parse())
+                .collect::<Result<_, _>>()?
+        } else {
+            Namespace::DEFAULT.to_vec()
+        };
         if with_debug_namespace {
             namespaces.push(Namespace::Debug)
         }
@@ -345,6 +352,7 @@ impl MainNodeBuilder {
                 rpc_config.websocket_requests_per_minute_limit(),
             ),
             replication_lag_limit: circuit_breaker_config.replication_lag_limit(),
+            with_extended_tracing: rpc_config.extended_api_tracing,
             ..Default::default()
         };
         self.node.add_layer(Web3ServerLayer::ws(
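
The `a.parse()` calls above rely on `FromStr`, which this diff derives for `Namespace` via strum (see `core/node/api_server/src/web3/mod.rs` below). A self-contained sketch of the same pattern with a stand-in enum:

    use std::str::FromStr;
    use strum::EnumString; // strum with the "derive" feature

    // Stand-in for the real `Namespace` enum; the actual derive is added later
    // in this diff.
    #[derive(Debug, Clone, PartialEq, EnumString)]
    #[strum(serialize_all = "lowercase")]
    enum Namespace {
        Eth,
        Net,
        Web3,
        Zks,
        Debug,
    }

    // Mirrors `namespaces.iter().map(|a| a.parse()).collect::<Result<_, _>>()`.
    fn parse_namespaces(raw: &[String]) -> Result<Vec<Namespace>, strum::ParseError> {
        raw.iter().map(|a| Namespace::from_str(a)).collect()
    }
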
diff --git a/core/lib/config/src/configs/api.rs b/core/lib/config/src/configs/api.rs
index 3b33ef43343f..e039ab10116a 100644
--- a/core/lib/config/src/configs/api.rs
+++ b/core/lib/config/src/configs/api.rs
@@ -213,6 +213,13 @@ pub struct Web3JsonRpcConfig {
     /// (additionally to natively bridged tokens).
     #[serde(default)]
     pub whitelisted_tokens_for_aa: Vec<Address>,
+    /// Enabled JSON RPC API namespaces. If not set, all namespaces will be available.
+    #[serde(default)]
+    pub api_namespaces: Option<Vec<String>>,
+    /// Enables extended tracing of RPC calls. This may negatively impact performance for nodes under high load
+    /// (hundreds or thousands of RPS).
+    #[serde(default)]
+    pub extended_api_tracing: bool,
 }
 
 impl Web3JsonRpcConfig {
@@ -251,6 +258,8 @@ impl Web3JsonRpcConfig {
             mempool_cache_size: Default::default(),
             tree_api_url: None,
             whitelisted_tokens_for_aa: Default::default(),
+            api_namespaces: None,
+            extended_api_tracing: false,
         }
     }
diff --git a/core/lib/config/src/configs/commitment_generator.rs b/core/lib/config/src/configs/commitment_generator.rs
new file mode 100644
index 000000000000..9ec4d805b8fe
--- /dev/null
+++ b/core/lib/config/src/configs/commitment_generator.rs
@@ -0,0 +1,10 @@
+use std::num::NonZeroU32;
+
+use serde::Deserialize;
+
+#[derive(Debug, Deserialize, Clone, PartialEq)]
+pub struct CommitmentGeneratorConfig {
+    /// Maximum degree of parallelism during commitment generation, i.e., the maximum number of L1 batches being processed in parallel.
+    /// If not specified, commitment generator will use a value roughly equal to the number of CPU cores with some clamping applied.
+    pub max_parallelism: NonZeroU32,
+}
diff --git a/core/lib/config/src/configs/en_config.rs b/core/lib/config/src/configs/en_config.rs
new file mode 100644
index 000000000000..32dc5b7c7b49
--- /dev/null
+++ b/core/lib/config/src/configs/en_config.rs
@@ -0,0 +1,19 @@
+use std::num::NonZeroUsize;
+
+use serde::Deserialize;
+use zksync_basic_types::{
+    commitment::L1BatchCommitmentMode, url::SensitiveUrl, L1ChainId, L2ChainId,
+};
+
+/// Temporary config for initializing external node, will be completely replaced by consensus config later
+#[derive(Debug, Clone, PartialEq, Deserialize)]
+pub struct ENConfig {
+    // Genesis
+    pub l2_chain_id: L2ChainId,
+    pub l1_chain_id: L1ChainId,
+    pub l1_batch_commit_data_generator_mode: L1BatchCommitmentMode,
+
+    // Main node configuration
+    pub main_node_url: SensitiveUrl,
+    pub main_node_rate_limit_rps: Option<NonZeroUsize>,
+}
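
`ENConfig` derives `serde::Deserialize`, so the sample file added at the end of this diff (`etc/env/file_based/external_node.yaml`) maps onto it field by field. A quick sketch, assuming plain `serde_yaml` as the deserializer (the node itself goes through the protobuf repr added below instead):

    use zksync_config::configs::en_config::ENConfig;

    fn parse_sample() -> anyhow::Result<ENConfig> {
        let en: ENConfig = serde_yaml::from_str(
            "l2_chain_id: 270\n\
             l1_chain_id: 9\n\
             l1_batch_commit_data_generator_mode: Rollup\n\
             main_node_url: http://localhost:3050\n",
        )?;
        assert!(en.main_node_rate_limit_rps.is_none()); // optional field may be omitted
        Ok(en)
    }
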
diff --git a/core/lib/config/src/configs/experimental.rs b/core/lib/config/src/configs/experimental.rs
index ad0ef5a4d5b8..e362715d3d4a 100644
--- a/core/lib/config/src/configs/experimental.rs
+++ b/core/lib/config/src/configs/experimental.rs
@@ -12,6 +12,21 @@ pub struct ExperimentalDBConfig {
     /// Maximum number of files concurrently opened by state keeper cache RocksDB. Useful to fit into OS limits; can be used
     /// as a rudimentary way to control RAM usage of the cache.
     pub state_keeper_db_max_open_files: Option<NonZeroU32>,
+    /// Configures whether to persist protective reads when persisting L1 batches in the state keeper.
+    /// Protective reads are never required by full nodes so far, not until such a node runs a full Merkle tree
+    /// (presumably, to participate in L1 batch proving).
+    /// By default, set to `true` as a temporary safety measure.
+    #[serde(default = "ExperimentalDBConfig::default_protective_reads_persistence_enabled")]
+    pub protective_reads_persistence_enabled: bool,
+    // Merkle tree config
+    /// Delay between processing L1 batches in the Merkle tree.
+    #[serde(default = "ExperimentalDBConfig::default_merkle_tree_processing_delay_ms")]
+    pub processing_delay_ms: u64,
+    /// If specified, RocksDB indices and Bloom filters will be managed by the block cache, rather than
+    /// being loaded entirely into RAM on the RocksDB initialization. The block cache capacity should be increased
+    /// correspondingly; otherwise, RocksDB performance can significantly degrade.
+    #[serde(default)]
+    pub include_indices_and_filters_in_block_cache: bool,
 }
 
 impl Default for ExperimentalDBConfig {
@@ -20,6 +35,10 @@ impl Default for ExperimentalDBConfig {
             state_keeper_db_block_cache_capacity_mb:
                 Self::default_state_keeper_db_block_cache_capacity_mb(),
             state_keeper_db_max_open_files: None,
+            protective_reads_persistence_enabled:
+                Self::default_protective_reads_persistence_enabled(),
+            processing_delay_ms: Self::default_merkle_tree_processing_delay_ms(),
+            include_indices_and_filters_in_block_cache: false,
         }
     }
 }
@@ -32,4 +51,12 @@ impl ExperimentalDBConfig {
     pub fn state_keeper_db_block_cache_capacity(&self) -> usize {
         self.state_keeper_db_block_cache_capacity_mb * super::BYTES_IN_MEGABYTE
     }
+
+    const fn default_protective_reads_persistence_enabled() -> bool {
+        true
+    }
+
+    const fn default_merkle_tree_processing_delay_ms() -> u64 {
+        100
+    }
 }
diff --git a/core/lib/config/src/configs/general.rs b/core/lib/config/src/configs/general.rs
index 9f249d655f57..312f404225cb 100644
--- a/core/lib/config/src/configs/general.rs
+++ b/core/lib/config/src/configs/general.rs
@@ -3,10 +3,12 @@ use crate::{
         chain::{CircuitBreakerConfig, MempoolConfig, OperationsManagerConfig, StateKeeperConfig},
         fri_prover_group::FriProverGroupConfig,
         house_keeper::HouseKeeperConfig,
+        pruning::PruningConfig,
+        snapshot_recovery::SnapshotRecoveryConfig,
         vm_runner::ProtectiveReadsWriterConfig,
-        FriProofCompressorConfig, FriProverConfig, FriProverGatewayConfig,
-        FriWitnessGeneratorConfig, FriWitnessVectorGeneratorConfig, ObservabilityConfig,
-        PrometheusConfig, ProofDataHandlerConfig,
+        CommitmentGeneratorConfig, FriProofCompressorConfig, FriProverConfig,
+        FriProverGatewayConfig, FriWitnessGeneratorConfig, FriWitnessVectorGeneratorConfig,
+        ObservabilityConfig, PrometheusConfig, ProofDataHandlerConfig,
     },
     ApiConfig, ContractVerifierConfig, DBConfig, EthConfig, ObjectStoreConfig, PostgresConfig,
     SnapshotsCreatorConfig,
@@ -35,5 +37,8 @@ pub struct GeneralConfig {
     pub snapshot_creator: Option<SnapshotsCreatorConfig>,
     pub observability: Option<ObservabilityConfig>,
     pub protective_reads_writer_config: Option<ProtectiveReadsWriterConfig>,
+    pub commitment_generator: Option<CommitmentGeneratorConfig>,
+    pub snapshot_recovery: Option<SnapshotRecoveryConfig>,
+    pub pruning: Option<PruningConfig>,
     pub core_object_store: Option<ObjectStoreConfig>,
 }
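
Every section of `GeneralConfig` stays `Option`-al, so consumers must choose per field between hard-failing (`.context(...)?`) and defaulting. A sketch of the defaulting pattern used throughout the EN loaders earlier in this diff:

    use zksync_config::configs::GeneralConfig;

    // Mirrors the `general_config.pruning.as_ref().map(|a| a.enabled).unwrap_or_default()`
    // chains in config/mod.rs: a missing section reads as "feature disabled".
    fn pruning_enabled(general: &GeneralConfig) -> bool {
        general.pruning.as_ref().map(|p| p.enabled).unwrap_or_default()
    }
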
diff --git a/core/lib/config/src/configs/mod.rs b/core/lib/config/src/configs/mod.rs
index b2d9571ad292..9e04f483357f 100644
--- a/core/lib/config/src/configs/mod.rs
+++ b/core/lib/config/src/configs/mod.rs
@@ -1,6 +1,7 @@
 // Public re-exports
 pub use self::{
     api::ApiConfig,
+    commitment_generator::CommitmentGeneratorConfig,
     contract_verifier::ContractVerifierConfig,
     contracts::{ContractsConfig, EcosystemContracts},
     database::{DBConfig, PostgresConfig},
@@ -17,7 +18,9 @@ pub use self::{
     object_store::ObjectStoreConfig,
     observability::{ObservabilityConfig, OpentelemetryConfig},
     proof_data_handler::ProofDataHandlerConfig,
+    pruning::PruningConfig,
     secrets::{DatabaseSecrets, L1Secrets, Secrets},
+    snapshot_recovery::SnapshotRecoveryConfig,
     snapshots_creator::SnapshotsCreatorConfig,
     utils::PrometheusConfig,
     vm_runner::ProtectiveReadsWriterConfig,
@@ -25,10 +28,12 @@ pub use self::{
 
 pub mod api;
 pub mod chain;
+mod commitment_generator;
 pub mod consensus;
 pub mod contract_verifier;
 pub mod contracts;
 pub mod database;
+pub mod en_config;
 pub mod eth_sender;
 pub mod eth_watch;
 mod experimental;
@@ -44,7 +49,9 @@ pub mod house_keeper;
 pub mod object_store;
 pub mod observability;
 pub mod proof_data_handler;
+pub mod pruning;
 pub mod secrets;
+pub mod snapshot_recovery;
 pub mod snapshots_creator;
 pub mod utils;
 pub mod vm_runner;
diff --git a/core/lib/config/src/configs/pruning.rs b/core/lib/config/src/configs/pruning.rs
new file mode 100644
index 000000000000..d2a5b0e5e9df
--- /dev/null
+++ b/core/lib/config/src/configs/pruning.rs
@@ -0,0 +1,19 @@
+use std::num::NonZeroU64;
+
+use serde::Deserialize;
+
+#[derive(Debug, Clone, PartialEq, Deserialize)]
+pub struct PruningConfig {
+    pub enabled: bool,
+    /// Number of L1 batches pruned at a time.
+    pub chunk_size: Option<u32>,
+    /// Delta between soft- and hard-removing data from Postgres. Should be reasonably large (order of 60 seconds).
+    /// The default value is 60 seconds.
+    pub removal_delay_sec: Option<NonZeroU64>,
+    /// If set, L1 batches will be pruned after the batch timestamp is this old (in seconds). Note that an L1 batch
+    /// may be temporarily retained for other reasons; e.g., a batch cannot be pruned until it is executed on L1,
+    /// which happens roughly 24 hours after its generation on the mainnet. Thus, in practice this value can specify
+    /// the retention period greater than that implicitly imposed by other criteria (e.g., 7 or 30 days).
+    /// If set to 0, L1 batches will not be retained based on their timestamp. The default value is 1 hour.
+    pub data_retention_sec: Option<u64>,
+}
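
How a consumer is expected to interpret `PruningConfig`'s second-based knobs, with the defaults taken from the doc comments above (the helper is illustrative, not part of this diff):

    use std::time::Duration;
    use zksync_config::configs::PruningConfig;

    fn removal_delay(config: &PruningConfig) -> Duration {
        // Doc comment above: "The default value is 60 seconds."
        Duration::from_secs(config.removal_delay_sec.map(|s| s.get()).unwrap_or(60))
    }
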
diff --git a/core/lib/config/src/configs/snapshot_recovery.rs b/core/lib/config/src/configs/snapshot_recovery.rs
new file mode 100644
index 000000000000..ba26583a8a63
--- /dev/null
+++ b/core/lib/config/src/configs/snapshot_recovery.rs
@@ -0,0 +1,44 @@
+use std::num::NonZeroUsize;
+
+use serde::Deserialize;
+use zksync_basic_types::L1BatchNumber;
+
+use crate::ObjectStoreConfig;
+
+#[derive(Debug, Clone, PartialEq, Deserialize, Default)]
+pub struct TreeRecoveryConfig {
+    /// Approximate chunk size (measured in the number of entries) to recover in a single iteration.
+    /// Reasonable values are order of 100,000 (meaning an iteration takes several seconds).
+    ///
+    /// **Important.** This value cannot be changed in the middle of tree recovery (i.e., if a node is stopped in the middle
+    /// of recovery and then restarted with a different config).
+    pub chunk_size: Option<u64>,
+    /// Buffer capacity for parallel persistence operations. Should be reasonably small since a larger buffer means more RAM usage;
+    /// buffer elements are persisted tree chunks. On the other hand, a small buffer can lead to persistence parallelization being inefficient.
+    ///
+    /// If not set, parallel persistence will be disabled.
+    pub parallel_persistence_buffer: Option<NonZeroUsize>,
+}
+
+#[derive(Debug, Clone, PartialEq, Deserialize, Default)]
+pub struct PostgresRecoveryConfig {
+    /// Maximum concurrency factor for the concurrent parts of snapshot recovery for Postgres. It may be useful to
+    /// reduce this factor to about 5 if snapshot recovery overloads I/O capacity of the node. Conversely,
+    /// if I/O capacity of your infra is high, you may increase concurrency to speed up Postgres recovery.
+    pub max_concurrency: Option<NonZeroUsize>,
+}
+
+#[derive(Debug, Clone, PartialEq, Deserialize)]
+pub struct SnapshotRecoveryConfig {
+    /// Enables application-level snapshot recovery. Required to start a node that was recovered from a snapshot,
+    /// or to initialize a node from a snapshot. Has no effect on a node that was initialized from a Postgres dump
+    /// or was synced from genesis.
+    ///
+    /// This is an experimental and incomplete feature; do not use unless you know what you're doing.
+    pub enabled: bool,
+    /// L1 batch number of the snapshot to use during recovery. Specifying this parameter is mostly useful for testing.
+    pub l1_batch: Option<L1BatchNumber>,
+    pub tree: TreeRecoveryConfig,
+    pub postgres: PostgresRecoveryConfig,
+    pub object_store: Option<ObjectStoreConfig>,
+}
diff --git a/core/lib/config/src/testonly.rs b/core/lib/config/src/testonly.rs
index b60fd95a5c10..fd1059b0f32f 100644
--- a/core/lib/config/src/testonly.rs
+++ b/core/lib/config/src/testonly.rs
@@ -97,6 +97,9 @@ impl Distribution<configs::api::Web3JsonRpcConfig> for EncodeDist {
             mempool_cache_update_interval: self.sample(rng),
             mempool_cache_size: self.sample(rng),
             whitelisted_tokens_for_aa: self.sample_range(rng).map(|_| rng.gen()).collect(),
+            api_namespaces: self
+                .sample_opt(|| self.sample_range(rng).map(|_| self.sample(rng)).collect()),
+            extended_api_tracing: self.sample(rng),
         }
     }
 }
@@ -281,6 +284,9 @@ impl Distribution<configs::ExperimentalDBConfig> for EncodeDist {
         configs::ExperimentalDBConfig {
             state_keeper_db_block_cache_capacity_mb: self.sample(rng),
             state_keeper_db_max_open_files: self.sample(rng),
+            protective_reads_persistence_enabled: self.sample(rng),
+            processing_delay_ms: self.sample(rng),
+            include_indices_and_filters_in_block_cache: self.sample(rng),
         }
     }
 }
diff --git a/core/lib/env_config/src/api.rs b/core/lib/env_config/src/api.rs
index 6f1948241c9e..68af37393bba 100644
--- a/core/lib/env_config/src/api.rs
+++ b/core/lib/env_config/src/api.rs
@@ -98,6 +98,8 @@ mod tests {
                 addr("0x0000000000000000000000000000000000000001"),
                 addr("0x0000000000000000000000000000000000000002"),
             ],
+            api_namespaces: Some(vec!["debug".to_string()]),
+            extended_api_tracing: true,
         },
         prometheus: PrometheusConfig {
             listener_port: 3312,
@@ -129,6 +131,8 @@ mod tests {
             API_WEB3_JSON_RPC_MAX_NONCE_AHEAD=5
             API_WEB3_JSON_RPC_GAS_PRICE_SCALE_FACTOR=1.2
             API_WEB3_JSON_RPC_REQUEST_TIMEOUT=10
+            API_WEB3_JSON_RPC_API_NAMESPACES=debug
+            API_WEB3_JSON_RPC_EXTENDED_API_TRACING=true
             API_WEB3_JSON_RPC_ACCOUNT_PKS="0x0000000000000000000000000000000000000000000000000000000000000001,0x0000000000000000000000000000000000000000000000000000000000000002"
             API_WEB3_JSON_RPC_WHITELISTED_TOKENS_FOR_AA="0x0000000000000000000000000000000000000001,0x0000000000000000000000000000000000000002"
             API_WEB3_JSON_RPC_ESTIMATE_GAS_SCALE_FACTOR=1.0
diff --git a/core/lib/protobuf_config/src/api.rs b/core/lib/protobuf_config/src/api.rs
index fe0cfb3e0d6e..4eac849773f3 100644
--- a/core/lib/protobuf_config/src/api.rs
+++ b/core/lib/protobuf_config/src/api.rs
@@ -69,7 +69,11 @@ impl ProtoRepr for proto::Web3JsonRpc {
             })
             .collect::<Result<_, _>>()
             .context("max_response_body_size_overrides")?;
-
+        let api_namespaces = if self.api_namespaces.is_empty() {
+            None
+        } else {
+            Some(self.api_namespaces.clone())
+        };
         Ok(Self::Type {
             http_port: required(&self.http_port)
                 .and_then(|p| Ok((*p).try_into()?))
@@ -154,6 +158,8 @@ impl ProtoRepr for proto::Web3JsonRpc {
                 .map(|(i, k)| parse_h160(k).context(i))
                 .collect::<Result<Vec<_>, _>>()
                 .context("account_pks")?,
+            extended_api_tracing: self.extended_api_tracing.unwrap_or_default(),
+            api_namespaces,
         })
     }
 
@@ -222,6 +228,8 @@ impl ProtoRepr for proto::Web3JsonRpc {
                 .iter()
                 .map(|k| format!("{:?}", k))
                 .collect(),
+            extended_api_tracing: Some(this.extended_api_tracing),
+            api_namespaces: this.api_namespaces.clone().unwrap_or_default(),
         }
     }
 }
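
proto3 has no `optional repeated`, so the repr encodes `None` as an empty list in `build()` and decodes an empty `api_namespaces` back into `None` in `read()`. The rule in isolation (note that an explicit `Some(vec![])` cannot survive this round trip):

    fn api_namespaces_opt(raw: Vec<String>) -> Option<Vec<String>> {
        if raw.is_empty() {
            None
        } else {
            Some(raw)
        }
    }
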
diff --git a/core/lib/protobuf_config/src/commitment_generator.rs b/core/lib/protobuf_config/src/commitment_generator.rs
new file mode 100644
index 000000000000..23af3ccce76e
--- /dev/null
+++ b/core/lib/protobuf_config/src/commitment_generator.rs
@@ -0,0 +1,24 @@
+use std::num::NonZeroU32;
+
+use anyhow::Context as _;
+use zksync_config::configs::CommitmentGeneratorConfig;
+use zksync_protobuf::{repr::ProtoRepr, required};
+
+use crate::proto::commitment_generator as proto;
+
+impl ProtoRepr for proto::CommitmentGenerator {
+    type Type = CommitmentGeneratorConfig;
+    fn read(&self) -> anyhow::Result<Self::Type> {
+        Ok(Self::Type {
+            max_parallelism: NonZeroU32::new(
+                *required(&self.max_parallelism).context("max_parallelism")?,
+            )
+            .context("cannot be 0")?,
+        })
+    }
+    fn build(this: &Self::Type) -> Self {
+        Self {
+            max_parallelism: Some(this.max_parallelism.into()),
+        }
+    }
+}
diff --git a/core/lib/protobuf_config/src/en.rs b/core/lib/protobuf_config/src/en.rs
new file mode 100644
index 000000000000..b72a5b142cfb
--- /dev/null
+++ b/core/lib/protobuf_config/src/en.rs
@@ -0,0 +1,50 @@
+use std::{num::NonZeroUsize, str::FromStr};
+
+use anyhow::Context;
+use zksync_basic_types::{url::SensitiveUrl, L1ChainId, L2ChainId};
+use zksync_config::configs::en_config::ENConfig;
+use zksync_protobuf::{required, ProtoRepr};
+
+use crate::proto::en as proto;
+
+impl ProtoRepr for proto::ExternalNode {
+    type Type = ENConfig;
+
+    fn read(&self) -> anyhow::Result<Self::Type> {
+        Ok(Self::Type {
+            main_node_url: SensitiveUrl::from_str(
+                required(&self.main_node_url).context("main_node_url")?,
+            )?,
+            l1_chain_id: required(&self.l1_chain_id)
+                .map(|x| L1ChainId(*x))
+                .context("l1_chain_id")?,
+            l2_chain_id: required(&self.l2_chain_id)
+                .and_then(|x| L2ChainId::try_from(*x).map_err(|a| anyhow::anyhow!(a)))
+                .context("l2_chain_id")?,
+            l1_batch_commit_data_generator_mode: required(
+                &self.l1_batch_commit_data_generator_mode,
+            )
+            .and_then(|x| Ok(crate::proto::genesis::L1BatchCommitDataGeneratorMode::try_from(*x)?))
+            .context("l1_batch_commit_data_generator_mode")?
+            .parse(),
+            main_node_rate_limit_rps: self
+                .main_node_rate_limit_rps
+                .and_then(|a| NonZeroUsize::new(a as usize)),
+        })
+    }
+
+    fn build(this: &Self::Type) -> Self {
+        Self {
+            main_node_url: Some(this.main_node_url.expose_str().to_string()),
+            l1_chain_id: Some(this.l1_chain_id.0),
+            l2_chain_id: Some(this.l2_chain_id.as_u64()),
+            l1_batch_commit_data_generator_mode: Some(
+                crate::proto::genesis::L1BatchCommitDataGeneratorMode::new(
+                    &this.l1_batch_commit_data_generator_mode,
+                )
+                .into(),
+            ),
+            main_node_rate_limit_rps: this.main_node_rate_limit_rps.map(|a| a.get() as u32),
+        }
+    }
+}
diff --git a/core/lib/protobuf_config/src/experimental.rs b/core/lib/protobuf_config/src/experimental.rs
index c4fe17aadf43..8d92f3ef87a8 100644
--- a/core/lib/protobuf_config/src/experimental.rs
+++ b/core/lib/protobuf_config/src/experimental.rs
@@ -21,6 +21,13 @@ impl ProtoRepr for proto::Db {
                 .map(|count| NonZeroU32::new(count).context("cannot be 0"))
                 .transpose()
                 .context("state_keeper_db_max_open_files")?,
+            protective_reads_persistence_enabled: self
+                .reads_persistence_enabled
+                .unwrap_or_default(),
+            processing_delay_ms: self.processing_delay_ms.unwrap_or_default(),
+            include_indices_and_filters_in_block_cache: self
+                .include_indices_and_filters_in_block_cache
+                .unwrap_or_default(),
         })
     }
 
@@ -34,6 +41,11 @@ impl ProtoRepr for proto::Db {
             state_keeper_db_max_open_files: this
                 .state_keeper_db_max_open_files
                 .map(NonZeroU32::get),
+            reads_persistence_enabled: Some(this.protective_reads_persistence_enabled),
+            processing_delay_ms: Some(this.processing_delay_ms),
+            include_indices_and_filters_in_block_cache: Some(
+                this.include_indices_and_filters_in_block_cache,
+            ),
         }
     }
 }
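
The new `ProtoRepr` impls are expected to satisfy the usual build/read round trip; this is what `zksync_protobuf`'s test utilities exercise for the existing reprs. A hypothetical reduction of that check (note the `api_namespaces: Some(vec![])` caveat above is precisely the kind of value that fails it):

    use zksync_protobuf::repr::ProtoRepr;

    fn assert_roundtrip<P: ProtoRepr>(config: &P::Type)
    where
        P::Type: PartialEq + std::fmt::Debug,
    {
        let rebuilt = P::build(config).read().expect("read after build");
        assert_eq!(&rebuilt, config);
    }
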
diff --git a/core/lib/protobuf_config/src/general.rs b/core/lib/protobuf_config/src/general.rs
index 834977759ae2..9ea3a3265541 100644
--- a/core/lib/protobuf_config/src/general.rs
+++ b/core/lib/protobuf_config/src/general.rs
@@ -41,6 +41,11 @@ impl ProtoRepr for proto::GeneralConfig {
                 .context("protective_reads_writer")?,
             core_object_store: read_optional_repr(&self.core_object_store)
                 .context("core_object_store")?,
+            commitment_generator: read_optional_repr(&self.commitment_generator)
+                .context("commitment_generator")?,
+            pruning: read_optional_repr(&self.pruning).context("pruning")?,
+            snapshot_recovery: read_optional_repr(&self.snapshot_recovery)
+                .context("snapshot_recovery")?,
         })
     }
 
@@ -76,6 +81,9 @@ impl ProtoRepr for proto::GeneralConfig {
                 .protective_reads_writer_config
                 .as_ref()
                 .map(ProtoRepr::build),
+            commitment_generator: this.commitment_generator.as_ref().map(ProtoRepr::build),
+            snapshot_recovery: this.snapshot_recovery.as_ref().map(ProtoRepr::build),
+            pruning: this.pruning.as_ref().map(ProtoRepr::build),
             core_object_store: this.core_object_store.as_ref().map(ProtoRepr::build),
         }
     }
diff --git a/core/lib/protobuf_config/src/genesis.rs b/core/lib/protobuf_config/src/genesis.rs
index 9cab754150d2..52045ed9dbed 100644
--- a/core/lib/protobuf_config/src/genesis.rs
+++ b/core/lib/protobuf_config/src/genesis.rs
@@ -11,20 +11,21 @@ use zksync_protobuf::{repr::ProtoRepr, required};
 use crate::{parse_h160, parse_h256, proto::genesis as proto};
 
 impl proto::L1BatchCommitDataGeneratorMode {
-    fn new(n: &L1BatchCommitmentMode) -> Self {
+    pub(crate) fn new(n: &L1BatchCommitmentMode) -> Self {
         match n {
             L1BatchCommitmentMode::Rollup => Self::Rollup,
             L1BatchCommitmentMode::Validium => Self::Validium,
         }
     }
 
-    fn parse(&self) -> L1BatchCommitmentMode {
+    pub(crate) fn parse(&self) -> L1BatchCommitmentMode {
         match self {
             Self::Rollup => L1BatchCommitmentMode::Rollup,
             Self::Validium => L1BatchCommitmentMode::Validium,
         }
     }
 }
+
 impl ProtoRepr for proto::Genesis {
     type Type = configs::GenesisConfig;
     fn read(&self) -> anyhow::Result<Self::Type> {
diff --git a/core/lib/protobuf_config/src/lib.rs b/core/lib/protobuf_config/src/lib.rs
index 2fd9bbd9e059..14e4f5455f5f 100644
--- a/core/lib/protobuf_config/src/lib.rs
+++ b/core/lib/protobuf_config/src/lib.rs
@@ -7,10 +7,12 @@
 mod api;
 mod chain;
 mod circuit_breaker;
+mod commitment_generator;
 mod consensus;
 mod contract_verifier;
 mod contracts;
 mod database;
+mod en;
 mod eth;
 mod experimental;
 mod general;
@@ -21,8 +23,11 @@ mod observability;
 mod proof_data_handler;
 pub mod proto;
 mod prover;
+mod pruning;
 mod secrets;
 mod snapshots_creator;
+
+mod snapshot_recovery;
 pub mod testonly;
 #[cfg(test)]
 mod tests;
diff --git a/core/lib/protobuf_config/src/proto/config/api.proto b/core/lib/protobuf_config/src/proto/config/api.proto
index 09503056a3f1..4fea0691f79d 100644
--- a/core/lib/protobuf_config/src/proto/config/api.proto
+++ b/core/lib/protobuf_config/src/proto/config/api.proto
@@ -40,7 +40,8 @@ message Web3JsonRpc {
   optional uint64 mempool_cache_size = 29; // optional
   repeated string whitelisted_tokens_for_aa = 30; // optional
   repeated MaxResponseSizeOverride max_response_body_size_overrides = 31;
-
+  repeated string api_namespaces = 32; // Optional, if empty all namespaces are available
+  optional bool extended_api_tracing = 33; // optional, default false
   reserved 15; reserved "l1_to_l2_transactions_compatibility_mode";
 }
diff --git a/core/lib/protobuf_config/src/proto/config/commitment_generator.proto b/core/lib/protobuf_config/src/proto/config/commitment_generator.proto
new file mode 100644
index 000000000000..62b9566e1866
--- /dev/null
+++ b/core/lib/protobuf_config/src/proto/config/commitment_generator.proto
@@ -0,0 +1,7 @@
+syntax = "proto3";
+
+package zksync.config.commitment_generator;
+
+message CommitmentGenerator {
+  optional uint32 max_parallelism = 1;
+}
diff --git a/core/lib/protobuf_config/src/proto/config/en.proto b/core/lib/protobuf_config/src/proto/config/en.proto
new file mode 100644
index 000000000000..ac7cb59b156a
--- /dev/null
+++ b/core/lib/protobuf_config/src/proto/config/en.proto
@@ -0,0 +1,12 @@
+syntax = "proto3";
+import "zksync/config/genesis.proto";
+
+package zksync.config.en;
+
+message ExternalNode {
+  optional string main_node_url = 1; // required
+  optional uint64 l2_chain_id = 2; // required
+  optional uint64 l1_chain_id = 3; // required
+  optional uint32 main_node_rate_limit_rps = 6; // optional
+  optional config.genesis.L1BatchCommitDataGeneratorMode l1_batch_commit_data_generator_mode = 7; // optional, defaults to Rollup
+}
diff --git a/core/lib/protobuf_config/src/proto/config/experimental.proto b/core/lib/protobuf_config/src/proto/config/experimental.proto
index 4f456b9aca39..6f9ec426d8bb 100644
--- a/core/lib/protobuf_config/src/proto/config/experimental.proto
+++ b/core/lib/protobuf_config/src/proto/config/experimental.proto
@@ -8,4 +8,12 @@ package zksync.config.experimental;
 message DB {
   optional uint64 state_keeper_db_block_cache_capacity_mb = 1; // MB; required
   optional uint32 state_keeper_db_max_open_files = 2; // optional
+  optional bool reads_persistence_enabled = 3;
+  optional uint64 processing_delay_ms = 4;
+  optional bool include_indices_and_filters_in_block_cache = 5;
+}
+
+// Experimental part of the Snapshot recovery configuration.
+message SnapshotRecovery {
+  optional uint64 tree_recovery_parallel_persistence_buffer = 1;
 }
diff --git a/core/lib/protobuf_config/src/proto/config/general.proto b/core/lib/protobuf_config/src/proto/config/general.proto
index fdfe257aecf1..7d2423f6b71b 100644
--- a/core/lib/protobuf_config/src/proto/config/general.proto
+++ b/core/lib/protobuf_config/src/proto/config/general.proto
@@ -14,6 +14,9 @@ import "zksync/config/observability.proto";
 import "zksync/config/snapshots_creator.proto";
 import "zksync/config/utils.proto";
 import "zksync/config/vm_runner.proto";
+import "zksync/config/commitment_generator.proto";
+import "zksync/config/snapshot_recovery.proto";
+import "zksync/config/pruning.proto";
 import "zksync/config/object_store.proto";
 
 message GeneralConfig {
@@ -39,4 +42,7 @@ message GeneralConfig {
   optional config.observability.Observability observability = 32;
   optional config.vm_runner.ProtectiveReadsWriter protective_reads_writer = 33;
   optional config.object_store.ObjectStore core_object_store = 34;
+  optional config.snapshot_recovery.SnapshotRecovery snapshot_recovery = 35;
+  optional config.pruning.Pruning pruning = 36;
+  optional config.commitment_generator.CommitmentGenerator commitment_generator = 37;
 }
diff --git a/core/lib/protobuf_config/src/proto/config/pruning.proto b/core/lib/protobuf_config/src/proto/config/pruning.proto
new file mode 100644
index 000000000000..351f353bf060
--- /dev/null
+++ b/core/lib/protobuf_config/src/proto/config/pruning.proto
@@ -0,0 +1,10 @@
+syntax = "proto3";
+
+package zksync.config.pruning;
+
+message Pruning {
+  optional bool enabled = 1;
+  optional uint32 chunk_size = 2;
+  optional uint64 removal_delay_sec = 3;
+  optional uint64 data_retention_sec = 4;
+}
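
All new proto fields are `optional` at the wire level; requiredness is enforced in the Rust reprs via `zksync_protobuf::required`, which behaves roughly like this sketch (the real helper lives in `zksync_protobuf` and is used throughout the impls above):

    fn required<T>(field: &Option<T>) -> anyhow::Result<&T> {
        field
            .as_ref()
            .ok_or_else(|| anyhow::anyhow!("missing required field"))
    }
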
diff --git a/core/lib/protobuf_config/src/proto/config/snapshot_recovery.proto b/core/lib/protobuf_config/src/proto/config/snapshot_recovery.proto
new file mode 100644
index 000000000000..9eceda12ad86
--- /dev/null
+++ b/core/lib/protobuf_config/src/proto/config/snapshot_recovery.proto
@@ -0,0 +1,22 @@
+syntax = "proto3";
+import "zksync/config/object_store.proto";
+import "zksync/config/experimental.proto";
+
+package zksync.config.snapshot_recovery;
+
+message Tree {
+  optional uint64 chunk_size = 1;
+}
+
+message Postgres {
+  optional uint64 max_concurrency = 1;
+}
+
+message SnapshotRecovery {
+  optional bool enabled = 1;
+  optional Postgres postgres = 2;
+  optional Tree tree = 3;
+  optional uint32 l1_batch = 4;
+  optional config.object_store.ObjectStore object_store = 5;
+  optional experimental.SnapshotRecovery experimental = 6;
+}
diff --git a/core/lib/protobuf_config/src/pruning.rs b/core/lib/protobuf_config/src/pruning.rs
new file mode 100644
index 000000000000..ed0ebb10b92f
--- /dev/null
+++ b/core/lib/protobuf_config/src/pruning.rs
@@ -0,0 +1,28 @@
+use std::num::NonZeroU64;
+
+use zksync_config::configs::PruningConfig;
+use zksync_protobuf::ProtoRepr;
+
+use crate::proto::pruning as proto;
+
+impl ProtoRepr for proto::Pruning {
+    type Type = PruningConfig;
+
+    fn read(&self) -> anyhow::Result<Self::Type> {
+        Ok(Self::Type {
+            enabled: self.enabled.unwrap_or_default(),
+            chunk_size: self.chunk_size,
+            removal_delay_sec: self.removal_delay_sec.and_then(NonZeroU64::new),
+            data_retention_sec: self.data_retention_sec,
+        })
+    }
+
+    fn build(this: &Self::Type) -> Self {
+        Self {
+            enabled: Some(this.enabled),
+            chunk_size: this.chunk_size,
+            removal_delay_sec: this.removal_delay_sec.map(|a| a.get()),
+            data_retention_sec: this.data_retention_sec,
+        }
+    }
+}
diff --git a/core/lib/protobuf_config/src/snapshot_recovery.rs b/core/lib/protobuf_config/src/snapshot_recovery.rs
new file mode 100644
index 000000000000..4023cbb0c097
--- /dev/null
+++ b/core/lib/protobuf_config/src/snapshot_recovery.rs
@@ -0,0 +1,96 @@
+use std::num::NonZeroUsize;
+
+use anyhow::Context;
+use zksync_basic_types::L1BatchNumber;
+use zksync_config::configs::{
+    snapshot_recovery::{PostgresRecoveryConfig, TreeRecoveryConfig},
+    SnapshotRecoveryConfig,
+};
+use zksync_protobuf::ProtoRepr;
+
+use crate::{proto::snapshot_recovery as proto, read_optional_repr};
+
+impl ProtoRepr for proto::Postgres {
+    type Type = PostgresRecoveryConfig;
+
+    fn read(&self) -> anyhow::Result<Self::Type> {
+        Ok(Self::Type {
+            max_concurrency: self
+                .max_concurrency
+                .and_then(|a| NonZeroUsize::new(a as usize)),
+        })
+    }
+
+    fn build(this: &Self::Type) -> Self {
+        Self {
+            max_concurrency: this.max_concurrency.map(|a| a.get() as u64),
+        }
+    }
+}
+
+impl ProtoRepr for proto::SnapshotRecovery {
+    type Type = SnapshotRecoveryConfig;
+
+    fn read(&self) -> anyhow::Result<Self::Type> {
+        let tree = self
+            .tree
+            .as_ref()
+            .map(|tree| {
+                let chunk_size = tree.chunk_size;
+                let parallel_persistence_buffer = self
+                    .experimental
+                    .as_ref()
+                    .and_then(|a| {
+                        a.tree_recovery_parallel_persistence_buffer
+                            .map(|a| NonZeroUsize::new(a as usize))
+                    })
+                    .flatten();
+                TreeRecoveryConfig {
+                    chunk_size,
+                    parallel_persistence_buffer,
+                }
+            })
+            .unwrap_or_default();
+
+        Ok(Self::Type {
+            enabled: self.enabled.unwrap_or_default(),
+            tree,
+            postgres: read_optional_repr(&self.postgres)
+                .context("postgres")?
+                .unwrap_or_default(),
+            l1_batch: self.l1_batch.map(L1BatchNumber),
+            object_store: read_optional_repr(&self.object_store).context("object store")?,
+        })
+    }
+
+    fn build(this: &Self::Type) -> Self {
+        let (tree, experimental) = if this.tree == TreeRecoveryConfig::default() {
+            (None, None)
+        } else {
+            (
+                Some(proto::Tree {
+                    chunk_size: this.tree.chunk_size,
+                }),
+                Some(crate::proto::experimental::SnapshotRecovery {
+                    tree_recovery_parallel_persistence_buffer: this
+                        .tree
+                        .parallel_persistence_buffer
+                        .map(|a| a.get() as u64),
+                }),
+            )
+        };
+        let postgres = if this.postgres == PostgresRecoveryConfig::default() {
+            None
+        } else {
+            Some(this.postgres.clone())
+        };
+        Self {
+            enabled: Some(this.enabled),
+            postgres: postgres.as_ref().map(ProtoRepr::build),
+            tree,
+            experimental,
+            l1_batch: this.l1_batch.map(|a| a.0),
+            object_store: this.object_store.as_ref().map(ProtoRepr::build),
+        }
+    }
+}
diff --git a/core/lib/protobuf_config/src/tests.rs b/core/lib/protobuf_config/src/tests.rs
index fad37700ae5f..d9693aaffcbe 100644
--- a/core/lib/protobuf_config/src/tests.rs
+++ b/core/lib/protobuf_config/src/tests.rs
@@ -65,4 +65,6 @@ fn verify_file_parsing() {
     decode_yaml_repr::<proto::contracts::Contracts>(&base_path.join("contracts.yaml"), true)
         .unwrap();
     decode_yaml_repr::<proto::secrets::Secrets>(&base_path.join("secrets.yaml"), true).unwrap();
+    decode_yaml_repr::<proto::en::ExternalNode>(&base_path.join("external_node.yaml"), true)
+        .unwrap();
 }
diff --git a/core/lib/zksync_core_leftovers/src/temp_config_store/mod.rs b/core/lib/zksync_core_leftovers/src/temp_config_store/mod.rs
index cb3e0d08794d..60a610c359f8 100644
--- a/core/lib/zksync_core_leftovers/src/temp_config_store/mod.rs
+++ b/core/lib/zksync_core_leftovers/src/temp_config_store/mod.rs
@@ -1,3 +1,6 @@
+use std::path::PathBuf;
+
+use anyhow::Context;
 use zksync_config::{
     configs::{
         api::{HealthCheckConfig, MerkleTreeApiConfig, Web3JsonRpcConfig},
@@ -8,9 +11,10 @@ use zksync_config::{
         fri_prover_group::FriProverGroupConfig,
         house_keeper::HouseKeeperConfig,
         wallets::{AddressWallet, EthSender, StateKeeper, Wallet, Wallets},
-        FriProofCompressorConfig, FriProverConfig, FriProverGatewayConfig,
-        FriWitnessGeneratorConfig, FriWitnessVectorGeneratorConfig, GeneralConfig,
-        ObservabilityConfig, PrometheusConfig, ProofDataHandlerConfig, ProtectiveReadsWriterConfig,
+        CommitmentGeneratorConfig, FriProofCompressorConfig, FriProverConfig,
+        FriProverGatewayConfig, FriWitnessGeneratorConfig, FriWitnessVectorGeneratorConfig,
+        GeneralConfig, ObservabilityConfig, PrometheusConfig, ProofDataHandlerConfig,
+        ProtectiveReadsWriterConfig, PruningConfig, SnapshotRecoveryConfig,
     },
     ApiConfig, ContractVerifierConfig, DBConfig, EthConfig, EthWatchConfig, GasAdjusterConfig,
     ObjectStoreConfig, PostgresConfig, SnapshotsCreatorConfig,
@@ -23,6 +27,11 @@ pub fn decode_yaml_repr<T: ProtoRepr>(yaml: &str) -> anyhow::Result<T::Type> {
     this.read()
 }
 
+pub fn read_yaml_repr<T: ProtoRepr>(path_buf: PathBuf) -> anyhow::Result<T::Type> {
+    let yaml = std::fs::read_to_string(path_buf).context("failed reading YAML config")?;
+    decode_yaml_repr::<T>(&yaml)
+}
+
 // TODO (QIT-22): This structure is going to be removed when components will be responsible for their own configs.
 /// A temporary config store allowing to pass deserialized configs from `zksync_server` to `zksync_core`.
 /// All the configs are optional, since for some component combination it is not needed to pass all the configs.
@@ -56,6 +65,9 @@ pub struct TempConfigStore {
     pub snapshot_creator: Option<SnapshotsCreatorConfig>,
     pub protective_reads_writer_config: Option<ProtectiveReadsWriterConfig>,
     pub core_object_store: Option<ObjectStoreConfig>,
+    pub commitment_generator: Option<CommitmentGeneratorConfig>,
+    pub pruning: Option<PruningConfig>,
+    pub snapshot_recovery: Option<SnapshotRecoveryConfig>,
 }
 
 impl TempConfigStore {
@@ -83,6 +95,9 @@ impl TempConfigStore {
             observability: self.observability.clone(),
             protective_reads_writer_config: self.protective_reads_writer_config.clone(),
             core_object_store: self.core_object_store.clone(),
+            commitment_generator: self.commitment_generator.clone(),
+            snapshot_recovery: self.snapshot_recovery.clone(),
+            pruning: self.pruning.clone(),
         }
     }
diff --git a/core/node/api_server/Cargo.toml b/core/node/api_server/Cargo.toml
index 787b1e2f634a..9a026846f003 100644
--- a/core/node/api_server/Cargo.toml
+++ b/core/node/api_server/Cargo.toml
@@ -48,6 +48,7 @@ pin-project-lite.workspace = true
 hex.workspace = true
 http.workspace = true
 tower.workspace = true
+strum = { workspace = true, features = ["derive"] }
 tower-http = { workspace = true, features = ["cors", "metrics"] }
 lru.workspace = true
diff --git a/core/node/api_server/src/web3/mod.rs b/core/node/api_server/src/web3/mod.rs
index b86666ea6868..7b2dec7abb35 100644
--- a/core/node/api_server/src/web3/mod.rs
+++ b/core/node/api_server/src/web3/mod.rs
@@ -86,8 +86,9 @@ enum ApiTransport {
     Http(SocketAddr),
 }
 
-#[derive(Debug, Deserialize, Clone, PartialEq)]
+#[derive(Debug, Deserialize, Clone, PartialEq, strum::EnumString)]
 #[serde(rename_all = "lowercase")]
+#[strum(serialize_all = "lowercase")]
 pub enum Namespace {
     Eth,
     Net,
diff --git a/core/node/node_framework/examples/main_node.rs b/core/node/node_framework/examples/main_node.rs
index fe111155d829..9fb81aa4069b 100644
--- a/core/node/node_framework/examples/main_node.rs
+++ b/core/node/node_framework/examples/main_node.rs
@@ -304,6 +304,7 @@ impl MainNodeBuilder {
                 rpc_config.websocket_requests_per_minute_limit(),
             ),
             replication_lag_limit: circuit_breaker_config.replication_lag_limit(),
+            with_extended_tracing: rpc_config.extended_api_tracing,
             ..Default::default()
         };
         self.node.add_layer(Web3ServerLayer::ws(
diff --git a/core/node/node_framework/src/implementations/layers/web3_api/server.rs b/core/node/node_framework/src/implementations/layers/web3_api/server.rs
index 2ae4c34da34e..e45583e2cfc6 100644
--- a/core/node/node_framework/src/implementations/layers/web3_api/server.rs
+++ b/core/node/node_framework/src/implementations/layers/web3_api/server.rs
@@ -56,6 +56,7 @@ impl Web3ServerOptionalConfig {
             api_builder = api_builder
                 .with_websocket_requests_per_minute_limit(websocket_requests_per_minute_limit);
         }
+        api_builder = api_builder.with_extended_tracing(self.with_extended_tracing);
         api_builder
     }
 }
diff --git a/etc/env/file_based/external_node.yaml b/etc/env/file_based/external_node.yaml
new file mode 100644
index 000000000000..675baf739686
--- /dev/null
+++ b/etc/env/file_based/external_node.yaml
@@ -0,0 +1,6 @@
+l1_chain_id: 9
+l2_chain_id: 270
+l1_batch_commit_data_generator_mode: Rollup
+
+main_node_url: http://localhost:3050
+main_node_rate_limit_rps: 1000
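
Tying the pieces together: the sample file above decodes through the new `read_yaml_repr` helper and the `proto::en::ExternalNode` repr added earlier in this diff. A sketch (`proto` is the public module re-exported in `core/lib/protobuf_config/src/lib.rs`):

    use zksync_config::configs::en_config::ENConfig;
    use zksync_core_leftovers::temp_config_store::read_yaml_repr;
    use zksync_protobuf_config::proto;

    fn load_en_config() -> anyhow::Result<ENConfig> {
        read_yaml_repr::<proto::en::ExternalNode>(
            "etc/env/file_based/external_node.yaml".into(),
        )
    }
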
diff --git a/etc/env/file_based/general.yaml b/etc/env/file_based/general.yaml
index 5f58b21237b6..9c6e0144187d 100644
--- a/etc/env/file_based/general.yaml
+++ b/etc/env/file_based/general.yaml
@@ -63,6 +63,7 @@ api:
     estimate_gas_scale_factor: 1.2
     estimate_gas_acceptable_overestimation: 1000
     max_tx_size: 1000000
+    api_namespaces: [ eth,net,web3,zks,pubsub ]
     max_response_body_size_overrides:
       - method: eth_getTransactionReceipt # no size specified, meaning no size limit
       - method: zks_getProof
@@ -129,7 +130,7 @@ eth:
     aggregated_block_execute_deadline: 10
     timestamp_criteria_max_allowed_lag: 30
     max_eth_tx_data_size: 120000
-    aggregated_proof_sizes: [ 1,4 ]
+    aggregated_proof_sizes: [ 1 ]
     max_aggregated_tx_gas: 4000000
     max_acceptable_priority_fee_in_gwei: 100000000000
    pubdata_sending_mode: BLOBS
@@ -333,6 +334,23 @@ protective_reads_writer:
   window_size: 3
   first_processed_batch: 0
 
+snapshot_recovery:
+  enabled: true
+  postgres:
+    max_concurrency: 10
+  tree:
+    chunk_size: 200000
+  experimental:
+    tree_recovery_parallel_persistence_buffer: 1
+pruning:
+  enabled: true
+  chunk_size: 10
+  removal_delay_sec: 60
+  data_retention_sec: 3600
+
+commitment_generator:
+  max_parallelism: 10
+
 core_object_store:
   file_backed:
diff --git a/prover/config/src/lib.rs b/prover/config/src/lib.rs
index f501dd2d6e06..2c05b57e16cf 100644
--- a/prover/config/src/lib.rs
+++ b/prover/config/src/lib.rs
@@ -50,6 +50,9 @@ fn load_env_config() -> anyhow::Result<TempConfigStore> {
         snapshot_creator: SnapshotsCreatorConfig::from_env().ok(),
         protective_reads_writer_config: ProtectiveReadsWriterConfig::from_env().ok(),
         core_object_store: ObjectStoreConfig::from_env().ok(),
+        commitment_generator: None,
+        pruning: None,
+        snapshot_recovery: None,
     })
 }