chore: merge 'main' into pe-80-add-endpoint-to-get-gas-per-pubdata
dimazhornyk committed Apr 26, 2024
2 parents 399255a + 356be4e commit b56d791
Showing 86 changed files with 1,033 additions and 1,126 deletions.
28 changes: 22 additions & 6 deletions .github/workflows/protobuf.yaml
@@ -11,7 +11,10 @@ on:
     #   (unless we improve our github setup).
     # Therefore on post-merge we will execute the
     # compatibility check as well (TODO: alerting).
-    branches: ["main"]
+    branches: [ "main" ]
+
+permissions:
+  contents: read
 
 env:
   CARGO_TERM_COLOR: always
@@ -21,18 +24,27 @@ env:
   SCCACHE_GHA_ENABLED: "true"
   RUST_BACKTRACE: "1"
   SQLX_OFFLINE: true,
+  # github.base_ref -> github.head_ref for pull_request
+  BASE: ${{ github.event.pull_request.base.sha || github.event.before }}
+  # github.event.before -> github.event.after for push
+  HEAD: ${{ github.event.pull_request.head.sha || github.event.after }}
 
 jobs:
   compatibility:
     runs-on: [ubuntu-22.04-github-hosted-16core]
     steps:
-      # github.base_ref -> github.head_ref for pull_request
-      # github.event.before -> github.event.after for push
       - uses: mozilla-actions/[email protected]
 
+      # before
       - uses: actions/checkout@v4
         with:
-          ref: ${{ github.base_ref || github.event.before }}
+          ref: ${{ env.BASE }}
           path: before
+          fetch-depth: 0 # fetches all branches and tags, which is needed to compute the LCA.
+      - name: checkout LCA
+        run:
+          git checkout $(git merge-base $BASE $HEAD)
+        working-directory: ./before
       - name: compile before
         run: cargo check --all-targets
         working-directory: ./before/
@@ -41,9 +53,11 @@ jobs:
           perl -ne 'print "$1\n" if /PROTOBUF_DESCRIPTOR="(.*)"/'
             `find ./before/target/debug/build/*/output`
             | xargs cat > ./before.binpb
+      # after
       - uses: actions/checkout@v4
         with:
-          ref: ${{ github.head_ref || github.event.after }}
+          ref: ${{ env.HEAD }}
           path: after
       - name: compile after
         run: cargo check --all-targets
@@ -53,8 +67,10 @@ jobs:
           perl -ne 'print "$1\n" if /PROTOBUF_DESCRIPTOR="(.*)"/'
             `find ./after/target/debug/build/*/output`
             | xargs cat > ./after.binpb
+      # compare
       - uses: bufbuild/buf-setup-action@v1
+        with:
+          github_token: ${{ github.token }}
       - name: buf breaking
-        run: buf breaking './after.binpb' --against './before.binpb' --config '{"version":"v1","breaking":{"use":["WIRE_JSON"]}}' --error-format 'github-actions'
+        run: buf breaking './after.binpb' --against './before.binpb' --config '{"version":"v1","breaking":{"use":["WIRE_JSON","WIRE"]}}' --error-format 'github-actions'
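Net effect of the workflow changes: `BASE`/`HEAD` now pin the exact commits being compared (PR base/head SHAs, or `github.event.before`/`after` on push), the `before` tree is rewound to the merge-base of the two, and buf then diffs the generated descriptors with the stricter `WIRE` rule set added alongside `WIRE_JSON`. For illustration only, here is a minimal Rust sketch of the same LCA lookup the `checkout LCA` step performs via `git merge-base` (it assumes `git` is on `PATH` and both commits are present locally; the refs in `main` are placeholders):

```rust
use std::process::Command;

/// Resolves the merge-base (lowest common ancestor) of two commits,
/// mirroring the workflow's `git checkout $(git merge-base $BASE $HEAD)`.
fn merge_base(repo: &str, base: &str, head: &str) -> std::io::Result<String> {
    let output = Command::new("git")
        .args(["merge-base", base, head])
        .current_dir(repo)
        .output()?;
    if !output.status.success() {
        return Err(std::io::Error::new(
            std::io::ErrorKind::Other,
            String::from_utf8_lossy(&output.stderr).into_owned(),
        ));
    }
    Ok(String::from_utf8_lossy(&output.stdout).trim().to_owned())
}

fn main() -> std::io::Result<()> {
    // Placeholder refs; the workflow passes the SHAs from its BASE/HEAD env vars.
    let lca = merge_base(".", "HEAD~1", "HEAD")?;
    println!("comparing against merge-base {lca}");
    Ok(())
}
```

Comparing against the merge-base rather than the raw base tip avoids flagging descriptor changes that landed on `main` after the branch diverged.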
1 change: 1 addition & 0 deletions Cargo.lock

Some generated files are not rendered by default.

2 changes: 1 addition & 1 deletion core/bin/external_node/src/init.rs
@@ -98,7 +98,7 @@ pub(crate) async fn ensure_storage_initialized(
         Box::new(main_node_client.for_component("snapshot_recovery")),
         blob_store,
     );
-    app_health.insert_component(snapshots_applier_task.health_check());
+    app_health.insert_component(snapshots_applier_task.health_check())?;
 
     let recovery_started_at = Instant::now();
     let stats = snapshots_applier_task
34 changes: 11 additions & 23 deletions core/bin/external_node/src/main.rs
@@ -166,7 +166,7 @@ async fn run_tree(
         .with_recovery_pool(recovery_pool);
 
     let tree_reader = Arc::new(metadata_calculator.tree_reader());
-    app_health.insert_component(metadata_calculator.tree_health_check());
+    app_health.insert_component(metadata_calculator.tree_health_check())?;
 
     if let Some(api_config) = api_config {
         let address = (Ipv4Addr::UNSPECIFIED, api_config.port).into();
@@ -201,7 +201,7 @@ async fn run_core(
 ) -> anyhow::Result<SyncState> {
     // Create components.
     let sync_state = SyncState::default();
-    app_health.insert_custom_component(Arc::new(sync_state.clone()));
+    app_health.insert_custom_component(Arc::new(sync_state.clone()))?;
     let (action_queue_sender, action_queue) = ActionQueue::new();
 
     let (persistence, miniblock_sealer) = StateKeeperPersistence::new(
@@ -299,18 +299,6 @@ async fn run_core(
         task_handles.push(tokio::spawn(db_pruner.run(stop_receiver.clone())));
     }
 
-    let reorg_detector = ReorgDetector::new(main_node_client.clone(), connection_pool.clone());
-    app_health.insert_component(reorg_detector.health_check().clone());
-    task_handles.push(tokio::spawn({
-        let stop = stop_receiver.clone();
-        async move {
-            reorg_detector
-                .run(stop)
-                .await
-                .context("reorg_detector.run()")
-        }
-    }));
-
     let sk_handle = task::spawn(state_keeper.run());
     let fee_params_fetcher_handle =
         tokio::spawn(fee_params_fetcher.clone().run(stop_receiver.clone()));
@@ -359,7 +347,7 @@ async fn run_core(
         .context("cannot initialize consistency checker")?
         .with_diamond_proxy_addr(diamond_proxy_addr);
 
-    app_health.insert_component(consistency_checker.health_check().clone());
+    app_health.insert_component(consistency_checker.health_check().clone())?;
     let consistency_checker_handle = tokio::spawn(consistency_checker.run(stop_receiver.clone()));
 
     let batch_status_updater = BatchStatusUpdater::new(
@@ -369,14 +357,14 @@
         .await
         .context("failed to build a connection pool for BatchStatusUpdater")?,
     );
-    app_health.insert_component(batch_status_updater.health_check());
+    app_health.insert_component(batch_status_updater.health_check())?;
 
     let commitment_generator_pool = singleton_pool_builder
         .build()
         .await
         .context("failed to build a commitment_generator_pool")?;
     let commitment_generator = CommitmentGenerator::new(commitment_generator_pool);
-    app_health.insert_component(commitment_generator.health_check());
+    app_health.insert_component(commitment_generator.health_check())?;
     let commitment_generator_handle = tokio::spawn(commitment_generator.run(stop_receiver.clone()));
 
     let updater_handle = task::spawn(batch_status_updater.run(stop_receiver.clone()));
@@ -533,7 +521,7 @@ async fn run_api(
             .run(stop_receiver.clone())
             .await
             .context("Failed initializing HTTP JSON-RPC server")?;
-        app_health.insert_component(http_server_handles.health_check);
+        app_health.insert_component(http_server_handles.health_check)?;
         task_handles.extend(http_server_handles.tasks);
     }
 
@@ -562,7 +550,7 @@ async fn run_api(
             .run(stop_receiver.clone())
            .await
            .context("Failed initializing WS JSON-RPC server")?;
-        app_health.insert_component(ws_server_handles.health_check);
+        app_health.insert_component(ws_server_handles.health_check)?;
         task_handles.extend(ws_server_handles.tasks);
     }
 
@@ -674,7 +662,7 @@ async fn init_tasks(
     if let Some(port) = config.optional.prometheus_port {
         let (prometheus_health_check, prometheus_health_updater) =
             ReactiveHealthCheck::new("prometheus_exporter");
-        app_health.insert_component(prometheus_health_check);
+        app_health.insert_component(prometheus_health_check)?;
         task_handles.push(tokio::spawn(async move {
             prometheus_health_updater.update(HealthStatus::Ready.into());
             let result = PrometheusExporterConfig::pull(port)
@@ -887,10 +875,10 @@ async fn run_node(
     ));
     app_health.insert_custom_component(Arc::new(MainNodeHealthCheck::from(
         main_node_client.clone(),
-    )));
+    )))?;
     app_health.insert_custom_component(Arc::new(ConnectionPoolHealthCheck::new(
         connection_pool.clone(),
-    )));
+    )))?;
 
     // Start the health check server early into the node lifecycle so that its health can be monitored from the very start.
     let healthcheck_handle = HealthCheckHandle::spawn_server(
@@ -983,7 +971,7 @@ async fn run_node(
         tracing::info!("Rollback successfully completed");
     }
 
-    app_health.insert_component(reorg_detector.health_check().clone());
+    app_health.insert_component(reorg_detector.health_check().clone())?;
     task_handles.push(tokio::spawn({
         let stop = stop_receiver.clone();
         async move {
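Note that the reorg detector itself is not removed: as the `@@ -983,7 +971,7 @@` hunk shows, it is still wired up in `run_node`; the duplicate registration in `run_core` is dropped, which the new duplicate-component error (see `core/lib/health_check` below) would otherwise turn into a startup failure. The remaining hunks in this file are mechanical: every `insert_component`/`insert_custom_component` call now ends in `?`, propagating the new `AppHealthCheckError` into the surrounding `anyhow::Result`.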
5 changes: 1 addition & 4 deletions core/bin/external_node/src/version_sync_task.rs
@@ -13,10 +13,7 @@ pub async fn get_l1_batch_remote_protocol_version(
     main_node_client: &BoxedL2Client,
     l1_batch_number: L1BatchNumber,
 ) -> anyhow::Result<Option<ProtocolVersionId>> {
-    let Some((miniblock, _)) = main_node_client
-        .get_miniblock_range(l1_batch_number)
-        .await?
-    else {
+    let Some((miniblock, _)) = main_node_client.get_l2_block_range(l1_batch_number).await? else {
         return Ok(None);
     };
     let sync_block = main_node_client
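(The rename is one instance of the broader miniblock → L2-block terminology migration in this merge; the same pattern shows up below in `L2BlockEnv::from_miniblock_data` → `from_l2_block_data`.)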
6 changes: 0 additions & 6 deletions core/lib/config/src/configs/fri_witness_generator.rs
@@ -15,16 +15,10 @@ pub struct FriWitnessGeneratorConfig {
     pub node_generation_timeout_in_secs: Option<u16>,
     /// Max attempts for generating witness
     pub max_attempts: u32,
-    // Percentage of the blocks that gets proven in the range [0.0, 1.0]
-    // when 0.0 implies all blocks are skipped and 1.0 implies all blocks are proven.
-    pub blocks_proving_percentage: Option<u8>,
-    pub dump_arguments_for_blocks: Vec<u32>,
     // Optional l1 batch number to process block until(inclusive).
     // This parameter is used in case of performing circuit upgrades(VK/Setup keys),
     // to not let witness-generator pick new job and finish all the existing jobs with old circuit.
     pub last_l1_batch_to_process: Option<u32>,
-    // Force process block with specified number when sampling is enabled.
-    pub force_process_block: Option<u32>,
 
     // whether to write to public GCS bucket for https://github.com/matter-labs/era-boojum-validator-cli
     pub shall_save_to_public_bucket: bool,
3 changes: 0 additions & 3 deletions core/lib/config/src/testonly.rs
@@ -542,10 +542,7 @@ impl Distribution<configs::FriWitnessGeneratorConfig> for EncodeDist {
             node_generation_timeout_in_secs: self.sample(rng),
             scheduler_generation_timeout_in_secs: self.sample(rng),
             max_attempts: self.sample(rng),
-            blocks_proving_percentage: self.sample(rng),
-            dump_arguments_for_blocks: self.sample_collect(rng),
             last_l1_batch_to_process: self.sample(rng),
-            force_process_block: self.sample(rng),
             shall_save_to_public_bucket: self.sample(rng),
         }
     }
9 changes: 0 additions & 9 deletions core/lib/env_config/src/fri_witness_generator.rs
@@ -23,10 +23,7 @@ mod tests {
             node_generation_timeout_in_secs: Some(800u16),
             scheduler_generation_timeout_in_secs: Some(900u16),
             max_attempts: 4,
-            blocks_proving_percentage: Some(30),
-            dump_arguments_for_blocks: vec![2, 3],
             last_l1_batch_to_process: None,
-            force_process_block: Some(1),
             shall_save_to_public_bucket: true,
         }
     }
@@ -41,9 +38,6 @@
             FRI_WITNESS_NODE_GENERATION_TIMEOUT_IN_SECS=800
             FRI_WITNESS_SCHEDULER_GENERATION_TIMEOUT_IN_SECS=900
             FRI_WITNESS_MAX_ATTEMPTS=4
-            FRI_WITNESS_DUMP_ARGUMENTS_FOR_BLOCKS="2,3"
-            FRI_WITNESS_BLOCKS_PROVING_PERCENTAGE="30"
-            FRI_WITNESS_FORCE_PROCESS_BLOCK="1"
             FRI_WITNESS_SHALL_SAVE_TO_PUBLIC_BUCKET=true
         "#;
         lock.set_env(config);
@@ -64,9 +58,6 @@
             FRI_WITNESS_BASIC_GENERATION_TIMEOUT_IN_SECS=100
             FRI_WITNESS_SCHEDULER_GENERATION_TIMEOUT_IN_SECS=200
             FRI_WITNESS_MAX_ATTEMPTS=4
-            FRI_WITNESS_DUMP_ARGUMENTS_FOR_BLOCKS="2,3"
-            FRI_WITNESS_BLOCKS_PROVING_PERCENTAGE="30"
-            FRI_WITNESS_FORCE_PROCESS_BLOCK="1"
             FRI_WITNESS_SHALL_SAVE_TO_PUBLIC_BUCKET=true
         "#;
         lock.set_env(config);
1 change: 1 addition & 0 deletions core/lib/health_check/Cargo.toml
@@ -16,6 +16,7 @@
 async-trait.workspace = true
 futures.workspace = true
 serde = { workspace = true, features = ["derive"] }
 serde_json.workspace = true
+thiserror.workspace = true
 tokio = { workspace = true, features = ["sync", "time"] }
 tracing.workspace = true
34 changes: 27 additions & 7 deletions core/lib/health_check/src/lib.rs
@@ -90,6 +90,14 @@ impl From<HealthStatus> for Health {
     }
 }
 
+#[derive(Debug, thiserror::Error)]
+#[non_exhaustive]
+pub enum AppHealthCheckError {
+    /// Component is redefined.
+    #[error("cannot insert health check for component `{0}`: it is redefined")]
+    RedefinedComponent(&'static str),
+}
+
 /// Application health check aggregating health from multiple components.
 #[derive(Debug)]
 pub struct AppHealthCheck {
@@ -132,24 +140,36 @@ impl AppHealthCheck {
     }
 
     /// Inserts health check for a component.
-    pub fn insert_component(&self, health_check: ReactiveHealthCheck) {
-        self.insert_custom_component(Arc::new(health_check));
+    ///
+    /// # Errors
+    ///
+    /// Returns an error if the component with the same name is already defined.
+    pub fn insert_component(
+        &self,
+        health_check: ReactiveHealthCheck,
+    ) -> Result<(), AppHealthCheckError> {
+        self.insert_custom_component(Arc::new(health_check))
     }
 
     /// Inserts a custom health check for a component.
-    pub fn insert_custom_component(&self, health_check: Arc<dyn CheckHealth>) {
+    ///
+    /// # Errors
+    ///
+    /// Returns an error if the component with the same name is already defined.
+    pub fn insert_custom_component(
+        &self,
+        health_check: Arc<dyn CheckHealth>,
+    ) -> Result<(), AppHealthCheckError> {
         let health_check_name = health_check.name();
         let mut guard = self
             .components
             .lock()
             .expect("`AppHealthCheck` is poisoned");
         if guard.iter().any(|check| check.name() == health_check_name) {
-            tracing::warn!(
-                "Health check with name `{health_check_name}` is redefined; only the last mention \
-                 will be present in `/health` endpoint output"
-            );
+            return Err(AppHealthCheckError::RedefinedComponent(health_check_name));
         }
         guard.push(health_check);
+        Ok(())
     }
 
     /// Checks the overall application health. This will query all component checks concurrently.
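Downstream, a duplicate registration is now a hard error instead of a `tracing::warn!` about the redefinition. A minimal sketch of the new contract (assuming the crate's `zksync_health_check` package name; the component name is illustrative):

```rust
use zksync_health_check::{AppHealthCheck, AppHealthCheckError, ReactiveHealthCheck};

fn main() -> Result<(), AppHealthCheckError> {
    let app_health = AppHealthCheck::default();
    let (health_check, _health_updater) = ReactiveHealthCheck::new("commitment_generator");

    // First insertion succeeds; the external-node call sites above forward
    // this `Result` with `?` into their `anyhow::Result` return types.
    app_health.insert_component(health_check.clone())?;

    // Re-registering the same component name is now rejected up front.
    assert!(matches!(
        app_health.insert_component(health_check),
        Err(AppHealthCheckError::RedefinedComponent("commitment_generator"))
    ));
    Ok(())
}
```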
14 changes: 14 additions & 0 deletions core/lib/health_check/src/tests.rs
@@ -134,3 +134,17 @@
         HealthStatus::Affected
     );
 }
+
+#[test]
+fn adding_duplicate_component() {
+    let checks = AppHealthCheck::default();
+    let (health_check, _health_updater) = ReactiveHealthCheck::new("test");
+    checks.insert_component(health_check.clone()).unwrap();
+
+    let err = checks.insert_component(health_check.clone()).unwrap_err();
+    assert_matches!(err, AppHealthCheckError::RedefinedComponent("test"));
+    let err = checks
+        .insert_custom_component(Arc::new(health_check))
+        .unwrap_err();
+    assert_matches!(err, AppHealthCheckError::RedefinedComponent("test"));
+}
2 changes: 1 addition & 1 deletion core/lib/multivm/src/interface/types/inputs/l2_block.rs
@@ -9,7 +9,7 @@ pub struct L2BlockEnv {
 }
 
 impl L2BlockEnv {
-    pub fn from_miniblock_data(miniblock_execution_data: &L2BlockExecutionData) -> Self {
+    pub fn from_l2_block_data(miniblock_execution_data: &L2BlockExecutionData) -> Self {
         Self {
             number: miniblock_execution_data.number.0,
             timestamp: miniblock_execution_data.timestamp,
7 changes: 3 additions & 4 deletions core/lib/protobuf_config/src/proto/config/prover.proto
@@ -70,16 +70,15 @@ message ProverGateway {
 
 message WitnessGenerator {
   optional uint32 generation_timeout_in_secs = 1; // required;
-  optional uint32 max_attempts = 2; // required
-  optional uint32 blocks_proving_percentage = 3; // optional; 0-100
-  repeated uint32 dump_arguments_for_blocks = 4;
+  optional uint32 max_attempts = 2; // required;
   optional uint32 last_l1_batch_to_process = 5; // optional
-  optional uint32 force_process_block = 6; // optional
   optional bool shall_save_to_public_bucket = 7; // required
   optional uint32 basic_generation_timeout_in_secs = 8; // optional;
   optional uint32 leaf_generation_timeout_in_secs = 9; // optional;
   optional uint32 node_generation_timeout_in_secs = 10; // optional;
   optional uint32 scheduler_generation_timeout_in_secs = 11; // optional;
+  reserved 3, 4, 6;
+  reserved "dump_arguments_for_blocks", "force_process_block", "blocks_proving_percentage";
 }
 
 message WitnessVectorGenerator {
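The `reserved` statements are what make the removal safe: protoc will reject any future field that tries to reuse tags 3, 4, or 6 or the removed names, so old serialized configs can never be reinterpreted under a later schema. This complements the wire-compatibility checking that the updated `protobuf.yaml` workflow enforces with buf.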